column              dtype           value stats
------------------  --------------  -------------------
repo_name           string          lengths 5 to 114
repo_url            string          lengths 24 to 133
snapshot_id         string          length 40
revision_id         string          length 40
directory_id        string          length 40
branch_name         string          209 distinct values
visit_date          timestamp[ns]
revision_date       timestamp[ns]
committer_date      timestamp[ns]
github_id           int64           9.83k to 683M
star_events_count   int64           0 to 22.6k
fork_events_count   int64           0 to 4.15k
gha_license_id      string          17 distinct values
gha_created_at      timestamp[ns]
gha_updated_at      timestamp[ns]
gha_pushed_at       timestamp[ns]
gha_language        string          115 distinct values
files               list            lengths 1 to 13.2k
num_files           int64           1 to 13.2k
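The records below follow this schema, with each row's `files` column holding per-file metadata and the full source text. As a quick orientation, here is a minimal sketch of iterating rows of this shape with the Hugging Face `datasets` library; the identifier `owner/dataset-name` is a placeholder, since the actual dataset name does not appear in this excerpt.

```python
# Minimal sketch of streaming rows with the schema above.
# NOTE: "owner/dataset-name" is a hypothetical placeholder -- the real
# dataset identifier is not given in this excerpt.
from datasets import load_dataset

ds = load_dataset("owner/dataset-name", split="train", streaming=True)

for row in ds.take(2):
    print(row["repo_name"], row["repo_url"])
    print("  branch:", row["branch_name"], "files:", row["num_files"])
    for f in row["files"]:
        # each file record carries path, language, license and full text
        print("   ", f["path"], f["language"], f["length_bytes"], "bytes")
```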
borgstrom/django-fujita
https://github.com/borgstrom/django-fujita
1039e6ca205a1cfba5abed92a67176ae532cb200
dc8be3bfc96dae431957e01a9e67f26e72eddf1c
7ca45c3b9c993dead435b48831dab826dbd9fceb
refs/heads/master
2021-01-15T08:26:11.216567
2015-05-27T15:10:58
2015-05-27T15:10:58
14289813
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.6032786965370178, "alphanum_fraction": 0.6087431907653809, "avg_line_length": 35.599998474121094, "blob_id": "c32d3a93a35dcd07f7630950c71f1c779b3ad991", "content_id": "d059d60a9a51c2675dceddbe169c5389f11e9af6", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 915, "license_type": "permissive", "max_line_length": 93, "num_lines": 25, "path": "/setup.py", "repo_name": "borgstrom/django-fujita", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\nfrom setuptools import setup\n\nsetup(name='django-fujita',\n packages=['fujita'],\n scripts=['fujita.py'],\n include_package_data=True,\n version='0.3',\n license=\"Apache License, Version 2.0\",\n description='A web based console for Django\\'s development server built using Tornado',\n long_description=open('README.rst').read(),\n author='Evan Borgstrom',\n author_email='[email protected]',\n url='https://github.com/borgstrom/django-fujita',\n classifiers=[\n 'Development Status :: 3 - Alpha',\n 'Framework :: Django',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: Apache Software License',\n 'Natural Language :: English',\n 'Topic :: Software Development :: Libraries',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n ],\n install_requires=['setuptools'])\n" }, { "alpha_fraction": 0.6509971618652344, "alphanum_fraction": 0.6538461446762085, "avg_line_length": 27.632652282714844, "blob_id": "b15bc67e94b8ac3d54f6fd63fe5958f7563b6169", "content_id": "ff37b74bee05456973c0c02c62beacdc055fd7d2", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1404, "license_type": "permissive", "max_line_length": 78, "num_lines": 49, "path": "/fujita/main.py", "repo_name": "borgstrom/django-fujita", "src_encoding": "UTF-8", "text": "import inspect\nimport logging\nimport os\n\nfrom tornado import ioloop, web\nfrom tornado.options import define, options, parse_command_line\n\nfrom .handlers import LogHandler, IndexHandler, StatusHandler, StartHandler, \\\n StopHandler\nfrom .runner import DjangoRunner\n\ndefine(\"port\", type=int, default=5665, help=\"The port to run on\",\n metavar=\"PORT\")\ndefine(\"command\", type=str, help=\"The command to run to start Django\",\n metavar=\"COMMANDLINE\")\n\ndef main():\n parse_command_line()\n if not options.command:\n print \"ERROR: You must specify a command. 
See --help\"\n return\n\n logging.info(\"Django Fujita - Starting up\")\n\n module_dir = os.path.dirname(os.path.abspath(\n inspect.getfile(inspect.currentframe())\n ))\n\n handlers = [\n (r\"/log\", LogHandler),\n (r\"/status\", StatusHandler),\n (r\"/start\", StartHandler),\n (r\"/stop\", StopHandler),\n (r\"/\", IndexHandler),\n ]\n settings = dict(\n debug=True,\n template_path=os.path.join(module_dir, \"templates\"),\n )\n\n logging.info(\"Application listening on port %d\" % options.port)\n application = web.Application(handlers, **settings)\n application.listen(options.port)\n\n # create the Django runner\n application.django = DjangoRunner(options.command)\n\n logging.info(\"Starting main IO loop...\")\n ioloop.IOLoop.instance().start()\n\n" }, { "alpha_fraction": 0.7121211886405945, "alphanum_fraction": 0.7173458933830261, "avg_line_length": 37.279998779296875, "blob_id": "a523304176e553ff022914f38d6bf710a202cb42", "content_id": "2f61d003dec4face10d6dd2c76263f251493b462", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 1914, "license_type": "permissive", "max_line_length": 140, "num_lines": 50, "path": "/README.rst", "repo_name": "borgstrom/django-fujita", "src_encoding": "UTF-8", "text": "Django-Fujita\n=============\n\nThis is a Tornado server built to run the Django_ builtin 'runserver' command.\n\nThis uses WebSockets, so modern browsers only.\n\nWhy on earth would one want to do such a crazy thing?\n-----------------------------------------------------\nThe main reason this was built was for the use case of a team environment where\nthe developers are using Vagrant_ for local development and want to give the\nless terminal savvy users an easy way to run and control the Django_ development\nserver.\n\nHow do I use this?\n------------------\nFor testing purposes:\n\n#. Clone this repository.\n#. Create a new virtualenv\n#. Install ``Django`` & ``tornado`` via pip\n#. Run the fujita.py script with the command to run the testproject (see below)\n#. Visit the Fujita Console in your browser\n\nTo distribute a Fujita Console in your project simply list ``django-fujita`` in\nyour requirements and then add a ``fujita.py`` script to the root.\n\nRunning the fujita.py script\n----------------------------\nYou need to tell the script how to run your development server through the\n``--command`` option.\n\nIf you're following the steps above then you'll want to use something similar\nto the following command::\n\n ./fujita.py --command \"/home/user/.virtualenvs/fujita/bin/python /home/user/projects/django-fujita/testproject/manage.py runserver 0:8000\"\n\nThe above command assumes you have a virtualenv named ``fujita`` in the\n``.virtualenvs`` directory in your home. It also assumes that you have a\nprojects directory in your home and the ``django-fujita`` repository has been\nchecked out there. Adjust for your setup.\n\nThe ``command`` is run under ``/bin/sh`` so you can set environment variables\nby prefixing them in front of the command::\n\n ./fujita.py --command \"DJANGO_SETTINGS_MODULE=project.settings.dev django-admin.py runserver 0:8000\"\n\n\n.. _Django: http://djangoproject.com/\n.. 
_Vagrant: http://vagrantup.com/\n" }, { "alpha_fraction": 0.7358490824699402, "alphanum_fraction": 0.7358490824699402, "avg_line_length": 16.66666603088379, "blob_id": "de846178127e68d62df15f54cea545017f5cf388", "content_id": "e0f98193f9cad5ee9dfc6e78e7f3925c5cf126b8", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 53, "license_type": "permissive", "max_line_length": 23, "num_lines": 3, "path": "/fujita.py", "repo_name": "borgstrom/django-fujita", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\nfrom fujita import main\nmain()\n" }, { "alpha_fraction": 0.6336400508880615, "alphanum_fraction": 0.6364219784736633, "avg_line_length": 35.5078125, "blob_id": "98596f59525ce8c750b85df92fa0f85cafa41819", "content_id": "b39d272aa44a84d458c7629a3c6f866266fb65d7", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4673, "license_type": "permissive", "max_line_length": 197, "num_lines": 128, "path": "/fujita/runner.py", "repo_name": "borgstrom/django-fujita", "src_encoding": "UTF-8", "text": "import logging\nimport uuid\n\nfrom tornado import ioloop, process\n\nclass DjangoRunner(object):\n \"\"\"\n The DjangoRunner interface. It launches the django runserver command, or\n any command really, and provides a buffer for the stdout & stderr that is\n generated by the process.\n\n Waiters will receive lists with three items, the first is a UUID to id this\n specific message, the second is the file handle the message came in on (0\n for stdout, 1 for stderr) and the last is the line received.\n \"\"\"\n\n STOPPED = 0\n RUNNING = 1\n\n def __init__(self, command, **process_kwargs):\n logging.info(\"Initializing Django Runner\")\n\n # setup our variables used to track callbacks and manage the cache\n self.line_waiters = set()\n self.cache = []\n self.cache_size = 500\n\n self.status_waiters = set()\n self.set_status(DjangoRunner.STOPPED, \"Initializing\")\n\n # prepare the command & kwargs for the subprocess\n if isinstance(command, list):\n command = \" \".join(command)\n\n # Set shell, stdout & stderr in our kwargs to ensure that we use the\n # proper values. We need to use the shell to ensure that we maintain\n # streams to stdout & stderr so that as the django server restarts\n # our subprocess stays connected\n if not process_kwargs:\n process_kwargs = dict()\n\n process_kwargs.update(dict(\n shell=True,\n stdout=process.Subprocess.STREAM,\n stderr=process.Subprocess.STREAM\n ))\n\n self.process = None\n self.process_command = command\n self.process_kwargs = process_kwargs\n self.start()\n\n def start(self):\n if not self.process:\n # start the process and begin reading our streams\n logging.info(\"Starting subprocess: %s\" % self.process_command)\n self.process = process.Subprocess(self.process_command, **self.process_kwargs)\n self.read_line(self.process.stdout, self.handle_stdout)\n self.read_line(self.process.stderr, self.handle_stderr)\n self.set_status(DjangoRunner.RUNNING, \"Django is running\")\n\n # setup an exit callback\n self.process.set_exit_callback(self.process_exit)\n\n # XXX TODO\n self.send_line_to_waiters(0, \"Starting Django Builtin Server\")\n self.send_line_to_waiters(0, \"For some reason we never get the initial runserver output lines when we start the sub process. 
This is a known oddity, but your server is indeed running.\")\n self.send_line_to_waiters(0, \"\")\n\n def stop(self):\n if self.process:\n self.process.proc.terminate()\n\n def process_exit(self, retcode):\n logging.info(\"Django exited with return code %d\" % retcode)\n self.set_status(DjangoRunner.STOPPED, \"Django is not running (return code %d)\" % retcode)\n self.process = None\n\n def set_status(self, code, status):\n self.status = status\n self.status_code = code\n\n for callback in self.status_waiters:\n callback(code, status)\n\n def add_line_waiter(self, callback):\n logging.debug(\"Adding new line_waiter %s. Sending %d cache items\" % (callback, len(self.cache)))\n for id, fd, line in self.cache:\n callback(id, fd, line)\n\n self.line_waiters.add(callback)\n\n def remove_line_waiter(self, callback):\n logging.debug(\"Removing line_waiter %s\" % callback)\n self.line_waiters.remove(callback)\n\n def send_line_to_waiters(self, fd, line):\n # generate an id and add it to the cache\n id = str(uuid.uuid4())\n self.cache.append((id, fd, line))\n\n # send it to the waiters\n for callback in self.line_waiters:\n callback(id, fd, line)\n\n # trim cache, if necessary\n if len(self.cache) > self.cache_size:\n self.cache = self.cache[-self.cache_size:]\n\n def read_line(self, stream, callback):\n stream.read_until(\"\\n\", callback)\n\n def handle_stdout(self, line):\n self.send_line_to_waiters(0, line)\n self.read_line(self.process.stdout, self.handle_stdout)\n\n def handle_stderr(self, line):\n self.send_line_to_waiters(1, line)\n self.read_line(self.process.stderr, self.handle_stderr)\n\n def add_status_waiter(self, callback):\n logging.debug(\"Adding new status waiter %s\" % callback)\n callback(self.status_code, self.status)\n self.status_waiters.add(callback)\n\n def remove_status_waiter(self, callback):\n logging.debug(\"Removing status waiter %s\" % callback)\n self.status_waiters.remove(callback)\n" }, { "alpha_fraction": 0.6272084712982178, "alphanum_fraction": 0.6272084712982178, "avg_line_length": 24.727272033691406, "blob_id": "157c37503fe1f79c91303940c0add8ff8e481848", "content_id": "1fad3118df6cd2221dc9578f19d8b72f52ede4d9", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1132, "license_type": "permissive", "max_line_length": 69, "num_lines": 44, "path": "/fujita/handlers.py", "repo_name": "borgstrom/django-fujita", "src_encoding": "UTF-8", "text": "import logging\n\nfrom tornado import web, websocket\n\nclass IndexHandler(web.RequestHandler):\n def get(self):\n self.render(\"index.html\")\n\nclass LogHandler(websocket.WebSocketHandler):\n def open(self):\n self.application.django.add_line_waiter(self.new_line)\n\n def on_close(self):\n self.application.django.remove_line_waiter(self.new_line)\n\n def new_line(self, id, fd, line):\n self.write_message({\n 'id': id,\n 'fd': fd,\n 'line': line\n })\n\nclass StatusHandler(websocket.WebSocketHandler):\n def open(self):\n self.application.django.add_status_waiter(self.new_status)\n\n def on_close(self):\n self.application.django.remove_status_waiter(self.new_status)\n\n def new_status(self, code, status):\n self.write_message({\n 'code': code,\n 'status': status\n })\n\nclass StartHandler(web.RequestHandler):\n def post(self):\n self.application.django.start()\n self.write(\"ok\")\n\nclass StopHandler(web.RequestHandler):\n def post(self):\n self.application.django.stop()\n self.write(\"ok\")\n" } ]
6
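Each element of a row's `files` list pairs per-file metadata (`path`, `language`, `license_type`, size and line statistics) with the complete source in `text`, so a record can be written back out as a working tree. Below is a minimal sketch of that round trip, assuming `row` is one record shaped like those shown here; `materialize` and the `checkout` destination are illustrative names, not part of the dataset.

```python
import os

def materialize(row, dest="checkout"):
    """Write one row's files back to disk, mirroring the recorded layout."""
    for f in row["files"]:
        if f["is_generated"] or f["is_vendor"]:
            continue  # skip machine-generated and vendored files
        # file paths are recorded with a leading slash, e.g. "/setup.py"
        target = os.path.join(dest, row["repo_name"], f["path"].lstrip("/"))
        os.makedirs(os.path.dirname(target), exist_ok=True)
        with open(target, "w", encoding=f.get("src_encoding", "UTF-8")) as fh:
            fh.write(f["text"])
```

Applied to the borgstrom/django-fujita record above, this writes out all six files (none are flagged as generated or vendored), matching its `num_files` of 6.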
1000sang/IoT_Project
https://github.com/1000sang/IoT_Project
21bb3797f0673670009662a1f55d7b71ddd2a19c
3fb70b634fbb3a797b982479b31b0d7d60c4d413
b55322272da46f17349ef58624758b8b8253fc92
refs/heads/master
2023-04-14T02:22:54.248374
2020-12-10T14:52:26
2020-12-10T14:52:26
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6144578456878662, "alphanum_fraction": 0.6144578456878662, "avg_line_length": 23.899999618530273, "blob_id": "82141f85822e1e794431ef368b05ea04ceff05a9", "content_id": "86061eaac04ee38e7719cde3045e84e0d631b29a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 249, "license_type": "no_license", "max_line_length": 57, "num_lines": 10, "path": "/front/containers/Topbar/TopbarLink.js", "repo_name": "1000sang/IoT_Project", "src_encoding": "UTF-8", "text": "import React from 'react';\nimport naverBlog from '@iso/assets/images/naverBlog.png';\n\nexport default function TopbarLink() {\n return (\n <div className=\"isoImgWrapper\">\n <img alt=\"blog\" src={naverBlog} />\n </div>\n );\n}\n" }, { "alpha_fraction": 0.49505892395973206, "alphanum_fraction": 0.49543899297714233, "avg_line_length": 30.508981704711914, "blob_id": "81169506a1a6a9b0dae979a9d1689b050753f3cf", "content_id": "a41e51d422b158330535ecf42fdcf594eddfe41f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 5262, "license_type": "no_license", "max_line_length": 78, "num_lines": 167, "path": "/front/pages/index.js", "repo_name": "1000sang/IoT_Project", "src_encoding": "UTF-8", "text": "import React, { useState, useCallback, useEffect } from 'react';\nimport Link from 'next/link';\nimport Head from 'next/head';\nimport { useDispatch, useSelector } from 'react-redux';\nimport { useRouter } from 'next/router';\nimport Input from '@iso/components/uielements/input';\nimport Checkbox from '@iso/components/uielements/checkbox';\nimport Button from '@iso/components/uielements/button';\nimport IntlMessages from '@iso/components/utility/intlMessages';\nimport jwtConfig from '@iso/config/jwt.config';\nimport authActions from '../authentication/actions';\nimport socketActions from '../redux/socket/actions'\nimport loadActions from '../redux/load/actions';\nimport SignInStyleWrapper from '../styled/SignIn.styles';\n\nimport ConnectedLine from '../containers/ConnectedLine';\n\nconst { login } = authActions;\nconst { socketConnect } = socketActions;\nconst { loadUser, loadData } = loadActions;\n\nexport default function SignInPage(props) {\n const dispatch = useDispatch();\n const router = useRouter();\n\n const { idToken, err, userData } = useSelector((state) => state.Auth)\n\n const [email, setEmail] = useState('');\n const [password, setPassword] = useState('');\n\n const handleLogin = useCallback(() => {\n if (email == '' && password == '') {\n const data = {\n email: '[email protected]',\n password: 'a'\n }\n dispatch(login(data));\n } else {\n const data = {\n email: email,\n password: password\n }\n dispatch(login(data));\n }\n\n }, [email, password]);\n\n const handleJWTLogin = () => {\n const { jwtLogin, history } = props;\n const userInfo = {\n username:\n (process.browser && document.getElementById('inputUserName').value) ||\n '',\n password:\n (process.browser && document.getElementById('inpuPassword').value) ||\n '',\n };\n // jwtLogin(history, userInfo);\n };\n\n return (\n <>\n <Head>\n <title>SignIn</title>\n </Head>\n <ConnectedLine />\n <SignInStyleWrapper className=\"isoSignInPage\">\n <div className=\"isoLoginContentWrapper\">\n <div className=\"isoLoginContent\">\n <div className=\"isoLogoWrapper\">\n <Link href=\"/dashboard\">\n <a>\n <IntlMessages id=\"page.signInTitle\" />\n </a>\n </Link>\n </div>\n\n <div className=\"isoSignInForm\">\n <div className=\"isoInputWrapper\">\n <Input\n 
id=\"email\"\n size=\"large\"\n placeholder=\"[email protected]\"\n onChange={useCallback(e => {\n setEmail(e.target.value);\n })}\n />\n </div>\n\n <div className=\"isoInputWrapper\">\n <Input\n id=\"password\"\n size=\"large\"\n type=\"password\"\n placeholder=\"Password\"\n onChange={useCallback(e => {\n setPassword(e.target.value);\n })}\n />\n </div>\n\n {err && <div style={{ color: 'red' }}>{err}</div>}\n\n <div className=\"isoInputWrapper isoLeftRightComponent\">\n <Checkbox>\n <IntlMessages id=\"page.signInRememberMe\" />\n </Checkbox>\n <Button\n type=\"primary\"\n onClick={jwtConfig.enabled ? handleJWTLogin : handleLogin}\n >\n <IntlMessages id=\"page.signInButton\" />\n </Button>\n </div>\n\n <p className=\"isoHelperText\">\n <IntlMessages id=\"page.signInPreview\" />\n </p>\n\n {/* <div className=\"isoInputWrapper isoOtherLogin\">\n <Button\n onClick={handleLogin}\n type=\"primary\"\n className=\"btnFacebook\"\n >\n <IntlMessages id=\"page.signInFacebook\" />\n </Button>\n <Button\n onClick={handleLogin}\n type=\"primary\"\n className=\"btnGooglePlus\"\n >\n <IntlMessages id=\"page.signInGooglePlus\" />\n </Button>\n\n <Button\n onClick={() => Auth0.login(handleLogin)}\n type=\"primary\"\n className=\"btnAuthZero\"\n >\n <IntlMessages id=\"page.signInAuth0\" />\n </Button>\n\n <FirebaseLogin\n history={router}\n login={token => dispatch(login(token))}\n />\n </div> */}\n <div className=\"isoCenterComponent isoHelperWrapper\">\n <Link href=\"/forgotpassword\">\n <div className=\"isoForgotPass\">\n <IntlMessages id=\"page.signInForgotPass\" />\n </div>\n </Link>\n <Link href=\"/signup\">\n <a>\n <IntlMessages id=\"page.signInCreateAccount\" />\n </a>\n </Link>\n </div>\n </div>\n </div>\n </div>\n </SignInStyleWrapper>\n </>\n );\n}\n" }, { "alpha_fraction": 0.5751157999038696, "alphanum_fraction": 0.5751157999038696, "avg_line_length": 22.78740119934082, "blob_id": "d35fd8e1b3c46260992b78882dd93be8d269f9c7", "content_id": "e9ee79e755cd642094bc9f73786d5575125e280d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 3022, "license_type": "no_license", "max_line_length": 69, "num_lines": 127, "path": "/back/service/device.js", "repo_name": "1000sang/IoT_Project", "src_encoding": "UTF-8", "text": "const { Device, User, UserDevice } = require('../models');\nconst { createError } = require('../utils/error/error');\n\nconst Errors = (exports.Errors = {\n BadDeviceDataError: createError('BadDeviceDataError'),\n DuplicateDeviceError: createError('DuplicateDeviceError'),\n FailureCreateDevice: createError('FailureCreateDevice'),\n DeviceIdNotFound: createError('DeviceIdNotFound'),\n UnknownError: createError('UnkownError')\n})\n\nexports.findDeviceByUserId = async (userId) => {\n const result = await UserDevice.findAll({\n where: { userId: userId }\n })\n\n if (!result) {\n throw new Errors.DeviceIdNotFound()\n }\n\n return result\n}\n\nexports.findAllDeviceById = async (userId) => {\n let deviceData = {};\n\n const result = await UserDevice.findAll({\n where: { userId: userId },\n attributes: ['deviceId']\n })\n\n if (!result) {\n throw new Errors.DeviceIdNotFound()\n }\n\n result.map((r, v) => {\n deviceData[v] = r.dataValues.deviceId\n })\n\n return deviceData;\n}\n\nexports.findAllDeviceTopic = async () => {\n try {\n const findAllDeviceTopic = await Device.findAll();\n\n return findAllDeviceTopic;\n } catch (err) {\n console.log(err)\n }\n}\n\nexports.getAllDevice = async (payload) => {\n try {\n const findAll = await 
this.findAllDeviceById(payload.userId);\n\n // findAll.map((v) => {\n // console.log(v)\n // })\n\n return findAll;\n } catch (err) {\n console.log(err)\n }\n}\n\nexports.createDevice = async (payload) => {\n try {\n const createDevice = await Device.create({\n siteCode: payload.siteCode,\n topic: payload.topic\n });\n\n const createUserDevice = await UserDevice.create({\n deviceId: createDevice.deviceId,\n userId: payload.userId\n })\n\n const result = await UserDevice.findAll({\n where: {\n userId: payload.userId\n }\n })\n\n return result\n } catch (err) {\n switch (err.name) {\n case 'ValidationError':\n throw new Errors.BadDeviceDataError()\n default:\n throw new Errors.UnknownError()\n }\n }\n}\n\nexports.addDevice = async (payload) => {\n try {\n const addUserDevice = await UserDevice.create({\n deviceId: payload.deviceId,\n userId: payload.userId\n })\n\n return addUserDevice;\n } catch (err) {\n console.log(err)\n switch (err.name) {\n case 'SequelizeUniqueConstraintError':\n throw new Errors.DuplicateDeviceError()\n default:\n throw new Errors.UnknownError()\n }\n }\n}\n\nexports.findOneByDeviceId = async (deviceId) => {\n const findOneByDeviceId = await Device.findOne({\n where: {\n deviceId: deviceId\n }\n })\n\n if (!findOneByDeviceId) {\n throw new Errors.DeviceIdNotFound()\n }\n\n return findOneByDeviceId\n}\n\n" }, { "alpha_fraction": 0.4555555582046509, "alphanum_fraction": 0.46888887882232666, "avg_line_length": 21.549999237060547, "blob_id": "68a7da8bdf39c2d343051e53da2b1a6a2b9bea8f", "content_id": "cb233e3226afc0dca92be8ea4f806e371f46a24d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 450, "license_type": "no_license", "max_line_length": 44, "num_lines": 20, "path": "/back/models/site.js", "repo_name": "1000sang/IoT_Project", "src_encoding": "UTF-8", "text": "module.exports = (sequelize, DataTypes) => {\n const Site = sequelize.define('Site', {\n siteCode: {\n type: DataTypes.STRING(30),\n aloowNull: false,\n unique: true\n },\n siteName: {\n type: DataTypes.STRING(30),\n aloowNull: false\n },\n }, {\n charset: 'utf8',\n callate: 'utf8_general_ci',\n });\n Site.associate = (db) => {\n\n };\n return Site;\n}" }, { "alpha_fraction": 0.75, "alphanum_fraction": 0.75, "avg_line_length": 34.29999923706055, "blob_id": "cd32777879cd24f9405b239531ddddfebc80670f", "content_id": "837039a1c6a5b18110be5b245897c8f6bfd889a3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 352, "license_type": "no_license", "max_line_length": 80, "num_lines": 10, "path": "/back/routes/oauth.js", "repo_name": "1000sang/IoT_Project", "src_encoding": "UTF-8", "text": "const express = require('express');\nconst router = express.Router();\n\nconst { isLoggedIn, isNotLoggedIn } = require('../utils/passport/confirmLogin');\nconst oauthController = require('../controllers/oauth');\n\nrouter.get('/', isLoggedIn, oauthController.getTokenData);\nrouter.post('/', isLoggedIn, oauthController.createToken);\n\nmodule.exports = router;" }, { "alpha_fraction": 0.6153846383094788, "alphanum_fraction": 0.6153846383094788, "avg_line_length": 26.147058486938477, "blob_id": "462ce358d90334a3d01eb387a33d13d3e3ef4d53", "content_id": "054225d832869446ebbd41736afd7ce9993bcee9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 923, "license_type": "no_license", "max_line_length": 68, "num_lines": 34, "path": "/front/authentication/reducer.js", 
"repo_name": "1000sang/IoT_Project", "src_encoding": "UTF-8", "text": "import Router from 'next/router';\nimport produce from 'immer';\nimport actions from './actions';\n\nconst initState = {\n idToken: null,\n err: null,\n userData: null,\n};\n\nexport default function authReducer(state = initState, action) {\n return produce(state, (draft) => {\n switch (action.type) {\n case actions.LOGIN_REQUEST_SUCCESS:\n draft.userData = action.payload;\n draft.err = null;\n // draft.userData = action.payload\n Router.replace('/dashboard');\n // Router.replace(`/dashboard?id=${action.payload.userId}`);\n break;\n case actions.LOGIN_REQUEST_FAILURE:\n // draft.idToken = null;\n draft.userData = null;\n draft.err = action.payload;\n break;\n case actions.LOGOUT_REQUEST_FAILURE:\n case actions.LOGOUT_REQUEST_SUCCESS:\n // Router.replace('/');\n return initState;\n default:\n return state;\n }\n })\n}\n" }, { "alpha_fraction": 0.5106349587440491, "alphanum_fraction": 0.5115802884101868, "avg_line_length": 30.420791625976562, "blob_id": "778d05790c9589ed107cfc8d339190316333cf2f", "content_id": "9a9dc0a89ee7e9a8bffbd6b87283b3cb230b6437", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 6373, "license_type": "no_license", "max_line_length": 85, "num_lines": 202, "path": "/front/pages/signup.js", "repo_name": "1000sang/IoT_Project", "src_encoding": "UTF-8", "text": "import React, { useCallback, useEffect, useState } from 'react';\nimport Link from 'next/link';\nimport { Router, useRouter } from 'next/router';\nimport { useDispatch, useSelector } from 'react-redux';\nimport Input from '@iso/components/uielements/input';\nimport Checkbox from '@iso/components/uielements/checkbox';\nimport Button from '@iso/components/uielements/button';\nimport FirebaseSignUpForm from '@iso/containers/FirebaseForm/FirebaseForm';\nimport authAction from '@iso/redux/auth/actions';\nimport appActions from '@iso/redux/app/actions';\nimport Auth0 from '../authentication/Auth0';\nimport IntlMessages from '@iso/components/utility/intlMessages';\nimport SignUpStyleWrapper from '../styled/SignUp.styles';\nimport axios from 'axios';\n\nimport userAction from '../redux/user/actions'\n\nconst { signup } = userAction;\nconst { login } = authAction;\nconst { clearMenu } = appActions;\n\nexport default function SignUp() {\n const dispatch = useDispatch();\n const router = useRouter();\n\n const { isSignedUp, signUpError } = useSelector((state) => state.userReducer);\n\n useEffect(() => {\n if (isSignedUp) {\n router.replace('/');\n }\n }, [isSignedUp])\n\n useEffect(() => {\n if (signUpError) {\n // console.log(signUpError)\n alert(signUpError)\n }\n }, [signUpError]);\n\n // const handleLogin = (token = false) => {\n // console.log(token, 'handlelogin');\n // if (token) {\n // dispatch(login(token));\n // } else {\n // dispatch(login());\n // }\n // dispatch(clearMenu());\n // history.push('/dashboard');\n // };\n const [email, setEmail] = useState('');\n const [nickname, setNickname] = useState('');\n const [password, setPassword] = useState('');\n const [passwordConfirm, setPasswordConfirm] = useState('');\n const [passwordError, setPasswordError] = useState(false);\n\n const handleSubmit = useCallback(() => {\n if (password !== passwordConfirm) {\n return setPasswordError(true)\n }\n const data = {\n email: email,\n nickname: nickname,\n password: password\n };\n\n // const requestSignUp = async () => {\n // try {\n // const res = await axios.post(\n // 
`${process.env.BACKEND_URL}/user`,\n // data\n // )\n // console.log(res)\n // } catch (err) {\n // console.log(err)\n // }\n // }\n // requestSignUp();\n dispatch(signup(data))\n }, [email, nickname, password, passwordConfirm])\n\n return (\n <SignUpStyleWrapper className=\"isoSignUpPage\">\n <div className=\"isoSignUpContentWrapper\">\n <div className=\"isoSignUpContent\">\n <div className=\"isoLogoWrapper\">\n <Link href=\"/dashboard\">\n <IntlMessages id=\"page.signUpTitle\" />\n </Link>\n </div>\n\n <form className=\"isoSignUpForm\" onSubmit={(e) => [\n e.preventDefault()\n ]} >\n {/* <div className=\"isoInputWrapper isoLeftRightComponent\">\n <Input size=\"large\" placeholder=\"First name\" />\n <Input size=\"large\" placeholder=\"Last name\" />\n </div> */}\n\n <div className=\"isoInputWrapper\">\n <Input\n id=\"email\"\n size=\"large\"\n placeholder=\"Email\"\n onChange={useCallback((e) => {\n setEmail(e.target.value)\n }, [])}\n />\n </div>\n\n <div className=\"isoInputWrapper\">\n <Input\n id=\"nickname\"\n size=\"large\"\n placeholder=\"Nickname\"\n onChange={useCallback((e) => {\n setNickname(e.target.value)\n }, [])}\n />\n </div>\n\n <div className=\"isoInputWrapper\">\n <Input\n id=\"password\"\n size=\"large\"\n type=\"password\"\n placeholder=\"Password\"\n\n onChange={useCallback((e) => {\n setPassword(e.target.value)\n }, [])}\n />\n </div>\n\n <div className=\"isoInputWrapper\">\n <Input\n id=\"passwordConfirm\"\n size=\"large\"\n type=\"password\"\n placeholder=\"Confirm Password\"\n value={passwordConfirm}\n onChange={useCallback((e) => {\n setPasswordConfirm(e.target.value);\n setPasswordError(e.target.value !== password)\n }, [password])}\n />\n {passwordError && <div style={{ color: 'red' }}>비밀번호가 일치하지 않습니다.</div>}\n </div>\n\n {/* <div className=\"isoInputWrapper\" style={{ marginBottom: '50px' }}>\n <Checkbox>\n <IntlMessages id=\"page.signUpTermsConditions\" />\n </Checkbox>\n </div> */}\n\n <div className=\"isoInputWrapper\">\n <Button type=\"primary\" onClick={handleSubmit}>\n <IntlMessages id=\"page.signUpButton\" />\n </Button>\n </div>\n {/* <div className=\"isoInputWrapper isoOtherLogin\">\n <Button\n onClick={handleLogin}\n type=\"primary\"\n className=\"btnFacebook\"\n >\n <IntlMessages id=\"page.signUpFacebook\" />\n </Button>\n <Button\n onClick={handleLogin}\n type=\"primary\"\n className=\"btnGooglePlus\"\n >\n <IntlMessages id=\"page.signUpGooglePlus\" />\n </Button>\n <Button\n onClick={() => {\n Auth0.login();\n }}\n type=\"primary\"\n className=\"btnAuthZero\"\n >\n <IntlMessages id=\"page.signUpAuth0\" />\n </Button>\n\n <FirebaseSignUpForm\n signup={true}\n history={router}\n login={() => dispatch(login())}\n />\n </div> */}\n <div className=\"isoInputWrapper isoCenterComponent isoHelperWrapper\">\n <Link href=\"/signin\">\n <IntlMessages id=\"page.signUpAlreadyAccount\" />\n </Link>\n </div>\n </form>\n </div>\n </div>\n </SignUpStyleWrapper>\n );\n}\n" }, { "alpha_fraction": 0.6465210914611816, "alphanum_fraction": 0.6505840420722961, "avg_line_length": 22.734939575195312, "blob_id": "c85be19eed0d45ae13c74d41e561b88166ef6e77", "content_id": "56b55894201dac0e01336042cf6843d27fdd5ec2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1977, "license_type": "no_license", "max_line_length": 53, "num_lines": 83, "path": "/back/app.js", "repo_name": "1000sang/IoT_Project", "src_encoding": "UTF-8", "text": "const express = require('express');\nconst cors = require('cors');\nconst bodyParser 
= require('body-parser');\nconst dotenv = require('dotenv');\nconst morgan = require('morgan');\nconst hpp = require('hpp');\nconst helmet = require('helmet');\n\nconst session = require('express-session');\nconst cookieParser = require('cookie-parser');\nconst passport = require('passport');\nconst RedisStore = require('connect-redis')(session);\nconst redisClient = require('./utils/redis');\n\nconst routers = require('./routes');\nconst db = require('./models');\nconst mongoDB = require('./models/mongo');\nconst passportConfig = require('./utils/passport');\nconst errMsg = require('./utils/error/errorMessage');\nconst webSocket = require('./utils/socket');\n\ndotenv.config()\n\n\nconst app = express();\n\nconst sessionMiddleware = session({\n store: new RedisStore({\n client: redisClient,\n // ttl: 3,\n logErrors: true\n }),\n saveUninitialized: false,\n resave: false,\n secret: process.env.PASSPORT_SECRET,\n cookie: {\n sameSite: 'lax'\n }\n});\napp.use('/favicon.ico', () => { });\napp.use(bodyParser.json());\napp.use(express.urlencoded({ extended: true }));\n\npassportConfig();\n\nif (process.env.NODE_ENV === 'production') {\n app.use(morgan('combined'));\n app.use(hpp());\n app.use(helmet());\n} else {\n app.use(morgan('dev'));\n}\n\napp.use(cookieParser(process.env.PASSPORT_SECRET));\napp.use(sessionMiddleware);\napp.use(passport.initialize());\napp.use(passport.session());\n\napp.use(cors({\n origin: true,\n credentials: true,\n}));\nmongoDB();\n\ndb.sequelize.sync()\n .then(() => {\n console.log('db 연결 성공 ')\n })\n .catch(console.error);\n\napp.use('/', routers);\napp.use(function (err, req, res, next) {\n console.log('err', err);\n res.status(500).send(errMsg.createErrMsg(err))\n});\n\nconst http = require('http').createServer(app);\n\nconst server = http.listen(3065, () => {\n console.log('connected')\n});\n\nwebSocket(server, app);" }, { "alpha_fraction": 0.6355077624320984, "alphanum_fraction": 0.637525200843811, "avg_line_length": 28.739999771118164, "blob_id": "bc740691388efbd89d615f5b99a66f14e8b2d616", "content_id": "0ca4ffab6b2fd6f8e36792870a0c645a2796b9ed", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1487, "license_type": "no_license", "max_line_length": 91, "num_lines": 50, "path": "/back/controllers/oauth.js", "repo_name": "1000sang/IoT_Project", "src_encoding": "UTF-8", "text": "const bcrypt = require('bcryptjs');\nconst oauthService = require('../service/oauth');\nconst userService = require('../service/user');\nconst deviceService = require('../service/device');\nconst { createError } = require('../utils/error/error');\nconst { verifyToken } = require('../utils/token');\n\nconst Errors = (exports.Errors = {\n FailureSignIn: createError('FailureSignIn'),\n UnknownError: createError('UnkownError')\n})\n\nexports.getTokenData = async (req, res, next) => {\n const tokenData = verifyToken(req.headers.authorization);\n\n return res.status(200).send(tokenData);\n}\n\nexports.createToken = async (req, res, next) => {\n try {\n const data = {\n email: req.body.email,\n password: req.body.password\n }\n\n const findOneUser = await userService.findUser(data);\n\n const compare = bcrypt.compareSync(data.password, findOneUser.dataValues.password);\n\n if (!compare) {\n throw new Errors.FailureSignIn()\n }\n\n data.userId = findOneUser.dataValues.userId;\n\n const findAllDeviceById = await deviceService.findAllDeviceById(data.userId);\n\n const tokenData = {\n userId: data.userId,\n email: 
findOneUser.dataValues.email,\n nickname: findOneUser.dataValues.nickname,\n deviceId: findAllDeviceById\n }\n\n const token = await oauthService.createToken(tokenData);\n return res.send(token)\n } catch (err) {\n next(err);\n }\n}\n" }, { "alpha_fraction": 0.5914396643638611, "alphanum_fraction": 0.5914396643638611, "avg_line_length": 22.363636016845703, "blob_id": "42d91f38528dfa544b870222132831ca532323cc", "content_id": "16e1e39ee1174c0cd53d6fdab069121ce11a7ad9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 257, "license_type": "no_license", "max_line_length": 39, "num_lines": 11, "path": "/front/redux/user/actions.js", "repo_name": "1000sang/IoT_Project", "src_encoding": "UTF-8", "text": "const actions = {\n SIGN_UP_REQUEST: 'SIGN_UP_REQUEST',\n SIGN_UP_SUCCESS: 'SIGN_UP_SUCCESS',\n SIGN_UP_FAILURE: 'SIGN_UP_FAILURE',\n\n signup: (data) => ({\n type: actions.SIGN_UP_REQUEST,\n data: data\n }),\n};\nexport default actions;\n" }, { "alpha_fraction": 0.7640918493270874, "alphanum_fraction": 0.7640918493270874, "avg_line_length": 39, "blob_id": "9fb61200bffdc8cf85f267892677f86bd4dbf675", "content_id": "abec7a258587fccc93f85bd2fd7a3851e4a440d8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 479, "license_type": "no_license", "max_line_length": 80, "num_lines": 12, "path": "/back/routes/device.js", "repo_name": "1000sang/IoT_Project", "src_encoding": "UTF-8", "text": "const express = require('express');\nconst router = express.Router();\n\nconst { isLoggedIn, isNotLoggedIn } = require('../utils/passport/confirmLogin');\nconst deviceController = require('../controllers/device');\n\nrouter.get('/', isLoggedIn, deviceController.getAllHasUser);\nrouter.post('/', isLoggedIn, deviceController.createDevice);\nrouter.post('/data', deviceController.getSensorData);\nrouter.post('/:deviceId', isLoggedIn, deviceController.addDevice);\n\nmodule.exports = router;" }, { "alpha_fraction": 0.6363636255264282, "alphanum_fraction": 0.6363636255264282, "avg_line_length": 25.33333396911621, "blob_id": "25bbe021c98e5a8b1657e0f32d76e3787aab1845", "content_id": "de99ec08d9e8aa5ed0341ed7f24c3af17387d6a4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 396, "license_type": "no_license", "max_line_length": 56, "num_lines": 15, "path": "/back/service/oauth.js", "repo_name": "1000sang/IoT_Project", "src_encoding": "UTF-8", "text": "const { createToken } = require('../utils/token');\nconst { createError } = require('../utils/error/error');\n\nconst Errors = (exports.Errors = {\n FailureSignIn: createError('FailureSignIn'),\n UnknownError: createError('UnkownError')\n})\n\nexports.createToken = async (data) => {\n try {\n return createToken(data);\n } catch (err) {\n throw new Errors.FailureSignIn()\n }\n}\n\n" }, { "alpha_fraction": 0.5245901346206665, "alphanum_fraction": 0.5275707840919495, "avg_line_length": 28.217391967773438, "blob_id": "bd41e8db5ab1a2352d0fe18209fa29c8a51fa54d", "content_id": "2118eaa2fd538824ac37b74496d00e4f20c94dda", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 671, "license_type": "no_license", "max_line_length": 74, "num_lines": 23, "path": "/back/models/userDevice.js", "repo_name": "1000sang/IoT_Project", "src_encoding": "UTF-8", "text": "module.exports = (sequelize, DataTypes) => {\n const UserDevice = sequelize.define('UserDevice', {\n 
deviceId: {\n type: DataTypes.INTEGER,\n allowNull: false,\n primaryKey: true\n },\n userId: {\n type: DataTypes.INTEGER,\n allowNull: false,\n primaryKey: true\n },\n }, {\n charset: 'utf8',\n callate: 'utf8_general_ci',\n timestamps: false\n });\n UserDevice.associate = (db) => {\n // db.UserDevice.belongsTo(db.User, { foreignKey: 'userId' });\n // db.UserDevice.belongsTo(db.Device, { foreignKey: 'deviceId' });\n };\n return UserDevice;\n}" }, { "alpha_fraction": 0.5126582384109497, "alphanum_fraction": 0.5126582384109497, "avg_line_length": 21.714284896850586, "blob_id": "18b905eeaf634f0f4f700575565127180c59fc34", "content_id": "20106e96cd86ad0fd26cc2c93405753f13246ace", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 158, "license_type": "no_license", "max_line_length": 30, "num_lines": 7, "path": "/back/utils/error/error.js", "repo_name": "1000sang/IoT_Project", "src_encoding": "UTF-8", "text": "exports.createError = name =>\n class extends Error {\n constructor(message) {\n super(message)\n this.name = name\n }\n }" }, { "alpha_fraction": 0.542119562625885, "alphanum_fraction": 0.542119562625885, "avg_line_length": 24.379310607910156, "blob_id": "baf86c43ca280b084de99d07d85bedaf3fb909cb", "content_id": "a420cec797df819ac07e1d90bcd6a043e568496f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 736, "license_type": "no_license", "max_line_length": 69, "num_lines": 29, "path": "/front/redux/user/saga.js", "repo_name": "1000sang/IoT_Project", "src_encoding": "UTF-8", "text": "import { all, takeEvery, fork, call, put } from 'redux-saga/effects';\nimport axios from '../../pages/api/axios';\nimport actions from './actions';\n\nfunction signupAPI(data) {\n return axios.post('/user', data);\n}\n\nexport function* signUpRequest() {\n yield takeEvery(actions.SIGN_UP_REQUEST, function* ({ data }) {\n try {\n const result = yield call(signupAPI, data)\n yield put({\n type: actions.SIGN_UP_SUCCESS\n });\n } catch (err) {\n yield put({\n type: actions.SIGN_UP_FAILURE,\n data: err.response.data\n });\n }\n })\n}\n\nexport default function* rootSaga() {\n yield all([\n fork(signUpRequest)\n ]);\n}\n" }, { "alpha_fraction": 0.6485061645507812, "alphanum_fraction": 0.6528998017311096, "avg_line_length": 29.756755828857422, "blob_id": "6655d9c53358e5af6173d12b0b5ed3117e284fd0", "content_id": "942bdfd880ba054c7c2a70590ce2b531a1c17f5f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1156, "license_type": "no_license", "max_line_length": 81, "num_lines": 37, "path": "/front/containers/devices.js", "repo_name": "1000sang/IoT_Project", "src_encoding": "UTF-8", "text": "import React from 'react';\nimport Head from 'next/head';\nimport dynamic from 'next/dynamic';\n\nimport { Row, Col } from 'antd';\nimport basicStyle from '@iso/assets/styles/constants';\n\nconst RandomizedDataLine = dynamic(() =>\n import(\n '@iso/containers/Charts/ReactChart2/Components/RandomizedLine/RandomizedLine'\n )\n);\nconst PageHeader = dynamic(() => import('@iso/components/utility/pageHeader'));\nconst Box = dynamic(() => import('@iso/components/utility/box'));\nconst LayoutWrapper = dynamic(() =>\n import('@iso/components/utility/layoutWrapper')\n);\nconst ContentHolder = dynamic(() =>\n import('@iso/components/utility/contentHolder')\n);\nexport default function Devices() {\n const { rowStyle, colStyle, gutter } = basicStyle;\n return (\n 
<LayoutWrapper className=\"isoMapPage\">\n <PageHeader>Chart 준비중</PageHeader>\n <Row style={rowStyle} gutter={gutter} justify=\"start\">\n <Col md={12} xs={24} style={colStyle}>\n <Box title=\"디바이스 차트\">\n <ContentHolder>\n <RandomizedDataLine />\n </ContentHolder>\n </Box>\n </Col>\n </Row>\n </LayoutWrapper>\n );\n}\n" }, { "alpha_fraction": 0.6615315079689026, "alphanum_fraction": 0.6619846224784851, "avg_line_length": 25.2738094329834, "blob_id": "96e643492afe398d2e282e829ff0d4ea39911b55", "content_id": "6077a66998e5c0bd5fe62101873c2c0c9d757ff0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 2207, "license_type": "no_license", "max_line_length": 76, "num_lines": 84, "path": "/front/authentication/sagas.js", "repo_name": "1000sang/IoT_Project", "src_encoding": "UTF-8", "text": "import { all, takeLatest, put, call, fork } from 'redux-saga/effects';\nimport fetch from 'isomorphic-unfetch';\n\nimport { login, logout, setCookie, removeCookie } from './auth.utils';\nimport { notification } from '@iso/components';\nimport JwtAuthentication from './jwtAuthentication';\nimport actions from './actions';\nimport axios from '../pages/api/axios';\n\nimport cookie from 'js-cookie';\n\nfunction loginAPI(payload) {\n return axios.post('/user/login', payload);\n}\n\nfunction logoutAPI() {\n return axios.post('/user/logout');\n}\n\nfunction* loginRequest({ payload }) {\n try {\n const result = yield call(loginAPI, payload);\n const userData = result.data;\n\n // cookie.set('token', userData.userId, { expires: 1 });\n\n yield put(actions.loginRequestSuccess(userData));\n\n\n } catch (err) {\n yield put(actions.loginRequestFailure(err.response.data));\n }\n}\n\nexport function* jwtLoginRequest() {\n // yield takeLatest(actions.JWT_LOGIN_REQUEST, function*({ payload }) {\n // const result = yield call(JwtAuthentication.login, payload.userInfo);\n // if (result.error) {\n // notification('error', result.error);\n // yield put(actions.loginRequestFailure(error));\n // } else {\n // payload.history.push('/dashboard');\n // yield put({\n // type: actions.LOGIN_REQUEST_SUCCESS,\n // token: result.token,\n // profile: result.profile,\n // });\n // }\n // });\n}\n\nfunction* logoutRequest() {\n try {\n yield call(logoutAPI);\n yield call(logout);\n yield put(actions.logoutRequestSuccess());\n } catch (error) {\n yield put(actions.logoutRequestFailure(error));\n }\n}\n\n// export function* loginSuccess() {\n// yield takeLatest(actions.LOGIN_SUCCESS, function* (token) {\n// yield setCookie('id_token', token);\n// });\n// }\n\n// export function* loginError() {\n// yield takeLatest(actions.LOGIN_ERROR, function*() {});\n// }\n\nexport function* onLogin() {\n yield takeLatest(actions.LOGIN_REQUEST_START, loginRequest);\n}\nexport function* onLogout() {\n yield takeLatest(actions.LOGOUT_REQUEST_START, logoutRequest);\n}\nexport default function* rootSaga() {\n yield all([\n call(onLogin),\n call(jwtLoginRequest),\n call(onLogout)\n ]);\n}\n" }, { "alpha_fraction": 0.5324826240539551, "alphanum_fraction": 0.5413766503334045, "avg_line_length": 27.428571701049805, "blob_id": "f6d090230c4c918ad636e551299fbf1ca4f39f86", "content_id": "5518736a3e8ae9f82d6a919ab70458241fa12033", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 2586, "license_type": "no_license", "max_line_length": 96, "num_lines": 91, "path": "/back/controllers/user.js", "repo_name": "1000sang/IoT_Project", "src_encoding": "UTF-8", 
"text": "const bcrypt = require('bcryptjs');\nconst passport = require('passport');\n\nconst { verifyToken } = require('../utils/token');\nconst userService = require('../service/user');\nconst redisService = require('../service/redis');\n\nexports.getUser = async (req, res, next) => {\n if (req.user) {\n const user = await userService.getUser(req.user.userId);\n return res.status(200).json(user);\n } else {\n return res.status(200).json(null);\n }\n}\n\nexports.getSesssion = async (req, res, next) => {\n try {\n const session = req.session.passport;\n console.log(session);\n return res.status(200).json(session)\n } catch (err) {\n console.log('getSession', err);\n next(err);\n }\n}\n\nexports.createUser = async (req, res, next) => {\n try {\n const findOneUser = await userService.findOneUserByEmail(req.body.email);\n\n const hashedPassword = await bcrypt.hash(req.body.password, 12);\n req.body.password = hashedPassword;\n\n const createUser = await userService.createUser(req.body);\n res.status(200).send(createUser);\n } catch (err) {\n next(err)\n }\n}\n\nexports.login = async (req, res, next) => {\n passport.authenticate('local', (err, user, info) => {\n if (err) {\n console.log(err);\n return next(err);\n }\n if (info) {\n return res.status(401).send(info.reason);\n }\n return req.login(user, async (loginErr) => {\n if (loginErr) {\n console.log(loginErr);\n return next(loginErr);\n }\n\n const findOneUser = await userService.findOneUser(user.userId);\n // await redisService.setRedisUsersDevices(findOneUser.userId, findOneUser.Devices);\n const payload = {\n userId: findOneUser.userId,\n Devices: findOneUser.Devices,\n sessionID: req.sessionID\n }\n\n return res.status(200).send(payload);\n })\n })(req, res, next);\n}\n\nexports.logout = async (req, res, next) => {\n try {\n await redisService.deleteRedisKeys(req.user.userId);\n req.logout();\n req.session.destroy((err) => {\n if (err) {\n console.log(err);\n return res.status(500).json(err)\n }\n return res.send('ok')\n });\n\n // req.session.destroy();\n // res.send('ok');\n // req.session.save(() => {\n // res.send('ok')\n // })\n } catch (err) {\n console.log(err)\n next(err)\n }\n}" }, { "alpha_fraction": 0.594298243522644, "alphanum_fraction": 0.6008771657943726, "avg_line_length": 27.5625, "blob_id": "bdce3936c362fb7a23b26250ee1de5973d2e28da", "content_id": "0fe493dff70e1cb4660c71483afb962cff6205d9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 456, "license_type": "no_license", "max_line_length": 76, "num_lines": 16, "path": "/back/controllers/site.js", "repo_name": "1000sang/IoT_Project", "src_encoding": "UTF-8", "text": "const siteService = require('../service/site');\n\nexports.createSite = async (req, res, next) => {\n try {\n const payload = {\n siteCode: req.body.siteCode,\n siteName: req.body.siteName\n }\n const findOneSite = await siteService.findOneSite(payload.siteCode);\n\n const createSite = await siteService.createSite(payload);\n res.status(200).send(createSite);\n } catch (err) {\n next(err)\n }\n}" }, { "alpha_fraction": 0.5401069521903992, "alphanum_fraction": 0.5401069521903992, "avg_line_length": 17.75, "blob_id": "0393bf9ec843001dbbcadaea273a48b71f63e77f", "content_id": "29f6e9fedd2b0818d606e2f43367a51a5700cdcc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 374, "license_type": "no_license", "max_line_length": 52, "num_lines": 20, "path": "/back/models/mongo/room.js", 
"repo_name": "1000sang/IoT_Project", "src_encoding": "UTF-8", "text": "const mongoose = require('mongoose');\n\nconst roomSchema = new mongoose.Schema({\n userId: {\n type: String,\n required: true,\n unique: true\n },\n deviceId: {\n type: Array\n },\n topic: {\n type: Array\n },\n sessionID: {\n type: String\n }\n}, { timestamps: true });\n\nmodule.exports = mongoose.model('Room', roomSchema);" }, { "alpha_fraction": 0.6312292218208313, "alphanum_fraction": 0.634551465511322, "avg_line_length": 20.571428298950195, "blob_id": "b1fc3d05667cfc211184e642b5291badf5c4daf8", "content_id": "c4f9be42d43184ef046b92b9a6c4215785bbed00", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 301, "license_type": "no_license", "max_line_length": 64, "num_lines": 14, "path": "/back/models/mongo/sensorData.js", "repo_name": "1000sang/IoT_Project", "src_encoding": "UTF-8", "text": "const mongoose = require('mongoose');\n\nconst sensorDataSchema = new mongoose.Schema({\n topic: {\n type: String,\n },\n data: {\n type: String\n }\n}, { timestamps: true });\n\nsensorDataSchema.index({ \"createAt\": 1 })\n\nmodule.exports = mongoose.model('SensorData', sensorDataSchema);" }, { "alpha_fraction": 0.5366001129150391, "alphanum_fraction": 0.5366001129150391, "avg_line_length": 32.060001373291016, "blob_id": "aa13b4710196bf24b9c8ee664dc284a806fee637", "content_id": "1623775af651561cfb5fc1bc8e5fd5e18019f381", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1653, "license_type": "no_license", "max_line_length": 64, "num_lines": 50, "path": "/front/redux/load/reducer.js", "repo_name": "1000sang/IoT_Project", "src_encoding": "UTF-8", "text": "import produce from 'immer';\nimport actions from './actions';\n\nconst initState = {\n loadUserLoading: false,\n isLoadedUser: false,\n loadUserError: false,\n loadDataLoading: false,\n isLoadedData: false,\n loadDataError: false,\n}\n\nexport default function userReducer(state = initState, action) {\n return produce(state, (draft) => {\n switch (action.type) {\n case actions.LOAD_USER_REQUEST:\n draft.loadUserLoading = true\n draft.isLoadedUser = false\n draft.loadUserError = false\n break;\n case actions.LOAD_USER_SUCSSESS:\n draft.loadUserLoading = false\n draft.isLoadedUser = true\n draft.loadUserError = false\n break;\n case actions.LOAD_USER_FAILURE:\n draft.loadUserLoading = false\n draft.isLoadedUser = false\n draft.loadUserError = action.data\n break;\n case actions.LOAD_DATA_REQUEST:\n draft.loadDataLoading = true\n draft.isLoadedData = false\n draft.loadDataError = false\n break;\n case actions.LOAD_DATA_SUCSSESS:\n draft.loadDataLoading = false\n draft.isLoadedData = true\n draft.loadDataError = false\n break;\n case actions.LOAD_DATA_FAILURE:\n draft.loadDataLoading = false\n draft.isLoadedData = false\n draft.loadDataError = action.data\n break;\n default:\n break;\n }\n });\n}\n" }, { "alpha_fraction": 0.637155294418335, "alphanum_fraction": 0.6487663388252258, "avg_line_length": 23.64285659790039, "blob_id": "4a702bd0bb59fce8c0ddc18f2c6dfdc8a752a443", "content_id": "dc4d312fae3dee80bbab7410d1d12bbc93a1865d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 689, "license_type": "no_license", "max_line_length": 70, "num_lines": 28, "path": "/device/test.py", "repo_name": "1000sang/IoT_Project", "src_encoding": "UTF-8", "text": "import time\nimport Adafruit_DHT\nimport paho.mqtt.client as 
mqtt\nimport json\n\nsensor = Adafruit_DHT.DHT11\npin = 2\n\nbroker_address=\"ec2.devfloors.com\"\nclient = mqtt.client(\"clientPublisher\")\nclient.connect(broker_address)\n\n\ntry:\n while True:\n h,t = Adafruit_DHT.read_retry(sensor,pin)\n if h is not None and t is not None:\n pub_data = {\"message\":true, \"Temperature\":t, \"Humidity\":h}\n else:\n pub_data = {\"message\":false}\n pub_data = json.dumps(pub_data)\n print(pub_data)\n client.publish(\"DHT11\",pub_data)\n time.sleep(60)\nexcept KeyboardInterrupt:\n print(\"Terminated by Keyboard\")\nfinally:\n print(\"End of Program\")" }, { "alpha_fraction": 0.5983606576919556, "alphanum_fraction": 0.6147540807723999, "avg_line_length": 12.666666984558105, "blob_id": "be9be442828fada8100ce347f3a8de119dafd848", "content_id": "74d06f9f0aaee803042f7b288c047027b614fa01", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 122, "license_type": "no_license", "max_line_length": 61, "num_lines": 9, "path": "/front/out/_next/static/webpack/styles.0b5d610724ce0eb600dc.hot-update.js", "repo_name": "1000sang/IoT_Project", "src_encoding": "UTF-8", "text": "webpackHotUpdate_N_E(\"styles\",{\n\n/***/ \"../../node_modules/antd/lib/checkbox/style/index.css\":\nfalse,\n\n/***/ 13:\nfalse\n\n})" }, { "alpha_fraction": 0.6341463327407837, "alphanum_fraction": 0.6341463327407837, "avg_line_length": 27.461538314819336, "blob_id": "7a1b058d87a7740eebe028566b6b9f50a883ca11", "content_id": "513d0819940f81ff1494ab1265ec531531277d76", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 369, "license_type": "no_license", "max_line_length": 43, "num_lines": 13, "path": "/back/routes/index.js", "repo_name": "1000sang/IoT_Project", "src_encoding": "UTF-8", "text": "const express = require('express');\nconst router = express.Router();\n\nrouter.get('/', function (req, res) {\n res.send('hello');\n})\nrouter.use('/user', require('./user'));\nrouter.use('/oauth', require('./oauth'));\nrouter.use('/site', require('./site'));\nrouter.use('/device', require('./device'));\nrouter.use('/socket', require('./socket'));\n\nmodule.exports = router;" }, { "alpha_fraction": 0.5483067631721497, "alphanum_fraction": 0.5582669377326965, "avg_line_length": 24.743589401245117, "blob_id": "f6d99e16e1a56abc6f69ca121cfe5c578e88e5e4", "content_id": "155e0c5c51f9c3a8683951abcc273443210c144b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 2016, "license_type": "no_license", "max_line_length": 75, "num_lines": 78, "path": "/front/containers/Widgets/Widgets.js", "repo_name": "1000sang/IoT_Project", "src_encoding": "UTF-8", "text": "import React, { useState, useEffect } from 'react';\nimport { Row, Col } from 'antd';\nimport LayoutWrapper from '@iso/components/utility/layoutWrapper';\nimport basicStyle from '@iso/assets/styles/constants';\nimport IsoWidgetsWrapper from './WidgetsWrapper';\nimport CardWidget from './Card/CardWidget';\nimport Device from '../devices';\nimport IntlMessages from '@iso/components/utility/intlMessages';\nimport { useSelector } from 'react-redux';\n\nconst styles = {\n wisgetPageStyle: {\n display: 'flex',\n flexFlow: 'row wrap',\n alignItems: 'flex-start',\n overflow: 'hidden',\n },\n}\n\nexport default function () {\n const { tempData, humData } = useSelector((state) => state.socketReducer)\n const [temp, setTempData] = useState('');\n const [hum, setHumData] = useState('');\n\n let 
CARD_WIDGET = [\n {\n icon: 'ion-bonfire',\n iconcolor: '#F75D81',\n number: `${temp}`,\n text: '온도',\n },\n {\n icon: 'ion-android-cloud',\n iconcolor: '#42A5F5',\n number: `${hum}`,\n text: '습도',\n },\n {\n icon: 'ion-ios-toggle-outline',\n iconcolor: '#FEAC01',\n number: '',\n text: null,\n },\n ];\n\n const { rowStyle, colStyle } = basicStyle;\n\n useEffect(() => {\n if (tempData) {\n setTempData(tempData)\n }\n if (humData) {\n setHumData(humData)\n }\n }, [tempData, humData])\n\n return (\n <LayoutWrapper>\n <div style={styles.wisgetPageStyle}>\n <Row style={rowStyle} gutter={0} justify=\"start\">\n {CARD_WIDGET.map((widget, idx) => (\n <Col lg={6} md={12} sm={12} xs={24} style={colStyle}>\n <IsoWidgetsWrapper key={idx} gutterBottom={20}>\n <CardWidget\n icon={widget.icon}\n iconcolor={widget.iconcolor}\n number={widget.number}\n text={widget.text}\n />\n </IsoWidgetsWrapper>\n </Col>\n ))}\n </Row>\n <Device />\n </div>\n </LayoutWrapper>\n );\n}\n" }, { "alpha_fraction": 0.526652455329895, "alphanum_fraction": 0.526652455329895, "avg_line_length": 28.3125, "blob_id": "bf77ff5ec2f0ca0052015bff6801ffe966cf9924", "content_id": "bd31aa397f3740e9f2aed699723ff8b3861c71f6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 938, "license_type": "no_license", "max_line_length": 64, "num_lines": 32, "path": "/front/redux/user/reducer.js", "repo_name": "1000sang/IoT_Project", "src_encoding": "UTF-8", "text": "import produce from 'immer';\nimport actions from './actions';\n\nconst initState = {\n signUpLoading: false,\n isSignedUp: false,\n signUpError: false,\n}\n\nexport default function userReducer(state = initState, action) {\n return produce(state, (draft) => {\n switch (action.type) {\n case actions.SIGN_UP_REQUEST:\n draft.signUpLoading = true\n draft.isSignedUp = false\n draft.signUpError = false\n break;\n case actions.SIGN_UP_SUCCESS:\n draft.signUpLoading = false\n draft.isSignedUp = true\n draft.signUpError = false\n break;\n case actions.SIGN_UP_FAILURE:\n draft.signUpLoading = false\n draft.isSignedUp = false\n draft.signUpError = action.data\n break;\n default:\n break;\n }\n });\n}\n" }, { "alpha_fraction": 0.567533552646637, "alphanum_fraction": 0.5696308612823486, "avg_line_length": 28.432098388671875, "blob_id": "117f82d251ba77c7446af3e4cdf6e29d7f539455", "content_id": "1357e00032da8b0125256339accf26d9845c152c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 2392, "license_type": "no_license", "max_line_length": 99, "num_lines": 81, "path": "/front/pages/dashboard/index.js", "repo_name": "1000sang/IoT_Project", "src_encoding": "UTF-8", "text": "import React, { useEffect, useState } from 'react';\nimport Head from 'next/head';\nimport { withAuthSync } from '../../authentication/auth.utils';\nimport DashboardLayout from '../../containers/DashboardLayout/DashboardLayout';\nimport Widgets from '../../containers/Widgets/Widgets';\n\nimport io from 'socket.io-client';\nimport axios from '../../pages/api/axios'\nimport { useDispatch, useSelector } from 'react-redux';\nimport socketActions from '../../redux/socket/actions'\n\n\nconst socketClient = io('https://elb.devfloors.com:443/deviceRoom', { transports: ['websocket'] });\n\nconst { getTempData, getHumData } = socketActions;\n\nexport default withAuthSync(() => {\n const dispatch = useDispatch();\n\n const { userData } = useSelector((state) => state.Auth);\n const { tempData, humData } = useSelector((state) 
=> state.socketReducer)\n const [temp, setTempData] = useState('');\n const [hum, setHumData] = useState('');\n\n // useEffect(() => {\n // if (tempData) {\n // setTempData(tempData)\n // }\n // if (humData) {\n // setHumData(humData)\n // }\n // }, [tempData, humData])\n\n function socketConnect(userData) {\n socketClient.on('connect', async () => {\n console.log('socket connected');\n\n socketClient.on('newRoom', (data) => {\n if (userData.userId == data.userId) {\n Object.values(data.topics).map((v) => {\n socketClient.on(`${v}`, (data) => {\n switch (JSON.parse(data).dataType) {\n case 'Temp':\n dispatch(getTempData(JSON.parse(data).data))\n // setTempData(JSON.parse(data).data)\n break;\n case 'Hum':\n dispatch(getHumData(JSON.parse(data).data))\n // setHumData(JSON.parse(data).data)\n break;\n }\n })\n })\n }\n })\n\n socketClient.on('disconnect', async () => {\n console.log('socket disconnected');\n })\n const result = await axios.post(`/socket/room`, userData);\n\n dispatch(getTempData(result.data.datas[0]))\n dispatch(getHumData(result.data.datas[1]))\n\n console.log('userData', result)\n })\n }\n // register the socket listeners once on mount; calling socketConnect() in the\n // render body re-registered them on every render\n useEffect(() => {\n socketConnect(userData);\n }, []);\n\n return (\n <>\n <Head>\n <title>Home page</title>\n </Head>\n <DashboardLayout>\n {/* <Card /> */}\n <Widgets />\n </DashboardLayout>\n </>\n )\n});\n" }, { "alpha_fraction": 0.5069518685340881, "alphanum_fraction": 0.5069518685340881, "avg_line_length": 21.285715103149414, "blob_id": "f87fdd34871e69428d7c2e355fa52cc3e13e724f", "content_id": "22256627ae2b8eee50b2dd4ab543c10df8b3e8be", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 991, "license_type": "no_license", "max_line_length": 58, "num_lines": 42, "path": "/back/models/mongo/index.js", "repo_name": "1000sang/IoT_Project", "src_encoding": "UTF-8", "text": "const mongoose = require('mongoose');\nconst glob = require('glob');\nconst dotenv = require('dotenv');\n\ndotenv.config()\n\nmodule.exports = () => {\n const connect = () => {\n mongoose.connect(process.env.MONGO_HOST, {\n useCreateIndex: true,\n useNewUrlParser: true\n }, (err) => {\n if (err) {\n console.log(err)\n }\n console.log('mongodb connected')\n })\n }\n\n connect();\n\n mongoose.connection.on('error', (err) => {\n console.log('MongoDB connection error', err);\n });\n\n mongoose.connection.on('disconnected', () => {\n console.log('MongoDB connection lost. Trying to reconnect.')\n connect();\n });\n\n require('./room');\n require('./sensorData');\n require('./device');\n\n // const models = glob.sync('back/models/mongo/*.js');\n\n // models.forEach(model => {\n // console.log(model)\n // require('../' + model)\n // })\n\n}" }, { "alpha_fraction": 0.75052410364151, "alphanum_fraction": 0.75052410364151, "avg_line_length": 35.769229888916016, "blob_id": "ed667e5f821335919b7178fc98f4784c4a5cad23", "content_id": "39e6d856d2d0d964982ccc888e7f075c07a1eaa8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 477, "license_type": "no_license", "max_line_length": 80, "num_lines": 13, "path": "/back/routes/user.js", "repo_name": "1000sang/IoT_Project", "src_encoding": "UTF-8", "text": "const express = require('express');\nconst router = express.Router();\n\nconst { isLoggedIn, isNotLoggedIn } = require('../utils/passport/confirmLogin');\nconst userController = require('../controllers/user');\n\nrouter.get('/', userController.getUser);\nrouter.get('/session', userController.getSesssion);\nrouter.post('/', isNotLoggedIn, userController.createUser);\nrouter.post('/login', userController.login);\nrouter.post('/logout', userController.logout);\n\nmodule.exports = router;" }, { "alpha_fraction": 0.7404844164848328, "alphanum_fraction": 0.7404844164848328, "avg_line_length": 31.22222137451172, "blob_id": "414c2640923342d22da52cdb78a7200b71a03218", "content_id": "adb52f1575828c510477fa0bc7c86ec1f88fb79c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 289, "license_type": "no_license", "max_line_length": 80, "num_lines": 9, "path": "/back/routes/site.js", "repo_name": "1000sang/IoT_Project", "src_encoding": "UTF-8", "text": "const express = require('express');\nconst router = express.Router();\n\nconst { isLoggedIn, isNotLoggedIn } = require('../utils/passport/confirmLogin');\nconst siteController = require('../controllers/site');\n\nrouter.post('/', isLoggedIn, siteController.createSite);\n\nmodule.exports = router;" }, { "alpha_fraction": 0.556466281414032, "alphanum_fraction": 0.556466281414032, "avg_line_length": 23.120878219604492, "blob_id": "cf15dd0a54c58049bf7fe8e4172fe01848b10822", "content_id": "ce45815cfaafb988fe8eb78f468a6119c7eec8eb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 2196, "license_type": "no_license", "max_line_length": 68, "num_lines": 91, "path": "/back/service/user.js", "repo_name": "1000sang/IoT_Project", "src_encoding": "UTF-8", "text": "const { Strategy } = require('passport');\nconst { User, Device, UserDevice } = require('../models');\nconst { createError } = require('../utils/error/error');\n\nconst Errors = (exports.Errors = {\n BadUserDataError: createError('BadUserDataError'),\n DuplicateUserEmailError: createError('DuplicateUserEmailError'),\n UserEmailNotFound: createError('UserEmailNotFound'),\n FailureSignIn: createError('FailureSignIn'),\n UnknownError: createError('UnknownError')\n})\n\nexports.getUser = async (userId) => {\n try {\n const user = await User.findOne({\n where: { userId: userId }\n });\n\n return user\n } catch (err) {\n console.log(err);\n }\n}\n\nexports.findOneUser = async (userId) => {\n const user = await User.findOne({\n where: { userId: userId },\n attributes: {\n exclude: ['password']\n },\n include: [{\n model: Device,\n attributes: ['deviceId', 'topic']\n }]\n });\n\n return user\n // 
JSON.stringify(user)\n}\n\nexports.findOneUserByEmail = async (email) => {\n const exUser = await User.findOne({\n where: {\n email: email\n }\n })\n if (exUser) {\n throw new Errors.DuplicateUserEmailError()\n }\n return exUser;\n}\nexports.findOneIdByEmail = async (email) => {\n const findOneIdByEmail = await User.findOne({\n where: {\n email: email\n }\n })\n if (!findOneIdByEmail) {\n throw new Errors.UserEmailNotFound()\n }\n return findOneIdByEmail\n}\n\nexports.findUser = async ({ email }) => {\n const findUser = await User.findOne({\n where: {\n email: email\n }\n });\n if (!findUser) {\n throw new Errors.FailureSignIn()\n }\n return findUser;\n}\n\nexports.createUser = async (body) => {\n try {\n return await User.create({\n email: body.email,\n nickname: body.nickname,\n password: body.password\n })\n } catch (err) {\n switch (err.name) {\n case 'ValidationError':\n throw new Errors.BadUserDataError()\n default:\n throw new Errors.UnknownError()\n }\n }\n}\n\n" }, { "alpha_fraction": 0.7579365372657776, "alphanum_fraction": 0.7579365372657776, "avg_line_length": 27.11111068725586, "blob_id": "db00f51b20ec8b7bf7ef1dcc9328299f5dd085df", "content_id": "6da6830269483c20b08ddc76211114054430b115", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 252, "license_type": "no_license", "max_line_length": 87, "num_lines": 9, "path": "/back/utils/redis/index.js", "repo_name": "1000sang/IoT_Project", "src_encoding": "UTF-8", "text": "const redis = require('redis');\nconst dotenv = require('dotenv');\n\ndotenv.config();\n\n// process.env.REDIS_PORT, process.env.REDIS_HOST\nconst redisClient = redis.createClient(process.env.REDIS_PORT, process.env.REDIS_HOST);\n\nmodule.exports = redisClient" }, { "alpha_fraction": 0.5745614171028137, "alphanum_fraction": 0.5745614171028137, "avg_line_length": 25.725807189941406, "blob_id": "84829dd0593fe29cb57ccca5e07be85f9b710e65", "content_id": "c331abdaa36c88bf53c3421304e1c5cd95b2d8c9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1596, "license_type": "no_license", "max_line_length": 58, "num_lines": 62, "path": "/back/service/site.js", "repo_name": "1000sang/IoT_Project", "src_encoding": "UTF-8", "text": "const { Site } = require('../models');\nconst { createError } = require('../utils/error/error');\n\nconst Errors = (exports.Errors = {\n BadSiteDataError: createError('BadSiteDataError'),\n DuplicateSiteError: createError('DuplicateSiteError'),\n FailureCreateSite: createError('FailureCreateSite'),\n SiteCodeNotFound: createError('SiteCodeNotFound'),\n UnknownError: createError('UnknownError')\n})\n\nexports.findOneSite = async (siteCode) => {\n const exSite = await Site.findOne({\n where: {\n siteCode: siteCode\n }\n })\n if (exSite) {\n throw new Errors.DuplicateSiteError()\n }\n return exSite;\n}\n\nexports.findOneBySiteCode = async (siteCode) => {\n const findOneBySiteCode = await Site.findOne({\n where: {\n siteCode: siteCode\n }\n })\n if (!findOneBySiteCode) {\n throw new Errors.SiteCodeNotFound()\n }\n return findOneBySiteCode\n}\n\n// exports.findUser = async ({ email }) => {\n// const findUser = await User.findOne({\n// where: {\n// email: email\n// }\n// });\n// if (!findUser) {\n// throw new Errors.FailureSignIn()\n// }\n// return findUser;\n// }\n\nexports.createSite = async (payload) => {\n try {\n return await Site.create({\n siteCode: payload.siteCode,\n siteName: payload.siteName\n })\n } catch (err) {\n switch 
(err.name) {\n case 'ValidationError':\n throw new Errors.BadSiteDataError()\n default:\n throw new Errors.UnknownError()\n }\n }\n}\n\n" }, { "alpha_fraction": 0.6694560647010803, "alphanum_fraction": 0.6694560647010803, "avg_line_length": 22.899999618530273, "blob_id": "2a5bbda0c8d66e04ac040f92c9147ac77007e514", "content_id": "07921cbabd607b2783ee9c69c7135bf89d5d9c2a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 239, "license_type": "no_license", "max_line_length": 57, "num_lines": 10, "path": "/back/service/socket.js", "repo_name": "1000sang/IoT_Project", "src_encoding": "UTF-8", "text": "const { Site } = require('../models');\nconst { createError } = require('../utils/error/error');\n\nconst Errors = (exports.Errors = {\n UnknownSocketError: createError('UnknownSocketError')\n})\n\nexports.createRoom = async (payload) => {\n\n}\n" }, { "alpha_fraction": 0.6140350699424744, "alphanum_fraction": 0.6140350699424744, "avg_line_length": 27.5, "blob_id": "0ed76c9540329e22caf2750bb9dd7add58f458c1", "content_id": "6fc028ccc62dd98a5f89289a9c30580bfa6f8b92", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 570, "license_type": "no_license", "max_line_length": 45, "num_lines": 20, "path": "/front/redux/load/actions.js", "repo_name": "1000sang/IoT_Project", "src_encoding": "UTF-8", "text": "const actions = {\n LOAD_USER_REQUEST: 'LOAD_USER_REQUEST',\n LOAD_USER_SUCSSESS: 'LOAD_USER_SUCSSESS',\n LOAD_USER_FAILURE: 'LOAD_USER_FAILURE',\n LOAD_DATA_REQUEST: 'LOAD_DATA_REQUEST',\n LOAD_DATA_SUCSSESS: 'LOAD_DATA_SUCSSESS',\n LOAD_DATA_FAILURE: 'LOAD_DATA_FAILURE',\n\n loadUser: () => ({\n type: actions.LOAD_USER_REQUEST\n }),\n loadUserSucssess: (userData) => ({\n type: actions.LOAD_USER_SUCSSESS,\n payload: userData\n }),\n loadData: () => ({\n type: actions.LOAD_DATA_REQUEST\n })\n};\nexport default actions;\n" }, { "alpha_fraction": 0.39918169379234314, "alphanum_fraction": 0.4198489189147949, "avg_line_length": 27.713855743408203, "blob_id": "9dff3617ae09c3fc483014149440eb5aa2bcf7b4", "content_id": "3080dea876240aa09101d5bf563a9f9eea622339", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 9532, "license_type": "no_license", "max_line_length": 123, "num_lines": 332, "path": "/front/containers/ConnectedLine.js", "repo_name": "1000sang/IoT_Project", "src_encoding": "UTF-8", "text": "import React, { Component } from \"react\";\nimport styled from \"styled-components\";\nimport BackgroundImg from '@iso/assets/images/IMG_2920_3.png';\nimport BackgroundImg_1 from '@iso/assets/images/IMG_2920_1_1.png';\n\n\nclass ConnectedLine extends Component {\n constructor(props) {\n super(props);\n this.canvasRef = React.createRef();\n this.imgRef = React.createRef();\n this.menuRef = React.createRef();\n }\n\n componentDidMount() {\n this.updateCanvas();\n }\n\n componentWillUpdate() {\n this.updateCanvas();\n }\n\n updateCanvas() {\n const img = this.imgRef.current;\n const canvas = this.canvasRef.current;\n let can_w = canvas.width;\n let can_h = canvas.height;\n const ctx = canvas.getContext(\"2d\");\n\n var ball = {\n x: 0,\n y: 0,\n vx: 0,\n vy: 0,\n r: 0,\n alpha: 1,\n phase: 0\n },\n ball_color = {\n r: 207,\n g: 255,\n b: 4\n },\n R = 1.5,\n balls = [],\n alpha_f = 0.03,\n alpha_phase = 0,\n\n link_line_width = 0.8,\n dis_limit = (100 * window.innerWidth * window.innerHeight) / (1920 * 1021),\n add_mouse_point = true,\n 
mouse_in = false,\n mouse_ball = {\n x: 0,\n y: 0,\n vx: 0,\n vy: 0,\n r: 0,\n type: \"mouse\"\n };\n\n function getRandomSpeed(pos) {\n let min = -0.3,\n max = 0.3;\n\n switch (pos) {\n case \"top\":\n return [randomNumFrom(min, max), randomNumFrom(0.1, max)];\n\n case \"right\":\n return [randomNumFrom(min, -0.1), randomNumFrom(min, max)];\n\n case \"bottom\":\n return [randomNumFrom(min, max), randomNumFrom(min, -0.1)];\n\n case \"left\":\n return [randomNumFrom(0.1, max), randomNumFrom(min, max)];\n\n default:\n return;\n }\n }\n\n function randomArrayItem(arr) {\n return arr[Math.floor(Math.random() * arr.length)];\n }\n\n function randomNumFrom(min, max) {\n return Math.random() * (max - min) + min;\n }\n\n function getRandomBall() {\n let pos = randomArrayItem([\"bottom\", \"right\", \"top\", \"left\"]);\n switch (pos) {\n case \"top\":\n return {\n x: randomSidePos(can_w),\n y: -R,\n vx: getRandomSpeed(\"top\")[0],\n vy: getRandomSpeed(\"top\")[1],\n r: R,\n alpha: 1,\n phase: randomNumFrom(0, 10)\n }\n\n case \"right\":\n return {\n x: can_w + R,\n y: randomSidePos(can_h),\n vx: getRandomSpeed(\"right\")[0],\n vy: getRandomSpeed(\"right\")[1],\n r: R,\n alpha: 1,\n phase: randomNumFrom(0, 10)\n }\n\n case \"bottom\":\n return {\n x: randomSidePos(can_w),\n y: can_h + R,\n vx: getRandomSpeed(\"bottom\")[0],\n vy: getRandomSpeed(\"bottom\")[1],\n r: R,\n alpha: 1,\n phase: randomNumFrom(0, 10)\n }\n\n case \"left\":\n return {\n x: -R,\n y: randomSidePos(can_h),\n vx: getRandomSpeed(\"left\")[0],\n vy: getRandomSpeed(\"left\")[1],\n r: R,\n alpha: 1,\n phase: randomNumFrom(0, 10)\n }\n }\n }\n\n function randomSidePos(length) {\n return Math.ceil(Math.random() * length);\n }\n\n function renderBalls() {\n Array.prototype.forEach.call(balls, function (b) {\n if (!b.hasOwnProperty(\"type\")) {\n ctx.fillStyle = \"rgba(\" + ball_color.r + \",\" + ball_color.g + \",\" + ball_color.b + \",\" + b.alpha + \")\";\n ctx.beginPath();\n ctx.arc(b.x, b.y, R, 0, Math.PI * 2, true);\n ctx.closePath();\n ctx.fill();\n }\n });\n }\n\n function updateBalls() {\n let new_balls = [];\n Array.prototype.forEach.call(balls, function (b) {\n b.x += b.vx;\n b.y += b.vy;\n\n if (b.x > -(5) && b.x < (can_w + 5) && b.y > -(5) && b.y < (can_h + 5)) {\n new_balls.push(b);\n }\n\n b.phase += alpha_f;\n b.alpha = Math.abs(Math.cos(b.phase));\n });\n\n balls = new_balls.slice(0);\n }\n\n function loopAlphaInf() {\n\n }\n\n function renderLines() {\n let fraction, alpha;\n for (let i = 0; i < balls.length; i++) {\n for (var j = i + 1; j < balls.length; j++) {\n fraction = getDisOf(balls[i], balls[j]) / dis_limit;\n\n if (fraction < 1) {\n alpha = (1 - fraction).toString();\n\n ctx.strokeStyle = \"rgba(150,150,150,\" + alpha + \")\";\n ctx.lineWidth = link_line_width;\n\n ctx.beginPath();\n ctx.moveTo(balls[i].x, balls[i].y);\n ctx.lineTo(balls[j].x, balls[j].y);\n ctx.stroke();\n ctx.closePath();\n }\n }\n }\n }\n\n function getDisOf(b1, b2) {\n let delta_x = Math.abs(b1.x - b2.x),\n delta_y = Math.abs(b1.y - b2.y);\n\n return Math.sqrt(delta_x * delta_x + delta_y * delta_y);\n }\n\n function addBallIfy() {\n if (balls.length < 20) {\n balls.push(getRandomBall());\n }\n }\n\n function rendering() {\n ctx.clearRect(0, 0, can_w, can_h);\n\n renderBalls();\n renderLines();\n updateBalls();\n addBallIfy();\n\n window.requestAnimationFrame(rendering);\n }\n\n function initBalls(num) {\n for (let i = 1; i <= num; i++) {\n balls.push({\n x: randomSidePos(can_w),\n y: randomSidePos(can_h),\n vx: 
getRandomSpeed(\"top\")[0],\n vy: getRandomSpeed(\"top\")[1],\n r: R,\n alpha: 1,\n phase: randomNumFrom(0, 10)\n });\n }\n }\n\n function initCanvas() {\n canvas.width = window.innerWidth;\n canvas.height = window.innerHeight;\n\n can_w = canvas.width;\n can_h = canvas.height;\n }\n\n window.addEventListener(\"resize\", function (e) {\n console.log(\"window resize...\");\n initCanvas();\n });\n\n function goMovie() {\n initCanvas();\n initBalls((window.innerWidth * window.innerHeight) / 1000000);\n window.requestAnimationFrame(rendering);\n }\n goMovie();\n\n img.addEventListener(\"mouseenter\", function () {\n mouse_in = true;\n balls.push(mouse_ball);\n });\n img.addEventListener(\"mouseleave\", function () {\n mouse_in = false;\n var new_balls = [];\n Array.prototype.forEach.call(balls, function (b) {\n if (!b.hasOwnProperty(\"type\")) {\n new_balls.push(b);\n }\n });\n balls = new_balls.slice(0);\n });\n img.addEventListener(\"mousemove\", function (e) {\n var e = e || window.event;\n mouse_ball.x = e.pageX;\n mouse_ball.y = e.pageY;\n });\n\n }\n\n render() {\n return (\n <Container>\n <Canvas ref={this.canvasRef} className=\"ConnectedLine\" width=\"350\" height=\"700\" />\n <Background ref={this.imgRef} />\n </Container>\n );\n }\n}\n\nconst Container = styled.div`\n position : relative; \n`;\nconst Div = styled.div`\n position:relative;\n`;\n\nconst Canvas = styled.canvas`\n // position:absolute;\n // height: 100vh;\n // width: 100%;\n // background: radial-gradient(\n // ellipse at 60% 120%,\n // rgba(20,20,20,0.4) 10%,\n // rgba(20,0,0,0.6) 40%,\n // rgba(20,20,20,1) 80%\n // );\n // background-color : #0f4c81;\n\n position:absolute;\n top : 0;\n left : 0;\n width:100%;\n height:100vh;\n background-image:url(${BackgroundImg_1});\n background-size:cover;\n background-position: center center;\n background-repeat: no-repeat;\n`;\n\nconst Background = styled.img`\n position:absolute;\n top : 0;\n left : 0;\n width:100%;\n height:100vh;\n background-image:url(${BackgroundImg});\n background-size:cover;\n background-position: center center;\n background-repeat: no-repeat;\n`;\n\nexport default ConnectedLine;" }, { "alpha_fraction": 0.6722532510757446, "alphanum_fraction": 0.6722532510757446, "avg_line_length": 28.88888931274414, "blob_id": "ff9c2a61a66cd44e889626e61c0129cb33ce6707", "content_id": "c4dd12ebb866950d72883072e5410d9e71be3f9e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 771, "license_type": "no_license", "max_line_length": 44, "num_lines": 18, "path": "/back/utils/error/errorMessage.js", "repo_name": "1000sang/IoT_Project", "src_encoding": "UTF-8", "text": "const messages = {\n DuplicateUserEmailError: '이메일 중복',\n DuplicateSiteError: '사이트 중복',\n DuplicateDeviceError: '디바이스 중복',\n UserEmailNotFound: '등록되지 않은 이메일 입니다.',\n DeviceIdNotFound: '등록되지 않은 디바이스 입니다.',\n FailureSignIn: '아이디 혹은 비밀번호가 틀렸습니다.',\n FailureCreateToken: '토큰생성에 실패했습니다.',\n NeedLogin: '로그인이 필요합니다.',\n NeedNotLogin: '로그인하지 않은 사용자만 접근 가능합니다.',\n UnknowError: '알수없는 에러',\n UnknownRedisError: '알수없는 레디스 에러',\n UnknownSocketError: '알수없는 소켓 에러'\n}\n\nexports.createErrMsg = err => {\n return messages[err]\n}" } ]
38
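The dashboard page in the record above fans incoming socket payloads out by their `dataType` field ('Temp' vs 'Hum') before dispatching them to Redux. Below is a minimal, hedged Python sketch of that same routing step, handy for exercising a backend publisher; the payload shape is taken from the frontend code, while the `route_sensor_payload` name and the `handlers` mapping are illustrative assumptions, not part of the repo.

```python
import json

def route_sensor_payload(raw, handlers):
    # Mirror the frontend switch: pick a handler by the payload's dataType.
    msg = json.loads(raw)
    handler = handlers.get(msg["dataType"])
    if handler is None:
        # Unknown sensor type: ignore it, as the JS switch effectively does.
        return None
    return handler(msg["data"])

if __name__ == "__main__":
    handlers = {"Temp": lambda v: f"temp={v}", "Hum": lambda v: f"hum={v}"}
    print(route_sensor_payload('{"dataType": "Temp", "data": 21.4}', handlers))
```

Keeping the type-to-handler mapping in a plain dict keeps the dispatch logic identical on both ends of the socket.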
af2tr-snippets/cloud-python
https://github.com/af2tr-snippets/cloud-python
53dedcf0d4c76e07d0ed8669c3eccf806412e019
e6c3a23f505ec1a764e47a40ccca1b96e335d763
b27c4d159fc7851dc34d08f12478ea895b1789e6
refs/heads/master
2021-01-18T06:22:14.719724
2015-07-31T07:15:57
2015-07-31T07:15:57
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6000924706459045, "alphanum_fraction": 0.6134997606277466, "avg_line_length": 28.630136489868164, "blob_id": "789693dcd109e722ee36e381ba4ab85fd13f6370", "content_id": "1bb2665e2f9aedaac13f64386d67bda650f32e62", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2163, "license_type": "permissive", "max_line_length": 78, "num_lines": 73, "path": "/stock_int/stock_interactive.py", "repo_name": "af2tr-snippets/cloud-python", "src_encoding": "UTF-8", "text": "#\n# Historical Stock Prices\n# Plotted with Plotly & D3.js\n#\n# stock_interactive.py\n#\n# (c) Dr. Yves J. Hilpisch\n# The Python Quants GmbH\n#\n\nimport pandas as pd\nimport pandas.io.data as web\nimport plotly.plotly as ply\nfrom plotly.graph_objs import Figure, Layout, XAxis, YAxis\nfrom flask import Flask, request, render_template, redirect, url_for\nfrom forms import SymbolSearch\n\n#\n# Needed for plotly usage\n#\n\nply.sign_in('Python-Demo-Account', 'gwt101uhh0')\n\ndef df_to_plotly(df):\n '''\n Converting a pandas DataFrame to plotly compatible format.\n '''\n if df.index.__class__.__name__==\"DatetimeIndex\":\n x = df.index.format()\n else:\n x = df.index.values\n lines = {}\n for key in df:\n lines[key] = {}\n lines[key]['x'] = x\n lines[key]['y'] = df[key].values\n lines[key]['name'] = key\n lines_plotly = [lines[key] for key in df]\n return lines_plotly\n\n#\n# Main app\n#\n\napp = Flask(__name__)\n\[email protected](\"/\", methods=['GET', 'POST'])\ndef main():\n form = SymbolSearch(csrf_enabled=False)\n if request.method == 'POST' and form.validate():\n return redirect(url_for('results', symbol=request.form['symbol'],\n trend1=request.form['trend1'],\n trend2=request.form['trend2']))\n return render_template('selection.html', form=form)\n\[email protected](\"/symbol/<symbol>+<trend1>+<trend2>\")\ndef results(symbol, trend1, trend2):\n data = web.DataReader(symbol, data_source='yahoo')\n data['Trend 1'] = pd.rolling_mean(data['Adj Close'], window=int(trend1))\n data['Trend 2'] = pd.rolling_mean(data['Adj Close'], window=int(trend2))\n layout = Layout(\n xaxis=XAxis(showgrid=True, gridcolor='#bdbdbd', gridwidth=2),\n yaxis=YAxis(showgrid=True, gridcolor='#bdbdbd', gridwidth=2)\n )\n fig = Figure(data=df_to_plotly(data[['Adj Close', 'Trend 1', 'Trend 2']]),\n layout=layout)\n plot = ply.plot(fig, auto_open=False)\n table = data.tail().to_html()\n return render_template('plotly.html', symbol=symbol,\n plot=plot, table=table)\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=7777)\n" }, { "alpha_fraction": 0.7285429239273071, "alphanum_fraction": 0.7365269660949707, "avg_line_length": 22.85714340209961, "blob_id": "ea4b47e5bae1f242ba99cb60e60581bbfe6b746c", "content_id": "607e8dd286a162c7400ae0fef1bf6dcff8221adf", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1002, "license_type": "permissive", "max_line_length": 91, "num_lines": 42, "path": "/python-setup.sh", "repo_name": "af2tr-snippets/cloud-python", "src_encoding": "UTF-8", "text": "#\n# Python Installation (incl. Jupyter)\n# (c) Dr Yves J Hilpisch\n# The Python Quants GmbH\n#\n\n# A FEW SYSTEM TOOLS\n# ADD THINGS YOU WANT TO USE (e.g. 
Git)\napt-get install htop unzip -y\napt-get autoremove\n\n# INSTALL MINICONDA\nwget -q http://repo.continuum.io/miniconda/Miniconda-latest-Linux-x86_64.sh -O Miniconda.sh\nbash Miniconda.sh -b\nrm Miniconda.sh\nexport PATH=\"$HOME/miniconda/bin:$PATH\"\n\ncat >> ~/.profile <<EOF\nexport PATH=\"$HOME/miniconda/bin:$PATH\"\nEOF\n\n# INSTALL PYTHON LIBRARIES --\n# ADD LIBRARIES YOU WANT TO USE\nconda install -y ipython-notebook\nconda install -y matplotlib\nconda install -y pandas\nconda install -y seaborn\n\npip install plotly\npip install cufflinks\npip install flask-wtf\n\n# COPYING FILES TO WORKING DIRECTORY\nmkdir ${HOME}/notebook\nmv ${HOME}/*.ipynb ${HOME}/notebook\nrm ${HOME}/*.ipynb\nunzip ${HOME}/stock_int.zip -d ${HOME}/notebook\nrm ${HOME}/stock_int.zip\ncd ${HOME}/notebook\n\n# STARTING JUPYTER NOTEBOOK\nipython notebook --ip=0.0.0.0 --no-browser --notebook-dir=$HOME/notebook\n" } ]
2
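stock_interactive.py above computes its two trend lines with `pd.rolling_mean`, which later pandas releases removed. A minimal sketch of the same computation with the modern `.rolling()` accessor; the sample prices are made up, since the `pandas.io.data` Yahoo reader the script relies on is also long gone.

```python
import pandas as pd

# Stand-in for web.DataReader(symbol, data_source='yahoo'); values are fabricated.
data = pd.DataFrame({"Adj Close": [10.0, 11.0, 12.0, 11.5, 12.5, 13.0]})

# Modern equivalents of pd.rolling_mean(data['Adj Close'], window=n):
data["Trend 1"] = data["Adj Close"].rolling(window=2).mean()
data["Trend 2"] = data["Adj Close"].rolling(window=3).mean()

print(data)
```

The rest of the app (the `df_to_plotly` conversion and the Flask routes) is unaffected by this change, since it only consumes the resulting DataFrame columns.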
alina-krivolapova/my_ds_project
https://github.com/alina-krivolapova/my_ds_project
e10d7231fc494216f0d0d07c99af406a8a9c01c4
f906c9334ec5c4c698557d76da11860da60dfe0f
151e0470c2b6e2cb8a386d65e862b3b6653b8d39
refs/heads/main
2023-06-26T04:31:18.423025
2021-06-26T08:48:33
2021-06-26T08:49:01
374,283,421
0
0
null
2021-06-06T06:17:41
2021-06-26T08:49:04
2021-07-03T11:38:27
Python
[ { "alpha_fraction": 0.7966101765632629, "alphanum_fraction": 0.7966101765632629, "avg_line_length": 32.71428680419922, "blob_id": "c26fbadacd8cfa75e1a8f2d67a2a782c356a81d7", "content_id": "092c18f2f221562a7be5695795e6d3b3c2acadda", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 472, "license_type": "no_license", "max_line_length": 58, "num_lines": 14, "path": "/db.py", "repo_name": "alina-krivolapova/my_ds_project", "src_encoding": "UTF-8", "text": "from sqlalchemy import create_engine\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import scoped_session, sessionmaker\nfrom connection_settings import db_connection_path\n\n# connection to db\nengine = create_engine(f'sqlite:////{db_connection_path}')\n# create session\ndb_session = scoped_session(sessionmaker(bind=engine))\n\n# base class to create table objects\nBase = declarative_base()\n# query binding\nBase.query = db_session.query_property()\n" }, { "alpha_fraction": 0.7096070051193237, "alphanum_fraction": 0.7096070051193237, "avg_line_length": 25.941177368164062, "blob_id": "dad2d0cb3701136fe8979538a0d837c5ed9737a1", "content_id": "7d467d7c69248259b1349d8443b7705064bb60ca", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 458, "license_type": "no_license", "max_line_length": 62, "num_lines": 17, "path": "/db_data_loader.py", "repo_name": "alina-krivolapova/my_ds_project", "src_encoding": "UTF-8", "text": "from rates_data_downloader import RatesDataProvider\nfrom news_downloader import NewsDataProvider\nfrom db import db_session\nfrom db_model import BTCRate, News\n\n\ndef save_data_to_db():\n rates = RatesDataProvider()\n news = NewsDataProvider()\n db_session.bulk_insert_mappings(BTCRate, rates.get_data())\n db_session.bulk_insert_mappings(News, news.get_data())\n # save data\n db_session.commit()\n\n\nif __name__ == \"__main__\":\n save_data_to_db()\n" }, { "alpha_fraction": 0.5645320415496826, "alphanum_fraction": 0.5945813059806824, "avg_line_length": 38.80392074584961, "blob_id": "ee0500c7233d608ebca4e7967665c4975572c94c", "content_id": "57fa04cb8955154eb307b22a1fd64ec117c8b657", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2030, "license_type": "no_license", "max_line_length": 111, "num_lines": 51, "path": "/rates_data_downloader.py", "repo_name": "alina-krivolapova/my_ds_project", "src_encoding": "UTF-8", "text": "\"\"\" Module to parse html with Bitcoin rates. \"\"\"\n\nimport re\nimport bs4\nfrom datetime import datetime\nfrom typing import List, Dict\n\nfrom data_provider import DataProvider\n\nCOLUMN_TITLES = [\"date\", \"open_price\", \"high_price\", \"low_price\", \"close_price\", \"volume\", \"market_cap\"]\n\n\nclass RatesDataProvider(DataProvider):\n \"\"\" Class to work with rates data. 
\"\"\"\n\n def __init__(self):\n # use local downloaded file\n src = self.read_file('Bitcoin price today, BTC live marketcap, chart, and info _ CoinMarketCap.html')\n soup = bs4.BeautifulSoup(src, \"lxml\")\n # save for future to have prettier version\n self.write_to_file(\"pretty_rates.html\", soup.prettify())\n self.raw_rates = soup.find('table', {'class': re.compile('^cmc-table.*')}).find(\"tbody\").find_all(\"tr\")\n\n @staticmethod\n def convert_date(text: str) -> datetime:\n \"\"\" Change format for date field.\"\"\"\n return datetime.strptime(text, '%b %d, %Y')\n\n def parse_data(self, initial_rates: List[bs4.element.Tag]) -> List[Dict[str, str]]:\n \"\"\"Parse data.\n\n Returns list of dicts with prices for each day in downloaded period.\n Output example:\n {'date': 'Jun 05 2021', 'open_price': '36880.16', 'high_price': '37917.71', 'low_price': '34900.41',\n 'close_price': '35551.96', 'volume': '35959473399', 'market_cap': '665804639833'}\n \"\"\"\n rates = []\n for rate in initial_rates:\n day_prices = {}\n for value, column in zip(rate.find_all(\"td\"), COLUMN_TITLES):\n text = value.text.split(\"$\")[1] if value.text.startswith(\"$\") else value.text\n if column == \"date\":\n day_prices[column] = self.convert_date(text)\n else:\n day_prices[column] = text.replace(\",\", \"\")\n rates.append(day_prices)\n return rates\n\n def get_data(self) -> List[Dict[str, str]]:\n \"\"\" Function to provide rates data.\"\"\"\n return self.parse_data(self.raw_rates)\n" }, { "alpha_fraction": 0.6473149657249451, "alphanum_fraction": 0.6473149657249451, "avg_line_length": 25.5, "blob_id": "d9e325503f3440d4e37a3439031b0403f73ed7b4", "content_id": "e9ba2dfeb938a7e99a926dfeafea0766bda71210", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 689, "license_type": "no_license", "max_line_length": 65, "num_lines": 26, "path": "/db_model.py", "repo_name": "alina-krivolapova/my_ds_project", "src_encoding": "UTF-8", "text": "from sqlalchemy import Column, Integer, DateTime, Numeric, String\nfrom db import Base\n\n\nclass BTCRate(Base):\n __tablename__ = 'btc_rates'\n date = Column(DateTime, primary_key=True)\n open_price = Column(Numeric)\n high_price = Column(Numeric)\n low_price = Column(Numeric)\n close_price = Column(Numeric)\n volume = Column(Integer)\n market_cap = Column(Numeric)\n\n def __repr__(self):\n return f'<BTCRate object for date={self.date}>'\n\n\nclass News(Base):\n __tablename__ = 'news'\n id = Column(Integer, primary_key=True)\n date = Column(DateTime)\n news_text = Column(String)\n\n def __repr__(self):\n return f'<News object for date={self.date}>'\n" }, { "alpha_fraction": 0.4848484992980957, "alphanum_fraction": 0.6969696879386902, "avg_line_length": 15.5, "blob_id": "267408041d9bf0000ac398d2da96b9f87e8b74e8", "content_id": "9c712d625ed96ea72d5254c063ea16941fb1e5e9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 99, "license_type": "no_license", "max_line_length": 21, "num_lines": 6, "path": "/requirements.txt", "repo_name": "alina-krivolapova/my_ds_project", "src_encoding": "UTF-8", "text": "beautifulsoup4==4.9.3\ngensim==4.0.1\njupyter==1.0.0\nlxml==4.6.3\nrequests==2.25.1\nSQLAlchemy==1.4.17\n" }, { "alpha_fraction": 0.5419757962226868, "alphanum_fraction": 0.5548613667488098, "avg_line_length": 34.082191467285156, "blob_id": "70806c3209cd7d9921e348a8ca28d894bda16cfb", "content_id": "069258abbe2f731660f387c66ef602f473c12ef1", "detected_licenses": [], "is_generated": 
false, "is_vendor": false, "language": "Python", "length_bytes": 2563, "license_type": "no_license", "max_line_length": 120, "num_lines": 73, "path": "/news_downloader.py", "repo_name": "alina-krivolapova/my_ds_project", "src_encoding": "UTF-8", "text": "\"\"\" Module to parse html with news. \"\"\"\n\nimport re\nimport bs4\nfrom datetime import datetime\nfrom typing import List, Dict, Union\n\nfrom data_provider import DataProvider\n\nURL_PATTERN = \"https://www.theguardian.com/technology/elon-musk\"\n\n\nclass NewsDataProvider(DataProvider):\n \"\"\" Class to work with news data. \"\"\"\n\n def __init__(self):\n self.articles = []\n\n for url in self.generate_list_of_pages(20):\n src = self.download_data_from_site(url)\n soup = bs4.BeautifulSoup(src, \"lxml\")\n\n # save all links to particular news\n news = set()\n for item in soup.find_all('a', {'class': re.compile(\".* js-headline-text\")}):\n link = item.get('href')\n if re.search('/202[0-1]', link):\n news.add(link)\n\n # save particular articles\n for article in news:\n parsed = self.parse_data(article)\n if parsed:\n self.articles.append(parsed)\n\n @staticmethod\n def generate_list_of_pages(num_of_pages: int) -> List[str]:\n \"\"\" Create list of pages urls based on pattern.\"\"\"\n list_of_pages = []\n list_of_pages.append(URL_PATTERN)\n for i in range(2, num_of_pages + 1):\n list_of_pages.append(URL_PATTERN + f\"?page={i}\")\n return list_of_pages\n\n @staticmethod\n def convert_date(text: str) -> datetime:\n \"\"\"Return date in datetime format.\n\n Example:\n Date text is Mon 14 Jun 2021 01.12 BST\"\"\"\n return datetime.strptime(' '.join(text.split()[1:4]), '%d %b %Y')\n\n def parse_data(self, url: str) -> Dict[str, Union[str, datetime]]:\n \"\"\"Parse data.\n\n Returns list of dicts with news\n Output example:\n {'date': datetime.datetime(2021, 6, 11, 0, 0), 'news_text': 'It’s famously impossible to take...'}\n \"\"\"\n src = self.download_data_from_site(url)\n soup = bs4.BeautifulSoup(src, \"lxml\")\n news = {}\n # in other articles it's hard to identify date\n if soup.find(\"label\", attrs={\"for\": \"dateToggle\"}):\n news[\"date\"] = self.convert_date(soup.find(\"label\", attrs={\"for\": \"dateToggle\"}).text)\n news[\"news_text\"] = soup.find(\"div\",\n class_=re.compile(\n \"^article-body-commercial-selector article-body-viewer-selector .*\")).text\n return news\n\n def get_data(self):\n \"\"\" Function to provide news.\"\"\"\n return self.articles\n" }, { "alpha_fraction": 0.6203554272651672, "alphanum_fraction": 0.6203554272651672, "avg_line_length": 27.136363983154297, "blob_id": "b60ecaf973200145dbb4cf2c3259d0b28a291a42", "content_id": "d15def95cc092c3d5530dbe01fa4e784cfe0f2bf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1238, "license_type": "no_license", "max_line_length": 102, "num_lines": 44, "path": "/data_provider.py", "repo_name": "alina-krivolapova/my_ds_project", "src_encoding": "UTF-8", "text": "import requests\nfrom abc import ABC, abstractmethod\nfrom datetime import datetime\nfrom typing import Any\n\n\nclass DataProvider(ABC):\n \"\"\" Abstract class to form the structure of all data providers and provide basic functionality.\"\"\"\n\n @abstractmethod\n def get_data(self):\n pass\n\n @abstractmethod\n def parse_data(self, data: Any):\n pass\n\n @staticmethod\n @abstractmethod\n def convert_date(text: str) -> datetime:\n \"\"\" Change format for date field.\"\"\"\n pass\n\n @staticmethod\n def read_file(name: str) -> str:\n \"\"\"Read data 
from file.\"\"\"\n with open(name) as file:\n return file.read()\n\n @staticmethod\n def download_data_from_site(data_url: str) -> str:\n \"\"\"Download data from site with specified URL\"\"\"\n try:\n result = requests.get(data_url)\n result.raise_for_status()\n return result.text\n except (requests.RequestException, ValueError):\n raise ConnectionError(\"Cannot download data from site\")\n\n @staticmethod\n def write_to_file(name: str, content: str) -> None:\n \"\"\"Write content to file.\"\"\"\n with open(name, \"w\") as pretty_file:\n pretty_file.write(content)\n" } ]
7
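The providers in this repo all follow one contract: hold raw input, `parse_data` it into a list of dicts, convert date strings with `strptime`, and expose everything through `get_data`. A self-contained toy provider in that style is sketched below; it deliberately skips the project's `DataProvider` base class so it runs standalone, and the semicolon-separated input format is an assumption for illustration, not part of the repo.

```python
from datetime import datetime

class TinyProvider:
    """Miniature provider in the style of RatesDataProvider."""

    def __init__(self, raw):
        self.raw = raw

    @staticmethod
    def convert_date(text):
        # Same strategy as the repo: strptime with a fixed format string.
        return datetime.strptime(text, "%b %d, %Y")

    def parse_data(self, raw):
        rows = []
        for line in raw.strip().splitlines():
            date_text, price = line.split(";")
            rows.append({"date": self.convert_date(date_text),
                         "price": float(price)})
        return rows

    def get_data(self):
        return self.parse_data(self.raw)

print(TinyProvider("Jun 05, 2021;35551.96").get_data())
```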
willemkempers/grabber
https://github.com/willemkempers/grabber
8383f3003d841950db45494a3c9526448f6ec685
31bf2a645e92343b7a0f9f6d443de10151e557cf
590fa3884b61c55482f719c026810c242d2ebd46
refs/heads/master
2016-09-05T09:30:33.686825
2015-02-05T12:37:07
2015-02-05T12:37:07
30,351,683
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6078545451164246, "alphanum_fraction": 0.6403559446334839, "avg_line_length": 28.70689582824707, "blob_id": "81aece76eeeb219aa889f07351237021ffa9a34a", "content_id": "01b3e7b83074ee3db20f0eb7e553d1460c450603", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 5169, "license_type": "no_license", "max_line_length": 144, "num_lines": 174, "path": "/visual/build-tmp/source/visual.java", "repo_name": "willemkempers/grabber", "src_encoding": "UTF-8", "text": "import processing.core.*; \nimport processing.data.*; \nimport processing.event.*; \nimport processing.opengl.*; \n\nimport java.util.HashMap; \nimport java.util.ArrayList; \nimport java.io.File; \nimport java.io.BufferedReader; \nimport java.io.PrintWriter; \nimport java.io.InputStream; \nimport java.io.OutputStream; \nimport java.io.IOException; \n\npublic class visual extends PApplet {\n\npublic boolean sketchFullScreen() {\n return true;\n}\n\nArrayList<Node> nodes = new ArrayList<Node>();\nTable table;\nint id = 0;\nint[] totalissues = new int[1];\nint[] numtotaltitles = new int[0];\nString[] totaltitles = new String[0];\n\nfloat distance = 1000000.0f;\nint closestnode = 0;\n\npublic void setup() {\n theSetup();\n table = loadTable(\"woningwet2.csv\", \"header\");\n addLidNodes();\n}\n\npublic void draw() {\n background(0,0,0);\n for (int i = 0; i<nodes.size(); i++) {\n Node n = nodes.get(i);\n n.update();\n n.display();\n\n float nodeDist = dist(mouseX,mouseY,n.xp,n.yp);\n if(nodeDist < distance) {\n distance = nodeDist;\n closestnode = i;\n }\n }\n stroke(0,0,100);\n Node dn = nodes.get(closestnode);\n line(mouseX,mouseY,dn.xp,dn.yp);\n text(str(dn.totaltitles), constrain(mouseX+10, 0, width-300), mouseY-90, 300, height);\n text(str(dn.titlenum), constrain(mouseX+10, 0, width-300), mouseY-70, 300, height);\n text(str(dn.yr), constrain(mouseX+10, 0, width-300), mouseY-50, 300, height);\n text(str(dn.issue), constrain(mouseX+10, 0, width-300), mouseY-30, 300, height);\n text(dn.title, constrain(mouseX+10, 0, width-300), mouseY-10, 300, height);\n text(dn.alinea, constrain(mouseX+10, 0, width-300), mouseY+20, 300, height);\n\n distance = 1000000.0f;\n closestnode = 0;\n}\npublic void addLidNodes() {\n\t for (TableRow row : table.rows()) {\n int yr = row.getInt(\"Year\");\n int issue = row.getInt(\"Issue\");\n if(id==0) totalissues[0] = issue;\n if(issue != totalissues[totalissues.length-1]) totalissues = append(totalissues, issue);\n \n int issuenum = totalissues.length;\n int editnr = row.getInt(\"Editnr\");\n int totallids = row.getInt(\"Totallids\");\n String title = row.getString(\"Title\");\n \n if(editnr == 1) {\n if(id > 0) {\n numtotaltitles = append(numtotaltitles, totaltitles.length);\n }\n totaltitles = new String[1];\n totaltitles[totaltitles.length-1] = title;\n }\n if(title.equals(totaltitles[totaltitles.length-1]) == false) totaltitles = append(totaltitles, title);\n // println(totaltitles.length);\n int titlenum = totaltitles.length;\n\n String lidnrtxt = row.getString(\"Lidnr\");\n int lidnr = PApplet.parseInt(lidnrtxt.substring(0, lidnrtxt.length() - 1));\n String alinea = row.getString(\"Alinea\");\n new Node(true, id, yr, issue, issuenum, editnr, totallids, title, titlenum, lidnr, alinea);\n id++;\n }\n numtotaltitles = append(numtotaltitles, totaltitles.length);\n\n // println(\"total\"+numtotaltitles);\n // println(\"---------------------------\");\n for (int i = 0; i<nodes.size(); i++) {\n Node n = nodes.get(i);\n 
n.update_value_totalissues(totalissues.length);\n for(int j = 0; j < totalissues.length; j++) {\n if(n.issue == totalissues[j]) {\n n.update_value_totaltitles(numtotaltitles[j]);\n }\n }\n }\n}\nclass Node {\n int id, yr, issue, issuenum, totalissues, titlenum, totaltitles, lidnr, editnr, totallids;\n float xp, yp, xOrigin, yOrigin;\n float radius = 25;\n String title, alinea;\n \n Node(boolean save, int id, int yr, int issue, int issuenum, int editnr, int totallids, String title, int titlenum, int lidnr, String alinea) {\n this.id = id;\n this.yr = yr;\n this.issue = issue;\n this.issuenum = issuenum;\n this.editnr = editnr;\n this.totallids = totallids;\n this.title = title;\n this.titlenum = titlenum;\n this.lidnr = lidnr;\n this.alinea = alinea;\n if(save)nodes.add(this);\n }\n \n public void update() {\n // if(id>0) {\n // Node otherNode = (Node) nodes.get(id-1);\n // if(otherNode.title.equals(title)) {\n // offsetX += 50;\n // }\n // }\n\n // xp = map(issuenum, 0, totalissues+1, 0, width) + lidnr*20;\n // yp = map(titlenum, 0, totaltitles+1, 0, height);\n xOrigin = map(issuenum, 0, totalissues+1, 0, width);\n yOrigin = map(titlenum, 0, totaltitles+1, 0, height);\n\n xp = xOrigin + sin(radians(lidnr*30))*(radius);;\n yp = yOrigin + cos(radians(lidnr*30))*(radius);\n }\n \n public void display() {\n fill(0,0,100);\n noStroke();\n ellipse(xp, yp, 3, 3);\n stroke(0,0,100);\n line(xp,yp,xOrigin,yOrigin);\n }\n\n public void update_value_totalissues(int totaliss) {\n this.totalissues = totaliss;\n }\n\n public void update_value_totaltitles(int totaltitles) {\n this.totaltitles = totaltitles;\n }\n}\npublic void theSetup() {\n size(displayWidth, displayHeight);\n frameRate(60);\n colorMode(HSB, 360, 100, 100, 100);\n smooth(8);\n background(0);\n}\n static public void main(String[] passedArgs) {\n String[] appletArgs = new String[] { \"--full-screen\", \"--bgcolor=#666666\", \"--hide-stop\", \"visual\" };\n if (passedArgs != null) {\n PApplet.main(concat(appletArgs, passedArgs));\n } else {\n PApplet.main(appletArgs);\n }\n }\n}\n" }, { "alpha_fraction": 0.5731176137924194, "alphanum_fraction": 0.5849409103393555, "avg_line_length": 26.70689582824707, "blob_id": "b059bfcf94e688ccbe0ed500b1cbc00e51b63b95", "content_id": "0029e2d41d963d35f1eb56d088cfd8defe978abf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1607, "license_type": "no_license", "max_line_length": 101, "num_lines": 58, "path": "/grabber.py", "repo_name": "willemkempers/grabber", "src_encoding": "UTF-8", "text": "import os, glob, string\nfrom lxml import etree\n\ndef write_line(f, year, issue, title, lidnr, alinea):\n\t# massage our data to get it ready for output, any further character replacements go here\n\tyear = year.strip().replace('\\n', ' ')\n\tissue = issue.strip().replace('\\n', ' ')\n\ttitle = title.strip().replace('\\n', ' ')\n\tlidnr = lidnr.strip().replace('\\n', ' ')\n\talinea = alinea.strip().replace('\\n', ' ')\n\n\tf.write( \"\\\"{0}\\\", \\\"{1}\\\", \\\"{2}\\\", \\\"{3}\\\", \\\"{4}\\\"\\n\".format(year, issue, title, lidnr, alinea) )\n\ndef parse_stadsblad(fname):\n\tprint \"Now processing file\", fname\n\tdom = etree.parse(fname)\n\n\t# get staatsblad issue and year\n\tstb = dom.xpath('//stb')\n\tchs = stb[0].getchildren()\n\tyear = chs[1].text\n\tissue = chs[2].text\n\tprint \"STAATSBLAD \", year, issue\n\n\t# get all the titles\n\twlids = dom.xpath('//wlid')\n\tfor wlid in wlids:\n\t\ttitlee = wlid.xpath('al')\n\t\t#print \"title: \", 
title[0].text.encode('utf-8')\n\t\ttitle = titlee[0].text.encode('utf-8')\n\n\t\t# predefine variables\n\t\tlidnr = \"\"\n\t\talinea = \"\"\n\n\t\t# append so every wlid adds to the file; \"w+\" here truncated the output\n\t\t# again for each title (remove stale .txt files before re-running)\n\t\twith open(\"{0}.txt\".format(fname), \"a\") as f:\n\t\t\t# get all the 'lids' inside this wlid; './/lid' keeps the search local,\n\t\t\t# while '//lid' scanned the whole document for every title\n\t\t\tlids = wlid.xpath('.//lid')\n\t\t\tfor lid in lids:\n\t\t\t\tchs = lid.getchildren()\n\t\t\t\tfor c in chs:\n\t\t\t\t\tif (c.tag == 'nr'):\n\t\t\t\t\t\t#print \"lidnr: \", c.text.encode('utf-8')\n\t\t\t\t\t\tlidnr = c.text.encode('utf-8')\n\t\t\t\t\tif (c.tag == 'al'):\n\t\t\t\t\t\t#print \"alinea: \", c.text.encode('utf-8')\n\t\t\t\t\t\talinea = c.text.encode('utf-8')\n\n\t\t\t\t# one row per lid, not one per child element\n\t\t\t\twrite_line(f, year, issue, title, lidnr, alinea)\n\ndef main():\n\tos.chdir(\"data/\")\n\tfor file in glob.glob(\"*.xml\"):\n\t    parse_stadsblad(file)\n\t    print(\"=\"*80)\n\nif __name__ == \"__main__\":\n\tmain()\n" }, { "alpha_fraction": 0.5906183123588562, "alphanum_fraction": 0.5954157710075378, "avg_line_length": 25.013334274291992, "blob_id": "d22836961f693fd2b43300202690a82fda05fbef", "content_id": "e8fb0ed7850b2b290768a94776a2d049525827eb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1876, "license_type": "no_license", "max_line_length": 106, "num_lines": 75, "path": "/xmltest/build-tmp/source/xmltest.java", "repo_name": "willemkempers/grabber", "src_encoding": "UTF-8", "text": "import processing.core.*; \nimport processing.data.*; \nimport processing.event.*; \nimport processing.opengl.*; \n\nimport java.util.HashMap; \nimport java.util.ArrayList; \nimport java.io.File; \nimport java.io.BufferedReader; \nimport java.io.PrintWriter; \nimport java.io.InputStream; \nimport java.io.OutputStream; \nimport java.io.IOException; \n\npublic class xmltest extends PApplet {\n\nXML xml;\n\npublic void setup() {\n xml = loadXML(\"data.xml\");\n XML[] children = xml.getChildren(\"wlid\");\n\n for (int i = 0; i < children.length; i++) {\n \tXML kop = children[i].getChild(\"kop\");\n \tXML kopnr = kop.getChild(\"nr\");\n println(kopnr.getContent());\n\n XML al = children[i].getChild(\"al\");\n println(al.getContent());\n\n try {\n \tXML arttext = children[i].getChild(\"arttkst\");\n\t \tXML lid = arttext.getChild(\"lid\");\n\t \tXML lidnr = lid.getChild(\"nr\");\n\t \tString[] lids = {};\n \t\ttry {\n \t\t\tXML[] lidal = lid.getChildren(\"al\");\n \t\t\tfor (int j = 0; j < lidal.length; j++) {\n\t\t\t lids = append(lids, lidal[j].getContent());\n\t\t\t }\n \t\t} catch (Exception e) {\n \t\t\t\n \t\t}\n\n\t \tprintln(lidnr.getContent());\n\t \t println(lids);\n\n } catch (Exception e) {\n \t\n }\n\n // try {\n // \tXML wlichaam = children[i].getChild(\"wlichaam\");\n\t // \tXML art = wlichaam.getChild(\"art\");\n\t // \tXML[] wllids = art.getChildren(\"lid\");\n\n\t\t\t// for (int j = 0; j < wllids.length; j++) {\n\t\t\t// \tXML wlnr = wllids[j].getChild(\"nr\");\n\t\t\t// \tXML wlal = wllids[j].getChild(\"al\");\n\t // \t\tprintln(wlnr.getContent() + wlal.getContent());\n\t // \t}\n // } catch (Exception e) {\n \t\n // }\n }\n}\n static public void main(String[] passedArgs) {\n String[] appletArgs = new String[] { \"--full-screen\", \"--bgcolor=#666666\", \"--hide-stop\", \"xmltest\" };\n if (passedArgs != null) {\n PApplet.main(concat(appletArgs, passedArgs));\n } else {\n PApplet.main(appletArgs);\n }\n }\n}\n" } ]
3
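grabber.py's `write_line` assembles quoted CSV rows by hand, which breaks as soon as a field contains a double quote. Below is a hedged sketch of the same helper built on the standard `csv` module, which handles quoting and escaping; it is written for Python 3, unlike the repo's Python 2 script, and the field list simply mirrors grabber.py.

```python
import csv

def write_line(f, year, issue, title, lidnr, alinea):
    # Same normalization as grabber.py: strip and flatten newlines.
    fields = [s.strip().replace("\n", " ")
              for s in (year, issue, title, lidnr, alinea)]
    # csv.writer escapes embedded quotes instead of corrupting the row.
    csv.writer(f, quoting=csv.QUOTE_ALL).writerow(fields)

with open("out.csv", "w", newline="") as f:
    write_line(f, "1901", "12", 'Title with "quotes"', "1.", "Some alinea\ntext")
```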
ttm/fimdomundo
https://github.com/ttm/fimdomundo
e261eb6105998b87a0385ed5f919c6644e948aa1
754c1a7bdbccba358580636cf81675b8cd41e84a
4cadbcf2092f603fe544fd89ff76bb4a064b5eb6
refs/heads/master
2021-01-18T06:39:15.237193
2013-01-14T00:15:15
2013-01-14T00:15:15
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.41818180680274963, "alphanum_fraction": 0.5030303001403809, "avg_line_length": 22.571428298950195, "blob_id": "7eb1fdcae39a57be79521eda1c8ea5621eb38c82", "content_id": "36fa7b1b01bb9dba46afb86a43ecd42076736e6f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 165, "license_type": "no_license", "max_line_length": 37, "num_lines": 7, "path": "/artigo/snippets/referencias.py", "repo_name": "ttm/fimdomundo", "src_encoding": "UTF-8", "text": "In [87]: mm[40]['References']\nOut[87]: '<[email protected]>\\\n \\n\\t<[email protected]>'\n\nIn [88]: mm[40]['In-Reply-To']\nOut[88]: '<bf76ddfc05081006455623b71d-JsoAwUIsXosN+\\\n [email protected]>'\n" }, { "alpha_fraction": 0.5692354440689087, "alphanum_fraction": 0.5917584300041199, "avg_line_length": 25.615591049194336, "blob_id": "fb1a7b3f1213a662aee6e30ef39e43a8887434e7", "content_id": "eb79ef91323c4de943a35f2773fbfbbedd06804f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9912, "license_type": "no_license", "max_line_length": 92, "num_lines": 372, "path": "/python/processaMil.py", "repo_name": "ttm/fimdomundo", "src_encoding": "UTF-8", "text": "#-*- coding: utf8 -*-\nimport mailbox, os\n\na=os.listdir(\".\")\na.remove(\"processaMil.py\")\na.remove(\".processaMil.py.swp\")\na.remove(\"mensagens.gml\")\na.sort()\n\nmm=[]\nc=0\ncc=0\nn_multi=0\nfor aa in a:\n mbox = mailbox.mbox(aa)\n m=mbox[0]\n mm.append(m)\n\n for i in xrange(len(mbox)):\n m=mbox[i]\n if m.is_multipart():\n try:\n #print(m.get_payload()[0].get_payload()[1].get_payload())\n m.get_payload()[0].get_payload()[1].get_payload()\n except:\n #m.get_payload()[0].get_payload()\n cc+=1\n n_multi+=1\n c+=1\n\nfor i in xrange(len(mm)):\n mm[i]['from']=mm[i]['from'].replace(\"\\\"\",\"\")\n\n# n_multi eh o numero de mensagens enviadas como multimesage\n# geralmente sao msgs HTML com versao txt tb\n\n# cc é o número de mensagens enviadas como multimessage \n# que nao possuem versao HTML, mas sim uma assinatura da lista.\n\nthreads = [m for m in mm if m['in-reply-to']==None]\nn_threads=len(threads)\n\n# n_threads eh o numero de threads iniciadas, i.e., nao sao resposta a outra mensagem\n# (verificar se ocorre realizar um reply e mudar o titulo, constituindo uma thread\n# sem que seja reply.)\n# Observe que existem threads iniciadas na lista que tem In-Reply-To, pois sao respostas\n# a uma msg que nao estava na lista de emails\n\n####\n# achando enviadores diferentes\n\nsenders=[]\nfor m in mm:\n sender=m[\"from\"]\n if sender not in senders:\n senders.append(sender)\n\n# senders eh o conjunto de diferentes enderecos que enviaram mensagem\n# vale fazer pre-tratamento para cruzar os usuarios que\n# usam enderecos e nomes diferentes\n\n########\n# achando numero de mensagens por enviador/participante\n\nn_msgs=[0]*len(senders)\nfor m in mm:\n sender=m[\"from\"]\n i=senders.index(sender)\n n_msgs[i]+=1\n\n# n_msgs[i] eh o numero de mensagem do senders[i]\n\nimport numpy as n\n###\n# ordenando:\nn_=n.array(n_msgs)\ns_=n.array(senders)\n\nii=n.argsort(n_)\n\nn_=n_[ii][::-1]\ns_=s_[ii][::-1]\n\n#####\n# fazendo histograma\n\nmmax=n_.max()\ndelta=10 # delta em delta\napontador=0\nincidencias=[]\nwhile apontador < mmax:\n incidentes= n_ < apontador + 10\n incidencias.append(sum(incidentes))\n n_[incidentes]+=n.ones(sum(incidentes))*mmax*2\n apontador+=10\n\ninc=incidencias[::-1]\nn_msgs2=(n.arange(0,(mmax/10+1)*10,10)+5)[::-1]\n# inc[i] participantes mandaram 
n_msgs2[i] mensagens +-5 \n\ninc=n.array(inc)\nyes=inc.nonzero()\nimport pylab as p\np.plot(n.log10(inc[yes]),n.log10(n_msgs2[yes]),\"ro\")\np.xlabel(\"ln10(numero de participantes)\")\np.ylabel(\"ln10(numero de mensagens enviadas)\")\n\np.yticks(n.log10(n_msgs2[yes]),n_msgs2[yes])\np.xticks(n.log10(inc[yes]),inc[yes])\n\np.show()\n\n###\n# refazendo n_, numero de mensagens de cada participante s_[i]\nn_=n.array(n_msgs)\nii=n.argsort(n_)\nn_=n_[ii][::-1]\n\n\n#########################\n# achando numero de respostas obtidas por cada enviador s_[i]\n\nn_respostas=[0]*len(senders)\nn_res=0\nn_res_achado=0\nfoo=0\nfor m in mm:\n try:\n id_msg_orig=m['references'].split('\\t')[-1]\n id_msg_orig=id_msg_orig.split(' ')[-1]\n except:\n id_msg_orig=0\n if id_msg_orig:\n n_res+=1\n for m2 in mm:\n if m2['Message-ID']==id_msg_orig:\n #print id_msg_orig+\"\\n\"+m2['Message-Id']+\"\\n\\n\"\n n_res_achado+=1\n sender=m2['from']\n i=senders.index(sender)\n n_respostas[i]+=1\n foo=1\n if not foo:\n print id_msg_orig\n foo=0\n\n# n_res+n_threads == len(mm)\n# n_res-n_res_achado == numero de respostas a emails que não estão na lista (ao menos no BD)\n# n_threads+(n_res-n_res_achado) == numero de threads iniciadas para a lista\n# (nao para todos os participantes, pois algumas das msgs sao respostas a emails externos)\n\nn__=n.array(n_respostas)\nii=n.argsort(n__)\nn__=n__[ii][::-1]\ns__=n.array(senders)\ns__=s__[ii][::-1]\n\n\n# n__[i] eh o numero de respostas obtidas pelo enviador s__[i]\n\n#####\n# fazendo histograma de numero de respostas obtidas\n\nmmax=n__.max()\ndelta=10 # delta em delta\napontador=0\nincidencias_=[]\nwhile apontador < mmax:\n incidentes= n__ < apontador + 10\n incidencias_.append(sum(incidentes))\n n__[incidentes]+=n.ones(sum(incidentes))*mmax*2\n apontador+=10\n\ninc_=incidencias_[::-1]\nn_msgs2_=(n.arange(0,(mmax/10+1)*10,10)+5)[::-1]\n# inc[i] participantes mandaram n_msgs2[i] mensagens +-5 \n\ninc_=n.array(inc_)\nyes_=inc_.nonzero()\np.plot(n.log10(inc_[yes_]),n.log10(n_msgs2_[yes_]),\"ro\")\np.xlabel(\"ln10(numero de participantes)\")\np.ylabel(\"ln10(numero de respostas recebidas)\")\np.yticks(n.log10(n_msgs2_[yes_]),n_msgs2_[yes_])\np.xticks(n.log10(inc_[yes_]),inc_[yes_])\np.show()\n\nn__=n.array(n_respostas)\nii=n.argsort(n__)\nn__=n__[ii][::-1]\ns__=n.array(senders)\ns__=s__[ii][::-1]\n\n\n############\n# pensando na estrutura da rede social atraves dos graus, nao pela forca\n### 1) numero de pessoas diferentes que responderam aa pessoa\n### (grau de saida)\nn_pessoas_responderam=[0]*len(senders)\npessoas_responderam=[[] for i in senders]\nrepetidos=[0]*len(senders)\nrp=[]\nimport time\nfor m in mm:\n try:\n id_msg_orig=m['references'].split('\\t')[-1]\n id_msg_orig=id_msg_orig.split(' ')[-1]\n except:\n id_msg_orig=0\n if id_msg_orig:\n for m2 in mm:\n if m2['Message-ID']==id_msg_orig:\n #print id_msg_orig+\"\\n\"+m2['Message-Id']+\"\\n\\n\"\n sender=m2['from']\n replyer=m['from']\n i=senders.index(sender)\n if replyer not in pessoas_responderam[i]:\n pessoas_responderam[i].append(replyer)\n n_pessoas_responderam[i]+=1\n #print(\"novo:\\nenv: %s,\\nresp: %s\" % (sender,replyer))\n else:\n rp.append(replyer)\n repetidos[i]+=1\n #print(\"repetido:\\nenv: %s,\\nresp: %s\" % (sender,replyer))\n\n time.sleep(1)\n\nn___=n.array(n_pessoas_responderam)\nii=n.argsort(n___)\nn___=n___[ii][::-1]\nprint n___\ns___=n.array(senders)\ns___=s___[ii][::-1]\nr___=n.array(repetidos)\nr___=r___[ii][::-1]\n\n# n___[i] é o número de pessoas diferentes que responderam s___[i]\n# é o grau de 
saída\n##########\n# fazendo histograma de numero de respostas obtidas\n\nmmax=n___.max()\ndelta=10 # delta em delta\napontador=0\nincidencias__=[]\nwhile apontador < mmax:\n incidentes= n___ < apontador + 10\n incidencias__.append(sum(incidentes))\n n___[incidentes]+=n.ones(sum(incidentes))*mmax*2\n apontador+=10\n\ninc__=incidencias__[::-1]\nn_msgs2__=(n.arange(0,(mmax/10+1)*10,10)+5)[::-1]\n# inc[i] participantes mandaram n_msgs2[i] mensagens +-5 \n\ninc__=n.array(inc__)\nyes__=inc__.nonzero()\np.plot(n.log10(inc__[yes__]),n.log10(n_msgs2__[yes__]),\"ro\")\np.xlabel(\"ln10(numero de participantes)\")\np.ylabel(\"ln10(numero de pessoas diferentes que responderam a pessoa (grau de saida))\")\np.show()\n\nn___=n.array(n_pessoas_responderam)\nii=n.argsort(n___)\nn___=n___[ii][::-1]\n\n\n############\n# pensando na estrutura da rede social atraves dos graus, nao pela forca\n### 2) numero de pessoas diferentes a quem a pessoa respondeu\n### (grau de entrada)\nn_pessoas_respondidas=[0]*len(senders)\npessoas_respondidas=[[] for i in senders]\nrepetidos_=[0]*len(senders)\nfor m in mm:\n try:\n id_msg_orig=m['references'].split('\\t')[-1]\n id_msg_orig=id_msg_orig.split(' ')[-1]\n except:\n id_msg_orig=0\n if id_msg_orig:\n for m2 in mm:\n if m2['Message-ID']==id_msg_orig:\n #print id_msg_orig+\"\\n\"+m2['Message-Id']+\"\\n\\n\"\n sender=m2['from']\n replyer=m['from']\n i=senders.index(replyer)\n if sender not in pessoas_respondidas[i]:\n pessoas_respondidas[i].append(sender)\n n_pessoas_respondidas[i]+=1\n else:\n repetidos_[i]+=1\n\n\nn____=n.array(n_pessoas_respondidas)\nii=n.argsort(n____)\nn____=n____[ii][::-1]\ns____=n.array(senders)\ns____=s____[ii][::-1]\nr____=n.array(repetidos_)\nr____=r____[ii][::-1]\n\n# n____[i] é o número de pessoas diferentes a que respondeu s____[i]\n# é o grau deentrada \n#####\n# fazendo histograma de numero de respostas obtidas\n\nmmax=n____.max()\ndelta=10 # delta em delta\napontador=0\nincidencias___=[]\nwhile apontador < mmax:\n incidentes= n____ < apontador + 10\n incidencias___.append(sum(incidentes))\n n____[incidentes]+=n.ones(sum(incidentes))*mmax*2\n apontador+=10\n\ninc___=incidencias___[::-1]\nn_msgs2___=(n.arange(0,(mmax/10+1)*10,10)+5)[::-1]\n# inc[i] participantes mandaram n_msgs2[i] mensagens +-5 \n\ninc___=n.array(inc___)\nyes___=inc___.nonzero()\np.plot(n.log10(inc___[yes___]),n.log10(n_msgs2___[yes___]),\"ro\")\np.xlabel(\"ln10(numero de participantes)\")\np.ylabel(\"ln10(numero de pessoas diferentes a que a pessoa respondeu (grau de entrada))\")\np.show()\n\nn____=n.array(n_pessoas_respondidas)\nii=n.argsort(n____)\nn____=n____[ii][::-1]\ns____=n.array(senders)\ns____=s____[ii][::-1]\nr____=n.array(repetidos_)\nr____=r____[ii][::-1]\n\n\n#######################\n# montando a rede\n\nn_respostas=[0]*len(senders)\nn_res=0\nn_res_achado=0\nfoo=0\narestas=[]\nfor m in mm:\n try:\n id_msg_orig=m['references'].split('\\t')[-1]\n id_msg_orig=id_msg_orig.split(' ')[-1]\n except:\n id_msg_orig=0\n\n if id_msg_orig:\n n_res+=1\n for m2 in mm:\n if m2['Message-ID']==id_msg_orig:\n #print id_msg_orig+\"\\n\"+m2['Message-Id']+\"\\n\\n\"\n sender=m2['from']\n replyer=m['from']\n arestas.append((sender,replyer))\n\nimport networkx as x\nG=x.DiGraph()\nfor aresta in arestas:\n emissor,receptor=aresta\n #emissor=emissor.replace(\"\\\"\",\"\")\n #receiver=receiver.replace(\"\\\"\",\"\")\n if G.has_edge(emissor,receptor):\n G[emissor][receptor]['weight']+=1\n else:\n G.add_weighted_edges_from([(emissor,receptor,1.)])\n\nx.write_gml(G,'mensagens.gml')\n" }, { 
"alpha_fraction": 0.7525773048400879, "alphanum_fraction": 0.7525773048400879, "avg_line_length": 23.25, "blob_id": "0b658f67df9009a117a3daba56f46cdd672873b4", "content_id": "1c74a5b0a6fbb43a0cec07328b1467ebc23f1d94", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 98, "license_type": "no_license", "max_line_length": 63, "num_lines": 4, "path": "/README.md", "repo_name": "ttm/fimdomundo", "src_encoding": "UTF-8", "text": "Mutirão Fim do Mundo\n==========\n\nConstruindo metodos de filtragem da mailing list metareciclagem\n" }, { "alpha_fraction": 0.6575342416763306, "alphanum_fraction": 0.7191780805587769, "avg_line_length": 35.5, "blob_id": "88848bd3548e1a7e8a3aa1b472e9ebdeee0fd5f9", "content_id": "faecdf20d6fc2e949216cff071743ea6f753f3a2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 146, "license_type": "no_license", "max_line_length": 113, "num_lines": 4, "path": "/python/baixaTudo.py", "repo_name": "ttm/fimdomundo", "src_encoding": "UTF-8", "text": "import os\n\nfor i in xrange(60):\n os.system(\"wget http://download.gmane.org/gmane.politics.organizations.metareciclagem/%i000/%i000\" % (i,i+1))\n" }, { "alpha_fraction": 0.6754966974258423, "alphanum_fraction": 0.7003311514854431, "avg_line_length": 19.827587127685547, "blob_id": "d2571c5949f61f30b4c2b2cdd3ecb324c1e6b5dc", "content_id": "0f7121bc05bb78a2bdafea8f37a380fc2132601c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 604, "license_type": "no_license", "max_line_length": 83, "num_lines": 29, "path": "/python/readLog.py", "repo_name": "ttm/fimdomundo", "src_encoding": "UTF-8", "text": "import nltk,time\nfrom urllib import urlopen\n\n#url = \"http://www.gutenberg.org/files/2554/2554.txt\" # ao inves de crime e castigo\nurl = \"http://hera.ethymos.com.br:1080/~macambot/OMYG.text\" \na=time.time()\nraw = urlopen(url).read()\nraw2 = raw.decode(\"utf-8\",'ignore')\nprint(\"read\")\nprint(time.time()-a)\na=time.time()\n\ntokens = nltk.word_tokenize(raw2)\nprint(\"tokens\")\nprint(time.time()-a)\na=time.time()\n\ntext = nltk.Text(tokens)\nprint(\"text\")\nprint(time.time()-a)\na=time.time()\n\ntext.collocations()\nprint(\"collocations\")\nprint(time.time()-a)\na=time.time()\n\n\n# usar as funcoes do text, tem btte coisa legal\n" }, { "alpha_fraction": 0.7032967209815979, "alphanum_fraction": 0.791208803653717, "avg_line_length": 44.5, "blob_id": "945bb2b55f609ed3a5bd0635bdb0b37949e94b13", "content_id": "afd00ecd0a33f4244e3b0a81d71366a605303f82", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 91, "license_type": "no_license", "max_line_length": 47, "num_lines": 2, "path": "/artigo/snippets/baixaChunck.py", "repo_name": "ttm/fimdomundo", "src_encoding": "UTF-8", "text": "wget http://download.gmane.org/gmane.politics.\\\n organizations. 
metareciclagem/1456/2200\n" }, { "alpha_fraction": 0.44938206672668457, "alphanum_fraction": 0.46778857707977295, "avg_line_length": 26.359712600708008, "blob_id": "5fa545c4ccae7b2c2a8fdde814bb65b2cc41d434", "content_id": "b8d430fcb4d718eb721cda0d2366bf2e5b9f4ac5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3808, "license_type": "no_license", "max_line_length": 97, "num_lines": 139, "path": "/python/processaTudo.py", "repo_name": "ttm/fimdomundo", "src_encoding": "UTF-8", "text": "#-*- coding: utf-8 -*-\nimport mailbox, os\n# pegar as datas iniciais e finais e fazer para cada arquivo, escrevendo o gml com o nome da data\n\nfiles=os.listdir(\".\")\nfiles_=[f for f in files if f[-3:]==\"000\"]\nfor oOo in xrange(len(files_)):\n files=files_[oOo:oOo+1]\n mm=[]\n for f in files:\n mbox = mailbox.mbox(f) # 4543 mensagens\n mm+=[mbox]\n print(\"ok f\")\n \n mm2=[]\n count=[]\n for m in mm:\n mm2+=[m.items()]\n count+=[len(mm2[-1])]\n print(\"ok 2 %i\" % (count[-1],))\n \n \n \n #########################\n ##\n mensagens=mm2[0]\n \n mm=[]\n for m in mensagens:\n mm+=[m[1]]\n \n # primeira coisa eh ver se estah cronologico certinho\n \n # dd=[m['Date'] for m in mm] revela que é\n # então a resposta é para o mesmo tópico, quem escreveu logo depois de quem\n\n date_period=\"FROM: %s__TO: %s\" % (mm[0]['Date'],mm[-1]['Date'])\n \n IDs=[]\n for m in mm:\n # fazendo ID\n f=m['From']\n try:\n ff=f.split('<')\n except:\n print(\"SEM SENDER (FROM)\")\n if len(ff)>1:\n nome=ff[0].split(\" \")\n nome=[n for n in nome if len(n)]\n email=ff[1][:-1]\n else:\n nome='__NENHUM__'\n email=ff\n IDs+=[(nome, email)]\n \n IDs_=[IDs[0]]\n mm[0]['id_']=IDs[0]\n emails=[IDs[0][1]]\n mm[0]['email']=[email]\n nomes=[]\n i=1\n for ID in IDs[1:]:\n nome=ID[0]\n email=ID[1]\n if email in emails:\n pass\n else:\n emails+=[email]\n nomes+=[nome]\n IDs_+=[ID]\n mm[i]['id_']=[ID]\n mm[i]['email']=[email]\n mm[i]['nome']=[nome]\n i+=1\n \n # IDs_ são os nodes da rede\n \n # manter um dicionario atualizado\n # {'Subject' : 'email que postou por ultimo'}\n # cada nova mensagem, pegar o From, e fazer\n # o link ou adicionar +1 no peso do que escreveu antes para o que responde agora\n # e atualizar o dicionario com o email do que mandou por ultimo\n nodes=[]\n for ID in IDs_:\n nodes+=[ID[1]] # emails\n \n edges=[]\n sj={}\n for m in mm:\n ID_pre=m['In-Reply-To']\n if ID_pre:\n for n in mm:\n if n['Message-Id'] == ID_pre:\n print(n['ID'])\n msg_pre=n\n try:\n if msg_pre:\n edges+=[(msg_pre,m)]\n except:\n pass\n \n import networkx as x, string\n G=x.DiGraph()\n #G.add_edges_from(edges)\n for e in edges:\n #emissor= unicode(string.join(e[0]['nome'][0]))\n #receiver=unicode(string.join(e[1]['nome'][0]))\n emissor= e[0]['nome'][0][0]\n try:\n receiver=e[1]['nome'][0][0]\n except:\n receiver=e[1]['email'][0].split(\"@\")[0]\n #print(emissor,receiver)\n if not emissor:\n emissor=e[0]['email'][0].split(\"@\")[0]\n if not receiver:\n receiver=e[1]['email'][0].split(\"@\")[0]\n\n if \".:\" in emissor or not emissor:\n emissor = e[0]['email'][0]\n if \".:\" in emissor or not emissor:\n emissor = str(e[0]['nome'][0])\n if \".:\" in receiver or not receiver:\n receiver = e[1]['email'][0]\n if \".:\" in receiver or not receiver:\n receiver = str(e[1]['nome'][0])\n\n if not emissor:\n emissor = str(e[0]['nome'])\n if not receiver:\n receiver = str(e[1]['nome'])\n\n if emissor != receiver:\n if G.has_edge(emissor,receiver):\n G[emissor][receiver]['weight']+=1\n else:\n 
G.add_weighted_edges_from([(emissor,receiver,1.)])\n \n x.write_gml(G,date_period+\"___\"+str(count[0])+'_mensagens.gml')\n" }, { "alpha_fraction": 0.7475728392601013, "alphanum_fraction": 0.762135922908783, "avg_line_length": 21.77777862548828, "blob_id": "9465ca3dcbfcf8abfc76d4cb95f2f80278077826", "content_id": "26af3944ce438316c17f41dbcd897c46186da22a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 206, "license_type": "no_license", "max_line_length": 112, "num_lines": 9, "path": "/python/metagmane.py", "repo_name": "ttm/fimdomundo", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nimport feedparser\n\nmetaRSS = feedparser.parse('http://rss.gmane.org/messages/complete/gmane.politics.organizations.metareciclagem')\n\nteste=metaRSS.entries[13].summary\n\nprint teste\n\n" }, { "alpha_fraction": 0.5892857313156128, "alphanum_fraction": 0.6309523582458496, "avg_line_length": 27, "blob_id": "b11465e7f55553cd16a206437d7682ee2d8870ea", "content_id": "81a7cb52f5b2503327ebf2aba1f88cea1f58486f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 168, "license_type": "no_license", "max_line_length": 50, "num_lines": 6, "path": "/artigo/snippets/baixaTudo.py", "repo_name": "ttm/fimdomundo", "src_encoding": "UTF-8", "text": "import os\n\nfor i in xrange(1,58000):\n os.system(\"wget http://download.gmane.org/\\\n gmane.politics.organizations. metareciclagem/\\\n %i/%i -O %i\" % (i,i+1,i))\n" }, { "alpha_fraction": 0.5156313180923462, "alphanum_fraction": 0.5375558137893677, "avg_line_length": 20.78761100769043, "blob_id": "ff587b1c0151f30182e1af89096421c308a79a38", "content_id": "5a1794f4235a58daf86260aa2922ca66fb00c7c8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2468, "license_type": "no_license", "max_line_length": 80, "num_lines": 113, "path": "/python/verificaTudo.py", "repo_name": "ttm/fimdomundo", "src_encoding": "UTF-8", "text": "#-*- coding: utf-8 -*-\n\nimport mailbox, os\n\nfiles=os.listdir(\".\")\nfiles=[f for f in files if f[-3:]==\"000\"]\nfiles=files[:1]\nmm=[]\nfor f in files:\n mbox = mailbox.mbox(f) # 4543 mensagens\n mm+=[mbox]\n print(\"ok f\")\n\nmm2=[]\ncount=[]\nfor m in mm:\n mm2+=[m.items()]\n count+=[len(mm2[-1])]\n print(\"ok 2 %i\" % (count[-1],))\n\n\n\n#########################\n##\nmensagens=mm2[0]\n\nmm=[]\nfor m in mensagens:\n mm+=[m[1]]\n\n# primeira coisa eh ver se estah cronologico certinho\n\n# dd=[m['Date'] for m in mm] revela que é\n# então a resposta é para o mesmo tópico, quem escreveu logo depois de quem\n\nIDs=[]\nfor m in mm:\n # fazendo ID\n f=m['From']\n ff=f.split('<')\n if len(ff)>1:\n nome=ff[0].split(\" \")\n nome=[n for n in nome if len(n)]\n email=ff[1][:-1]\n else:\n nome='__NENHUM__'\n email=ff\n IDs+=[(nome, email)]\n\nIDs_=[IDs[0]]\nmm[0]['id_']=IDs[0]\nemails=[IDs[0][1]]\nmm[0]['email']=[email]\nnomes=[]\ni=1\nfor ID in IDs[1:]:\n nome=ID[0]\n email=ID[1]\n if email in emails:\n pass\n else:\n emails+=[email]\n nomes+=[nome]\n IDs_+=[ID]\n mm[i]['id_']=[ID]\n mm[i]['email']=[email]\n mm[i]['nome']=[nome]\n i+=1\n\n# IDs_ são os nodes da rede\n\n# manter um dicionario atualizado\n# {'Subject' : 'email que postou por ultimo'}\n# cada nova mensagem, pegar o From, e fazer\n# o link ou adicionar +1 no peso do que escreveu antes para o que responde agora\n# e atualizar o dicionario com o email do que mandou por ultimo\nnodes=[]\nfor ID in IDs_:\n nodes+=[ID[1]] # 
emails\n\nedges=[]\nsj={}\nfor m in mm:\n ID_pre=m['In-Reply-To']\n if ID_pre:\n for n in mm:\n if n['Message-Id'] == ID_pre:\n print(n['ID'])\n msg_pre=n\n try:\n if msg_pre:\n edges+=[(msg_pre,m)]\n except:\n pass\n\nimport networkx as x, string\nG=x.DiGraph()\n#G.add_edges_from(edges)\nfor e in edges:\n #emissor= unicode(string.join(e[0]['nome'][0]))\n #receiver=unicode(string.join(e[1]['nome'][0]))\n emissor= e[0]['nome'][0][0]\n receiver=e[1]['nome'][0][0]\n print(emissor,receiver)\n if not emissor:\n emissor=e[0]['email'][0].split(\"@\")[1]\n if not receiver:\n receiver=e[1]['email'][0].split(\"@\")[1]\n if emissor != receiver:\n if G.has_edge(emissor,receiver):\n G[emissor][receiver]['weight']+=1\n else:\n G.add_weighted_edges_from([(emissor,receiver,1.)])\n\n" }, { "alpha_fraction": 0.5957446694374084, "alphanum_fraction": 0.6010638475418091, "avg_line_length": 14.666666984558105, "blob_id": "7ab97b3ca6ea5aecb65aed2a61a619731e0cd197", "content_id": "38a7c10ac9c085797a11cc3d6eb2fdc2a84ddb0b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 188, "license_type": "no_license", "max_line_length": 33, "num_lines": 12, "path": "/artigo/snippets/carregaTudo.py", "repo_name": "ttm/fimdomundo", "src_encoding": "UTF-8", "text": "import mailbox, os\n\na=os.listdir(\".\")\na.sort()\n\nmm=[]\nfor aa in a:\n mbox = mailbox.mbox(aa)\n # versao em plain text,\n # mesmo quando vier multipart\n m=mbox[0]\n mm.append(m)\n" } ]
11
shpe-uf/SHPE-UF-SERVER
https://github.com/shpe-uf/SHPE-UF-SERVER
bb0f9dc839446db97a9f99aee55ad122bd4dcc18
1ee7968719029c2c80beb8c50d92525390642525
5339ef8675e2e111f7e312c7103a53519a9eb666
refs/heads/master
2023-08-21T02:35:17.038397
2023-06-20T20:10:29
2023-06-20T20:10:29
241,526,282
0
0
null
2020-02-19T03:45:25
2023-06-20T21:42:00
2023-09-13T18:35:26
JavaScript
[ { "alpha_fraction": 0.4566439092159271, "alphanum_fraction": 0.459151029586792, "avg_line_length": 27.989974975585938, "blob_id": "13fff87dea3074a4377d8f7c51b0164f175b90a1", "content_id": "aecc0ba400b6799a71db16b915947acdc13735ee", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 11567, "license_type": "no_license", "max_line_length": 132, "num_lines": 399, "path": "/graphql/resolvers/rentables.js", "repo_name": "shpe-uf/SHPE-UF-SERVER", "src_encoding": "UTF-8", "text": "const { GraphQLError } = require(\"graphql\");\nconst { ApolloServerErrorCode } = require(\"@apollo/server/errors\");\nconst Rentable = require(\"../../models/Rentable.js\");\nconst Receipt = require(\"../../models/Receipt.js\");\nconst User = require(\"../../models/User.js\");\nconst nodemailer = require(\"nodemailer\");\n\nrequire(\"dotenv\").config();\n\nconst { validateRentalRequest } = require(\"../../util/validators\");\n\nconst {\n handleInputError,\n handleGeneralError,\n} = require(\"../../util/error-handling\");\n\nmodule.exports = {\n Query: {\n async getInventory() {\n try {\n const inventory = await Rentable.find();\n return inventory;\n } catch (err) {\n handleGeneralError(err, err.message);\n }\n },\n\n async getItem(_, { item }) {\n try {\n const rentable = await Rentable.findOne({ item: item });\n return rentable;\n } catch (err) {\n handleGeneralError(err, err.message);\n }\n },\n\n async getReceipts(_, { item }) {\n try {\n const receipts = await Receipt.find();\n return receipts;\n } catch (err) {\n handleGeneralError(err, err.message);\n }\n },\n },\n\n Mutation: {\n //=====================================================================\n //========================= Checking out an item ======================\n //=====================================================================\n\n async checkOutItem(_, data) {\n try {\n //fixes bug where Object Null is received\n const { item, username, numberOfItems, email } = JSON.parse(\n JSON.stringify(data)\n ).data;\n\n const rentable = await Rentable.findOne({ item: item });\n const user = await User.findOne({ username: username });\n\n if (!user) {\n errors.general = \"That's not a valid user.\";\n handleInputError(errors);\n }\n\n if (!rentable) {\n erorrs.general = \"That's not a valid rental item.\";\n handleInputError(errors);\n }\n\n let { errors, valid } = validateRentalRequest(\n numberOfItems,\n rentable.quantity,\n rentable.renters\n );\n\n if (!valid) {\n handleInputError(errors);\n }\n\n for (var i = 1; i <= numberOfItems; i++) {\n if (rentable.renters) {\n rentable.renters.push(username);\n } else {\n rentable.renters = [username];\n }\n }\n\n await rentable.save();\n\n const newDate = JSON.stringify(new Date());\n const receipt = new Receipt({\n username: username,\n item: item,\n quantity: numberOfItems,\n email: email,\n dateCheckedOut: newDate,\n });\n await receipt.save();\n\n const transporter = nodemailer.createTransport({\n service: process.env.EMAIL_SERVICE,\n auth: {\n user: process.env.EMAIL,\n pass: process.env.EMAIL_PASSWORD,\n },\n });\n\n const requesterMail = {\n from: process.env.EMAIL,\n to: `${email}`,\n subject: \"Rental Request\",\n text:\n \"You made a request to rent from SHPE UF.\\n\\n\" +\n `Username: ${username}\\n` +\n `Email: ${email}\\n` +\n `Item: ${item}\\n` +\n `Quantity: ${numberOfItems}\\n` +\n `Date: ${newDate}\\n` +\n `\\n` +\n `In order to pick up your item, please visit a SHPE Rentals director in the SHPE Office in Weil Hall in room 276A\\n\\n` 
+\n `SHPE Rentals directors are in the office on Mondays 10:00 am - 12:00 pm or Thursdays 1:00 pm - 3:00 pm.\\n\\n` +\n `If this is an urgent request you can contact a SHPE Rentals director directly at [email protected] or [email protected]`,\n };\n\n transporter.sendMail(requesterMail, (err, response) => {\n if (err) {\n console.error(\"there was an error: \", err);\n } else {\n res.status(200).json(\"recovery email sent\");\n }\n });\n\n const rentalAdminMail = {\n from: process.env.EMAIL,\n to: `[email protected]`,\n subject: \"Rental Request - \" + `${username} ${item}`,\n text:\n \"There is a new request for rental.\\n\\n\" +\n `Username: ${username}\\n` +\n `Email: ${email}\\n` +\n `Item: ${item}\\n` +\n `Quantity: ${numberOfItems}\\n` +\n `Date: ${newDate}\\n`,\n };\n\n transporter.sendMail(rentalAdminMail, (err, response) => {\n if (err) {\n console.error(\"there was an error: \", err);\n } else {\n res.status(200).json(\"recovery email sent\");\n }\n });\n\n return await Rentable.find();\n } catch (err) {\n handleGeneralError(err, err.message);\n }\n },\n\n //=====================================================================\n //========================= Picking up an item =========================\n //=====================================================================\n\n async pickUpItem(_, { receiptID }) {\n try {\n const receipt = await Receipt.findOne({ _id: receiptID });\n\n if (!receipt) {\n errors.general = \"That's not a valid receipt.\";\n handleInputError(errors);\n }\n\n const newReceipt = await Receipt.findOneAndUpdate(\n { _id: receiptID },\n {\n datePickedUp: JSON.stringify(new Date()),\n },\n { new: true }\n );\n\n return newReceipt;\n } catch (err) {\n handleGeneralError(err, err.message);\n }\n },\n\n //=====================================================================\n //========================= Returning an item =========================\n //=====================================================================\n\n async returnItem(_, { receiptID }) {\n try {\n const receipt = await Receipt.findById(receiptID);\n\n if (!receipt) {\n errors.general = \"That's not a valid receipt.\";\n handleInputError(errors);\n }\n\n const item = receipt.item;\n const username = receipt.username;\n\n const rentable = await Rentable.findOne({ item: item });\n const user = await User.findOne({ username: username });\n\n if (!rentable) {\n errors.general = \"Receipt contained an invalid rental item.\";\n handleInputError(errors);\n }\n\n if (!user) {\n errors.general = \"Receipt contained an invalid user.\";\n handleInputError(errors);\n }\n\n let newRenters = rentable.renters;\n for (let i = 0; i < receipt.quantity; i++) {\n newRenters.splice(\n rentable.renters.findIndex((e) => e === receipt.username),\n 1\n );\n }\n\n await Rentable.findOneAndUpdate(\n { item: item },\n {\n renters: newRenters,\n }\n );\n\n const newReceipt = await Receipt.findOneAndUpdate(\n { _id: receiptID },\n {\n dateClosed: JSON.stringify(new Date()),\n },\n { new: true }\n );\n\n return newReceipt;\n } catch (err) {\n handleGeneralError(err, err.message);\n }\n },\n //=====================================================================\n //========================= unPicking up an item =======================\n //=====================================================================\n\n async unPickUpItem(_, { receiptID }) {\n try {\n const receipt = await Receipt.findOne({ _id: receiptID });\n\n if (!receipt) {\n errors.general = \"That's not a valid receipt.\";\n handleInputError(errors);\n 
}\n\n if (!receipt.datePickedUp) {\n errors.general = \"That's not a valid receipt.\";\n handleInputError(errors);\n }\n\n const newReceipt = await Receipt.findOneAndUpdate(\n { _id: receiptID },\n {\n datePickedUp: null,\n },\n { new: true }\n );\n\n return newReceipt;\n } catch (err) {\n handleGeneralError(err, err.message);\n }\n },\n\n //=====================================================================\n //========================= unReturning an item =======================\n //=====================================================================\n\n async unReturnItem(_, { receiptID }) {\n try {\n const receipt = await Receipt.findById(receiptID);\n\n if (!receipt) {\n errors.general = \"That's not a valid receipt.\";\n handleInputError(errors);\n }\n\n if (!receipt.dateClosed) {\n errors.general = \"That's not a closed receipt.\";\n handleInputError(errors);\n }\n\n const item = receipt.item;\n const username = receipt.username;\n\n const rentable = await Rentable.findOne({ item: item });\n const user = await User.findOne({ username: username });\n\n if (!rentable) {\n errors.general = \"Receipt contained an invalid rental item.\";\n handleInputError(errors);\n }\n\n if (!user) {\n errors.general = \"Receipt contained an invalid user.\";\n handleInputError(errors);\n }\n\n const users = [];\n for (let i = 0; i < receipt.quantity; i++) {\n users.push(receipt.username);\n }\n\n await Rentable.findOneAndUpdate(\n { item: item },\n {\n $push: {\n renters: users,\n },\n }\n );\n\n const newReceipt = await Receipt.findOneAndUpdate(\n { _id: receiptID },\n {\n dateClosed: null,\n },\n { new: true }\n );\n\n return newReceipt;\n } catch (err) {\n handleGeneralError(err, err.message);\n }\n },\n //=====================================================================\n //========================= deleting a receipt =======================\n //=====================================================================\n\n async deleteReceipt(_, { receiptID }) {\n try {\n const receipt = await Receipt.findById(receiptID);\n\n if (!receipt) {\n errors.general = \"That's not a valid receipt.\";\n handleInputError(errors);\n }\n\n if (!receipt.dateClosed) {\n const item = receipt.item;\n const username = receipt.username;\n\n const rentable = await Rentable.findOne({ item: item });\n const user = await User.findOne({ username: username });\n\n if (!rentable) {\n errors.general = \"Receipt contained an invalid rental item.\";\n handleInputError(errors);\n }\n\n if (!user) {\n errors.general = \"Receipt contained an invalid user.\";\n handleInputError(errors);\n }\n\n let newRenters = rentable.renters;\n for (let i = 0; i < receipt.quantity; i++) {\n newRenters.splice(\n rentable.renters.findIndex((e) => e === receipt.username),\n 1\n );\n }\n\n await Rentable.findOneAndUpdate(\n { item: item },\n {\n renters: newRenters,\n }\n );\n }\n\n const newReceipt = await Receipt.findOneAndUpdate(\n { _id: receiptID },\n {\n deleted: true,\n },\n { new: true }\n );\n\n return newReceipt;\n } catch (err) {\n handleGeneralError(err, err.message);\n }\n },\n },\n};\n" }, { "alpha_fraction": 0.493120938539505, "alphanum_fraction": 0.4940864145755768, "avg_line_length": 23.017391204833984, "blob_id": "2c1a958e64e1dfb7d5a5ea15998b830097c5e69a", "content_id": "8e7a522029c345f9f5341f82c7db1c6d0678591f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 8286, "license_type": "no_license", "max_line_length": 82, "num_lines": 345, "path": 
"/graphql/resolvers/tasks.js", "repo_name": "shpe-uf/SHPE-UF-SERVER", "src_encoding": "UTF-8", "text": "const Task = require(\"../../models/Task\");\nconst User = require(\"../../models/User.js\");\nconst Request = require(\"../../models/Request.js\");\n\nconst {\n validateCreateTaskInput,\n validateManualTaskInputInput,\n} = require(\"../../util/validators\");\n\nconst {\n handleInputError,\n handleGeneralError,\n} = require(\"../../util/error-handling\");\n\nconst monthOptions = require(\"../../json/month.json\");\n\nmodule.exports = {\n Query: {\n async getTasks() {\n try {\n const tasks = await Task.find().sort({ createdAt: 1 });\n return tasks;\n } catch (err) {\n handleGeneralError(err, err.message);\n }\n },\n },\n\n Mutation: {\n async createTask(\n _,\n { createTaskInput: { name, startDate, endDate, description, points } }\n ) {\n const { errors, valid } = validateCreateTaskInput(\n name,\n startDate,\n endDate,\n description,\n points\n );\n\n if (!valid) {\n handleInputError(errors);\n }\n\n // REVISIT THIS LATER TO DETERMINE SEMESTER EITHER WHEN TASK BEGINS OR ENDS.\n const month = new Date().getMonth();\n\n semester = monthOptions[month].value;\n\n isTaskDuplicate = await Task.findOne({ name });\n\n if (isTaskDuplicate) {\n errors.general = \"A task with that name already exists.\";\n handleInputError(errors);\n }\n\n startDate = new Date(startDate).toDateString();\n endDate = new Date(endDate).toDateString();\n\n const newTask = new Task({\n name,\n startDate,\n endDate,\n type: \"Task\",\n description,\n points,\n attendance: 0,\n semester,\n users: [],\n createdAt: new Date().toISOString(),\n });\n\n await newTask.save();\n\n const res = Task.findOne({ name });\n\n return res;\n },\n async manualTaskInput(_, { manualTaskInputInput: { username, taskName } }) {\n const { valid, errors } = validateManualTaskInputInput(username);\n\n if (!valid) {\n errors.general = \"User input errors.\";\n handleInputError(errors);\n }\n\n const user = await User.findOne({\n username,\n });\n\n const task = await Task.findOne({\n name: taskName,\n });\n\n const request = await Request.findOne({\n username: username,\n taskName: taskName,\n });\n\n if (!user) {\n errors.general = \"User not found.\";\n handleInputError(errors);\n }\n\n if (!task) {\n errors.general = \"Task not found.\";\n handleInputError(errors);\n }\n\n if (request) {\n errors.general =\n \"This member has sent a request for this task. 
Check the Requests tab.\";\n handleInputError(errors);\n }\n\n user.tasks.map((userTask) => {\n if (String(userTask.name) == String(task.name)) {\n errors.general = \"Task already redeemed by the user.\";\n handleInputError(errors);\n }\n });\n\n var pointsIncrease = {};\n\n if (task.semester === \"Fall Semester\") {\n pointsIncrease = {\n points: task.points,\n fallPoints: task.points,\n };\n } else if (task.semester === \"Spring Semester\") {\n pointsIncrease = {\n points: task.points,\n springPoints: task.points,\n };\n } else if (task.semester === \"Summer Semester\") {\n pointsIncrease = {\n points: task.points,\n summerPoints: task.points,\n };\n } else {\n errors.general = \"Invalid task.\";\n handleInputError(errors);\n }\n\n var updatedUser = await User.findOneAndUpdate(\n {\n username,\n },\n {\n $push: {\n tasks: {\n $each: [\n {\n name: task.name,\n startDate: task.startDate,\n points: task.points,\n },\n ],\n $sort: { createdAt: 1 },\n },\n },\n $inc: pointsIncrease,\n },\n {\n new: true,\n }\n );\n updatedUser.message = \"\";\n\n await Task.findOneAndUpdate(\n {\n name: taskName,\n },\n {\n $push: {\n users: {\n $each: [\n {\n firstName: user.firstName,\n lastName: user.lastName,\n username: user.username,\n email: user.email,\n },\n ],\n $sort: { lastName: 1, firstName: 1 },\n },\n },\n $inc: {\n attendance: 1,\n },\n },\n {\n new: true,\n }\n );\n\n const newTask = await Task.findOne({\n name: taskName,\n });\n\n return newTask;\n },\n async removeUserFromTask(\n _,\n { manualTaskInputInput: { username, taskName } }\n ) {\n const { valid, errors } = validateManualTaskInputInput(username);\n\n if (!valid) {\n errors.general = \"User input errors.\";\n handleInputError(errors);\n }\n\n const user = await User.findOne({\n username,\n });\n\n const task = await Task.findOne({\n name: taskName,\n });\n\n if (!user) {\n errors.general = \"User not found.\";\n handleInputError(errors);\n }\n\n if (!task) {\n errors.general = \"Task not found.\";\n handleInputError(errors);\n }\n\n if (!user.tasks.map((e) => e.name).includes(task.name)) {\n errors.general = \"User is not member of task.\";\n handleInputError(errors);\n }\n\n newTasks = user.tasks.filter((e) => e.name !== task.name);\n newUsers = task.users.filter((e) => e.username !== user.username);\n\n if (task.semester === \"Fall Semester\") {\n await User.findOneAndUpdate(\n { username },\n {\n tasks: newTasks,\n points: user.points - task.points,\n fallPoints: user.fallPoints - task.points,\n }\n );\n } else if (task.semester === \"Spring Semester\") {\n await User.findOneAndUpdate(\n { username },\n {\n tasks: newTasks,\n points: user.points - task.points,\n springPoints: user.springPoints - task.points,\n }\n );\n } else if (task.semester === \"Summer Semester\") {\n await User.findOneAndUpdate(\n { username },\n {\n tasks: newTasks,\n points: user.points - task.points,\n summerPoints: user.summerPoints - task.points,\n }\n );\n } else {\n errors.general = \"Invalid task.\";\n handleInputError(errors);\n }\n\n newTask = await Task.findOneAndUpdate(\n { name: taskName },\n { users: newUsers, attendance: task.attendance - 1 },\n { new: true }\n );\n\n return newTask;\n },\n async deleteTask(_, { taskId }) {\n // declare errors before use; it was previously referenced without being declared\n const errors = {};\n\n const users = await User.find();\n\n const task = await Task.findOne({\n _id: taskId,\n });\n\n if (!users || !users.length || users.length === 0) {\n errors.general = \"User not found.\";\n handleInputError(errors);\n }\n\n if (!task) {\n errors.general = \"Task not found.\";\n handleInputError(errors);\n }\n\n var 
pointsDecrease = {};\n\n if (task.semester === \"Fall Semester\") {\n pointsDecrease = {\n points: -task.points,\n fallPoints: -task.points,\n };\n } else if (task.semester === \"Spring Semester\") {\n pointsDecrease = {\n points: -task.points,\n springPoints: -task.points,\n };\n } else if (task.semester === \"Summer Semester\") {\n pointsDecrease = {\n points: -task.points,\n summerPoints: -task.points,\n };\n } else {\n errors.general = \"Invalid task.\";\n handleInputError(errors);\n }\n\n await Task.deleteOne({ _id: taskId });\n\n await User.updateMany(\n {\n tasks: {\n $elemMatch: {\n name: task.name,\n },\n },\n },\n {\n $pull: {\n tasks: {\n name: task.name,\n },\n },\n $inc: pointsDecrease,\n }\n );\n\n tasks = await Task.find();\n\n return tasks;\n },\n },\n};\n" }, { "alpha_fraction": 0.6227642297744751, "alphanum_fraction": 0.6227642297744751, "avg_line_length": 23.600000381469727, "blob_id": "8f9fda26d02273b9b21f884217569b45542770df", "content_id": "e15bb67c232fd71d94bf8ffdfd1f66dc11c4b215", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 615, "license_type": "no_license", "max_line_length": 70, "num_lines": 25, "path": "/util/error-handling.js", "repo_name": "shpe-uf/SHPE-UF-SERVER", "src_encoding": "UTF-8", "text": "const { GraphQLError } = require(\"graphql\");\nconst { ApolloServerErrorCode } = require(\"@apollo/server/errors\");\n\nmodule.exports.handleInputError = (errors) => {\n throw new GraphQLError(errors.general ? errors.general : \"Errors\", {\n extensions: {\n exception: {\n code: ApolloServerErrorCode.BAD_USER_INPUT,\n errors,\n },\n },\n });\n};\n\nmodule.exports.handleGeneralError = (errors, title) => {\n console.log(errors);\n throw new GraphQLError(title, {\n extensions: {\n exception: {\n code: ApolloServerErrorCode.INTERNAL_SERVER_ERROR,\n errors,\n },\n },\n });\n};\n" }, { "alpha_fraction": 0.7200000286102295, "alphanum_fraction": 0.7200000286102295, "avg_line_length": 32.088233947753906, "blob_id": "9217eb9e1ce01a08fd7ae10d9a3e5a4a00333b36", "content_id": "8f471002964084fe2a9ed9532f852881e54c7b0a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1125, "license_type": "no_license", "max_line_length": 64, "num_lines": 34, "path": "/graphql/resolvers/index.js", "repo_name": "shpe-uf/SHPE-UF-SERVER", "src_encoding": "UTF-8", "text": "const usersResolvers = require(\"./users.js\");\nconst eventsResolvers = require(\"./events.js\");\nconst requestsResolvers = require(\"./requests.js\");\nconst tasksResolvers = require(\"./tasks.js\");\nconst corporationResolvers = require(\"./corporations.js\");\nconst alumnisResolvers = require(\"./alumnis.js\");\nconst reimbursementsResolvers = require(\"./reimbursements.js\");\nconst shpeRentalsResolvers = require(\"./rentables.js\")\nconst contactRequestsResolvers = require(\"./contactRequests.js\")\n\nmodule.exports = {\n Query: {\n ...usersResolvers.Query,\n ...eventsResolvers.Query,\n ...requestsResolvers.Query,\n ...tasksResolvers.Query,\n ...corporationResolvers.Query,\n ...alumnisResolvers.Query,\n ...reimbursementsResolvers.Query,\n ...shpeRentalsResolvers.Query\n },\n\n Mutation: {\n ...usersResolvers.Mutation,\n ...eventsResolvers.Mutation,\n ...requestsResolvers.Mutation,\n ...tasksResolvers.Mutation,\n ...corporationResolvers.Mutation,\n ...alumnisResolvers.Mutation,\n ...reimbursementsResolvers.Mutation,\n ...shpeRentalsResolvers.Mutation,\n ...contactRequestsResolvers.Mutation\n }\n};\n" 
}, { "alpha_fraction": 0.602150559425354, "alphanum_fraction": 0.602150559425354, "avg_line_length": 16.22222137451172, "blob_id": "58da0790d96fa2aaf0b48448bac76c7db07eb96c", "content_id": "c57ae5fcd7073141d17f0ed6d591db0d24b17683", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 465, "license_type": "no_license", "max_line_length": 63, "num_lines": 27, "path": "/models/ContactRequest.js", "repo_name": "shpe-uf/SHPE-UF-SERVER", "src_encoding": "UTF-8", "text": "const { model, Schema } = require('mongoose');\n\nconst contactRequestSchema = new Schema({\n firstName: {\n type: String,\n required: true,\n },\n lastName: {\n type: String,\n required: true,\n },\n email: {\n type: String,\n required: true,\n lowercase: true,\n },\n messageType: {\n type: String,\n required: true,\n },\n message: {\n type: String,\n required: true,\n },\n});\n\nmodule.exports = model('contactRequest', contactRequestSchema);\n" }, { "alpha_fraction": 0.567669153213501, "alphanum_fraction": 0.5801256895065308, "avg_line_length": 22.63660430908203, "blob_id": "999f3a9c864e5ce9e5d3b55b6038719ba2d4c390", "content_id": "1272db87a9d8b7c3d4e1476250998656f819b8fb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 17822, "license_type": "no_license", "max_line_length": 163, "num_lines": 754, "path": "/util/validators.js", "repo_name": "shpe-uf/SHPE-UF-SERVER", "src_encoding": "UTF-8", "text": "const parseDataURL = require(\"data-urls\");\n\nmodule.exports.validateRegisterInput = (\n firstName,\n lastName,\n major,\n year,\n graduating,\n country,\n ethnicity,\n sex,\n username,\n email,\n password,\n confirmPassword\n) => {\n const errors = {};\n\n const nameValidator = /^[a-zA-Z ',.-]{3,20}$/;\n const usernameValidator = /^(?=.{6,20}$)(?![_.])(?!.*[_.]{2})[a-zA-Z0-9._]+(?<![_.])$/i;\n const emailRegex = /^([0-9a-zA-Z]([-.\\w]*[0-9a-zA-Z])*@([0-9a-zA-Z][-\\w]*[0-9a-zA-Z]\\.)+[a-zA-Z]{2,12})$/;\n const passwordValidator = /^(?=.*?[A-Z])(?=.*?[a-z])(?=.*?[0-9])(?=.*?[#?!@$%^&*-.]).{8,}$/;\n\n if (firstName.trim() === \"\") {\n errors.firstName = \"First name is required.\";\n } else {\n if (!firstName.match(nameValidator)) {\n errors.firstName =\n \"First Name must be at least 3 characters, max 20. No special characters or numbers.\";\n }\n }\n\n if (lastName.trim() === \"\") {\n errors.lastName = \"Last Name is required.\";\n } else {\n if (!lastName.match(nameValidator)) {\n errors.lastName =\n \"Last name must be at least 3 characters, max 20. No special characters or numbers.\";\n }\n }\n\n if (major.trim() === \"\") {\n errors.major = \"Major is required.\";\n }\n\n if (year.trim() === \"\") {\n errors.year = \"Year is required.\";\n }\n\n if (graduating.trim() === \"\") {\n errors.graduating = \"Graduating is required.\";\n }\n\n if (country.trim() === \"\") {\n errors.country = \"Country of Origin is required.\";\n }\n\n if (ethnicity.trim() === \"\") {\n errors.ethnicity = \"Ethnicity is required.\";\n }\n\n if (sex.trim() === \"\") {\n errors.sex = \"Sex is required.\";\n }\n\n if (username.trim() === \"\") {\n errors.username = \"Username is required.\";\n } else {\n if (!username.match(usernameValidator)) {\n errors.username =\n \"Username must be at least 6 characters, max 20. No special characters, except for periods (.) 
and underscores (_).\";\n }\n }\n\n if (email.trim() === \"\") {\n errors.email = \"Email is required.\";\n } else {\n if (!email.match(emailRegex)) {\n errors.email = \"Invalid email address.\";\n } else if (email.length > 7) {\n var indexUF = email.length - 8;\n var indexSF = email.length - 14;\n if (\n email.substring(indexUF) != \"@ufl.edu\" &&\n email.substring(indexSF) != \"@sfcollege.edu\"\n ) {\n errors.email =\n \"University of Florida or Santa Fe College email required.\";\n }\n }\n }\n\n if (password === \"\") {\n errors.password = \"Password is required.\";\n } else if (!password.match(passwordValidator)) {\n errors.password =\n \"Passwords must be at least 8 characters. It must contain at least one lowercase character, one uppercase character, one number, and one special character.\";\n } else if (password !== confirmPassword) {\n errors.confirmPassword = \"Password and Confirm Password must match.\";\n }\n\n return {\n errors,\n valid: Object.keys(errors).length < 1,\n };\n};\n\nmodule.exports.validateLoginInput = (username, password) => {\n const errors = {};\n\n if (username.trim() === \"\") {\n errors.username = \"Username is required.\";\n }\n\n if (password.trim() === \"\") {\n errors.password = \"Password is required.\";\n }\n\n return {\n errors,\n valid: Object.keys(errors).length < 1,\n };\n};\n\nmodule.exports.validatePasswordInput = (password, confirmPassword) => {\n const errors = {};\n const passwordValidator = /^(?=.*?[A-Z])(?=.*?[a-z])(?=.*?[0-9])(?=.*?[#?!@$%^&*-.]).{8,}$/;\n\n if (password === \"\") {\n errors.password = \"Password is required.\";\n } else if (!password.match(passwordValidator)) {\n errors.password =\n \"Passwords must be at least 8 characters. It must contain at least one lowercase character, one uppercase character, one number, and one special character.\";\n } else if (password !== confirmPassword) {\n errors.confirmPassword = \"Password and Confirm Password must match.\";\n }\n\n return {\n errors,\n valid: Object.keys(errors).length < 1,\n };\n};\n\nmodule.exports.validateEmailInput = (email) => {\n const emailRegex = /^([0-9a-zA-Z]([-.\\w]*[0-9a-zA-Z])*@([0-9a-zA-Z][-\\w]*[0-9a-zA-Z]\\.)+[a-zA-Z]{2,12})$/;\n const errors = {};\n\n if (email.trim() === \"\") {\n errors.email = \"Email is required.\";\n } else {\n if (!email.match(emailRegex)) {\n errors.email = \"Invalid email address.\";\n }\n }\n\n return {\n errors,\n valid: Object.keys(errors).length < 1,\n };\n};\n\nmodule.exports.validateCreateEventInput = (\n name,\n code,\n category,\n points,\n expiration\n) => {\n const errors = {};\n\n const nameValidator = /^[a-zA-Z0-9- ]{6,50}$/i;\n const codeValidator = /^[a-zA-Z0-9]{6,50}$/i;\n\n if (name.trim() === \"\") {\n errors.name = \"Name is required.\";\n } else {\n if (!name.match(nameValidator)) {\n errors.name =\n \"Event name must be at least 6 characters, max 50. No special characters, except for hyphens (-) and dashes (/).\";\n }\n }\n\n if (code.trim() === \"\") {\n errors.code = \"Code is required.\";\n } else {\n if (!code.match(codeValidator)) {\n errors.code =\n \"Event code must be at least 6 characters, max 50. 
No special characters.\";\n }\n }\n\n if (category.trim() === \"\") {\n errors.category = \"Category is required.\";\n }\n\n if (expiration.trim() === \"\") {\n errors.expiration = \"Expires in is required.\";\n }\n\n if (category.trim() === \"Miscellaneous\") {\n if (points < 0 || points > 10) {\n errors.points = \"Points must be a whole number greater than 0.\";\n }\n }\n\n return {\n errors,\n valid: Object.keys(errors).length < 1,\n };\n};\n\nmodule.exports.validateCreateTaskInput = (\n name,\n startDate,\n endDate,\n description,\n points\n) => {\n\n const errors = {};\n\n const nameValidator = /^[a-zA-Z0-9- ]{6,50}$/i;\n\n if (name.trim() === \"\") {\n errors.name = \"Name is required.\";\n } else {\n if (!name.match(nameValidator)) {\n errors.name =\n \"Task name must be at least 6 characters, max 50. No special characters, except for hyphens (-) and dashes (/).\";\n }\n }\n\n if(isNaN(Date.parse(startDate)) || isNaN(Date.parse(endDate))) {\n errors.date = \"Invalid date, please enter a 'MM/DD/YYYY' format\"\n } else {\n let start = new Date(Date.parse(startDate))\n let end = new Date(Date.parse(endDate))\n let d = new Date()\n let futureLimit = new Date(d.getFullYear()+1,d.getMonth(),d.getDay())\n let pastLimit = new Date(d.getFullYear()-1,d.getMonth(),d.getDay())\n if(start > futureLimit || end > futureLimit) {\n errors.date = \"Invalid date, too far into the future\"\n } else if(start < pastLimit || end < pastLimit) {\n errors.date = \"Invalid date, too far into the past\"\n } else if(end <= start) {\n errors.date = \"End date needs to be after start date\"\n }\n }\n\n if(description.trim() === \"\" && description.length > 280){\n errors.description = \"Description must be between 1 and 280 characters.\"\n }\n if(typeof(points) !== 'number') {\n errors.points = \"Points must be a whole number greater than 0.\";\n }\n\n if(points < 0 || points > 10) {\n errors.points = \"Points must be a whole number greater than 0.\";\n }\n\n return {\n errors,\n valid: Object.keys(errors).length < 1,\n };\n};\n\nmodule.exports.validateRedeemPointsInput = (code) => {\n const errors = {};\n\n if (code.trim() === \"\") {\n errors.code = \"No code was provided.\";\n }\n\n return {\n errors,\n valid: Object.keys(errors).length < 1,\n };\n};\n\nmodule.exports.validateManualInputInput = (username) => {\n const errors = {};\n\n if (username.trim() === \"\") {\n errors.username = \"No username was provided.\";\n }\n\n return {\n errors,\n valid: Object.keys(errors).length < 1,\n };\n};\n\nmodule.exports.validateManualTaskInputInput = (username) => {\n const errors = {};\n\n if (username.trim() === \"\") {\n errors.username = \"No username was provided.\";\n }\n\n return {\n errors,\n valid: Object.keys(errors).length < 1,\n };\n};\n\nmodule.exports.validateCreateEditCorporationInput = (\n name,\n logo,\n slogan,\n majors,\n industries,\n overview,\n mission,\n goals,\n businessModel,\n newsLink,\n applyLink\n) => {\n const errors = {};\n\n if (name.trim() === \"\") {\n errors.name = \"No name was provided.\";\n }\n\n if (logo.trim() === \"\") {\n errors.logo = \"No logo was provided.\";\n }\n\n if (slogan.trim() === \"\") {\n errors.slogan = \"No slogan was provided.\";\n }\n\n if (majors.length === 0) {\n errors.majors = \"No majors were provided.\";\n }\n\n if (industries.length === 0) {\n errors.industries = \"No industries were provided.\";\n }\n\n if (overview.trim() === \"\") {\n errors.overview = \"No overview was provided.\";\n }\n\n if (mission.trim() === \"\") {\n errors.mission = \"No mission 
was provided.\";\n }\n\n if (goals.trim() === \"\") {\n errors.goals = \"No goals were provided.\";\n }\n\n if (businessModel.trim() === \"\") {\n errors.businessModel = \"No business model was provided.\";\n }\n\n if (newsLink.trim() === \"\") {\n errors.newsLink = \"No news link was provided.\";\n }\n\n if (applyLink.trim() === \"\") {\n errors.applyLink = \"No apply link was provided.\";\n }\n\n return {\n errors,\n valid: Object.keys(errors).length < 1,\n };\n};\n\nmodule.exports.validateEditUserProfile = (\n firstName,\n lastName,\n photo,\n major,\n year,\n graduating,\n country,\n ethnicity,\n sex\n) => {\n const errors = {};\n\n const nameValidator = /^[a-zA-Z ',.-]{3,20}$/;\n\n if (firstName.trim() === \"\") {\n errors.firstName = \"First name is required.\";\n } else {\n if (!firstName.match(nameValidator)) {\n errors.firstName =\n \"First Name must be at least 3 characters, max 20. No special characters or numbers.\";\n }\n }\n\n if (lastName.trim() === \"\") {\n errors.lastName = \"Last Name is required.\";\n } else {\n if (!lastName.match(nameValidator)) {\n errors.lastName =\n \"Last name must be at least 3 characters, max 20. No special characters or numbers.\";\n }\n }\n\n if(photo) {\n const dataUrlData = parseDataURL(photo);\n\n if (dataUrlData.mimeType.toString().slice(0, 6) !== \"image/\") {\n errors.photo = \"Please use a valid image file for photo.\";\n } else if (Buffer.byteLength(dataUrlData.body) > 102400) {\n errors.photo =\n \"Please use an image file that doesn't exceed the maximum file size (100 KB)\";\n }\n }\n \n\n if (major.trim() === \"\") {\n errors.major = \"Major is required.\";\n }\n\n if (year.trim() === \"\") {\n errors.year = \"Year is required.\";\n }\n\n if (graduating.trim() === \"\") {\n errors.graduating = \"Graduating is required.\";\n }\n\n if (country.trim() === \"\") {\n errors.country = \"Country of Origin is required.\";\n }\n\n if (ethnicity.trim() === \"\") {\n errors.ethnicity = \"Ethnicity is required.\";\n }\n\n if (sex.trim() === \"\") {\n errors.sex = \"Sex is required.\";\n }\n\n return {\n errors,\n valid: Object.keys(errors).length < 1,\n };\n};\n\nmodule.exports.validateEditUpdatedAt = (\n email,\n) => {\n const errors = {};\n\n if (email.trim() === \"\") {\n errors.code = \"No email was provided.\";\n }\n\n return {\n errors,\n valid: Object.keys(errors).length < 1,\n };\n}\n\nmodule.exports.validateRegisterAlumniInput = (\n firstName,\n lastName,\n email,\n undergrad,\n grad,\n employer,\n position,\n location,\n linkedin\n) => {\n const errors = {};\n\n const nameValidator = /^[a-zA-Z ',.-]{3,20}$/;\n const emailValidator = /^([0-9a-zA-Z]([-.\\w]*[0-9a-zA-Z])*@([0-9a-zA-Z][-\\w]*[0-9a-zA-Z]\\.)+[a-zA-Z]{2,12})$/;\n const yearValidator = /^\\d{4}$/;\n\n if (firstName.trim() === \"\") {\n errors.firstName = \"First name is required.\";\n } else {\n if (!firstName.match(nameValidator)) {\n errors.firstName =\n \"First Name must be at least 3 characters, max 20. No special characters or numbers.\";\n }\n }\n\n if (lastName.trim() === \"\") {\n errors.lastName = \"Last Name is required.\";\n } else {\n if (!lastName.match(nameValidator)) {\n errors.lastName =\n \"Last name must be at least 3 characters, max 20. 
No special characters or numbers.\";\n }\n }\n\n if (email.trim() === \"\") {\n errors.email = \"Email is required.\";\n } else {\n if (!email.match(emailValidator)) {\n errors.email = \"Invalid email address.\";\n }\n }\n\n if (undergrad.university.trim() === \"\") {\n errors.undergradUniversity = \"Undergraduate university is required.\";\n }\n\n if (undergrad.year.trim() === \"\") {\n errors.undergradYear = \"Undergraduate year is required.\";\n } else {\n if (!undergrad.year.match(yearValidator)) {\n errors.undergradYear = \"Invalid undergraduate year.\";\n }\n }\n\n if (undergrad.major.trim() === \"\") {\n errors.undergradMajor = \"Undergraduate major is required.\";\n }\n\n if (\n grad.university.trim() !== \"\" ||\n grad.year.trim() !== \"\" ||\n grad.major.trim() !== \"\"\n ) {\n if (grad.university.trim() === \"\") {\n errors.gradUniversity = \"Graduate university is required.\";\n }\n\n if (grad.year.trim() === \"\") {\n errors.gradYear = \"Graduate year is required.\";\n } else {\n if (!grad.year.match(yearValidator)) {\n errors.gradYear = \"Invalid graduate year.\";\n }\n }\n\n if (grad.major.trim() === \"\") {\n errors.gradMajor = \"Graduate major is required.\";\n }\n }\n\n if (\n grad.university.trim() === \"\" &&\n grad.year.trim() === \"\" &&\n grad.major.trim() === \"\"\n ) {\n if (employer.trim() === \"\") {\n errors.employer = \"Employer is required.\";\n }\n\n if (position.trim() === \"\") {\n errors.position = \"Position is required.\";\n }\n }\n\n if (location.city.trim() === \"\") {\n errors.locationCity = \"City is required.\";\n }\n\n if (location.country === \"United States\") {\n if (location.state.trim() === \"\") {\n errors.locationState = \"State is required.\";\n }\n }\n\n if (location.country.trim() === \"\") {\n errors.locationCountry = \"Country is required.\";\n }\n\n if (linkedin.trim() === \"\") {\n errors.linkedin = \"LinkedIn Profile link is required.\";\n }\n\n return {\n errors,\n valid: Object.keys(errors).length < 1,\n };\n};\n\nmodule.exports.validateCreateClassInput = (code) => {\n const errors = {};\n\n const codeValidator = /^[a-zA-Z0-9]*$/i;\n\n if (code.trim() === \"\") {\n errors.code = \"No code was provided.\";\n } else {\n if (!code.match(codeValidator)) {\n errors.code =\n \"Course code must be made up of letters (A-Z) and numbers (0-9). No special characters allowed.\";\n }\n }\n\n return {\n errors,\n valid: Object.keys(errors).length < 1,\n };\n};\n\nmodule.exports.validateReimbursementRequest = (\n firstName,\n lastName,\n email,\n studentId,\n address,\n company,\n event,\n description,\n reimbursed,\n amount\n) => {\n const errors = {};\n\n const nameValidator = /^[a-zA-Z ',.-]{3,20}$/;\n const emailValidator = /^([0-9a-zA-Z]([-.\\w]*[0-9a-zA-Z])*@([0-9a-zA-Z][-\\w]*[0-9a-zA-Z]\\.)+[a-zA-Z]{2,12})$/;\n\n if (firstName.trim() === \"\") {\n errors.firstName = \"First name is required.\";\n } else {\n if (!firstName.match(nameValidator)) {\n errors.firstName =\n \"First Name must be at least 3 characters, max 20. No special characters or numbers.\";\n }\n }\n\n if (lastName.trim() === \"\") {\n errors.lastName = \"Last Name is required.\";\n } else {\n if (!lastName.match(nameValidator)) {\n errors.lastName =\n \"Last name must be at least 3 characters, max 20. 
No special characters or numbers.\";\n }\n }\n\n if (email.trim() === \"\") {\n errors.email = \"Email is required.\";\n } else {\n if (!email.match(emailValidator)) {\n errors.email = \"Invalid email address.\";\n }\n }\n\n if (studentId.trim() === \"\") {\n errors.studentId = \"Student ID is required.\"\n } else if (isNaN(studentId)) {\n errors.studentId = \"Student ID can only be numbers.\"\n }else {\n if (studentId > 99999999 || studentId < 10000000) {\n errors.studentId = \"Invalid student ID.\";\n }\n }\n\n if (amount.trim() === \"\") {\n errors.amount = \"Amount is required.\"\n }\n\n if (address.trim() === \"\") {\n errors.address = \"Address is required.\"\n }\n\n if (company.trim() === \"\") {\n errors.company = \"Company is required.\"\n }\n\n if (event.trim() === \"\") {\n errors.event = \"Event is required.\"\n }\n\n if (description.trim() === \"\") {\n errors.description = \"Description is required.\"\n }\n\n return {\n errors,\n valid: Object.keys(errors).length < 1\n };\n};\n\nmodule.exports.validateRentalRequest = (\n numberRequested,\n totalQuantity,\n currentRenters\n) => {\n let errors = {};\n\n //is in stock?\n if (totalQuantity - (currentRenters.length + numberRequested) < 0) {\n errors.availability =\n \"The requested number is too high for the current stock\";\n }\n\n if(numberRequested === 0) {\n errors.invalid = 'The requested number must be greater than 0';\n }\n\n return {\n errors,\n valid: Object.keys(errors).length < 1,\n };\n};\n\nmodule.exports.validateContactUsForm = (\n firstName,\n lastName,\n email,\n messageType,\n message,\n) => {\n const errors = {};\n\n const emailValidator = /^([0-9a-zA-Z]([-.\\w]*[0-9a-zA-Z])*@([0-9a-zA-Z][-\\w]*[0-9a-zA-Z]\\.)+[a-zA-Z]{2,12})$/;\n\n if (firstName.trim() === \"\") {\n errors.firstName = \"First name is required.\";\n } else {\n if (firstName.length > 20) {\n errors.firstName = \"First name must be 20 characters max.\"\n }\n }\n\n if (lastName.trim() === \"\") {\n errors.lastName = \"Last Name is required.\";\n } else {\n if (lastName.length > 20) {\n errors.lastName = \"Last name must be 20 characters max.\"\n }\n }\n\n if (email.trim() === \"\") {\n errors.email = \"Email is required.\";\n } else {\n if (!email.match(emailValidator)) {\n errors.email = \"Invalid email address.\";\n }\n }\n\n if (messageType.trim() === \"\") {\n errors.messageType = \"Goal is required.\";\n }\n\n if (message.trim() === \"\") {\n errors.message = \"Message is required.\";\n } else {\n if (message.length > 500) {\n errors.message = \"Message must be 500 characters max.\"\n }\n }\n\n return {\n errors,\n valid: Object.keys(errors).length < 1,\n };\n\n}\n" }, { "alpha_fraction": 0.5030755996704102, "alphanum_fraction": 0.5051625370979309, "avg_line_length": 23.80653953552246, "blob_id": "869bc4c89f15ed724ee0a9c26ce21053c8452cb1", "content_id": "68a32577030b0440c01ec842a3d299eaf84ec044", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 9104, "license_type": "no_license", "max_line_length": 88, "num_lines": 367, "path": "/graphql/resolvers/events.js", "repo_name": "shpe-uf/SHPE-UF-SERVER", "src_encoding": "UTF-8", "text": "const Event = require(\"../../models/Event.js\");\nconst User = require(\"../../models/User.js\");\nconst Request = require(\"../../models/Request.js\");\n\nconst {\n validateCreateEventInput,\n validateManualInputInput,\n} = require(\"../../util/validators\");\n\nconst categoryOptions = require(\"../../json/category.json\");\nconst monthOptions = 
require(\"../../json/month.json\");\nvar { events } = require(\"react-mapbox-gl/lib/map-events\");\n\nconst {\n handleInputError,\n handleGeneralError,\n} = require(\"../../util/error-handling\");\n\nmodule.exports = {\n Query: {\n async getEvents() {\n try {\n const events = await Event.find().sort({ createdAt: 1 });\n return events;\n } catch (err) {\n handleGeneralError(err, err.message);\n }\n },\n async getEventsReversed() {\n try {\n const events = await Event.find().sort({ createdAt: -1 });\n return events;\n } catch (err) {\n throw new Error(err);\n }\n },\n },\n\n Mutation: {\n async createEvent(\n _,\n {\n createEventInput: { name, code, category, expiration, request, points },\n }\n ) {\n const { valid, errors } = validateCreateEventInput(\n name,\n code,\n category,\n points,\n expiration\n );\n\n if (!valid) {\n handleInputError(errors);\n }\n\n const findPoints = categoryOptions.find(({ key }) => key === category);\n const month = new Date().getMonth();\n\n code = code.toLowerCase().trim().replace(/ /g, \"\");\n points = category === \"Miscellaneous\" ? points : findPoints.points;\n\n semester = monthOptions[month].value;\n expiration = new Date(\n new Date().getTime() + parseInt(expiration, 10) * 60 * 60 * 1000\n );\n request = request === \"true\" || request === true ? true : false;\n\n isEventNameDuplicate = await Event.findOne({ name });\n\n if (isEventNameDuplicate) {\n errors.general = \"An event with that name already exists.\";\n handleInputError(errors);\n }\n\n isEventCodeDuplicate = await Event.findOne({ code });\n\n if (isEventCodeDuplicate) {\n errors.general = \"An event with that code already exists.\";\n handleInputError(errors);\n }\n\n const newEvent = new Event({\n name,\n code,\n category,\n points,\n attendance: 0,\n expiration,\n semester,\n request,\n users: [],\n createdAt: new Date().toISOString(),\n });\n\n await newEvent.save();\n\n const updatedEvents = await Event.find();\n\n return updatedEvents;\n },\n\n async manualInput(_, { manualInputInput: { username, eventName } }) {\n const { valid, errors } = validateManualInputInput(username);\n\n if (!valid) {\n handleInputError(errors);\n }\n\n const user = await User.findOne({\n username,\n });\n\n const event = await Event.findOne({\n name: eventName,\n });\n\n const request = await Request.findOne({\n username: username,\n eventName: eventName,\n });\n\n if (!user) {\n errors.general = \"User not found.\";\n handleInputError(errors);\n }\n\n if (!event) {\n errors.general = \"Event not found.\";\n handleInputError(errors);\n }\n\n if (request) {\n errors.general =\n \"This member has sent a request for this event code. 
Check the Requests tab.\";\n handleInputError(errors);\n }\n\n user.events.map((userEvent) => {\n if (String(userEvent.name) == String(event.name)) {\n errors.general = \"Event code already redeemed by the user.\";\n handleInputError(errors);\n }\n });\n\n var pointsIncrease = {};\n\n if (event.semester === \"Fall Semester\") {\n pointsIncrease = {\n points: event.points,\n fallPoints: event.points,\n };\n } else if (event.semester === \"Spring Semester\") {\n pointsIncrease = {\n points: event.points,\n springPoints: event.points,\n };\n } else if (event.semester === \"Summer Semester\") {\n pointsIncrease = {\n points: event.points,\n summerPoints: event.points,\n };\n } else {\n errors.general = \"Invalid event.\";\n handleInputError(errors);\n }\n\n var updatedUser = await User.findOneAndUpdate(\n {\n username,\n },\n {\n $push: {\n events: {\n $each: [\n {\n name: event.name,\n category: event.category,\n createdAt: event.createdAt,\n points: event.points,\n },\n ],\n $sort: { createdAt: 1 },\n },\n },\n $inc: pointsIncrease,\n },\n {\n new: true,\n }\n );\n\n updatedUser.message = \"\";\n\n await Event.findOneAndUpdate(\n {\n name: eventName,\n },\n {\n $push: {\n users: {\n $each: [\n {\n firstName: user.firstName,\n lastName: user.lastName,\n username: user.username,\n email: user.email,\n },\n ],\n $sort: { lastName: 1, firstName: 1 },\n },\n },\n $inc: {\n attendance: 1,\n },\n },\n {\n new: true,\n }\n );\n\n const updatedEvents = await Event.find();\n\n return updatedEvents;\n },\n async removeUserFromEvent(\n _,\n { manualInputInput: { username, eventName } }\n ) {\n const { valid, errors } = validateManualInputInput(username);\n\n if (!valid) {\n handleInputError(errors);\n }\n\n const user = await User.findOne({\n username,\n });\n\n const event = await Event.findOne({\n name: eventName,\n });\n\n if (!user) {\n errors.general = \"User not found.\";\n handleInputError(errors);\n }\n\n if (!event) {\n errors.general = \"Event not found.\";\n handleInputError(errors);\n }\n\n if (!user.events.map((e) => e.name).includes(event.name)) {\n errors.general = \"User is not member of event.\";\n handleInputError(errors);\n }\n\n newEvents = user.events.filter((e) => e.name !== event.name);\n newUsers = event.users.filter((e) => e.username !== user.username);\n\n if (event.semester === \"Fall Semester\") {\n await User.findOneAndUpdate(\n { username },\n {\n events: newEvents,\n points: user.points - event.points,\n fallPoints: user.fallPoints - event.points,\n }\n );\n } else if (event.semester === \"Spring Semester\") {\n await User.findOneAndUpdate(\n { username },\n {\n events: newEvents,\n points: user.points - event.points,\n springPoints: user.springPoints - event.points,\n }\n );\n } else if (event.semester === \"Summer Semester\") {\n await User.findOneAndUpdate(\n { username },\n {\n events: newEvents,\n points: user.points - event.points,\n summerPoints: user.summerPoints - event.points,\n }\n );\n } else {\n errors.general = \"Invalid event.\";\n handleInputError(errors);\n }\n\n newEvent = await Event.findOneAndUpdate(\n { name: eventName },\n { users: newUsers, attendance: event.attendance - 1 },\n { new: true }\n );\n\n return newEvent;\n },\n async deleteEvent(_, { eventName }) {\n const errors = {};\n const users = await User.find();\n const event = await Event.findOne({\n name: eventName,\n });\n\n if (!users || !users.length || users.length === 0) {\n errors.general = \"User not found.\";\n handleInputError(errors);\n }\n\n if (!event) {\n errors.general = \"Event 
not found.\";\n handleInputError(errors);\n }\n\n var pointsDecrease = {};\n\n if (event.semester === \"Fall Semester\") {\n pointsDecrease = {\n points: -event.points,\n fallPoints: -event.points,\n };\n } else if (event.semester === \"Spring Semester\") {\n pointsDecrease = {\n points: -event.points,\n springPoints: -event.points,\n };\n } else if (event.semester === \"Summer Semester\") {\n pointsDecrease = {\n points: -event.points,\n summerPoints: -event.points,\n };\n } else {\n errors.general = \"Invalid event.\";\n handleInputError(errors);\n }\n\n await Event.deleteOne({ name: eventName });\n\n await User.updateMany(\n {\n events: {\n $elemMatch: {\n name: eventName,\n },\n },\n },\n {\n $pull: {\n events: {\n name: eventName,\n },\n },\n $inc: pointsDecrease,\n }\n );\n\n events = await Event.find();\n\n return events;\n },\n },\n};\n" }, { "alpha_fraction": 0.6540535688400269, "alphanum_fraction": 0.6540535688400269, "avg_line_length": 20.840909957885742, "blob_id": "6187f4bd343d0886a51b67152467fe5b34a7c1e0", "content_id": "354ff2a777381cfa3f8aecfbcd6f5b80a4943c6b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 10571, "license_type": "no_license", "max_line_length": 80, "num_lines": 484, "path": "/graphql/typeDefs.js", "repo_name": "shpe-uf/SHPE-UF-SERVER", "src_encoding": "UTF-8", "text": "const { gql } = require(\"graphql-tag\");\n\nmodule.exports = gql`\n ### MAIN MODEL TYPES ###\n\n type User {\n id: ID!\n firstName: String!\n lastName: String!\n photo: String!\n major: String!\n year: String!\n graduating: String!\n country: String!\n ethnicity: String!\n sex: String!\n username: String!\n email: String!\n password: String!\n createdAt: String!\n updatedAt: String!\n points: Int!\n fallPoints: Int!\n springPoints: Int!\n summerPoints: Int!\n fallPercentile: Int!\n springPercentile: Int!\n summerPercentile: Int!\n permission: String!\n listServ: Boolean!\n events: [Event]!\n tasks: [Task]!\n bookmarkedTasks: [String]!\n token: String!\n message: String!\n confirmed: Boolean!\n bookmarks: [String]!\n classes: [String]\n internships: [String]\n socialMedia: [String]\n }\n\n type Event {\n id: ID!\n name: String!\n code: String!\n category: String!\n points: Int!\n attendance: Int!\n expiration: String!\n request: Boolean!\n semester: String!\n createdAt: String!\n users: [User]!\n }\n\n type Task {\n id: ID!\n name: String!\n startDate: String!\n endDate: String!\n description: String!\n points: Int!\n attendance: Int!\n semester: String!\n createdAt: String!\n users: [User]\n }\n\n type Corporation {\n id: ID!\n name: String!\n logo: String!\n slogan: String!\n majors: [String!]!\n industries: [String!]!\n overview: String!\n mission: String!\n goals: String!\n businessModel: String!\n newsLink: String!\n applyLink: String!\n academia: Boolean!\n govContractor: Boolean!\n nonProfit: Boolean!\n visaSponsor: Boolean!\n shpeSponsor: Boolean!\n industryPartnership: Boolean!\n fallBBQ: Boolean!\n springBBQ: Boolean!\n nationalConvention: Boolean!\n }\n\n type Request {\n id: ID!\n name: String!\n type: String!\n points: String!\n firstName: String!\n lastName: String!\n username: String!\n createdAt: String!\n }\n\n type Alumni {\n id: ID!\n firstName: String!\n lastName: String!\n email: String!\n undergrad: Undergrad!\n grad: Grad!\n employer: String!\n position: String!\n location: Location\n coordinates: Coordinates!\n linkedin: String!\n }\n\n type Reimbursement {\n id: ID!\n firstName: String!\n lastName: 
String!\n email: String!\n studentId: Int!\n address: String!\n company: String!\n event: String!\n description: String!\n reimbursed: String!\n amount: String!\n ufEmployee: Boolean!\n receiptPhoto: String!\n eventFlyer: String!\n }\n\n type Rentable {\n item: String!\n quantity: Int!\n level: Int!\n description: String\n link: String\n renters: [String]!\n category: String!\n image: String!\n }\n\n type Receipt {\n id: ID!\n username: String!\n item: String!\n quantity: Int!\n email: String!\n dateCheckedOut: String!\n datePickedUp: String\n dateClosed: String\n deleted: Boolean\n }\n\n type ContactRequest {\n firstName: String!\n lastName: String!\n email: String!\n messageType: String!\n message: String!\n }\n\n ### AUXILIARY TYPES ###\n type StatData {\n _id: String!\n value: Int!\n }\n\n type Token {\n token: String!\n }\n\n type Undergrad {\n university: String!\n year: Int!\n major: String!\n }\n\n type Grad {\n university: String!\n year: Int!\n major: String!\n }\n\n type Location {\n city: String!\n state: String!\n country: String!\n }\n\n type Coordinates {\n latitude: Float!\n longitude: Float!\n }\n\n ### QUERY AND MUTATION INPUTS ###\n\n input RegisterInput {\n firstName: String!\n lastName: String!\n major: String!\n year: String!\n graduating: String!\n country: String!\n ethnicity: String!\n sex: String!\n username: String!\n email: String!\n password: String!\n confirmPassword: String!\n listServ: String!\n }\n\n input CreateEventInput {\n name: String!\n code: String!\n category: String!\n points: String!\n expiration: String!\n request: String!\n }\n\n input CreateTaskInput {\n name: String!\n startDate: String!\n endDate: String!\n description: String!\n points: Int!\n }\n\n input TransactionData {\n item: String!\n username: String!\n numberOfItems: Int!\n email: String!\n }\n\n input CreateCorporationInput {\n name: String!\n logo: String!\n slogan: String!\n majors: [String!]!\n industries: [String!]!\n overview: String!\n mission: String!\n goals: String!\n businessModel: String!\n newsLink: String!\n applyLink: String!\n academia: String!\n govContractor: String!\n nonProfit: String!\n visaSponsor: String!\n shpeSponsor: String!\n industryPartnership: String!\n fallBBQ: String!\n springBBQ: String!\n nationalConvention: String!\n }\n\n input EditCorporationInput {\n id: ID!\n name: String!\n logo: String!\n slogan: String!\n majors: [String!]!\n industries: [String!]!\n overview: String!\n mission: String!\n goals: String!\n businessModel: String!\n newsLink: String!\n applyLink: String!\n academia: String!\n govContractor: String!\n nonProfit: String!\n visaSponsor: String!\n shpeSponsor: String!\n industryPartnership: String!\n fallBBQ: String!\n springBBQ: String!\n nationalConvention: String!\n }\n\n input RedeemPointsInput {\n code: String!\n username: String!\n guests: Int!\n }\n\n input bookmarkTaskInput {\n name: String!\n username: String!\n }\n\n input unBookmarkTaskInput {\n name: String!\n username: String!\n }\n\n input RedeemTasksPointsInput {\n name: String!\n username: String!\n }\n\n input ApproveRejectRequestInput {\n username: String!\n name: String!\n type: String!\n }\n\n input ManualInputInput {\n username: String!\n eventName: String!\n }\n\n input ManualTaskInputInput {\n username: String!\n taskName: String!\n }\n\n input RegisterAlumniInput {\n firstName: String!\n lastName: String!\n email: String!\n undergrad: UndergradInput!\n grad: GradInput!\n employer: String!\n position: String!\n location: LocationInput!\n linkedin: String!\n 
}\n\n input EditUserProfileInput {\n email: String!\n firstName: String!\n lastName: String!\n photo: String!\n major: String!\n year: String!\n graduating: String!\n country: String!\n ethnicity: String!\n sex: String!\n classes: [String]\n internships: [String]\n socialMedia: [String]\n }\n\n input EditUpdatedAtInput {\n email: String!\n updatedAt: String!\n }\n\n input ReimbursementInput {\n firstName: String!\n lastName: String!\n email: String!\n studentId: String!\n address: String!\n company: String!\n event: String!\n description: String!\n reimbursed: String!\n amount: String!\n ufEmployee: String!\n receiptPhoto: String!\n eventFlyer: String!\n execute: Boolean!\n }\n\n ### AUXILIARY INPUTS ###\n input UndergradInput {\n university: String!\n year: String!\n major: String!\n }\n\n input GradInput {\n university: String!\n year: String!\n major: String!\n }\n\n input LocationInput {\n city: String!\n state: String!\n country: String!\n }\n\n ### QUERIES LIST ###\n\n type Query {\n getUsers: [User]\n getUser(userId: ID!): User\n getEvents: [Event]\n getEventsReversed: [Event]\n getTasks: [Task]\n getRequests: [Request]\n getMatches(username: String!): [User]\n getCorporations: [Corporation]\n getMajorStat: [StatData]\n getCountryStat: [StatData]\n getYearStat: [StatData]\n getSexStat: [StatData]\n getEthnicityStat: [StatData]\n getAlumnis: [Alumni]\n getReimbursements: [Reimbursement]\n getInventory: [Rentable]\n getItem(item: String): Rentable\n getReceipts: [Receipt]\n }\n\n ### MUTATIONS LIST ###\n\n type Mutation {\n register(registerInput: RegisterInput): User!\n login(username: String!, password: String!, remember: String!): User!\n createCorporation(\n createCorporationInput: CreateCorporationInput\n ): [Corporation]\n editCorporation(editCorporationInput: EditCorporationInput): Corporation!\n deleteCorporation(corporationId: ID!): [Corporation]!\n createEvent(createEventInput: CreateEventInput): [Event]\n redeemPoints(redeemPointsInput: RedeemPointsInput): User!\n createTask(createTaskInput: CreateTaskInput): Task!\n bookmarkTask(bookmarkTaskInput: bookmarkTaskInput): User!\n unBookmarkTask(unBookmarkTaskInput: unBookmarkTaskInput): User!\n redeemTasksPoints(redeemTasksPointsInput: RedeemTasksPointsInput): User!\n approveRequest(\n approveRejectRequestInput: ApproveRejectRequestInput\n ): [Request]\n rejectRequest(\n approveRejectRequestInput: ApproveRejectRequestInput\n ): [Request]\n manualInput(manualInputInput: ManualInputInput): [Event]\n deleteEvent(eventName: String!): [Event]\n removeUserFromEvent(manualInputInput: ManualInputInput): Event\n manualTaskInput(manualTaskInputInput: ManualTaskInputInput): Task\n removeUserFromTask(manualTaskInputInput: ManualTaskInputInput): Task\n deleteTask(taskId: ID!): [Task]\n forgotPassword(email: String!): User!\n resetPassword(\n password: String!\n confirmPassword: String!\n token: String!\n ): Token!\n confirmUser(id: String!): User!\n bookmark(company: String!, username: String!): User!\n deleteBookmark(company: String!, username: String!): User!\n registerAlumni(registerAlumniInput: RegisterAlumniInput): Alumni!\n changePermission(\n email: String!\n currentEmail: String!\n permission: String!\n ): User!\n editUserProfile(editUserProfileInput: EditUserProfileInput): User!\n editUpdatedAt(editUpdatedAtInput: EditUpdatedAtInput): User!\n updateYears: [User]\n reimbursementRequest(reimbursementInput: ReimbursementInput): Reimbursement!\n resolveReimbursement(id: ID!, email: String!): Reimbursement!\n unresolveReimbursement(id: 
ID!, email: String!): Reimbursement!\n cancelReimbursement(id: ID!, email: String!): Reimbursement!\n uncancelReimbursement(id: ID!, email: String!): Reimbursement!\n checkOutItem(data: TransactionData): [Rentable]\n pickUpItem(receiptID: ID!): Receipt\n returnItem(receiptID: ID!): Receipt\n unPickUpItem(receiptID: ID!): Receipt\n unReturnItem(receiptID: ID!): Receipt\n deleteReceipt(receiptID: ID!): Receipt\n submitContactRequest(\n firstName: String!\n lastName: String!\n email: String!\n messageType: String!\n message: String!\n ): ContactRequest!\n resetPercentile(semester: String!): Int!\n }\n`;\n" }, { "alpha_fraction": 0.7742946743965149, "alphanum_fraction": 0.7868338823318481, "avg_line_length": 20.33333396911621, "blob_id": "097369cd2d99b02d76a34c610a38091c7c13be71", "content_id": "5bd49ce704f89d2f41ef69cb6ba069fe0b0c4e20", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 319, "license_type": "no_license", "max_line_length": 117, "num_lines": 15, "path": "/README.md", "repo_name": "shpe-uf/SHPE-UF-SERVER", "src_encoding": "UTF-8", "text": "# Backend code for the SHPE UF web page\n\n## Scripts\n\n#npm install\n\nInstalls the node_modules folder needed to run the server\n\n#nodemon\n\nRuns the server in development mode on port 5000\n\n#Build scripts\n\nBuild scripts are not included in this readme beacause the build is deployed on Heroku which builds the server for us" }, { "alpha_fraction": 0.563043475151062, "alphanum_fraction": 0.563043475151062, "avg_line_length": 17.039215087890625, "blob_id": "42f31524cf025a5fc77b62b5b47c2aebb380f38a", "content_id": "18adca0bce61fc4c255c8779658b322dacf77dbb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 920, "license_type": "no_license", "max_line_length": 64, "num_lines": 51, "path": "/models/Receipt.js", "repo_name": "shpe-uf/SHPE-UF-SERVER", "src_encoding": "UTF-8", "text": "const { Schema } = require('mongoose');\nconst mongoose = require('mongoose');\n\nconst receiptSchema = new Schema(\n {\n username: {\n type: String,\n required: true,\n },\n item: {\n type: String,\n required: true,\n },\n quantity: {\n type: String,\n required: true,\n },\n email: {\n type: String,\n required: false,\n },\n dateCheckedOut: {\n type: String,\n required: true,\n },\n datePickedUp: {\n type: String,\n required: false,\n },\n dateClosed: {\n type: String,\n required: false,\n },\n deleted: {\n type: Boolean,\n required: true,\n default: false,\n },\n },\n {\n collection: 'Receipts',\n }\n);\n\nconst switchDB = mongoose.createConnection(process.env.DB_URI, {\n useNewUrlParser: true,\n useUnifiedTopology: true,\n useFindAndModify: true,\n});\n\nmodule.exports = switchDB.model('Receipt', receiptSchema);\n" }, { "alpha_fraction": 0.5087034106254578, "alphanum_fraction": 0.5109350681304932, "avg_line_length": 27.450794219970703, "blob_id": "d1e3e1a9c1b76ba012acbe227d1b6d00fe53c7d2", "content_id": "a1fe947f4a34e393be340a394ff9db74f3b8c128", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 8962, "license_type": "no_license", "max_line_length": 135, "num_lines": 315, "path": "/graphql/resolvers/reimbursements.js", "repo_name": "shpe-uf/SHPE-UF-SERVER", "src_encoding": "UTF-8", "text": "const Reimbursement = require(\"../../models/Reimbursement.js\");\nconst { validateReimbursementRequest } = require(\"../../util/validators\");\nconst nodemailer = 
require(\"nodemailer\");\n\nrequire(\"dotenv\").config();\n\nconst {\n handleInputError,\n handleGeneralError,\n} = require(\"../../util/error-handling\");\n\nmodule.exports = {\n Query: {\n async getReimbursements() {\n try {\n const reimbursement = await Reimbursement.find().sort({\n lastName: 1,\n firstName: 1,\n });\n return reimbursement;\n } catch (err) {\n handleGeneralError(err, err.message);\n }\n },\n },\n\n Mutation: {\n async reimbursementRequest(\n _,\n {\n reimbursementInput: {\n firstName,\n lastName,\n eventFlyer,\n email,\n studentId,\n address,\n company,\n event,\n description,\n reimbursed,\n amount,\n ufEmployee,\n receiptPhoto,\n execute,\n },\n }\n ) {\n const { valid, errors } = validateReimbursementRequest(\n firstName,\n lastName,\n email,\n studentId,\n address,\n company,\n event,\n description,\n reimbursed,\n amount,\n receiptPhoto,\n ufEmployee,\n eventFlyer\n );\n\n if (!valid) {\n handleInputError(errors);\n }\n\n ufEmployee = ufEmployee === \"true\" || ufEmployee === true ? true : false;\n\n const newReimbursement = new Reimbursement({\n firstName,\n lastName,\n email,\n studentId,\n address,\n company,\n event,\n description,\n reimbursed,\n amount,\n ufEmployee,\n receiptPhoto,\n eventFlyer,\n });\n\n if (execute) {\n await newReimbursement.save();\n\n const transporter = nodemailer.createTransport({\n service: process.env.EMAIL_SERVICE,\n auth: {\n user: process.env.EMAIL,\n pass: process.env.EMAIL_PASSWORD,\n },\n });\n\n const requesterMail = {\n from: process.env.EMAIL,\n to: `${email}`,\n subject: \"Reimbursement Request\",\n text:\n \"You made a request for a reimbursement.\\n\\n\" +\n `ID: ${newReimbursement.id}\\n` +\n `Name: ${firstName} ${lastName}\\n` +\n `Email: ${email}\\n` +\n `Student ID: ${studentId}\\n` +\n `UF Employee: ${ufEmployee.toString()}` +\n `Address: ${address}\\n` +\n `Company: ${company}\\n` +\n `Event: ${event}\\n` +\n `Event description: ${description}\\n` +\n `Amount: $${amount}\\n\\n` +\n `Receipt: ${receiptPhoto}\\n` +\n `Event Flyer: ${eventFlyer}\\n` +\n \"You will get a confirmation email when your request is approved. 
If you have any questions, send us an email at [email protected]!\",\n };\n\n transporter.sendMail(requesterMail, (err, response) => {\n if (err) {\n console.error(\"there was an error: \", err);\n } else {\n res.status(200).json(\"recovery email sent\");\n }\n });\n\n const treasuryMail = {\n from: process.env.EMAIL,\n to: process.env.TREASURY_EMAIL,\n subject: \"Reimbursement Request - \" + `${firstName} ${lastName}`,\n text:\n \"There is a new request for a reimbursement.\\n\\n\" +\n `ID: ${newReimbursement.id}\\n` +\n `Name: ${firstName} ${lastName}\\n` +\n `Email: ${email}\\n` +\n `Student ID: ${studentId}\\n` +\n `UF Employee: ${ufEmployee.toString()}\\n` +\n `Address: ${address}\\n` +\n `Company: ${company}\\n` +\n `Event: ${event}\\n` +\n `Event description: ${description}\\n` +\n `Amount: $${amount}\\n\\n` +\n `Receipt: ${receiptPhoto}\\n` +\n `Event Flyer: ${eventFlyer}\\n`,\n };\n\n transporter.sendMail(treasuryMail, (err, response) => {\n if (err) {\n console.error(\"there was an error: \", err);\n } else {\n res.status(200).json(\"recovery email sent\");\n }\n });\n }\n\n return newReimbursement;\n },\n\n async resolveReimbursement(_, { id, email }) {\n try {\n const resolvedReimbursement = await Reimbursement.findByIdAndUpdate(\n id,\n { reimbursed: \"resolved\" }\n );\n\n const transporter = nodemailer.createTransport({\n service: process.env.EMAIL_SERVICE,\n auth: {\n user: process.env.EMAIL,\n pass: process.env.EMAIL_PASSWORD,\n },\n });\n\n const requesterMail = {\n from: process.env.EMAIL,\n to: `${email}`,\n subject: \"Reimbursement Request Resolved\",\n text:\n \"Your request for a reimbursement:\\n\\n\" +\n `Reimbursement ID: ${id} \\n\\n` +\n \"has been has been resolved. If you have any questions, send us an email at [email protected]!\",\n };\n\n transporter.sendMail(requesterMail, (err, response) => {\n if (err) {\n console.error(\"there was an error: \", err);\n } else {\n res.status(200).json(\"recovery email sent\");\n }\n });\n\n return resolvedReimbursement;\n } catch (err) {\n handleGeneralError(err, err.message);\n }\n },\n\n async unresolveReimbursement(_, { id, email }) {\n try {\n const unresolvedReimbursement = await Reimbursement.findByIdAndUpdate(\n id,\n { reimbursed: \"pending\" }\n );\n\n const transporter = nodemailer.createTransport({\n service: process.env.EMAIL_SERVICE,\n auth: {\n user: process.env.EMAIL,\n pass: process.env.EMAIL_PASSWORD,\n },\n });\n\n const requesterMail = {\n from: process.env.EMAIL,\n to: `${email}`,\n subject: \"Reimbursement Request Unresolved\",\n text:\n \"Your request for a reimbursement:\\n\\n\" +\n `Reimbursement ID: ${id} \\n\\n` +\n \"has been has been unresolved. 
If you have any questions, send us an email at [email protected]!\",\n };\n\n transporter.sendMail(requesterMail, (err, response) => {\n if (err) {\n console.error(\"there was an error: \", err);\n } else {\n res.status(200).json(\"recovery email sent\");\n }\n });\n\n return unresolvedReimbursement;\n } catch (err) {\n handleGeneralError(err, err.message);\n }\n },\n\n async cancelReimbursement(_, { id, email }) {\n try {\n const unresolvedReimbursement = await Reimbursement.findByIdAndUpdate(\n id,\n { reimbursed: \"cancelled\" }\n );\n\n const transporter = nodemailer.createTransport({\n service: process.env.EMAIL_SERVICE,\n auth: {\n user: process.env.EMAIL,\n pass: process.env.EMAIL_PASSWORD,\n },\n });\n\n const requesterMail = {\n from: process.env.EMAIL,\n to: `${email}`,\n subject: \"Reimbursement Request Cancelled\",\n text:\n \"Your request for a reimbursement:\\n\\n\" +\n `Reimbursement ID: ${id} \\n\\n` +\n \"has been has been cancelled. If you have any questions, send us an email at [email protected]!\",\n };\n\n transporter.sendMail(requesterMail, (err, response) => {\n if (err) {\n console.error(\"there was an error: \", err);\n } else {\n res.status(200).json(\"recovery email sent\");\n }\n });\n\n return unresolvedReimbursement;\n } catch (err) {\n handleGeneralError(err, err.message);\n }\n },\n\n async uncancelReimbursement(_, { id, email }) {\n try {\n const unresolvedReimbursement = await Reimbursement.findByIdAndUpdate(\n id,\n { reimbursed: \"pending\" }\n );\n\n const transporter = nodemailer.createTransport({\n service: process.env.EMAIL_SERVICE,\n auth: {\n user: process.env.EMAIL,\n pass: process.env.EMAIL_PASSWORD,\n },\n });\n\n const requesterMail = {\n from: process.env.EMAIL,\n to: `${email}`,\n subject: \"Reimbursement Request Uncancelled\",\n text:\n \"Your request for a reimbursement:\\n\\n\" +\n `Reimbursement ID: ${id} \\n\\n` +\n \"has been has been uncancelled. 
If you have any questions, send us an email at [email protected]!\",\n };\n\n transporter.sendMail(requesterMail, (err, response) => {\n if (err) {\n console.error(\"there was an error: \", err);\n } else {\n res.status(200).json(\"recovery email sent\");\n }\n });\n\n return unresolvedReimbursement;\n } catch (err) {\n handleGeneralError(err, err.message);\n }\n },\n },\n};\n" }, { "alpha_fraction": 0.7053763270378113, "alphanum_fraction": 0.7197132706642151, "avg_line_length": 28.680850982666016, "blob_id": "08a0e92e71a53b1bd92031e6c89138ddbbb80853", "content_id": "350b4709edd0ec73a451141f49284f5436a13349", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1395, "license_type": "no_license", "max_line_length": 114, "num_lines": 47, "path": "/json/updaters/courseWebScraper.py", "repo_name": "shpe-uf/SHPE-UF-SERVER", "src_encoding": "UTF-8", "text": "from selenium.webdriver.support.ui import Select\nfrom selenium import webdriver\nimport requests\nimport json\nimport time\nimport math\n\ndriver = webdriver.Chrome(executable_path=r'C:/Users/hp/Documents/chromedriver_win32/chromedriver.exe')\nurl = \"https://one.ufl.edu/soc/\"\n\ndriver.get(url)\ntime.sleep(10)\nhtml = driver.page_source\ntime.sleep(3)\nselectSemester = Select(driver.find_element_by_id('semes')).select_by_visible_text('Fall 2020')\ntime.sleep(3)\nselectProgram = Select(driver.find_element_by_id('prog')).select_by_visible_text('Campus / Web / Special Program')\ntime.sleep(3)\nsearchButton = driver.find_element_by_xpath('//*[@id=\"filterSidebar\"]/button')\nsearchButton.click()\ntime.sleep(3)\ntotalCourses = driver.find_element_by_id('totalCount').text\nstrTotal = totalCourses.split()\ntimesToClick = int(strTotal[0])\nshowMore = (math.floor(timesToClick / 50)) + 1\n\ntime.sleep(3)\n\nfor x in range(showMore):\n if x == showMore:\n break\n loadButton = driver.find_element_by_id('loadBtn')\n loadButton.click()\n time.sleep(4)\n\ncourseArr = []\nfor course in driver.find_elements_by_class_name('course-code'):\n courseObject = {\n \"key\": course.find_element_by_tag_name('h3').text,\n \"value\": course.find_element_by_tag_name('h3').text\n }\n courseArr.append(courseObject)\n\nwith open('courses.json', 'w') as outfile:\n json.dump(courseArr, outfile)\n\ndriver.close()\n" } ]
12
pasaunier/car_b_finder
https://github.com/pasaunier/car_b_finder
64e94396521360baed3ed31b14b321a320623af9
2f65a93f79cf9cfc382bf815e374d1f37b43fd18
68c5c75da875024061ea99e2fa57c275d2d92420
refs/heads/master
2023-04-13T15:35:19.926079
2021-04-25T15:09:27
2021-04-25T15:09:27
323,624,955
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5964912176132202, "alphanum_fraction": 0.5964912176132202, "avg_line_length": 57, "blob_id": "1cc3b90528a65ad5154d3769ac0bef8f2ff5bd25", "content_id": "383e8baa78d1f76d945f3f38c59d25de112a8413", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 57, "license_type": "no_license", "max_line_length": 57, "num_lines": 1, "path": "/car_finder_app/classes/__init__.py", "repo_name": "pasaunier/car_b_finder", "src_encoding": "UTF-8", "text": "__all__ = [\"Formatter\", \"Mediator\", \"Model\", \"Predicter\"]" }, { "alpha_fraction": 0.4929920732975006, "alphanum_fraction": 0.5484460592269897, "avg_line_length": 47.264705657958984, "blob_id": "021d7ee86f97d9bdeb56837c9ab32dd7aefc8519", "content_id": "40062cf11a01ed897dc589a8b40ff4efbbaa984b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1641, "license_type": "no_license", "max_line_length": 81, "num_lines": 34, "path": "/car_finder_app/classes/predicter.py", "repo_name": "pasaunier/car_b_finder", "src_encoding": "UTF-8", "text": "import tensorflow as tf\nimport numpy as np\n\nfrom classes.model import Model\n\n# Predicter class (predict the car brand)\n\n\nclass Predicter:\n # All car brands available\n __nameconv = {0: 'AM General', 1: 'Acura', 2: 'Aston', 3: 'Audi',\n 4: 'BMW', 5: 'Bentley', 6: 'Bugatti', 7: 'Buick',\n 8: 'Cadillac', 9: 'Chevrolet', 10: 'Chrysler',\n 11: 'Daewoo', 12: 'Dodge', 13: 'Eagle', 14: 'FIAT',\n 15: 'Ferrari', 16: 'Fisker', 17: 'Ford', 18: 'GMC',\n 19: 'Geo', 20: 'HUMMER', 21: 'Honda', 22: 'Hyundai',\n 23: 'Infiniti', 24: 'Isuzu', 25: 'Jaguar', 26: 'Jeep',\n 27: 'Lamborghini', 28: 'Land', 29: 'Lincoln', 30: 'MINI',\n 31: 'Maybach', 32: 'Mazda', 33: 'McLaren', 34: 'Mercedes-Benz',\n 35: 'Mitsubishi', 36: 'Nissan', 37: 'Plymouth', 38: 'Porsche',\n 39: 'Ram', 40: 'Rolls-Royce', 41: 'Scion', 42: 'Spyker',\n 43: 'Suzuki', 44: 'Tesla', 45: 'Toyota', 46: 'Volkswagen',\n 47: 'Volvo', 48: 'smart'}\n\n # Predict the car brand from preprocessed image\n @staticmethod\n def predict(img_processed, batch_size, verbose):\n # Get the model and predict the car brand of the image\n pred = (Model.getModel(r\"models\\sequential_custom.h5\")).predict(\n img_processed, batch_size=batch_size, verbose=verbose)\n # Get the index of the car brand predicted\n indice_pred = np.argmax(pred, axis=1)\n # Return the car brand string associated with the index predicted\n return Predicter.__nameconv[indice_pred[0]]\n" }, { "alpha_fraction": 0.645588219165802, "alphanum_fraction": 0.645588219165802, "avg_line_length": 29.954545974731445, "blob_id": "776c7e20258ba4f190597b406c680d5217e17b47", "content_id": "9a633f27b05cc32f7aaa8a4a264e6045a5144f93", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 680, "license_type": "no_license", "max_line_length": 93, "num_lines": 22, "path": "/car_finder_app/classes/model.py", "repo_name": "pasaunier/car_b_finder", "src_encoding": "UTF-8", "text": "import tensorflow as tf\n\n# Singleton Class model (to instantiate the tensorflow model only one time)\nclass Model:\n # Path to the model\n __path = \"\"\n # Prediction model\n __model = None\n\n # Constructor (instanciate the model, should normally be private but it's Python for you)\n @staticmethod\n def __init__(path):\n Model.__path = path\n Model.__model = tf.keras.models.load_model(path)\n\n # If the model is already instanciated return the model\n # Else instanciate it with the 
constructor and return it \n @staticmethod\n def getModel(path):\n if (Model.__model == None):\n Model.__init__(path)\n return Model.__model" }, { "alpha_fraction": 0.6403785347938538, "alphanum_fraction": 0.6475890278816223, "avg_line_length": 32.12686538696289, "blob_id": "9f3b4947637d0520951f9ed7afea41344f3e14d9", "content_id": "2c7f3019f3ce5ee0d23b4f5eea2c140536762f02", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4465, "license_type": "no_license", "max_line_length": 95, "num_lines": 134, "path": "/car_finder_app/app.py", "repo_name": "pasaunier/car_b_finder", "src_encoding": "UTF-8", "text": "#stl\nimport os\nimport sys\nimport inspect\n\n#image browsing\nfrom PIL import Image, ImageTk\n\nimport tkinter.filedialog as tkFileDialog\nfrom tkinter import Tk, RIGHT, BOTH, RAISED\nfrom tkinter.ttk import Frame, Button, Label, Style\n\nfrom classes.mediator import Mediator\n\n\n# Set current path to project root\n# currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))\n# parentdir = os.path.dirname(currentdir)\n# sys.path.insert(0,parentdir) \n\n\n\n# print(\"Result : \", Mediator.processUserInput(\"img/audi.jpg\"))\n\nclass PackManager(Frame):\n # Path to the browsed image\n img_path = \"\"\n # Image (tkinter integration)\n tkimage = None\n # Image displayer component\n img_displayer = None\n # Predict button component\n predictButton = None\n # Result label component\n resultLabel = None\n\n # Frame constructor\n def __init__(self):\n super().__init__()\n self.initUI()\n\n # App exit function (called on button quit)\n def quit(self):\n self.master.destroy()\n\n # Browse button behaviour\n def b_file_browser(e):\n # Width to resize the image for display\n new_width = 700\n \n # Launch file browser for JPEG files only (.jpg) and copy it in img_path\n path = tkFileDialog.askopenfilename(filetypes=[(\"JPEG image (.jpg)\",'.jpg')])\n e.img_path = path\n\n # Change label state for User Experience (UX)\n e.resultLabel.configure(text=\"( -_-)旦~ Waiting...\")\n \n # Open the image and resize it for display purposes then display image\n original = Image.open(path)\n multiplier = new_width / original.width\n new_height = int(original.height * multiplier)\n resized = original.resize((new_width, new_height), Image.ANTIALIAS)\n e.tkimage = ImageTk.PhotoImage(resized)\n e.img_displayer.configure(image=e.tkimage)\n e.img_displayer.image = e.tkimage\n\n # Resize window to image size\n e.master.geometry(f\"{new_width}x{new_height+40}\")\n\n # Enable predict button\n e.predictButton.configure(state=\"normal\")\n\n # Predict button behaviour\n def guess(e):\n # Call mediator function to guess what brand of car it is\n guess_str = Mediator.processUserInput(e.img_path)\n # Change label state to display the result found\n e.resultLabel.configure(text=\"(づ  ̄ ³ ̄)づ旦~ I think it's a \"+guess_str)\n # Disable the predict button to avoid multiple useless prediction\n e.predictButton.configure(state=\"disabled\")\n\n # Info button behaviour\n def displayInfo(e):\n # Change the label to display tips on how the app works\n e.resultLabel.configure(text=\"(☞゚∀゚)☞旦~ JPEG only, crop to max for better results\")\n\n # Frame building function\n def initUI(self):\n # Window settings\n self.master.title(\"Car Brand Finder\")\n self.style = Style()\n self.style.theme_use(\"alt\")\n\n # Create main frame\n frame = Frame(self, relief=RAISED)\n frame.pack(fill=BOTH, expand=True)\n\n self.pack(fill=BOTH, expand=True)\n\n # Create the 
image displayer\n self.img_displayer = Label(self,image = self.tkimage, width=600)\n self.img_displayer.image = self.tkimage\n self.img_displayer.pack()\n\n # Create the quit button\n closeButton = Button(self, text=\"Close\", command=self.quit)\n closeButton.pack(side=RIGHT, padx=5, pady=5)\n # Create the info button\n infoButton = Button(self, text=\"Info\", command=self.displayInfo)\n infoButton.pack(side=RIGHT, padx=5, pady=5)\n # Create the browse button\n browseButton = Button(self, text=\"Browse image\", command=self.b_file_browser)\n browseButton.pack(side=RIGHT, padx=5, pady=5)\n # Create the predict button\n self.predictButton = Button(self, text=\"Predict\", state=\"disabled\", command=self.guess)\n self.predictButton.pack(side=RIGHT, padx=5, pady=5)\n # Create the result (display) label\n self.resultLabel = Label(self, text=\"( -_-)旦~ Waiting...\")\n self.resultLabel.pack(side=RIGHT, padx=5, pady=5)\n\n# App main loop\ndef main():\n # Initiate the TKInter User Interface\n root = Tk()\n # Define window size\n root.geometry(\"700x200+300+300\")\n # Display the main frame\n app = PackManager()\n # Launch the app loop\n root.mainloop()\n\n# Launch the app\nif __name__ == '__main__':\n main()" }, { "alpha_fraction": 0.5880758762359619, "alphanum_fraction": 0.6124660968780518, "avg_line_length": 29.79166603088379, "blob_id": "ed8dcbc6e05b0dfdacc9c28287650dc5eb443b03", "content_id": "1569e6ddb8d57b23397c17afe3435fde7135691f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 738, "license_type": "no_license", "max_line_length": 91, "num_lines": 24, "path": "/car_finder_app/classes/formatter.py", "repo_name": "pasaunier/car_b_finder", "src_encoding": "UTF-8", "text": "import cv2\n# import matplotlib.pyplot as plt\n\n# Image formatter class (for preprocessing)\nclass Formatter:\n # Path to the image\n __path = \"\"\n # Image\n __img = None\n # Image preprocessed\n __res = None\n\n # Constructor : open and preprocess the image\n # SUPPORT ONLY JPEG AS FORMAT (format=\"jpg\")\n def __init__(self, path, format):\n self.__path = path\n # self.__img = plt.imread(self.__path, format=format)\n self.__img = cv2.imread(self.__path)\n self.__res = cv2.resize(self.__img, dsize=(244, 244), interpolation=cv2.INTER_AREA)\n self.__res = self.__res.reshape((1, 244, 244, 3))\n\n # Function to return the preprocessed image\n def getImage(self):\n return self.__res" }, { "alpha_fraction": 0.6973180174827576, "alphanum_fraction": 0.7011494040489197, "avg_line_length": 23.809524536132812, "blob_id": "a050cdebe520024109950b7705aeb86f6a532d26", "content_id": "2b4bc2badfbd390f6b4ad659619440644bf1ebaa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 522, "license_type": "no_license", "max_line_length": 62, "num_lines": 21, "path": "/car_finder_app/classes/mediator.py", "repo_name": "pasaunier/car_b_finder", "src_encoding": "UTF-8", "text": "\n# import formatter\n# import predicter\n\n# import formatter as form\nfrom classes import formatter\nfrom classes import predicter\n\n\n# Mediator class (only point where the model can be called)\n\n\nclass Mediator:\n\n # Process the request for car brand guess\n # <param path> Path to the image\n @staticmethod\n def processUserInput(path):\n # Call the image formatter\n f = formatter.Formatter(path, \"jpg\")\n # Return the model prediction\n return predicter.Predicter.predict(f.getImage(), 1, 1)\n" }, { "alpha_fraction": 0.5783132314682007, 
"alphanum_fraction": 0.6035049557685852, "avg_line_length": 59.86666488647461, "blob_id": "34c54411892ed28c889f201c47c5da04e4211c20", "content_id": "c2018af587d54733dd288c8a213504393511e3de", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 913, "license_type": "no_license", "max_line_length": 142, "num_lines": 15, "path": "/README.md", "repo_name": "pasaunier/car_b_finder", "src_encoding": "UTF-8", "text": "Credits to : \n<font size=\"-1\">\n &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;\n <b>3D Object Representations for Fine-Grained Categorization</b><br>\n &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;\n Jonathan Krause, Michael Stark, Jia Deng, Li Fei-Fei<br>\n &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;\n <i>4th IEEE Workshop on 3D Representation and Recognition, at ICCV 2013</i> <b>(3dRR-13)</b>. Sydney, Australia. Dec. 8, 2013.<br>\n &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;\n <a style=\"text-decoration: underline\" href=\"https://ai.stanford.edu/~jkrause/papers/3drr13.pdf\">[pdf]</a>\n &nbsp;&nbsp;<a style=\"text-decoration: underline\" href=\"https://ai.stanford.edu/~jkrause/papers/3drr13.bib\">[BibTex]</a>\n &nbsp;&nbsp;<a style=\"text-decoration: underline\" href=\"https://ai.stanford.edu/~jkrause/papers/3drr_talk.pdf\">[slides]</a>\n</font>\n\n for the dataset\n" } ]
7
dknovar/django-gui_ta
https://github.com/dknovar/django-gui_ta
44f33d79283546bc297032816318d4467c5f9262
9ac883eb2a3426074470c7335e2e1593bb166021
b10a8723a32632c499d77bb60dbe724c67a2b1b6
refs/heads/master
2020-05-20T12:17:18.254953
2019-05-08T08:53:18
2019-05-08T08:53:18
185,567,915
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.4924986958503723, "alphanum_fraction": 0.503879964351654, "avg_line_length": 34.14545440673828, "blob_id": "afa38233dcebc93458697fa75da92aa438d30cd7", "content_id": "24a17cc27d036d6ec22b343a4a1a20ac815432e5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1933, "license_type": "no_license", "max_line_length": 114, "num_lines": 55, "path": "/deteksi/migrations/0002_img_bnc_img_gray_img_h_img_s_img_v.py", "repo_name": "dknovar/django-gui_ta", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.11.20 on 2019-04-28 13:10\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('deteksi', '0001_initial'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='img_bnc',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('title', models.TextField()),\n ('img', models.ImageField(upload_to='bnc/')),\n ],\n ),\n migrations.CreateModel(\n name='img_gray',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('title', models.TextField()),\n ('img', models.ImageField(upload_to='gray/')),\n ],\n ),\n migrations.CreateModel(\n name='img_h',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('title', models.TextField()),\n ('img', models.ImageField(upload_to='h/')),\n ],\n ),\n migrations.CreateModel(\n name='img_s',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('title', models.TextField()),\n ('img', models.ImageField(upload_to='s/')),\n ],\n ),\n migrations.CreateModel(\n name='img_v',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('title', models.TextField()),\n ('img', models.ImageField(upload_to='v/')),\n ],\n ),\n ]\n" }, { "alpha_fraction": 0.7887640595436096, "alphanum_fraction": 0.7887640595436096, "avg_line_length": 23.77777862548828, "blob_id": "fc46201c3dd8a20106aac30deed5cc20a0ea33b9", "content_id": "4d08e99a0b62443097d4bb802a30bccbae54f9b3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 445, "license_type": "no_license", "max_line_length": 32, "num_lines": 18, "path": "/deteksi/admin.py", "repo_name": "dknovar/django-gui_ta", "src_encoding": "UTF-8", "text": "from django.contrib import admin\n\n# Register your models here.\nfrom.models import File\nfrom. models import img_bnc\nfrom. models import img_gray\nfrom. models import img_h\nfrom. models import img_s\nfrom. models import img_v\nfrom. 
models import tb_ciri\n\nadmin.site.register(File)\nadmin.site.register(img_bnc)\nadmin.site.register(img_gray)\nadmin.site.register(img_h)\nadmin.site.register(img_s)\nadmin.site.register(img_v)\nadmin.site.register(tb_ciri)" }, { "alpha_fraction": 0.6238532066345215, "alphanum_fraction": 0.6238532066345215, "avg_line_length": 20.899999618530273, "blob_id": "ef7dbb18ccb826c7fb74d71a9e27198cbbad2c60", "content_id": "6e9eaa1a0f6d01665edc6924d4d8e5d89ee62b81", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 218, "license_type": "no_license", "max_line_length": 40, "num_lines": 10, "path": "/login/urls.py", "repo_name": "dknovar/django-gui_ta", "src_encoding": "UTF-8", "text": "from django.conf.urls import url\n\nfrom . import views\n\nurlpatterns = [\n url(r'^recents/$', views.recents),\n url(r'^daftar/$', views.daftar),\n url(r'^lupapass/$', views.lupapass),\n url(r'^$', views.index),\n]" }, { "alpha_fraction": 0.6430155038833618, "alphanum_fraction": 0.6430155038833618, "avg_line_length": 22.789474487304688, "blob_id": "cce2e99867da5cb9efa44c3b2d8b8cac44af8757", "content_id": "4e81e81ebea29df671f411b807e3f1e23aa79dbf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 451, "license_type": "no_license", "max_line_length": 52, "num_lines": 19, "path": "/about/views.py", "repo_name": "dknovar/django-gui_ta", "src_encoding": "UTF-8", "text": "from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom. models import info \n# Create your views here.\n\ndef index(request):\n #query\n infos = info.objects.all()\n\n contex = {\n 'judul':'About Opang',\n 'penulis':'Iya Opang',\n 'banner':'img/a.jpg',\n 'infos': infos,\n }\n return render(request,'about/index.html',contex)\n\ndef recents(request):\n return render(request, 'about/test_pusher.html')" }, { "alpha_fraction": 0.4437805414199829, "alphanum_fraction": 0.48180335760116577, "avg_line_length": 37.35416793823242, "blob_id": "8a10e1e3eeb3af5a9c7c6d0607cff72ec1808b3a", "content_id": "571ee74ef2f1cff5b8d7c086076b672ef7bdbb40", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1841, "license_type": "no_license", "max_line_length": 114, "num_lines": 48, "path": "/deteksi/migrations/0003_tb_ciri.py", "repo_name": "dknovar/django-gui_ta", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.11.20 on 2019-04-28 13:24\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('deteksi', '0002_img_bnc_img_gray_img_h_img_s_img_v'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='tb_ciri',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('h', models.FloatField()),\n ('s', models.FloatField()),\n ('v', models.FloatField()),\n ('cont0', models.FloatField()),\n ('cont45', models.FloatField()),\n ('cont90', models.FloatField()),\n ('cont135', models.FloatField()),\n ('diss0', models.FloatField()),\n ('diss45', models.FloatField()),\n ('diss90', models.FloatField()),\n ('diss135', models.FloatField()),\n ('asm0', models.FloatField()),\n ('asm45', models.FloatField()),\n ('asm90', models.FloatField()),\n ('asm135', models.FloatField()),\n ('ener0', models.FloatField()),\n ('ener45', models.FloatField()),\n ('ener90', models.FloatField()),\n ('ener135', models.FloatField()),\n 
('homo0', models.FloatField()),\n ('homo45', models.FloatField()),\n ('homo90', models.FloatField()),\n ('homo135', models.FloatField()),\n ('corr0', models.FloatField()),\n ('corr45', models.FloatField()),\n ('corr90', models.FloatField()),\n ('corr135', models.FloatField()),\n ],\n ),\n ]\n" }, { "alpha_fraction": 0.49387040734291077, "alphanum_fraction": 0.5113835334777832, "avg_line_length": 23.869565963745117, "blob_id": "937e796144f8cac0380e5d21093ae19dfb0d83a8", "content_id": "71aa379a56cb34c90886242d00c8d0e727b011e1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 571, "license_type": "no_license", "max_line_length": 74, "num_lines": 23, "path": "/templates/index2.html", "repo_name": "dknovar/django-gui_ta", "src_encoding": "UTF-8", "text": "{% load static %}\n<!DOCTYPE html>\n<html lang=\"en\">\n<head>\n <meta charset=\"UTF-8\">\n <meta name=\"viewport\" content=\"width=device-width, initial-scale=1.0\">\n <meta http-equiv=\"X-UA-Compatible\" content=\"ie=edge\">\n <title>{{ judul }}</title>\n</head>\n<body>\n <img style=\"width: 100%\" src=\"{% static banner %}\">\n <h1>Selamat Dtang di home {{ judul }}</h1>\n <h2>penulis : {{ penulis}}</h2>\n <ul>\n {% for link,nama in nav %}\n <li>\n <a href=\"{{link}}\"> {{nama}} </a>\n </li>\n {% endfor %}\n </ul>\n \n</body>\n</html>" }, { "alpha_fraction": 0.671999990940094, "alphanum_fraction": 0.671999990940094, "avg_line_length": 22.85714340209961, "blob_id": "4b47f92b405fc35d8054b247868705e805272e6c", "content_id": "9fd0a9dc08d1e7b1f616bef7f5e4d23cf5bf2724", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 500, "license_type": "no_license", "max_line_length": 52, "num_lines": 21, "path": "/login/views.py", "repo_name": "dknovar/django-gui_ta", "src_encoding": "UTF-8", "text": "from django.shortcuts import render\nfrom django.http import HttpResponse\n\n# Create your views here.\n\ndef index(request):\n contex = {\n # 'judul':'About Opang',\n # 'penulis':'Iya Opang',\n # 'banner':'img/a.jpg',\n }\n return render(request,'login/index.html',contex)\n\ndef recents(request):\n return HttpResponse('INI ADALAH RECENTS')\n\ndef daftar(request):\n return render(request,'login/daftar.html')\n\ndef lupapass(request):\n return render(request,'login/lupapass.html')" }, { "alpha_fraction": 0.5610980987548828, "alphanum_fraction": 0.7315031886100769, "avg_line_length": 38.31578826904297, "blob_id": "64fe3633fdbf56ff79a3a0b0859a5f074afcec73", "content_id": "83053cc6fe7ce8f9a9be5ca1540e5ecb1db9c65b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2987, "license_type": "no_license", "max_line_length": 68, "num_lines": 76, "path": "/deteksi/models.py", "repo_name": "dknovar/django-gui_ta", "src_encoding": "UTF-8", "text": "from django.db import models\n\n# Create your models here.\nclass File(models.Model):\n file = models.FileField(blank=False, null=False)\n remark = models.CharField(max_length=20)\n timestamp = models.DateTimeField(auto_now_add=True)\n\n def __str__(self):\n return \"{} {}\".format(self.remark,self.timestamp)\n\nclass img_bnc(models.Model):\n title = models.TextField()\n img = models.ImageField(upload_to='bnc/')\n\n def __str__(self):\n return self.title\n\nclass img_h(models.Model):\n title = models.TextField()\n img = models.ImageField(upload_to='h/')\n\n def __str__(self):\n return self.title\n\nclass img_s(models.Model):\n title = models.TextField()\n img = 
models.ImageField(upload_to='s/')\n\n def __str__(self):\n return self.title\n\nclass img_v(models.Model):\n title = models.TextField()\n img = models.ImageField(upload_to='v/')\n\n def __str__(self):\n return self.title\n\nclass img_gray(models.Model):\n title = models.TextField()\n img = models.ImageField(upload_to='gray/')\n\n def __str__(self):\n return self.title\n\nclass tb_ciri(models.Model):\n h = models.FloatField(default=0.0000000000000000, null=True)\n s = models.FloatField(default=0.0000000000000000, null=True)\n v = models.FloatField(default=0.0000000000000000, null=True)\n cont0 = models.FloatField(default=0.0000000000000000, null=True)\n cont45 = models.FloatField(default=0.0000000000000000, null=True)\n cont90 = models.FloatField(default=0.0000000000000000, null=True)\n cont135 = models.FloatField(default=0.0000000000000000, null=True)\n diss0 = models.FloatField(default=0.0000000000000000, null=True)\n diss45 = models.FloatField(default=0.0000000000000000, null=True)\n diss90 = models.FloatField(default=0.0000000000000000, null=True)\n diss135 = models.FloatField(default=0.0000000000000000, null=True)\n asm0 = models.FloatField(default=0.0000000000000000, null=True)\n asm45 = models.FloatField(default=0.0000000000000000, null=True)\n asm90 = models.FloatField(default=0.0000000000000000, null=True)\n asm135 = models.FloatField(default=0.0000000000000000, null=True)\n ener0 = models.FloatField(default=0.0000000000000000, null=True)\n ener45 = models.FloatField(default=0.0000000000000000, null=True)\n ener90 = models.FloatField(default=0.0000000000000000, null=True)\n ener135 = models.FloatField(default=0.0000000000000000, null=True)\n homo0 = models.FloatField(default=0.0000000000000000, null=True)\n homo45 = models.FloatField(default=0.0000000000000000, null=True)\n homo90 = models.FloatField(default=0.0000000000000000, null=True)\n homo135 = models.FloatField(default=0.0000000000000000, null=True)\n corr0 = models.FloatField(default=0.0000000000000000, null=True)\n corr45 = models.FloatField(default=0.0000000000000000, null=True)\n corr90 = models.FloatField(default=0.0000000000000000, null=True)\n corr135 = models.FloatField(default=0.0000000000000000, null=True)\n def __str__(self):\n return \"{}\".format(self.id)" } ]
8
a39676/SomethingInLife
https://github.com/a39676/SomethingInLife
015b75b24d02d4ef4f25c9c97c1c32d98ff7936d
0696d692c7694dbed080865cdeb88e09bb265227
810c99b2b89ea954c0b224bfb3613369da0d1323
refs/heads/master
2020-07-03T16:54:05.023000
2016-10-25T15:02:10
2016-10-25T15:02:10
67,511,942
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.7428571581840515, "alphanum_fraction": 0.7485714554786682, "avg_line_length": 18.55555534362793, "blob_id": "64ff03fcb55416f364ca2cf3abf496612647aa17", "content_id": "a03c2322559f17b6783b28ba4ad5684c63253428", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 175, "license_type": "no_license", "max_line_length": 36, "num_lines": 9, "path": "/mysite05/mysite05/polls/admin.py", "repo_name": "a39676/SomethingInLife", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\nfrom django.contrib import admin\n\nfrom .models import Question, Choice\n\nadmin.site.register(Question)\nadmin.site.register(Choice)" }, { "alpha_fraction": 0.5870967507362366, "alphanum_fraction": 0.60161292552948, "avg_line_length": 28.5238094329834, "blob_id": "30fe0898fe596086e61151e9da5d5a4668b236a3", "content_id": "fb5d2b7ef0d426fc1a2652001d9b87d54697c7f0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 666, "license_type": "no_license", "max_line_length": 83, "num_lines": 21, "path": "/mysite05/mysite05/polls/urls.py", "repo_name": "a39676/SomethingInLife", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\nfrom django.conf.urls import include, url\n\nfrom . import views\n\n# 给定命名空间 有此 模板中的detail/results等 可使用 polls:detail/ polls:results\napp_name = 'polls'\n\nurlpatterns = [\n\n url(r'^$', views.IndexView.as_view(), name='index'),\n # 未引用 as_view() 的url\n # 传参给question_id\n # url(r'^(?P<question_id>[0-9]+)/$', views.detail, name='detail'),\n url(r'^(?P<pk>[0-9]+)/$', views.DetailView.as_view(), name='detail'),\n url(r'^(?P<pk>[0-9]+)/results/$', views.ResultsView.as_view(), name='results'),\n url(r'^(?P<question_id>[0-9]+)/vote/$', views.vote, name='vote'),\n \n]\n" }, { "alpha_fraction": 0.6481481194496155, "alphanum_fraction": 0.6643518805503845, "avg_line_length": 25.875, "blob_id": "b69a26555a1429299a75a3a9a8b17ac7d1253f62", "content_id": "58ed6ec4efefacb4e7f21bda8f1bc3dda5d2779e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 432, "license_type": "no_license", "max_line_length": 66, "num_lines": 16, "path": "/mysite05/mysite05/urls.py", "repo_name": "a39676/SomethingInLife", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\nfrom django.conf.urls import include, url\nfrom django.contrib import admin\nfrom mysite05.views import hello, current_datetime\n\nurlpatterns = [\n url(r'^admin/', include(admin.site.urls)),\n url(r'^polls/', include('mysite05.polls.urls')),\n url(r'^financeClear/', include('mysite05.financeClear.urls')),\n url(r'^hello/', hello),\n url(r'^time/', current_datetime),\n \n\n]\n\n\n" }, { "alpha_fraction": 0.7766990065574646, "alphanum_fraction": 0.7766990065574646, "avg_line_length": 25, "blob_id": "3452cf633ef10f9d3af51a50818a32e011771608", "content_id": "eaadd22cb2a42a5ab6d7d8d9007dbcb0532f8de9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 103, "license_type": "no_license", "max_line_length": 46, "num_lines": 4, "path": "/mysite05/blog/views.py", "repo_name": "a39676/SomethingInLife", "src_encoding": "UTF-8", "text": "from django.shortcuts import render\n\ndef hello(request):\n return HttpResponse(\"Hello world in blog\")" }, { "alpha_fraction": 0.6820809245109558, "alphanum_fraction": 0.6936416029930115, "avg_line_length": 16.299999237060547, 
"blob_id": "7f38c4c3f51bdbd2d21c0d0e6ef1d92510a2eb2c", "content_id": "b11f23338047e474a2068a65cf92e7c04e999c54", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 173, "license_type": "no_license", "max_line_length": 39, "num_lines": 10, "path": "/mysite05/mysite05/polls/apps.py", "repo_name": "a39676/SomethingInLife", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\nfrom __future__ import unicode_literals\n\nfrom django.apps import AppConfig\n\n\nclass Polls2Config(AppConfig):\n name = 'polls'\n" }, { "alpha_fraction": 0.6060606241226196, "alphanum_fraction": 0.7515151500701904, "avg_line_length": 12.833333015441895, "blob_id": "94f60ce662b5b7b94c3de47a10077fd403c82e26", "content_id": "8e74262a3226ba2100b618f9f57ebebe091791f6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 165, "license_type": "no_license", "max_line_length": 33, "num_lines": 12, "path": "/README.md", "repo_name": "a39676/SomethingInLife", "src_encoding": "UTF-8", "text": "# SomethingInLife\n\n\nA start 2016-09-08\nJust want to change my life\nI hope some small and beautiful. \n\n2016-09-17\nTry to fix conflict.\n\n2016-09-18\nIt would be better." } ]
6
Prabhat-Thapa45/Wikipedia
https://github.com/Prabhat-Thapa45/Wikipedia
81d79d9c5fe826097ad11fc20ebddf3c7b6ef07e
d0fd1a8287d342bbe5531f33e327db9f610edcbf
ef58f07e4cb82a4207926d1fe688b79ae51e8bf5
refs/heads/master
2023-07-25T09:12:28.011578
2021-09-01T03:42:03
2021-09-01T03:42:03
393,045,607
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6005361676216125, "alphanum_fraction": 0.6085790991783142, "avg_line_length": 31.434782028198242, "blob_id": "f1699357e43e2b9da009219b91f288ead25510c4", "content_id": "b313b56e02f706081232cb49bec5230657c2907f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 746, "license_type": "no_license", "max_line_length": 74, "num_lines": 23, "path": "/wiki.py", "repo_name": "Prabhat-Thapa45/Wikipedia", "src_encoding": "UTF-8", "text": "import random\nimport wikipedia\n\n\ndef get_wiki_info(value):\n try:\n print(wikipedia.summary(value, auto_suggest=True))\n except wikipedia.exceptions.DisambiguationError as e:\n s = random.choice(e.options)\n for i, j in zip(range(1, 11), e.options[:10]):\n print(i, j)\n value = input(\"Enter the number beside the option or new word:\\n\")\n try:\n value = int(value)\n print(wikipedia.summary(e.options[value-1]))\n except ValueError as err:\n print(wikipedia.summary(value))\nwhile True:\n search_value = input(\"Enter name for search:\\n\")\n get_wiki_info(search_value)\n yes_no = input(\"Type Y/y for next search.\\n\")\n if yes_no.upper() != \"Y\":\n break\n" }, { "alpha_fraction": 0.47058823704719543, "alphanum_fraction": 0.7058823704719543, "avg_line_length": 15.857142448425293, "blob_id": "3f5555aff38e7dc4c78a002eaa3c12a8b0a5bb01", "content_id": "33353012870d7a76019d1b43aed6c23b94b02be9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 119, "license_type": "no_license", "max_line_length": 21, "num_lines": 7, "path": "/requirements.txt", "repo_name": "Prabhat-Thapa45/Wikipedia", "src_encoding": "UTF-8", "text": "beautifulsoup4==4.9.3\ncertifi==2021.5.30\nrequests==2.26.0\nidna==3.2\nsoupsieve==2.2.1\nurllib3==1.26.6\nwikipedia==1.4.0\n\n" }, { "alpha_fraction": 0.49295774102211, "alphanum_fraction": 0.5352112650871277, "avg_line_length": 11, "blob_id": "984d6cb11ec75598350e540b3183d34fcb2788f3", "content_id": "9c6864847a1f5d6bd1a69b30345dfa0a96053575", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 71, "license_type": "no_license", "max_line_length": 23, "num_lines": 6, "path": "/test_one.py", "repo_name": "Prabhat-Thapa45/Wikipedia", "src_encoding": "UTF-8", "text": "def func(a):\n return a - 1\n\n\ndef test_one():\n assert func(6) == 5" }, { "alpha_fraction": 0.8059298992156982, "alphanum_fraction": 0.8113207817077637, "avg_line_length": 122.66666412353516, "blob_id": "6ad300bb7045c5f816d7eb0108acb39368a33853", "content_id": "1f78ea711ca93440b725ab147133acbc8c3cff73", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 371, "license_type": "no_license", "max_line_length": 357, "num_lines": 3, "path": "/README.md", "repo_name": "Prabhat-Thapa45/Wikipedia", "src_encoding": "UTF-8", "text": "# Wikipedia\n\nThis program makes uses of wikipedia module. This allows us to enter search keyword and get results from wikipedia. It can also deal with ambigious exception of wikipedia module. Where more than two topics are matched for the same keyword. Here those ambigius top 10 topics are listed and user is asked to choose from them or give more specific description.\n" } ]
4
gtzakis/assignment-2016-1
https://github.com/gtzakis/assignment-2016-1
58bccc3596d32d3782416277540fe6dd1de3139e
68dfc3cd7e8e055fe806de50b7cf117b4dbe711f
4f4545450756f95f4fdb9c92ab718643a98317e9
refs/heads/master
2020-12-13T19:55:58.913715
2016-03-25T22:06:40
2016-03-25T22:06:40
54,386,417
0
0
null
2016-03-21T12:21:23
2016-03-03T18:02:31
2016-03-16T09:17:35
null
[ { "alpha_fraction": 0.4027459919452667, "alphanum_fraction": 0.41418763995170593, "avg_line_length": 22.729412078857422, "blob_id": "887f0574fdcbfbea8becc6279db43b41e3ea40b5", "content_id": "8f2d1e3203ea66507c0e2a1836057f1ece4d728c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2187, "license_type": "no_license", "max_line_length": 70, "num_lines": 85, "path": "/trusses.py", "repo_name": "gtzakis/assignment-2016-1", "src_encoding": "UTF-8", "text": "import argparse\r\n\r\nparser = argparse.ArgumentParser()\r\nparser.add_argument(\"onomarxeiou\", help=\"name of file\",default='.txt')\r\nparser.add_argument(\"megethos\", help=\"arithmos\",\r\n type=int, default=3)\r\nargs = parser.parse_args()\r\nmegethos = args.megethos\r\nonomarxeiou = args.onomarxeiou\r\n\r\n\r\ne=[]\r\nlex={}\r\nwith open(onomarxeiou) as arxeio:\r\n \r\n \r\n for grammi in arxeio:\r\n kom=grammi.split()\r\n e.append(kom)\r\n for kom in e:\r\n kleidi = kom[0]\r\n timi = kom[1]\r\n\r\n if kleidi in lex:\r\n lex[kleidi].append(timi)\r\n else:\r\n lex[kleidi] = [timi]\r\n if timi in lex:\r\n lex[timi].append(kleidi)\r\n else:\r\n lex[timi] = [kleidi]\r\n for k in lex:\r\n t = k\r\n for k in lex:\r\n u = k\r\n if t !=u:\r\n tomi = list(set(lex[t]) & set(lex[u]))\r\n if len(tomi) < 1:\r\n if u in lex[t]:\r\n lex[t].remove(u)\r\n lex[u].remove(t)\r\n \r\n \r\n lexiko2 = {}\r\n for i in lex:\r\n kleidi2 = i\r\n for j in lex[kleidi2]:\r\n values = j\r\n if len(lex[kleidi2]) != 0:\r\n if kleidi2 not in lexiko2:\r\n lexiko2[kleidi2] = [values]\r\n else:\r\n lexiko2[kleidi2].append(values)\r\n \r\n\r\n newlist = []\r\n for i in lexiko2:\r\n lista= []\r\n key = i\r\n lista.append(key)\r\n for j in lexiko2[key]:\r\n value = j\r\n lista.append(value)\r\n lista.sort(key=None, reverse=False)\r\n newlist.append(lista)\r\n i=0\r\n\r\n \r\n while i<len(newlist):\r\n j=0\r\n while j<len(newlist):\r\n if j!=i:\r\n if newlist[i] == newlist[j]:\r\n del newlist[j]\r\n j= j+1\r\n i= i + 1\r\n i=0\r\n while i<len(newlist):\r\n i=i+1\r\n\r\n i=0\r\n while i<len(newlist):\r\n pleiada = tuple(newlist[i])\r\n print(pleiada)\r\n i=i+1\r\n \r\n \r\n \r\n \r\n\r\n \r\n \r\n\r\n" } ]
1
TanzinaRahman/PythonCode36
https://github.com/TanzinaRahman/PythonCode36
9822e1cfc2b504293c9cab8a6e1eaddb53e730c4
18f70f8214cebd2e3b813e6a77ce235aaa389b3b
598b6318d963afc98ae4835af61f3befdc5bf75f
refs/heads/main
2023-06-26T10:27:19.449555
2021-07-30T14:43:51
2021-07-30T14:43:51
391,097,361
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.5568445324897766, "alphanum_fraction": 0.5777262449264526, "avg_line_length": 24.294116973876953, "blob_id": "b4e23379b9d6062b08bc8079b82b9829fbbd75e1", "content_id": "549fddb38ccc2e37b8bc6efe73c2e0666ecf3399", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 431, "license_type": "no_license", "max_line_length": 42, "num_lines": 17, "path": "/PythonProgram36.py", "repo_name": "TanzinaRahman/PythonCode36", "src_encoding": "UTF-8", "text": "# List as input from user string\nnumOfWords = 0\nnumOfLetters = 0\nnumOfDigits = 0\n\ntext = input(\"Enter a text of numbers : \")\nfor x in text:\n x = x.lower()\n if x >= ' a ' and x <= ' z ':\n numOfLetters = numOfLetters + 1\n elif x >= '0' and x <= '9':\n numOfDigits = numOfDigits + 1\n elif x == ' ':\n numOfWords = numOfWords + 1\n print(numOfLetters)\n print(numOfDigits)\n print(numOfWords + 1)\n\n" } ]
1
jcampillay8/Python-Oden-de-seleccion
https://github.com/jcampillay8/Python-Oden-de-seleccion
19c9f032526339eae5becb98ef0360ba75dc9303
9f33182c737185b0ae1c1280949baf6870af8c39
23d14bcea566da9ef8fc81a2efbfe20afcf419e3
refs/heads/master
2023-04-06T13:35:08.054010
2021-04-08T15:19:47
2021-04-08T15:19:47
355,958,010
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5506172776222229, "alphanum_fraction": 0.5802469253540039, "avg_line_length": 20.3157901763916, "blob_id": "5abbb15b6bdb9c34f714653a95abeb946266a6e7", "content_id": "bdeb86c258f9e0eec5626570b891a3151d41513e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 405, "license_type": "no_license", "max_line_length": 62, "num_lines": 19, "path": "/selection_sort.py", "repo_name": "jcampillay8/Python-Oden-de-seleccion", "src_encoding": "UTF-8", "text": "arr=[8,1,5,9,7,0,6,4,2,3]\n\ndef selection_sort(lista):\n \n control=0\n\n for i in range(len(lista)):\n\n temp =lista[control]\n valor_menor = lista[lista.index(min(lista[control:]))]\n indice_menor = lista.index(min(lista[control:]))\n\n lista[control]=valor_menor\n lista[indice_menor]=temp \n control+=1 \n \n return lista\n\nprint(selection_sort(arr))\n" } ]
1
mikkerlo/tilis_visits
https://github.com/mikkerlo/tilis_visits
35f668012b480566215d2fb3dce652aadd919c08
fb702504441f436d50b185792c6156768d00f419
185559267930bc863543eae3d47276fae1d0644f
refs/heads/master
2023-06-27T14:25:08.101233
2021-07-27T18:35:21
2021-07-27T18:35:21
389,446,162
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6620791554450989, "alphanum_fraction": 0.6642909646034241, "avg_line_length": 29.593984603881836, "blob_id": "dee129063b4f5641c14f9b7174cc01d2131cbfdd", "content_id": "0281cb91cabfc78be194d77deb24b7d241ec975a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4147, "license_type": "permissive", "max_line_length": 112, "num_lines": 133, "path": "/bot.py", "repo_name": "mikkerlo/tilis_visits", "src_encoding": "UTF-8", "text": "from functools import total_ordering\nimport logging\nimport os\nfrom aiogram import Bot, Dispatcher, executor, types\nfrom db import Visit, Visiter\nfrom sqlalchemy.sql import func\nfrom text import RussianStrings\nfrom db import Session\nfrom datetime import datetime\n\n\ndef configure_logging():\n log_level = logging.INFO\n if os.environ.get(\"LOG_LEVEL\", \"\") == \"DEBUG\":\n log_level = logging.DEBUG\n log_config = {\n \"level\": log_level,\n \"format\": \"%(asctime)s\\t%(levelname)s\\t%(message)s\",\n \"datefmt\": \"%Y-%m-%d %H:%M:%S\",\n }\n\n logging.basicConfig(**log_config)\n\n\nconfigure_logging()\nTG_BOT_ENVIRON_VARIABLE_NAME = \"TG_TILIS_BOT\"\nTG_BOT_API_TOKEN = os.environ.get(TG_BOT_ENVIRON_VARIABLE_NAME, None)\nlogger = logging.getLogger()\nstrings = RussianStrings()\n\nif TG_BOT_API_TOKEN is None:\n logger.error(\n f\"{TG_BOT_ENVIRON_VARIABLE_NAME} is not found in enviroment. Please set it and try again\"\n )\n exit(0)\n\nbot = Bot(token=TG_BOT_API_TOKEN)\ndp = Dispatcher(bot)\n\n\ndef find_user_by_id(db_session, tg_id):\n visiter_query = db_session.query(Visiter).filter_by(tg_id=tg_id).first()\n return visiter_query\n\n\[email protected]_handler(commands=\"start\")\nasync def start_start_handler(message: types.Message):\n logger.info(f\"{message.from_user.full_name} has started the bot.\")\n db_session = Session()\n\n visiter = find_user_by_id(db_session, message.from_user.id)\n if visiter is None:\n visiter = Visiter(\n tg_id=message.from_user.id, name=message.from_user.full_name, donate_sum=0\n )\n db_session.add(visiter)\n\n visiter_balance = visiter.donate_sum\n for visit in visiter.visits:\n visiter_balance -= visit.total_payment\n\n db_session.commit()\n\n await message.reply(\n strings.get_start_text(message, visiter_balance),\n parse_mode=types.ParseMode.MARKDOWN,\n )\n\n\[email protected]_handler(commands=\"visit\")\nasync def start_visit_handler(message: types.Message):\n logger.info(f\"{message.from_user.full_name} added new visit\")\n db_session = Session()\n visit = Visit(date=datetime.now(), total_payment=0)\n visiter = find_user_by_id(db_session, message.from_user.id)\n if visiter is None:\n return await message.reply(\"Пожалуйста сначала зарегестрируйтесь в боте\")\n\n visit.visiters.append(visiter)\n logger.info(f\"Created new visit with id {visit.visit_id}\")\n\n keyboard_markup = types.InlineKeyboardMarkup(row_width=3)\n btns_and_data = ((\"Я туть\", f\"visit|1\"), (\"Я не туть\", f\"visit|0\"))\n keyboard_markup.row(*(types.InlineKeyboardButton(text, callback_data=data) for text, data in btns_and_data))\n \n visit_message = await message.reply(strings.get_visit_text(visit), reply_markup=keyboard_markup)\n visit.tg_message_id = visit_message.message_id\n \n db_session.add(visit)\n db_session.commit()\n\n return \n\n\[email protected]_query_handler(lambda query: query.data.startswith('visit|'))\nasync def inline_kb_visit_handler(query: types.CallbackQuery):\n status = query.data.split('|')[1]\n db_session = Session()\n visit = 
db_session.query(Visit).filter_by(tg_message_id=query.message.message_id).first()\n if visit is None:\n logger.error(\"Unexpected code\")\n return\n\n visiter = find_user_by_id(db_session, query.from_user.id)\n if visiter is None:\n return\n \n smth_changed = False\n if status == '1':\n if visiter not in visit.visiters:\n visit.visiters.append(visiter)\n smth_changed = True\n else:\n await query.answer('Слышь, ты уже туть!')\n \n elif status == '0':\n if visiter in visit.visiters:\n visit.visiters.remove(visiter)\n smth_changed = True\n else:\n await query.answer('Эй! Ты уже не туть!')\n \n print(list(visit.visiters))\n db_session.commit()\n\n if smth_changed:\n await query.message.edit_text(strings.get_visit_text(visit), reply_markup=query.message.reply_markup)\n\n return \n\n\nif __name__ == \"__main__\":\n executor.start_polling(dp, skip_updates=True)\n" }, { "alpha_fraction": 0.6890332102775574, "alphanum_fraction": 0.6897546648979187, "avg_line_length": 33.650001525878906, "blob_id": "8970ef6f7c2b1e5f5e642fdf40f634213ee244e6", "content_id": "77a57db88dee204679af02c490e4d96ce2352eaf", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1386, "license_type": "permissive", "max_line_length": 139, "num_lines": 40, "path": "/db.py", "repo_name": "mikkerlo/tilis_visits", "src_encoding": "UTF-8", "text": "from sqlalchemy import Column, Integer, String, DateTime, Float, Table, create_engine\nfrom sqlalchemy.orm import relationship, declarative_base, sessionmaker\nfrom sqlalchemy.sql.schema import ForeignKey\n\nBase = declarative_base()\nengine = create_engine(\"sqlite:///:memory:\", echo=True)\nSession = sessionmaker(bind=engine)\n\nvisit_visiter = Table(\n \"visit_visiter\",\n Base.metadata,\n Column(\"visit_id\", ForeignKey(\"visit.visit_id\")),\n Column(\"visiter_id\", ForeignKey(\"visiter.visiter_id\")),\n)\n\n\nclass Visiter(Base):\n __tablename__ = \"visiter\"\n visiter_id = Column(Integer, primary_key=True)\n name = Column(String)\n tg_id = Column(Integer)\n visits = relationship(\"Visit\", secondary=visit_visiter)\n donate_sum = Column(Integer)\n\n def __repr__(self) -> str:\n return f\"Visiter(visiter_id={self.visiter_id}, name={self.name}, tg_id={self.tg_id}, donate_sum={self.donate_sum})\"\n\n\nclass Visit(Base):\n __tablename__ = \"visit\"\n visit_id = Column(Integer, primary_key=True)\n date = Column(DateTime)\n total_payment = Column(Float, default=0)\n visiters = relationship(\"Visiter\", secondary=visit_visiter)\n tg_message_id = Column(Integer)\n\n def __repr__(self) -> str:\n return f\"Visit(visit_id={self.visit_id}, date={self.date}, total_payment={self.total_payment}, tg_message_id={self.tg_message_id})\"\n\nBase.metadata.create_all(engine)\n" }, { "alpha_fraction": 0.782608687877655, "alphanum_fraction": 0.782608687877655, "avg_line_length": 22, "blob_id": "731e1aaf0762cbb6f82bd58e4629ed6ffa1e9218", "content_id": "4c9dd149d4d3ba0d3085b613d0006264ace0ec4d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 46, "license_type": "permissive", "max_line_length": 30, "num_lines": 2, "path": "/README.md", "repo_name": "mikkerlo/tilis_visits", "src_encoding": "UTF-8", "text": "# tilis_visits\nSimple tg bot for tilis visits\n" }, { "alpha_fraction": 0.6649746298789978, "alphanum_fraction": 0.6649746298789978, "avg_line_length": 40.47368240356445, "blob_id": "5503f85b5769d2a49446c0b881d833fb69708b84", "content_id": "c72c50d9ddc42a2e681deba2cadf6a62813eaf0f", 
"detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 876, "license_type": "permissive", "max_line_length": 117, "num_lines": 19, "path": "/text.py", "repo_name": "mikkerlo/tilis_visits", "src_encoding": "UTF-8", "text": "from typing import List\nfrom aiogram import types\nfrom db import Visit, Visiter\nfrom datetime import datetime\n\nclass RussianStrings():\n @staticmethod\n def get_start_text(message: types.Message, balance: int):\n return f\"Привет, {message.from_user.full_name}! Ваш текущий баланс {balance}€\"\n\n @staticmethod\n def get_visit_text(visit: Visit):\n names = [v.name for v in visit.visiters]\n list_of_names = '\\n'.join(names)\n if names:\n current_cost = f\"Текущая стоимость: {visit.total_payment} или {visit.total_payment / len(names)} на чела\"\n else:\n current_cost = f\"Текущая стоимость: {visit.total_payment}\"\n return f\"Визит в Тилис {visit.date.strftime('%d.%m %H:%M')}\\n{current_cost}\\nУже вписались:\\n{list_of_names}\"\n" } ]
4
AbdelkaderMH/sarcasm_wanlp
https://github.com/AbdelkaderMH/sarcasm_wanlp
199db0896f8851f51b8c28864d9ba1fbfead2f99
76850cea42b72b6a0da74532dd8eaa90a926630b
c92f6d6e788b1261a4646da4722d22715d952cb7
refs/heads/main
2023-05-03T01:25:35.795207
2021-05-25T11:29:09
2021-05-25T11:29:09
370,653,019
3
0
null
null
null
null
null
[ { "alpha_fraction": 0.6314946413040161, "alphanum_fraction": 0.6445410847663879, "avg_line_length": 35.70588302612305, "blob_id": "99ab14e0ff0ded0161045f880f6bd8bb9c51a4aa", "content_id": "31b563222cf00b78d8fb719640ad0e584232aa79", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4369, "license_type": "permissive", "max_line_length": 132, "num_lines": 119, "path": "/eval_sentiment.py", "repo_name": "AbdelkaderMH/sarcasm_wanlp", "src_encoding": "UTF-8", "text": "import argparse\nimport numpy as np\nimport torch\nimport torch.nn as nn\n\nimport preprocessing\nimport modeling\nfrom barbar import Bar\nimport random\n\nimport pandas as pd\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n\ndef evaluate(base_model, mt_classifier, iterator):\n all_sentiment_outputs = []\n all_sentiment_labels = []\n\n # set the model in eval phase\n base_model.eval()\n mt_classifier.eval()\n with torch.no_grad():\n for data_input in Bar(iterator):\n\n for k, v in data_input.items():\n data_input[k] = v.to(device)\n\n output, pooled = base_model(**data_input)\n sentiment_logits = mt_classifier(output, pooled)\n\n sentiment_probs = nn.Softmax(dim=1)(sentiment_logits).to(device)\n _, predicted_sentiment = torch.max(sentiment_probs, 1)\n all_sentiment_outputs.extend(predicted_sentiment.squeeze().int().cpu().numpy())\n\n return all_sentiment_outputs\n\n\ndef eval_full(config, test_loader):\n base_model = modeling.TransformerLayer(pretrained_path=config['pretrained_path'], both=True).to(device)\n base_model.load_state_dict(torch.load(\"./ckpts/best_basemodel_sentiment_\"+config[\"lm\"]+\".pth\"))\n base_model.to(device)\n\n mtl_classifier = modeling.CLSClassifier(base_model.output_num(),class_num=3)\n mtl_classifier.load_state_dict(torch.load(\"./ckpts/best_mtl_cls_sentiment_\"+config[\"lm\"]+\".pth\"))\n mtl_classifier.to(device)\n all_sentiment_outputs = evaluate(base_model, mtl_classifier, test_loader)\n return all_sentiment_outputs\n\n\nif __name__ == \"__main__\":\n def str2bool(v):\n if v.lower() in ('yes', 'true', 't', 'y', '1'):\n return True\n elif v.lower() in ('no', 'false', 'f', 'n', '0'):\n return False\n else:\n raise argparse.ArgumentTypeError('Unsupported value encountered.')\n\n\n parser = argparse.ArgumentParser(description='Conditional Domain Adversarial Network')\n parser.add_argument('--lm_pretrained', type=str, default='arabert',\n help=\" path of pretrained transformer\")\n parser.add_argument('--lr', type=float, default=2e-5, help=\"learning rate\")\n parser.add_argument('--lr_mult', type=float, default=1, help=\"dicriminator learning rate multiplier\")\n\n parser.add_argument('--batch_size', type=int, default=36, help=\"training batch size\")\n parser.add_argument('--seed', type=int, default=12345)\n parser.add_argument('--num_worker', type=int, default=4)\n\n\n parser.add_argument('--epochs', type=int, default=40, metavar='N',\n help='number of epochs to train (default: 10)')\n args = parser.parse_args()\n\n\n config = {}\n config['args'] = args\n config[\"output_for_test\"] = True\n config['epochs'] = args.epochs\n config[\"class_num\"] = 1\n config[\"lr\"] = args.lr\n config['lr_mult'] = args.lr_mult\n config['batch_size'] = args.batch_size\n config['lm'] = args.lm_pretrained\n\n dosegmentation = False\n if args.lm_pretrained == 'arbert':\n config['pretrained_path'] = \"UBC-NLP/ARBERT\"\n elif args.lm_pretrained == 'marbert':\n config['pretrained_path'] = \"UBC-NLP/MARBERT\"\n elif 
args.lm_pretrained == 'larabert':\n config['pretrained_path'] = \"aubmindlab/bert-large-arabertv02\"\n dosegmentation = True\n else:\n config['pretrained_path'] = 'aubmindlab/bert-base-arabertv02'\n dosegmentation = True\n\n label_dict = {\n 0: \"NEG\",\n 1: \"NEU\",\n 2: \"POS\"\n }\n seeds = [12345]#, 12346, 12347, 12348, 12349]\n for RANDOM_SEED in seeds:\n random.seed(RANDOM_SEED)\n np.random.seed(RANDOM_SEED)\n torch.manual_seed(RANDOM_SEED)\n torch.cuda.manual_seed(RANDOM_SEED)\n torch.cuda.manual_seed_all(RANDOM_SEED)\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n\n test_loader = preprocessing.loadTestData(batchsize=args.batch_size, num_worker= 1, pretraine_path=config['pretrained_path'])\n all_sentiment = eval_full(config, test_loader)\n submission = pd.DataFrame(columns=['Sentiment'])\n submission[\"Sentiment\"] = all_sentiment\n submission[\"Sentiment\"].replace(label_dict, inplace=True)\n submission.to_csv(\"results/sentiment/CS-UM6P_Subtask_2_MARBERT_CLSATT.csv\", index=False, header=False)\n\n" }, { "alpha_fraction": 0.6780821681022644, "alphanum_fraction": 0.7260273694992065, "avg_line_length": 35.75, "blob_id": "bbc2d85d2cebabed44350417f971cb0f727947f1", "content_id": "ba5f7071723fbad38c449f1fd6e329be0f722618", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 212, "license_type": "permissive", "max_line_length": 76, "num_lines": 4, "path": "/test.py", "repo_name": "AbdelkaderMH/sarcasm_wanlp", "src_encoding": "UTF-8", "text": "import text_normalization\n\ntweet = \"🔥🔥🔥🔥🔥🔥🔥🔥🔥🔥🔥🔥كود خصم موقع نون :@moh AZ205 #انتخابات_مجلس_الامه_2020\"\nprint(text_normalization.normalize(tweet))" }, { "alpha_fraction": 0.6029446721076965, "alphanum_fraction": 0.6102465987205505, "avg_line_length": 34.70085525512695, "blob_id": "959e90d3c8b9e87503b3cd556946dd8092ad99af", "content_id": "dbc61cf830db0d306babc07b880db6a31ad7a335", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8354, "license_type": "permissive", "max_line_length": 99, "num_lines": 234, "path": "/modeling.py", "repo_name": "AbdelkaderMH/sarcasm_wanlp", "src_encoding": "UTF-8", "text": "import numpy as np\nimport torch.nn as nn\nimport torch\nfrom transformers import AutoModel\nimport torch.nn.functional as F\nimport utils\nfrom layers import AttentionWithContext, MultiHeadAttention\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\nclass TransformerLayer(nn.Module):\n def __init__(self, dropout_prob=0.15,both=False,\n pretrained_path='aubmindlab/bert-base-arabert'):\n super(TransformerLayer, self).__init__()\n\n self.both = both\n self.transformer = AutoModel.from_pretrained(pretrained_path)\n self.dropout1 = nn.Dropout(dropout_prob)\n self.dropout2 = nn.Dropout(dropout_prob)\n\n\n def forward(self, input_ids=None, attention_mask=None):\n outputs = self.transformer(\n input_ids=input_ids,\n attention_mask=attention_mask\n )\n\n pooled = outputs[1]\n pooled = self.dropout1(pooled)\n if self.both:\n output = outputs[0]\n output = self.dropout2(output)\n return output, pooled\n else:\n return pooled\n\n def output_num(self):\n return self.transformer.config.hidden_size\n\n\nclass MTClassifier(nn.Module):\n def __init__(self, in_feature, class_num_sar=1, class_num_sent=3 , dropout_prob=0.2):\n super(MTClassifier, self).__init__()\n self.W_inter = nn.Parameter(nn.init.xavier_normal_(torch.tensor((in_feature, in_feature))))\n self.b_inter = 
nn.Parameter(torch.zeros(in_feature))\n self.sar_attention = AttentionWithContext(in_feature)\n self.sent_attention = AttentionWithContext(in_feature)\n\n self.sracasmClassifier = nn.Sequential(\n nn.Linear(3 * in_feature, in_feature),\n nn.Dropout(dropout_prob),\n nn.ReLU(),\n nn.Linear(in_feature, class_num_sar)\n )\n self.SentimentClassifier = nn.Sequential(\n nn.Linear(3 * in_feature, in_feature),\n nn.Dropout(dropout_prob),\n nn.ReLU(),\n nn.Linear(in_feature, class_num_sent)\n )\n\n self.apply(utils.init_weights)\n\n def forward(self, x, pooled):\n att_sar = self.sar_attention(x)\n att_sent = self.sent_attention(x)\n\n sar_x = att_sar.mul(torch.sigmoid(torch.matmul(att_sar, self.W_inter) + self.b_inter))\n sent_x = att_sent.mul(torch.sigmoid(torch.matmul(att_sent, self.W_inter) + self.b_inter))\n\n sar_xx = torch.cat([sar_x, att_sar, pooled], 1)\n sent_xx = torch.cat([sent_x, att_sent, pooled], 1)\n\n sar_out = self.sracasmClassifier(sar_xx)\n sent_out = self.SentimentClassifier(sent_xx)\n return sar_out, sent_out\n\nclass MTClassifier1(nn.Module):\n def __init__(self, in_feature, class_num_sar=1, class_num_sent=3 , dropout_prob=0.2):\n super(MTClassifier1, self).__init__()\n self.sar_attention = AttentionWithContext(in_feature)\n self.sent_attention = AttentionWithContext(in_feature)\n\n self.sracasmClassifier = nn.Sequential(\n nn.Linear(2 * in_feature, in_feature),\n nn.Dropout(dropout_prob),\n nn.ReLU(),\n nn.Linear(in_feature, class_num_sar)\n )\n self.SentimentClassifier = nn.Sequential(\n nn.Linear(2 * in_feature, in_feature),\n nn.Dropout(dropout_prob),\n nn.ReLU(),\n nn.Linear(in_feature, class_num_sent)\n )\n\n self.apply(utils.init_weights)\n\n def forward(self, x, pooled):\n att_sar = self.sar_attention(x)\n att_sent = self.sent_attention(x)\n\n sar_xx = torch.cat([att_sar, pooled], 1)\n sent_xx = torch.cat([att_sent, pooled], 1)\n\n sar_out = self.sracasmClassifier(sar_xx)\n sent_out = self.SentimentClassifier(sent_xx)\n return sar_out, sent_out\n\nclass MTClassifier2(nn.Module):\n def __init__(self, in_feature, class_num_sar=1, class_num_sent=3 , dropout_prob=0.2):\n super(MTClassifier2, self).__init__()\n self.W_inter = nn.Parameter(nn.init.xavier_normal_(torch.ones((in_feature, in_feature))))\n self.b_inter = nn.Parameter(torch.zeros(in_feature))\n self.sar_attention = AttentionWithContext(in_feature)\n self.sent_attention = AttentionWithContext(in_feature)\n\n self.sracasmClassifier = nn.Sequential(\n nn.Linear(2 * in_feature, in_feature),\n nn.Dropout(dropout_prob),\n nn.ReLU(),\n nn.Linear(in_feature, class_num_sar)\n )\n self.SentimentClassifier = nn.Sequential(\n nn.Linear(2 * in_feature, in_feature),\n nn.Dropout(dropout_prob),\n nn.ReLU(),\n nn.Linear(in_feature, class_num_sent)\n )\n\n self.apply(utils.init_weights)\n\n def forward(self, x, pooled):\n att_sar = self.sar_attention(x)\n att_sent = self.sent_attention(x)\n\n sar_x = att_sar.mul(torch.sigmoid(torch.matmul(att_sar, self.W_inter) + self.b_inter))\n sent_x = att_sent.mul(torch.sigmoid(torch.matmul(att_sent, self.W_inter) + self.b_inter))\n\n sar_xx = torch.cat([sar_x, pooled], 1)\n sent_xx = torch.cat([sent_x, pooled], 1)\n\n sar_out = self.sracasmClassifier(sar_xx)\n sent_out = self.SentimentClassifier(sent_xx)\n return sar_out, sent_out\n\nclass MTClassifier0(nn.Module):\n def __init__(self, in_feature, class_num_sar=1, class_num_sent=3 , dropout_prob=0.2):\n super(MTClassifier0, self).__init__()\n\n self.sracasmClassifier = nn.Linear(in_feature, class_num_sar)\n self.SentimentClassifier = 
nn.Linear(in_feature, class_num_sent)\n self.apply(utils.init_weights)\n\n def forward(self, x, pooled):\n sar_out = self.sracasmClassifier(pooled)\n sent_out = self.SentimentClassifier(pooled)\n return sar_out, sent_out\n\nclass MTClassifier4(nn.Module):\n def __init__(self, in_feature, class_num_sar=1, class_num_sent=3 , dropout_prob=0.2):\n super(MTClassifier4, self).__init__()\n self.W_inter = nn.Parameter(nn.init.xavier_normal_(torch.ones((in_feature, in_feature))))\n self.b_inter = nn.Parameter(torch.zeros(in_feature))\n self.sar_attention = AttentionWithContext(in_feature)\n self.sent_attention = AttentionWithContext(in_feature)\n self.auxTaskCls = nn.Linear(in_feature, 1)\n\n self.sracasmClassifier = nn.Sequential(\n nn.Linear(2 * in_feature, in_feature),\n nn.Dropout(dropout_prob),\n nn.ReLU(),\n nn.Linear(in_feature, class_num_sar)\n )\n self.SentimentClassifier = nn.Sequential(\n nn.Linear(2 * in_feature, in_feature),\n nn.Dropout(dropout_prob),\n nn.ReLU(),\n nn.Linear(in_feature, class_num_sent)\n )\n\n self.apply(utils.init_weights)\n\n def forward(self, x, pooled):\n att_sar = self.sar_attention(x)\n att_sent = self.sent_attention(x)\n\n att_all = torch.cat([att_sar, att_sent], 0)\n out_aux = self.auxTaskCls(att_all)\n\n sar_x = att_sar.mul(torch.sigmoid(torch.matmul(att_sar, self.W_inter) + self.b_inter))\n sent_x = att_sent.mul(torch.sigmoid(torch.matmul(att_sent, self.W_inter) + self.b_inter))\n\n\n sar_xx = torch.cat([sar_x, pooled], 1)\n sent_xx = torch.cat([sent_x, pooled], 1)\n\n sar_out = self.sracasmClassifier(sar_xx)\n sent_out = self.SentimentClassifier(sent_xx)\n return sar_out, sent_out, out_aux\n\nclass CLSClassifier(nn.Module):\n def __init__(self, in_feature, class_num=1, dropout_prob=0.2):\n super(CLSClassifier, self).__init__()\n self.attention = AttentionWithContext(in_feature)\n\n self.Classifier = nn.Sequential(\n nn.Linear(2 * in_feature, in_feature),\n nn.Dropout(dropout_prob),\n nn.ReLU(),\n nn.Linear(in_feature, class_num)\n )\n\n self.apply(utils.init_weights)\n\n def forward(self, x, pooled):\n att = self.attention(x)\n\n xx = torch.cat([att, pooled], 1)\n\n out = self.Classifier(xx)\n return out\n\nclass CLSlayer(nn.Module):\n def __init__(self, in_feature, class_num=1):\n super(CLSlayer, self).__init__()\n\n self.Classifier = nn.Linear(in_feature, class_num)\n self.apply(utils.init_weights)\n\n def forward(self, x, pooled):\n\n out = self.Classifier(pooled)\n\n return out\n" }, { "alpha_fraction": 0.5827886462211609, "alphanum_fraction": 0.5969498753547668, "avg_line_length": 31.785715103149414, "blob_id": "85031fb5fbf956addff2fc87fbc9c47d7fef7737", "content_id": "775427c304ce2ff82ef2799b2ed8f3135a01ea06", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3672, "license_type": "permissive", "max_line_length": 81, "num_lines": 112, "path": "/network.py", "repo_name": "AbdelkaderMH/sarcasm_wanlp", "src_encoding": "UTF-8", "text": "import numpy as np\nimport torch.nn as nn\nimport torch\nfrom transformers import AutoModel\nimport torch.nn.functional as F\n\ndef init_weights(m):\n classname = m.__class__.__name__\n if classname.find('Conv2d') != -1 or classname.find('ConvTranspose2d') != -1:\n nn.init.kaiming_uniform_(m.weight)\n nn.init.zeros_(m.bias)\n elif classname.find('BatchNorm') != -1:\n nn.init.normal_(m.weight, 1.0, 0.02)\n nn.init.zeros_(m.bias)\n elif classname.find('Linear') != -1:\n nn.init.xavier_normal_(m.weight)\n if m.bias is not None:\n 
nn.init.zeros_(m.bias)\n\n\n\n\nclass TransformerLayer(nn.Module):\n def __init__(self, dropout_prob=0.1,both=False,\n pretrained_path='aubmindlab/bert-base-arabert'):\n super(TransformerLayer, self).__init__()\n self.both = both\n self.transformer = AutoModel.from_pretrained(pretrained_path)\n #for param in self.transformer.parameters():\n # param.requires_grad = False\n self.dropout1 = nn.Dropout(dropout_prob)\n self.dropout2 = nn.Dropout(dropout_prob)\n self.attention = AttentionWithContext(self.output_num())\n\n\n def forward(self, input_ids=None, attention_mask=None):\n outputs = self.transformer(\n input_ids=input_ids,\n attention_mask=attention_mask\n )\n\n pooled = outputs[1]\n pooled = self.dropout1(pooled)\n if self.both:\n output = outputs[0]\n output = self.dropout2(output)\n gattention = self.attention(output)\n return output, pooled, gattention\n else:\n return pooled\n\n def get_parameters(self):\n return [{\"params\": self.parameters(),\"lr_mult\":1, 'decay_mult':1}]\n\n def output_num(self):\n return self.transformer.config.hidden_size\n\nclass AttentionWithContext(nn.Module):\n def __init__(self, hidden_dim):\n super(AttentionWithContext, self).__init__()\n\n self.attn = nn.Linear(hidden_dim, hidden_dim)\n self.contx = nn.Linear(hidden_dim, 1, bias=False)\n self.apply(init_weights)\n def forward(self, inp):\n u = torch.tanh_(self.attn(inp))\n a = F.softmax(self.contx(u), dim=1)\n s = (a * inp).sum(1)\n return s\n\n\nclass Dropout_on_dim(nn.modules.dropout._DropoutNd):\n \"\"\" Dropout that creates a mask based on 1 single input, and broadcasts\n this mask accross the batch\n \"\"\"\n\n def __init__(self, p, dim=1, **kwargs):\n super().__init__(p, **kwargs)\n self.dropout_dim = dim\n self.multiplier = 1.0 / (1.0 - self.p)\n\n def forward(self, X):\n mask = torch.bernoulli(X.new(X.size(self.dropout_dim)).fill_(1 - self.p))\n return X * mask * self.multiplier\n\nclass Classifier(nn.Module):\n def __init__(self, in_feature, class_num, dropout_prob=0.4):\n super(Classifier, self).__init__()\n self.num_class = class_num\n self.attention = AttentionWithContext(in_feature)\n self.ad_layer1 = nn.Linear(3 * in_feature, 512)\n self.ad_layer2 = nn.Linear(512, class_num)\n self.relu1 = nn.ReLU()\n self.dropout1 = nn.Dropout(dropout_prob)\n self.apply(init_weights)\n\n def forward(self, x, pooled, gattention):\n x = self.attention(x)\n #x = x + pooled\n # x = self.relu1(x)\n x = torch.cat([pooled, gattention, x], 1)\n x = self.ad_layer1(x)\n x = self.dropout1(x)\n x = self.relu1(x)\n y = self.ad_layer2(x)\n return y\n\n def output_num(self):\n return self.num_class\n\n def get_parameters(self):\n return [{\"params\": self.parameters(), \"lr_mult\": 1, 'decay_mult': 1}]\n" }, { "alpha_fraction": 0.5436328649520874, "alphanum_fraction": 0.5675501227378845, "avg_line_length": 32.630435943603516, "blob_id": "f0f697bbeebb34e9dd03aea69e6c53bae3cdb43a", "content_id": "03c31fab4953524418cad5c0db4e8abdb247b22f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1547, "license_type": "permissive", "max_line_length": 75, "num_lines": 46, "path": "/losses.py", "repo_name": "AbdelkaderMH/sarcasm_wanlp", "src_encoding": "UTF-8", "text": "import numpy as np\nimport torch.nn as nn\nimport torch\nimport torch.nn.functional as F\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n\nclass ModelMultitaskLoss(nn.Module):\n def __init__(self):\n super(ModelMultitaskLoss, self).__init__()\n self.eta = 
nn.Parameter(torch.tensor([0.0,0.0]))\n\n def forward(self, loss_1, loss_2,):\n total_loss_1 = loss_1 * torch.exp(-self.eta[0]) + self.eta[0]\n total_loss_2 = loss_2 * torch.exp(-self.eta[1]) + self.eta[1]\n\n total_loss = total_loss_1 + total_loss_2\n return total_loss\n\n\nclass F1_Loss(nn.Module):\n\n def __init__(self, epsilon=1e-7, num_class=3):\n super().__init__()\n self.epsilon = epsilon\n self.num_class = num_class\n self.ce = nn.CrossEntropyLoss().to(device)\n\n def forward(self, y_pred, y_true ):\n assert y_pred.ndim == 2\n assert y_true.ndim == 1\n loss = self.ce(y_pred, y_true)\n y_true = F.one_hot(y_true, self.num_class).float()\n y_pred = F.softmax(y_pred, dim=1)\n\n tp = (y_true * y_pred).sum(dim=0).float()\n tn = ((1 - y_true) * (1 - y_pred)).sum(dim=0).float()\n fp = ((1 - y_true) * y_pred).sum(dim=0).float()\n fn = (y_true * (1 - y_pred)).sum(dim=0).float()\n\n precision = tp / (tp + fp + self.epsilon)\n recall = tp / (tp + fn + self.epsilon)\n\n f1 = 2 * (precision * recall) / (precision + recall + self.epsilon)\n f1 = f1.clamp(min=self.epsilon, max=1 - self.epsilon)\n return loss - f1.mean()\n" }, { "alpha_fraction": 0.5735719799995422, "alphanum_fraction": 0.5798894166946411, "avg_line_length": 35.89320373535156, "blob_id": "2b0a2f3b42d82a6f5e2bf584f27316285069d5ac", "content_id": "7aad55295063d031639daeade91631e5542d1262", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3799, "license_type": "permissive", "max_line_length": 117, "num_lines": 103, "path": "/layers.py", "repo_name": "AbdelkaderMH/sarcasm_wanlp", "src_encoding": "UTF-8", "text": "import math\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nimport utils\n\nclass AttentionWithContext(nn.Module):\n def __init__(self, hidden_dim):\n super(AttentionWithContext, self).__init__()\n\n self.attn = nn.Linear(hidden_dim, hidden_dim)\n self.contx = nn.Linear(hidden_dim, 1, bias=False)\n self.apply(utils.init_weights)\n def forward(self, inp):\n u = torch.tanh_(self.attn(inp))\n a = F.softmax(self.contx(u), dim=1)\n s = (a * inp).sum(1)\n return s\n\nclass ScaledDotProductAttention(nn.Module):\n\n def forward(self, query, key, value, mask=None):\n dk = query.size()[-1]\n scores = query.matmul(key.transpose(-2, -1)) / math.sqrt(dk)\n if mask is not None:\n scores = scores.masked_fill(mask == 0, -1e9)\n attention = F.softmax(scores, dim=-1)\n return attention.matmul(value)\n\nclass MultiHeadAttention(nn.Module):\n\n def __init__(self,\n in_features,\n head_num,\n bias=True,\n activation=F.relu):\n \"\"\"Multi-head attention.\n :param in_features: Size of each input sample.\n :param head_num: Number of heads.\n :param bias: Whether to use the bias term.\n :param activation: The activation after each linear transformation.\n \"\"\"\n super(MultiHeadAttention, self).__init__()\n if in_features % head_num != 0:\n raise ValueError('`in_features`({}) should be divisible by `head_num`({})'.format(in_features, head_num))\n self.in_features = in_features\n self.head_num = head_num\n self.activation = activation\n self.bias = bias\n self.linear_q = nn.Linear(in_features, in_features, bias)\n self.linear_k = nn.Linear(in_features, in_features, bias)\n self.linear_v = nn.Linear(in_features, in_features, bias)\n self.linear_o = nn.Linear(in_features, in_features, bias)\n\n def forward(self, q, k, v, mask=None):\n q, k, v = self.linear_q(q), self.linear_k(k), self.linear_v(v)\n if self.activation is not None:\n q = self.activation(q)\n k = 
self.activation(k)\n v = self.activation(v)\n\n q = self._reshape_to_batches(q)\n k = self._reshape_to_batches(k)\n v = self._reshape_to_batches(v)\n if mask is not None:\n mask = mask.repeat(self.head_num, 1, 1)\n y = ScaledDotProductAttention()(q, k, v, mask)\n y = self._reshape_from_batches(y)\n\n y = self.linear_o(y)\n if self.activation is not None:\n y = self.activation(y)\n return y\n\n @staticmethod\n def gen_history_mask(x):\n \"\"\"Generate the mask that only uses history data.\n :param x: Input tensor.\n :return: The mask.\n \"\"\"\n batch_size, seq_len, _ = x.size()\n return torch.tril(torch.ones(seq_len, seq_len)).view(1, seq_len, seq_len).repeat(batch_size, 1, 1)\n\n def _reshape_to_batches(self, x):\n batch_size, seq_len, in_feature = x.size()\n sub_dim = in_feature // self.head_num\n return x.reshape(batch_size, seq_len, self.head_num, sub_dim) \\\n .permute(0, 2, 1, 3) \\\n .reshape(batch_size * self.head_num, seq_len, sub_dim)\n\n def _reshape_from_batches(self, x):\n batch_size, seq_len, in_feature = x.size()\n batch_size //= self.head_num\n out_dim = in_feature * self.head_num\n return x.reshape(batch_size, self.head_num, seq_len, in_feature) \\\n .permute(0, 2, 1, 3) \\\n .reshape(batch_size, seq_len, out_dim)\n\n def extra_repr(self):\n return 'in_features={}, head_num={}, bias={}, activation={}'.format(\n self.in_features, self.head_num, self.bias, self.activation,\n )" }, { "alpha_fraction": 0.48164063692092896, "alphanum_fraction": 0.524218738079071, "avg_line_length": 31, "blob_id": "f46ac8d71d93c2ea787fcf4878635e48b09411c8", "content_id": "bfe79f753bb7eb0457cd3e0efa1343b95d7b6826", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7776, "license_type": "permissive", "max_line_length": 114, "num_lines": 240, "path": "/text_normalization.py", "repo_name": "AbdelkaderMH/sarcasm_wanlp", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n'''\nCreated by: Mohamed Salem Elhady \nEmail: [email protected]\nText Normalization: V1 \n'''\nimport sys\nimport re\n#sys.setdefaultencoding('utf-8')\n##########################Clean Text Data #######################################\n########################Global Variable Declaration##############################\nlist_seeds = ['سبحان الله', 'الله أكبر', 'اللهم', 'بسم الله', 'يا رب', 'العضيم', 'سبحان', 'يارب', 'قران', 'quran',\n 'حديث', 'hadith', 'صلاه_الفجر', '﴾', 'ﷺ', 'صحيح البخاري', 'صحيح مسلم', 'يآرب', 'سورة']\nMaxWordPerTweet=7\n#################################################################################\n\ndef clean(sent):\n \"\"\"clean data from any English char, emoticons, underscore, and repeated > 2\n str -> str\"\"\"\n p1 = re.compile('\\W')\n p2 = re.compile('\\s+')\n sent = re.sub(r\"http\\S+\", \"\", sent)\n sent = ReplaceThreeOrMore(sent)\n sent = remove_unicode_diac(sent)\n sent = sent.replace('_', ' ')\n sent = re.sub(r'[A-Za-z0-9]', r'', sent)\n sent = re.sub(p1, ' ', sent)\n sent = re.sub(p2, ' ', sent)\n return sent\n\ndef normalize(sent):\n \"\"\"clean data from any English char, emoticons, underscore, and repeated > 2\n str -> str\"\"\"\n sent = re.sub(r'(?:@[\\w_]+)', \"user\", sent)\n sent = re.sub(r'http[s]?://(?:[a-z]|[0-9]|[$-_@.&amp;+]|[!*\\(\\),]|(?:%[0-9a-f][0-9a-f]))+', \"url\", sent)\n #sent = re.sub(r\"(?:\\#+[\\w_]+[\\w\\'_\\-]*[\\w_]+)\", \"hashtag\", sent)\n sent = ReplaceThreeOrMore(sent)\n sent = remove_unicode_diac(sent)\n sent = sent.replace('_', ' ')\n return sent\ndef ReplaceThreeOrMore(s):\n 
# pattern to look for three or more repetitions of any character, including\n # newlines.\n pattern = re.compile(r\"(.)\\1{2,}\", re.DOTALL)\n return pattern.sub(r\"\\1\\1\", s)\ndef norm_alif(text):\n text = text.replace(u\"\\u0625\", u\"\\u0627\") # HAMZA below, with LETTER ALEF\n #text = text.replace(u\"\\u0621\", u\"\\u0627\") # HAMZA, with LETTER ALEF\n text = text.replace(u\"\\u0622\", u\"\\u0627\") # ALEF WITH MADDA ABOVE, with LETTER ALEF\n text = text.replace(u\"\\u0623\", u\"\\u0627\") # ALEF WITH HAMZA ABOVE, with LETTER ALEF\n return text\ndef remove_unicode_diac(text):\n \"\"\"Takes Arabic in utf-8 and returns same text without diac\"\"\"\n # Replace diacritics with nothing\n text = text.replace(u\"\\u064B\", \"\") # fatHatayn\n text = text.replace(u\"\\u064C\", \"\") # Dammatayn\n text = text.replace(u\"\\u064D\", \"\") # kasratayn\n text = text.replace(u\"\\u064E\", \"\") # fatHa\n text = text.replace(u\"\\u064F\", \"\") # Damma\n text = text.replace(u\"\\u0650\", \"\") # kasra\n text = text.replace(u\"\\u0651\", \"\") # shaddah\n text = text.replace(u\"\\u0652\", \"\") # sukuun\n text = text.replace(u\"\\u0670\", \"`\") # dagger 'alif\n return text\ndef norm_taa(text):\n text=text.replace(u\"\\u0629\", u\"\\u0647\") # taa' marbuuTa, with haa'\n #text=text.replace(u\"\\u064A\", u\"\\u0649\") # yaa' with 'alif maqSuura\n return text\ndef norm_yaa(text):\n if len(text)!=0:\n if text[-1] == u\"\\u064A\":\n text = text[:-1] + text[-1].replace(u\"\\u064A\", u\"\\u0649\") # yaa' with 'alif maqSuura\n return text\n\ndef NormForWord2Vec(text):\n text=norm_taa(text)\n text=norm_yaa(text)\n text=norm_alif(text)\n return text\n\ndef remove_nonunicode2(Tweet):\n ## defining set of unicode ##\n #u\"\"\n #Tweet=Tweet.decode(\"utf-8\")\n UniLex={ ## This is list of all arabic unicode characters in addition to space (to separate words)\n u\"\\u0622\",\n u\"\\u0626\",\n u\"\\u0628\",\n u\"\\u062a\",\n u\"\\u062c\",\n u\"\\u06af\",\n u\"\\u062e\",\n u\"\\u0630\",\n u\"\\u0632\",\n u\"\\u0634\",\n u\"\\u0636\",\n u\"\\u0638\",\n u\"\\u063a\",\n u\"\\u0640\",\n u\"\\u0642\",\n u\"\\u0644\",\n u\"\\u0646\",\n u\"\\u0648\",\n u\"\\u064a\",\n u\"\\u0670\",\n u\"\\u067e\",\n u\"\\u0686\",\n u\"\\u0621\",\n u\"\\u0623\",\n u\"\\u0625\",\n u\"\\u06a4\",\n u\"\\u0627\",\n u\"\\u0629\",\n u\"\\u062b\",\n u\"\\u062d\",\n u\"\\u062f\",\n u\"\\u0631\",\n u\"\\u0633\",\n u\"\\u0635\",\n u\"\\u0637\",\n u\"\\u0639\",\n u\"\\u0641\",\n u\"\\u0643\",\n u\"\\u0645\",\n u\"\\u0647\",\n u\"\\u0649\",\n u\"\\u0671\",\n ' ',\n '\\n'\n }\n fin_tweet=\"\"\n for c in Tweet:\n if c in UniLex:\n fin_tweet=fin_tweet+c\n return fin_tweet\n\n###### Heuristics Calculations ######\ndef diac_counter(text):\n #text=text.decode(\"utf-8\")\n diac = [u\"\\u064B\",u\"\\u064C\", u\"\\u064D\", u\"\\u064E\", u\"\\u064F\", u\"\\u0650\", u\"\\u0651\", u\"\\u0652\", u\"\\u0670\"]\n diac_count=0\n for d in diac:\n diac_count+=text.count(d)\n# if d in text:\n# print(d)\n# diac_count+=1\n return diac_count\ndef check_seed(list_seeds, text):\n \"\"\n for word in list_seeds:\n text = text.lower()\n if word.decode(\"utf-8\") in text:\n return True\n return False\ndef EnglishCount(text):\n printable = ['e', 'a', 'o', 't', 'i']\n count = 0\n for ch in printable:\n count += text.count(ch.lower())\n return count\n########################################\n\n\n\ndef eliminate_single_char_words(Tweet):\n parts = Tweet.split(\" \")\n cleaned_line_parts = []\n for P in parts:\n if len(P) != 1:\n cleaned_line_parts.append(P)\n cleaned_line = ' 
'.join(cleaned_line_parts)\n return cleaned_line\ndef clean_unicode(Tweet):\n tweet=normalize(Tweet.strip(\"\\n\"))\n if len(tweet) !=0:\n sentence = []\n for word in tweet.split(\" \"):\n word = remove_unicode_diac(word)\n word = norm_alif(word)\n word = norm_taa(word)\n word = norm_yaa(word)\n word = normalize(word)\n sentence.append(word)\n tweet = ' '.join(sentence)\n tweet =remove_nonunicode2(tweet)\n tweet =eliminate_single_char_words(tweet)\n return tweet\n\ndef clean_unicode2(Tweet):\n KeepUniOnly(Tweet)\n tweet=normalize(Tweet.strip(\"\\n\"))\n if len(tweet) !=0:\n sentence = []\n for word in tweet.split(\" \"):\n word = remove_unicode_diac(word)\n word = normalize(word)\n sentence.append(word)\n tweet = ' '.join(sentence)\n tweet =remove_nonunicode2(tweet)\n tweet =eliminate_single_char_words(tweet)\n return tweet\n\ndef NormCorpusFinal(Tweet):\n tweet=KeepUniOnly(Tweet)\n tweet=NormForWord2Vec(tweet)\n return tweet\n\ndef KeepUniOnly(Tweet):## this one is without normalization\n tweet=Tweet.replace(\"# \",\" \")\n tweet=tweet.replace(\"#\",\" \")\n tweet=tweet.replace(\"_\",\" \")\n tweet=tweet.replace(u\"\\u0657\",\" \")\n tweet=tweet.replace(\"\\n\",\" \")\n tweet=remove_nonunicode2(tweet)\n tweet=eliminate_single_char_words(tweet)\n tweet=ReplaceThreeOrMore(tweet)\n return tweet\n\ndef get_charset(rawtext):\n chars = sorted(list(set(rawtext)))\n return chars\n\ndef DialectChecker(text):\n ##Based on Hueristics done by Hassan\n if (diac_counter(text)>5 or check_seed(list_seeds,text) or EnglishCount(text)>4 or \"<URL>\" in text\n or text.count('#') >2 or '\"' in text or text.count('@') or \"\\\"RT\" in text or len(text.split(\" \")) <7):\n return False\n else:\n return True\n\n###############################################################\n'''\nFread=open(\"Egypt_portion.txt\",'r')\nFwriter=open(\"Egypt_portion_norm.txt\",'w')\nfor line in Fread:\n cleaned_line=clean_unicode_for_w2v(line)\n Fwriter.write(str(cleaned_line))\nFwriter.close()\n'''\n" }, { "alpha_fraction": 0.6078410744667053, "alphanum_fraction": 0.6178801655769348, "avg_line_length": 37.942386627197266, "blob_id": "918aeaf0f8f15d972eb42db09fab165549d9c22b", "content_id": "a0c9886706b68ce722f48b4ff0cf1762c21df7a7", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9463, "license_type": "permissive", "max_line_length": 178, "num_lines": 243, "path": "/train_sarcasm.py", "repo_name": "AbdelkaderMH/sarcasm_wanlp", "src_encoding": "UTF-8", "text": "import argparse\nimport numpy as np\nimport torch\nimport torch.nn as nn\n\nimport preprocessing\nimport modeling\nfrom barbar import Bar\nimport random\n\n\nfrom sklearn.metrics import f1_score, accuracy_score, classification_report\nfrom losses import ModelMultitaskLoss\nimport utils\nfrom transformers import AdamW, get_linear_schedule_with_warmup\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n\ndef train(base_model, mt_classifier, iterator, optimizer, sar_criterion, scheduler):\n\n # set the model in eval phase\n base_model.train(True)\n mt_classifier.train(True)\n acc_sarcasm= 0\n loss_sarc= 0\n\n for data_input, label_input in Bar(iterator):\n\n for k, v in data_input.items():\n data_input[k] = v.to(device)\n\n for k, v in label_input.items():\n label_input[k] = v.to(device)\n\n optimizer.zero_grad()\n\n\n #forward pass\n\n sarcasm_target = label_input['sarcasm']\n\n # forward pass\n\n output, pooled = base_model(**data_input)\n sarcasm_logits = mt_classifier(output, 
pooled)\n sarcasm_probs = torch.sigmoid(sarcasm_logits).to(device)\n\n # compute the loss\n loss_sarcasm = sar_criterion(sarcasm_probs.squeeze(), sarcasm_target)\n #total_loss = multi_task_loss(loss_sentiment, loss_sarcasm)\n loss_sarc += loss_sarcasm.item()\n # backpropage the loss and compute the gradients\n loss_sarcasm.backward()\n optimizer.step()\n scheduler.step()\n acc_sarcasm += utils.binary_accuracy(sarcasm_probs, sarcasm_target)\n\n accuracies = { 'Sarcasm': acc_sarcasm / len(iterator)}\n losses = { 'Sarcasm': loss_sarc / len(iterator)}\n return accuracies, losses\n\ndef evaluate(base_model, mt_classifier, iterator, sar_criterion):\n # initialize every epoch\n acc_sarcasm= 0\n loss_sarc= 0\n\n all_sarcasm_outputs = []\n all_sarcasm_labels = []\n\n # set the model in eval phase\n base_model.eval()\n mt_classifier.eval()\n with torch.no_grad():\n for data_input, label_input in Bar(iterator):\n\n for k, v in data_input.items():\n data_input[k] = v.to(device)\n\n for k, v in label_input.items():\n label_input[k] = v.to(device)\n\n\n sarcasm_target = label_input['sarcasm']\n\n # forward pass\n\n output, pooled = base_model(**data_input)\n sarcasm_logits = mt_classifier(output, pooled)\n\n sarcasm_probs = torch.sigmoid(sarcasm_logits).to(device)\n # compute the loss\n loss_sarcasm = sar_criterion(sarcasm_probs.squeeze(), sarcasm_target)\n #mtl_loss = multi_task_loss(loss_sentiment, loss_sarcasm)\n\n # compute the running accuracy and losses\n acc_sarcasm += utils.binary_accuracy(sarcasm_probs, sarcasm_target)\n\n\n loss_sarc += loss_sarcasm.item()\n\n predicted_sarcasm = torch.round(sarcasm_probs)\n all_sarcasm_outputs.extend(predicted_sarcasm.squeeze().int().cpu().numpy())\n all_sarcasm_labels.extend(sarcasm_target.squeeze().int().cpu().numpy())\n\n fscore_sarcasm = f1_score(all_sarcasm_outputs, all_sarcasm_labels, pos_label=1, average='binary')\n report_sarcasm = classification_report(all_sarcasm_outputs, all_sarcasm_labels, target_names=['False', 'True'],digits=4)\n\n\n accuracies = { 'Sarcasm': acc_sarcasm / len(iterator), 'F1_sarcasm': fscore_sarcasm, 'Report_sarcasm': report_sarcasm}\n losses = { 'Sarcasm': loss_sarc / len(iterator)}\n return accuracies, losses\n\ndef train_full(config, train_loader, stest_loader):\n lr_o = config['lr_mult'] * config['lr']\n lr = config['lr']\n\n #Instanciate models\n base_model = modeling.TransformerLayer(pretrained_path=config['pretrained_path'], both=True).to(device)\n mtl_classifier = modeling.CLSlayer(base_model.output_num(), class_num=1).to(device)\n cls = 'CLSlayer'\n\n\n ## set optimizer and criterions\n\n sarc_criterion = nn.BCELoss().to(device)\n\n params = [{'params':base_model.parameters(), 'lr':config['lr']}, {'params': mtl_classifier.parameters(), 'lr': lr_o}]#, {'params':multi_task_loss.parameters(), 'lr': 0.0005}]\n optimizer = AdamW(params, lr=config[\"lr\"])\n train_data_size = len(train_loader)\n steps_per_epoch = int(train_data_size / config['batch_size'])\n num_train_steps = len(train_loader) * config['epochs']\n warmup_steps = int(config['epochs'] * train_data_size * 0.1 / config['batch_size'])\n scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps, num_training_steps=10000)\n # Train model\n best_sentiment_valid_accuracy, best_sarcasm_valid_accuracy = 0, 0\n best_total_val_acc = 0\n best_val_loss = float('+inf')\n report_sarcasm = None\n epo = 0\n for epoch in range(config['epochs']):\n print(\"epoch {}\".format(epoch + 1))\n\n train_accuracies, train_losses = train(base_model, 
mtl_classifier, train_loader, optimizer, sarc_criterion,scheduler)\n valid_accuracies, valid_losses = evaluate(base_model, mtl_classifier, valid_loader, sarc_criterion)\n #print(multi_task_loss.parameters())\n val_loss = valid_losses['Sarcasm']\n total_val_acc = valid_accuracies['F1_sarcasm']\n if total_val_acc > best_total_val_acc:\n #if best_val_loss > val_loss:\n epo = epoch\n best_val_loss = val_loss\n best_total_val_acc = total_val_acc\n best_sarcasm_valid_accuracy = valid_accuracies['F1_sarcasm']\n report_sarcasm = valid_accuracies['Report_sarcasm']\n best_sarcasm_loss = valid_losses['Sarcasm']\n print(\"save model's checkpoint\")\n torch.save(base_model.state_dict(), \"./ckpts/best_basemodel_sarcasm_\"+config[\"lm\"]+\".pth\")\n torch.save(mtl_classifier.state_dict(), \"./ckpts/best_mtl_cls_sarcasm_\"+config[\"lm\"]+\".pth\")\n\n\n print('********************Train Epoch***********************\\n')\n print(\"accuracies**********\")\n for k , v in train_accuracies.items():\n print(k+f\" : {v * 100:.2f}\")\n print(\"losses**********\")\n for k , v in train_losses.items():\n print(k+f\": {v :.5f}\\t\")\n print('********************Validation***********************\\n')\n print(\"accuracies**********\")\n for k, v in valid_accuracies.items():\n if 'Report' not in k:\n print(k+f\": {v * 100:.2f}\")\n print(\"losses**********\")\n for k, v in valid_losses.items():\n print(k + f\": {v :.5f}\\t\")\n print('******************************************************\\n')\n print(f\"epoch of best results {epo}\")\n with open(f'reports/report_Sarcasm_ST_model_{cls}_v2.txt', 'w') as f:\n f.write(\"Sarcasm report\\n\")\n f.write(report_sarcasm)\n return best_sarcasm_valid_accuracy, best_sarcasm_loss\nif __name__ == \"__main__\":\n def str2bool(v):\n if v.lower() in ('yes', 'true', 't', 'y', '1'):\n return True\n elif v.lower() in ('no', 'false', 'f', 'n', '0'):\n return False\n else:\n raise argparse.ArgumentTypeError('Unsupported value encountered.')\n\n\n parser = argparse.ArgumentParser(description='Conditional Domain Adversarial Network')\n parser.add_argument('--lm_pretrained', type=str, default='arabert',\n help=\" path of pretrained transformer\")\n parser.add_argument('--lr', type=float, default=2e-5, help=\"learning rate\")\n parser.add_argument('--lr_mult', type=float, default=1, help=\"dicriminator learning rate multiplier\")\n\n parser.add_argument('--batch_size', type=int, default=36, help=\"training batch size\")\n parser.add_argument('--seed', type=int, default=12345)\n parser.add_argument('--num_worker', type=int, default=4)\n\n\n parser.add_argument('--epochs', type=int, default=40, metavar='N',\n help='number of epochs to train (default: 10)')\n args = parser.parse_args()\n\n\n config = {}\n config['args'] = args\n config[\"output_for_test\"] = True\n config['epochs'] = args.epochs\n config[\"class_num\"] = 1\n config[\"lr\"] = args.lr\n config['lr_mult'] = args.lr_mult\n config['batch_size'] = args.batch_size\n config['lm'] = args.lm_pretrained\n\n dosegmentation = False\n if args.lm_pretrained == 'arbert':\n config['pretrained_path'] = \"UBC-NLP/ARBERT\"\n elif args.lm_pretrained == 'marbert':\n config['pretrained_path'] = \"UBC-NLP/MARBERT\"\n elif args.lm_pretrained == 'larabert':\n config['pretrained_path'] = \"aubmindlab/bert-large-arabertv02\"\n dosegmentation = True\n else:\n config['pretrained_path'] = 'aubmindlab/bert-base-arabertv02'\n dosegmentation = True\n\n seeds = [12345]#, 12346, 12347, 12348, 12349]\n for RANDOM_SEED in seeds:\n random.seed(RANDOM_SEED)\n 
np.random.seed(RANDOM_SEED)\n torch.manual_seed(RANDOM_SEED)\n torch.cuda.manual_seed(RANDOM_SEED)\n torch.cuda.manual_seed_all(RANDOM_SEED)\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n\n train_loader, valid_loader = preprocessing.loadTrainValData(batchsize=args.batch_size, num_worker= 1, pretraine_path=config['pretrained_path'])\n best_sarcasm_acc, best_sarcasm_loss =train_full(config, train_loader, valid_loader)\n print(f' Val. Sarcasm F1: {best_sarcasm_acc * 100:.2f}% \\t Val Sarcasm Loss {best_sarcasm_loss :.4f} ')\n" }, { "alpha_fraction": 0.5855053067207336, "alphanum_fraction": 0.6162770986557007, "avg_line_length": 35.26829147338867, "blob_id": "417c24a940e76685d0465d06ed39f72fe52a31ae", "content_id": "26bde1a869ec402f275e80f942bfb70de7b1b234", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5947, "license_type": "permissive", "max_line_length": 125, "num_lines": 164, "path": "/utils.py", "repo_name": "AbdelkaderMH/sarcasm_wanlp", "src_encoding": "UTF-8", "text": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nfrom sklearn.metrics import accuracy_score, f1_score\n\n\n\ndef init_weights(m):\n classname = m.__class__.__name__\n if classname.find('Conv2d') != -1 or classname.find('ConvTranspose2d') != -1:\n nn.init.kaiming_uniform_(m.weight)\n nn.init.zeros_(m.bias)\n elif classname.find('BatchNorm') != -1:\n nn.init.normal_(m.weight, 1.0, 0.02)\n nn.init.zeros_(m.bias)\n elif classname.find('Linear') != -1:\n nn.init.xavier_normal_(m.weight)\n if m.bias is not None:\n nn.init.zeros_(m.bias)\n\ndef f1_loss(preds, target):\n target = target\n predict = preds.squeeze()\n predict = torch.clamp(predict * (1-target), min=0.01) + predict * target\n tp = predict * target\n tp = tp.sum(dim=0)\n precision = tp / (predict.sum(dim=0) + 1e-8)\n recall = tp / (target.sum(dim=0) + 1e-8)\n f1 = 2 * (precision * recall / (precision + recall + 1e-8))\n return 1 - f1.mean()\n\ndef accuracy(preds, y):\n all_output = preds.float().cpu()\n all_label = y.float().cpu()\n _, predict = torch.max(all_output, 1)\n acc = accuracy_score(all_label.numpy(), torch.squeeze(predict).float().numpy())\n return acc\n\ndef calc_accuracy(preds,y):\n predict = torch.argmax(preds, dim=1)\n accuracy = torch.sum(predict == y.squeeze()).float().item()\n return accuracy / float(preds.size()[0])\n\ndef binary_accuracy(preds, y):\n # round predictions to the closest integer\n rounded_preds = torch.round(preds).squeeze()\n\n correct = (rounded_preds == y).float()\n acc = correct.sum() / y.size(0)\n return acc\n\n\n\ndef f1_loss(y_true: torch.Tensor, y_pred: torch.Tensor, is_training=False) -> torch.Tensor:\n\n\n y_pred = y_pred.argmax(dim=1)\n\n tp = (y_true * y_pred).sum().to(torch.float32)\n tn = ((1 - y_true) * (1 - y_pred)).sum().to(torch.float32)\n fp = ((1 - y_true) * y_pred).sum().to(torch.float32)\n fn = (y_true * (1 - y_pred)).sum().to(torch.float32)\n\n epsilon = 1e-7\n\n precision = tp / (tp + fp + epsilon)\n recall = tp / (tp + fn + epsilon)\n\n f1 = 2 * (precision * recall) / (precision + recall + epsilon)\n f1.requires_grad = is_training\n return 1 - f1\n\n\ndef fscore(preds, y):\n all_output = preds.clone().detach().float().cpu()\n all_label = y.clone().detach().float().cpu()\n _, predict = torch.max(all_output, 1)\n acc = f1_score(all_label.numpy(), torch.squeeze(predict).float().numpy(), average='macro')\n return acc\n\n\n\ndef f1score(predict, target):\n target = target\n predict = 
predict.squeeze()\n predict = torch.clamp(predict * (1-target), min=0.01) + predict * target\n tp = predict * target\n tp = tp.sum(dim=0)\n precision = tp / (predict.sum(dim=0) + 1e-8)\n recall = tp / (target.sum(dim=0) + 1e-8)\n f1 = 2 * (precision * recall / (precision + recall + 1e-8))\n return f1.mean()\n\n\n\ndef f1_loss(preds, target):\n target = target\n predict = preds.squeeze()\n #lack_cls = target.sum(dim=0) == 0\n #if lack_cls.any():\n # loss += F.binary_cross_entropy_with_logits(\n # predict[:, lack_cls], target[:, lack_cls])\n loss = F.binary_cross_entropy_with_logits(predict, target)\n predict = torch.sigmoid(predict)\n predict = torch.clamp(predict * (1-target), min=0.01) + predict * target\n tp = predict * target\n tp = tp.sum(dim=0)\n precision = tp / (predict.sum(dim=0) + 1e-8)\n recall = tp / (target.sum(dim=0) + 1e-8)\n f1 = 2 * (precision * recall / (precision + recall + 1e-8))\n return 1 - f1.mean() + loss #+ circle_loss(preds, target)\n\ndef fscore_loss(y_pred, target, beta=1, epsilon=1e-8):\n y_true = target.unsqueeze(1)\n TP = (y_pred * y_true).sum(dim=1)\n FP = ((1 - y_pred) * y_true).sum(dim=1)\n FN = (y_pred * (1 - y_true)).sum(dim=1)\n fbeta = (1 + beta ** 2) * TP / ((1 + beta ** 2) * TP + (beta ** 2) * FN + FP + epsilon)\n fbeta = fbeta.clamp(min=epsilon, max=1 - epsilon)\n return 1 - fbeta.mean()\n\n\ndef macro_double_soft_f1(y, y_hat):\n pred = y_hat.to(torch.float).unsqueeze(1)\n truth = y.to(torch.float).unsqueeze(1)\n tp = pred.mul(truth).sum(0).float()\n fp = pred.mul(1 - truth).sum(0).float()\n fn = (1 - pred).mul(truth).sum(0).float()\n tn = (1 - pred).mul(1 - truth).sum(0).float()\n soft_f1_class1 = 2 * tp / (2 * tp + fn + fp + 1e-16)\n soft_f1_class0 = 2 * tn / (2 * tn + fn + fp + 1e-16)\n cost_class1 = 1 - soft_f1_class1 # reduce 1 - soft-f1_class1 in order to increase soft-f1 on class 1\n cost_class0 = 1 - soft_f1_class0 # reduce 1 - soft-f1_class0 in order to increase soft-f1 on class 0\n cost = 0.5 * (cost_class1 + cost_class0) # take into account both class 1 and class 0\n macro_cost = cost.mean() # average on all labels\n return macro_cost\n\ndef mcc_loss(outputs_target, temperature=3, class_num=2):\n train_bs = outputs_target.size(0)\n outputs_target_temp = outputs_target / temperature\n target_softmax_out_temp = nn.Softmax(dim=1)(outputs_target_temp)\n target_entropy_weight = Entropy(target_softmax_out_temp).detach()\n target_entropy_weight = 1 + torch.exp(-target_entropy_weight)\n target_entropy_weight = train_bs * target_entropy_weight / torch.sum(target_entropy_weight)\n cov_matrix_t = target_softmax_out_temp.mul(target_entropy_weight.view(-1, 1)).transpose(1, 0).mm(target_softmax_out_temp)\n cov_matrix_t = cov_matrix_t / torch.sum(cov_matrix_t, dim=1)\n mcc_loss = (torch.sum(cov_matrix_t) - torch.trace(cov_matrix_t)) / class_num\n return mcc_loss\n\ndef EntropyLoss(input_):\n # print(\"input_ shape\", input_.shape)\n mask = input_.ge(0.000001)\n mask_out = torch.masked_select(input_, mask)\n entropy = -(torch.sum(mask_out * torch.log(mask_out + 1e-5)))\n return entropy / float(input_.size(0))\n\ndef Entropy(input_):\n bs = input_.size(0)\n epsilon = 1e-5\n entropy = -input_ * torch.log(input_ + epsilon)\n entropy = torch.sum(entropy, dim=1)\n return entropy" }, { "alpha_fraction": 0.5612170100212097, "alphanum_fraction": 0.5931084752082825, "avg_line_length": 38.550724029541016, "blob_id": "921d8d4854a3311b90c2a4dfa5c2ae27cf5c6cd7", "content_id": "0d0dd6efea7ecbc44d091b21067387f8bc9f0505", "detected_licenses": [ "MIT" ], "is_generated": 
false, "is_vendor": false, "language": "Python", "length_bytes": 2728, "license_type": "permissive", "max_line_length": 107, "num_lines": 69, "path": "/metrics.py", "repo_name": "AbdelkaderMH/sarcasm_wanlp", "src_encoding": "UTF-8", "text": "from numpy import nanmean\nimport torch\ndef accuracy(output, target):\n \"\"\"Computes the accuracy for multiple binary predictions\"\"\"\n pred = output >= 0.5\n truth = target >= 0.5\n acc = pred.eq(truth).sum() / target.numel()\n return acc\n\n\nclass BinaryClassificationMeter(object):\n \"\"\"Computes and stores the average and current value\"\"\"\n def __init__(self):\n self.reset()\n\n def reset(self):\n self.tp = 0\n self.tn = 0\n self.fp = 0\n self.fn = 0\n self.acc = 0\n self.pre = 0\n self.rec = 0\n self.f1 = 0\n\n def update(self, output, target):\n pred = output >= 0.5\n truth = target >= 0.5\n pred = pred.to(torch.float)\n truth = truth.to(torch.float)\n self.tp += pred.mul(truth).sum(0).float()\n self.tn += (1 - pred).mul(1 - truth).sum(0).float()\n self.fp += pred.mul(1 - truth).sum(0).float()\n self.fn += (1 - pred).mul(truth).sum(0).float()\n self.acc = (self.tp + self.tn).sum() / (self.tp + self.tn + self.fp + self.fn).sum()\n self.pre = self.tp / (self.tp + self.fp)\n self.rec = self.tp / (self.tp + self.fn)\n self.f1 = (2.0 * self.tp) / (2.0 * self.tp + self.fp + self.fn)\n self.avg_acc = nanmean(self.acc.cpu())\n self.avg_pre = nanmean(self.pre.cpu())\n self.avg_rec = nanmean(self.rec.cpu())\n self.avg_f1 = nanmean(self.f1.cpu())\n\n\ndef macro_double_soft_f1(y, y_hat):\n \"\"\"Compute the macro soft F1-score as a cost (average 1 - soft-F1 across all labels).\n Use probability values instead of binary predictions.\n This version uses the computation of soft-F1 for both positive and negative class for each label.\n\n Args:\n y (int32 Tensor): targets array of shape (BATCH_SIZE, N_LABELS)\n y_hat (float32 Tensor): probability matrix from forward propagation of shape (BATCH_SIZE, N_LABELS)\n\n Returns:\n cost (scalar Tensor): value of the cost function for the batch\n \"\"\"\n pred = y.to(torch.float)\n truth = y_hat.to(torch.float)\n tp = pred.mul(truth).sum(0).float()\n fp = pred.mul(1 - truth).sum(0).float()\n fn = (1 - pred).mul(truth).sum(0).float()\n tn = (1 - pred).mul(1 - truth).sum(0).float()\n soft_f1_class1 = 2 * tp / (2 * tp + fn + fp + 1e-16)\n soft_f1_class0 = 2 * tn / (2 * tn + fn + fp + 1e-16)\n cost_class1 = 1 - soft_f1_class1 # reduce 1 - soft-f1_class1 in order to increase soft-f1 on class 1\n cost_class0 = 1 - soft_f1_class0 # reduce 1 - soft-f1_class0 in order to increase soft-f1 on class 0\n cost = 0.5 * (cost_class1 + cost_class0) # take into account both class 1 and class 0\n macro_cost = cost.mean() # average on all labels\n return macro_cost" }, { "alpha_fraction": 0.5709421038627625, "alphanum_fraction": 0.5758607387542725, "avg_line_length": 29.379310607910156, "blob_id": "a09ba5883db83fecaec92232fe3fe3d41031e159", "content_id": "1c618126d124644a964207b8f9a494035c85741c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2643, "license_type": "permissive", "max_line_length": 103, "num_lines": 87, "path": "/Dataset.py", "repo_name": "AbdelkaderMH/sarcasm_wanlp", "src_encoding": "UTF-8", "text": "import torch\nfrom torch.utils.data import Dataset, DataLoader\nfrom transformers import AutoTokenizer\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.model_selection import StratifiedKFold\nimport pandas as pd\n\n\nclass 
TrainDataset(Dataset):\n def __init__(self, df, pretraine_path='aubmindlab/bert-base-arabert', max_length=128):\n self.df = df\n self.max_length = max_length\n\n self.tokenizer = AutoTokenizer.from_pretrained(pretraine_path)\n\n def __getitem__(self, index):\n text = self.df.iloc[index]['tweet']\n l_sarcasm = self.df.iloc[index][\"sarcasm\"]\n l_sentiment = self.df.iloc[index][\"sentiment\"]\n sentiment_dict = {\n \"Positive\": 2,\n \"Neutral\" : 1,\n \"Negative\": 0,\n }\n sarcasm_dict = {\n \"True\" : 1,\n \"False\" : 0\n }\n sentiment = sentiment_dict[l_sentiment]\n sarcasm = sarcasm_dict[str(l_sarcasm)]\n encoded_input = self.tokenizer(\n text,\n max_length = self.max_length,\n padding='max_length',\n truncation=True,\n return_tensors=\"pt\",\n )\n\n input_ids = encoded_input[\"input_ids\"]\n attention_mask = encoded_input[\"attention_mask\"] if \"attention_mask\" in encoded_input else None\n\n data_input = {\n \"input_ids\":input_ids.flatten(),\n \"attention_mask\": attention_mask.flatten()\n }\n\n label_input ={\n \"sarcasm\": torch.tensor(sarcasm, dtype=torch.float),\n \"sentiment\": torch.tensor(sentiment, dtype=torch.long),\n\n }\n\n return data_input, label_input\n\n def __len__(self):\n return self.df.shape[0]\n\n\nclass TestDataset(Dataset):\n def __init__(self, df, pretraine_path='aubmindlab/bert-base-arabert', max_length=128):\n self.df = df\n self.max_length = max_length\n self.tokenizer = AutoTokenizer.from_pretrained(pretraine_path)\n\n def __getitem__(self, index):\n text = self.df.iloc[index][\"tweet\"]\n\n encoded_input = self.tokenizer(\n text,\n max_length=self.max_length,\n padding='max_length',\n truncation=True,\n return_tensors=\"pt\",\n )\n\n input_ids = encoded_input[\"input_ids\"]\n attention_mask = encoded_input[\"attention_mask\"] if \"attention_mask\" in encoded_input else None\n\n data_input = {\n \"input_ids\": input_ids.flatten(),\n \"attention_mask\": attention_mask.flatten()\n }\n\n return data_input\n\n def __len__(self):\n return self.df.shape[0]\n" }, { "alpha_fraction": 0.6527666449546814, "alphanum_fraction": 0.6736167073249817, "avg_line_length": 23.959999084472656, "blob_id": "c8fafd727bc239984eaca0f3b81e1f50a12bbc5c", "content_id": "8f5882371781f4ac768e92421713b426777fab78", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1247, "license_type": "permissive", "max_line_length": 102, "num_lines": 50, "path": "/README.md", "repo_name": "AbdelkaderMH/sarcasm_wanlp", "src_encoding": "UTF-8", "text": "# Deep Multi-Task Model for Sarcasm Detection and Sentiment Analysis in Arabic Language\n\n## Requirement\n1. pytorch\n2. transformers\n3. scikitlearn\n4. pandas\n5. 
barbar\n\n## Model Training \n\n```\npython train_model.py [args]\n```\nOptions:\n- lm_pretrained : pretrained BERT model (MARBERT, ARBERT, AraBERT, ...)\n- lr : learning rate\n- batch_size : batch size\n- epochs : number of epochs\n- lr_mult : Classifier learning rate multiplier\n\n## Model evaluation/testing\n```\npython eval_model.py --lm_pretrained [value] --batch_size [value]\n```\n\n\nCiting this work\n---------------------\n\nIf you are using this source code please use the following citation to reference this work:\n\n```\n@inproceedings{el-mahdaouy-etal-2021-deep,\n title = \"Deep Multi-Task Model for Sarcasm Detection and Sentiment Analysis in {A}rabic Language\",\n author = \"El Mahdaouy, Abdelkader and\n El Mekki, Abdellah and\n Essefar, Kabil and\n El Mamoun, Nabil and\n Berrada, Ismail and\n Khoumsi, Ahmed\",\n booktitle = \"Proceedings of the Sixth Arabic Natural Language Processing Workshop\",\n month = apr,\n year = \"2021\",\n address = \"Kyiv, Ukraine (Virtual)\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/2021.wanlp-1.42\",\n pages = \"334--339\",\n}\t\n```" }, { "alpha_fraction": 0.6795976758003235, "alphanum_fraction": 0.6846264600753784, "avg_line_length": 49.563636779785156, "blob_id": "66a666233193e80d3428c19f94f65c363c97e1a2", "content_id": "0aa9153926ebdae64e6dbbbcaadc1553d52b2395", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2784, "license_type": "permissive", "max_line_length": 136, "num_lines": 55, "path": "/preprocessing.py", "repo_name": "AbdelkaderMH/sarcasm_wanlp", "src_encoding": "UTF-8", "text": "import pandas as pd\nfrom torch.utils.data import Dataset, DataLoader\nfrom sklearn.preprocessing import LabelEncoder\nimport Dataset\nimport text_normalization\nfrom pickle import dump, load\nfrom sklearn.model_selection import train_test_split\n\ndef loadTrainValData(batchsize=16, num_worker=2, pretraine_path=\"bert-base-uncased\"):\n data = pd.read_csv('data/training_data.csv', delimiter=',')\n Train_data, Dev_data = train_test_split(data, test_size=0.2, stratify=data[['sarcasm', 'sentiment']], random_state=42, shuffle=True)\n Dev_data.to_csv('data/dev_set.csv')\n Train_data['tweet'] = Train_data['tweet'].apply(lambda x: text_normalization.clean(x))\n Dev_data['tweet'] = Dev_data['tweet'].apply(lambda x: text_normalization.clean(x))\n\n print(f'Training data size {Train_data.shape}')\n print(f'Validation data size {Dev_data.shape}')\n DF_train = Dataset.TrainDataset(Train_data, pretraine_path)\n DF_dev = Dataset.TrainDataset(Dev_data, pretraine_path)\n\n DF_train_loader = DataLoader(dataset=DF_train, batch_size=batchsize, shuffle=True,\n num_workers=num_worker)\n DF_dev_loader = DataLoader(dataset=DF_dev, batch_size=batchsize, shuffle=False,\n num_workers=num_worker)\n return DF_train_loader, DF_dev_loader\n\ndef loadTestData(batchsize=16, num_worker=2, pretraine_path=\"bert-base-uncased\"):\n Test_data = pd.read_csv('data/test_set.csv', delimiter=',')\n print(f'Test data size {Test_data.shape}')\n\n Test_data['tweet'] = Test_data['tweet'].apply(lambda x: text_normalization.clean(x))\n\n DF_test = Dataset.TestDataset(Test_data, pretraine_path)\n\n DF_test_loader = DataLoader(dataset=DF_test, batch_size=batchsize, shuffle=False,\n num_workers=num_worker)\n return DF_test_loader\n\n\ndef loadTrainValData_v2(batchsize=16, num_worker=2, pretraine_path=\"bert-base-uncased\"):\n Train_data = pd.read_csv('data/ArSarcasm_train.csv', 
delimiter=',')\n Train_data['tweet'] = Train_data['tweet'].apply(lambda x: text_normalization.clean(x))\n Dev_data = pd.read_csv('data/ArSarcasm_test.csv', delimiter=',')\n Dev_data['tweet'] = Dev_data['tweet'].apply(lambda x: text_normalization.clean(x))\n\n print(f'Training data size {Train_data.shape}')\n print(f'Validation data size {Dev_data.shape}')\n DF_train = Dataset.TrainDataset(Train_data, pretraine_path)\n DF_dev = Dataset.TrainDataset(Dev_data, pretraine_path)\n\n DF_train_loader = DataLoader(dataset=DF_train, batch_size=batchsize, shuffle=True,\n num_workers=num_worker)\n DF_dev_loader = DataLoader(dataset=DF_dev, batch_size=batchsize, shuffle=False,\n num_workers=num_worker)\n return DF_train_loader, DF_dev_loader\n\n\n\n" }, { "alpha_fraction": 0.6407333016395569, "alphanum_fraction": 0.6530292630195618, "avg_line_length": 35.95041275024414, "blob_id": "1164fcae9f752b352fb3bdda05d7b26c6fd85927", "content_id": "a04364c66d8a809293b342f61fbf1e641b282bd4", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4473, "license_type": "permissive", "max_line_length": 132, "num_lines": 121, "path": "/eval_sarcasm.py", "repo_name": "AbdelkaderMH/sarcasm_wanlp", "src_encoding": "UTF-8", "text": "import argparse\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport pandas as pd\nimport preprocessing\nimport modeling\nfrom barbar import Bar\nimport random\n\n\nfrom sklearn.metrics import f1_score, accuracy_score\nfrom losses import ModelMultitaskLoss\nimport utils\nfrom transformers import AdamW, get_linear_schedule_with_warmup\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n\ndef evaluate(base_model, mt_classifier, iterator):\n all_sentiment_outputs = []\n all_sarcasm_outputs = []\n\n # set the model in eval phase\n base_model.eval()\n mt_classifier.eval()\n with torch.no_grad():\n for data_input in Bar(iterator):\n\n for k, v in data_input.items():\n data_input[k] = v.to(device)\n\n output, pooled = base_model(**data_input)\n sarcasm_logits = mt_classifier(output, pooled)\n\n sarcasm_probs = torch.sigmoid(sarcasm_logits).to(device)\n predicted_sarcasm = torch.round(sarcasm_probs)\n all_sarcasm_outputs.extend(predicted_sarcasm.squeeze().int().cpu().numpy())\n\n return all_sarcasm_outputs\n\n\ndef eval_full(config, test_loader):\n base_model = modeling.TransformerLayer(pretrained_path=config['pretrained_path'], both=True).to(device)\n base_model.load_state_dict(torch.load(\"./ckpts/best_basemodel_sarcasm_\"+config[\"lm\"]+\".pth\"))\n base_model.to(device)\n\n mtl_classifier = modeling.CLSClassifier(base_model.output_num(), class_num=1)\n mtl_classifier.load_state_dict(torch.load(\"./ckpts/best_mtl_cls_sarcasm_\"+config[\"lm\"]+\".pth\"))\n mtl_classifier.to(device)\n all_sarcasm_outputs = evaluate(base_model,mtl_classifier, test_loader)\n return all_sarcasm_outputs\n\n\nif __name__ == \"__main__\":\n def str2bool(v):\n if v.lower() in ('yes', 'true', 't', 'y', '1'):\n return True\n elif v.lower() in ('no', 'false', 'f', 'n', '0'):\n return False\n else:\n raise argparse.ArgumentTypeError('Unsupported value encountered.')\n\n\n parser = argparse.ArgumentParser(description='Conditional Domain Adversarial Network')\n parser.add_argument('--lm_pretrained', type=str, default='arabert',\n help=\" path of pretrained transformer\")\n parser.add_argument('--lr', type=float, default=2e-5, help=\"learning rate\")\n parser.add_argument('--lr_mult', type=float, default=1, help=\"dicriminator 
learning rate multiplier\")\n\n parser.add_argument('--batch_size', type=int, default=36, help=\"training batch size\")\n parser.add_argument('--seed', type=int, default=12345)\n parser.add_argument('--num_worker', type=int, default=4)\n\n\n parser.add_argument('--epochs', type=int, default=40, metavar='N',\n help='number of epochs to train (default: 10)')\n args = parser.parse_args()\n\n\n config = {}\n config['args'] = args\n config[\"output_for_test\"] = True\n config['epochs'] = args.epochs\n config[\"class_num\"] = 1\n config[\"lr\"] = args.lr\n config['lr_mult'] = args.lr_mult\n config['batch_size'] = args.batch_size\n config['lm'] = args.lm_pretrained\n\n dosegmentation = False\n if args.lm_pretrained == 'arbert':\n config['pretrained_path'] = \"UBC-NLP/ARBERT\"\n elif args.lm_pretrained == 'marbert':\n config['pretrained_path'] = \"UBC-NLP/MARBERT\"\n elif args.lm_pretrained == 'larabert':\n config['pretrained_path'] = \"aubmindlab/bert-large-arabertv02\"\n dosegmentation = True\n else:\n config['pretrained_path'] = 'aubmindlab/bert-base-arabertv02'\n dosegmentation = True\n\n label_dict = {0:'FALSE',\n 1:'TRUE'}\n\n seeds = [12345]#, 12346, 12347, 12348, 12349]\n for RANDOM_SEED in seeds:\n random.seed(RANDOM_SEED)\n np.random.seed(RANDOM_SEED)\n torch.manual_seed(RANDOM_SEED)\n torch.cuda.manual_seed(RANDOM_SEED)\n torch.cuda.manual_seed_all(RANDOM_SEED)\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n\n test_loader = preprocessing.loadTestData(batchsize=args.batch_size, num_worker= 1, pretraine_path=config['pretrained_path'])\n all_sarcasm = eval_full(config, test_loader)\n submission = pd.DataFrame(columns=['Sarcasm'])\n submission[\"Sarcasm\"] = all_sarcasm\n submission[\"Sarcasm\"].replace(label_dict, inplace=True)\n submission.to_csv(\"results/sarcasm/CS-UM6P_Subtask_1_MARBERT_CLSATT.csv\", index=False, header=False)\n\n\n" } ]
14
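The `f1_loss` and `macro_double_soft_f1` helpers in the utils.py and metrics.py listings above share one trick: plain F1 is computed from thresholded predictions and so has no usable gradient, but if the predicted probabilities themselves stand in as fractional true/false-positive counts, 1 − F1 becomes differentiable. A minimal, self-contained sketch of that soft-F1 idea (hypothetical tensors, not the repo's actual training loop):

```python
import torch

def soft_f1_loss(probs: torch.Tensor, targets: torch.Tensor, eps: float = 1e-8) -> torch.Tensor:
    """1 - F1 with probabilities standing in for hard counts, so it is differentiable."""
    tp = (probs * targets).sum()          # soft true positives
    fp = (probs * (1 - targets)).sum()    # soft false positives
    fn = ((1 - probs) * targets).sum()    # soft false negatives
    f1 = 2 * tp / (2 * tp + fp + fn + eps)
    return 1 - f1

# Hypothetical batch: sigmoid outputs vs. binary sarcasm labels.
probs = torch.tensor([0.9, 0.2, 0.7, 0.1], requires_grad=True)
targets = torch.tensor([1.0, 0.0, 1.0, 0.0])
loss = soft_f1_loss(probs, targets)
loss.backward()  # gradients flow, unlike with a rounded/argmaxed F1
```

The epsilon guard mirrors the `1e-8`/`1e-16` terms in the repo's versions and keeps the ratio defined when a batch contains no positives.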
neeasthana/ModernPortfolioTheory
https://github.com/neeasthana/ModernPortfolioTheory
0a1854a9db0177f23d12eea6a186c1b6d2a758d3
853632514f58e9ef1b2a98c7ced4bf1565de36b4
cecebb9515062791ba68bb4aa269362d09adf063
refs/heads/master
2020-07-05T12:02:12.561358
2019-08-24T03:51:01
2019-08-24T03:51:01
202,643,566
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.6710526347160339, "alphanum_fraction": 0.6710526347160339, "avg_line_length": 18, "blob_id": "fb46adea502512ea728c4bb52c0366476059fa57", "content_id": "abeb8a5061a5502955fd8970efeaa37ad6df69c8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 152, "license_type": "no_license", "max_line_length": 30, "num_lines": 8, "path": "/stock.py", "repo_name": "neeasthana/ModernPortfolioTheory", "src_encoding": "UTF-8", "text": "class Stock(Asset):\n\tdef __init__(self, ticker):\n\t\tself.name = ticker\n\t\tself.ticker = ticker\n\n\tdef returns_from_yahoo(self):\n\t\t# use self.ticker\n\t\tpass\n" }, { "alpha_fraction": 0.6546546816825867, "alphanum_fraction": 0.6546546816825867, "avg_line_length": 18.58823585510254, "blob_id": "d1db96db4dabe58e65ab945635987fd89f5ac715", "content_id": "ead74ad2b596dc800f25734ca6e58a0ca28b01dd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 333, "license_type": "no_license", "max_line_length": 39, "num_lines": 17, "path": "/asset.py", "repo_name": "neeasthana/ModernPortfolioTheory", "src_encoding": "UTF-8", "text": "class Asset:\n\tdef __init__(self, name):\n\t\tself.name = name\n\t\tself.quotes = []\n\t\tself._calculate_returns()\n\t\n\tdef _calculate_returns(self):\n\t\tpass\n\n\tdef average_return(self):\n\t\treturn sum(self.returns)/len(returns)\n\n\tdef average_risk(self):\n\t\treturn Math.std(self.returns)\n\n\tdef returns_from_csv(self, csv_file):\n pass\n" }, { "alpha_fraction": 0.6583124399185181, "alphanum_fraction": 0.6780840754508972, "avg_line_length": 25.992481231689453, "blob_id": "8a5e7ac92b0c3d3d3ddb84dddc13d1ab75c764e9", "content_id": "7c7a7ec6f1fbd87fa229a0dd15011bd92076fe0c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3591, "license_type": "no_license", "max_line_length": 133, "num_lines": 133, "path": "/efficient_frontier.py", "repo_name": "neeasthana/ModernPortfolioTheory", "src_encoding": "UTF-8", "text": "# Source: https://plot.ly/ipython-notebooks/markowitz-portfolio-optimization/\n# Source 2: http://ahmedas91.github.io/blog/2016/03/01/efficient-frontier-with-python/\n\n#import required libraries\nimport pandas as pd\nimport numpy as np\nimport datetime\nimport plotly.graph_objs as go\nfrom plotly.offline import download_plotlyjs, init_notebook_mode, iplot\nimport cufflinks as cf\nimport cvxopt as opt\nfrom cvxopt import blas, solvers\nimport matplotlib.pyplot as plt\nimport io\nimport sys\nimport math\n\n#Function to get evctor $x$ of random portfolio weighs that sums to 1:\ndef random_wieghts(n):\n a = np.random.rand(n)\n return a/a.sum()\n\n\ndef returns(dataframe):\n\t(dataframe - dataframe.shift(1))/dataframe.shift(1)\n\n\ndata = pd.read_csv(\"portfolio/wealthfront_funds_cut.csv\")\n\ndata.Date = pd.to_datetime(data.Date, format = \"%Y-%m-%d\")\n\n\n# Columns names of all stock tickers\ncolumns = data.columns[1:]\n\n# Monthly returns of all stocks\nmonthly_returns = (data[columns] - data[columns].shift(1) ) / data[columns].shift(1)\nmonthly_returns = monthly_returns[[\"VTI\", \"VTMGX\",\"VEMAX\",\"MUB\",\"VIG\",\"VDE\"]]\n\n# Add Date to monthly returns and remove first row (which will be NaN for all returns becuase there is no return for the first month)\nmonthly_returns.insert(0, \"Date\", data.Date)\n\nmonthly_returns.set_index(\"Date\", inplace = True)\n\n\n# print(monthly_returns.index)\n\nmonthly_returns = 
monthly_returns = monthly_returns.drop(monthly_returns.index[0])\n\n\ncov = np.matrix(monthly_returns.cov())\nexpected_returns = np.matrix(monthly_returns.mean())\n\nprint(monthly_returns.columns)\nprint((expected_returns).round(4) * 12)\n\nprint(np.sqrt(np.diag(cov)) * math.sqrt(12))\n\ndef initial_portfolio(monthly_returns):\n wieghs = np.matrix(random_wieghts(expected_returns.shape[1]))\n \n mu = wieghs.dot(expected_returns.T)\n sigma = np.sqrt(wieghs * cov.dot(wieghs.T))\n \n return mu[0,0],sigma[0,0]\n\n#print(np.matrix(monthly_returns.cov()))\n\nn_portfolios = 10000\nmeans, stds = np.column_stack([\n initial_portfolio(monthly_returns) \n for _ in range(n_portfolios)\n])\n\n\n\ndef optimal_portfolio(returns):\n n = len(returns)\n returns = np.asmatrix(returns)\n \n N = 100\n mus = [10**(5.0 * t/N - 1.0) for t in range(N)]\n \n # Convert to cvxopt matrices\n S = opt.matrix(np.cov(returns))\n pbar = opt.matrix(np.mean(returns, axis=1))\n \n # Create constraint matrices\n G = -opt.matrix(np.eye(n)) # negative n x n identity matrix\n h = opt.matrix(0.0, (n ,1))\n A = opt.matrix(1.0, (1, n))\n b = opt.matrix(1.0)\n \n # Calculate efficient frontier weights using quadratic programming\n opt.solvers.options['show_progress'] = False\n portfolios = [solvers.qp(mu*S, -pbar, G, h, A, b)['x'] \n for mu in mus]\n ## CALCULATE RISKS AND RETURNS FOR FRONTIER\n returns = [blas.dot(pbar, x) for x in portfolios]\n\n risks = [np.sqrt(blas.dot(x, S*x)) for x in portfolios]\n ## CALCULATE THE 2ND DEGREE POLYNOMIAL OF THE FRONTIER CURVE\n m1 = np.polyfit(returns, risks, 2)\n x1 = np.sqrt(m1[2] / m1[0])\n # CALCULATE THE OPTIMAL PORTFOLIO\n \n wt = solvers.qp(opt.matrix(x1 * S), -pbar, G, h, A, b)['x'] # qp() returns a dict; the weight vector lives under 'x'\n \n return np.asarray(wt), returns, risks\n\n\n\nw_f, mu_f, sigma_f = optimal_portfolio(monthly_returns.T)\n\n\nmu_f_year = [i * 12 for i in mu_f]\n# volatility scales with the square root of time, not linearly\nsigma_f_year = [i * math.sqrt(12) for i in sigma_f]\n\nplt.plot(stds*math.sqrt(12), means*12, 'o', markersize = 1, color='black')\nplt.plot(sigma_f_year, mu_f_year, 'x', markersize=5, color='red')\n\nplt.xlabel('std')\nplt.ylabel('mean')\n\nplt.show()\n\n\n\n# data.set_index(\"Date\", inplace = True)\n\n# data.plot(legend = True)\n\n# plt.show()\n\n" }, { "alpha_fraction": 0.7992978096008301, "alphanum_fraction": 0.8063194751739502, "avg_line_length": 76.63636016845703, "blob_id": "a9abf694ff54d1c876c4d6f667f821782f955c41", "content_id": "bc97952dad8e13dde4c0feefc95161fcf2ed32d7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1709, "license_type": "no_license", "max_line_length": 354, "num_lines": 22, "path": "/README.md", "repo_name": "neeasthana/ModernPortfolioTheory", "src_encoding": "UTF-8", "text": "# ModernPortfolioTheory\nTool to analyze a portfolio of stocks using modern portfolio theory to understand risks and returns. The theory states that holding a single asset is very risky, but creating a portfolio of uncorrelated, diversified assets will generate more consistent returns and less volatility (stock prices jumping up and down) and therefore create long-term wealth. \n\n## Quick Start Guide\n\n1. Git clone\n1. import python module\n1. initialize object with stock tickers to include\n1. Analyze results\n\n## Procedure\n\n1. I will identify a list of uncorrelated and diversified assets. The assets will be diversified by company size, industry, country, volatility, expected return, etc. Assets will consist of stocks, bonds, and alternatives (REITs). \n
1. Monthly stock prices will then be retrieved online for as far back as the data is available (ideally more than 15 years). I have currently been downloading the data from Yahoo Finance (TODO)\n1. For each stock, calculate expected monthly return \n1. From the stock return data, calculate expected yearly return and risk (standard deviation of returns)\n1. Create different portfolios of the identified assets and calculate each portfolio's expected yearly return and volatility (standard deviation).\n1. Create the efficient frontier for the portfolio identified using modern portfolio theory. (TODO MATH). The efficient frontier will provide an allocation that will have the maximum possible return for a level of risk in the set of assets. One of the portfolios in the efficient frontier can then be selected as a portfolio for returns. \n\n## Contact\n\nFeel free to contact the package maintainer with suggestions and feature requests at [email protected]. \n" } ]
4
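The Monte-Carlo step in efficient_frontier.py above draws random long-only weight vectors, normalizes them to sum to 1, and scores each portfolio by its mean return and its volatility from the covariance matrix. A compact sketch of just that sampling step (synthetic returns in place of the Wealthfront CSV the script loads):

```python
import numpy as np

rng = np.random.default_rng(0)

def random_weights(n: int) -> np.ndarray:
    w = rng.random(n)
    return w / w.sum()  # long-only weights that sum to 1

# Hypothetical monthly returns for 3 assets over 120 months.
monthly = rng.normal(0.01, 0.04, size=(120, 3))
mu = monthly.mean(axis=0)            # expected monthly return per asset
cov = np.cov(monthly, rowvar=False)  # monthly covariance matrix

w = random_weights(3)
annual_return = 12 * (w @ mu)                    # returns scale linearly with time
annual_vol = np.sqrt(12) * np.sqrt(w @ cov @ w)  # volatility scales with sqrt(time)
print(annual_return, annual_vol)
```

Repeating this a few thousand times and scatter-plotting the (volatility, return) pairs traces out the cloud whose upper-left edge is the efficient frontier.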
Genji-MS/SuperHeroes
https://github.com/Genji-MS/SuperHeroes
d98ef23584a64e81a9e960b3807bdf23e11ea66e
ecec14301b1493dce4f31585003a79f1947116e3
fd135902e8b6789b2dd1994c26f24cffda172704
refs/heads/master
2020-07-31T17:00:50.786174
2019-09-28T02:21:27
2019-09-28T02:21:27
210,684,230
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5633395314216614, "alphanum_fraction": 0.5752084255218506, "avg_line_length": 37.63982009887695, "blob_id": "e40fd7867c684f4d4c42e439a081ca03590bd12e", "content_id": "69deb61b4565d60660eff79bac2063b486de5ed4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 17272, "license_type": "no_license", "max_line_length": 162, "num_lines": 447, "path": "/superheroes.py", "repo_name": "Genji-MS/SuperHeroes", "src_encoding": "UTF-8", "text": "#superheroes.py\nimport random\nclass Ability():\n def __init__(self, name, atk_dmg):\n '''Create instance variables :\n name: String\n atk_dmg: Int\n '''\n self.name = name\n self.atk_dmg = atk_dmg\n def attack(self):\n return random.randint(0,self.atk_dmg)\n\nclass Weapon(Ability):\n def attack(self):\n hit_or_miss = bool(random.getrandbits(1))\n if hit_or_miss:\n return self.atk_dmg\n else:\n return int(self.atk_dmg * 0.5)\n\nclass Armor():\n def __init__(self, name, block_value):\n '''Create instance variables :\n name: String\n block_value: int\n '''\n self.name = name\n self.block_value = block_value\n def block(self):\n return random.randint(0,self.block_value)\n\nclass Hero():\n #from superheroes import Ability, Armor\n '''Instance properties;\n name: string\n HP_max: int\n HP (current): int\n abilities: list\n armors: list\n '''\n def __init__(self, name, HP_max = 100):\n self.name = name\n self.HP_max = HP_max\n self.HP = HP_max\n self.abilities = list()\n self.armors = list()\n self.deaths = 0\n self.kills = 0\n\n def add_ability(self, Ability):\n self.abilities.append(Ability)\n def add_weapon(self, Weapon):\n self.abilities.append(Ability)\n def attack(self):\n total_dmg = 0\n for ability in self.abilities:\n total_dmg += ability.attack()\n return total_dmg\n def add_armor(self, Armor):\n self.armors.append(Armor)\n def defend(self):\n total_def = 0\n if len(self.armors) > 0:\n for armor in self.armors:\n total_def += armor.block()\n return total_def\n def take_dmg(self, dmg):\n sustained_dmg = 0\n if dmg > 0:\n defense = self.defend()\n sustained_dmg = dmg - defense\n if sustained_dmg < 0: sustained_dmg = 0\n if sustained_dmg > 0:\n self.HP -= sustained_dmg\n return sustained_dmg\n def add_kill(self, num_kills):\n self.kills += num_kills\n def add_death(self, num_deaths):\n self.deaths += num_deaths\n def is_alive(self):\n return True if self.HP >0 else False\n def declare_battle_against(self, opponent):\n attack_order:int = random.randint(0,1)\n while (self.is_alive() and opponent.is_alive()):\n print(f'\\n[.. {self.name} HP({self.HP}/{self.HP_max} ..<VS>.. 
\n if (attack_order%2 == 0):\n if (len(self.abilities) > 0):\n random_ability = random.randint(0,len(self.abilities)-1)\n random_ability_name = self.abilities[random_ability].name\n attack_power = self.attack()\n else:\n random_ability_name = 'pacifist stare'\n attack_power = 0\n\n print (f'{self.name} attacks with {random_ability_name} totaling -{attack_power}- power')\n\n if (len(opponent.armors) > 0):\n random_block = random.randint(0,len(opponent.armors)-1)\n random_block_name = opponent.armors[random_block].name\n else:\n random_block_name = 'pacifist stance'\n sustained_dmg = opponent.take_dmg(attack_power)\n\n print (f'{opponent.name} defends with {random_block_name} and receives -{sustained_dmg}- damage')\n attack_order += 1\n else:\n if (len(opponent.abilities) > 0):\n random_ability = random.randint(0,len(opponent.abilities)-1)\n random_ability_name = opponent.abilities[random_ability].name\n attack_power = opponent.attack()\n else:\n random_ability_name = 'pacifist stare'\n attack_power = 0\n\n print (f'{opponent.name} attacks with {random_ability_name} totaling -{attack_power}- power')\n\n if (len(self.armors) > 0):\n random_block = random.randint(0,len(self.armors)-1)\n random_block_name = self.armors[random_block].name\n else:\n random_block_name = 'pacifist stance'\n sustained_dmg = self.take_dmg(attack_power)\n\n print (f'{self.name} defends with {random_block_name} and receives -{sustained_dmg}- damage')\n attack_order += 1\n if self.is_alive(): #call the method; a bare method reference is always truthy\n print(f'The opponent {opponent.name} has taken his last blow, and has fallen!')\n opponent.add_death(1)\n print(f'Our hero {self.name} stands victorious, awaiting the next challenger')\n self.add_kill(1)\n else:\n print(f'The hero {self.name} has taken his last blow, and has fallen!')\n self.add_death(1)\n print(f'Our opponent {opponent.name} stands victorious, awaiting the next challenger') \n opponent.add_kill(1)\n\nclass Team():\n def __init__ (self,name):\n self.name = name\n self.heroes = []\n \n def add_hero(self,Hero):\n self.heroes.append(Hero)\n def remove_hero(self,name):\n length = len(self.heroes) #used to check if remove actually removed something\n #rebuild the list without the named hero; deleting while iterating would skip entries\n self.heroes = [hero for hero in self.heroes if hero.name != name]\n if length == len(self.heroes):\n return 0\n\n def view_all_heroes(self):\n print (f'Team {self.name} has {len(self.heroes)} heroes')\n for hero in self.heroes:\n print (f' - {hero.name} - + - K_{hero.kills} : D_{hero.deaths} -')\n\n def attack_team(self, otherTeam):\n print(f'\\n[[.. {self.name} ._.<VS>._. {otherTeam.name} ..]]')
\n #get length of teams, decrement it on loss, when one reaches 0, that team has lost\n team1_players_remaining = len(self.heroes)\n team2_players_remaining = len(otherTeam.heroes)\n while (team1_players_remaining > 0 and team2_players_remaining > 0):\n #attempt to grab a random hero; if that hero is already defeated, grab the next one in the list\n hero1 = self.heroes[random.randint(0,len(self.heroes)-1)]\n index = 0 #initialize once; resetting it inside the loop could never advance past a fallen hero\n while (hero1.is_alive() == False):\n hero1 = self.heroes[index]\n index += 1\n hero2 = otherTeam.heroes[random.randint(0,len(otherTeam.heroes)-1)]\n index = 0\n while (hero2.is_alive() == False):\n hero2 = otherTeam.heroes[index]\n index += 1\n #declare battle\n hero1.declare_battle_against(hero2)\n #after the long battle, one hero will have fallen, check and decrement the players_remaining\n if hero1.is_alive():\n team2_players_remaining -=1\n else:\n team1_players_remaining -=1\n #Heal all heroes and end combat, individual stats were added during battle.\n #self.revive_heroes()\n #otherTeam.revive_heroes()\n def revive_heroes(self, health = 100):\n for hero in self.heroes:\n hero.HP = hero.HP_max\n def stats(self,hero):\n print (f' - {hero.name} - + - K_{hero.kills} : D_{hero.deaths} -')\n\nclass Arena():\n def __init__(self):\n self.team_one = Team('team_one')\n self.team_two = Team('team_two')\n def create_ability(self):\n ability_name = input(f'creating a new ability..\\nWhat is the name of the ability?\\n(string):')\n ability_power = input(f'What is the attack power of {ability_name}?\\n(int):')\n ability = Ability(ability_name, int(ability_power)) #input() returns a string; damage rolls need an int\n return ability\n def create_weapon(self):\n weapon_name = input(f'creating a new weapon..\\nWhat is the name of the weapon?\\n(string):')\n weapon_power = input(f'What is the attack power of {weapon_name}?\\n(int):')\n weapon = Weapon(weapon_name, int(weapon_power))\n return weapon\n def create_armor(self):\n armor_name = input(f'creating new armor..\\nWhat is the armor called?\\n(string):')\n armor_power = input(f'What is the defense power of {armor_name}?\\n(int):')\n armor = Armor(armor_name, int(armor_power))\n return armor\n def create_hero(self):\n hero_name = input(f'creating a Hero..\\nWhat is the name of the hero?\\n(string):')\n print (f'What is the maximum health of {hero_name}?\\n(int):')\n hero_HP = input()\n new_hero = Hero(hero_name, int(hero_HP))\n selection = 0\n while (int(selection) != 4):\n print (f'{new_hero.name} HP:{new_hero.HP}/{new_hero.HP_max}')\n print (f'[Offensive skills: {len(new_hero.abilities)}] [Defensive skills: {len(new_hero.armors)}]')\n print (f'MENU:\\n1) Create Ability\\n2) Create Weapon\\n3) Create Armor\\n4) Finished')\n selection = input(':')\n if (int(selection) == 1):\n new_ability = self.create_ability()\n new_hero.add_ability(new_ability)\n print(f'New Ability {new_ability.name} created...')\n if (int(selection) == 2):\n new_weapon = self.create_weapon()\n new_hero.add_weapon(new_weapon)\n print(f'New Weapon {new_weapon.name} created...')\n if (int(selection) == 3):\n new_armor = self.create_armor()\n new_hero.add_armor(new_armor)\n print(f'New Armor {new_armor.name} created...')\n return new_hero\n def create_team(self, team_number):\n if (team_number != 1 and team_number != 2):\n print('Enter the team number as 1 or 2')\n return\n team_text = 'one' if team_number == 1 else 'two'\n num_heroes = 0\n while (( int(num_heroes) >= 1 and int(num_heroes)<10)==False):\n num_heroes = input(f'how many members do you want for team {team_text}?')\n for index in range(int(num_heroes)):\n new_hero = self.create_hero()\n if 
(team_number == 1):\n self.team_one.heroes.append(new_hero)\n else :\n self.team_two.heroes.append(new_hero)\n print(f'hero {new_hero.name} added to team #{team_number}')\n def team_battle(self):\n self.team_one.attack_team(self.team_two)\n def show_stats(self):\n team1_players_remaining = 0\n team2_players_remaining = 0\n team_1K = 0\n team_1D = 0\n team_2K = 0\n team_2D = 0\n for t1 in self.team_one.heroes:\n if t1.is_alive(): \n team1_players_remaining +=1\n team_1K += t1.kills\n team_1D += t1.deaths\n for t2 in self.team_two.heroes:\n if t2.is_alive(): \n team2_players_remaining +=1\n team_2K += t2.kills\n team_2D += t2.deaths\n winning_team = self.team_one if team1_players_remaining > 0 else self.team_two\n print(f'\\n\\n<<<<<##**** The battle is over! {winning_team.name} is declared the winner!! ****##>>>>>')\n print(f'>>> {self.team_one.name} Kills:{team_1K} Deaths:{team_1D} [] {self.team_two.name} Kills:{team_2K} Deaths:{team_2D} <<<')\n print(f'\\n< < < # * * Surviving Heroes: * * # > > >')\n for hero in winning_team.heroes:\n if hero.is_alive():\n winning_team.stats(hero)\n\n\ndef test_Ability(): \n print (\"\\n^^---__--Ability--__---^^\")\n print ('creating ability')\n OnePunch = Ability('OnePunch',999)\n print (f'flurry {OnePunch.name}:{OnePunch.attack()}, {OnePunch.name}:{OnePunch.attack()}, {OnePunch.name}:{OnePunch.attack()}')\n\ndef test_Armor():\n print (\"\\n^^--___--Armor--___--^^\")\n print ('creating armor')\n PalmCatch = Armor('PalmCatch',999)\n print (f'show defense skill {PalmCatch.name}:{PalmCatch.block()}')\n\ndef test_Hero():\n print (\"\\n^^--___--Hero--___--^^\")\n #Test default behavior\n TheOne = Hero('TheOne')\n print (f'Presenting our hero {TheOne.name}\\n HP({TheOne.HP}/{TheOne.HP_max})')\n\ndef test_Hero_Attack():\n print (\"\\n^^-__Hero_Attack-__^^\")\n print ('creating ability')\n OnePunch = Ability('OnePunch',30)\n print ('creating ability')\n TwoPunch = Ability('TwoPunch',35)\n Boxer = Hero('Boxer')\n print(f'created new hero {Boxer.name}')\n Boxer.add_ability(OnePunch)\n print (f'added ability, total of {len(Boxer.abilities)} abilities now')\n Boxer.add_ability(TwoPunch)\n print (f'added ability, total of {len(Boxer.abilities)} abilities now')\n print (f'Hero {Boxer.name} throws a One-Two attack for {Boxer.attack()} damamge')\n\ndef test_Hero_Defend():\n print (\"\\n^^-__Hero_Defend__-^^\")\n print ('creating armor')\n Duck = Armor('Duck',33)\n print ('creating armor')\n Dodge = Armor('Dodge',33)\n Dodger = Hero('Dodger')\n print (f'created new hero {Dodger.name}')\n Dodger.add_armor(Duck)\n print (f'added armor, total of {len(Dodger.armors)} armors now')\n Dodger.add_armor(Dodge)\n print (f'added armor, total of {len(Dodger.armors)} armors now')\n print (f'Hero {Dodger.name} dodges and ducks and defends {Dodger.defend()}')\n\ndef test_Hero_TakeDmg():\n print (\"\\n^^-__Hero_TakeDmg__-^^\")\n TestDummy = Hero('TestDummy')\n print (f'created new hero {TestDummy.name}')\n print('creating armor')\n Dodge = Armor('Dodge',33)\n TestDummy.add_armor(Dodge)\n print (f'added armor, total of {len(TestDummy.armors)} armors now')\n TestDummy.take_dmg(50)\n print (f'Hero {TestDummy.name} is attacked by a strength of 50\\n using his {len(TestDummy.armors)} skills he dodges \\n HP({TestDummy.HP}/{TestDummy.HP_max})')\n\ndef test_Hero_IsAlive():\n print (\"\\n^^-_Hero_IsAlive_-^^\")\n hero = Hero('hero')\n print (f'created new hero {hero.name}')\n hero.take_dmg(50)#should result in hero having 50 HP, but instead he has 3 skills...\n print ('dealing 50 damage to a 
new hero with no skills')\n print (f'{hero.name}: HP({hero.HP}/{hero.HP_max}) Is Alive: {hero.is_alive()}')\n hero.take_dmg(100)\n print ('dealing 100 damage to a new hero with no skills')\n print (f'{hero.name}: HP({hero.HP}/{hero.HP_max}) Is Alive: {hero.is_alive()}')\n\ndef test_Hero_declare_battle_against():\n print(\"\\n ^^-= Test_Battle =-^^\")\n Boxer = Hero('Boxer')\n OnePunch = Ability('OnePunch',40)\n TwoPunch = Ability('TwoPunch',45)\n Boxer.add_ability(OnePunch)\n Boxer.add_ability(TwoPunch)\n Duck = Armor('Duck',33)\n Dodge = Armor('Dodge',33)\n Boxer.add_armor(Duck)\n Boxer.add_armor(Dodge)\n\n Dodger = Hero('JellyFish')\n WeakStrike = Ability('Tendril Poke',22)\n TimidKick = Ability('Sting like a JellyFish',27)\n Dodger.add_ability(WeakStrike)\n Dodger.add_ability(TimidKick)\n Absorbtion = Armor('Squishy Absorbtion', 45)\n Float = Armor('Float like a JellyFish', 50)\n Dodger.add_armor(Absorbtion)\n Dodger.add_armor(Float)\n\n Boxer.declare_battle_against(Dodger)\n\ndef test_Weapon():\n print('\\n ^^-_- Test_Weapon -_-^^')\n Sword = Weapon('Sword', 200)\n print(f'Attack with sword, expected range of 100 and 200 = {Sword.attack()}')\n\ndef test_Team_attack_team():\n print('\\n ^_ Team VS Team _^')\n team_one = Team(\"One\")\n jodie = Hero(\"Jodie Foster\")\n aliens = Ability(\"Alien Friends\", 10000)\n jodie.add_ability(aliens)\n team_one.add_hero(jodie)\n team_two = Team(\"Two\")\n athena = Hero(\"Athena\")\n socks = Armor(\"Socks\", 10)\n athena.add_armor(socks)\n team_two.add_hero(athena)\n team_one.attack_team(team_two)\n\ndef test_Arena_create_hero():\n test_hero = Arena()\n test_hero.create_hero()\n\ndef test_Arena_create_team():\n test_create_team = Arena()\n test_create_team.create_team(1)\n\ndef test_Arena_Show_stats():\n print('\\n ^_ Arena Test with Stats _^')\n test_game = Arena()\n jodie = Hero(\"Jodie Foster\")\n aliens = Ability(\"Alien Friends\", 10000)\n jodie.add_ability(aliens)\n test_game.team_one.add_hero(jodie)\n athena = Hero(\"Athena\")\n socks = Armor(\"Socks\", 10)\n athena.add_armor(socks)\n test_game.team_two.add_hero(athena)\n test_game.team_battle()\n test_game.show_stats()\n \nif __name__ == \"__main__\":\n #Test scripts when ran from terminal\n\n #test_Hero()\n #test_Ability()\n #test_Armor()\n #test_Hero_Attack()\n #test_Hero_Defend()\n #test_Hero_TakeDmg()\n #test_Hero_IsAlive()\n #test_Hero_declare_battle_against()\n #test_Weapon()\n #test_Team_attack_team()\n #test_Arena_create_hero()\n #test_Arena_create_team()\n #test_Arena_Show_stats()\n\n #Normal gameplay that requires user input\n\n game_is_running = True\n arena = Arena()\n arena.create_team(1)\n arena.create_team(2)\n\n while game_is_running:\n arena.team_battle()\n arena.show_stats()\n play_again = input('Duel Again? Y/N? :')\n\n if play_again.lower() == 'n':\n print(f'Thanks for playing~')\n game_is_running = False\n else:\n arena.team_one.revive_heroes()\n arena.team_two.revive_heroes()\n" } ]
1
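The Arena in superheroes.py above drives everything through interactive prompts, but, as its test functions show, the classes can also be exercised directly. A minimal usage sketch (hypothetical heroes and stats, assuming superheroes.py is on the import path):

```python
from superheroes import Hero, Ability, Weapon, Armor

hero = Hero('Athena', HP_max=120)
hero.add_ability(Ability('Spear Thrust', 30))  # attack() rolls 0..30
hero.add_armor(Armor('Aegis', 25))             # block() rolls 0..25

foe = Hero('Ares', HP_max=120)
foe.add_ability(Weapon('Sword', 40))           # Weapon.attack(): full or half damage

hero.declare_battle_against(foe)               # alternates turns until one HP total hits 0
print(hero.is_alive(), foe.is_alive(), hero.kills, foe.deaths)
```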
szhu/sublime-ubuntu-installer
https://github.com/szhu/sublime-ubuntu-installer
b45c34e301d9727fa67a41cf3d13c9b975350587
0d98e7da040c4a3a398053f26a9e770135aaaadb
0c7a2e99b0ee7613a51424dd662ec87d757fdc74
refs/heads/master
2021-01-20T02:16:03.335929
2014-03-12T19:57:09
2014-03-12T19:57:09
16,707,275
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.6908272504806519, "alphanum_fraction": 0.7009156942367554, "avg_line_length": 32.38859939575195, "blob_id": "588e298b52692f16485ff48ac742017b65e6af01", "content_id": "27bd433f4b7ffb231b2424558b1798ae3466f925", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 6443, "license_type": "no_license", "max_line_length": 329, "num_lines": 193, "path": "/install_sublime.sh", "repo_name": "szhu/sublime-ubuntu-installer", "src_encoding": "UTF-8", "text": "#!/usr/bin/env bash\n\n#S=install_sublime.sh;curl -Lk goo.gl/9fzkCo>$S;chmod u+x $S;./$S\n\nsmso=`tput smso`\nrmso=`tput rmso`\nsmul=`tput smul`\nrmul=`tput rmul`\n\nREADME_MSG=\"This directory is used by the Sublime install script. You can delete this when it's finished installing.\"\nWORK_DIR='install_sublime_tmp'\nSUBL_TAR_URL='http://c758482.r82.cf2.rackcdn.com/Sublime%20Text%202.0.2%20x64.tar.bz2'\nSUBL_TAR_NAME='Sublime Text 2.0.2 x64.tar.bz2'\nSUBL_DIR='Sublime Text 2'\n\nINSTALL_DIR_RELATIVE='.local/share/sublime-text-2'\nINSTALL_DIR=~/\"$INSTALL_DIR_RELATIVE\"\nAPPS_DIR=~/'.local/share/applications'\nEXEC_FILE=~/'.local/share/sublime-text-2/sublime_text'\n\nPROMPT_CONTINUE=\"Press ${smso}[enter]${rmso} to continue, ${smso}[ctrl-C]${rmso} to exit.\"\nPROMPT_YN=\" ${smso}[y/n]${rmso} \"\nLINK_MSG=\"See ${smul}http://interestinglythere.com/berkeley/sublime${rmul} for more details.\\n\"\n\nPROMPT_INSTALL_BASHRC=\"Set the ${smul}subl${rmul} command to open/open files with with Sublime Text?\\nIt works just like the emacs, vim, and gedit commands!${PROMPT_YN}\"\nBASHRC_NONEED_MSG=\"no need; entry already in ~/.bashrc.\"\nBASHRC='alias subl=~/\".local/share/sublime-text-2/sublime_text\"'\nBASHRC_INSTALL_MSG=\"${smul}subl${rmul} will work with terminals opened from now on.\\nTo make the subl command work right here right now, do: \\n\\n source ~/.bashrc\\n\"\n\nPROMPT_INSTALL_MIMEAPPS=\"Set Sublime Text as the ${smul}default editor${rmul} when you double-click a text file?\\nOtherwise, right-click a file and select Open With > Sublime Text 2.${PROMPT_YN}\"\nMIME_TYPES=\"application/x-shellscript application/x-perl text/plain text/x-c++ text/x-chdr text/x-csrc text/x-dtd text/x-java text/mathml text/x-python text/x-sql text/x-scheme\"\n\nICON_MSG='An app icon has been installed to the Unity Dash (the \"start menu\" apps list).\\nYou can drag this to the Unity Launcher (dock/taskbar).'\nPROMPT_SHOW_ICON=\"Press ${smso}[enter]${rmso} to show this icon.\"\nDESKTOP_FILENAME=\"sublime-text-2.desktop\"\nDESKTOP_FILE=\"$APPS_DIR/$DESKTOP_FILENAME\"\nDESKTOP=$( cat <<EOF\n#!/usr/bin/env xdg-open\n\n[Desktop Entry]\nName=Sublime Text 2\nGenericName=Text Editor\nComment=Sophisticated text editor for code, html and prose\nExec=~/.local/share/sublime-text-2/sublime_text %F\nTerminal=false\nType=Application\nMimeType=text/plain;text/x-chdr;text/x-csrc;text/x-c++hdr;text/x-c++src;text/x-java;text/x-dsrc;text/x-pascal;text/x-perl;text/x-python;application/x-php;application/x-httpd-php3;application/x-httpd-php4;application/x-httpd-php5;application/xml;text/html;text/css;text/x-sql;text/x-diff;x-directory/normal;inode/directory;\nIcon=~/.local/share/sublime-text-2/Icon/256x256/sublime_text.png\nCategories=TextEditor;Development;Utility;\nStartupNotify=true\nActions=Window;Document;\n\nX-Desktop-File-Install-Version=0.21\n\n[Desktop Action Window]\nName=New Window\nExec=~/.local/share/sublime-text-2/sublime_text -n\nOnlyShowIn=Unity;\n\n[Desktop Action Document]\nName=New 
File\nExec=/usr/bin/subl --command new_file\nOnlyShowIn=Unity;\nEOF\n)\n\n\nlsb_release 2> /dev/null\nif [[ $? -ne 0 ]]; then\n\techo\n\techo -e \"This installer must be run from an Ubuntu machine. Please seat yourself at one\\n(e.g., the 2nd floor Soda machines or the hiveN.cs.berkeley.edu machines in\\n330) or ssh into one before running this script. If you're sshing, try:\\n\\n ssh ${smul}username${rmul}@hive10.cs.berkeley.edu\"\n\techo\n\techo -e \"$LINK_MSG\"\n\texit 1\nfi\n\necho\necho -e \"${smso} SO I HEARD YOU WANT TO INSTALL SUBLIME TEXT ${rmso}\"\necho\necho -e \"This script will download ${smul}Sublime Text 2.0.2${rmul} and install it into your user\\ndirectory at ~/${INSTALL_DIR_RELATIVE}.\\n\\nSublime will be added to the Ubuntu Unity Dash (apps button/\\\"start menu\\\" on\\nthe top-left) and optionally can be run with the ${smul}subl${rmul} command.\\n\\n$LINK_MSG\\n$PROMPT_CONTINUE\"\nread\n\nset -e\n# set -x\n\necho -e \"Preparing to install Sublime Text 2...\"\nmkdir -p \"$WORK_DIR\"\ncd \"$WORK_DIR\"\nmkdir -p \"$APPS_DIR\"\necho -e \"$README_MSG\">\"README\"\n\necho -en 'Removing possible old installations... '\nrm -rf \"$SUBL_TAR_NAME\" \"$SUBL_DIR\" \"$DESKTOP_FILE\"\nif [ -e \"$INSTALL_DIR\" ]; then\n\trm -rf \"$INSTALL_DIR\"\n\techo -e \"old version of Sublime Text uninstalled.\\n$PROMPT_CONTINUE\"\n\tread\nelse\n\techo -e \"no old installation exists.\"\nfi\n\necho -e \"Downloading Sublime Text 2...\"\ncurl -L \"$SUBL_TAR_URL\" > \"$SUBL_TAR_NAME\"\necho -e \"Downloaded.\"\necho -en \"Decompressing... \"\ntar -xf \"$SUBL_TAR_NAME\" --bzip2\necho -e \"done.\"\necho -en \"Moving files into place... \"\necho -e \"done.\"\ncp -r \"$SUBL_DIR\" \"$INSTALL_DIR\"\necho -en \"Installing launcher shortcut icon... \"\necho -e \"$DESKTOP\" | sed \"s,~,$HOME,g\" > \"$DESKTOP_FILE\"\nchmod u+x \"$DESKTOP_FILE\"\necho -e \"done.\"\n\necho -en \"Installing ${smul}subl${rmul} command... \"\nIS_BASHRC_INSTALLED=''\nif [ -e ~/.bashrc ]; then\n\tif [ -n \"`grep \"$BASHRC\" ~/.bashrc`\" ]; then\n\t\tIS_BASHRC_INSTALLED=true\n\t\techo -e \"$BASHRC_NONEED_MSG\"\n\tfi\nfi\nif [ -z $IS_BASHRC_INSTALLED ]; then\n\techo; echo\n\tloop=true\n\twhile \"$loop\"; do\n\t\techo -en \"$PROMPT_INSTALL_BASHRC\"\n\t\tread input\n\t\tif [ -n \"`echo -e \"$input\" | grep -i [yn]`\" ]; then\n\t\t\tloop=false\n\t\tfi\n\tdone\n\tif [ \"$input\" = \"y\" ]; then\n\t\techo >> ~/.bashrc\n\t\techo \"$BASHRC\" >> ~/.bashrc\n\t\techo -e \"$BASHRC_INSTALL_MSG\"\n\tfi\nfi\n\necho -en \"Checking mimetype defaults... \"\nIS_DEFAULT_MIMEAPP=''\nfor MIME_TYPE in $MIME_TYPES\ndo\n\tif [ \"`xdg-mime query default \"$MIME_TYPE\"`\" != \"$DESKTOP_FILENAME\" ]; then\n\t\tIS_DEFAULT_MIMEAPP=false\n\tfi\ndone\nif [ -z $IS_DEFAULT_MIMEAPP ]; then\n\techo -e 'already default app.'\nelse\n\techo; echo\n\tloop=true\n\twhile \"$loop\"; do\n\t\techo -en \"$PROMPT_INSTALL_MIMEAPPS\"\n\t\tread input\n\t\tif [ -n \"`echo -e \"$input\" | grep -i [yn]`\" ]; then\n\t\t\tloop=false\n\t\tfi\n\techo -en \"\"\n\tdone\n\tif [ \"$input\" = \"y\" ]; then\n\t\tfor MIME_TYPE in $MIME_TYPES\n\t\tdo\n\t\t\txdg-mime default \"$DESKTOP_FILENAME\" \"$MIME_TYPE\"\n\t\tdone\n\tfi\nfi\n\necho\necho -e \"Sublime Text 2 ${smul}installed${rmul}!\"\n\necho -en \"Cleaning up... 
\"\ncd ..\nrm -rf \"$WORK_DIR\"\necho -e \"done.\"\n\nif [ -n \"$DISPLAY\" ]; then\n\techo -e 'Opening Sublime Text...'\n\t$EXEC_FILE &\n\techo\n\techo -e 'Once it opens, please right-click the icon and select \"Lock to Launcher\".'\n\techo -e 'You can also find the icon in the Unity Dash (the \"start menu\" apps list).'\n\techo -e 'Sublime Text not working for some reason? Try logging out and back in.'\n\techo\nelse\n\techo\n\techo -e \"$ICON_MSG\"\n\techo\nfi\n\necho -e \"All done. You can run this installer again ($0) to reinstall\\nor uninstall. ${smul}Have fun with Sublime!${rmul}\"\nread" }, { "alpha_fraction": 0.6810752153396606, "alphanum_fraction": 0.6878692507743835, "avg_line_length": 30.737499237060547, "blob_id": "e537f578d7fba9d73cd5622836b14f772debf40b", "content_id": "b76baa69aae99c4415ae376d0cf43a84e332b610", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10156, "license_type": "no_license", "max_line_length": 322, "num_lines": 320, "path": "/install_sublime.py", "repo_name": "szhu/sublime-ubuntu-installer", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\nfrom subprocess import Popen\nfrom os.path import exists, join #, split\nfrom os import chdir\nfrom ast import literal_eval\n\nclass Cmd(Popen):\n\n\tdef __init__(self, *args, **kwargs):\n\t\tif len(args) == 0:\n\t\t\traise TypeError('requires at least 1 argument (0 given)')\n\t\tself.args = args\n\t\tself.return_stdout = kwargs.get('return_stdout')\n\t\tself.return_stderr = kwargs.get('return_stderr')\n\t\tself.return_self = kwargs.get('return_self')\n\t\tself.custom_stdout = kwargs.get('stdout')\n\n\tdef run(self):\n\t\tfrom subprocess import PIPE\n\t\trout = self.return_stdout\n\t\trerr = self.return_stderr\n\t\tPopen.__init__(self,\n\t\t\tself.args,\n\t\t\tstdout=self.custom_stdout or (PIPE if rout else None),\n\t\t\tstderr=PIPE if rerr else None,\n\t\t\tuniversal_newlines=True\n\t\t)\n\t\tself.wait()\n\t\tif self.returncode != 0: raise CmdError(self)\n\t\tif self.return_self: return self\n\t\telif rout and rerr: return self.stdout.read(), self.stderr.read()\n\t\telif rout: return self.stdout.read()\n\t\telif rerr: return self.stderr.read()\n\nclass CmdError(Exception):\n\tdef __init__(self, cmd):\n\t\tself.cmd = cmd\n\t\tself.args = cmd.args\n\t\tself.returncode = cmd.returncode\n\n\tdef __str__(self):\n\t\treturn \"The command %s exited with return code %d\" % (' '.join(self.args), self.returncode)\n\ndef cmd(*args, **kwargs):\n\treturn Cmd(*args, **kwargs).run()\n\ndef cmd_stdout(*args):\n\tstdout = cmd(*args, return_stdout=True)\n\t# print args, '->', stdout\n\treturn stdout\n\n\nclass Strings(object):\n\ttmp_dir = 'install_sublime_tmp'\n\ttmp_directory_info = \"This directory is used by the Sublime install script. 
You can delete this when it's finished installing.\"\n\ttmp_directory_info_file = \"README\"\n\n\tpkg_url = 'http://c758482.r82.cf2.rackcdn.com/Sublime%20Text%202.0.2%20x64.tar.bz2'\n\tpkg_name = 'Sublime Text 2.0.2 x64.tar.bz2'\n\tpkg_dir = 'Sublime Text 2'\n\n\tinstall_subl_dir = '~/.local/share/sublime-text-2'\n\tinstall_apps_dir = '~/.local/share/applications'\n\tinstall_exe = '~/.local/share/sublime-text-2/sublime_text'\n\n\tbashrc_file = '~/.bashrc'\n\tbashrc_contents = 'alias subl=~/\".local/share/sublime-text-2/sublime_text\"'\n\n\tmime_types = [\"application/x-shellscript\",\n\t\t\t\t\"application/x-perl\",\n\t\t\t\t\"text/plain\",\n\t\t\t\t\"text/x-c++\",\n\t\t\t\t\"text/x-chdr\",\n\t\t\t\t\"text/x-csrc\",\n\t\t\t\t\"text/x-dtd\",\n\t\t\t\t\"text/x-java\",\n\t\t\t\t\"text/mathml\",\n\t\t\t\t\"text/x-python\",\n\t\t\t\t\"text/x-sql\",\n\t\t\t\t\"text/x-scheme\"]\n\n\tdesktop_name = \"sublime-text-2.desktop\"\n\tdesktop_file = join(install_apps_dir, desktop_name)\n\tdesktop_contents = '''\n#!/usr/bin/env xdg-open\n\n[Desktop Entry]\nName=Sublime Text 2\nGenericName=Text Editor\nComment=Sophisticated text editor for code, html and prose\nExec=~/.local/share/sublime-text-2/sublime_text %F\nTerminal=false\nType=Application\nMimeType=text/plain;text/x-chdr;text/x-csrc;text/x-c++hdr;text/x-c++src;text/x-java;text/x-dsrc;text/x-pascal;text/x-perl;text/x-python;application/x-php;application/x-httpd-php3;application/x-httpd-php4;application/x-httpd-php5;application/xml;text/html;text/css;text/x-sql;text/x-diff;x-directory/normal;inode/directory;\nIcon=~/.local/share/sublime-text-2/Icon/256x256/sublime_text.png\nCategories=TextEditor;Development;Utility;\nStartupNotify=true\nActions=Window;Document;\n\nX-Desktop-File-Install-Version=0.21\n\n[Desktop Action Window]\nName=New Window\nExec=~/.local/share/sublime-text-2/sublime_text -n\nOnlyShowIn=Unity;\n\n[Desktop Action Document]\nName=New File\nExec=/usr/bin/subl --command new_file\nOnlyShowIn=Unity;\n'''\n\n\tconfirm_continue = \"Press <s>[enter]</s> to continue, <s>[ctrl-C]</s> to exit.\"\n\tyes_or_no = \" <s>[y/n]</s> \"\n\tsee_url = \"See <u>http://szhu.me/subl</u> for more details.\"\n\n\tbanner = \"<s> SO I HEARD YOU WANT TO INSTALL SUBLIME TEXT </s>\"\n\trequires_ubuntu = \"This installer must be run from an Ubuntu machine. Please seat yourself at one (e.g., the 2nd floor Soda machines or the hiveN.cs.berkeley.edu machines in 330) or ssh into one before running this script. If you're sshing, try:\\n\\n ssh <u>username</u>@hive10.cs.berkeley.edu\"\n\n\twhat_this_script_does = 'This script will download <u>Sublime Text 2.0.2</u> and install it into your user directory at ' + install_subl_dir + '\\n\\nSublime will be added to the Ubuntu Unity Dash (apps button/\"start menu\" on the top-left) and optionally can be run with the <u>subl</u> command.\\n'\n\n\tuninstalling = \"Uninstalling ...\"\n\tuninstalled = \"old version of Sublime Text uninstalled.\"\n\n\tpreparing = \"Preparing ... \"\n\tdownloading = \"Downloading ... \"\n\tinstalling = \"Installing ... \"\n\tinstalled = \"Installed.\"\n\tcleaning_up = \"Cleaning up... 
\"\n\tcleaned_up = \"done.\"\n\t\n\tcmd_install = \"Set the <u>subl</u> command to open/open files with with Sublime Text?\\nIt works just like the emacs, vim, and gedit commands!\"\n\tcmd_already_installed = \"The <u>subl</u> command is already aliased in \" + bashrc_file + \".\"\n\tcmd_caveats = \"<u>subl</u> will work with terminals opened from now on.\\nTo make the subl command work right here right now, do: \\n\\n source ~/.bashrc\\n\"\n\n\tmime_install = \"Set Sublime Text as the <u>default editor</u> when you double-click a text file?\\nOtherwise, right-click a file and select Open With > Sublime Text 2.\"\n\tmime_already_installed = \"Sublime is already set as the default text editor.\"\n\n\ticon_installed = 'An app icon has been installed to the Unity Dash (the \"start menu\" apps list).\\nYou can drag this to the Unity Launcher (dock/taskbar).'\n\tconfirm_show_icon = \"Press <s>[enter]</s> to show this icon.\"\n\ticon_caveats = 'Once it opens, please right-click the icon and select \"Lock to Launcher\". You can also find the icon in the Unity Dash (the \"start menu\" apps list). Sublime Text not working for some reason? Try logging out and back in.'\n\n\n\tall_done = \"All done. You can run this installer again to reinstall or uninstall.\"\n\tall_caveats = \"Does Sublime's icon look blurry? Log out and log back in.\"\n\tsee_url_problem = \"Something else not working? See <u>http://szhu.me/subl</u> for more details.\"\n\thave_fun = \"<u>Have fun with Sublime!</u>\"\n\nstrings = Strings()\n\ndef s(key):\n\tif hasattr(strings, key): return getattr(strings, key)\n\telse: raise AttributeError('string not found: %r' % key)\n\nss=cmd_stdout('tput', 'smso')\nrs=cmd_stdout('tput', 'rmso')\nsu=cmd_stdout('tput', 'smul')\nru=cmd_stdout('tput', 'rmul')\ndef lang(key):\n\tval = s(key)\n\tval = val.replace('<u>', su)\n\tval = val.replace('</u>', ru)\n\tval = val.replace('<s>', ss)\n\tval = val.replace('</s>', rs)\n\treturn val\n\ndef print_lang(key):\n\tprint lang(key),\n\tflush()\n\ndef print_langblock(key):\n\tfrom textwrap import fill\n\tval = lang(key)\n\tprint fill(val, width=80)\n\t# print val\n\tflush()\n\ndef path(key):\n\tfrom os.path import expanduser\n\treturn expanduser(s(key))\n\ndef user_multi(key):\n\tfrom os.path import expanduser\n\treturn s(key).replace('~', expanduser('~'))\n\ndef readfile(path):\n\tf = open(path, 'r')\n\ttry: contents = f.read()\n\texcept IOError: f.close(); raise\n\tf.close()\n\treturn contents\n\ndef writefile(path, contents):\n\tf = open(path, 'w')\n\ttry: f.write(contents)\n\texcept IOError: f.close(); raise\n\tf.close()\n\ndef appendfile(path, contents):\n\tf = open(path, 'a')\n\ttry: f.write(contents)\n\texcept IOError: f.close(); raise\n\tf.close()\n\ndef flush(dev=None):\n\tfrom sys import stdout\n\t(dev or stdout).flush()\n\ndef user_yn(prompt):\n\twhile True:\n\t\tresponse = raw_input(lang(prompt) + lang('yes_or_no')).lower()\n\t\tif 'y' in response and 'n' in response: continue\n\t\telif 'y' in response: return True\n\t\telif 'n' in response: return False\n\ndef main():\n\n\ttry: cmd('/usr/bin/env', 'lsb_release', return_stderr=1, return_self=1)\n\texcept CmdError:\n\t\tprint\n\t\tprint_langblock('requires_ubuntu')\n\t\tprint\n\t\tprint_langblock('see_url')\n\t\treturn 1\n\n\tprint\n\tprint_lang('banner')\n\tprint\n\tprint\n\tprint_langblock('what_this_script_does')\n\tprint\n\tprint_langblock('see_url')\n\tprint_langblock('confirm_continue')\n\traw_input()\n\n\tprint_lang('preparing')\n\tcmd('mkdir', '-p', 
s('tmp_dir'))\n\tchdir(s('tmp_dir'))\n\n\tcmd('mkdir', '-p', path('install_apps_dir'))\n\twritefile( s('tmp_directory_info_file') , s('tmp_directory_info') )\n\t\n\tcmd('rm', '-rf', s('pkg_name'), s('pkg_dir'))\n\tif exists(path('install_subl_dir')):\n\t\tprint\n\t\tprint_lang('uninstalling')\n\t\tcmd('rm', '-rf', path('install_subl_dir'), path('desktop_file'))\n\t\tprint_langblock('uninstalled')\n\t\tprint_langblock('confirm_continue')\n\t\traw_input()\n\n\tprint_lang('downloading')\n\tpkgfile = file(s('pkg_name'), 'w')\n\tcmd('curl', '-fsSL', s('pkg_url'), stdout=pkgfile)\n\tpkgfile.close()\n\n\tprint_lang('installing')\n\tcmd('tar', '-xf', s('pkg_name'), '--bzip2')\n\tcmd('mv', s('pkg_dir'), path('install_subl_dir'))\n\twritefile( path('desktop_file') , user_multi('desktop_contents') )\n\tcmd('chmod', 'u+x', path('desktop_file'))\n\tprint_langblock('installed')\n\n\t# print_lang('cleaning_up')\n\tchdir('..')\n\tcmd('rm', '-rf', path('tmp_dir'))\n\t# print_langblock('cleaned_up')\n\n\tif exists(path('bashrc_file')) and s('bashrc_contents') in readfile(path('bashrc_file')):\n\t\tprint_langblock('cmd_already_installed')\n\telif user_yn('cmd_install'):\n\t\tappendfile(path('bashrc_file'), '\\n' + s('bashrc_contents'))\n\t\tprint_langblock('cmd_caveats')\n\n\n\tisdefault = True\n\tfor mimetype in s('mime_types'):\n\t\tif cmd_stdout('xdg-mime', 'query', 'default', mimetype).strip() != s('desktop_name'):\n\t\t\tisdefault = False\n\t\t\tbreak\n\tif isdefault:\n\t\tprint_langblock('mime_already_installed')\n\telif user_yn('mime_install'):\n\t\tfor mimetype in s('mime_types'):\n\t\t\tcmd('xdg-mime', 'default', s('desktop_name'), mimetype)\n\n\n\tlauncher_items = cmd_stdout('gsettings', 'get', 'com.canonical.Unity.Launcher', 'favorites')\n\tlauncher_items = literal_eval(launcher_items)\n\t# print launcher_items\n\tsubl_item = s('desktop_name')\n\tif subl_item not in launcher_items:\n\t\tlauncher_items.append(subl_item)\n\t# print launcher_items\n\tcmd('gsettings', 'set', 'com.canonical.Unity.Launcher', 'favorites', repr(launcher_items))\n\n\t# if [ -n \"$DISPLAY\" ]; then\n\t# \tprint -e 'Opening Sublime Text...'\n\t# \t$EXEC_FILE &\n\n\tprint\n\tprint_langblock('all_done')\n\tprint_langblock('all_caveats')\n\tprint_langblock('see_url_problem')\n\tprint\n\tprint_langblock('have_fun')\n\tprint\n\tprint\n\nif __name__ == '__main__':\n\tfrom sys import exit\n\ttry:\n\t\texit(main() or 0)\n\texcept CmdError, exc:\n\t\tprint exc\n\t\texit(1)\n\texcept (KeyboardInterrupt, EOFError):\n\t\tprint\n\t\texit(-1)\n" }, { "alpha_fraction": 0.7094240784645081, "alphanum_fraction": 0.7120419144630432, "avg_line_length": 62.5, "blob_id": "04ddcdd3bc48bf2e34d9305934176b8c2391083b", "content_id": "06dffbe7ca4b528e03fc8dd5626139abd89f120c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 382, "license_type": "no_license", "max_line_length": 192, "num_lines": 6, "path": "/README.md", "repo_name": "szhu/sublime-ubuntu-installer", "src_encoding": "UTF-8", "text": "sublime-ubuntu-standalone-installer\n===================================\n\nInstall Sublime Text 2 on any Ubuntu machine, sans root, effortlessly. This script was created for use on the Ubuntu lab computers at UC Berkeley, but should work with any Ubuntu installation.\n\n**Note:** The Bash script, `install_sublime.sh`, is outdated. `install_sublime.py` is the current install script.\n\n" } ]
3
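A note on the `install_sublime.py` listing above: its `lang()` helper turns lightweight `<u>`/`<s>` tags into terminal escape sequences captured from `tput`. Below is a minimal, self-contained sketch of that technique in Python 3 rather than the listing's Python 2; the tag names and `tput` capabilities mirror the listing, while the graceful fallback when `tput` is missing is an assumption.

    # Tag-to-terminal-escape substitution in the style of install_sublime.py's lang().
    # Assumes a POSIX system with `tput`; degrades to plain text when it is missing.
    import subprocess

    def tput(capability):
        # capture a terminfo escape sequence ('' if tput is unavailable)
        try:
            return subprocess.run(['tput', capability],
                                  capture_output=True, text=True).stdout
        except OSError:
            return ''

    TAGS = {
        '<u>': tput('smul'), '</u>': tput('rmul'),  # underline on / off
        '<s>': tput('smso'), '</s>': tput('rmso'),  # standout on / off
    }

    def lang(text):
        for tag, escape in TAGS.items():
            text = text.replace(tag, escape)
        return text

    print(lang('Press <s>[enter]</s> to continue, or see <u>http://szhu.me/subl</u>.'))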
ghurone/BuscadorCep
https://github.com/ghurone/BuscadorCep
d8f86ba3312dda85aad1d5ebaacbc76d5d185924
3262a69ad5dbee39645e2583385e951419c14752
3e75cbc2f152e6026ab26ea77efa69017b93f82f
refs/heads/main
2022-12-29T19:26:19.221032
2020-10-08T01:45:18
2020-10-08T01:45:18
302,190,113
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.55524080991745, "alphanum_fraction": 0.5600970983505249, "avg_line_length": 29.85714340209961, "blob_id": "9b5b2e0f8e26a9860932619d69d4ec8fdb221f13", "content_id": "ab5e27cc8ad9912417f94f01c9c5e57c2fc5aaa7", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2487, "license_type": "permissive", "max_line_length": 110, "num_lines": 77, "path": "/buscarcep/cep.py", "repo_name": "ghurone/BuscadorCep", "src_encoding": "UTF-8", "text": "import requests\r\nfrom bs4 import BeautifulSoup\r\n\r\n\r\nclass Endereco:\r\n def __init__(self, args = ()):\r\n self.rua, self.bairro, self.cidade, self.cep = args\r\n self.all = args\r\n \r\n def __repr__(self):\r\n return f\"Endereco(rua='{self.rua}', bairro='{self.bairro}', cidade='{self.cidade}', cep='{self.cep}')\"\r\n \r\n def __str__(self):\r\n return self.__repr__()\r\n \r\n def __getitem__(self, key):\r\n return self.all[key]\r\n \r\n \r\ndef buscar_cep(cep):\r\n \"\"\" FUNÇÃO PRINCIPAL\r\n Insira uma string com os números do CEP, e será retornado um objeto\r\n Endereco, com as informações da rua, bairro, cidade e irônicamente o\r\n próprio CEP\r\n \"\"\"\r\n if isinstance(cep, str):\r\n cep = cep.replace('-', '').replace(' ', '')\r\n if len(cep) != 8:\r\n raise Exception('CEP Inválido')\r\n else:\r\n raise TypeError('Digite o CEP em uma string')\r\n \r\n req = request(cep)\r\n\r\n if isinstance(req, requests.models.Response):\r\n list_values = parser(req)\r\n \r\n if list_values != 1:\r\n texto_clean = text_cleaner(list_values)\r\n \r\n return Endereco(tuple(texto_clean))\r\n else:\r\n raise Exception('CEP não encontrado')\r\n \r\n elif isinstance(req, int):\r\n raise Exception(f'Error ao checar o CEP. Status Code: {req}')\r\n\r\n\r\ndef request(cep):\r\n \"\"\"Função que recebe um cep e faz a requisição no site dos Correios\"\"\"\r\n \r\n url = 'http://www.buscacep.correios.com.br/sistemas/buscacep/resultadoBuscaCepEndereco.cfm'\r\n payload = {'relaxation': cep, 'tipoCEP': 'ALL', 'semelhante': 'N'}\r\n \r\n requisicao = requests.post(url, data=payload)\r\n if requisicao.status_code == 200:\r\n return requisicao\r\n else: \r\n return requisicao.status_code\r\n \r\n \r\ndef parser(req):\r\n \"\"\"Função que faz o parser do texto em html do request\"\"\"\r\n \r\n try:\r\n soup = BeautifulSoup(req.text, \"html.parser\")\r\n value_cells = soup.find('table', attrs={'class': 'tmptabela'})\r\n return list(value_cells.findAll('tr'))\r\n except:\r\n return 1\r\n\r\ndef text_cleaner(l_values):\r\n \"\"\"Funcao que arruma o texto, deixando mais visivel\"\"\"\r\n \r\n texto_clean = [value.get_text().strip() for value in l_values[1].findAll('td')]\r\n texto_clean[0] = texto_clean[0][:texto_clean[0].find('-') - 1]\r\n return texto_clean\r\n \r\n " }, { "alpha_fraction": 0.3794076144695282, "alphanum_fraction": 0.4414668679237366, "avg_line_length": 21.15625, "blob_id": "25f5233ca1a6a9bc53484c599c8b032dbe023b67", "content_id": "300e18c721f02345445df25d4f3e7df6d5825228", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 726, "license_type": "permissive", "max_line_length": 84, "num_lines": 32, "path": "/README.md", "repo_name": "ghurone/BuscadorCep", "src_encoding": "UTF-8", "text": "# Buscar CEP\n\n## Uma biblioteca para encontrar endereços pelo CEP\n\n### Como instalar\n---------------------------------------------------\n pip install buscarcep\n\n### Como usar\n---------------------------------------------------\n 
>>> from buscarcep import buscar_cep\n >>> e = buscar_cep('01001-000')\n >>> e\n Endereco(rua='Praça da Sé', bairro='Sé', cidade='São Paulo/SP', cep='01001-000')\n >>> e.all\n ('Praça da Sé', 'Sé', 'São Paulo/SP', '01001-000')\n >>> e[0]\n 'Praça da Sé'\n >>> e[1]\n 'Sé'\n >>> e[2]\n 'São Paulo/SP'\n >>> e[3]\n '01001-000'\n >>> e.rua\n 'Praça da Sé'\n >>> e.bairro\n 'Sé'\n >>> e.cidade\n 'São Paulo/SP'\n >>> e.cep\n '01001-000'\n" }, { "alpha_fraction": 0.4833333194255829, "alphanum_fraction": 0.5833333134651184, "avg_line_length": 13.25, "blob_id": "61e1d5be5dda3ea6fe584fe960dbcaea781098aa", "content_id": "2ddb7c3728bcb2fac639f3deaf2a66dec492499a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 120, "license_type": "permissive", "max_line_length": 35, "num_lines": 8, "path": "/buscarcep/__init__.py", "repo_name": "ghurone/BuscadorCep", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Oct 7 21:37:04 2020\r\n\r\n@author: Ghuron\r\n\"\"\"\r\n\r\nfrom .cep import buscar_cep" }, { "alpha_fraction": 0.5911853909492493, "alphanum_fraction": 0.5972644090652466, "avg_line_length": 31, "blob_id": "e5cc00ed7a430a5959be7499b4530268fa988653", "content_id": "5ebc0fc8ef89e0771ffb05c2103bb8cccbd5af9f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 659, "license_type": "permissive", "max_line_length": 83, "num_lines": 20, "path": "/setup.py", "repo_name": "ghurone/BuscadorCep", "src_encoding": "UTF-8", "text": "from setuptools import setup\r\n\r\nsetup(\r\n name = 'buscarcep',\r\n version = '1.0.0',\r\n author = 'Erick Ghuron',\r\n author_email = '[email protected]',\r\n packages = ['buscarcep'],\r\n description = 'Uma simples biblioteca que permite buscar um endereço pelo cep',\r\n url = 'https://github.com/ghurone/GhuFrac',\r\n license = 'MIT',\r\n keywords = 'cep correios buscar ghuron',\r\n classifiers = [\r\n 'Development Status :: 5 - Production/Stable',\r\n 'Intended Audience :: Developers',\r\n 'License :: OSI Approved :: MIT License',\r\n 'Natural Language :: Portuguese (Brazilian)',\r\n 'Operating System :: OS Independent'\r\n ]\r\n)" } ]
4
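On the `buscarcep` package above: `buscar_cep()` raises a plain `Exception` both for a malformed CEP and for one the Correios lookup cannot find, so callers need a broad handler to tell "no result" apart from a programming error. A hedged usage sketch follows; the `buscar_cep` and `Endereco` names come from the listing, while the return-None-on-failure policy is an assumption, not part of the library.

    # Defensive wrapper around buscar_cep(): returns None on lookup failure
    # instead of propagating the generic Exception raised by cep.py.
    from buscarcep import buscar_cep

    def try_buscar_cep(cep):
        try:
            return buscar_cep(cep)    # Endereco(rua=..., bairro=..., cidade=..., cep=...)
        except TypeError:
            raise                     # caller bug: the CEP must be passed as a string
        except Exception as exc:      # invalid CEP, CEP not found, or HTTP error
            print('lookup failed for %s: %s' % (cep, exc))
            return None

    endereco = try_buscar_cep('01001-000')
    if endereco is not None:
        print(endereco.rua, endereco.cidade)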
Sergeres/vk_analyse
https://github.com/Sergeres/vk_analyse
dc32095bb357a7c9488ce2c667b1d9261188a431
1ea3dfbb3add2aa28ceb24974fc9d0b2d27aa52b
64d351e7c037bafed2f3483f619cf75138fdaf23
refs/heads/master
2020-08-26T21:06:33.770887
2019-12-18T21:29:31
2019-12-18T21:29:31
217,147,171
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5308211445808411, "alphanum_fraction": 0.5447614789009094, "avg_line_length": 32.757354736328125, "blob_id": "c6a9eccf33287f52efc2ad164c44c55c0443ea5c", "content_id": "ee2a78bbb6628057e23d061d787c551461ce990f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4591, "license_type": "no_license", "max_line_length": 112, "num_lines": 136, "path": "/main.py", "repo_name": "Sergeres/vk_analyse", "src_encoding": "UTF-8", "text": "import vk\nimport db\nimport time\nimport datetime\n\nsession = vk.Session(access_token='d5b441ccd5b441ccd5b441cc0bd5d94752dd5b4d5b441cc883ce57ed215c145977b71cd')\napi = vk.API(session)\nv = 5.103\n\n\ndef get_users():\n response = api.groups.getMembers(group_id='prcom_vyatsu', v=v, offset='0')\n members = []\n offset = 0\n count = 1\n while offset < response['count']:\n response = api.groups.getMembers(group_id='prcom_vyatsu', v=v, offset=offset)\n offset += 1000\n for member_id in response['items']:\n count += 1\n members.append(api.users.get(user_ids=member_id, v=v, fields='bdate, sex, education'))\n print(members)\n return members\n\n\ndef get_communities(members):\n member_communities = []\n for member in members:\n # print(member)\n for info in member:\n member_id = info['id']\n try:\n response = api.users.getSubscriptions(user_id=member_id, v=v, extended=1)\n subscriptions = response['items']\n all_communities = []\n for community in subscriptions:\n community_name = community['name']\n community_id = community['id']\n\n all_communities.append({'id': community_id, 'name': community_name})\n member_communities.append({'id': member_id, 'subscriptions': all_communities})\n except:\n print('profile is private')\n return member_communities\n\n\ndef get_vsu_group():\n vsu_group = api.groups.getById(group_id='prcom_vyatsu', v=v, fields='description')\n return vsu_group\n\n\ndef get_vsu_posts():\n counter = 0\n offset = 0\n response = api.wall.get(owner_id='-108366262', v=v, offset=offset, filter='owner')\n count = response['count']\n # print(count)\n posts = []\n while offset < count:\n response = api.wall.get(owner_id='-108366262', v=v, count=count, offset=offset, filter='owner')\n offset += 100\n for post in response['items']:\n date = datetime.datetime.fromtimestamp(post['date'])\n date = date.strftime('%Y-%m-%d')\n time = datetime.datetime.fromtimestamp(post['date'])\n time = time.strftime('%H:%M:%S')\n reposts = post['reposts']['count']\n comments = post['comments']['count']\n likes = post['likes']['count']\n try:\n views = post['views']['count']\n except:\n views = None\n posts.append({'postID': post['id'], 'likes': likes, 'comments': comments, 'views': views,\n 'reposts': reposts, 'date': date, 'time': time, 'text': post['text']})\n counter += 1\n if counter > count:\n break\n return posts\n\n\ndef get_activity(posts, users_ids):\n activity = []\n for post in posts:\n #print(post)\n post_id = post['postID']\n # print(post_id)\n likes_list = api.likes.getList(type='post', item_id=post_id, v=v, filter='likes', owner_id='-108366262')\n comments_list = api.wall.getComments(owner_id='-108366262', post_id=post_id, v=v)\n # print(comments_list)\n # print(comments_list)\n # print(likes_list)\n for user_id in users_ids:\n like_flag = 0\n comment_flag = 0\n if likes_list['count'] != 0:\n if user_id in likes_list['items']:\n like_flag = 1\n else:\n like_flag = 0\n # print(comments_list['items'])\n if comments_list['items'] is not None:\n for items in comments_list['items']:\n if user_id == 
items['from_id']:\n comment_flag = 1\n break\n else:\n comment_flag = 0\n if like_flag == 0 and comment_flag == 0:\n continue\n else:\n activity.append({'userID': user_id, 'postID': post_id,\n 'like': like_flag, 'comment': comment_flag})\n # print(*activity, sep='\\n')\n return activity\n\n\ndef main():\n db.create_tables()\n members = get_users()\n db.members_insert(members)\n member_communities = get_communities(members)\n db.member_community_insert(member_communities)\n vsu_group = get_vsu_group()\n db.vsu_community_insert(vsu_group)\n users = db.select_users_ids()\n posts = get_vsu_posts()\n db.insert_posts(posts)\n activities = get_activity(posts, users)\n db.insert_activities(activities)\n\n\nstart_time = time.time()\nprint(start_time)\nmain()\nprint(time.time() - start_time)\n" }, { "alpha_fraction": 0.5681831240653992, "alphanum_fraction": 0.6301520466804504, "avg_line_length": 55.714752197265625, "blob_id": "9108056a3471748617ac34b0805cc393d08d2873", "content_id": "26814e0c6a71d1c0155222d79d0b3504decda293", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 17828, "license_type": "no_license", "max_line_length": 311, "num_lines": 305, "path": "/jinja.py", "repo_name": "Sergeres/vk_analyse", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nimport sqlite3\nimport vk\nimport pandas as pand\nimport db\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport PopularWords\nimport wordcloud\nfrom PIL import Image\nfrom jinja2 import Environment, FunctionLoader, PackageLoader, PrefixLoader, DictLoader, FileSystemLoader\n\n\ndef create_conn(db_file):\n conn = None\n conn = sqlite3.connect(db_file)\n return conn\n\n\ndef select_mem(conn):\n rows = []\n cur = conn.cursor()\n cur.execute(\"SELECT count(name) FROM VSU_Member WHERE age < 18\")\n rows.append(cur.fetchall()[0])\n cur.execute(\"SELECT count(name) FROM VSU_Member WHERE age >= 18 and age < 21\")\n rows.append(cur.fetchall()[0])\n cur.execute(\"SELECT count(name) FROM VSU_Member WHERE age >= 21 and age < 24\")\n rows.append(cur.fetchall()[0])\n cur.execute(\"SELECT count(name) FROM VSU_Member WHERE age >= 24 and age < 27\")\n rows.append(cur.fetchall()[0])\n cur.execute(\"SELECT count(name) FROM VSU_Member WHERE age >= 27 and age < 30\")\n rows.append(cur.fetchall()[0])\n cur.execute(\"SELECT count(name) FROM VSU_Member WHERE age >= 30 and age < 35\")\n rows.append(cur.fetchall()[0])\n cur.execute(\"SELECT count(name) FROM VSU_Member WHERE age >= 35 and age < 45\")\n rows.append(cur.fetchall()[0])\n cur.execute(\"SELECT count(name) FROM VSU_Member WHERE age > 45\")\n rows.append(cur.fetchall()[0])\n rows = list(sum(rows, ()))\n summa = sum(rows)\n x_row = [\"< 18\",\"18-21\",\"21-24\",\"24-27\",\"27-30\",\"30-35\",\"35-45\",\"> 45\"]\n dataframe = pand.DataFrame()\n dataframe['Количество'] = rows\n dataframe['Категории'] = x_row\n agraph = dataframe.plot(x='Категории', kind='bar', color='teal')\n agraph.set(xlabel=\"Категории возрастов\", ylabel=\"Количество\")\n plt.tight_layout()\n plt.savefig('templates/screenshots/categoryGroupscount.png')\n for i in range(rows.__len__()):\n if rows[i] == 0:\n continue\n else:\n rows[i] = rows[i] * 100 / summa\n dataframe = pand.DataFrame()\n dataframe['Проценты'] = rows\n dataframe['Категории'] = x_row\n agraph = dataframe.plot(x = 'Категории', kind = 'bar', color = 'teal')\n agraph.set(xlabel = \"Категории возрастов\", ylabel = \"Проценты\")\n plt.tight_layout()\n plt.savefig('templates/screenshots/categoryGroups.png')\n 
plt.close('all')\n return rows\n\n\ndef select_member_noedc(conn):\n rows = []\n cur = conn.cursor()\n cur.execute(\"SELECT count(name) FROM VSU_Member WHERE age < 18 and university is null \")\n rows.append(cur.fetchall()[0])\n cur.execute(\"SELECT count(name) FROM VSU_Member WHERE age >= 18 and age < 21 and university is null \")\n rows.append(cur.fetchall()[0])\n cur.execute(\"SELECT count(name) FROM VSU_Member WHERE age >= 21 and age < 24 and university is null \")\n rows.append(cur.fetchall()[0])\n cur.execute(\"SELECT count(name) FROM VSU_Member WHERE age >= 24 and age < 27 and university is null \")\n rows.append(cur.fetchall()[0])\n cur.execute(\"SELECT count(name) FROM VSU_Member WHERE age >= 27 and age < 30 and university is null \")\n rows.append(cur.fetchall()[0])\n cur.execute(\"SELECT count(name) FROM VSU_Member WHERE age >= 30 and age < 35 and university is null \")\n rows.append(cur.fetchall()[0])\n cur.execute(\"SELECT count(name) FROM VSU_Member WHERE age >= 35 and age < 45 and university is null \")\n rows.append(cur.fetchall()[0])\n cur.execute(\"SELECT count(name) FROM VSU_Member WHERE age > 45 and university is null \")\n rows.append(cur.fetchall()[0])\n rows = list(sum(rows, ()))\n x_row = [\"< 18\",\"18-21\",\"21-24\",\"24-27\",\"27-30\",\"30-35\",\"35-45\",\"> 45\"]\n dataframe = pand.DataFrame()\n dataframe['Количество'] = rows\n dataframe['Категории'] = x_row\n agraph = dataframe.plot(x = 'Категории', kind = 'bar', color = 'teal')\n agraph.set(xlabel = \"Категории возрастов\", ylabel = \"Количество\")\n plt.tight_layout()\n plt.savefig('templates/screenshots/membersNOEDC.png')\n plt.close('all')\n return rows\n\n\ndef top_community(conn, gen):\n cur = conn.cursor()\n cur.execute(\"select VSU_Member_Community.name, count(*) from VSU_Member_Community join VSU_Member on VSU_Member_Community.memberID = VSU_Member.id where VSU_Member.gender =:gen group by VSU_Member_Community.name order by count(*) DESC limit 5\", {\"gen\": gen})\n data = cur.fetchall()\n name = []\n count = []\n for row in data:\n name.append(row[0])\n count.append(row[1])\n dataframe = pand.DataFrame()\n dataframe[\"Имена\"] = name\n dataframe[\"Количество\"] = count\n cgraph = dataframe.plot(x = 'Имена', kind = 'bar', color = 'c')\n cgraph.set(xlabel = \"Названия групп\", ylabel = \"Количество\")\n plt.tight_layout()\n if gen == 'Муж.':\n plt.savefig('templates/screenshots/mensTOP5.png')\n else:\n plt.savefig('templates/screenshots/womensTOP5.png')\n\n\ndef top_ages(conn, gen):\n data = []\n cur = conn.cursor()\n cur.execute(\"select VSU_Member_Community.name, count(*) from VSU_Member_Community join VSU_Member on VSU_Member_Community.memberID = VSU_Member.id where VSU_Member.gender =:gen and VSU_Member.age < 18 group by VSU_Member_Community.name order by count(*) DESC limit 5\", {\"gen\": gen})\n data.append(cur.fetchall())\n cur.execute(\"select VSU_Member_Community.name, count(*) from VSU_Member_Community join VSU_Member on VSU_Member_Community.memberID = VSU_Member.id where VSU_Member.gender =:gen and VSU_Member.age >= 18 and VSU_Member.age < 21 group by VSU_Member_Community.name order by count(*) DESC limit 5\", {\"gen\": gen})\n data.append(cur.fetchall())\n cur.execute(\"select VSU_Member_Community.name, count(*) from VSU_Member_Community join VSU_Member on VSU_Member_Community.memberID = VSU_Member.id where VSU_Member.gender =:gen and VSU_Member.age >= 21 and VSU_Member.age < 24 group by VSU_Member_Community.name order by count(*) DESC limit 5\", {\"gen\": gen})\n 
data.append(cur.fetchall())\n cur.execute(\"select VSU_Member_Community.name, count(*) from VSU_Member_Community join VSU_Member on VSU_Member_Community.memberID = VSU_Member.id where VSU_Member.gender =:gen and VSU_Member.age >= 24 and VSU_Member.age < 27 group by VSU_Member_Community.name order by count(*) DESC limit 5\", {\"gen\": gen})\n data.append(cur.fetchall())\n cur.execute(\"select VSU_Member_Community.name, count(*) from VSU_Member_Community join VSU_Member on VSU_Member_Community.memberID = VSU_Member.id where VSU_Member.gender =:gen and VSU_Member.age >= 27 and VSU_Member.age < 30 group by VSU_Member_Community.name order by count(*) DESC limit 5\", {\"gen\": gen})\n data.append(cur.fetchall())\n cur.execute(\"select VSU_Member_Community.name, count(*) from VSU_Member_Community join VSU_Member on VSU_Member_Community.memberID = VSU_Member.id where VSU_Member.gender =:gen and VSU_Member.age >= 30 and VSU_Member.age < 35 group by VSU_Member_Community.name order by count(*) DESC limit 5\", {\"gen\": gen})\n data.append(cur.fetchall())\n cur.execute(\"select VSU_Member_Community.name, count(*) from VSU_Member_Community join VSU_Member on VSU_Member_Community.memberID = VSU_Member.id where VSU_Member.gender =:gen and VSU_Member.age >= 35 and VSU_Member.age < 45 group by VSU_Member_Community.name order by count(*) DESC limit 5\", {\"gen\": gen})\n data.append(cur.fetchall())\n cur.execute(\"select VSU_Member_Community.name, count(*) from VSU_Member_Community join VSU_Member on VSU_Member_Community.memberID = VSU_Member.id where VSU_Member.gender =:gen and VSU_Member.age >= 45 group by VSU_Member_Community.name order by count(*) DESC limit 5\", {\"gen\": gen})\n data.append(cur.fetchall())\n x_row = [\"меньше 18\", \"от 18 до 21\", \"от 21 до 24\", \"от 24 до 27\", \"от 27 до 30\", \"от 30 до 35\", \"от 35 до 45\", \"больше 45\"]\n graphs = []\n for i in range(data.__len__()):\n name = []\n count = []\n for row in data[i]:\n name.append(row[0])\n count.append(row[1])\n dataframe = pand.DataFrame()\n dataframe[\"Имена\"] = name\n dataframe[\"Количество\"] = count\n if gen == 'Жен.':\n cgraph = dataframe.plot(x='Имена', kind='bar', color='c', title = \"Женщины \" + x_row[i])\n cgraph.set(xlabel=\"Названия групп\", ylabel=\"Количество\")\n plt.tight_layout()\n plt.savefig('templates/screenshots/top5_W'+str(i)+'.png')\n graphs.append('screenshots/top5_W'+str(i)+'.png')\n else:\n cgraph = dataframe.plot(x='Имена', kind='bar', color='c', title=\"Мужчины \" + x_row[i])\n cgraph.set(xlabel=\"Названия групп\", ylabel=\"Количество\")\n plt.tight_layout()\n plt.savefig('templates/screenshots/top5_M' + str(i) + '.png')\n graphs.append('screenshots/top5_M' + str(i) + '.png')\n return graphs\n\n\ndef peoplewoage(conn):\n data = []\n cur = conn.cursor()\n cur.execute(\"select count(*) from VSU_Member\")\n data.append(cur.fetchall()[0])\n cur.execute(\"select count(*) from VSU_Member where age not null\")\n data.append(cur.fetchall()[0])\n data = list(sum(data, ()))\n data[0] = data[0] - data[1]\n labels = 'Не указали возраст', 'Указали возраст'\n sizes = [(data[0]/(data[0] + data[1])*100), (data[1]/(data[0] + data[1])*100)]\n explode = (0, 0.1) # only \"explode\" the 2nd slice (i.e. 
'Hogs')\n fig1, ax1 = plt.subplots()\n ax1.pie(sizes, explode=explode, labels=labels, autopct='%1.1f%%', shadow=True, startangle=90)\n # ax1.axis('equal') # Equal aspect ratio ensures that pie is drawn as a circle.\n plt.title(\"Распределение пользователей по наличию возраста.\")\n plt.savefig('templates/screenshots/piemembers' + '.png')\n plt.close('all')\n\n\ndef toplike(conn):\n data, result = [], []\n href = []\n cur = conn.cursor()\n # cur.execute(\"select postID, count(*) from VSU_Member_Activity where like == 1 group by postID order by count(*) desc limit 5\")\n cur.execute(\"select * from VSU_Post order by likes desc limit 5\")\n data.append(cur.fetchall())\n for i in range(data[0].__len__()):\n result.append(['https://vk.com/prcom_vyatsu?w=wall-108366262_' + str(data[0][i][0]), data[0][i][5]])\n href.append('-108366262_' + str(data[0][i][0]))\n return result, href\n\n\ndef topcomment(conn):\n data, result = [], []\n cur = conn.cursor()\n cur.execute(\"select * from VSU_Post order by comments desc limit 5\")\n data.append(cur.fetchall())\n for i in range(data[0].__len__()):\n result.append(['https://vk.com/prcom_vyatsu?w=wall-108366262_' + str(data[0][i][0]), data[0][i][6]])\n return result\n\n\ndef timeanalyse(conn):\n timeset = []\n timeperiods = [\"publishTime >= '00-00-00' and publishTime < '01-00-00'\", \"publishTime >= '01-00-00' and publishTime < '02-00-00'\", \"publishTime >= '02-00-00' and publishTime < '03-00-00'\", \"publishTime >= '03-00-00' and publishTime < '04-00-00'\",\n \"publishTime >= '04-00-00' and publishTime < '05-00-00'\", \"publishTime >= '05-00-00' and publishTime < '06-00-00'\", \"publishTime >= '06-00-00' and publishTime < '07-00-00'\", \"publishTime >= '07-00-00' and publishTime < '08-00-00'\",\n \"publishTime >= '08-00-00' and publishTime < '09-00-00'\", \"publishTime >= '09-00-00' and publishTime < '10-00-00'\", \"publishTime >= '10-00-00' and publishTime < '11-00-00'\", \"publishTime >= '11-00-00' and publishTime < '12-00-00'\",\n \"publishTime >= '12-00-00' and publishTime < '13-00-00'\", \"publishTime >= '13-00-00' and publishTime < '14-00-00'\", \"publishTime >= '14-00-00' and publishTime < '15-00-00'\", \"publishTime >= '15-00-00' and publishTime < '16-00-00'\",\n \"publishTime >= '16-00-00' and publishTime < '17-00-00'\", \"publishTime >= '17-00-00' and publishTime < '18-00-00'\", \"publishTime >= '18-00-00' and publishTime < '19-00-00'\", \"publishTime >= '19-00-00' and publishTime < '20-00-00'\",\n \"publishTime >= '20-00-00' and publishTime < '21-00-00'\", \"publishTime >= '21-00-00' and publishTime < '22-00-00'\", \"publishTime >= '22-00-00' and publishTime < '23-00-00'\", \"publishTime >= '23-00-00' and publishTime < '24-00-00'\"]\n cur = conn.cursor()\n for i in timeperiods:\n cur.execute(\"select count(*), sum(likes) from VSU_Post where \" + (str)(i))\n timeset.append(cur.fetchall())\n y_row = []\n for i in timeset:\n if i[0][1] == None:\n y_row.append(0)\n else:\n y_row.append(i[0][1]/i[0][0])\n x_row = [\"0:00\", \"1:00\", \"2:00\", \"3:00\",\n \"4:00\", \"5:00\", \"6:00\", \"7:00\",\n \"8:00\", \"9:00\", \"10:00\", \"11:00\",\n \"12:00\", \"13:00\", \"14:00\", \"15:00\",\n \"16:00\", \"17:00\", \"18:00\", \"19:00\",\n \"20:00\", \"21:00\", \"22:00\", \"23:00\"]\n dataframe = pand.DataFrame()\n dataframe[\"Время\"] = x_row\n dataframe[\"Среднее кол-во лайков\"] = y_row\n cgraph = dataframe.plot(x='Время', kind='line', color='c')\n cgraph.set(xlabel=\"Время\", ylabel=\"Среднее кол-во лайков\")\n plt.tight_layout()\n plt.grid()\n 
plt.savefig('templates/screenshots/timespread.png')\n\n\ndef viewanalyse(conn):\n timeset = []\n timeperiods = [\"publishTime >= '00-00-00' and publishTime < '01-00-00'\", \"publishTime >= '01-00-00' and publishTime < '02-00-00'\", \"publishTime >= '02-00-00' and publishTime < '03-00-00'\", \"publishTime >= '03-00-00' and publishTime < '04-00-00'\",\n \"publishTime >= '04-00-00' and publishTime < '05-00-00'\", \"publishTime >= '05-00-00' and publishTime < '06-00-00'\", \"publishTime >= '06-00-00' and publishTime < '07-00-00'\", \"publishTime >= '07-00-00' and publishTime < '08-00-00'\",\n \"publishTime >= '08-00-00' and publishTime < '09-00-00'\", \"publishTime >= '09-00-00' and publishTime < '10-00-00'\", \"publishTime >= '10-00-00' and publishTime < '11-00-00'\", \"publishTime >= '11-00-00' and publishTime < '12-00-00'\",\n \"publishTime >= '12-00-00' and publishTime < '13-00-00'\", \"publishTime >= '13-00-00' and publishTime < '14-00-00'\", \"publishTime >= '14-00-00' and publishTime < '15-00-00'\", \"publishTime >= '15-00-00' and publishTime < '16-00-00'\",\n \"publishTime >= '16-00-00' and publishTime < '17-00-00'\", \"publishTime >= '17-00-00' and publishTime < '18-00-00'\", \"publishTime >= '18-00-00' and publishTime < '19-00-00'\", \"publishTime >= '19-00-00' and publishTime < '20-00-00'\",\n \"publishTime >= '20-00-00' and publishTime < '21-00-00'\", \"publishTime >= '21-00-00' and publishTime < '22-00-00'\", \"publishTime >= '22-00-00' and publishTime < '23-00-00'\", \"publishTime >= '23-00-00' and publishTime < '24-00-00'\"]\n cur = conn.cursor()\n for i in timeperiods:\n cur.execute(\"select count(*), sum(views) from VSU_Post where \" + (str)(i))\n timeset.append(cur.fetchall())\n y_row = []\n for i in timeset:\n if i[0][1] == None:\n y_row.append(0)\n else:\n y_row.append(i[0][1]/i[0][0])\n x_row = [\"0:00\", \"1:00\", \"2:00\", \"3:00\",\n \"4:00\", \"5:00\", \"6:00\", \"7:00\",\n \"8:00\", \"9:00\", \"10:00\", \"11:00\",\n \"12:00\", \"13:00\", \"14:00\", \"15:00\",\n \"16:00\", \"17:00\", \"18:00\", \"19:00\",\n \"20:00\", \"21:00\", \"22:00\", \"23:00\"]\n dataframe = pand.DataFrame()\n dataframe[\"Время\"] = x_row\n dataframe[\"Среднее кол-во просмотров\"] = y_row\n cgraph = dataframe.plot(x='Время', kind='line', color='c')\n cgraph.set(xlabel=\"Время\", ylabel=\"Среднее кол-во просмотров\")\n plt.tight_layout()\n plt.grid()\n plt.savefig('templates/screenshots/viewsspred.png')\n\ndef words_cloud():\n words = PopularWords.search_words()\n wc = wordcloud.WordCloud(width=2600, height=2200, background_color=\"white\", relative_scaling=1.0,\n collocations=False, min_font_size=10).generate_from_frequencies(dict(words))\n plt.axis(\"off\")\n plt.figure(figsize=(9, 6))\n plt.imshow(wc, interpolation=\"bilinear\")\n plt.xticks([])\n plt.yticks([])\n plt.tight_layout()\n plt.savefig('templates/screenshots/wordcloud.png')\n\nwords_cloud()\nenv = Environment(loader = FileSystemLoader('templates/'))\ntemplate = env.get_template('templateRE.html')\n\npeoplewoage(create_conn(db.generate_db_name()))\nselect_mem(create_conn(db.generate_db_name()))\ntop_community(create_conn(db.generate_db_name()), 'Жен.')\ntop_community(create_conn(db.generate_db_name()), 'Муж.')\ngraphs = top_ages(create_conn(db.generate_db_name()), 'Жен.')\ntgraphs = top_ages(create_conn(db.generate_db_name()), 'Муж.')\ndatas, href = toplike(create_conn(db.generate_db_name()))\ncomments = 
topcomment(create_conn(db.generate_db_name()))\nselect_member_noedc(create_conn(db.generate_db_name()))\ntimeanalyse(create_conn(db.generate_db_name()))\nviewanalyse(create_conn(db.generate_db_name()))\n\nwith open(\"templates/new.html\", \"w\", encoding='utf-8') as f:\n f.write(template.render(url1 = 'screenshots/categoryGroups.png', url2 = 'screenshots/womensTOP5.png', url3 = 'screenshots/mensTOP5.png', mems = graphs, mems0 = tgraphs,\n url4 = 'screenshots/piemembers.png', datas = datas, comments = comments, url5 = 'screenshots/membersNOEDC.png',\n url6 = 'screenshots/categoryGroupscount.png', url7='screenshots/viewsspred.png', url8='screenshots/timespread.png',\n url9 = 'screenshots/wordcloud.png'))\n\n" }, { "alpha_fraction": 0.5858668684959412, "alphanum_fraction": 0.5965489149093628, "avg_line_length": 28.707317352294922, "blob_id": "ac67d696e09fcccde7ea8a8825aa904254a74c58", "content_id": "8fe80bb2ac2e6b42dd96ac477cb2822b11d4bacc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1221, "license_type": "no_license", "max_line_length": 73, "num_lines": 41, "path": "/PopularWords.py", "repo_name": "Sergeres/vk_analyse", "src_encoding": "UTF-8", "text": "import db\nimport nltk\nimport pymorphy2\nfrom collections import Counter\nimport string\nimport re\n\n\ndef search_words():\n texts, count = db.select_posts_text()\n posts_percent = int((count / 100) * 30)\n stopwords_ru = nltk.corpus.stopwords.words('russian')\n morph = pymorphy2.MorphAnalyzer()\n words = []\n for text in texts[:posts_percent]:\n tokenizer = nltk.tokenize.TreebankWordTokenizer()\n tokens = tokenizer.tokenize(text)\n text = \" \".join(morph.normal_forms(token)[0] for token in tokens)\n tokens = tokenizer.tokenize(text)\n for token in tokens:\n if token in stopwords_ru:\n continue\n else:\n if re.search(r'[^а-яА-Я]', token):\n continue\n else:\n if len(token) >= 5:\n words.append(token)\n words_popularity = Counter(words)\n words_popularity = words_popularity.most_common(30)\n # print(words_popularity)\n # print(*words_popularity.most_common(10), sep=\"\\n\")\n words = []\n for word, count in words_popularity:\n words.append({'word': word, 'count': count})\n print(words)\n\n return words_popularity\n\n\nsearch_words()" }, { "alpha_fraction": 0.5243520140647888, "alphanum_fraction": 0.5294787883758545, "avg_line_length": 30.49327278137207, "blob_id": "8a5f5215edfe503c2676f293bc9306fd11bdbd33", "content_id": "586994f1e7cd01992665cf36ea4e917caffee6cb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7028, "license_type": "no_license", "max_line_length": 117, "num_lines": 223, "path": "/db.py", "repo_name": "Sergeres/vk_analyse", "src_encoding": "UTF-8", "text": "import sqlite3\nfrom datetime import datetime\nimport re\nimport os\n\n\ndef generate_db_name():\n year_now = str(datetime.now().year)\n month_now = str(datetime.now().month)\n day_now = str(datetime.now().day)\n\n date_now = year_now + '_' + month_now + '_' + day_now\n\n db_name = 'vk_members_' + str(date_now) + '.db'\n return db_name\n\n\ndef create_db(remove):\n db_name = generate_db_name()\n\n if os.path.isfile(db_name) and remove:\n os.remove(db_name)\n\n connection = sqlite3.connect(db_name)\n return connection\n\n\ndef create_tables():\n connection = create_db(True)\n cursor = connection.cursor()\n\n cursor.execute('CREATE TABLE VSU_Community'\n '(id integer primary key, '\n 'name text, '\n 'info text, '\n 'href text)')\n\n 
cursor.execute('CREATE TABLE VSU_Member'\n '(id integer primary key, '\n 'name varchar(100), '\n 'gender varchar(10), '\n 'age integer, '\n 'university varchar(150),'\n 'faculty varchar(150))')\n\n cursor.execute('CREATE TABLE VSU_Member_Activity'\n '(like integer,'\n 'repost integer,'\n 'comment integer,'\n 'postID integer,'\n 'memberID integer,'\n 'communityID integer,'\n 'foreign key(postID) references VSU_Post(id),'\n 'foreign key(memberID) references VSU_Member(id),'\n 'foreign key(communityID) references VSU_Community(id))')\n\n cursor.execute('CREATE TABLE VSU_Member_Community'\n '(memberID integer,'\n 'communityID integer, '\n 'href text, '\n 'name text,'\n 'foreign key(memberID) references VSU_Member(id))')\n\n cursor.execute('CREATE TABLE VSU_Post'\n '(id integer,'\n 'content text,'\n 'publishDate DATE,'\n 'publishTime TIME,'\n 'views integer,'\n 'likes integer,'\n 'comments integer,'\n 'reposts integer'\n ')')\n\n connection.commit()\n connection.close()\n\n\ndef members_insert(members):\n connection = create_db(False)\n cursor = connection.cursor()\n\n for member in members:\n for info in member:\n member_id = info['id']\n member_name = info['first_name'] + ' ' + info['last_name']\n university = None\n faculty = None\n try:\n if info['university']:\n university = info['university_name']\n if info['faculty']:\n faculty = info['faculty_name']\n except:\n continue\n gender = ''\n if info['sex'] == 1:\n gender = 'Жен.'\n else:\n gender = 'Муж.'\n\n bdate = ''\n if 'bdate' in info:\n bdate = re.match('([0-9]+?.[0-9]+?.[0-9]+)', info['bdate'])\n age = None\n if bdate:\n birth_date = bdate.group(0).split('.')\n day_bdate = int(birth_date[0])\n month_bdate = int(birth_date[1])\n year_bdate = int(birth_date[2])\n year_now = int(datetime.now().year)\n month_now = int(datetime.now().month)\n day_now = int(datetime.now().day)\n\n if day_now > day_bdate and month_now > month_bdate:\n age = str(year_now - year_bdate)\n else:\n age = str(year_now - year_bdate - 1)\n elif bdate is None:\n age = None\n\n cursor.execute('INSERT INTO VSU_Member(id, name, gender, age, university, faculty) '\n 'VALUES(?, ?, ?, ?, ?, ?)', [member_id, member_name, gender, age, university, faculty])\n\n connection.commit()\n connection.close()\n\n\ndef member_community_insert(members_communities):\n connection = create_db(False)\n cursor = connection.cursor()\n for member_communities in members_communities:\n member_id = member_communities['id']\n for subscription in member_communities['subscriptions']:\n community_id = subscription['id']\n community_name = subscription['name']\n\n cursor.execute('INSERT INTO VSU_Member_Community(memberID, communityID, href, name)'\n ' VALUES(?, ?, ?, ?)', [member_id, community_id, '', community_name])\n\n connection.commit()\n connection.close()\n\n\ndef vsu_community_insert(vsu_group):\n connection = create_db(False)\n cursor = connection.cursor()\n for item in vsu_group:\n group_id = item['id']\n group_name = item['name']\n description = item['description']\n href = ''\n\n cursor.execute('INSERT INTO VSU_Community(id, name, info, href)'\n 'VALUES(?, ?, ?, ?)', [group_id, group_name, description, href])\n\n connection.commit()\n connection.close()\n\n\ndef select_users_ids():\n connection = create_db(False)\n cursor = connection.cursor()\n\n cursor.execute('SELECT id FROM VSU_Member')\n users = cursor.fetchall()\n users = list(sum(users, ()))\n\n connection.commit()\n connection.close()\n return users\n\n\ndef select_posts_text():\n connection = create_db(False)\n cursor = 
connection.cursor()\n\n cursor.execute('SELECT count(*) FROM VSU_Post')\n count = cursor.fetchall()\n count = list(sum(count, ()))\n\n cursor.execute('SELECT content FROM VSU_Post ORDER BY likes DESC')\n texts = cursor.fetchall()\n texts = list(sum(texts, ()))\n\n connection.commit()\n connection.close()\n return texts, count[0]\n\n\ndef insert_activities(activities):\n connection = create_db(False)\n cursor = connection.cursor()\n for activity in activities:\n user_id = activity['userID']\n post_id = activity['postID']\n like = activity['like']\n comment = activity['comment']\n cursor.execute('INSERT INTO VSU_Member_Activity(like, repost, comment, postID, memberID, communityID )'\n 'VALUES(?, ?, ?, ?, ?, ?)', [like, 0, comment, post_id, user_id, 108366262])\n\n connection.commit()\n connection.close()\n\n\ndef insert_posts(posts):\n connection = create_db(False)\n cursor = connection.cursor()\n\n for post in posts:\n id = post['postID']\n likes = post['likes']\n comments = post['comments']\n reposts = post['reposts']\n date = post['date']\n time = post['time']\n text = post['text']\n views = post['views']\n cursor.execute('INSERT INTO VSU_Post(id, content, publishDate, publishTime, views, likes, comments, reposts)'\n 'VALUES (?, ?, ?, ?, ?, ?, ?, ?)', [id, text, date, time, views, likes, comments, reposts])\n\n connection.commit()\n connection.close()" }, { "alpha_fraction": 0.7968936562538147, "alphanum_fraction": 0.8040621280670166, "avg_line_length": 49.727272033691406, "blob_id": "121df08f2f202f4121d3fc2f8e21efbcdf04c883", "content_id": "f4cdddf2723503f92acacd4811f74df23c1cc67e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2938, "license_type": "no_license", "max_line_length": 140, "num_lines": 33, "path": "/README.md", "repo_name": "Sergeres/vk_analyse", "src_encoding": "UTF-8", "text": "# VK_Users_analyse\nКурсовая работа по ММАД\nУчастники: Сергей Прозоров, Дмитрий Акатов\n\nФормулировка:\n\nРабота скрипта должна заключаться в следующем: методом последовательного перебора\n«рассмотреть» всех подписчиков группы Абитуриенты ВятГУ, собрать информацию о возрасте, поле, о тематических группах, в которых они состоят;\nпостах, под которыми они поставили лайки и оставили комментарии, сделали репосты.\nНа основе сформированного отчета подготовить анализ о тематической активности участников с учетом возрастных групп\nи гендерных особенностей.\n\nСтруктура проекта:\n1) В файле db.py происходит непосредственно взаимодействие с базой данных, заполнение ее в автоматическом режиме.\nКаждый раз генерируется новая база данных. В имени файла бд содержится тема проекта и дата его формирования \n(vk_members_YYYY_MM_DD)\n\n2) Вся бизнес-логика: работа с vk api, получение и формирование данных для последующего добавление их базу данных,\nрасполагается в main.py. \n \n Получено:\n 1) Участники сообщества\n 2) Сообщества на которые подписан каждый участник\n \n Необходимо получить:\n 1. id постов в группе Абитуриенты ВятГУ\n 2. подписчиков, которые лайкнули\\прокомментировали\\репостнули эти посты\n\n3) Тут еще один файл, в котором будет выполнятся работа с шаблонизатором jinja2, генерирующим html - отчет\nс графиками seaborn для нашей базы данных. Он должен включать некоторую аналитику по собранным данным:\n 1. статистика количества сообществ\\лайков\\репостов\\комментариев приходится на человека с разделением по возрастам и полу.\n 2. наиболее встречаемые 5 сообществ по каждому из возрастов с разделением по полу\n 3. 
распределить лайки\\репосты\\комментарии по возрастам\"\n" } ]
5
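On `jinja.py` above: `select_mem()`, `select_member_noedc()` and `top_ages()` each repeat one hand-written SQL string per age bucket, and the last bucket filters on `age > 45`, which silently skips members aged exactly 45. Below is a sketch of the same bucketing driven by a single parameterized query over half-open intervals; the table and column names follow `db.py`, and the bounds list is an assumed equivalent of the hard-coded ranges.

    # One parameterized query instead of eight near-identical SQL strings.
    # Half-open [lo, hi) intervals; a NULL bound means "unbounded", so age 45 is counted.
    import sqlite3

    BUCKETS = [(None, 18), (18, 21), (21, 24), (24, 27),
               (27, 30), (30, 35), (35, 45), (45, None)]

    def count_by_age_bucket(conn):
        cur = conn.cursor()
        counts = []
        for lo, hi in BUCKETS:
            cur.execute(
                'SELECT count(*) FROM VSU_Member '
                'WHERE (? IS NULL OR age >= ?) AND (? IS NULL OR age < ?)',
                (lo, lo, hi, hi))
            counts.append(cur.fetchone()[0])
        return counts

    # hypothetical usage: count_by_age_bucket(sqlite3.connect('vk_members_2019_12_18.db'))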
mesosphere-backup/dcos-ovh-cloud
https://github.com/mesosphere-backup/dcos-ovh-cloud
1be9c76390eda54ffdad7af65520452bcf585761
ffb5901d997a62fdd8f721ad47f5011cc0217d39
04dddd66d9accac7cbff27de0215b099ad9515d4
refs/heads/master
2021-07-05T12:44:54.164453
2017-03-24T17:12:41
2017-03-24T17:12:41
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.63154536485672, "alphanum_fraction": 0.6398467421531677, "avg_line_length": 27.472726821899414, "blob_id": "e8d5b45e746da172f591722291d752eabf2c2976", "content_id": "65dd3211a53f6ea81f3e2c67e8d2bec829adf480", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1566, "license_type": "permissive", "max_line_length": 112, "num_lines": 55, "path": "/ovhcloud_destroyer.py", "repo_name": "mesosphere-backup/dcos-ovh-cloud", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\nimport ovh\nimport sys\nfrom retrying import retry\nfrom multiprocessing.pool import ThreadPool\n\n\nclass OVHClient(ovh.Client):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n @retry(stop_max_attempt_number=3)\n def get(self, *args, **kwargs):\n return super().get(*args, **kwargs)\n\n @retry(stop_max_attempt_number=3)\n def delete(self, *args, **kwargs):\n return super().delete(*args, **kwargs)\n\n\ndef delete_instance(instance_id):\n print('Requesting deletion of {}'.format(instance_id))\n c.delete('/cloud/project/{}/instance/{}'.format(project_id, instance_id))\n\nif len(sys.argv[1:]) == 0:\n print('Usage: {} <projectname>'.format(sys.argv[0]))\n sys.exit(1)\n\nc = OVHClient()\ntp = ThreadPool(10)\nproject = sys.argv[1]\nproject_id = None\n\nfor service_id in c.get('/cloud/project'):\n p = c.get('/cloud/project/{}'.format(service_id))\n print('Found project {} with id {}'.format(p['description'], p['project_id']))\n if p['description'] == project:\n project_id = p['project_id']\n break\n\nif not project_id:\n print(\"Couldn't find project with name {}\".format(project))\n sys.exit(1)\n\nprint('Fetching all instances')\nr = c.get('/cloud/project/{}/instance'.format(project_id))\n\nif len(r) == 0:\n print('Project {} has no running instances'.format(project))\n sys.exit(0)\n\ninput('!!!WARNING - THIS WILL DESTROY ALL {} INSTANCES IN THE PROJECT {}!!!\\nPress Enter to continue...'.format(\n len(r), project))\n\ntp.map(delete_instance, [i['id'] for i in r])\n" } ]
1
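On `ovhcloud_destroyer.py` above: the pattern is retry-wrapped API calls fanned out over a `ThreadPool`. Here is a dependency-free sketch of the same shape, without the `retrying` and `ovh` packages; the decorator, its parameters, and the placeholder function body are illustrative, not OVH API calls.

    # Minimal retry decorator plus thread-pool fan-out, mirroring the shape of
    # ovhcloud_destroyer.py without external dependencies.
    import time
    from multiprocessing.pool import ThreadPool

    def retry(attempts=3, delay=1.0):
        def wrap(fn):
            def inner(*args, **kwargs):
                for attempt in range(1, attempts + 1):
                    try:
                        return fn(*args, **kwargs)
                    except Exception:
                        if attempt == attempts:
                            raise          # out of attempts: surface the last error
                        time.sleep(delay)
            return inner
        return wrap

    @retry(attempts=3)
    def delete_instance(instance_id):
        print('would delete instance %s' % instance_id)  # placeholder for the API call

    ThreadPool(10).map(delete_instance, ['id-1', 'id-2', 'id-3'])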
njdevengine/pandas-python
https://github.com/njdevengine/pandas-python
0775bfd0f3388977fc99dc8bd311c0130325288d
8daf7fe612af52cfa104c80dfe61418f38d53e74
5dbe92117434dba5fb8cfb2a3620a6fe72af1654
refs/heads/master
2021-06-26T18:58:51.052796
2020-11-09T18:19:11
2020-11-09T18:19:11
169,342,017
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6690821051597595, "alphanum_fraction": 0.6763284802436829, "avg_line_length": 45, "blob_id": "29d292c1128e5e0bb2affeef53ada370d5a8b4fe", "content_id": "a9f94ba0b63edf3dee74f35ca6fb07ae6651ba08", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 414, "license_type": "no_license", "max_line_length": 100, "num_lines": 9, "path": "/pandas-file-creation-by-array2.py", "repo_name": "njdevengine/pandas-python", "src_encoding": "UTF-8", "text": "import pandas as pd\nmyarray=[\"ABC\",\"DEF\",\"GHI\"]\narray_length = len(myarray)\npath = \"sourcefile.csv\"\nfor i in range(0,array_length):\n df = pd.read_csv(path,encoding = \"UTF-8\")\n #only saves values that match your array values, can be switched to != to filter out array value\n df = df[df.headertobefiltered == myarray[i]]\n df.to_csv(str(myarray[i])+\".csv\",encoding = \"utf-8\", index =False, header = True)\n" }, { "alpha_fraction": 0.6001505851745605, "alphanum_fraction": 0.6204819083213806, "avg_line_length": 29.159090042114258, "blob_id": "67908c76ee750adb0381a5323196544ae3883964", "content_id": "b26e93e84b0430727c4d583ebf1597dfc27d277c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1328, "license_type": "no_license", "max_line_length": 71, "num_lines": 44, "path": "/pandas-business-days-between.py", "repo_name": "njdevengine/pandas-python", "src_encoding": "UTF-8", "text": "import pandas as pd\n\n#read data from an excel tab\nxlsx = pd.ExcelFile('path_to_file.xlsx')\ndf = pd.read_excel(xlsx, 'sheetname')\n\n#drop first 2 rows\ndf.columns = df.iloc[1]\ndf = df.reindex(df.index.drop(0))\ndf = df.reindex(df.index.drop(1))\ndf.head()\n\nreps = [\"insert\",\"reps\",\"here\"]\nexecs = [\"insert\",\"excecs\",\"here\"]\n\n#create a dataframe from the excel sheet dataframe with these headers\nsales = df[[\"Start Date\",\"End Date\",\"Sales Rep\"]]\nsales = sales.dropna()\nimport datetime\n\n#rename headers\nsales = sales.rename(columns={'Start Date': 'start',\n 'End Date': 'end',\n 'Sales Rep': 'rep'})\n \n#convert to datetime \nsales['start'] = pd.to_datetime(sales['start'])\nsales['end'] = pd.to_datetime(sales['end'])\n\n#set start and end dates\nstart_date = \"2018-07-01\"\nend_date = \"2019-07-31\"\nmask = (sales['start'] >= start_date) & (sales['start'] <= end_date)\nsales = sales.loc[mask]\n\n#create a year month column\nsales['YearMonth'] = sales['start'].map(lambda x: 100*x.year + x.month)\n\n#find business day count between a and b\n#set it to be new column called days\nimport numpy as np\na = sales['start'].values.astype('datetime64[D]')\nb = sales['end'].values.astype('datetime64[D]')\nsales['days'] = np.busday_count(a,b)\n\n" }, { "alpha_fraction": 0.6401383876800537, "alphanum_fraction": 0.6401383876800537, "avg_line_length": 27.899999618530273, "blob_id": "3a7ba20ba266f88c5269c817186079e59e002eb5", "content_id": "6e43dfffa4502f6b2bb84fd3041943ab3d6cd252", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 289, "license_type": "no_license", "max_line_length": 45, "num_lines": 10, "path": "/dataframes_to_diff_sheets.py", "repo_name": "njdevengine/pandas-python", "src_encoding": "UTF-8", "text": "from pandas import ExcelWriter\n\noutput = '/directory/joined.xlsx'\ndef save_xls(list_dfs, xls_path):\n with ExcelWriter(xls_path) as writer:\n for n, df in enumerate(list_dfs):\n df.to_excel(writer,'sheet%s' % n)\n writer.save()\n 
\nsave_xls(dataframes,output)\n" }, { "alpha_fraction": 0.47093889117240906, "alphanum_fraction": 0.496770977973938, "avg_line_length": 37.71154022216797, "blob_id": "87c5904953061ea4c1db9c78f547afbfc4d7bce2", "content_id": "7c31d2e953e2f58978f64ef0446d00ede9744fc0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2013, "license_type": "no_license", "max_line_length": 76, "num_lines": 52, "path": "/xlsxwriter_autofit_formatting.py", "repo_name": "njdevengine/pandas-python", "src_encoding": "UTF-8", "text": "#load in a dataframe, do conditional formatting, autofit columns\nimport pandas as pd\n\n# Create a Pandas Excel writer using XlsxWriter as the engine.\n\nwriter = pd.ExcelWriter('example.xlsx', engine='xlsxwriter')\n\n# Convert the dataframe to an XlsxWriter Excel object.\ndf.to_excel(writer, sheet_name='Sheet1')\n\n# Get the xlsxwriter objects from the dataframe writer object.\nworkbook = writer.book\nworksheet = writer.sheets['Sheet1']\n\n\nfor i, width in enumerate(get_col_widths(filtered)):\n worksheet.set_column(i, i, width)\n \n \n# Light red fill with dark red text.\nformat1 = workbook.add_format({'bg_color': '#FFC7CE',\n 'font_color': '#9C0006'})\n\n# Light yellow fill with dark yellow text.\nformat2 = workbook.add_format({'bg_color': '#FFEB9C',\n 'font_color': '#9C6500'})\n\n# Green fill with dark green text.\nformat3 = workbook.add_format({'bg_color': '#C6EFCE',\n 'font_color': '#006100'})\n\n#GREEN between 21 and 30\nworksheet.conditional_format('R2:R'+str(len(filtered)), {'type': 'cell',\n 'criteria': 'between',\n 'minimum': 21,\n 'maximum': 30,\n 'format': format3})\n#YELLOW between 11 and 20\nworksheet.conditional_format('R2:R'+str(len(filtered)), {'type': 'cell',\n 'criteria': 'between',\n 'minimum': 11,\n 'maximum': 20,\n 'format': format2})\n#RED between 0 and 10\nworksheet.conditional_format('R2:R'+str(len(filtered)), {'type': 'cell',\n 'criteria': 'between',\n 'minimum': 0,\n 'maximum': 10,\n 'format': format1})\n\n\nworkbook.close()\n" }, { "alpha_fraction": 0.6607142686843872, "alphanum_fraction": 0.7342436909675598, "avg_line_length": 28.75, "blob_id": "9de682d221a9373d420c64ca441edf6743d71f0f", "content_id": "1c94628d85e501d296d05545c5b3684b08f6b454", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 952, "license_type": "no_license", "max_line_length": 77, "num_lines": 32, "path": "/ted_talks_analysis_bins.py", "repo_name": "njdevengine/pandas-python", "src_encoding": "UTF-8", "text": "# Import Dependencies\nimport pandas as pd\n\n# Create a path to the csv and read it into a Pandas DataFrame\ncsv_path = \"Resources/ted_talks.csv\"\nted_df = pd.read_csv(csv_path)\n\nted_df.head()\n\n# Figure out the minimum and maximum views for a TED Talk\nminviews = ted_df[\"views\"].min()\nmaxviews =ted_df[\"views\"].max()\nprint(f\"min views {minviews}\")\nprint(f\"max views {maxviews}\")\n\n# Create bins in which to place values based upon TED Talk views\nbins = [100000,500000,1000000,5000000,10000000,99999999999999999999999]\ngroup_names = [\"Below100k\",\"Below500k\",\"Below1m\",\"Below5m\",\"Above10m\"]\n# Create labels for these bins\n\n# Slice the data and place it into bins\nbinned= ted_df[\"viewbins\"] = pd.cut(ted_df[\"views\"], bins,labels=group_names)\nted_df.head(100)\n\n# Create a GroupBy object based upon \"View Group\"\n\n# Find how many rows fall into each bin\n\nx =ted_df.groupby(\"viewbins\")\nx.count()\n# Get the average of each column within 
the GroupBy object\nx.mean()\n" }, { "alpha_fraction": 0.671594500541687, "alphanum_fraction": 0.7006335854530334, "avg_line_length": 26.852941513061523, "blob_id": "53df6ccd90b5fc40f51a28b77661c8e15426c9da", "content_id": "3cd9275ecf1339e537471d34b5e2e0a189c50ecd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1894, "license_type": "no_license", "max_line_length": 73, "num_lines": 68, "path": "/text-pandas-analysis-123.py", "repo_name": "njdevengine/pandas-python", "src_encoding": "UTF-8", "text": "#grab text file\n#remove punctuation, special characters etc.\n#split by spaces create an array of words\n#create 2 word phrases create an array of 2 word phrases\n#create 3 word phrases create an array of 3 word phrases\n#create a dataframe for each with headers as word/phrase/phrase and count\n#group by count for each\n#sort top 20 in each\n#visualize data in bar graph for each\n\nimport pandas as pd\nimport matplotlib.pyplot as pt\nimport numpy as np\nimport re\n\nwith open('folder/file.txt', 'r') as myfile:\n string = myfile.read().replace('\\n', ' ')\n string = string.lower()\n \nregex = re.compile('[^a-zA-Z]')\nstring = regex.sub(' ', string)\n\nwords = string.split(\" \")\n\nfor word in words:\n word.lower()\n\nwords = list(filter(None, words))\ndfphrase1 = pd.DataFrame({\"words\" : words})\ngrouped1 = dfphrase1[\"words\"].value_counts()\ngrouped1 = grouped1.to_frame()\ngrouped1.rename(columns={\"words\":\"count\"})\n#grouped1.to_csv('g1.csv')\n\nphrase = []\nnum = len(words)-1\nfor n in range(0,num):\n phrase.append(words[n]+\" \"+words[n+1])\ndfphrase2 = pd.DataFrame({\"phrase\" : phrase})\ngrouped2 = dfphrase2[\"phrase\"].value_counts()\ngrouped2 = grouped2.to_frame()\ngrouped2.rename(columns={\"phrase\":\"count\"})\n#grouped2.to_csv('g2.csv')\n\nphrase3 = []\nnum = len(words)-2\nfor n in range(0,num):\n phrase3.append(words[n]+\" \"+words[n+1]+\" \"+words[n+2])\n \ndfphrase3 = pd.DataFrame({\"phrase3\" : phrase3})\ngrouped3 = dfphrase3[\"phrase3\"].value_counts()\ngrouped3 = grouped3.to_frame()\ngrouped3.rename(columns={\"phrase3\":\"count\"})\n#grouped3.to_csv('g3.csv')\n\n#dfphrase1.head()\n#dfphrase2.head()\n#dfphrase3.head()\n\n#grouped1.head()\n#grouped2.head()\n#grouped3.head()\n\nwriter = pd.ExcelWriter('analysis/output.xlsx', engine = 'xlsxwriter')\ngrouped1.to_excel(writer, sheet_name='grouped1')\ngrouped2.to_excel(writer, sheet_name='grouped2')\ngrouped3.to_excel(writer, sheet_name='grouped3')\nwriter.save()\n" }, { "alpha_fraction": 0.5085158348083496, "alphanum_fraction": 0.5669099688529968, "avg_line_length": 26.399999618530273, "blob_id": "094d013ff6fd7a7d458c19e1ade24a934e4d81f9", "content_id": "a9c2dee57726b0ac3c5e60cbb1e621eac856286e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 411, "license_type": "no_license", "max_line_length": 76, "num_lines": 15, "path": "/bins.py", "repo_name": "njdevengine/pandas-python", "src_encoding": "UTF-8", "text": "# Import Dependencies\nimport pandas as pd\n\nraw_data = {\n 'Class': ['Oct', 'Oct', 'Jan', 'Jan', 'Oct', 'Jan'], \n 'Name': [\"Cyndy\", \"Logan\", \"Laci\", \"Elmer\", \"Crystle\", \"Emmie\"], \n 'Test Score': [90, 56, 72, 88, 98, 67]}\ndf = pd.DataFrame(raw_data)\ndf\n\nbins = [0,60,70,80,90,100]\ngroup_names = [\"F\",\"D\",\"C\",\"B\",\"A\"]\n\ndf[\"Test Score Summary\"] = pd.cut(df[\"Test Score\"], bins,labels=group_names)\ndf.head()\n" }, { "alpha_fraction": 0.7134986519813538, "alphanum_fraction": 
0.7245178818702698, "avg_line_length": 39.33333206176758, "blob_id": "fedda50912041fc247d409ad2970280c5e8a172c", "content_id": "7ccea1b27cd9c26d04f1e29dbcc5cff039872894", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 363, "license_type": "no_license", "max_line_length": 92, "num_lines": 9, "path": "/read_change_export.py", "repo_name": "njdevengine/pandas-python", "src_encoding": "UTF-8", "text": "# Import Dependencies\nimport pandas as pd\n# Make a reference to the books.csv file path\npath = \"Resources/books.csv\"\n# Import the books.csv file as a DataFrame\nbooksdf = pd.read_csv(path,encoding = \"UTF-8\")\nbooksdf.drop(columns = [\"column 1\",\"column 2\"])\nbooksdf.head()\nrenamed_df.to_csv(\"Output/books_clean.csv\",encoding = \"utf-8\", index = False, header = True)\n" } ]
8
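On the binning scripts above (`bins.py`, `ted_talks_analysis_bins.py`): `pd.cut` intervals are right-closed by default, so with bins `[0, 60, 70, 80, 90, 100]` a score of exactly 60 still lands in `F`. The small sketch below makes that boundary behaviour explicit; the bins and labels are the ones from `bins.py`.

    # pd.cut boundary semantics, using the grade bins from bins.py.
    import pandas as pd

    scores = pd.Series([56, 60, 61, 90, 100])
    bins = [0, 60, 70, 80, 90, 100]
    labels = ['F', 'D', 'C', 'B', 'A']

    right_closed = pd.cut(scores, bins, labels=labels)               # (0,60], (60,70], ...
    left_closed = pd.cut(scores, bins, labels=labels, right=False)   # [0,60), [60,70), ...
    # caveat: with right=False, a score of 100 falls outside [90, 100) and becomes NaN

    print(pd.DataFrame({'score': scores,
                        'right=True': right_closed,
                        'right=False': left_closed}))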
v-burdenyuk/simple_tasks
https://github.com/v-burdenyuk/simple_tasks
41170cfef868762bd6947f118a3c12f7cba81284
c9edf998ab2f7bb68d54179e67bcb779e149495b
69842ad2aa13e975b6dfe6ffd8d8b43228af1cbd
refs/heads/master
2023-04-16T02:34:14.010851
2021-04-29T09:07:42
2021-04-29T09:07:42
362,479,057
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5914633870124817, "alphanum_fraction": 0.607723593711853, "avg_line_length": 25.253334045410156, "blob_id": "0548f4bf69301c0da5046d7e75b63198981a9e18", "content_id": "7258d76d057eb30c0c37ff52b86ee1deb1bebcb5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1968, "license_type": "no_license", "max_line_length": 88, "num_lines": 75, "path": "/prices_calculate.py", "repo_name": "v-burdenyuk/simple_tasks", "src_encoding": "UTF-8", "text": "from decimal import Decimal\nfrom dataclasses import dataclass\nfrom pydantic import validate_arguments\n\n'''\nSimple classes for calculation of goods list price \nbased on net costs, tax and desired margin \n'''\n\ninput_goods_list = [\n {'name': 'ball', 'net_cost': 100, 'tax_percent': 10, 'margin_percent': 10},\n {'name': 'doll', 'net_cost': 200, 'tax_percent': 12, 'margin_percent': 20},\n {'name': 'dingle', 'net_cost': 300, 'tax_percent': 18, 'margin_percent': 30},\n]\n\n\n@validate_arguments\n@dataclass\nclass Good:\n name: str\n net_cost: Decimal\n tax_percent: Decimal\n margin_percent: Decimal\n\n @property\n def margin(self):\n return self.net_cost * self.margin_percent / 100\n\n @property\n def total_cost(self):\n return self.net_cost + self.margin\n\n @property\n def tax(self):\n return self.total_cost * self.tax_percent / 100\n\n @property\n def price(self):\n return self.total_cost + self.tax\n\n def __str__(self):\n return self.name\n\n\nclass GoodList:\n lst = []\n total_price = Decimal(0)\n total_cost = Decimal(0)\n total_tax = Decimal(0)\n\n def add(self, good):\n if good not in self.lst:\n self.lst.append(good)\n self.total_cost += good.total_cost\n self.total_price += good.price\n self.total_tax += good.tax\n\n\nif __name__ == \"__main__\":\n # initialisation\n goods_list = GoodList()\n\n for good_dict in input_goods_list:\n good_obj = Good(**good_dict)\n goods_list.add(good_obj)\n\n # get results\n for _good in goods_list.lst:\n print(f'Good \"{_good}\" net_cost: {_good.net_cost}, margin: {_good.margin}, '\n f'total_cost: {_good.total_cost}, tax: {_good.tax}, price: {_good.price}')\n \n print('-'*40)\n print(f'Total cost of all goods: {goods_list.total_cost}')\n print(f'Total taxes: {goods_list.total_tax}')\n print(f'Total price of all goods with taxes: {goods_list.total_price}')" }, { "alpha_fraction": 0.5641282796859741, "alphanum_fraction": 0.5721442699432373, "avg_line_length": 28.352941513061523, "blob_id": "79474374ccfecb8060382a9688da30d297ab64ad", "content_id": "be55b20e63023bc29163b8299594de3cd274f8af", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 998, "license_type": "no_license", "max_line_length": 76, "num_lines": 34, "path": "/duplicates.py", "repo_name": "v-burdenyuk/simple_tasks", "src_encoding": "UTF-8", "text": "import hashlib\nimport pathlib\n\n'''\nFunction for searching duplicates in path.\nUsage: find_duplicates(path, [recursive = True])\n'''\n\n\ndef find_duplicates(path, recursive=False):\n files_dict = {}\n\n if pathlib.Path(path).is_dir():\n if recursive:\n files_list = list(pathlib.Path(path).glob('**/*'))\n else:\n files_list = list(file for file in pathlib.Path(path).iterdir())\n\n for file in files_list:\n if file.is_file():\n file_path = str(file)\n md5hash = hashlib.md5(file.read_bytes()).hexdigest()\n if md5hash not in files_dict:\n files_dict[md5hash] = [file_path, ]\n else:\n files_dict[md5hash].append(file_path)\n else:\n raise NotADirectoryError('Path should be 
directory')\n\n    # collect every group of identical files, not just the first match\n    duplicates = {md5hash: files_list\n                  for md5hash, files_list in files_dict.items()\n                  if len(files_list) > 1}\n    return duplicates\n\n# print(find_duplicates('/home', True))\n" } ]
2
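Two notes on the `simple_tasks` listings above. First, in `prices_calculate.py`, `GoodList` declares `lst` and the totals as class attributes: the `+=` on the `Decimal` totals rebinds them per instance, but `self.lst.append(...)` mutates the single class-level list, so every `GoodList()` shares one goods list. A sketch of per-instance state, with names following the listing:

    # GoodList with instance state: without __init__, all instances share one lst.
    from decimal import Decimal

    class GoodList:
        def __init__(self):
            self.lst = []
            self.total_price = Decimal(0)
            self.total_cost = Decimal(0)
            self.total_tax = Decimal(0)

        def add(self, good):
            if good not in self.lst:
                self.lst.append(good)
                self.total_cost += good.total_cost
                self.total_price += good.price
                self.total_tax += good.tax

Second, `find_duplicates()` in `duplicates.py` MD5-hashes every file even when its size alone rules out a duplicate. Grouping by size first and hashing only size collisions skips most of that work on large trees; the size pre-filter below is an added optimization, not part of the listing.

    # Size-first duplicate scan: only files sharing a byte size get hashed.
    import hashlib
    import pathlib
    from collections import defaultdict

    def find_duplicates_fast(path, recursive=False):
        root = pathlib.Path(path)
        if not root.is_dir():
            raise NotADirectoryError('Path should be directory')
        files = root.glob('**/*') if recursive else root.iterdir()

        by_size = defaultdict(list)
        for f in files:
            if f.is_file():
                by_size[f.stat().st_size].append(f)

        duplicates = {}
        for same_size in by_size.values():
            if len(same_size) < 2:
                continue                   # a unique size can never be a duplicate
            by_hash = defaultdict(list)
            for f in same_size:
                by_hash[hashlib.md5(f.read_bytes()).hexdigest()].append(str(f))
            duplicates.update({h: fs for h, fs in by_hash.items() if len(fs) > 1})
        return duplicates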
qiaoliuhub/get_cids_from_name
https://github.com/qiaoliuhub/get_cids_from_name
b58e295320b1bdcbf51094c1a003664aefcb3e11
bacea39448a3272c19406a991aa59ef812a084fa
023412219d1f741187ed1d49381bf5cfb075fa46
refs/heads/master
2022-11-17T08:12:33.069992
2020-07-09T16:12:47
2020-07-09T16:12:47
278,412,421
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6155593395233154, "alphanum_fraction": 0.6195343732833862, "avg_line_length": 36.46808624267578, "blob_id": "ce58a93a3459b9888a50994fd3c3269f55c28a8d", "content_id": "e102344a88956cba922a9cd52f9ed9296fec74ef", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3522, "license_type": "no_license", "max_line_length": 120, "num_lines": 94, "path": "/get_cids_from_name.py", "repo_name": "qiaoliuhub/get_cids_from_name", "src_encoding": "UTF-8", "text": "#! /usr/bin/env python\n\ntry:\n from urllib.error import HTTPError\n from urllib.parse import quote, urlencode\n from urllib.request import urlopen\nexcept ImportError:\n from urllib import urlencode\n from urllib2 import quote, urlopen, HTTPError\n\nimport argparse\nimport time\nimport pandas as pd\nimport json\nimport xml.etree.ElementTree as ET\n\n# use this url to save cid list in eutils server\nNAMES_LISTKEY_API = \"https://pubchem.ncbi.nlm.nih.gov/rest/pug/compound/name/cids/JSON?list_return=listkey\"\n\ndef xml2df(root):\n\n cols = 'ID,Name,PharmaActions,InChIKey'\n df = pd.DataFrame(columns=cols.split(\",\"))\n for i, drug in enumerate(root):\n\n info = {}\n id = drug.find(\"./*[@Name='CID']\")\n info['ID'] = id.text if id is not None else None\n name = drug.find(\"./*[@Name='MeSHHeadingList']/*[@Name='string']\")\n info['Name'] = name.text if name is not None else None\n info['PharmaActions'] = \";\".join(x.text for x in drug.findall(\"./*[@Name='PharmActionList']/*[@Name='string']\"))\n inchikey = drug.find(\"./*[@Name='InChIKey']\")\n info['InChIKey'] = inchikey.text if inchikey is not None else None\n df.loc[i] = pd.Series(info)\n\n return df\n\ndef get_id(name):\n\n namespace = 'name'\n post_body = urlencode([(namespace, name)]).encode('utf8')\n esummary = None\n try:\n response = urlopen(NAMES_LISTKEY_API, post_body)\n print(\"successfully get list key result\")\n # Construct esummary retrieve url\n lsit_key_result = json.loads(response.read())\n esummary = \"https://eutils.ncbi.nlm.nih.gov/entrez/eutils/esummary.fcgi\" \\\n \"?db=pccompound&WebEnv=\" + lsit_key_result['IdentifierList']['EntrezWebEnv']\\\n + \"&query_key=\" + str(lsit_key_result['IdentifierList']['EntrezQueryKey'])\n\n except HTTPError as e:\n print(\"Fail to retrieve messages for {0!r}, caused by {1!r}\".format(name, e))\n\n \n try:\n time.sleep(5)\n if esummary is not None:\n summary_response = urlopen(esummary)\n print(\"successfully get summary result\")\n # Parsing the downloaded esummary xml string\n xml_result = summary_response.read().decode('utf-8')\n root = ET.fromstring(xml_result)\n for i, drug in enumerate(root):\n if i == 0:\n id = drug.find(\"./*[@Name='CID']\")\n return id.text\n except HTTPError as e:\n print(\"Fail to retrieve summaries for {0!r}, caused by {1!r}\".format(name, e))\n\n \n\nif __name__ == \"__main__\":\n\n argparser = argparse.ArgumentParser()\n argparser.add_argument('name_ls_df', help = 'the data frame saving names to be searched')\n argparser.add_argument('result_df', help = 'directory to save final results')\n # argparser.add_argument('xml_file', help = \"directory to save requested xml results from pubchem server\")\n args = argparser.parse_args()\n\n names = pd.read_csv(args.name_ls_df)\n names = names[names['pert_type'] == 'trt_cp']\n names = names[~names['pert_iname'].str.startswith('BRD')]\n names_list = set(names['pert_iname'].astype(str))\n print(len(names_list))\n result = pd.DataFrame(columns = ['name', 'cid'])\n\n # construct apiurl and add 
cids into POST body\n for i, name in enumerate(names_list):\n cid = get_id(name)\n result.loc[i] = pd.Series({'name': name, 'cid': cid})\n if i%100 == 0:\n result.to_csv(args.result_df)\n result.to_csv(args.result_df)\n" } ]
1
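A note on the lookup flow in get_cids_from_name.py above: PubChem's PUG REST also exposes a direct name-to-CID endpoint, which avoids the listkey/esummary round-trip through the eutils server. The snippet below is a minimal sketch of that alternative, not part of the original script; the endpoint follows the same PUG REST scheme as the NAMES_LISTKEY_API constant above, and the aspirin example is purely illustrative.

```python
# Minimal sketch: resolve a compound name straight to CIDs via PUG REST.
# Assumes the standard PUG REST JSON layout ({"IdentifierList": {"CID": [...]}});
# returns an empty list when the name is unknown (PubChem answers 404).
import json
from urllib.parse import quote
from urllib.request import urlopen
from urllib.error import HTTPError

PUG_REST = "https://pubchem.ncbi.nlm.nih.gov/rest/pug/compound/name/{}/cids/JSON"

def name_to_cids(name):
    try:
        with urlopen(PUG_REST.format(quote(name))) as resp:
            payload = json.loads(resp.read())
        return payload.get("IdentifierList", {}).get("CID", [])
    except HTTPError:
        return []

if __name__ == "__main__":
    print(name_to_cids("aspirin"))  # e.g. [2244]
```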
pontual/copy-clientes
https://github.com/pontual/copy-clientes
974116e48f0c35ea778725185997ba7c73fe716f
27e42e27c24f61a6ebebbcec337657e67579b386
5e68f23a1785bc4b966797b4fd9e6ca88ea0274e
refs/heads/master
2020-07-03T16:47:20.768492
2019-08-13T14:52:41
2019-08-13T14:52:41
201,974,639
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.416178822517395, "alphanum_fraction": 0.47684937715530396, "avg_line_length": 17.17346954345703, "blob_id": "51b998dbbc2b60b3d65169611b20000ef7e517ae", "content_id": "c0d9c90e0bdaaf8f5128a76c94f3a1afe01dbf5d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1879, "license_type": "no_license", "max_line_length": 77, "num_lines": 98, "path": "/copycli.py", "repo_name": "pontual/copy-clientes", "src_encoding": "UTF-8", "text": "from time import sleep\r\nfrom pyautogui import click, hotkey, keyDown, keyUp, dragTo, press, typewrite\r\nfrom tkinter import Tk, TclError\r\nimport sys\r\n\r\nLAST_VAL = \"\"\r\n\r\ndef getfield(x1, x2, y):\r\n dt = 0.15\r\n click(x2, y)\r\n dragTo(x1, y, dt, button=\"left\")\r\n hotkey(\"ctrl\", \"c\")\r\n \r\n\r\ndef getcli(n):\r\n tk = Tk()\r\n out = []\r\n\r\n out.append(str(n))\r\n def ctrlv():\r\n global LAST_VAL\r\n nonlocal tk, out\r\n try:\r\n val = tk.selection_get(selection=\"CLIPBOARD\").strip()\r\n if val == LAST_VAL:\r\n val = \"\"\r\n out.append(val)\r\n LAST_VAL = val\r\n except TclError:\r\n print(\"nothing to copy\")\r\n \r\n \r\n # row y-coords\r\n r1 = 140\r\n r2 = 180\r\n r3 = 220\r\n r4 = 260\r\n\r\n # top bar\r\n click(25, 10)\r\n\r\n # select cli\r\n press(\"f3\")\r\n sleep(0.5)\r\n typewrite(str(n) + '\\n')\r\n \r\n getfield(14, 405, r1)\r\n ctrlv()\r\n \r\n getfield(422, 566, r1)\r\n ctrlv()\r\n\r\n getfield(671, 764, r1)\r\n ctrlv()\r\n\r\n # row 2\r\n getfield(14, 408, r2)\r\n ctrlv()\r\n\r\n getfield(422, 567, r2)\r\n ctrlv()\r\n\r\n getfield(581, 741, r2)\r\n ctrlv()\r\n\r\n getfield(756, 784, r2)\r\n ctrlv()\r\n\r\n getfield(14, 82, r3)\r\n ctrlv()\r\n\r\n getfield(117, 295, r3)\r\n ctrlv()\r\n\r\n getfield(310, 456, r3)\r\n ctrlv()\r\n\r\n getfield(493, 616, r3)\r\n ctrlv()\r\n\r\n getfield(14, 262, r4)\r\n ctrlv()\r\n \r\n tk.destroy()\r\n\r\n return '\"' + '\",\"'.join(out) + '\"'\r\n \r\n\r\nif __name__ == \"__main__\":\r\n fname = sys.argv[1]\r\n outname = sys.argv[2]\r\n input(\"Bring Cliente to front and press Enter\")\r\n with open(fname) as f, open(outname, 'w', encoding=\"utf-8\") as o:\r\n for line in f:\r\n line = line.strip()\r\n print(line)\r\n if len(line) > 0:\r\n print(getcli(line), file=o)\r\n" } ]
1
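The core trick in copycli.py above is its clipboard dedup: after each drag-select and Ctrl+C it compares the clipboard against LAST_VAL, so a failed copy (which leaves the previous value on the clipboard) is recorded as an empty field instead of a duplicate. A minimal sketch of that pattern in isolation, using the same tkinter calls as the script; the interactive prompt is an assumption added for the demo:

```python
# Sketch of copycli.py's clipboard dedup: an unchanged clipboard after a
# copy attempt is treated as "the copy failed", not as a repeated value.
from tkinter import Tk, TclError

def collect_clipboard(reads):
    tk = Tk()
    tk.withdraw()                # only the clipboard handle is needed
    last, out = "", []
    for _ in range(reads):
        input("select a field, press Ctrl+C, then Enter... ")
        try:
            val = tk.selection_get(selection="CLIPBOARD").strip()
        except TclError:         # clipboard empty or not text
            val = ""
        if val == last:          # same value as before => copy did not happen
            val = ""
        out.append(val)
        last = val               # mirror copycli.py: reset even on a miss
    tk.destroy()
    return out
```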
LiaoQian1996/tileable_textures
https://github.com/LiaoQian1996/tileable_textures
c4c4b3cf99e470546ab1d01ca914b772cdee7718
29ca96b99d44a58324ede302d0a63ed77bb9604e
f2b8d3eb9afd8c16c3b37ae6ff0a2922b9c68fc4
refs/heads/master
2020-11-25T18:31:44.317685
2019-12-18T10:34:53
2019-12-18T10:34:53
228,793,437
9
0
null
null
null
null
null
[ { "alpha_fraction": 0.5269300937652588, "alphanum_fraction": 0.5717933177947998, "avg_line_length": 37.79716873168945, "blob_id": "5f05c6c9de123d8f22182db436cfa9c50540103d", "content_id": "9d3a35d04db9692eca9fdbf819c6d22e272b69c3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8303, "license_type": "no_license", "max_line_length": 96, "num_lines": 212, "path": "/lib/ops.py", "repo_name": "LiaoQian1996/tileable_textures", "src_encoding": "UTF-8", "text": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\nimport tensorflow.contrib.slim as slim\nimport pdb\nimport numpy as np\nfrom PIL import Image\nimport scipy.misc\nimport os\n\ndef save_patches(img, ind, h1, w1, FLAGS):\n '''\n img是生成的1024图像,ind为上一层512块的索引,w1为当前合成图像的宽\n '''\n w0, h0 = w1//1024,h1//1024\n ind0 = (ind//w0)*2*(2*w0) + (ind%w0)*2\n print('\\t%i/%i 1024 patch of (%i,%i) image saved !'%((ind+1), w0*h0, h1, w1))\n for j in range(4):\n k, l = map(int,[j//2, j%2])\n #print('\\tNow we save No.%i 512 patch of No.%i 1024 patch in [%i,%i] Image'%(j,ind,n,n))\n save_img(np.squeeze(img[:, 512*k:512*(k+1), 512*l:512*(l+1), :]),\\\n os.path.join(FLAGS.output_dir, 'buffer', '%i_%i_%i.png'%\\\n (h1, w1, ind0 + k*2*w0 + l ))) \n print('\\t%i 512 patch of (%i,%i) image saved !'%(ind0+k*2*w0+l,h1, w1)) \n\ndef save_whole_img(img,h,w,FLAGS):\n '''\n 将图像按512分为若干块,并进行保存\n '''\n path = os.path.join(FLAGS.output_dir,'buffer')\n h0, w0 = h//512, w//512\n for ind in range(w0*h0):\n x, y = ind//w0,ind%w0\n save_img(img[:,x*512:(x+1)*512,y*512:(y+1)*512,:], path+'/%i_%i_%i.png'%(h,w,ind))\n \ndef save_img(img, path):\n scipy.misc.toimage(np.squeeze(img),cmin=-1.0,cmax=1.0) \\\n .save(path)\n \ndef to_tensor(input):\n return tf.constant(input, dtype = tf.float32)\n\ndef check_size(im):\n while(1):\n if (im.size[0] * im.size[1] > 3000000):\n w, h = int(0.9 * im.size[0]), int(0.9*im.size[1])\n print('Image size (%i,%i) is too large, will be resized to [%i,%i]'\\\n %(im.size[0], im.size[1], w, h))\n im = im.resize((w, h), Image.BICUBIC) \n else:\n return im\n\ndef check_size_crop(im):\n _, h0, w0, _ = im.shape\n h,w = h0,w0\n while(h*w > 3000000):\n h, w = map(int,[h*0.9,w*0.9])\n print('Image size (%i,%i) is too large, will be croped to (%i,%i)'\\\n %(h0, w0, h, w))\n im = im[:,(h0-h)//2:(h0-h)//2+h,(w0-w)//2:(w0-w)//2+w,:]\n print('After crop, im.shape : ',im.shape)\n return im\n \ndef gram(features):\n features = tf.reshape(features,[-1,features.shape[3]])\n return tf.matmul(features,features,transpose_a=True) \\\n / tf.cast(features.shape[0]*features.shape[1],dtype=tf.float32)\n\ndef total_variation_loss(image):\n tv_y_size = tf.size(image[:,1:,:,:],out_type=tf.float32)\n tv_x_size = tf.size(image[:,:,1:,:],out_type=tf.float32)\n tv_loss = (\n (tf.nn.l2_loss(image[:,1:,:,:] - image[:,:-1,:,:]) /\n tv_y_size) +\n (tf.nn.l2_loss(image[:,:,1:,:] - image[:,:,:-1,:]) /\n tv_x_size))\n return tv_loss\n\ndef get_layer_scope(name):\n target_layer = 'vgg_19/conv' + name[-2] + '/conv'+ name[-2] + '_' + name[-1] \n return target_layer \n\ndef get_layer_list(layer, single_layer=False):\n style_layers = []\n if single_layer == True:\n if layer == 'VGG11':\n style_layers = ['VGG11']\n elif layer == 'VGG21':\n style_layers = ['VGG21']\n elif layer == 'VGG31':\n style_layers = ['VGG31']\n elif layer == 'VGG41':\n style_layers = ['VGG41']\n elif layer == 'VGG51':\n style_layers = ['VGG51']\n elif layer == 'VGG54':\n 
style_layers = ['VGG54']\n else:\n raise ValueError(\"NO THIS LAYER !\")\n else:\n if layer == 'VGG11':\n style_layers = ['VGG11']\n elif layer == 'VGG21':\n style_layers = ['VGG11','VGG21']\n elif layer == 'VGG31':\n style_layers = ['VGG11','VGG21','VGG31']\n elif layer == 'VGG41':\n style_layers = ['VGG11','VGG21','VGG31','VGG41']\n elif layer == 'VGG51':\n style_layers = ['VGG11','VGG21','VGG31','VGG41','VGG51']\n elif layer == 'VGG54':\n style_layers = ['VGG11','VGG21','VGG31','VGG41','VGG51','VGG54']\n else:\n raise ValueError(\" No such layer in layer list.\")\n return style_layers \n\ndef preprocess(image):\n with tf.name_scope(\"preprocess\"):\n # [0, 1] => [-1, 1]\n return image * 2 - 1\n\n\ndef deprocess(image):\n with tf.name_scope(\"deprocess\"):\n # [-1, 1] => [0, 1]\n return (image + 1) / 2\n \n# The operation used to print out the configuration\ndef print_configuration_op(FLAGS):\n print('[Configurations]:')\n FLAGS = vars(FLAGS)\n for name, value in sorted(FLAGS.items()):\n if type(value) == float:\n print('\\t%s: %f'%(name, value))\n elif type(value) == int:\n print('\\t%s: %d'%(name, value))\n elif type(value) == str:\n print('\\t%s: %s'%(name, value))\n elif type(value) == bool:\n print('\\t%s: %s'%(name, value))\n else:\n print('\\t%s: %s' % (name, value))\n print('End of configuration')\n\n# VGG19 component\ndef vgg_arg_scope(weight_decay=0.0005):\n \"\"\"Defines the VGG arg scope.\n Args:\n weight_decay: The l2 regularization coefficient.\n Returns:\n An arg_scope.\n \"\"\"\n with slim.arg_scope([slim.conv2d, slim.fully_connected],\n activation_fn=tf.nn.relu,\n weights_regularizer=slim.l2_regularizer(weight_decay),\n biases_initializer=tf.zeros_initializer()):\n with slim.arg_scope([slim.conv2d], padding='SAME') as arg_sc:\n return arg_sc\n\n# VGG19 net\n\"\"\"Oxford Net VGG 19-Layers version E Example.\nNote: All the fully_connected layers have been transformed to conv2d layers.\n To use in classification mode, resize input to 224x224.\nArgs:\ninputs: a tensor of size [batch_size, height, width, channels].\nnum_classes: number of predicted classes.\nis_training: whether or not the model is being trained.\ndropout_keep_prob: the probability that activations are kept in the dropout\n layers during training.\nspatial_squeeze: whether or not should squeeze the spatial dimensions of the\n outputs. Useful to remove unnecessary dimensions for classification.\nscope: Optional scope for the variables.\nfc_conv_padding: the type of padding to use for the fully connected layer\n that is implemented as a convolutional layer. Use 'SAME' padding if you\n are applying the network in a fully convolutional manner and want to\n get a prediction map downsampled by a factor of 32 as an output. 
Otherwise,\n    the output prediction map will be (input / 32) - 6 in case of 'VALID' padding.\nReturns:\nthe last op containing the log predictions and end_points dict.\n\"\"\"\ndef vgg_19(inputs,\n           num_classes=1000,\n           is_training=False,\n           dropout_keep_prob=0.5,\n           spatial_squeeze=True,\n           scope='vgg_19',\n           reuse = False,\n           fc_conv_padding='VALID'):\n    \n    with tf.variable_scope(scope, 'vgg_19', [inputs], reuse=reuse) as sc:\n        end_points_collection = sc.name + '_end_points'\n        # Collect outputs for conv2d, fully_connected and max_pool2d.\n        with slim.arg_scope([slim.conv2d, slim.fully_connected, slim.max_pool2d],\n                            outputs_collections=end_points_collection):\n            net = slim.repeat(inputs, 2, slim.conv2d, 64, 3, scope='conv1', reuse=reuse)\n            net = slim.avg_pool2d(net, [2, 2], scope='pool1')\n            net = slim.repeat(net, 2, slim.conv2d, 128, 3, scope='conv2',reuse=reuse)\n            net = slim.avg_pool2d(net, [2, 2], scope='pool2')\n            net = slim.repeat(net, 4, slim.conv2d, 256, 3, scope='conv3', reuse=reuse)\n            net = slim.avg_pool2d(net, [2, 2], scope='pool3')\n            net = slim.repeat(net, 4, slim.conv2d, 512, 3, scope='conv4',reuse=reuse)\n            net = slim.avg_pool2d(net, [2, 2], scope='pool4')\n            net = slim.repeat(net, 4, slim.conv2d, 512, 3, scope='conv5',reuse=reuse)\n            net = slim.avg_pool2d(net, [2, 2], scope='pool5')\n            # Use conv2d instead of fully_connected layers.\n            # Convert end_points_collection into a end_point dict.\n            end_points = slim.utils.convert_collection_to_dict(end_points_collection)\n\n            return net,end_points\nvgg_19.default_image_size = 224\n" }, { "alpha_fraction": 0.576665997505188, "alphanum_fraction": 0.6147457957267761, "avg_line_length": 33.23741149902344, "blob_id": "01512a892cd83c4e900f9008d44b9959dd5da84d", "content_id": "9c5badcde40e25960b9ec11436ff2ff51b1de871", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 4937, "license_type": "no_license", "max_line_length": 639, "num_lines": 139, "path": "/readme.md", "repo_name": "LiaoQian1996/tileable_textures", "src_encoding": "UTF-8", "text": "# tileable_texture\r\n\r\nIn this repository we implement a tileable texture synthesis algorithm, which can synthesize high-resolution textures of theoretically unlimited size.\r\n\r\nThe paper [A Neural Algorithm of Artistic Style](http://arxiv.org/abs/1508.06576) by Gatys et al. pioneered modeling the *texture*, or so-called *style*, of an image by computing the Gram matrices of features extracted from a pretrained deep CNN. However, limited by the receptive field size of CNN neurons, low-frequency information and large-scale structure cannot be modeled this way. The paper [High-Resolution Multi-Scale Neural Texture Synthesis](https://doi.org/10.1145/3145749.3149449) introduces a multi-scale synthesis pipeline, which generates better results by matching network layers across many scales of a Gaussian pyramid.\r\n\r\nHowever, the maximum size of image that a computer can synthesize in one pass is bounded by the available hardware. Therefore, we introduce a tileable texture synthesis algorithm, which synthesizes the target texture image patch by patch while maintaining smooth transitions at patch edges via a novel *decay mse loss*. So-called **tileable** means the patches join seamlessly and the whole image is synthesized patch by patch in scanning order, just like paving tiles.\r\n\r\n## results\r\n\r\n<table>\r\n\t<tr>\r\n\t\t<th valign = 'bottom'><center> Sample Textures </center></th>\r\n        <th><center> Synthesized Textures <br>(thumbnail, original size (4096,4096)) </center></th>\r\n        <th><center> Synthesized Textures details</center></th>\r\n\t</tr>\r\n\t<tr>\r\n\t\t<td valign=\"middle\">\r\n\t\t\t<center><img src = \"./imgs/color.png\" width = '150px'></center>\r\n\t\t</td>\r\n        <td>\r\n\t\t\t<center><img src = \"./outputs/color_thumbnail.png\" width = '250px'></center>\r\n\t\t</td>\r\n        <td>\r\n            <center><img src = \"./outputs/color_detail.png\" width = '200px'></center>\r\n        </td>\r\n\t</tr>\r\n\t<tr>\r\n        <td>\r\n            <center><img src = \"./imgs/denim.png\" width = '200px'></center>\r\n        </td>\r\n        <td>\r\n\t\t\t<center><img src = \"./outputs/denim_thumbnail.png\" width = '250px'></center>\r\n\t\t</td>\r\n        <td>\r\n\t\t\t<center><img src = \"./outputs/denim_detail.png\" width = '200px'></center>\r\n\t\t</td>\r\n\t</tr>\r\n\t<tr>\r\n        <td>\r\n            <center><img src = \"./imgs/mottle.png\" width = '150px'></center>\r\n        </td>\r\n        <td>\r\n\t\t\t<center><img src = \"./outputs/mottle_thumbnail.png\" width = '250px'></center>\r\n\t\t</td>\r\n        <td>\r\n\t\t\t<center><img src = \"./outputs/mottle_detail.png\" width = '200px'></center>\r\n\t\t</td>\r\n\t</tr>\r\n\t<tr>\r\n        <td>\r\n            <center><img src = \"./imgs/texture_8.png\" width = '200px'></center>\r\n        </td>\r\n        <td>\r\n\t\t\t<center><img src = \"./outputs/texture_8_thumbnail.png\" width = '250px'></center>\r\n\t\t</td>\r\n        <td>\r\n\t\t\t<center><img src = \"./outputs/texture_8_detail.png\" width = '200px'></center>\r\n\t\t</td>\r\n\t</tr>\r\n\t<tr>\r\n        <td>\r\n            <center><img src = \"./imgs/wall.png\" width = '200px'></center>\r\n        </td>\r\n        <td>\r\n\t\t\t<center><img src = \"./outputs/wall_thumbnail.png\" width = '250px'></center>\r\n\t\t</td>\r\n        <td>\r\n\t\t\t<center><img src = \"./outputs/wall_detail.png\" width = '200px'></center>\r\n\t\t</td>\r\n\t</tr>\r\n</table>\r\n\r\n\r\n\r\n<table>\r\n\t<tr>\r\n\t\t<th valign = 'bottom'><center> Sample Texture </center></th>\r\n\t\t<th>\r\n            <center> Synthesized Texture ( in original size (2048,2048) )</center>\r\n        </th>\r\n\t</tr>\r\n\t<tr>\r\n\t\t<td valign=\"middle\">\r\n\t\t\t<center><img src = \"./imgs/water.png\" width=\"2000px\"></center>\r\n\t\t</td>\r\n        <td>\r\n\t\t\t<center><img src = \"./results/2048_2048.png\" width=\"2000px\"></center>\r\n\t\t</td>\r\n\t</tr>\r\n</table>\r\n\r\n### Dependency\r\n\r\n- python 3.5 or 3.6\r\n- tensorflow\r\n- VGG19 model weights downloaded from the [TF-slim models](http://download.tensorflow.org/models/vgg_19_2016_08_28.tar.gz) \r\n- The code is tested on:\r\n  - Ubuntu 16.04 LTS with CPU architecture x86_64 + Nvidia GeForce GTX 1080\r\n  - Windows 10 + Nvidia GeForce GTX 1080\r\n\r\n### Getting started \r\n\r\nDenote the directory of this repository as ```./tileable_texture/``` \r\n\r\n- #### Download the VGG19 pre-trained model\r\n\r\n```bash\r\n# clone the repository from github\r\ngit clone https://github.com/LiaoQian1996/multi_scale_deep_texture.git\r\ncd $tileable_texture/\r\n\r\n# download the vgg19 model weights from \r\n# http://download.tensorflow.org/models/vgg_19_2016_08_28.tar.gz\r\n# to tileable/vgg19/\r\n```\r\n\r\n- #### Synthesize texture image\r\n\r\n  ```bash\r\n  python main.py \\\r\n    --output_dir result1 \\\r\n    --target_dir ./imgs/mottle.png \\\r\n    --texture_shape 4096 4096 
\\\r\n    --top_style_layer VGG54 \\\r\n    --max_iter 50 \\\r\n    --pyrm_layers 6 \\\r\n    --W_tv 0.001 \\\r\n    --pad 32 \\\r\n    --vgg_ckpt ./vgg19/\r\n    #--print_loss \\\r\n  sleep 1\r\n  python patches2img.py --path result\r\n  ```\r\n  \r\n- #### Combine image patches\r\n```bash\r\npython patches2img.py --path result\r\n```\r\n\r\n \r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n" }, { "alpha_fraction": 0.49836066365242004, "alphanum_fraction": 0.5737704634666443, "avg_line_length": 23.538461685180664, "blob_id": "95a8f74c754c564437b43b8ee181a51822aedcc7", "content_id": "ebc7432820e056f2bb15db8c3eb31cd7f1f16819", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 305, "license_type": "no_license", "max_line_length": 36, "num_lines": 13, "path": "/run_syn.sh", "repo_name": "LiaoQian1996/tileable_textures", "src_encoding": "UTF-8", "text": "python main.py \\\n    --output_dir result \\\n    --target_dir ./imgs/mottle.png \\\n    --texture_shape 4096 4096 \\\n    --top_style_layer VGG54 \\\n    --max_iter 50 \\\n    --pyrm_layers 6 \\\n    --W_tv 0.001 \\\n    --pad 32 \\\n    --vgg_ckpt ./vgg19/\n    #--print_loss \\\nsleep 1\npython patches2img.py --path result" }, { "alpha_fraction": 0.47099366784095764, "alphanum_fraction": 0.5129235982894897, "avg_line_length": 42.887271881103516, "blob_id": "cfbb6415234dd8cffabdc3045ee35b36cb7396c8", "content_id": "7ef536360505b470ddb5148f15437379f17e8abc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12641, "license_type": "no_license", "max_line_length": 120, "num_lines": 275, "path": "/lib/model.py", "repo_name": "LiaoQian1996/tileable_textures", "src_encoding": "UTF-8", "text": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\nfrom functools import partial\nfrom lib.ops import *\nimport collections\nimport os\nimport math\nfrom PIL import Image\nimport numpy as np\nimport time\n    \n# Define the dataloader\ndef data_loader(img_dir = None, FLAGS = None):\n    with tf.device('/cpu:0'):\n        if img_dir is None:\n            image_raw = Image.open(FLAGS.target_dir)\n        else:\n            image_raw = Image.open(img_dir)\n        if FLAGS.texture_shape == [-1,-1]:\n            image_raw = check_size(image_raw)\n        if image_raw.mode != 'RGB':\n            image_raw = image_raw.convert('RGB')\n        image_raw = np.asarray(image_raw)/255\n        targets = preprocess(image_raw) \n        samples = np.expand_dims(targets, axis = 0) \n    return samples\n\ndef generator(h1,w1, initials = None, rf1=None, rf2=None, FLAGS=None):\n    if initials is None:\n        shape = [1, h1, w1, 3]\n        var1 = tf.get_variable('gen_img',shape = shape, \\\n                               initializer = tf.random_normal_initializer(0,0.5),\\\n                               dtype=tf.float32,trainable=True, collections=None) \n    else:\n        var = initials + tf.random_normal(tf.shape(initials), 0, FLAGS.stddev)\n        if rf1 is None:\n            '''\n            No reference image, so this is just the plain upsampling + gram_loss case\n            '''\n            pass\n        elif rf2 is None:\n            '''\n            Only the reference image rf1 is given, so the variable to return is a [1024,1536] image\n            '''\n            var = tf.concat([rf1, var], axis = 2)\n\n        else:\n            var = tf.concat([rf1, var], axis = 2)\n            var = tf.concat([rf2, var], axis = 1)\n            '''\n            Return a large [1536,1536] image\n            '''\n        if FLAGS.pad == 0:\n            var1 = tf.Variable(var)\n        else:\n            var1 = tf.Variable(tf.pad(var,[[0,0],[FLAGS.pad,FLAGS.pad],[FLAGS.pad,FLAGS.pad],[0,0]], \"REFLECT\")) \n    return tf.tanh(var1)\n\ndef Synthesis(targets, layer, ind=0, FLAGS=None, reuse=True):\n    h1,w1 = FLAGS.texture_shape[0]//(2**layer),FLAGS.texture_shape[1]//(2**layer)\n    h0,w0 = h1//1024, w1//1024\n    '''\n    The old coordinates are ind//w0, ind%w0\n    The new coordinates are (ind//w0)*2, (ind%w0)*2\n    The new index is (ind//w0)*2 * (2*h0) + (ind%w0)*2\n    '''\n    if (h1//512)*(w1//512) > 10 and ind%w0 == 0 and ind!=0 : \n        targets = tf.transpose(to_tensor(targets),[0,2,1,3])\n    else:\n        targets = tf.identity(to_tensor(targets))\n    \n    img_loader = partial(data_loader, FLAGS=FLAGS) \n    \n    def upsampling(initials):\n        w, h = [ initials.shape[1], initials.shape[2] ]\n        initials = tf.image.resize_bicubic(initials, [2*int(w), 2*int(h)])\n        return initials\n    with tf.variable_scope('generator'):\n        if (h1//512)*(w1//512) > 10:\n            ind0 = ((ind//w0)*2)*2*w0 + (ind%w0)*2\n        if layer == FLAGS.pyrm_layers - 1:\n            '''\n            Bottom level: the lowest-resolution synthesized image, initialized from noise\n            '''\n            init = None\n            gen_output = generator(h1,w1, initials = init, FLAGS = FLAGS)\n        elif ind == 0:\n            '''\n            Initialized by upsampling the previous level's image\n            '''\n            img_dir = os.path.join(FLAGS.output_dir,'buffer','%i_%i_0.png'%(h1//2,w1//2))\n            init = img_loader(img_dir=img_dir)\n            init = upsampling(init)\n            gen_output = generator(h1,w1, initials = init, FLAGS = FLAGS)\n        elif ind//w0 == 0:\n            '''\n            First row, but not the first patch; here init has a fixed 512 -> 1024 size\n            '''\n            init_dir = os.path.join(FLAGS.output_dir,'buffer','%i_%i_%i.png'%(h1//2,w1//2,ind))\n            init = upsampling(img_loader(img_dir=init_dir))\n            rf_left_up = os.path.join(FLAGS.output_dir,'buffer','%i_%i_%i.png'%(h1,w1,ind0-1))\n            rf_left_down=os.path.join(FLAGS.output_dir,'buffer','%i_%i_%i.png'%(h1,w1,ind0-1+w0*2))\n            rf_left = list(map(img_loader, [rf_left_up, rf_left_down]))\n            rf_left = tf.concat(list(map(to_tensor, rf_left)), axis=1)\n            gen_output = generator(h1,w1, initials=init, rf1=rf_left,FLAGS=FLAGS)\n            \n        elif ind%w0 == 0:\n            '''\n            Not the first row, first patch of the row\n            '''\n            init_dir = os.path.join(FLAGS.output_dir,'buffer','%i_%i_%i.png'%(h1//2,w1//2,ind))\n            init = tf.transpose(upsampling(img_loader(img_dir=init_dir)),perm=[0,2,1,3])\n            rf_left_up = os.path.join(FLAGS.output_dir,'buffer','%i_%i_%i.png'%\\\n                                      (h1,w1,ind0-w0))\n            rf_left_down=os.path.join(FLAGS.output_dir,'buffer','%i_%i_%i.png'%\\\n                                      (h1,w1,ind0-w0+1))\n            rf_left = list(map(img_loader, [rf_left_up, rf_left_down]))\n            rf_left = tf.concat(list(map(to_tensor, rf_left)), axis=2)\n            rf_left = tf.transpose(rf_left, perm=[0,2,1,3])\n            gen_output = generator(h1, w1, initials=init, rf1=rf_left,FLAGS=FLAGS)  \n            \n        else:\n            '''\n            In the current level, ind0 is the index of the 0th 512 patch of the current 1024 patch\n            '''\n            init_dir = os.path.join(FLAGS.output_dir,'buffer','%i_%i_%i.png'%(h1//2,w1//2,ind))\n            init = upsampling(img_loader(img_dir=init_dir))\n            rf_left_up = os.path.join(FLAGS.output_dir,'buffer','%i_%i_%i.png'%\\\n                                      (h1,w1,ind0 -1))\n            rf_left_down=os.path.join(FLAGS.output_dir,'buffer','%i_%i_%i.png'%\\\n                                      (h1,w1,ind0 - 1 + w0*2))\n            rf_left = list(map(img_loader, [rf_left_up, rf_left_down]))\n            rf_left = tf.concat(list(map(to_tensor, rf_left)), axis=1)  \n            \n            rf_up_left = os.path.join(FLAGS.output_dir,'buffer','%i_%i_%i.png'%\\\n                                      (h1,w1,ind0 - w0*2 - 1))\n            rf_up_middle = os.path.join(FLAGS.output_dir,'buffer','%i_%i_%i.png'%\\\n                                        (h1,w1,ind0 - w0*2))\n            rf_up_right = os.path.join(FLAGS.output_dir,'buffer','%i_%i_%i.png'%\\\n                                       (h1,w1,ind0 - w0*2 + 1))\n            rf_up = list(map(img_loader, [rf_up_left, rf_up_middle, rf_up_right]))  \n            rf_up = tf.concat(list(map(to_tensor, rf_up)), axis=2)  \n            gen_output = generator(h1,w1, initials=init, rf1=rf_left, rf2=rf_up, FLAGS=FLAGS)  \n            \n            rf = tf.concat([rf_left,tf.zeros([1,1024,1024,3])],axis=2)\n            rf = tf.concat([rf_up,rf],axis=1)\n    \n    # Calculating the generator loss\n    with tf.name_scope('generator_loss'):  \n        #print('gen_output : ',gen_output)\n        with tf.name_scope('tv_loss'):\n            tv_loss = total_variation_loss(gen_output)\n\n        with tf.name_scope('style_loss'):\n            _, vgg_gen_output = vgg_19(gen_output,is_training=False, reuse = False)\n            _, vgg_tar_output = vgg_19(targets, is_training=False, reuse = True)\n            style_layer_list = get_layer_list(FLAGS.top_style_layer,False)\n            sl = tf.zeros([])\n            ratio_list=[100.0, 1.0, 0.1, 0.0001, 1.0, 100.] # [100.0, 1.0, 0.1, 0.0001, 1.0, 100.0]  \n            for i in range(len(style_layer_list)):\n                tar_layer = style_layer_list[i]\n                target_layer = get_layer_scope(tar_layer)\n                gen_feature = vgg_gen_output[target_layer]\n                tar_feature = vgg_tar_output[target_layer]\n                diff = tf.square(gram(gen_feature) - gram(tar_feature))\n                sl = sl + tf.reduce_mean(tf.reduce_sum(diff, axis=0)) * ratio_list[i]  \n            style_loss = sl\n        \n        with tf.name_scope('decay_mse_loss'):\n            if ind == 0:\n                decay_mse_loss = tf.zeros([])\n            else:\n                gen_output1 = gen_output[:,FLAGS.pad:-FLAGS.pad,FLAGS.pad:-FLAGS.pad,:]\n                #print('gen_output1 : ',gen_output1)\n                if ind//w0 == 0 or ind%w0 == 0:\n                    decay_ratio = 1 - np.linspace(0,1,256)\n                    decay_ratio = np.concatenate([np.ones(128),decay_ratio,np.zeros(128)])\n                    decay_ratio = decay_ratio.reshape([1,1,512,1])\n                    decay_mse_loss = decay_ratio * tf.square(gen_output1[:,:,0:512,:] - rf_left)  \n                    decay_mse_loss = tf.reduce_mean(decay_mse_loss)\n                else:\n                    decay_ratio = 1 - np.linspace(0,1,256)\n                    #decay_ratio = decay_ratio.astype(np.float32)\n                    decay_ratio = np.concatenate([np.ones(128),decay_ratio,np.zeros(128+1024)])\n                    decay_ratio = decay_ratio.reshape([1,1,1536,1])\n                    decay_ratio_2d = decay_ratio * np.transpose(decay_ratio,[0,2,1,3])\n                    decay_mse_loss = tf.reduce_mean(decay_ratio_2d * tf.square(gen_output1 - rf))\n            \n        gen_loss = style_loss + FLAGS.W_tv * tv_loss + 0.1*decay_mse_loss\n        gen_loss = 1e6 * gen_loss\n\n    with tf.name_scope('generator_train'):\n        gen_tvars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope = 'generator')\n        optimizer = tf.contrib.opt.ScipyOptimizerInterface(\n            gen_loss, var_list = gen_tvars, method='L-BFGS-B',\n            options = {'maxiter': FLAGS.max_iter, 'disp': FLAGS.print_loss})\n        '''\n        for _ in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope = 'generator'):\n            print(_)\n        '''\n    \n    vgg_var_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='vgg_19')\n    vgg_restore = tf.train.Saver(vgg_var_list)\n\n    # Start the session\n    config = tf.ConfigProto()\n    config.gpu_options.allow_growth = True\n\n    def print_loss(gl, sl, tvl, dml):\n        if FLAGS.print_loss is True:\n            print('gen_loss : %s' % gl )\n            print('style_loss : %s' % sl )\n            print('tv_loss : %s' % tvl )\n            print('decay_mse_loss : %s' %dml)\n    \n    init_op = tf.global_variables_initializer()  \n    with tf.Session(config = config) as sess:\n        sess.run(init_op)\n        vgg_restore.restore(sess, FLAGS.vgg_ckpt)\n        print('\\tUnder Synthesizing ...')\n        #start = time.time()\n        optimizer.minimize(sess, loss_callback = print_loss,\n                           fetches = [gen_loss, style_loss, tv_loss, decay_mse_loss])\n        gen_output, style_loss = gen_output.eval(), style_loss.eval()\n    \n    tf.reset_default_graph()\n    \n    if init is None or FLAGS.pad == 0:\n        gen_output = gen_output[:,:,:,:]  \n    else:\n        gen_output = gen_output[:,FLAGS.pad:-FLAGS.pad,FLAGS.pad:-FLAGS.pad,:]\n    \n    if (h1//512)*(w1//512) < 4:\n        '''\n        First stage: upsampling + Gram loss\n        '''\n        path = os.path.join(FLAGS.output_dir,'buffer','%i_%i_%i.png'%(h1,w1,ind))\n        save_img(gen_output, path)\n    elif (h1//512)*(w1//512) <= 10:\n        '''\n        End of the first stage: the largest image that upsampling + Gram loss can synthesize\n        '''\n        save_whole_img(gen_output,h1,w1,FLAGS)\n    elif ind == 0:\n        '''\n        First row, first column patch: save the whole gen_output (1024,1024) directly\n        '''\n        save_patches(gen_output, ind, h1, w1 , FLAGS)\n    elif ind//w0 == 0:\n        '''\n        First row, not the first column: save the synthesized patches and update the reference images\n        '''\n        save_img(gen_output[:,0:512,0:512,:], rf_left_up)\n        save_img(gen_output[:,512:1024,0:512,:],rf_left_down)\n        save_patches(gen_output[:,:,512:,:], ind, h1, w1, FLAGS)\n    elif ind%w0 == 0:\n        '''\n        Not the first row, first column\n        '''\n        save_patches(np.transpose(gen_output[:,:,512:,:],[0,2,1,3]), ind, h1, w1, FLAGS)\n        save_img(np.transpose(gen_output[:,0:512,0:512,:],[0,2,1,3]), rf_left_up)\n        save_img(np.transpose(gen_output[:,512:1024,0:512,:],[0,2,1,3]), rf_left_down)  \n    else:\n        '''\n        Not the first row and not the first column\n        '''\n        save_patches(gen_output[:,512:,512:,:], ind, h1, w1, FLAGS)\n        save_img(gen_output[:,0:512,0:512,:], rf_up_left)\n        save_img(gen_output[:,0:512,512:1024,:], rf_up_middle)\n        save_img(gen_output[:,0:512,1024:,:], rf_up_right)\n        save_img(gen_output[:,512:1024,0:512,:], rf_left_up)\n        save_img(gen_output[:,1024:,0:512,:], rf_left_down)  \n    \n" }, { "alpha_fraction": 0.58984375, "alphanum_fraction": 0.621279776096344, "avg_line_length": 35.080535888671875, "blob_id": "bcf6cd7dea70970ec34f9980ae9e9d8ebd0383bd", "content_id": "b3899978b7456c824de016869f326edbe32746ca", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5476, "license_type": "no_license", "max_line_length": 103, "num_lines": 149, "path": "/main.py", "repo_name": "LiaoQian1996/tileable_textures", "src_encoding": "UTF-8", "text": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\nimport tensorflow.contrib.slim as slim\nimport os\nfrom lib.model import data_loader, generator, Synthesis\nfrom lib.ops import *\nimport math\nimport time\nimport numpy as np\nimport scipy.misc\nimport cv2\nimport argparse\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' \n\nparser = argparse.ArgumentParser()\n\nparser.add_argument(\n    '--output_dir',\n    help = 'output directory',\n    default = './result/'\n)\n\nparser.add_argument(\n    '--vgg_ckpt',\n    help = 'checkpoint of vgg networks, the check point file of pretrained model should be downloaded',\n    default = '/home/liaoqian/DATA/vgg19/vgg_19.ckpt'\n)\n\nparser.add_argument(\n    '--target_dir',\n    help = 'path of target img, texture sample image or style image',\n    default = './imgs/tomato.png' \n)\n\nparser.add_argument(\n    '--initials',\n    help = 'initialization mode of synthesis, takes effect only in style_transfer task_mode',\n    choices = ['noise', 'content'],\n    default = 'noise'\n)\n\nparser.add_argument(\n    '--top_style_layer',\n    help = 'the top layer of vgg network layers used to compute style_loss',\n    default = 'VGG54',\n    choices = ['VGG11','VGG21','VGG31','VGG41','VGG51','VGG54']\n)\n\nparser.add_argument(\n    '--texture_shape',\n    help = 'img_size of synthesis output texture, if set to [-1,-1], the shape will be \\\n    the same as sample texture image',\n    nargs = '+',\n    type = int\n)\n\nparser.add_argument(\n    '--pyrm_layers',\n    help = 'number of pyramid layers',\n    default = 6,\n    type = int\n)\n\nparser.add_argument('--W_tv',help = 'weight of total variation loss',type = float,default = 0.1)\nparser.add_argument('--pad',help='padding size',type=int,default=8)\nparser.add_argument('--stddev',help = 'standard deviation of noise',type = float,default = 0.1)\nparser.add_argument('--max_iter',help = 'max iteration',type = int,default = 100,required = True)\nparser.add_argument('--print_loss',help = 'whether to print the current loss',action = 'store_true')\n\nFLAGS = parser.parse_args()\nprint_configuration_op(FLAGS)\n\n# Check the output_dir is given\nif FLAGS.output_dir is None:\n    raise ValueError('The output directory is needed')\n    \n# Check the output directory to save the checkpoint\nif not os.path.exists(FLAGS.output_dir):\n    os.mkdir(FLAGS.output_dir)\n\nif not os.path.exists(os.path.join(FLAGS.output_dir, 'buffer')):\n    os.mkdir(os.path.join(FLAGS.output_dir, 'buffer'))\n\n# pyramid = get_pyramid(targets, pyramid_layers) # a list storing the pyramid of target texture images\npyrm_layers = FLAGS.pyrm_layers\ntar_name = FLAGS.target_dir.split('/')[-1]; tar_name = tar_name.split('.')[0]\ntargets0 = data_loader(img_dir=None, FLAGS=FLAGS)\n# targets = to_tensor(targets0)\n\n#print(os.listdir(path+'buffer/'))\n'''\nfilleds = os.listdir(os.path.join(FLAGS.output_dir,'buffer'))\nn = 2048\nm = n//512\nfilleds = [int((_.split('_')[1]).split('.')[0]) for _ in filleds if str(n) in _]\ndef ind0(i):\n    return i//(m//2)*2*m + i%(m//2)*2\nflag = [(i in filleds) for i in range(m**2)]\nfilled_inds = [flag[ind0(i)] and flag[ind0(i)+1] and flag[ind0(i)+m] and flag[ind0(i)+m+1] \\\n               for i in range((m//2)**2)]\nunfilled_inds = [i for i in range((m//2)**2) if filled_inds[i]==False]\nbegin = FLAGS.texture_shape[0]//n - 1\n'''\n\nbegin = pyrm_layers - 1\nstart_time = time.time()\nfor i in range(begin, -1, -1):\n    # targets = to_tensor(targets0)\n    # w0, h0 = [targets0.shape[1], targets0.shape[2]]; w, h = [ w0//(2**i), h0//(2**i) ]\n    # target = tf.image.resize_bicubic(targets, [w, h])  \n    target = targets0[:,::(2**i),::(2**i),:]\n    if target.size > 3000000:\n        target = check_size_crop(target)\n    h1, w1 = [ FLAGS.texture_shape[0]//(2**i), FLAGS.texture_shape[1]//(2**i)]  \n    h0,w0 = h1//1024,w1//1024\n    print('\\nCurrent image : ', tar_name)\n    print('Target image size : (%d, %d)'%(target.shape[1],target.shape[2]))\n    print('Synthesizing image size : (%d, %d)' % (h1, w1))\n    print('Now in pyramid layer %i, total %i layers (from L%i to L0)\\n' \\\n          %(i, pyrm_layers, pyrm_layers-1))  \n    print('Time has passed %.3f mins'%((time.time()-start_time)/60))\n    if i == pyrm_layers - 1:\n        try:\n            Synthesis(target, layer = i, FLAGS = FLAGS)  \n        except:\n            raise ValueError('Pyramid is too high!')  \n    elif (h1//512)*(w1//512) <= 10:  \n        '''\n        The first stage ends once the synthesized image's height times width, each divided by 512,\n        would exceed 10 after being multiplied by 4! This is because the largest size this machine\n        can synthesize in a single pass is [512*2,512*5]\n        '''\n        Synthesis(target, layer = i, FLAGS = FLAGS)  \n    else:\n        if i == begin:\n            print('unfilled_inds : ',unfilled_inds)\n            for ind in unfilled_inds:\n                print('\\n\\tTime has passed %.3f minutes'%((time.time()-start_time)/60))\n                print('\\t%i/%i 512 patch of (%i,%i) image upsampled!'%(ind+1,h0*w0,h1//2,w1//2))\n                Synthesis(target, layer = i, ind = ind, FLAGS = FLAGS)  \n        else:\n            for ind in range(h0*w0):\n                print('\\n\\tTime has passed %.3f minutes'%((time.time()-start_time)/60))\n                print('\\t%i/%i 512 patch of (%i,%i) image upsampled!'%(ind+1,h0*w0,h1//2,w1//2))\n                Synthesis(target, layer = i, ind = ind, FLAGS = FLAGS)\nos.mkdir(os.path.join(FLAGS.output_dir,'%.3f'%((time.time()-start_time)/60)))\nprint('Optimization done !!! ') " }, { "alpha_fraction": 0.5632489919662476, "alphanum_fraction": 0.610519289970398, "avg_line_length": 28.156862258911133, "blob_id": "e6a5a79a3bd52f8aa4e75e666e4bf5fa957c8e82", "content_id": "69fc7bc5238dd7b4c372319882673e0f66f3c205", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1502, "license_type": "no_license", "max_line_length": 94, "num_lines": 51, "path": "/patches2img.py", "repo_name": "LiaoQian1996/tileable_textures", "src_encoding": "UTF-8", "text": "import numpy as np\nfrom PIL import Image\nimport os\nfrom functools import reduce\nimport argparse\n\n#print(os.listdir(path+'/buffer/'))\ndef concat_x(x1,x2):\n    return np.concatenate([x1,x2],axis=1)\ndef concat_y(y1,y2):\n    return np.concatenate([y1,y2],axis=0)\ndef img_loader(path):\n    im = np.asarray(Image.open(path))\n    return im\n\ndef patches2img(h1,w1=None,path=None):\n    if w1 is None:\n        w1 = h1\n    m = h1//512\n    im = []\n    for i in range(h1//512):\n        row = [os.path.join(path,'buffer',str(h1)+'_'+str(w1)+'_'+str(i*(w1//512)+j)+'.png') \\\n               for j in range(w1//512)]\n        row = list(map(img_loader,row))\n        row = reduce(concat_x,row)\n        im.append(row)\n    im = reduce(concat_y,im)\n    #fig = plt.figure(dpi=200)\n    #plt.imshow(im)\n    #plt.show()\n    im = Image.fromarray(im)\n    im.save(os.path.join(path,str(h1)+'_'+str(w1)+'.png'))\n    print(\"(%i,%i) shape of image in path %s saved!\"%(h1,w1,path))\n\nparser = argparse.ArgumentParser() \nparser.add_argument('--path',help = 'path of image patches',type = str)\na = parser.parse_args()\nif __name__ == '__main__':\n    patches2img(1024,path=a.path)\n    patches2img(2048,path=a.path)\n    patches2img(4096,path=a.path)\n    patches2img(8192,path=a.path)\n    \n" } ]
6
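The decay_mse_loss in model.py above is the heart of the tiling scheme: a mask that is 1 over the inner half of the 512-pixel overlap band, ramps linearly down over the next 256 pixels, and is 0 elsewhere, so a new patch is pinned to its neighbour near the seam and free to diverge away from it. Below is a small numpy sketch of that 1-D mask and the masked squared error; it mirrors the constants in the code but is a standalone illustration, not part of the repository.

```python
# Sketch of the 1-D decay mask from model.py's decay_mse_loss:
# ones over the first 128 px, a linear ramp over the next 256 px,
# zeros over the last 128 px of the 512 px overlap band.
import numpy as np

def decay_mask_1d():
    return np.concatenate([np.ones(128),
                           1 - np.linspace(0, 1, 256),
                           np.zeros(128)])

def decay_mse(new_band, ref_band):
    # new_band, ref_band: arrays of shape (..., 512); the penalty fades
    # with distance from the seam instead of clamping the whole band.
    mask = decay_mask_1d()
    return float(np.mean(mask * (new_band - ref_band) ** 2))

if __name__ == "__main__":
    rng = np.random.default_rng(0)
    a, b = rng.normal(size=(512, 512)), rng.normal(size=(512, 512))
    print(decay_mse(a, b))
```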
toomuchio/ja-scripts
https://github.com/toomuchio/ja-scripts
dec5323ea572fb2844613e90176812f636d5f527
a9db68a0c2716b1a82f33c63a22360d87bf63179
20eb9ef13671319a1f275e7544a74fa0ef597397
refs/heads/master
2021-05-06T19:25:40.279781
2017-11-26T23:44:39
2017-11-26T23:44:39
112,125,491
3
0
null
null
null
null
null
[ { "alpha_fraction": 0.7673469185829163, "alphanum_fraction": 0.7673469185829163, "avg_line_length": 39.83333206176758, "blob_id": "42e7924504bcb1612868790a3f6ac961e14162bb", "content_id": "b905f0b42b133d8aea27ae496dd5404be49efdcb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 245, "license_type": "no_license", "max_line_length": 111, "num_lines": 6, "path": "/README.md", "repo_name": "toomuchio/ja-scripts", "src_encoding": "UTF-8", "text": "# Jedi Acadamy Server Scripts\n \nJust random scripts used to manage my ja servers over the years\n\n* alert.py - Random alerts to players in the server, upcoming comp, command help ect.. Best used with a cronjob\n* stats.py - Stats gathering script\n" }, { "alpha_fraction": 0.6568345427513123, "alphanum_fraction": 0.6733812689781189, "avg_line_length": 24.272727966308594, "blob_id": "6d442924b02637e15971134748d7a28b2f46cd5b", "content_id": "312bcd107ddd65aaa7c7d98159e574811bb6627a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1390, "license_type": "no_license", "max_line_length": 108, "num_lines": 55, "path": "/alert.py", "repo_name": "toomuchio/ja-scripts", "src_encoding": "UTF-8", "text": "import random, socket, time\n\n#Very basic wrapper class to send rcon data to a JA server\nclass Rcon(object):\n\tMAX_SVSAY_LEN = 140\n\tMAX_TRY = 3\n\tTRY_SLEEP = 3\n\n\tdef __init__(self, iip, iport, ipass):\n\t\tself.rconSocket = None\n\t\tself.rconIP = iip\n\t\tself.rconPort = iport\n\t\tself.rconPass = ipass\n\n\tdef connect(self):\n\t\ttry:\n\t\t\tself.rconSocket.close()\n\t\texcept:\n\t\t\tpass\n\n\t\tself.rconSocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\t\tself.rconSocket.settimeout(1)\n\t\tself.rconSocket.connect((self.rconIP, self.rconPort))\n\n\tdef _send(self, payload, buffer_size=1024):\n\t\ttries = 1\n\t\twhile tries != Rcon.MAX_TRY:\n\t\t\ttry:\n\t\t\t\tself.connect()\n\t\t\t\tself.rconSocket.send(payload)\n\t\t\t\tself.rconSocket.recv(buffer_size)\n\t\t\t\tself.rconSocket.close()\n\t\t\t\treturn True\n\t\t\texcept socket.timeout:\n\t\t\t\ttries += 1\n\t\t\t\ttime.sleep(Rcon.TRY_SLEEP)\n\t\t\texcept socket.error:\n\t\t\t\treturn False\n\n\tdef say(self, msg):\n\t\tif len(msg) >= Rcon.MAX_SVSAY_LEN:\n\t\t\tself._send(\"\\xff\\xff\\xff\\xffrcon %s say %s\" % (self.rconPass, msg), 2048)\n\t\telse:\n\t\t\tself._send(\"\\xff\\xff\\xff\\xffrcon %s svsay %s\" % (self.rconPass, msg))\n\n#Config\nMY_PREFIX = \"[^2ALERT^7] \"\nMY_ALERTS = [\"Use !rank to view your rank\", \"Use !top to view the top players\", \"Use !rtv to rock the vote\"]\nMY_IP = \"localhost\"\nMY_PORT = 29070\nMY_PASS = \"some rcon password\"\n\n#Run\nrcon = Rcon(MY_IP, MY_PORT, MY_PASS)\nrcon.say(MY_PREFIX + random.choice(MY_ALERTS))\n" } ]
2
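For reference, the Rcon class in alert.py above speaks the Quake 3-family out-of-band UDP protocol that Jedi Academy inherits: four 0xFF bytes followed by the ASCII command. The alert.py above is Python 2 and sends str directly; the sketch below shows the same packet construction in Python 3, where the payload must be bytes. It is an illustrative rewrite, not code from the repository.

```python
# Sketch: build and send the out-of-band rcon datagram from alert.py, Python 3 style.
import socket

def make_rcon_packet(password: str, command: str) -> bytes:
    # \xff\xff\xff\xff marks a connectionless packet in Quake 3-family engines
    return b"\xff\xff\xff\xff" + "rcon {} {}".format(password, command).encode("ascii")

def send_rcon(host: str, port: int, password: str, command: str, timeout: float = 1.0) -> bytes:
    with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:
        s.settimeout(timeout)
        s.connect((host, port))
        s.send(make_rcon_packet(password, command))
        return s.recv(2048)  # raw server reply, also \xff\xff\xff\xff-prefixed
```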
TE3005/pokemon-project
https://github.com/TE3005/pokemon-project
ff0c70b6d53a8ceb774eae054fe825373bde5082
8100b8acf3b93d1b81e83d2bda9fbe010956bda6
c3b60eb610bc11e6d5c596417f1f465db9ca9420
refs/heads/master
2023-07-15T20:10:31.262265
2021-08-29T13:32:01
2021-08-29T13:32:01
401,047,370
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5672276616096497, "alphanum_fraction": 0.5883082151412964, "avg_line_length": 35.6363639831543, "blob_id": "28d0c73102f3de9c15a54dc7c26bf612af52330f", "content_id": "ffe2925d77dbffb5f18c721424e3ed81e71d1345", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5645, "license_type": "no_license", "max_line_length": 138, "num_lines": 154, "path": "/pokemon_server.py", "repo_name": "TE3005/pokemon-project", "src_encoding": "UTF-8", "text": "from flask import Flask ,Response, request\nimport requests\nimport json\nfrom pymysql import IntegrityError\nimport sql \nimport pokemon_api\n\napp = Flask(__name__, static_url_path = '', static_folder = 'dist')\n\[email protected]('/<path:file_path>')\ndef serve_static_file(file_path):\n return app.send_static_file(file_path)\n\n\[email protected]('/type/<pokemon_name>', methods = [\"put\"])\ndef update_types (pokemon_name):\n try:\n if not sql.correct_name(pokemon_name):\n return json.dumps({\"error\": \"The pokemon's name \" + pokemon_name + \" does not exist\"}), 404\n data = pokemon_api.pokemon(pokemon_name) \n sql.update(data,pokemon_name)\n return json.dumps({\"status\":\"update pokemon\"}), 201\n except Exception as e:\n return({\"error db\": str(e)}), 500\n\n\[email protected]('/trainers/<pokemon_name>')\ndef find_Owners(pokemon_name):\n try:\n if not sql.correct_name(pokemon_name):\n return json.dumps({\"error\": \"The pokemon's name \" + pokemon_name + \" does not exist\"}), 404\n owners = sql.find_trainers(pokemon_name)\n return json.dumps({\"names\": owners}), 200\n except Exception as e:\n return({\"error db\": str(e)}), 500 \n\n\[email protected]('/pokemons/<trainer_name>')\ndef find_Roster(trainer_name):\n try:\n if not sql.correct_trainer:\n return json.dumps({\"error\": \"The trainer's name \" + trainer_name + \" does not exist\"}), 404\n pokemons =sql.find_pokemons(trainer_name)\n return json.dumps({\"names\": pokemons}), 200\n except Exception as e:\n return({\"error db\": str(e)}), 500 \n\n\[email protected]('/pokemon', methods = [\"post\"])\ndef add_new_pokemon():\n new_pokemon = request.get_json()\n try:\n create_pokemon = pokemon_api.add_pokemon(new_pokemon) \n if create_pokemon != \" \":\n if create_pokemon == \"name\":\n return json.dumps({\"error\": \"Pokemon's name does not exist \"} ), 404\n return json.dumps({\"error\":\"The \" + create_pokemon +\" does not suitable to the pokemon's name \" + new_pokemon[\"name\"]}), 404\n return json.dumps({\"status\": \"Success. 
Added pokemon\"}), 201\n except IntegrityError as e:\n code, message = e.args\n return ({\"Error\": \"The Pokemon \" + new_pokemon[\"name\"] + \" already exist \"}), 404\n except Exception as e:\n return({\"error db\": str(e)}), 500 \n\n\[email protected]('/pokemons')\ndef find():\n try:\n ptype = request.args.get(\"type\")\n\n if not sql.correct_type:\n return json.dumps({\"error\": \"The pokemon's type \" + ptype + \" does not exist\"}), 404 \n types = sql.find_by_type(ptype)\n return json.dumps({\"names\": types}), 200\n except Exception as e:\n return({\"error db\": str(e)}), 500 \n\n\[email protected]('/pokemon/<name>,<trainer>', methods = [\"delete\"])\ndef delete (name, trainer):\n try:\n if not sql.correct_name(name):\n return json.dumps({\"error\": \"The pokemon's name \" + name + \" does not exist\"}), 404\n if trainer not in sql.find_trainers(name):\n return json.dumps({\"error\": \"The pokemon's name \" + name + \" did not belong to the \" + trainer}), 404 \n sql.delete_by_trainer(name, trainer)\n return json.dumps({\"status\": \"Success. Delete pokemon\"}), 200\n except Exception as e:\n return({\"error db\": str(e)}), 500 \n\n\[email protected]('/evolve/<name>,<trainer>', methods = [\"put\"])\ndef evolve (name, trainer):\n try:\n if not sql.correct_name(name):\n return json.dumps({\"error\": \"The pokemon's name \" + name + \" does not exist\"}), 404\n if trainer not in sql.find_trainers(name):\n return json.dumps({\"error\": \"The pokemon's name \" + name + \" did not belong to the \" + trainer}), 404 \n pevolve = pokemon_api.pokemon_evolve(name, trainer)\n if pevolve == []:\n return json.dumps({\"error\":\"The pokemon \"+ name + \" can not evolve\"}), 404\n if pevolve == 0 :\n return json.dumps({\"error\":\"The pokemon \"+ name + \" evolve to the pokemon that exist in the trainer \" + trainer}), 404\n return json.dumps({\"evolve\":\"The pokemon \" + name + \" of trainer \" + trainer + \" elvove to \" + pevolve }), 201\n except Exception as e:\n return({\"error db\": str(e)}), 500 \n\n\[email protected]('/rate/<name>',methods = [\"put\"])\ndef rate(name):\n try:\n if not sql.correct_name(name):\n return json.dumps({\"error\": \"The pokemon's name \" + name + \" does not exist\"}), 404\n sql.rate_pokemon(name)\n return json.dumps({\"status\": \"Success.Rated pokemon\"}), 200\n except Exception as e:\n return({\"error db\": str(e)}), 500 \n\n\[email protected]('/max_rating')\ndef max_rating():\n try:\n max_ = sql.max_rating()\n return json.dumps(max_), 200\n except Exception as e:\n return({\"error db\": str(e)}), 500\n\n\[email protected]('/rating/<pokemon>')\ndef rating(pokemon):\n try:\n if not sql.correct_name(pokemon):\n return json.dumps({\"error\": \"The pokemon's name \" + pokemon + \" does not exist\"}), 404\n rat = sql.get_rating(pokemon)\n return json.dumps({\"name\": pokemon, \"rating\": rat}), 200\n except Exception as e:\n return({\"error db\": str(e)}), 500 \n\n\[email protected]('/avg_move')\ndef avg_move():\n try:\n moves = request.get_json()\n max_avg = pokemon_api.move_average(moves[\"names\"])\n if max_avg == \"error\":\n return json.dumps({\"error\": \"The names of the moves are incorrect\"}), 404\n return json.dumps(max_avg), 200\n except Exception as e:\n return({\"error db\": str(e)}), 500 \n\n\n\nif __name__ == '__main__':\n app.run(port = 3001) " }, { "alpha_fraction": 0.5903802514076233, "alphanum_fraction": 0.5921167135238647, "avg_line_length": 35.36075973510742, "blob_id": "1e7e3a21325ff87943ea81994e3faa0cfe40ff3c", "content_id": 
"1d81fe2a0cbf66f321fb1b22343643a23d6acc04", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5759, "license_type": "no_license", "max_line_length": 127, "num_lines": 158, "path": "/sql.py", "repo_name": "TE3005/pokemon-project", "src_encoding": "UTF-8", "text": "import pymysql\nconnection = pymysql.connect(\n host=\"localhost\",\n user=\"root\",\n password=\"1234\",\n db=\"sql_intro\",\n charset=\"utf8\",\n cursorclass=pymysql.cursors.DictCursor\n)\n\n\ndef pokemon_id(name):\n with connection.cursor() as cursor:\n query = \"SELECT id from pokemon WHERE name = '{}'\".format(name)\n cursor.execute(query)\n result = cursor.fetchall()\n result = result[0][\"id\"]\n return result\n\n\ndef update (pokemon_data, name):\n id_ = pokemon_id(name)\n with connection.cursor() as cursor:\n for item in pokemon_data[\"types\"]:\n query = \"\"\"INSERT into TypesPokemon (pokemon_id, pokemon_type) SELECT %s,%s \n WHERE NOT EXISTS (SELECT %s, %s FROM TypesPokemon WHERE %s = pokemon_id and %s = pokemon_type)\"\"\"\n value = (id_, item[\"type\"][\"name\"],id_, item[\"type\"][\"name\"],id_, item[\"type\"][\"name\"])\n cursor.execute(query, value)\n connection.commit()\n\n\ndef correct_name(pokemon_name):\n with connection.cursor() as cursor:\n query = \"SELECT name FROM pokemon WHERE name = '{}'\".format(pokemon_name)\n cursor.execute(query)\n if(cursor.fetchall() == ()):\n return False\n return True \n\n\ndef correct_trainer(trainer_name):\n with connection.cursor() as cursor:\n query = \"SELECT name FROM owner WHERE name = '{}'\".format(trainer_name)\n cursor.execute(query)\n if(cursor.fetchall() == ()):\n return False \n return True\n\n\ndef correct_type(type_):\n with connection.cursor() as cursor:\n query = \"SELECT pokemon_type FROM TypesPokemon WHERE pokemon_type = '{}'\".format(type_)\n cursor.execute(query)\n if(cursor.fetchall() == ()):\n return False\n return True \n\n\ndef find_trainers(pokemon_name):\n with connection.cursor() as cursor:\n query = \"\"\"SELECT owne_name FROM pokemon JOIN ownerPokemon on\n id = pokemon_id WHERE name = '{}' \"\"\".format(pokemon_name)\n cursor.execute(query)\n result = cursor.fetchall()\n return [x [\"owne_name\"]for x in result] \n\n\ndef find_pokemons(trainer_name):\n with connection.cursor() as cursor:\n query = \"\"\"SELECT name FROM pokemon JOIN ownerPokemon on \n id = pokemon_id WHERE owne_name = '{}' \"\"\".format(trainer_name)\n cursor.execute(query)\n result = cursor.fetchall()\n return [x[\"name\"] for x in result] \n\n\ndef find_by_type(type_):\n with connection.cursor() as cursor:\n query = \"\"\"SELECT name FROM pokemon join TypesPokemon on id = pokemon_id \n WHERE pokemon_type ='{}' \"\"\".format(type_)\n cursor.execute(query)\n result = cursor.fetchall()\n return [x[\"name\"]for x in result] \n\n\ndef create_pokemon(pokemon_api,new_pokemon):\n with connection.cursor() as cursor:\n query = 'INSERT into pokemon (id, name , height, weight) values (%s, %s, %s, %s)'\n val = (new_pokemon[\"id\"], new_pokemon[\"name\"], new_pokemon[\"height\"], new_pokemon[\"weight\"])\n cursor.execute(query, val)\n connection.commit()\n update(pokemon_api, new_pokemon[\"name\"])\n\n\ndef delete_by_trainer(pokemon, trainer):\n with connection.cursor() as cursor:\n id_ = pokemon_id(pokemon)\n query = \"DELETE FROM ownerPokemon WHERE owne_name = '{}' and pokemon_id = '{}'\".format(trainer, id_) \n cursor.execute(query) \n connection.commit()\n\n\ndef evolve_by_trainer(name, trainer):\n with connection.cursor() as cursor:\n 
query = \"SELECT town FROM owner WHERE name = '{}'\".format(trainer) \n cursor.execute(query)\n town_result = cursor.fetchall()[0][\"town\"]\n id_evolve = pokemon_id (name)\n query = \"\"\"INSERT into ownerPokemon(pokemon_id,owne_name, owne_town) SELECT %s,%s,%s\n WHERE NOT EXISTS(SELECT %s, %s, %s from ownerPokemon WHERE %s = pokemon_id AND %s = owne_name AND %s = owne_town)\"\"\" \n val = (id_evolve, trainer,town_result, id_evolve, trainer,town_result, id_evolve, trainer,town_result)\n cursor.execute(query, val)\n connection.commit()\n\n\ndef get_rating(pokemon):\n with connection.cursor() as cursor:\n id_ = pokemon_id(pokemon)\n query = \"SELECT rating FROM favorite_pokemon WHERE pokemon_id = {}\".format(id_)\n cursor.execute(query)\n result = cursor.fetchall()\n if result == ():\n return 0\n return result[0][\"rating\"] \n\n\ndef max_rating():\n with connection.cursor() as cursor:\n query = \"SELECT MAX(rating) as max FROM favorite_pokemon\"\n cursor.execute(query)\n result = cursor.fetchall()\n max_ = result[0][\"max\"]\n\n query = \"SELECT pokemon_id FROM favorite_pokemon WHERE rating = {}\".format(max_)\n cursor.execute(query)\n result = cursor.fetchall()\n fav_pokemons = []\n for item in result:\n query = \"SELECT name FROM pokemon WHERE id = {}\".format(item[\"pokemon_id\"])\n cursor.execute(query)\n result = cursor.fetchall()\n fav_pokemons.append(result[0][\"name\"])\n return {\"max_rating\":max_ , \"names\": fav_pokemons} \n\n \ndef rate_pokemon(name):\n with connection.cursor() as cursor:\n id_ = pokemon_id(name)\n pokemon_rat = get_rating(name) \n if pokemon_rat == 0:\n query = \"INSERT into favorite_pokemon (pokemon_id, rating) values(%s, %s)\"\n val = (id_, 1)\n cursor.execute(query, val)\n connection.commit()\n else:\n query = \"UPDATE favorite_pokemon SET rating = {} WHERE pokemon_id = {}\".format(pokemon_rat + 1 , id_)\n cursor.execute(query)\n connection.commit() \n \n\n" }, { "alpha_fraction": 0.6534296274185181, "alphanum_fraction": 0.667870044708252, "avg_line_length": 18.738094329833984, "blob_id": "fab63d8db971393f16034c180c402eb5895d9846", "content_id": "634e5dc0bb3deff8448555c8223587ec39f575b6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 831, "license_type": "no_license", "max_line_length": 68, "num_lines": 42, "path": "/data.sql", "repo_name": "TE3005/pokemon-project", "src_encoding": "UTF-8", "text": "USE sql_intro; \n\nCREATE TABLE owner(\n name VARCHAR (20),\n town VARCHAR (20),\nPRIMARY KEY(name, town)\n);\n\n\nCREATE TABLE pokemon(\n id INT ,\n name VARCHAR(20),\n height INT,\n weight INT,\n PRIMARY KEY(id)\n);\n\n\nCREATE TABLE ownerPokemon(\n pokemon_id INT ,\n owne_name VARCHAR (20),\n owne_town VARCHAR (20),\n PRIMARY KEY(pokemon_id, owne_name, owne_town),\n FOREIGN KEY(owne_name, owne_town) REFERENCES owner (name, town),\n FOREIGN KEY(pokemon_id) REFERENCES pokemon (id)\n);\n\n\nCREATE TABLE TypesPokemon (\n pokemon_id INT,\n pokemon_type VARCHAR(20),\n PRIMARY KEY(pokemon_id, pokemon_type),\n FOREIGN KEY (pokemon_id) REFERENCES pokemon (id)\n);\n\n\nCREATE TABLE favorite_pokemon(\n\tpokemon_id INT,\n\trating INT,\n\tPRIMARY KEY(pokemon_id),\n\tFOREIGN KEY(pokemon_id) REFERENCES pokemon (id)\n); \n\n" }, { "alpha_fraction": 0.545511782169342, "alphanum_fraction": 0.5480315089225769, "avg_line_length": 34.13333511352539, "blob_id": "419804d47f475d78f36ef6c68eb13f23742efcb4", "content_id": "58f481684caea64365fc5d4ce3b2a0bdea7e68d4", "detected_licenses": [], "is_generated": false, "is_vendor": 
false, "language": "Python", "length_bytes": 3175, "license_type": "no_license", "max_line_length": 79, "num_lines": 90, "path": "/pokemon_api.py", "repo_name": "TE3005/pokemon-project", "src_encoding": "UTF-8", "text": "import requests\nimport json\nimport sql\n\ndef pokemon(name):\n pokemon_url = 'https://pokeapi.co/api/v2/pokemon/{}'.format(name)\n res = requests.get(url = pokemon_url, verify=False)\n pokemon_data = res.json ()\n return pokemon_data\n\n\ndef add_pokemon(new_pokemon):\n try:\n pokemon_data = pokemon(new_pokemon[\"name\"])\n if (new_pokemon[\"id\"]) != pokemon_data [\"id\"]:\n return \"id \"\n if (new_pokemon[\"height\"]) != pokemon_data [\"height\"]:\n return \"height \" \n if (new_pokemon[\"weight\"]) != pokemon_data [\"weight\"]:\n return \"weight \"\n types_list = [x[\"type\"][\"name\"] for x in pokemon_data[\"types\"]] \n for item in new_pokemon[\"types\"]:\n if item not in types_list:\n return \"type \" + item\n if len(types_list) != len(new_pokemon[\"types\"]):\n return \"type's pokemon less \"\n\n sql.create_pokemon(pokemon_data,new_pokemon)\n return \" \"\n except json.decoder.JSONDecodeError:\n return \"name\" \n\n\ndef add (pokemon):\n pokemon_url = 'https://pokeapi.co/api/v2/pokemon/{}'.format(pokemon)\n res = requests.get(url = pokemon_url, verify=False)\n pokemon_data = res.json ()\n sql.create_pokemon(pokemon_data,pokemon_data)\n \n\ndef pokemon_evolve(pokemon,trainer):\n url_evolve = 'https://pokeapi.co/api/v2/pokemon-species/{}'.format(pokemon)\n res = requests.get(url = url_evolve , verify=False)\n evolve_data = res.json()\n url_chain = str(evolve_data[\"evolution_chain\"][\"url\"])\n res = requests.get(url = url_chain , verify=False)\n evolve_chain = res.json()\n find = False \n list_ = evolve_chain[\"chain\"][\"evolves_to\"] \n if evolve_chain[\"chain\"][\"species\"][\"name\"]!= pokemon:\n while not find: \n for item in list_:\n if item [\"species\"][\"name\"] == pokemon:\n find = True\n list_ = item [\"evolves_to\"]\n break\n else:\n list_ = list_[0][\"evolves_to\"]\n if list_== []:\n return [] \n for item in list_:\n if item [\"species\"][\"name\"] not in sql.find_pokemons(trainer):\n new_pokemon = item [\"species\"][\"name\"] \n break\n else:\n return 0\n if not (sql.correct_name(new_pokemon)):\n add(new_pokemon)\n sql.evolve_by_trainer(new_pokemon,trainer)\n return new_pokemon\n\n\ndef move_average(list_moves):\n try:\n max_move = {\"max_avg\":0, \"moves\":[]}\n keys = [\"accuracy\", \"pp\", \"power\" ]\n for move in list_moves:\n url_moves = 'https://pokeapi.co/api/v2/move/{}'.format(move)\n res = requests.get(url = url_moves , verify=False)\n move_data = res.json()\n list_values = [move_data[key]for key in keys if move_data[key]]\n avg = sum(list_values)/3 \n if avg == max_move[\"max_avg\"]:\n max_move[\"moves\"].append(move) \n elif avg > max_move[\"max_avg\"]:\n max_move[\"max_avg\"] = avg\n max_move[\"moves\"] = [move]\n return max_move \n except json.decoder.JSONDecodeError:\n return(\"error\")\n\n\n\n\n\n\n\n \n" } ]
4
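The trickiest part of pokemon_api.py above is pokemon_evolve, which walks PokeAPI's nested evolution chain (chain -> evolves_to -> evolves_to ...) to find the species one step after a given name. The sketch below shows that traversal over a hardcoded chain fragment so it runs without network access; the sample data only mimics PokeAPI's JSON shape and is not a real API response.

```python
# Sketch: find the direct evolutions of `name` in a PokeAPI-style chain dict.
# Returns [] both when the species ends its chain and when it is not present.
def next_evolutions(chain, name):
    if chain["species"]["name"] == name:
        return [n["species"]["name"] for n in chain["evolves_to"]]
    for branch in chain["evolves_to"]:
        found = next_evolutions(branch, name)
        if found:
            return found
    return []

if __name__ == "__main__":
    # Hypothetical fragment in PokeAPI's shape (the species names are real).
    chain = {"species": {"name": "charmander"},
             "evolves_to": [{"species": {"name": "charmeleon"},
                             "evolves_to": [{"species": {"name": "charizard"},
                                             "evolves_to": []}]}]}
    print(next_evolutions(chain, "charmeleon"))  # ['charizard']
```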
rafaellamgs/mini_twitter
https://github.com/rafaellamgs/mini_twitter
6ac07ff1641433be9359ae0c010708db2a668331
af9c77813d94ec833fcfd36e3c77bb835b2a703b
68c867f25021910b32494467efe6b0035c2440ef
refs/heads/master
2020-04-17T15:10:31.977565
2019-01-20T19:42:48
2019-01-20T19:42:48
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6200000047683716, "alphanum_fraction": 0.6272727251052856, "avg_line_length": 21.79166603088379, "blob_id": "098fc853a8cafd78717066ced224aab5842a6cb6", "content_id": "31ff66cb4c78e70fbc03ffcf9fdebedbf65022ae", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 550, "license_type": "permissive", "max_line_length": 46, "num_lines": 24, "path": "/MiniTwitter/Tweet/models.py", "repo_name": "rafaellamgs/mini_twitter", "src_encoding": "UTF-8", "text": "from django.db import models\nfrom django.contrib.auth import get_user_model\nfrom django.utils import timezone\n\n# Create your models here.\nclass Tweet(models.Model):\n author = models.ForeignKey (\n get_user_model(),\n on_delete = models.CASCADE,\n related_name='tweet'\n )\n content = models.TextField(\n max_length=280\n )\n\n created_at = models.DateTimeField(\n default = timezone.now\n )\n likes = models.BigIntegerField(\n default = 0\n )\n\n def __str__(self):\n return self.content\n\n\n\n" }, { "alpha_fraction": 0.5786272883415222, "alphanum_fraction": 0.6055603623390198, "avg_line_length": 29.3157901763916, "blob_id": "b9915c1cc59543fa6ac33268c7c84d86c64ef7f2", "content_id": "a5305d11fe1768eb380c7b7f29a86bf7b72f388a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1153, "license_type": "permissive", "max_line_length": 79, "num_lines": 38, "path": "/MiniTwitter/accounts/validators.py", "repo_name": "rafaellamgs/mini_twitter", "src_encoding": "UTF-8", "text": "from django.core. exceptions import ValidationError\n\n\ndef validate_cpf(cpf):\n expected_cpf = [int(digit) for digit in cpf][:9] # convert str to int list\n cpf_test = [int(digit) for digit in cpf]\n\n # Building first verif_digit\n weights = [10, 9, 8, 7, 6, 5, 4, 3, 2]\n result = []\n for idx, w in enumerate(weights):\n x = w*expected_cpf[idx]\n result.append(x)\n resul_sum = sum(result)\n remainder = resul_sum % 11 # used to validate the first veirf_digit\n if 11 - remainder > 9:\n expected_cpf.append(0)\n else:\n expected_cpf.append(11 - remainder)\n\n # validating the second verif_digit\n weights = [11] + weights # append 11 at the begginning\n result = []\n for idx, w in enumerate(weights):\n x = w*expected_cpf[idx]\n result.append(x)\n resul_sum = sum(result)\n remainder = resul_sum % 11\n if 11 - remainder > 9:\n expected_cpf.append(0)\n else:\n expected_cpf.append(11 - remainder)\n\n if cpf_test != expected_cpf:\n # print(\"Por favor, insira um CPF válido!\")\n raise ValidationError(\n 'Por favor, insira um CPF válido.'\n)" }, { "alpha_fraction": 0.8054711222648621, "alphanum_fraction": 0.8054711222648621, "avg_line_length": 31.799999237060547, "blob_id": "3757007b3e1ca44b3c1246919e901f03ac236e7f", "content_id": "3a3616a9784d987211809f035d8761ef4dbd0afc", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 329, "license_type": "permissive", "max_line_length": 57, "num_lines": 10, "path": "/MiniTwitter/accounts/views.py", "repo_name": "rafaellamgs/mini_twitter", "src_encoding": "UTF-8", "text": "from django.shortcuts import render\nfrom rest_framework import viewsets\nfrom .models import MyUser\n\nfrom .serializers import MyUserMModelSerializer\n# Create your views here.\n\nclass MyUserModelViewSet(viewsets.ModelViewSet):\n serializer_class = MyUserMModelSerializer\n queryset = MyUser.objects.all().order_by('-username')\n\n" }, { "alpha_fraction": 
0.7922077775001526, "alphanum_fraction": 0.7922077775001526, "avg_line_length": 21, "blob_id": "7f971a3a19b46eb9a4a7134eabea7a43af321a41", "content_id": "cfd5dcaa4ce9be7ae3855add74cc79b8f7f33622", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 308, "license_type": "permissive", "max_line_length": 48, "num_lines": 14, "path": "/MiniTwitter/Tweet/views.py", "repo_name": "rafaellamgs/mini_twitter", "src_encoding": "UTF-8", "text": "from django.shortcuts import render\n\nfrom rest_framework import viewsets\n\nfrom .models import Tweet\nfrom .serializers import TweetModelSerializer\n\nclass TweetModelViewSet(viewsets.ModelsViewSet):\n serializer_class = TweetModelSerializer\n queryset = Tweet.objects.all()\n \n\n\n# Create your views here.\n" }, { "alpha_fraction": 0.7115384340286255, "alphanum_fraction": 0.7115384340286255, "avg_line_length": 25.125, "blob_id": "aab112603abb988383eea89097b7fc038b399539", "content_id": "af3fe4beeada52a1013dbc3fc606a1a73e6bdd99", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 208, "license_type": "permissive", "max_line_length": 56, "num_lines": 8, "path": "/MiniTwitter/MiniTwitter/serializer.py", "repo_name": "rafaellamgs/mini_twitter", "src_encoding": "UTF-8", "text": "from rest_framework import serializers\n\nfrom .models import Tweet\n\nclass TweetModelSerializer(serializers.ModelSerializer):\n class Meta:\n model = Tweet\n fiels = ('author', 'content', 'likes')" }, { "alpha_fraction": 0.6392045617103577, "alphanum_fraction": 0.6448863744735718, "avg_line_length": 24.071428298950195, "blob_id": "25816cc79ee50d2a54a72792ba6ecd19875b6729", "content_id": "22ae976c98358959bc76ab0b1d9113f8fb8b1723", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 352, "license_type": "permissive", "max_line_length": 43, "num_lines": 14, "path": "/MiniTwitter/accounts/models.py", "repo_name": "rafaellamgs/mini_twitter", "src_encoding": "UTF-8", "text": "from django.db import models\nfrom django.contrib.auth.models import User\nfrom .validators import validate_cpf\n\nclass MyUser(User):\n cpf = models.CharField(\n max_length = 14,\n #validators = [validate_cpf]\n )\n following = models.ManyToManyField(\n 'self',\n related_name= 'followers',\n symmetrical= False\n )\n\n" }, { "alpha_fraction": 0.7180616855621338, "alphanum_fraction": 0.7180616855621338, "avg_line_length": 27.5, "blob_id": "ca0f601f1afcd692188b6397a7efb8602ecf5339", "content_id": "f19c36460b293237ddb6b63195512316ee57dc39", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 227, "license_type": "permissive", "max_line_length": 62, "num_lines": 8, "path": "/MiniTwitter/accounts/serializers.py", "repo_name": "rafaellamgs/mini_twitter", "src_encoding": "UTF-8", "text": "from rest_framework import serializers\n\nfrom .models import MyUser\n\nclass MyUserModelSerializer(serializers.ModelSerializer):\n class Meta:\n model = MyUser\n fiels = ('username', 'email', 'password', 'following')" } ]
7
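The validators.py entry in the mini_twitter record above implements the Brazilian CPF mod-11 check-digit rule with two hand-rolled weighted-sum loops. The sketch below is a condensed illustration of the same arithmetic, not code from the repository — the function name `cpf_is_valid` and the sample number are assumptions of this example:

```python
# Minimal sketch of the CPF mod-11 check-digit scheme used by validate_cpf above.
# `cpf_is_valid` is a hypothetical helper name, not part of the repository.
def cpf_is_valid(cpf: str) -> bool:
    digits = [int(c) for c in cpf if c.isdigit()]
    if len(digits) != 11:
        return False
    for n in (9, 10):  # first check digit weighs 9 digits, second weighs 10
        weighted = sum(d * w for d, w in zip(digits[:n], range(n + 1, 1, -1)))
        check = 11 - (weighted % 11)
        if (0 if check > 9 else check) != digits[n]:
            return False
    return True

print(cpf_is_valid("52998224725"))  # a commonly cited valid test CPF -> True
```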
HamaniKhalil/iot-traffic-predictor
https://github.com/HamaniKhalil/iot-traffic-predictor
e933c8b2bdef1625064ff68159a5c18368f09cd4
22e1c634011e944a19e1271e182c3f3c0dfc0225
3a9f97dfcdbeb2916a40dc405dd3536deae3de56
refs/heads/master
2020-05-05T00:31:15.267647
2019-04-04T21:22:58
2019-04-04T21:22:58
179,577,736
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.4593749940395355, "alphanum_fraction": 0.4635416567325592, "avg_line_length": 38.18367385864258, "blob_id": "0cc7c015b0ffead20bf570b322508e00f58011ea", "content_id": "c9d5ac4fc070c1bd3d2dc01b607a74651f7cd5bf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1926, "license_type": "no_license", "max_line_length": 121, "num_lines": 49, "path": "/Encoder/Encoder.py", "repo_name": "HamaniKhalil/iot-traffic-predictor", "src_encoding": "UTF-8", "text": "# -*- coding: UTF-8 -*-\n#!/anaconda3/bin/python\n\n# -----------------------------------------------------------------------------------------------------------------------\n# | Encoder Class |\n# -----------------------------------------------------------------------------------------------------------------------\n\nclass Encoder:\n def __init__(self, count=1):\n self.data_to_index = {}\n self.index_to_data = {}\n self.count = count\n\n def set_data_to_index(self, data_to_index):\n self.data_to_index = data_to_index\n self.index_to_data = {v: k for k, v in list(data_to_index.items())}\n self.count = max(list(self.data_to_index.values()))\n\n def encode_data(self, new_data): # encode la nouvelle donnée si elle n'est pas déjà encodé, sinon retourn son code\n if isinstance(new_data, str):\n new_data = [new_data]\n \n res = []\n\n for data in new_data:\n if data not in self.data_to_index:\n self.data_to_index[data] = self.count\n self.index_to_data[self.count] = data\n self.count += 1\n res.append(self.data_to_index[data])\n return res\n\n def set_code(self, data, code): # assigne un code a la donnée \"data\"\n if data not in self.data_to_index:\n self.index_to_data[code] = data\n self.data_to_index[data] = code\n return code\n else:\n return self.data_to_index[data]\n\n def get_code(self, data): # donne le code correspondant a data sinon 0 si n exsite pas\n for i in self.data_to_index: \n if i == data:\n return self.data_to_index[i]\n return 0\n\n\n def get_data(self, code): # donne la donnée correspondante a code sinon 0\n return self.index_to_data.get(code, 0)\n" }, { "alpha_fraction": 0.7916666865348816, "alphanum_fraction": 0.7916666865348816, "avg_line_length": 23, "blob_id": "ad8188097d4c01d12a572b2aff8e11511e6415b6", "content_id": "a196c3c7b0e88cee7a46774a162680d67b69f8eb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 24, "license_type": "no_license", "max_line_length": 23, "num_lines": 1, "path": "/README.md", "repo_name": "HamaniKhalil/iot-traffic-predictor", "src_encoding": "UTF-8", "text": "# iot-traffic-predictor\n" } ]
2
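A possible round-trip through the Encoder class from the record above (illustrative only — the sample protocol strings are made up, and the import path assumes the repository layout shown in the record):

```python
# Hypothetical usage of the Encoder class defined in Encoder/Encoder.py above.
from Encoder.Encoder import Encoder

enc = Encoder()                   # codes start at count=1
codes = enc.encode_data(["tcp", "udp", "tcp"])
print(codes)                      # [1, 2, 1] - repeated data reuses its code
print(enc.get_data(2))            # 'udp'
print(enc.get_code("icmp"))       # 0 - unseen data has no code yet
```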
Graduation-tanta/lang-modeling
https://github.com/Graduation-tanta/lang-modeling
293833d343a508bf3464352b3487c21aabb04bbd
8315ea1c7bc243138f10cfc3678cddbbec111f7d
177ec52e5da63d5d0bcd07e820fcc18948f6cb6f
refs/heads/master
2020-03-21T13:04:35.622913
2018-05-18T18:11:34
2018-05-18T18:11:34
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6428717970848083, "alphanum_fraction": 0.6568472981452942, "avg_line_length": 49.50865173339844, "blob_id": "54f81d1d04ada1c8118d56c0374d27e6ddc1c0ab", "content_id": "8de41e1499ee470ca23e171bd0f07a68fa347201", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 14597, "license_type": "no_license", "max_line_length": 245, "num_lines": 289, "path": "/tf_my_lm_charlevel.py", "repo_name": "Graduation-tanta/lang-modeling", "src_encoding": "UTF-8", "text": "import gzip\nimport math\nimport os\nimport pickle\nimport sys\nimport time\n\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow.contrib import rnn # rnn stuff temporarily in contrib, moving back to code in TF 1.1\n\nfrom lm_commons import read_data_files_as_chars, convert_to_one_line, build_char_dataset, minibatch_generator, negativeLogProb, perplexity, sample_from_probabilities, make_zip_results, minute\n\ntf.set_random_seed(0)\n\noutputFileName = 'linux.log'\nsys.stdout = open(outputFileName, 'w')\n\n# configs\nkeep_prop = .8 # of course for training .. give 1 at validation\nsequence_length = 30\nbatch_size = 200\nvocabulary_size = -1 # will be initialized by the text encoder\ninternal_state_size = 512\nstacked_layers = 3\nlearning_rate = 0.001 # fixed learning rate for optimizer\ngrad_clip = 10\n\nvalidation_steps = 1000\nnum_epochs = 10000\n\nlog_epoch_every_mins = 3\nvalidation_every_mins = 5\nchechpoint_every_mins = 15\n\ndata_root_path = './tuts/Martin RNN/shakespeare/**/*.txt'\ndata_root_path = \"linux.txt\"\n\nconcat_text = read_data_files_as_chars(data_root_path)\ndata_length = len(concat_text)\nprint(\"data length in chars \", data_length)\nprint(\"-\" * 100 + '\\n', \"sample data\", convert_to_one_line(''.join(concat_text[:500])))\nprint(\"-\" * 100 + '\\n', ''.join(concat_text[:500]))\n\nencoded_data, counts, dictionary, reverse_dictionary = build_char_dataset(concat_text)\ntrain, validation = encoded_data[:data_length - data_length // 10], encoded_data[data_length - data_length // 10:]\n\n# noinspection PyRedeclaration\nvocabulary_size = len(counts)\n\nprint(\"model hyperparams\", {\n 'keep_prop': keep_prop,\n 'sequence_length': sequence_length,\n 'batch_size': batch_size,\n 'vocabulary_size': vocabulary_size,\n 'internal_state_size': internal_state_size,\n 'stacked_layers': stacked_layers,\n 'learning_rate': learning_rate,\n 'grad_clip': grad_clip, }\n )\n\nwith gzip.open(\"reverse_dictionary.pkl.gz\", 'w') as f:\n pickle.dump(reverse_dictionary, f)\n\nvalidation_generator = minibatch_generator(validation, nb_epochs=1, gen_batch_size=1, gen_seq_len=1)\n\ngraph = tf.Graph()\nwith graph.as_default():\n keep_prop_tf = tf.placeholder(tf.float32, name='keep_prop_tf') # , name='keep_prop_tf'\n\n softmax_weights = tf.Variable(tf.truncated_normal([internal_state_size, vocabulary_size], stddev=1.0 / math.sqrt(internal_state_size)), name='weights')\n softmax_biases = tf.Variable(tf.zeros([vocabulary_size]), name='biases')\n\n # repeat it stacked_layers\n dropcells = [tf.contrib.rnn.DropoutWrapper(tf.contrib.rnn.GRUCell(internal_state_size), output_keep_prob=keep_prop_tf) for _ in range(stacked_layers)]\n multi_cell = rnn.MultiRNNCell(dropcells, state_is_tuple=True)\n\n with tf.variable_scope('train'):\n '''inputs'''\n batch_seq_input = tf.placeholder(tf.uint8, [batch_size, sequence_length]) # [ batch_size, sequence_length ] --- None will share the graph between validation and training\n one_hot_batch_seq_input = tf.one_hot(batch_seq_input, 
vocabulary_size, 1.0, 0.0) # [ batch_size, sequence_length, vocabulary_size ]\n '''expected outputs = same sequence shifted by 1 since we are trying to predict the next character'''\n batch_seq_labels = tf.placeholder(tf.uint8, [batch_size, sequence_length]) # [ batch_size, sequence_length ]\n # one_hot_batch_seq_labels = tf.one_hot(batch_seq_labels, vocabulary_size, 1.0, 0.0) # [ batch_size, sequence_length, vocabulary_size ] # i will use sparse cross entropy\n\n # When using state_is_tuple=True, you must use multicell.zero_state\n # to create a tuple of placeholders for the input states (one state per layer).\n # When executed using session.run(zerostate), this also returns the correctly\n # shaped initial zero state to use when starting your training loop.\n zero_state_train = multi_cell.zero_state(batch_size, dtype=tf.float32)\n\n print(zero_state_train)\n out_states_train, hidden_state_train = tf.nn.dynamic_rnn(multi_cell, one_hot_batch_seq_input, dtype=tf.float32, initial_state=zero_state_train)\n # out_states_train: [ batch_size, sequence_length, internal_state_size ]\n # hidden_state_train: [ batch_size, internal_state_size*stacked_layers ] # this is the last state in the sequence\n\n # Softmax layer implementation:\n # Flatten the first two dimension of the output [ batch_size, sequence_length, vocabulary_size ] => [ batch_size x sequence_length, vocabulary_size ]\n # then apply softmax readout layer. This way, the weights and biases are shared across unrolled time steps.\n # From the readout point of view, a value coming from a cell or a minibatch is the same thing\n\n out_states_flattened_train = tf.reshape(out_states_train, [-1, internal_state_size]) # [ batch_size x sequence_length, internal_state_size ]\n logits_train = tf.nn.xw_plus_b(out_states_flattened_train, softmax_weights, softmax_biases) # [ batch_size x sequence_length, internal_state_size >>> project on vocabulary_space ]\n\n batch_seq_softmax_train = tf.nn.softmax(logits_train) # [ batch_size x sequence_length, vocabulary_size ]\n batch_seq_pred_train = tf.reshape(tf.argmax(batch_seq_softmax_train, 1), [batch_size, -1]) # [ batch_size, sequence_length ]\n\n # labels\n labels_flattened = tf.cast(tf.reshape(batch_seq_labels, [-1]), dtype=tf.int32) # [ batch_size x sequence_length ] .... 
dont leave it int8\n\n # optimization\n loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits_train, labels=labels_flattened) # [ batch_size x sequence_length ] vs [ batch_size x sequence_length, vocabulary_size ] so losses are [ batch_size x sequence_length ]\n optimizer = tf.train.AdamOptimizer(learning_rate)\n\n # returns grads_and_vars is a list of tuples [(gradient, variable)]\n gradients, variables = zip(*optimizer.compute_gradients(loss))\n # check this\n # zip([(2,3),(4,5),(4,5),(4,5),(4,5)]) will be <zip at 0xb3de450648> and as list [((2, 3),), ((4, 5),), ((4, 5),), ((4, 5),), ((4, 5),)] !!\n\n # a,b=zip(*[(2,3),(4,5),(4,5),(4,5),(4,5)]) will be ((2, 4, 4, 4, 4),(3, 5, 5, 5, 5))\n # the same as so * is unpack operator\n # a,b=zip((2,3),(4,5),(4,5),(4,5),(4,5))\n\n gradients, _ = tf.clip_by_global_norm(gradients, clip_norm=grad_clip)\n # computes the global norm and then shrink all gradients with the same ratio clip_norm/global_norm only if global_norm > clip_norm\n\n train_step = optimizer.apply_gradients(list(zip(gradients, variables))) # zip to relate variable to gradient as list of tuples\n\n # stats for display\n mean_loss = tf.reduce_mean(loss)\n accuracy = tf.reduce_mean(tf.cast(tf.equal(batch_seq_labels, tf.cast(batch_seq_pred_train, tf.uint8)), tf.float32))\n loss_summary = tf.summary.scalar(\"batch_loss\", mean_loss)\n acc_summary = tf.summary.scalar(\"batch_accuracy\", accuracy)\n summaries = tf.summary.merge([loss_summary, acc_summary])\n\n with tf.variable_scope('valid'): # batch size is always 1\n '''inputs'''\n val_seq_input = tf.placeholder(tf.uint8, [1, None], name='val_seq_input') # [ 1, dynamic sequence_length ] --- None will share the graph between validation and training\n one_hot_val_seq_input = tf.one_hot(val_seq_input, vocabulary_size, 1.0, 0.0) # [ 1, sequence_length, vocabulary_size ]\n\n zero_state_valid = multi_cell.zero_state(1, dtype=tf.float32)\n\n out_states_valid, hidden_state_valid = tf.nn.dynamic_rnn(multi_cell, one_hot_val_seq_input, dtype=tf.float32, initial_state=zero_state_valid)\n # out_states_valid: [ 1, sequence_length, internal_state_size ]\n # hidden_state_valid: [ 1, internal_state_size*stacked_layers ] # this is the last state in the sequence\n\n hidden_state_valid = tf.identity(hidden_state_valid, name='hidden') # just to give it a name\n\n # Softmax layer implementation:\n # Flatten the first two dimension of the output [ 1, sequence_length, vocabulary_size ] => [ 1 x sequence_length, vocabulary_size ]\n # then apply softmax readout layer. This way, the weights and biases are shared across unrolled time steps.\n # From the readout point of view, a value coming from a cell or a minibatch is the same thing\n out_states_flattened_valid = tf.reshape(out_states_valid, [-1, internal_state_size]) # [ 1 x sequence_length, internal_state_size ]\n\n logits_valid = tf.nn.xw_plus_b(out_states_flattened_valid, softmax_weights, softmax_biases) # [ 1 x sequence_length, vocabulary_size ] >>> project on vocabulary space\n\n final_predictions = tf.nn.softmax(logits_valid, name=\"final_predictions\") # [ batch_size, sequence_length ]\n\n saver = tf.train.Saver(max_to_keep=1)\n\n# Init Tensorboard stuff. This will save Tensorboard information into a different\n# folder at each run named 'log/<timestamp>/'.\n\ntimestamp = str(math.trunc(time.time()))\nsummary_writer = tf.summary.FileWriter(\"log/\" + timestamp + \"-training\")\nvalidation_writer = tf.summary.FileWriter(\"log/\" + timestamp + \"-validation\")\n\n# Init for saving models. 
They will be saved into a directory named 'checkpoints'.\n# Only the last checkpoint is kept.\nif not os.path.exists(\"checkpoints\"):\n os.mkdir(\"checkpoints\")\n\nexecution_start = time.time()\n\ncheckpoint_last = time.time()\nepoch_log_last = time.time()\nsummary_last = time.time()\n\nwith tf.Session(graph=graph) as sess:\n tf.global_variables_initializer().run()\n\n # training loop\n istate = sess.run(zero_state_train) # initial zero input state (a tuple)\n v_istate = sess.run(zero_state_valid) # initial zero input state (a tuple)\n step = 0\n\n for batch, label, epoch in minibatch_generator(train, nb_epochs=num_epochs, gen_batch_size=batch_size, gen_seq_len=sequence_length):\n current_time = time.time()\n\n # train on one minibatch\n feed_dict = {batch_seq_input: batch, batch_seq_labels: label, keep_prop_tf: keep_prop}\n # This is how you add the input state to feed dictionary when state_is_tuple=True.\n # zerostate is a tuple of the placeholders for the NLAYERS=3 input states of our\n # multi-layer RNN cell. Those placeholders must be used as keys in feed_dict.\n # istate is a tuple holding the actual values of the input states (one per layer).\n # Iterate on the input state placeholders and use them as keys in the dictionary\n # to add actual input state values.\n for i, v in enumerate(zero_state_train):\n feed_dict[v] = istate[i]\n\n if (current_time - summary_last) > validation_every_mins * minute:\n summary_last = time.time()\n\n _, predictions, softmax_probabilities, ostate, smm, ls, acc = sess.run([train_step, batch_seq_pred_train, batch_seq_softmax_train, hidden_state_train, summaries, mean_loss, accuracy], feed_dict=feed_dict)\n t_perplexity = float(perplexity(softmax_probabilities, np.reshape(label, [-1])))\n print('step : %d epoch : %d Minibatch perplexity: %.2f' % (step, epoch, t_perplexity))\n print(\"train :{}:loss {},acc{}\".format(epoch, ls, acc))\n\n # save training data for Tensorboard\n\n summary_writer.add_summary(smm, step)\n summary_per = tf.Summary(value=[\n tf.Summary.Value(tag=\"perplexity\", simple_value=t_perplexity),\n ])\n summary_writer.add_summary(summary_per, step)\n\n # do some validation\n v_istate = sess.run(zero_state_valid) # initial zero input state (a tuple)\n valid_logprob = 0\n for i in range(validation_steps):\n v_batch, v_label, _ = next(validation_generator)\n\n feed_dict = {val_seq_input: v_batch, keep_prop_tf: 1} # no dropout for validation\n for _i, v in enumerate(zero_state_valid):\n feed_dict[v] = v_istate[_i]\n prediction, v_ostate = sess.run([final_predictions, hidden_state_valid], feed_dict=feed_dict)\n valid_logprob = valid_logprob + negativeLogProb(prediction, v_label)\n\n v_istate = v_ostate\n\n v_perplexity = float(2 ** (valid_logprob / validation_steps))\n print('step : %d epoch : %d validation perplexity: %.2f' % (step, epoch, v_perplexity))\n\n\n summary = tf.Summary(value=[\n tf.Summary.Value(tag=\"perplexity\", simple_value=v_perplexity),\n ])\n\n validation_writer.add_summary(summary, step)\n\n v_istate = sess.run(zero_state_valid) # initial zero input state (a tuple)\n # display a short text generated with the current weights and biases (every 150 batches)\n print((\"=\" * 50) + \"generation\" + (\"=\" * 50))\n next_feed = np.array([[dictionary['n']]])\n for k in range(2000):\n feed_dict = {val_seq_input: next_feed, keep_prop_tf: 1} # no dropout for validation\n for _i, v in enumerate(zero_state_valid):\n feed_dict[v] = v_istate[_i]\n\n probabilities, v_ostate = sess.run([final_predictions, hidden_state_valid], 
feed_dict=feed_dict)\n sample = sample_from_probabilities(probabilities, topn=10 if epoch <= 1 else 2, vocabulary_size=vocabulary_size)\n print(reverse_dictionary[sample], end=\"\")\n next_feed = np.array([[sample]]) # feedback\n v_istate = v_ostate\n print(\"\\n\", (\"=\" * 50) + \"end\" + (\"=\" * 50))\n else:\n _, ostate = sess.run([train_step, hidden_state_train], feed_dict=feed_dict)\n\n # save a checkpoint\n if (current_time - checkpoint_last) > chechpoint_every_mins * minute:\n checkpoint_last = time.time()\n\n saved_file = saver.save(sess, 'checkpoints/rnn_train_' + timestamp, global_step=step)\n print(\"Saved file: \" + saved_file)\n make_zip_results(\"linux\", step, outputFileName)\n\n if (current_time - epoch_log_last) > log_epoch_every_mins * minute:\n epoch_log_last = time.time()\n\n print(\"step :\", step, \"epoch :\", epoch, )\n\n # loop state around\n istate = ostate\n step += 1\n sys.stdout.flush()\n\nexecution_end = time.time()\n\n\ndef timer(start, end):\n hours, rem = divmod(end - start, 3600)\n minutes, seconds = divmod(rem, 60)\n print(\"this took {:0>2}:{:0>2}:{:05.2f}\".format(int(hours), int(minutes), seconds))\n\n\ntimer(execution_start, execution_end)\n\nsummary_writer.close()\nvalidation_writer.close()\n" }, { "alpha_fraction": 0.6637585759162903, "alphanum_fraction": 0.7054939866065979, "avg_line_length": 48.3139533996582, "blob_id": "70a773ed6f07f2dce9c4ce647fab98225a4f793b", "content_id": "ce1aa96f3907ab1f4b09ade99346f9dfbebf6028", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4241, "license_type": "no_license", "max_line_length": 176, "num_lines": 86, "path": "/tf_my_lm_player.py", "repo_name": "Graduation-tanta/lang-modeling", "src_encoding": "UTF-8", "text": "import gzip\nimport pickle\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom lm_commons import sample_from_probabilities\n\nmodel = 5\nrand_init = False\n\nseedword = '#inclu' # if rand_init == False\n\npath_pieces = [\n (\"linux_out\", '1522494838-185000'),\n (\"shake_out\", '1522455228-95000'),\n ('final_word_level', \"1522489752-65000\"), # operates on embedding\n (\"linux2_out\", '1522855612-25000'), # operates on embedding\n (\"linux3_out\", '1523103812-25000'),\n\n (\"final_linux\", '1523182986-108012'),\n (\"final_linux_em\", '1523183860-105491'),\n (\"word_level\", '1523232165-36030'),\n][model]\n\nwith gzip.open('.\\\\runs\\\\' + path_pieces[0] + '\\\\reverse_dictionary.pkl.gz', 'rb') as f:\n reverse_dictionary = pickle.load(f)\n\ndictionary = dict(zip(reverse_dictionary.values(), reverse_dictionary.keys()))\nvocabulary_size = len(dictionary)\nprint(\"vocabulary\", vocabulary_size)\nstacked_layers = 3\ninternal_state_size = 256 if model == 2 else 512\n\nmeta_graph = '.\\\\runs\\\\{}\\checkpoints\\\\rnn_train_{}.meta'.format(*path_pieces)\nvariable_state = '.\\\\runs\\\\{}\\checkpoints\\\\rnn_train_{}'.format(*path_pieces)\n\nzero_state_tuple = ('valid/MultiRNNCellZeroState/DropoutWrapperZeroState/GRUCellZeroState/zeros:0',\n 'valid/MultiRNNCellZeroState/DropoutWrapperZeroState_1/GRUCellZeroState/zeros:0',\n 'valid/MultiRNNCellZeroState/DropoutWrapperZeroState_2/GRUCellZeroState/zeros:0')\n\nwith tf.Session() as sess:\n new_saver = tf.train.import_meta_graph(meta_graph)\n new_saver.restore(sess, variable_state)\n state_tuple = sess.run(zero_state_tuple)\n\n if rand_init:\n feed = np.random.randint(vocabulary_size) # random word id\n feed = np.array([[feed]]) # shape [batch_size, sequence_length] with batch_size=1 and 
sequence_length=1\n else:\n\n feed = np.zeros([1, len(seedword)])\n\n for i, letter in enumerate(seedword):\n feed[0, i] = dictionary[letter]\n\n # initial values\n y = feed\n h = np.zeros([1, internal_state_size * stacked_layers], dtype=np.float32) # [ batch_size, INTERNALSIZE * NLAYERS]\n for i in range(1000000000):\n\n feed_dict = {'valid/val_seq_input:0': feed, 'keep_prop_tf:0' if model > 2 else 'Placeholder:0': 1} # no dropout for validation\n for _i, v in enumerate(zero_state_tuple):\n feed_dict[v] = state_tuple[_i]\n prediction, state_tuple = sess.run(['valid/final_predictions:0', 'valid/hidden:0'], feed_dict=feed_dict)\n\n # If sampling is be done from the topn most likely characters, the generated text\n # is more credible and more \"english\". If topn is not set, it defaults to the full\n # distribution (ALPHASIZE)\n\n next_feed = sample_from_probabilities(prediction, topn=2000 if model == 2 or model == 7 else 2, vocabulary_size=vocabulary_size, is_word_level=model == 2 or model == 7)\n feed = np.array([[next_feed]]) # shape [batch_size, sequence_length] with batch_size=1 and sequence_length=1\n next_feed = reverse_dictionary[next_feed]\n print(next_feed, end=\" \" if model == 2 or model == 7 else \"\")\n\n# was still much better only to remain in this area as the part of the empire of africa until the death of justinian i\n# the islamic republic of mali was filled by a republic and a tribe of indigenous tribes from this one is a peculiar list\n# of deities representing the great majority of the muslim population the lake itself has developed a wide variety of religious\n# beliefs including that of the large muslim muslim state the muslim population runs from sunni bush means connected with the\n# population and the population of the country about a quarter of the arab population there are about three zero zero zero zero muslims worldwide\n# in december the jewish community of palestinian state hosts the arab population there is a population\n\n# the country main article demographics of lebanon religion main article islam religion its existence in islam is the ancient arabic\n# language meaning a descendant of arab or muslim descent\n# culture of afghanistan many of whom have adherents the schools of islam generally speaking the well and less advanced\n# regions of india are called villages and thus the majority of these schools\n" }, { "alpha_fraction": 0.6517928242683411, "alphanum_fraction": 0.6577689051628113, "avg_line_length": 26.565933227539062, "blob_id": "6af709ae4b8b4aee64e1e698c6afccee939dbe8b", "content_id": "ad2bb8d81e75001ad19d6046f8a7889040ca065f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 5020, "license_type": "no_license", "max_line_length": 226, "num_lines": 182, "path": "/README.md", "repo_name": "Graduation-tanta/lang-modeling", "src_encoding": "UTF-8", "text": "# langauge modeling with RNNs in tensorflow\n\nI built it with 3 stacked GRUs ..text8 as dataset for word level language model .. aslo tried character level language modeling on linux source code :D by appending some of linux c files and results are so fun :D\n\nI included only the trained model for linux source code in this repository .. 
I had 6 more models working on character level(linux source code and shakespere books) with embedding and one hot encoding aslo word level(on text8)\n\ncharachers/words are generated by sampling (probabilistic sampling not greedy)\n\n## How to play ??\njust run tf_my_lm_player.py and this will generate some c-like code\n\n## Example generation for \"#inclu\" seed :D \n\n```\nde <linux/string.h>\n#include <linux/uacdir.h>\n#include <linux/mm.h>\n#include <linux/module.h>\n#include <linux/sched/names.h>\n#include <linux/slab.h>\n#include <linux/slab.h>\n#include <linux/partitull.h>\n#include <linux/slab.h>\n\n```\n\n## Example generation for random seed \n```\n\nstatic void pipe_lock(struct pipe_inode_info *pipe)\n{\n\tstruct pipe_inodeinfo *ipimap = null;\n\n\tinode = lock_inode(inode);\n\tstatus = file_mapping_tree_lock(&fl->fl_file, fl);\n\tlist_for_each_entry(&fl->fl_list, &conf_lock->lock, fl_list)) {\n\t\tstruct page *page;\n\t\tint rc = 0;\n\n\t\t/* copy the page of the page */\n\t\tpage = local_page_address(page);\n\n\t\tif (pages) {\n\t\t\tpage = find_pages_per_page(inode);\n\t\t\tif (page == null)\n\t\t\t\tgoto out_fail;\n\t\t\tlen = page_size;\n\t\t\tpage = false;\n\t\t}\n\t} else {\n\t\t/*\n\t\t * if we've active a page to be a page of a single page and the state of the\n\t\t * request as we're not a possible to avoid any operation the one of\n\t\t * the policy. if we don't want to specify\n\t\t * a no longer the non-locked page is allocated and we are not\n\t\t * a complete and\n\t\t * the page is a pointer to the leaf page and the page is a caller and the new page\n\t\t * and the page is already already allocated and we're already the lock.\n\t\t */\n\t\tif (page->index) {\n\t\t\t/* if we can't allocate an extended page in this leaf. */\n\t\t\tpage->index = page_size;\n\t\t\tput_page(page);\n\t\t\tret = -einval;\n\t\t\tgoto out;\n\t\t}\n\t\tpage->mapping->a_ops = &pagelocked;\n\t\tpage->mapping = page;\n\t\tput_page(page);\n\t\treturn -enomem;\n\t}\n\n\tif (!page) {\n\t\tpage = falloc_extent_map();\n\t\treturn -enoent;\n\n\t\t/* set the page */\n\t\tif (page != pmd)\n\t\t\tret = -eio;\n\n\t\tif (rc)\n\t\t\treturn rc;\n\t\tlen = page_size(page);\n\t\tlocked_page->index =\n\t\t\tlength - 1;\n\t\tlen = length;\n\t}\n\n\t/* fill the new page in the parent page */\n\tpage = page_size(page);\n\tpage = page_address(page);\n\tput_page(page);\n\tpage->mapping->host = file;\n\tpage->mapping->host = falloc_fl;\n\tret = falloc_fl_put(page);\n\tif (ret < 0)\n\t\tgoto out;\n\n\t/* for a pointer to the length. */\n\tpool = pos;\n\tinode->i_old_pages[i] = page;\n\tif (page == null) {\n\t\tpage->index++;\n\t\tret = -enomem;\n\t\tgoto out;\n\t}\n\n\t/* find a newly allocated bytes of a node */\n\tif (pos >= page_size) {\n\t\t/* note that we can't alled the page on the page of the page */\n\t\tpage = null;\n\t\tlen = page_size;\n\n\t\t/*\n\t\t * if we can't allocate the page and we already have the page\n\t\t * and allowed the page and the non-loop is not already been\n\t\t * a caller and the page is not allowed to allocate the offset\n\t\t */\n\t\tinode = old;\n\t\tif (!pageuptodate(page)) {\n\t\t\t/*\n\t\t\t * insert any of transaction or a set of the length of a page of the length. in the\n\t\t\t * extent is a page is a part of a non-exact extent to the page and we can point\n\t\t\t * to the non-extent of an entry. 
the page is not already allowed to be\n\t\t\t * a possible to a partial page as a new entry.\n\t\t\t */\n\t\t\tif (!(page->index == page->index + 1)) {\n\t\t\t\tpage = false;\n\t\t\t\treturn err_ptr(-enomem, page_start, end);\n\t\t\t}\n\t\t\tif (page == null) {\n\t\t\t\tpage = null;\n\t\t\t\tend_page_writeback(page);\n\t\t\t\tret = page_shift_pages(inode->i_mapping);\n\t\t\t\tbreak;\n\t\t\t}\n\t\t\tif (pages[i] == null)\n\t\t\t\treturn -eio;\n\t\t\telse if (!page)\n\t\t\t\treturn;\n\t\t\tpage = null;\n\t\t}\n\t\tif (!pageuptodate(page))\n\t\t\tgoto out;\n\t\tif (page == null) {\n\t\t\tpage = false;\n\t\t\tret = -enomem;\n\t\t\tpage = null;\n\t\t\tgoto out;\n\t\t}\n\t\textent_start = len - 1;\n\t}\n\n\t/* if we are a page buffer */\n\tif (buffer_uptodate(page)) {\n\t\t/*\n\t\t * if we've already a page before the page isn't allocated to truncate the page\n\t\t * and real the page.\n\t\t */\n\t\tpage = find_address(page, page_size);\n\t}\n\n\t/* if we are a page that we've already a pointer to this page in the buffer.\n\t */\n\tif (page->index < page_size) {\n\t\tpage = find_page_container_add(page);\n\t\tif (page != page)\n\t\t\treturn -einval;\n\t}\n\treturn 0;\n}\n\n```\n\n## Resources\n\n* [karpathy blog post](http://karpathy.github.io/2015/05/21/rnn-effectiveness/)\n* [colah blog post on LSTM](http://colah.github.io/posts/2015-08-Understanding-LSTMs/)\n* [R2RT blog post](https://r2rt.com/recurrent-neural-networks-in-tensorflow-i.html)\n* [R2RT blog post](https://r2rt.com/recurrent-neural-networks-in-tensorflow-ii.html)\n* [R2RT blog post on RNNs](https://r2rt.com/written-memories-understanding-deriving-and-extending-the-lstm.html)\n* [martin-RNN (repo + video)](https://github.com/martin-gorner/tensorflow-rnn-shakespeare)\n\n\n\n" }, { "alpha_fraction": 0.6406506299972534, "alphanum_fraction": 0.648858368396759, "avg_line_length": 40.621116638183594, "blob_id": "8df7fc7598097cc63a811e1fe5cb85da31c4c01e", "content_id": "c4e53350f6a536b13d513b186fc09567889b2569", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6701, "license_type": "no_license", "max_line_length": 160, "num_lines": 161, "path": "/lm_commons.py", "repo_name": "Graduation-tanta/lang-modeling", "src_encoding": "UTF-8", "text": "import collections\nimport glob\n\nimport numpy as np\n\nfrom zipFileHelperClass import ZipFile\n\n# # time tracking\nminute = 60\n\n\ndef negativeLogProb(predictions, labels): # [batch_size*seq_length , vocabulary] as labels [batch_size*seq_length]\n \"\"\"Log-probability of the true labels in a predicted batch.\"\"\"\n predictions[predictions < 1e-10] = 1e-10 # wont go -infinity\n return np.sum(-np.log2(predictions[np.arange(labels.shape[0]), labels])) / labels.shape[0] # single value\n\n\ndef perplexity(predictions, labels):\n \"\"\"perplexity of the model.\"\"\"\n return 2 ** negativeLogProb(predictions, labels)\n\n\ndef minibatch_generator(data, nb_epochs, gen_batch_size, gen_seq_len):\n \"\"\"\n thanks to (martin_gorner) repo\n Divides the data into batches of sequences so that all the sequences in one batch\n continue in the next batch. This is a generator that will keep returning batches\n until the input data has been seen nb_epochs times. 
Sequences are continued even\n between epochs, >>apart from one, the one corresponding to the end of raw_data.<< accepted approximation\n The remainder at the end of raw_data that does not fit in an full batch is ignored.\n \"\"\"\n data = np.array(data)\n data_len = data.shape[0]\n # using (data_len-1) because we must provide for the sequence shifted by 1 too\n steps_per_epoch = (data_len - 1) // (gen_batch_size * gen_seq_len)\n\n assert steps_per_epoch > 0, \"Not enough data, even for a single batch. Try using a smaller batch_size.\"\n\n rounded_data_len = steps_per_epoch * gen_batch_size * gen_seq_len\n xdata = np.reshape(data[0:rounded_data_len], [gen_batch_size, steps_per_epoch * gen_seq_len]) # [....####] => [....,####]\n ydata = np.reshape(data[1:rounded_data_len + 1], [gen_batch_size, steps_per_epoch * gen_seq_len])\n\n # batch generator\n for epoch in range(nb_epochs):\n for step in range(steps_per_epoch):\n x = xdata[:, step * gen_seq_len:(step + 1) * gen_seq_len]\n y = ydata[:, step * gen_seq_len:(step + 1) * gen_seq_len]\n\n # this will circulate shift UP for epoch > 0\n x = np.roll(x, -epoch, axis=0) # to continue continue continue the text from epoch to epoch (do not reset rnn state! except the last bottom sample)\n y = np.roll(y, -epoch, axis=0)\n\n yield x, y, epoch\n\n\ndef read_data_files_as_chars(directory):\n concat_text = []\n file_matches = glob.glob(directory, recursive=True)\n for directory in file_matches:\n print(\"Loading file \" + directory)\n with open(directory, \"r\") as file:\n try:\n concat_text.extend(file.read().lower() + \"\\n\\n\\n\")\n except ValueError:\n _ = None\n\n return concat_text\n\n\ndef read_data_files_as_words(directory):\n concat_text = \"\"\n file_matches = glob.glob(directory, recursive=True)\n for file_dir in file_matches:\n print(\"Loading file \" + file_dir)\n with open(file_dir, \"r\") as file:\n try:\n concat_text += file.read().lower() + \"\\n\\n\\n\"\n except ValueError:\n _ = None\n\n return concat_text.split(' ')\n\n\ndef build_char_dataset(corpus):\n '''\n extract features by mapping each word to number and vice versa\n make count array of the most common words\n make a dictionary for word to num word -> rank\n make reverse dictionary word <- rank\n map each word to its number(rank or 0 for UNK) as list of words\n :param corpus: the text\n :return: dictionaries and the encoded data\n '''\n count = collections.Counter(corpus).items()\n _dictionary = dict() # map common words to >> number according to frequency 0 for UNK 1 for THE\n for char, _ in count:\n _dictionary[char] = len(_dictionary)\n\n data = list(map(lambda _word: _dictionary[_word], corpus)) # same as words list but of numbers corresponding to each word\n _reverse_dictionary = dict(zip(_dictionary.values(), _dictionary.keys()))\n return data, count, _dictionary, _reverse_dictionary\n\n\ndef build_word_dataset(corpus, vocabulary_size):\n '''\n extract features by mapping each word to number and vice versa\n make count array of the most common words\n make a dictionary for word to num word -> rank\n make reverse dictionary word <- rank\n map each word to its number(rank or 0 for UNK) as list of words\n :param corpus: the text\n :return: dictionaries and the encoded data\n '''\n count = [['UNK', -1]] # frequency of the most common 20000 word\n count.extend(collections.Counter(corpus).most_common(vocabulary_size - 1))\n dictionary = dict() # map common words to >> number according to frequency 0 for UNK 1 for THE\n for word, _ in count:\n dictionary[word] = 
len(dictionary)\n data = list() # same as words list but of numbers corresponding to each word\n unk_count = 0\n for word in corpus:\n if word in dictionary: # is it a common word ?\n index = dictionary[word] # it's rank\n else:\n index = 0 # UNK is mapped to 0\n unk_count = unk_count + 1\n data.append(index)\n count[0][1] = unk_count # ['UNK', -1] => ['UNK', unk_count]\n reverse_dictionary = dict(zip(dictionary.values(), dictionary.keys()))\n return data, count, dictionary, reverse_dictionary\n\n\ndef convert_to_one_line(text):\n return text.replace(\"\\n\", \"\\\\n\").replace(\"\\t\", \"\\\\t\")\n\n\ndef sample_from_probabilities(probabilities, topn, vocabulary_size, is_word_level=False):\n \"\"\"Roll the dice to produce a random integer in the [0..vocabulary_size] range,\n according to the provided probabilities. If topn is specified, only the\n topn highest probabilities are taken into account.\n :param probabilities: a list of size vocabulary_size with individual probabilities\n :param topn: the number of highest probabilities to consider. Defaults to all of them.\n :return: a random integer\n \"\"\"\n probabilities = probabilities[-1, :] # take the last in sequence : works if sample char by char or (even sequence)\n\n p = np.squeeze(probabilities)\n p[np.argsort(p)[:-topn]] = 0 # leave only the topn , zero otherwise\n if is_word_level: # cut the UNK\n p[0] /= 1000\n p = p / np.sum(p) # normalize\n return np.random.choice(vocabulary_size, 1, p=p)[0] # get one sample\n\n\ndef make_zip_results(filename, step, outputFileName):\n myzipfile = ZipFile('{}{}.zip'.format(filename, step))\n myzipfile.addDir('log/')\n myzipfile.addDir('checkpoints/')\n myzipfile.addFile('reverse_dictionary.pkl.gz')\n myzipfile.addFile(outputFileName)\n myzipfile.print_info()\n" } ]
4
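The `negativeLogProb`/`perplexity` pair in the lang-modeling record's lm_commons.py boils down to 2 raised to the mean negative log2-probability the model assigns to the true labels. A self-contained numpy check with toy numbers (not taken from any real training run):

```python
import numpy as np

# predictions: [batch*seq_len, vocab] rows of probabilities; labels: true ids.
predictions = np.array([[0.7, 0.2, 0.1],
                        [0.1, 0.8, 0.1]])
labels = np.array([0, 1])

# Pick out the probability assigned to each true label, as lm_commons does.
true_probs = predictions[np.arange(labels.shape[0]), labels]   # [0.7, 0.8]
nll = -np.log2(true_probs).mean()   # mean negative log2-likelihood
print(2 ** nll)                     # ~1.34; 1.0 would be a perfect model
```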
dummys/malware_analysis
https://github.com/dummys/malware_analysis
7b1d338d3ab9526951e89e535d2db0498e0e4365
9b7f05ba221e008f2a89fac18d8a78ade7512011
2fd6011bd81a7b28b00b554699d942eb26bb8f54
refs/heads/master
2020-04-11T08:55:31.284623
2018-12-13T15:47:53
2018-12-13T15:47:53
161,660,150
0
0
null
2018-12-13T15:43:48
2018-12-08T20:05:20
2018-11-27T21:39:54
null
[ { "alpha_fraction": 0.5889391303062439, "alphanum_fraction": 0.6051582098007202, "avg_line_length": 29.08799934387207, "blob_id": "ed6885b3fd21a1eab34b30d4cee657df881be672", "content_id": "c94739b9b5c10f7aad77cfba2f0abfa4c81472f6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3761, "license_type": "no_license", "max_line_length": 115, "num_lines": 125, "path": "/trickbot/trick_config_decoder.py", "repo_name": "dummys/malware_analysis", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python2.7\n\"Decodes AES encrypted modules of TrickBot\"\n# Crypto implementation taken from: https://github.com/kevthehermit/RATDecoders/blob/master/decoders/TrickBot.py\n# resource module by dummys\n\n__AUTHOR__ = 'hasherezade'\n\nimport argparse\nfrom hashlib import sha256\nfrom Crypto.Cipher import AES\nfrom pefile import PE\nfrom struct import unpack_from\n\n\ndef derive_key(n_rounds, input_bf):\n intermediate = input_bf\n for i in range(0, n_rounds):\n sha = sha256()\n sha.update(intermediate)\n current = sha.digest()\n intermediate += current\n return current\n\n\ndef aes_decrypt(data):\n key = derive_key(128, data[:32])\n iv = derive_key(128, data[16:48])[:16]\n aes = AES.new(key, AES.MODE_CBC, iv)\n mod = len(data[48:]) % 16\n if mod != 0:\n data += '0' * (16 - mod)\n return aes.decrypt(data[48:])[:-(16 - mod)]\n\n\ndef find_rsrc(pe):\n \"\"\"Assumption is that the RSRC is called 'RES'\n \"\"\"\n\n for rsrc in pe.DIRECTORY_ENTRY_RESOURCE.entries:\n for entry in rsrc.directory.entries:\n if entry.name.string == \"RES\":\n offset = entry.directory.entries[0].data.struct.OffsetToData\n size = entry.directory.entries[0].data.struct.Size\n return pe.get_memory_mapped_image()[offset:offset + size]\n return 0\n\n\ndef dump_to_file(filename, data):\n with open(filename, 'wb') as f:\n f.write(data)\n\n\ndef dexor(data, key):\n maxlen = len(data)\n keylen = len(key)\n j = 0 # key index\n decoded = \"\"\n for i in range(0, maxlen):\n kval = key[j % keylen]\n decoded += chr(ord(data[i]) ^ ord(kval))\n j += 1\n return decoded\n\n\ndef main():\n parser = argparse.ArgumentParser(description=\"TrickBot AES decoder\")\n group = parser.add_mutually_exclusive_group(required=True)\n\n group.add_argument('--executable', dest=\"executable\", default=None, help=\"Malware executable\")\n group.add_argument('--datafile', dest=\"datafile\", default=None, help=\"encrypted module or config\")\n parser.add_argument('--botkey', dest=\"botkey\", default=None, help=\"BotKey (SHA256)\", required=False)\n parser.add_argument('--outfile', dest=\"outfile\", default=None, help=\"Where to dump the output\", required=False)\n\n args = parser.parse_args()\n rsrc_mode = False\n if args.executable:\n pe = PE(args.executable)\n data = find_rsrc(pe)\n if not data:\n # we did not found an encrypted config\n print \"Didn't found encrypted config\"\n return -1\n rsrc_mode = True\n data_len = unpack_from(\"<I\",data)[0]\n print \"Length of the encrypted config file: %d\" % data_len\n\n else:\n # we are in datamode\n data = open(args.datafile, 'rb').read()\n\n if args.botkey is not None:\n botkey = args.botkey.strip()\n if len(botkey) == 64:\n if rsrc_mode:\n data = dexor(data[4:], botkey)\n else:\n data = dexor(data, botkey)\n else:\n print \"ERROR: Invalid BotKey: expected SHA256 hash\"\n return -1\n else:\n print \"WARNING: in the new version of the TrickBot, BotKey (SHA256) is required for decoding modules\"\n data = data[4:]\n\n output = aes_decrypt(data)\n length = 
unpack_from('<I', output)[0]\n output = output[8:length + 8]\n print \"Decoded: %d bytes\" % len(output)\n\n if output is None:\n print \"Output is empty\"\n return -1\n\n if args.outfile is None:\n if rsrc_mode:\n args.outfile = args.executable + \".out\"\n else:\n args.outfile = args.datafile + \".out\"\n\n dump_to_file(args.outfile, output)\n print \"Dumped decoded to: %s\" % args.outfile\n\n\nif __name__ == '__main__':\n main()\n" } ]
1
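`derive_key` in the TrickBot decoder above chains SHA-256 over a growing buffer: each round hashes everything accumulated so far and appends the digest, and the final digest is the key material (the script derives the AES key from bytes 0..31 and the IV from bytes 16..47). A Python 3 re-statement of the same rounds with a two-round sanity check (the record's script itself is Python 2):

```python
from hashlib import sha256

def derive_key(n_rounds: int, seed: bytes) -> bytes:
    buf = seed
    current = b""
    for _ in range(n_rounds):
        current = sha256(buf).digest()  # hash everything seen so far...
        buf += current                  # ...then grow the buffer with the digest
    return current

seed = b"\x00" * 32
assert derive_key(1, seed) == sha256(seed).digest()
assert derive_key(2, seed) == sha256(seed + sha256(seed).digest()).digest()
```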
keeeto/phonopy2climax
https://github.com/keeeto/phonopy2climax
ed29b28adbeb406c5a2809c247bc67fefd5d719e
009e976f80544786bab0a6478a7103b9ba33c571
a482f48fb7f7f88714bb1ec2d1eb7dc8c4b79035
refs/heads/master
2021-01-21T07:04:21.826644
2020-03-30T08:07:01
2020-03-30T08:07:01
83,314,198
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.7737752199172974, "alphanum_fraction": 0.7766570448875427, "avg_line_length": 48.57143020629883, "blob_id": "b967f442e276010b2818c2077f806602c65ed86c", "content_id": "28c1babc795b237b326c39ff615ed90c417f7cba", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 694, "license_type": "no_license", "max_line_length": 290, "num_lines": 14, "path": "/README.md", "repo_name": "keeeto/phonopy2climax", "src_encoding": "UTF-8", "text": "# phonopy2climax\n## Purpose\nThis script takes a `phonopy` file, `mesh.yaml` and extracts information on the eigenvectors, writing them out in a format that is compatible with the a-climax code, for calculating inelastic neutron scattering spectra.\n\n## Requirements\nThe code relies on a number of external python modules\n* yaml\n* ase\n* numpy\n\n## Use\nTo use the script, you must first have a `mesh.yaml` file with the eigenvectors printed out - this is achieved by setting the `EIGENVECTORS=.TRUE.` tag in your `mesh.conf` file. You must also have the original coordinates used for building your phonon calculation in `VASP` `POSCAR` format.\n\nThe script is then run as `python phonopy2climax.py`\n" }, { "alpha_fraction": 0.5613138675689697, "alphanum_fraction": 0.5824817419052124, "avg_line_length": 44.16483688354492, "blob_id": "77e365c1f615bf6711dad246fbfbfea0df873dfe", "content_id": "228e45d45a01b70c08f79a2dacd4092edc0d72a9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4110, "license_type": "no_license", "max_line_length": 119, "num_lines": 91, "path": "/test/vasp2climax_fortran.py", "repo_name": "keeeto/phonopy2climax", "src_encoding": "UTF-8", "text": "import yaml\nimport numpy as np\nimport ase.io as io\n\n\ndef yaml_loader(filepath):\n \"\"\"Reads in yaml files\"\"\"\n with open(filepath,\"r\") as file_descriptor:\n data = yaml.load(file_descriptor)\n return data\n\ndef load_frequencies(data):\n \"\"\"Extracts a particular property from the disctionary and returns as a list\"\"\"\n new_list = np.zeros(shape=(len(data['phonon']),len(data['phonon'][0]['band'])))\n for i in range(len(data['phonon'])):\n for j in range(len(data['phonon'][i]['band'])):\n new_list[i,j] = float(data['phonon'][i]['band'][j]['frequency'])\n return new_list\n\ndef load_weights(data):\n \"\"\"Extracts a particular property from the disctionary and returns as a list\"\"\"\n new_list = np.zeros(shape=(len(data['phonon'])))\n for i in range(len(data['phonon'])):\n new_list[i] = data['phonon'][i]['weight']\n return new_list\n\ndef load_q_points(data):\n \"\"\"Extracts a particular property from the disctionary and returns as a list\"\"\"\n new_list = []\n for i in range(len(data['phonon'])):\n new_list.append(data['phonon'][i]['q-position'])\n return new_list\n\ndef load_eigenvectors(data):\n \"\"\"Extracts a particular property from the disctionary and returns as a list\"\"\"\n w, h = len(data['phonon']), len(data['phonon'][0]['band'])\n w1 = len(data['phonon'][0]['band'][0]['eigenvector'])\n real_list = np.zeros(shape=(w,h,w1,3))\n im_list = np.zeros(shape=(w,h,w1,3))\n for i in range(w):\n for j in range(h):\n for k in range(w1):\n for l in range(3):\n real_list[i,j,k,l] = float(data['phonon'][i]['band'][j]['eigenvector'][k][l][0])\n im_list[i,j,k,l] = data['phonon'][i]['band'][j]['eigenvector'][k][l][1]\n return real_list,im_list\n\ndata = yaml_loader('mesh.yaml')\n# Load up the frequencies, they are stored a list of lists, sorted by 
q-point\nfrequencies = load_frequencies(data)\n# Loads up the eigenvotors, they are sorted by [q-pt,band,atom,cartesian_direction]\nreal_vec, im_vec = load_eigenvectors(data)\n# Loads up the q-points\nq_points = load_q_points(data)\n# Loads up the q-point weights\nweights = load_weights(data)\n\ncrystal = io.read('POSCAR',format='vasp')\nsymbols = crystal.get_chemical_symbols()\nmasses = crystal.get_masses()\npositions = crystal.get_scaled_positions()\n\noutput = open('climax_input.phonon', 'w')\noutput.write(\" BEGIN header \\n\")\noutput.write(\" Number of ions %i\\n\" % len(crystal.positions))\noutput.write(\" Number of branches %i\\n\" % len(data['phonon'][0]['band']))\noutput.write(\" Number of wavevectors %i\\n\" % len(data['phonon']))\noutput.write(\" Frequencies in cm-1\\n\")\noutput.write(\" IR intensities in (D/A)**2/amu\\n\")\noutput.write(\" Raman intensities in A**4\\n\")\noutput.write(\" Unit cell vectors (A)\\n\")\nfor vector in crystal.cell:\n output.write(\" %8.5f %8.5f %8.5f \\n\" % (vector[0],vector[1],vector[2]))\noutput.write(\" Fractional Co-ordinates\\n\")\nfor i, ion in enumerate(crystal.positions):\n output.write(\" %i %8.5f %8.5f %8.5f %s %8.5f \\n\" % (i+1,positions[i,0],\n positions[i,1],positions[i,2], symbols[i], masses[i]))\noutput.write(\" END header \\n\")\n\nfor i in range(len(data['phonon'])):\n output.write(\" q-pt= %i %8.5f %8.5f %8.5f %8.5f \\n \" % ( i+1, q_points[i][0], q_points[i][1],\n q_points[i][2], weights[i]))\n for j in range(len(data['phonon'][0]['band'])):\n output.write(\" %i %8.5f \\n\" % (j, frequencies[i,j] * 33.35641))\n output.write(\" Phonon Eigenvectors \\n\")\n output.write(\"Mode Ion X Y Z\\n\")\n for j in range(len(data['phonon'][0]['band'])):\n for k in range(len(crystal.positions)):\n output.write(\" %i %i %10.6f %10.6f %10.6f %10.6f %10.6f %10.6f\\n\" % (j+1, k+1, real_vec[i,j,k,0],\n im_vec[i,j,k,0],real_vec[i,j,k,1],im_vec[i,j,k,1],real_vec[i,j,k,2],im_vec[i,j,k,2]))\noutput.close()\n" }, { "alpha_fraction": 0.5666172504425049, "alphanum_fraction": 0.5865283012390137, "avg_line_length": 43.12149429321289, "blob_id": "9353da2329072f1db557d78f746def1e3804d346", "content_id": "e9e24eb2b7a32796e3608938a4dbaf33f0f6836c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4721, "license_type": "no_license", "max_line_length": 119, "num_lines": 107, "path": "/phonopy2climax.py", "repo_name": "keeeto/phonopy2climax", "src_encoding": "UTF-8", "text": "import yaml\nimport numpy as np\nfrom optparse import OptionParser\n\nparser = OptionParser()\nparser.add_option(\"-f\", \"--file\",\n action=\"store\", type=\"string\", dest=\"file\", default=\"mesh.yaml\",\n help=\"Path to input file [default: ./mesh.yaml]\")\n(options, args) = parser.parse_args()\n\ndef yaml_loader(filepath):\n \"\"\"Reads in yaml files\"\"\"\n with open(filepath,\"r\") as file_descriptor:\n data = yaml.load(file_descriptor)\n return data\n\ndef load_frequencies(data):\n \"\"\"Extracts a particular property from the disctionary and returns as a list\"\"\"\n new_list = np.zeros(shape=(len(data['phonon']),len(data['phonon'][0]['band'])))\n for i in range(len(data['phonon'])):\n for j in range(len(data['phonon'][i]['band'])):\n new_list[i,j] = float(data['phonon'][i]['band'][j]['frequency'])\n return new_list\n\ndef load_weights(data):\n \"\"\"Extracts a particular property from the disctionary and returns as a list\"\"\"\n new_list = np.zeros(shape=(len(data['phonon'])))\n for i in range(len(data['phonon'])):\n 
new_list[i] = data['phonon'][i]['weight']\n return new_list\n\ndef load_positions(data):\n \"\"\"Extracts a particular property from the disctionary and returns as a list\"\"\"\n positions = []\n for i in range(len(data['points'])):\n positions.append([data['points'][i]['coordinates'],data['points'][i]['symbol'],data['points'][i]['mass']])\n return positions\n\ndef load_lattice(data):\n lattice = np.zeros(shape=(3,3))\n for i, element in enumerate(data['lattice']):\n lattice[i] = element\n return lattice\n\ndef load_q_points(data):\n \"\"\"Extracts a particular property from the disctionary and returns as a list\"\"\"\n new_list = []\n for i in range(len(data['phonon'])):\n new_list.append(data['phonon'][i]['q-position'])\n return new_list\n\ndef load_eigenvectors(data):\n \"\"\"Extracts a particular property from the disctionary and returns as a list\"\"\"\n w, h = len(data['phonon']), len(data['phonon'][0]['band'])\n w1 = len(data['phonon'][0]['band'][0]['eigenvector'])\n real_list = np.zeros(shape=(w,h,w1,3))\n im_list = np.zeros(shape=(w,h,w1,3))\n for i in range(w):\n for j in range(h):\n for k in range(w1):\n for l in range(3):\n real_list[i,j,k,l] = float(data['phonon'][i]['band'][j]['eigenvector'][k][l][0])\n im_list[i,j,k,l] = data['phonon'][i]['band'][j]['eigenvector'][k][l][1]\n return real_list,im_list\n\ndata = yaml_loader(options.file)\n# Load up the frequencies, they are stored a list of lists, sorted by q-point\nfrequencies = load_frequencies(data)\n# Loads up the eigenvotors, they are sorted by [q-pt,band,atom,cartesian_direction]\nreal_vec, im_vec = load_eigenvectors(data)\n# Loads up the q-points\nq_points = load_q_points(data)\n# Loads up the q-point weights\nweights = load_weights(data)\n\npositions = load_positions(data) \nlattice = load_lattice(data)\n\noutput = open('climax_input.phonon', 'w')\noutput.write(\" BEGIN header \\n\")\noutput.write(\" Number of ions %i\\n\" % len(positions))\noutput.write(\" Number of branches %i\\n\" % len(data['phonon'][0]['band']))\noutput.write(\" Number of wavevectors %i\\n\" % len(data['phonon']))\noutput.write(\" Frequencies in cm-1\\n\")\noutput.write(\" IR intensities in (D/A)**2/amu\\n\")\noutput.write(\" Raman intensities in A**4\\n\")\noutput.write(\" Unit cell vectors (A)\\n\")\nfor vector in lattice:\n output.write(\" %8.5f %8.5f %8.5f \\n\" % (vector[0],vector[1],vector[2]))\noutput.write(\" Fractional Co-ordinates\\n\")\nfor i, ion in enumerate(positions):\n output.write(\" %i %8.5f %8.5f %8.5f %s %8.5f \\n\" % (i+1,positions[i][0][0],\n positions[i][0][1],positions[i][0][2], positions[i][1], positions[i][2]))\noutput.write(\" END header \\n\")\n\nfor i in range(len(data['phonon'])):\n output.write(\" q-pt= %i %8.5f %8.5f %8.5f %8.5f \\n \" % ( i+1, q_points[i][0], q_points[i][1],\n q_points[i][2], weights[i]))\n for j in range(len(data['phonon'][0]['band'])):\n output.write(\" %i %8.5f \\n\" % (j, frequencies[i,j] * 33.35641))\n output.write(\"Phonon Eigenvectors \\n\")\n output.write(\"Mode Ion X Y Z\\n\")\n for j in range(len(data['phonon'][0]['band'])):\n for k in range(len(positions)):\n output.write(\" %i %i %10.6f %10.6f %10.6f %10.6f %10.6f %10.6f\\n\" % (j+1, k+1, real_vec[i,j,k,0],\n im_vec[i,j,k,0],real_vec[i,j,k,1],im_vec[i,j,k,1],real_vec[i,j,k,2],im_vec[i,j,k,2]))\noutput.close()\n" } ]
3
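Both scripts in the phonopy2climax record multiply phonopy frequencies by 33.35641 before writing the .phonon file. That constant is the THz-to-cm⁻¹ conversion — 10¹² Hz divided by the speed of light in cm/s — which a few lines verify (the 5 THz example frequency is arbitrary, not from any real mesh.yaml):

```python
# Where the scripts' 33.35641 factor comes from: phonopy frequencies are in
# THz, while the .phonon file expects wavenumbers in cm^-1.
c_cm_per_s = 2.99792458e10           # speed of light in cm/s
thz_to_inv_cm = 1.0e12 / c_cm_per_s  # 1 THz in cm^-1
print(round(thz_to_inv_cm, 5))       # 33.35641

freq_thz = 5.0                       # arbitrary example frequency
print(freq_thz * thz_to_inv_cm)      # ~166.78 cm^-1
```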
rolandblok/ledboard
https://github.com/rolandblok/ledboard
3a2f772f133389cfc2c6cfb51f9e306d0e16af3d
d3b32117790716c8c9bbc0eaa5b9161e34076acc
77070848e2658cf0e5664e9a28c35a79ae73848a
refs/heads/master
2022-12-21T08:35:00.062257
2020-09-23T09:04:02
2020-09-23T09:04:02
291,457,714
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.75, "alphanum_fraction": 0.75, "avg_line_length": 13, "blob_id": "61ba0a35fb884bc3ba5a7897a4d61e701dccc425", "content_id": "9ea11685772b1c1c64c0a2856da18ebc8848e382", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 28, "license_type": "no_license", "max_line_length": 16, "num_lines": 2, "path": "/README.md", "repo_name": "rolandblok/ledboard", "src_encoding": "UTF-8", "text": "# ledboard\nen board met led\n" }, { "alpha_fraction": 0.5750554800033569, "alphanum_fraction": 0.6107961535453796, "avg_line_length": 29.66666603088379, "blob_id": "c4d7f561d0bd6aeea1104ffce51e261b95459f6f", "content_id": "d252dfc89e8da781cd9bdf2936918c447ff4cf78", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4059, "license_type": "no_license", "max_line_length": 107, "num_lines": 132, "path": "/ledboard.py", "repo_name": "rolandblok/ledboard", "src_encoding": "UTF-8", "text": "import time\nimport datetime \nimport colorsys\nimport threading as th\nimport sys\n\nimport ledmatrix\n\nloop_active = True\n\ndef key_capture_thread(): \n global loop_active\n print(\"ïk ga wachten\") \n input()\n loop_active = False\n print('ïk kap ermee')\nth.Thread(target=key_capture_thread, args=(), name='stop', daemon=True).start()\n\n\nWIDTH = 14\nHEIGHT = 14\n\nfrom PIL import Image\nregenboog = Image.open(\"images/regenboog.png\")\nregenboog = regenboog.resize((14,14))\n\npacman_7_7_l = [ Image.open(\"images/pekmen_0.png\"), Image.open(\"images/pekmen_links.png\") ]\npacman_7_7_r = [ Image.open(\"images/pekmen_0.png\"), Image.open(\"images/pekmen_rechts.png\") ]\nspook_paars_7_7 = [ Image.open(\"images/spook_paars_0.png\"), Image.open(\"images/spook_paars_1.png\") ]\n\n\nim_number = [0 for i in range(10)]\nim_number[0] = Image.open(\"images/0.png\")\nim_number[1] = Image.open(\"images/1.png\")\nim_number[2] = Image.open(\"images/2.png\")\nim_number[3] = Image.open(\"images/3.png\")\nim_number[4] = Image.open(\"images/4.png\")\nim_number[5] = Image.open(\"images/5.png\")\nim_number[6] = Image.open(\"images/6.png\")\nim_number[7] = Image.open(\"images/7.png\")\nim_number[8] = Image.open(\"images/8.png\")\nim_number[9] = Image.open(\"images/9.png\")\nim_dd = Image.open(\"images/dd.png\")\nnumber_width = 3 \nnumber_height = 6 \n\n\n \n\nled_matrix = ledmatrix.ledmatrix()\n\n# scroll all numbers\n# pos = WIDTH\n# while(pos > -10 *(number_width + 1 )): \n# offset = 0\n# led_matrix.clear_screen()\n# for n in range(10) :\n# led_matrix.set_image(pos + offset, 0, im_number[n])\n# offset += number_width + 1\n# led_matrix.set_show()\n# #time.sleep(0.01)\n# pos -= 1\n \n\nlast_FPS_time_s = time.time()\nlast_pacmen_time_s = time.time()\nframes = 0\npacman_min_pos = -16\npacman_hour_pos = 16\nlast_minute = datetime.datetime.now().minute\nlast_hour = datetime.datetime.now().hour\npacman_min_active = False\npacman_hour_active = False\nwhile(loop_active):\n frames += 1\n now = datetime.datetime.now() # print now.year, now.month, now.day, now.hour, now.minute, now.second\n cur_hour_str = str(now.hour).zfill(2)\n cur_minu_str = str(now.minute).zfill(2)\n\n hsv_h = (time.time() % 10) /10\n led_matrix.clear_screen()\n\n # fill background\n rgb_back = colorsys.hsv_to_rgb((hsv_h + 0.5)%1, 1, 0.1)\n rgb_back = (255*rgb_back[0], 255*rgb_back[1], 255*rgb_back[2])\n led_matrix.fill_screen(rgb_back)\n\n # display time\n rgb = colorsys.hsv_to_rgb(hsv_h, 1, 1)\n rgb = (255*rgb[0], 255*rgb[1], 255*rgb[2])\n 
led_matrix.set_image(1, 0, im_number[int(cur_hour_str[0])], rgb)\n led_matrix.set_image(5, 0, im_number[int(cur_hour_str[1])], rgb)\n\n led_matrix.set_image(6, 7, im_number[int(cur_minu_str[0])], rgb)\n led_matrix.set_image(10, 7, im_number[int(cur_minu_str[1])], rgb)\n\n\n if(pacman_min_active):\n led_matrix.set_image(pacman_min_pos, 7, pacman_7_7_r[int(pacman_min_pos % 2)] )\n led_matrix.set_image(pacman_min_pos + 9, 7, spook_paars_7_7[int(pacman_min_pos % 2)] )\n\n if(pacman_hour_active):\n led_matrix.set_image(pacman_hour_pos, 0, spook_paars_7_7[int(pacman_hour_pos % 2)] )\n led_matrix.set_image(pacman_hour_pos + 9, 0, pacman_7_7_l[int(pacman_hour_pos % 2)] )\n\n\n led_matrix.set_show()\n\n if time.time() > last_FPS_time_s + 1 :\n #print(f'FPS {frames}')\n frames = 0\n last_FPS_time_s = time.time()\n \n if (time.time() > last_pacmen_time_s + 0.25):\n if pacman_min_active :\n pacman_min_pos += 1\n if (pacman_min_pos > 16): \n pacman_min_pos = -16\n pacman_min_active = False\n if pacman_hour_active :\n pacman_hour_pos -= 1\n if (pacman_hour_pos < -16): \n pacman_hour_pos = 16\n pacman_hour_active = False\n last_pacmen_time_s = time.time()\n\n if (now.minute != last_minute ) :\n pacman_min_active = True\n last_minute = now.minute\n if (now.hour != last_hour ) :\n pacman_hour_active = True\n last_hour = now.hour\n\n \n\n\n\n" }, { "alpha_fraction": 0.4622544050216675, "alphanum_fraction": 0.48241984844207764, "avg_line_length": 31.929824829101562, "blob_id": "4e1a80ab1e72df8a917ef37083abde0119f029f1", "content_id": "03d2f8402670c8784adfef21e21c3b1814a51308", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1934, "license_type": "no_license", "max_line_length": 120, "num_lines": 57, "path": "/ledmatrix.py", "repo_name": "rolandblok/ledboard", "src_encoding": "UTF-8", "text": "import math, time\r\n\r\nimport board\r\nimport neopixel\r\n\r\nclass ledmatrix:\r\n def __init__(self, pixel_encoding=\"RGB\", height=14, width=14):\r\n self._height = height\r\n self._width = width\r\n self._total_length = height*width\r\n self._neopixel = neopixel.NeoPixel(board.D18, self._total_length, auto_write=False, pixel_order=pixel_encoding)\r\n for i in range(0, self._total_length):\r\n self._neopixel[i] = (255,255,255)\r\n self._neopixel.show()\r\n time.sleep(0.1)\r\n\r\n def set_show(self) : \r\n self._neopixel.show()\r\n\r\n def clear_screen(self):\r\n for i in range(0, self._total_length):\r\n self._neopixel[i] = (0,0,0)\r\n\r\n def fill_screen(self, color):\r\n for i in range(0, self._total_length):\r\n self._neopixel[i] = color\r\n\r\n def set_pixel(self, x, y, color):\r\n self.__set_pixel(x, y, color)\r\n\r\n\r\n def set_image(self, x, y, image, color=None):\r\n size_x = min(image.width, self._width - x)\r\n size_y = min(image.height, self._height - y)\r\n for xx in range(0, size_x):\r\n for yy in range(0, size_y):\r\n if image.getpixel((xx, yy))[:3] != (0,0,0) :\r\n if color == None :\r\n c = image.getpixel((xx, yy))[:3]\r\n else : \r\n c = color\r\n self.__set_pixel(x+xx, y+yy, c)\r\n #else :\r\n # c = (0,0,0) \r\n \r\n\r\n def __set_pixel(self, x, y, color):\r\n if (x >= 0) and (x < self._width) and (y >= 0) and (y < self._height) :\r\n i = self.calculate_index(x, y)\r\n self._neopixel[i] = color\r\n\r\n def calculate_index(self, x, y):\r\n row = self._height - y\r\n invert_row = (row%2 == 0)\r\n if invert_row:\r\n x = self._width - 1 - x\r\n return self._total_length - 1 - x - (y*self._width)\r\n" } ]
3
grampajoe/simple-tenant-templates-example
https://github.com/grampajoe/simple-tenant-templates-example
e6ee832cef0ac18068a20c2ac9d53cafb26b9d13
820421a27373424c185debafe9fe7e65393aa0dd
aa12bbf848ad8b7ea681358d63fab0a5c063449a
refs/heads/master
2016-09-05T16:00:08.765646
2013-06-03T12:28:52
2013-06-03T12:28:52
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6809338331222534, "alphanum_fraction": 0.6809338331222534, "avg_line_length": 27.55555534362793, "blob_id": "2631591113ca5a84b6f9e2ca5c5b115de7f251f1", "content_id": "8a8c0f1710db12ecb293a2e46d8c74360a6ed5e7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 257, "license_type": "no_license", "max_line_length": 58, "num_lines": 9, "path": "/simple/middleware.py", "repo_name": "grampajoe/simple-tenant-templates-example", "src_encoding": "UTF-8", "text": "from django.conf import settings\n\n\nclass TenantMiddleware(object):\n def process_request(self, request):\n tenant_slug = request.GET.get('tenant', 'default')\n\n if tenant_slug in settings.TENANTS:\n request.tenant_slug = tenant_slug\n" }, { "alpha_fraction": 0.6417112350463867, "alphanum_fraction": 0.6631016135215759, "avg_line_length": 30.16666603088379, "blob_id": "53ca543f5610352ed74c17b19238d9d5a19d6545", "content_id": "983f95a7e9d585a0040bb036f08702b24b7ab44c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 187, "license_type": "no_license", "max_line_length": 69, "num_lines": 6, "path": "/README.md", "repo_name": "grampajoe/simple-tenant-templates-example", "src_encoding": "UTF-8", "text": "Simple Tenant Templates Example\n===============================\n\nA simple example of Django Tenant Templates usage.\n\nView this example on the Web at http://pure-fjord-4027.herokuapp.com.\n" }, { "alpha_fraction": 0.5833333134651184, "alphanum_fraction": 0.7333333492279053, "avg_line_length": 19, "blob_id": "fff2fdf420e26560e0a32e53c3fc14a03e515f77", "content_id": "ae9253fa178bfba2574a97cc4a51b70a6f99f219", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 60, "license_type": "no_license", "max_line_length": 28, "num_lines": 3, "path": "/requirements.txt", "repo_name": "grampajoe/simple-tenant-templates-example", "src_encoding": "UTF-8", "text": "Django==1.5.1\ndjango-tenant-templates==0.4\ngunicorn==0.17.4\n" } ]
3
DBML-model/DBML
https://github.com/DBML-model/DBML
be9711aa053d5ecc1c3d62bd1aaa74f6ab62a80c
123e2bcf6a9e5cc9661ef53061b6acb956972aa3
f86c1141281a8273b03c45471a1603b87a92ec7a
refs/heads/master
2020-07-01T04:16:23.230639
2019-10-28T13:41:59
2019-10-28T13:41:59
201,046,395
7
1
null
null
null
null
null
[ { "alpha_fraction": 0.5643807649612427, "alphanum_fraction": 0.5855034589767456, "avg_line_length": 19.682634353637695, "blob_id": "00cb73144f801244a56e1fdfbee1a33b32940229", "content_id": "96215d491299c72796542640ddae643655061e32", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7066, "license_type": "no_license", "max_line_length": 255, "num_lines": 334, "path": "/train_offline.py", "repo_name": "DBML-model/DBML", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# coding: utf-8\n\n# In[ ]:\n\n\nimport torch\nfrom torch.autograd import Variable\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numpy as np\nimport time\nimport pickle\nfrom torch.utils.data import DataLoader, Dataset\n\n\n# In[ ]:\n\n\nimport random\ndef setup_seed(seed):\n torch.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n np.random.seed(seed)\n random.seed(seed)\n torch.backends.cudnn.deterministic = True\n# 设置随机数种子\nsetup_seed(3)\n\n\n# In[ ]:\n\n\nfrom file_utils.mydata import MyData\n\n\n# In[ ]:\n\n\ndata_name = 'Electronics'\n\n\n# In[ ]:\n\n\nwith open('input_data/dataset_time_'+data_name+'.bin', 'rb') as f:\n data_set = pickle.load(f)\n\n\n# In[ ]:\n\n\n'''\ndataset statistic\n'''\ndata_set.productNum, data_set.userNum, data_set.wordNum, len(data_set.train_data)\n\n\n# In[ ]:\n\n\nfrom models.DBML_offline import PSM\n\n\n# ## 定义参数 \n\n# In[ ]:\n\n\n'''\n实验参数\n'''\nembedding_dim = 50\nout_size = 10\nbatch_size = 256\nneg_sample_num = data_set.neg_sample_num\ndataLen = len(data_set.train_data)\nbatch_num = int(dataLen/batch_size)\nfull_len = batch_num*batch_size\ntime_bin_num = len(data_set.time_data)\ntotal_epoch = 2\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n# device = torch.device(\"cpu\")\n\n\n# ## 定义模型\n\n# In[ ]:\n\n\ndbml = PSM(data_set.userNum,\n data_set.productNum,\n data_set.wordNum,\n embedding_dim,\n data_set.max_query_len,\n data_set.max_review_len,\n batch_size,\n data_set.time_num + 1,\n neg_num=5,\n sample_num=1,\n transfer_hidden_dim=100,\n sigma_parameter=1e-3,\n kl_parameter=1e-3,\n word_parameter=1e0,\n device=device)\ndbml.to(device)\n\n\n# In[ ]:\n\n\n# dbml = torch.nn.DataParallel(dbml)\n\n\n# ### load model\n\n# In[ ]:\n\n\n# dbml.load_state_dict(torch.load('./out/Electronics_2019-07-22-13-05-52_success.pkl'))\n\n\n# ### 加载数据\n\n# In[ ]:\n\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\ndbml.to(device)\ndata_gen = DataLoader(data_set, batch_size=batch_size, shuffle=True, drop_last=True)\n\n\n# ### 定义优化器\n\n# In[ ]:\n\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\ndbml.to(device)\noptimizer = torch.optim.Adam(dbml.parameters(), lr=0.00001)\n\n\n# In[ ]:\n\n\ndbml.kl_parameter\n\n\n# In[ ]:\n\n\ndbml.train()\ntotal_epoch = 500\ntotal_batch = len(data_gen)\nfor e in range(total_epoch):\n for i, data in enumerate(data_gen):\n \n user_mean, user_std, query, item_mean_pos, item_std_pos, items_mean_neg, items_std_neg, user_sample, product_sample, product_sample_neg, loss, dis_pos, dis_neg, word_mean_pos, word_std_pos, word_mean_neg, word_std_neg = dbml(\n data[0][0].to(device), data[0][1].to(device),\n torch.stack(data[0][2]).t().to(device), data[0][3].to(device),\n torch.stack(data[0][4]).t().to(device), data[0][5].to(device), data[0][6].to(device),\n torch.stack(data[1][0]).t().to(device),\n torch.stack(data[1][1]).t().to(device))\n \n optimizer.zero_grad()\n loss[0].backward()\n optimizer.step()\n if (i % 20 == 
if (i % 20 == 0):\n            print('E: {}/{} | B: {}/{} | Loss: {} | POS: {} | NEG: {}'.format(e, total_epoch, i, total_batch,loss[0].item(), dis_pos.item(), dis_neg.item()))\n            print('Loss:{} | Main:{} | Word:{} | KL:{}'.format(loss[0].item(), loss[1].item(), loss[2].item(), loss[3].item()))\n\n\n# ### Save the model\n\n# In[ ]:\n\n\ne\n\n\n# In[ ]:\n\n\nimport time as tt\n\n\n# In[ ]:\n\n\ntorch.save(dbml.state_dict(), 'out/{}_{}_{}.pkl'.format( data_name, tt.strftime(\"%Y-%m-%d-%H-%M-%S\", tt.localtime()),'ok'))\n\n\n# In[ ]:\n\n\n\n\n\n# ### Evaluate the model\n\n# In[ ]:\n\n\ndef dcg_at_k(r, k, method=0):\n    r = np.asfarray(r)[:k]\n    if r.size:\n        if method == 0:\n            return r[0] + np.sum(r[1:] / np.log2(np.arange(2, r.size + 1)))\n        elif method == 1:\n            return np.sum(r / np.log2(np.arange(2, r.size + 2)))\n        else:\n            raise ValueError('method must be 0 or 1.')\n    return 0.\ndef mean_reciprocal_rank(r):\n    return np.sum(r / np.arange(1, r.size + 1))\ndef hit_rate(r):\n    if (np.sum(r) >= 0.9):\n        return 1\n    else:\n        return 0\n\n\n# In[ ]:\n\n\ndef get_query_laten(q_linear, query, query_len, max_query_len):\n    '''\n    input size: (batch, maxQueryLen)\n    the query is processed as\n    tanh(W*(mean(Q))+b)\n    '''\n    query_len = torch.tensor(query_len).view(1,-1).float()\n    # size: ((batch, maxQueryLen))) ---> (batch, len(query[i]), embedding)\n    # query length mask: zero out the padded positions\n    len_mask = torch.tensor([ [1.]*int(i.item())+[0.]*(max_query_len-int(i.item())) for i in query_len]).unsqueeze(2)\n    query = query.mul(len_mask)\n    query = query.sum(dim=1).div(query_len)\n    query = q_linear(query).tanh()\n\n    return query\n\n\n# In[ ]:\n\n\ndbml.eval()\ndevice=torch.device('cpu')\ndbml.to(device)\nall_p_m = torch.empty(data_set.time_num, data_set.productNum, embedding_dim)\nfor ii in range(data_set.time_num):\n    for i in range(data_set.productNum):\n        p_mean = dbml.item_mean(torch.tensor([i], device=device)).squeeze(1)\n        time= dbml.time_embdding(torch.tensor([ii], device=device)+torch.tensor(1, device=device)).squeeze(1)\n        p_mean = dbml.time2mean_i(torch.cat([p_mean, time], 1)).squeeze()\n        all_p_m[ii][i] = p_mean\n\n\n# In[ ]:\n\n\nfrom tqdm import trange\n\n\n# In[ ]:\n\n\neval_dataset = data_set.test_data\ntest_counter = 0\nall_hr = 0\nall_ndcg = 0\nall_mrr = 0\nfor ii in trange(len(eval_dataset)):\n    td = eval_dataset[ii]\n    '''\n    ideally this should be restricted to users seen during training; here we simply take users whose time bin was trained\n    '''\n    if (td[6] >= 0):\n        user = dbml.user_mean(torch.tensor([td[0]], device=device)).squeeze(1)\n        time= dbml.time_embdding(torch.tensor([td[6]], device=device)+torch.tensor(1, device=device)).squeeze(1)\n        user = dbml.time2mean_u(torch.cat([user, time], 1)).squeeze()\n        \n        query_len = td[3]\n        query = torch.cat(tuple([dbml.wordEmbedding_mean(torch.tensor([i], device=device).squeeze(0)) for i in td[2]])).view(1,-1,embedding_dim)\n        query = get_query_laten(dbml.queryLinear, query, query_len, data_set.max_query_len)\n        user_query = user+query\n#         uq_i = torch.empty(datasets.productNum)\n        user_query.squeeze_(0)\n        uq_i = (user_query - all_p_m[td[6]]).norm(2, dim=1)*(-1.)\n#         for i in range(datasets.productNum):\n#             p_mean = product_time_latent[td[6]+1][i][0]\n#             uq_i[i] = -1*(user_query-p_mean).norm(2).item()\n        ranks_order = uq_i.topk(20)[1]\n        r = torch.eq(ranks_order, td[1]).numpy()\n        all_hr += hit_rate(r)\n        all_mrr += mean_reciprocal_rank(r)\n        all_ndcg += dcg_at_k(r, 20, 1)\n        test_counter += 1\nhr = all_hr / float(test_counter+1e-6)\nmrr = all_mrr / float(test_counter+1e-6)\nndcg = all_ndcg / float(test_counter+1e-6)\nprint(hr, mrr, ndcg)\n\n\n# In[ ]:\n\n\nlen(eval_dataset)\n\n\n# In[ ]:\n\n\ndata_set.eval_data\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n" }, { "alpha_fraction": 0.6039645075798035, "alphanum_fraction": 0.6159171462059021, "avg_line_length": 50.212120056152344, "blob_id": "e0608f07e0a8a4a7397a36d3dcc5160a8848e105", "content_id": "d04297c86e967991b37426d265a54efe4408e105", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 17134, "license_type": "no_license", "max_line_length": 170, "num_lines": 330, "path": "/models/DBML_offline.py", "repo_name": "DBML-model/DBML", "src_encoding": "UTF-8", "text": "import torch\nfrom torch.autograd import Variable\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numpy as np\n\n\n'''\nproduct search model\n'''\nclass PSM(nn.Module):\n    def __init__(self, user_size, item_size, word_size, embedding_dim,\\\n                 max_query_len, max_review_len, batch_size, time_num,\\\n                 neg_num=5,sample_num=1,transfer_hidden_dim=100,\\\n                 sigma_parameter=1e0, kl_parameter=1e0, word_parameter=1e0, device=torch.device('cpu')):\n        super(PSM, self).__init__()\n        self.user_size = user_size\n        self.item_size = item_size\n        self.word_size = word_size\n        self.embedding_dim = embedding_dim\n        self.batch_size = batch_size\n        self.max_query_len = max_query_len\n        self.max_review_len = max_review_len\n        self.sample_num = sample_num\n        self.transfer_hidden_dim = transfer_hidden_dim\n        self.kl_parameter = kl_parameter\n        self.sigma_parameter = sigma_parameter\n        self.word_parameter = word_parameter\n        self.device = device\n        self.neg_num = neg_num\n        self.time_num = time_num\n        \n        self.esp = 1e-10\n        \n        \n        \n        self.time_embdding = nn.Embedding(self.time_num, self.embedding_dim)\n        self.time2mean_u = nn.Linear(self.embedding_dim*2, self.embedding_dim)\n        self.time2mean_i = nn.Linear(self.embedding_dim*2, self.embedding_dim)\n        self.time2mean_w = nn.Linear(self.embedding_dim*2, self.embedding_dim)\n        self.time2std_i = nn.Linear(self.embedding_dim*2, self.embedding_dim)\n        self.time2std_u = nn.Linear(self.embedding_dim*2, self.embedding_dim)\n        self.time2std_w = nn.Linear(self.embedding_dim*2, self.embedding_dim)\n        \n        self.user_mean = nn.Embedding(self.user_size, self.embedding_dim, _weight=torch.ones(self.user_size, self.embedding_dim))\n        self.user_std = nn.Embedding(self.user_size, self.embedding_dim, _weight=torch.zeros(self.user_size, self.embedding_dim))\n        \n        self.item_mean = nn.Embedding(self.item_size, self.embedding_dim, _weight=torch.ones(self.item_size, self.embedding_dim))\n        self.item_std = nn.Embedding(self.item_size, self.embedding_dim, _weight=torch.zeros(self.item_size, self.embedding_dim))\n        \n        \n        self.wordEmbedding_mean = nn.Embedding(self.word_size, self.embedding_dim, padding_idx=0, _weight=torch.ones(self.word_size, self.embedding_dim))\n        self.wordEmbedding_std = nn.Embedding(self.word_size, self.embedding_dim, padding_idx=0, _weight=torch.zeros(self.word_size, self.embedding_dim))\n        self.queryLinear = nn.Linear(self.embedding_dim, self.embedding_dim)\n        \n        \n        self.transfer_linear_u = nn.Linear(self.embedding_dim, self.transfer_hidden_dim)\n        self.transfer_linear_i = nn.Linear(self.embedding_dim, self.transfer_hidden_dim)\n        self.transfer_linear_ni = nn.Linear(self.embedding_dim, self.transfer_hidden_dim)\n        self.transfer_linear_w = nn.Linear(self.embedding_dim, self.transfer_hidden_dim)\n        self.transfer_linear_nw = nn.Linear(self.embedding_dim, self.transfer_hidden_dim)\n#         self.transfer_linear = {\n#             \"u\":nn.Linear(self.embedding_dim, self.transfer_hidden_dim),\n#             \"i\":nn.Linear(self.embedding_dim, self.transfer_hidden_dim),\n#             \"ni\":nn.Linear(self.embedding_dim, self.transfer_hidden_dim),\n#             \"w\":nn.Linear(self.embedding_dim, self.transfer_hidden_dim),\n#             'nw':nn.Linear(self.embedding_dim, self.transfer_hidden_dim)\n#         }\n        self.transfer_mean_u = nn.Linear(self.transfer_hidden_dim, self.embedding_dim)\n        self.transfer_mean_i = nn.Linear(self.transfer_hidden_dim, self.embedding_dim)\n        self.transfer_mean_ni = nn.Linear(self.transfer_hidden_dim, self.embedding_dim)\n        self.transfer_mean_w = nn.Linear(self.transfer_hidden_dim, self.embedding_dim)\n        self.transfer_mean_nw = nn.Linear(self.transfer_hidden_dim, self.embedding_dim)\n#         self.transfer_mean = {\n#             \"u\":nn.Linear(self.transfer_hidden_dim, self.embedding_dim),\n#             \"i\":nn.Linear(self.transfer_hidden_dim, self.embedding_dim),\n#             \"ni\":nn.Linear(self.transfer_hidden_dim, self.embedding_dim),\n#             'w':nn.Linear(self.transfer_hidden_dim, self.embedding_dim),\n#             'nw':nn.Linear(self.transfer_hidden_dim, self.embedding_dim)\n#         }\n        self.transfer_std_u = nn.Linear(self.transfer_hidden_dim, self.embedding_dim)\n        self.transfer_std_i = nn.Linear(self.transfer_hidden_dim, self.embedding_dim)\n        self.transfer_std_ni = nn.Linear(self.transfer_hidden_dim, self.embedding_dim)\n        self.transfer_std_w = nn.Linear(self.transfer_hidden_dim, self.embedding_dim)\n        self.transfer_std_nw = nn.Linear(self.transfer_hidden_dim, self.embedding_dim)\n#         self.transfer_std = {\n#             \"u\":nn.Linear(self.transfer_hidden_dim, self.embedding_dim),\n#             \"i\":nn.Linear(self.transfer_hidden_dim, self.embedding_dim),\n#             \"ni\":nn.Linear(self.transfer_hidden_dim, self.embedding_dim),\n#             'w':nn.Linear(self.transfer_hidden_dim, self.embedding_dim),\n#             'nw':nn.Linear(self.transfer_hidden_dim, self.embedding_dim)\n#         }\n        \n\n        \n#         self.userDecoder = nn.Linear(self.embedding_dim, self.embedding_dim)\n#         self.itemDecoder = nn.Linear(self.embedding_dim, self.embedding_dim)\n        '''\n        (uid, pid_pos, qids_pos, len_pos, time_bin_pos)\n        [( uid, pid, qids_neg, len_neg, time_bin_pos),..,( uid, pid, qids_neg, len_neg, time_bin_pos)]*neg_sample_num\n        '''\n        \n    def forward(self, user, item_pos, query, query_len, word, word_len, times, items_neg, word_neg):\n        self.batch_size = user.shape[0]\n        '''\n        time embedding\n        '''\n        time_laten = self.time_embdding(times+torch.tensor(1).to(self.device)).squeeze(1)\n        pri_time_laten =self.time_embdding(times)\n        \n        '''\n        user\n        '''\n        user_mean = self.user_mean(user).squeeze(1) # (batch, out_size)\n        user_mean_pri = self.time2mean_u(torch.cat([user_mean, pri_time_laten], 1))\n        user_mean = self.time2mean_u(torch.cat([user_mean, time_laten], 1))\n        \n        user_std = self.user_std(user).squeeze(1) # (batch, out_size)\n        user_std_pri = self.time2std_u(torch.cat([user_std, pri_time_laten], 1)).mul(0.5).exp()\n        user_std = self.time2std_u(torch.cat([user_std, time_laten], 1)).mul(0.5).exp()\n        \n\n        \n        '''\n        query\n        '''\n        query = self.get_train_query_tanh_mean(query, query_len)# ((batch, maxQueryLen))) ---> ((batch, embedding)\n        \n        \n        '''\n        word\n        '''\n        word_mean_pos = self.wordEmbedding_mean(word)\n        word_mean_pos_pri = self.time2mean_w(torch.cat([word_mean_pos, pri_time_laten.unsqueeze(1).expand_as(word_mean_pos)], 2))\n        word_mean_pos = self.time2mean_w(torch.cat([word_mean_pos, time_laten.unsqueeze(1).expand_as(word_mean_pos)], 2))\n        \n        word_std_pos = self.wordEmbedding_std(word)\n        word_std_pos_pri = self.time2std_w(torch.cat([word_std_pos, pri_time_laten.unsqueeze(1).expand_as(word_std_pos)], 2)).mul(0.5).exp()\n        word_std_pos = self.time2std_w(torch.cat([word_std_pos, time_laten.unsqueeze(1).expand_as(word_std_pos)], 2)).mul(0.5).exp()\n        \n        \n        '''\n        neg word\n        '''\n        word_mean_neg = self.wordEmbedding_mean(word_neg)\n        word_mean_neg_pri = self.time2mean_w(torch.cat([word_mean_neg, pri_time_laten.unsqueeze(1).expand_as(word_mean_neg)], 2))\n        word_mean_neg = self.time2mean_w(torch.cat([word_mean_neg, time_laten.unsqueeze(1).expand_as(word_mean_neg)], 2))\n        \n        word_std_neg = self.wordEmbedding_std(word_neg)\n        word_std_neg_pri = self.time2std_w(torch.cat([word_std_neg, pri_time_laten.unsqueeze(1).expand_as(word_std_neg)], 2)).mul(0.5).exp()\n        word_std_neg = self.time2std_w(torch.cat([word_std_neg, time_laten.unsqueeze(1).expand_as(word_std_neg)], 2)).mul(0.5).exp() \n        \n        \n        '''\n        pos product\n        '''\n        item_mean_pos = self.item_mean(item_pos).squeeze(1) # (batch, out_size)\n        item_mean_pos_pri = self.time2mean_i(torch.cat([item_mean_pos, pri_time_laten], 1))\n        item_mean_pos = self.time2mean_i(torch.cat([item_mean_pos, time_laten], 1))\n        \n        item_std_pos = self.item_std(item_pos).squeeze(1) # (batch, out_size)\n        item_std_pos_pri = self.time2std_i(torch.cat([item_std_pos, pri_time_laten], 1)).mul(0.5).exp()\n        item_std_pos = self.time2std_i(torch.cat([item_std_pos, time_laten], 1)).mul(0.5).exp()\n\n        \n        '''\n        neg product\n        '''\n        items_mean_neg = self.item_mean(items_neg)# (batch, neg_sample_num, out_size)\n        items_mean_neg_pri = self.time2mean_i(torch.cat([items_mean_neg, pri_time_laten.unsqueeze(1).expand_as(items_mean_neg)], 2))\n        items_mean_neg = self.time2mean_i(torch.cat([items_mean_neg, time_laten.unsqueeze(1).expand_as(items_mean_neg)], 2))\n        \n        items_std_neg = self.item_std(items_neg)# (batch, neg_sample_num, out_size)\n        items_std_neg_pri = self.time2std_i(torch.cat([items_std_neg, pri_time_laten.unsqueeze(1).expand_as(items_std_neg)], 2)).mul(0.5).exp()\n        items_std_neg = self.time2std_i(torch.cat([items_std_neg, time_laten.unsqueeze(1).expand_as(items_std_neg)], 2)).mul(0.5).exp()\n        \n        \n        '''\n        sample the latent variables for users, products and words\n        '''\n        user_sample = self.reparameter(user_mean, user_std)\n        product_sample = self.reparameter(item_mean_pos, item_std_pos)\n        product_sample_neg = self.reparameter(items_mean_neg, items_std_neg)\n        word_sample = self.reparameter(word_mean_pos, word_std_pos)\n        word_sample_neg = self.reparameter(word_mean_neg, word_std_neg)\n        \n#         query_sample\n        '''\n        loss computation\n        '''\n        # main loss: reconstruction error of the sampled (u + q - i) triples\n        loss_main, dis_pos, dis_neg = self.lossF_sigmod_ml(user_sample, query, product_sample, product_sample_neg)\n        # user-word and item-word losses\n        user_word_loss = self.word_loss(user_sample, word_sample, word_len, word_sample_neg)\n        item_word_loss = self.word_loss(product_sample, word_sample, word_len, word_sample_neg)\n\n        \n        # transfer (KL) losses -->\n        # transfer loss arguments: current_mean, current_std, prior_mean, prior_std\n        user_trans_loss = self.transfer_kl_loss(user_mean, user_std, user_mean_pri, user_std_pri, False, 'u')\n        product_trans_pos_loss = self.transfer_kl_loss(item_mean_pos, item_std_pos, item_mean_pos_pri, item_std_pos_pri, False, 'i')\n        product_trans_neg_loss = self.transfer_kl_loss(items_mean_neg, items_std_neg, items_mean_neg_pri, items_std_neg_pri, True, 'ni')\n        word_trans_pos_loss = self.transfer_kl_loss(word_mean_pos, word_std_pos, word_mean_pos_pri, word_std_pos_pri, True, 'w')\n        word_trans_pos_neg_loss = self.transfer_kl_loss(word_mean_neg, word_std_neg, word_mean_neg_pri, word_std_neg_pri, True, 'nw')\n\n        \n        \n        \n        # query_trans_loss\n        
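# total objective: main metric loss + weighted word-modeling losses + weighted KL transfer losses\n        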
loss = loss_main+\\\n            (user_word_loss+item_word_loss)*torch.tensor(self.word_parameter).to(self.device)+\\\n            (user_trans_loss+product_trans_pos_loss+product_trans_neg_loss+word_trans_pos_loss+word_trans_pos_neg_loss)*\\\n            torch.tensor(self.kl_parameter).to(self.device)\n        \n        loss = (loss, loss_main, user_word_loss+item_word_loss, user_trans_loss+product_trans_pos_loss+product_trans_neg_loss+word_trans_pos_loss+word_trans_pos_neg_loss)\n        \n        return user_mean, user_std, query, \\\n            item_mean_pos, item_std_pos,\\\n            items_mean_neg, items_std_neg, \\\n            user_sample, product_sample, product_sample_neg, \\\n            loss, dis_pos, dis_neg,\\\n            word_mean_pos, word_std_pos, word_mean_neg, word_std_neg\n    \n    def word_loss(self, itemOrUser, word_pos, word_len, word_neg):\n        len_mask = torch.tensor([ [1.]*int(i.item())+[0.]*(self.max_review_len-int(i.item())) for i in word_len]).unsqueeze(2).to(self.device)\n        word_pos = word_pos.mul(len_mask)\n        itemOrUser.unsqueeze_(1)\n        dis_pos = (itemOrUser - word_pos).norm(2, dim=2).mean(dim=1)\n        dis_neg = (itemOrUser - word_neg).norm(2, dim=2).mean(dim=1)\n        wl = torch.log(torch.sigmoid(dis_neg-dis_pos)).mean()*(-1.0)\n        itemOrUser.squeeze_(1)\n        return wl\n    \n    def reparameter(self, mean, std):\n#         sigma = torch.exp(torch.mul(0.5,log_var))\n        std_z = torch.randn(std.shape, device=self.device)\n        return mean + torch.tensor(self.sigma_parameter).to(self.device)*std* Variable(std_z) # Reparameterization trick\n    \n    \n    def get_train_query_tanh_mean(self, query, query_len):\n        '''\n        input size: (batch, maxQueryLen)\n        the query is processed as\n        tanh(W*(mean(Q))+b)\n        \n        '''\n        query = self.wordEmbedding_mean(query) # size: ((batch, maxQueryLen))) ---> (batch, len(query[i]), embedding)\n        # query length mask: zero out the padded positions\n        len_mask = torch.tensor([ [1.]*int(i.item())+[0.]*(self.max_query_len-int(i.item())) for i in query_len]).unsqueeze(2).to(self.device)\n        query = query.mul(len_mask)\n\n        query = query.sum(dim=1).div(query_len.unsqueeze(1).float())\n        query = self.queryLinear(query).tanh()\n        \n        return query\n\n    def transfer_mlp(self, prior, aim='u'):\n        transfer_linear = getattr(self, 'transfer_linear_'+aim)\n        current_hidden = transfer_linear(prior)\n        transfer_mean = getattr(self, 'transfer_mean_'+aim)\n        transfer_std = getattr(self, 'transfer_std_'+aim)\n        return transfer_mean(current_hidden), transfer_std(current_hidden).mul(0.5).exp()\n\n    \n    def transfer_kl_loss(self, current_mean, current_std, prior_mean, prior_std, dim3=False, aim='u'):\n        dim2 = current_mean.shape[1]\n        if (dim3 == False):\n            current_transfer_mean = torch.zeros((self.batch_size, self.embedding_dim), device=self.device)\n            current_transfer_std = torch.zeros((self.batch_size, self.embedding_dim), device=self.device)\n            for i in range(self.sample_num):\n                prior_instance = self.reparameter(prior_mean, prior_std)\n                cur_instance = self.transfer_mlp(prior_instance, aim)\n                current_transfer_mean += cur_instance[0]\n                current_transfer_std += cur_instance[1]\n\n            # use the mean over samples of Q(Zt-1) as the P(Zt|B1:t-1) used in the loss\n            current_transfer_mean = current_transfer_mean.div(self.sample_num)\n            current_transfer_std = current_transfer_std.div(self.sample_num**2)\n\n            kl_loss = self.DKL(current_mean, current_std, current_transfer_mean, current_transfer_std)\n        else:\n            current_transfer_mean = torch.zeros((self.batch_size, dim2, self.embedding_dim), device=self.device)\n            current_transfer_std = torch.zeros((self.batch_size, dim2, self.embedding_dim), device=self.device)\n            for i in range(self.sample_num):\n                prior_instance = self.reparameter(prior_mean, prior_std)\n                cur_instance = self.transfer_mlp(prior_instance, aim)\n                current_transfer_mean += cur_instance[0]\n                current_transfer_std += cur_instance[1]\n\n            # use the mean over samples of Q(Zt-1) as the P(Zt|B1:t-1) used in the loss\n            current_transfer_mean = current_transfer_mean.div(self.sample_num)\n            current_transfer_std = current_transfer_std.div(self.sample_num)\n\n            kl_loss = self.DKL(current_mean, current_std, current_transfer_mean, current_transfer_std, True)\n        \n        return kl_loss\n    \n    \n    '''\n    KL divergence term\n    KL(Q(Zt)||P(Zt|B1:t-1))\n    P(Zt|B1:t-1) is estimated by sampling ~~ 1/K sum_{i=1}^K(P(Zt|Z_{i}t-1))\n    '''\n    def DKL(self, mean1, std1, mean2, std2, neg = False):\n        var1 = std1.pow(2) + self.esp\n        var2 = std2.pow(2) + self.esp\n        mean_pow2 = (mean2-mean1)*(torch.tensor(1.0, device=self.device)/var2)*(mean2-mean1)\n        tr_std_mul = (torch.tensor(1.0, device=self.device)/var2)*var1\n        if (neg == False):\n            dkl = (torch.log(var2/var1)-1+tr_std_mul+mean_pow2).mul(0.5).sum(dim=1).mean()\n        else:\n            dkl = (torch.log(var2/var1)-1+tr_std_mul+mean_pow2).mul(0.5).sum(dim=2).sum(dim=1).mean()\n        return dkl\n    \n    '''\n    main (reconstruction) loss\n    -Eq(log{P(Bt|Zt)})\n    '''\n    def lossF_sigmod_ml(self, user, query, item_pos, items_neg):\n        u_plus_q = user+query\n        dis_pos = (u_plus_q - item_pos).norm(2, dim=1).mul(5.)\n        u_plus_q.unsqueeze_(1)\n        dis_neg = (u_plus_q - items_neg)\n        dis_neg = dis_neg.norm(2,dim=2)\n        dis_pos = dis_pos.view(-1,1)\n        batch_loss = torch.log(torch.sigmoid(dis_neg-dis_pos)).sum(dim=1)*(-1.0)\n        return batch_loss.mean() , dis_pos.mean(), dis_neg.mean()\n" }, { "alpha_fraction": 0.7543978095054626, "alphanum_fraction": 0.7631934881210327, "avg_line_length": 31.844444274902344, "blob_id": "b0693fab59213748eba2a828ee6912d0d0389317", "content_id": "bb6cab38dd7f42438b0be62d94ee117929f25f58", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1478, "license_type": "no_license", "max_line_length": 224, "num_lines": 45, "path": "/README.md", "repo_name": "DBML-model/DBML", "src_encoding": "UTF-8", "text": "# DBML\nThis is the Python implementation of the DBML model for the paper \"Dynamic Bayesian Metric Learning for Personalized Product Search\"\n(Teng Xiao<sup>\\*</sup>, Jiaxin Ren<sup>\\*</sup>, Shangsong Liang and Zaiqiao Meng). \n\n\n# Introduction\nDBML is a novel probabilistic metric learning approach that is able to avoid contradictions, keep\nthe triangle inequality in the latent space, and correctly utilize implicit feedback. The dynamic semantic representations of entities, collaboratively inferred in a unified form by our DBML, are beneficial not only for\nimproving personalized product search, but also for capturing the affinities between users, products and words. Please refer to the paper for further details.\n\n# Requirements\n* pytorch(0.4 or later)\n* nltk\n* tqdm\n* dateutil\n* gzip\n\n\n\n\n\n# Datasets\n\nDownload Amazon review datasets from http://jmcauley.ucsd.edu/data/amazon/ (in our paper, we used the 5-core review data and metadata).\n\n\n# Run\nRun train_offline.py to train the offline model.\n\n\nThe data_name variable can be 'Electronics', 'Cell Phones and Accessories', 'Clothing, Shoes and Jewelry' or 'Toys and Games'.\n\n\n\n# Citation\nIf you want to use our code in your research, please cite:\n```\n@inproceedings{dbml/cikm/2019,\n  title={Dynamic Bayesian Metric Learning for Personalized Product Search},\n  author={Xiao, Teng and Ren, Jiaxin and Liang, Shangsong and Meng, Zaiqiao},\n  booktitle={Proceedings of the 28th ACM International Conference on Information and Knowledge Management},\n  year={2019},\n  organization={ACM}\n}\n```\n" }, { "alpha_fraction": 0.4495130181312561, "alphanum_fraction": 0.48241984844207764, "avg_line_length": 31.66666793823242, "blob_id": "0c7632eadd8eb3c7298ea200e06ab157d1f9aa09", "content_id": "032704fbc5a3528c89a6ef7f8548648c8fc97c71", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8405, "license_type": "no_license", "max_line_length": 118, "num_lines": 234, "path": "/file_utils/mydata.py", "repo_name": "DBML-model/DBML", "src_encoding": "UTF-8", "text": "from torch.utils.data import DataLoader, Dataset\nimport numpy as np\nclass MyData(Dataset):\n    def __init__(self, reviewData, metaData, neg_sample_num, max_query_len, max_review_len, time_num, weights = True):\n        \n        \n        self.id2user = dict()\n        self.user2id = dict()\n        \n        self.id2product = dict()\n        self.product2id = dict()\n        \n        self.product2query = dict()\n        \n        # query\n        self.word2id = dict()\n        self.id2word = dict()\n        \n        \n#         self.userText = dict()\n        \n        self.userReviews = dict()\n        self.userReviewsCount = dict()\n        self.userReviewsCounter = dict()\n        self.userReviewsTest = dict()\n        \n        \n        self.nes_weight = []\n        self.word_weight = []\n        self.max_review_len = max_review_len\n        self.max_query_len = max_query_len\n        self.neg_sample_num = neg_sample_num\n        \n        self.time_num = time_num\n        self.time_data = []\n        \n        self.init_dict(reviewData, metaData)\n        \n\n        self.train_data = []\n        self.test_data = []\n        self.eval_data = []\n\n        self.init_dataset(reviewData, weights)\n        self.init_sample_table()\n    def init_dict(self, reviewData, metaData):\n        for i in range(self.time_num):\n            self.time_data.append([])\n        \n        uid = 0\n        us = set(reviewData['reviewerID'])\n        pr = set()\n        words = set()\n        for u in us:\n            # users with only two purchase records are skipped: not enough for validation and test\n            asins = list(reviewData[reviewData['reviewerID'] == u]['asin'])\n            if (len(asins) <= 2):\n                continue\n\n            self.id2user[uid] = u\n            self.user2id[u] = uid\n\n            # record the items each user purchased\n            pr.update(asins)\n            self.userReviews[uid] = asins\n            # the last purchased item is held out for testing\n            self.userReviewsTest[uid] = asins[-1]\n            words.update(set(' '.join(list(reviewData[reviewData['reviewerID'] == u]['reviewText'])).split()))\n#             reviewTexts += list(reviewData[reviewData['reviewerID'] == u]['reviewText'])\n            uid += 1\n            if uid % 100 == 0:\n                with open (r'out.txt','a+') as ff:\n                    ff.write(str(len(us))+' uid: '+str(uid)+'\\n')\n        self.userNum = uid\n        \n        pid = 0\n#         words = set()\n        for p in pr:\n            if pid % 300 == 0:\n                with open (r'out.txt','a+') as ff:\n                    ff.write(str(len(pr)) + ' pid:'+str(pid)+'\\n')\n            try:\n                '''\n                check whether this product has a query\n                '''\n                if (len(metaData.loc[p]['query']) > 0):\n                    self.product2query[p] = metaData.loc[p]['query']\n                    
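# fold the query words into the shared vocabulary\n                    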
words.update(' '.join(metaData.loc[p]['query']).split(' '))\n            except:\n                pass\n            self.id2product[pid] = p\n            self.product2id[p] = pid\n            pid += 1\n        \n        self.productNum = pid\n        self.nes_weight = np.zeros(self.productNum)\n        self.queryNum = len(self.product2query)\n        \n        wi = 0\n        self.word2id['<pad>'] = wi\n        self.id2word[wi] = '<pad>'\n        wi += 1\n        for w in words:\n            if(w==''):\n                continue\n            self.word2id[w] = wi\n            self.id2word[wi] = w\n            wi += 1\n        self.wordNum = wi\n        self.word_weight = np.zeros(wi)\n    def init_dataset(self, reviewData,weights=True):\n        try:\n            self.data_X = []\n            for r in range(len(reviewData)):\n                if r % 100 == 0:\n                    with open (r'out.txt','a+') as ff:\n                        ff.write(str(len(reviewData))+ ' review: '+str(r) + '\\n')\n                rc = reviewData.iloc[r]\n                try:\n                    uid = self.user2id[rc['reviewerID']]\n                    pid_pos = self.product2id[rc['asin']]\n                    time_bin_pos = int(rc['timeBin'])\n                except:\n                    # this user is not in the dict (fewer than 3 purchases)\n                    continue\n\n                text = rc['reviewText']\n\n                try:\n                    # get the list of queries for this product\n                    q_text_array_pos = self.product2query[self.id2product[pid_pos]]\n                except:\n                    '''\n                    no query for this product\n                    '''\n                    continue\n                try:\n                    text_ids, len_r= self.trans_to_ids(text, self.max_review_len)\n                    # update the product's negative-sampling frequency\n                    self.nes_weight[pid_pos] += 1\n                except:\n                    continue\n                # iterate over every query of every product to build a (u, p, q, r) tuple\n                for qi in range(len(q_text_array_pos)):\n                    try:\n                        qids_pos, len_pos = self.trans_to_ids(q_text_array_pos[qi], self.max_query_len)\n                    except:\n                        break\n                    self.data_X.append((uid, pid_pos, qids_pos, len_pos, text_ids, len_r, time_bin_pos))\n                    try:\n                        self.userReviewsCount[uid] += 1\n                        self.userReviewsCounter[uid] += 1\n                    except:\n                        self.userReviewsCount[uid] = 1\n                        self.userReviewsCounter[uid] = 1\n\n\n            '''\n            dataset split ---> hold out the last items each user purchased\n            '''\n            for r in self.data_X:\n                # only consider users with more than two (u, q, i) triples\n                if self.userReviewsCount[r[0]] > 2:\n                    t = self.userReviewsCounter[r[0]]\n                    if (t == 0):\n                        continue\n                    elif (t == 2): # second to last: validation\n                        self.eval_data.append(r)\n                    elif (t == 1): # last: test\n                        self.test_data.append(r)\n                    else:\n                        self.train_data.append(r)\n                        self.time_data[r[6]].append(r)\n                    self.userReviewsCounter[r[0]] -= 1\n\n            if weights is not False:\n                wf = np.power(self.nes_weight, 0.75)\n                wf = wf / wf.sum()\n                self.weights = wf\n                wf = np.power(self.word_weight, 0.75)\n                wf = wf / wf.sum()\n                self.word_weight = wf\n        except Exception as e:\n            with open (r'out.txt','a+') as ff:\n                ff.write(str(e)+ '\\n')\n    def trans_to_ids(self, query, max_len, weight_cal = True):\n        query = query.split(' ')\n        qids = []\n        for w in query:\n            if w == '':\n                continue\n            qids.append(self.word2id[w])\n            # track word frequencies for negative sampling\n            if weight_cal:\n                self.word_weight[self.word2id[w]-1] += 1\n        for _ in range(len(qids), max_len):\n            qids.append(self.word2id['<pad>'])\n        return qids, len(query)\n    \n    def neg_sample(self):\n        neg_item = []\n        neg_word = []\n        for ii in range(self.neg_sample_num):\n            neg_item.append(self.sample_table_item[np.random.randint(self.table_len_item)])\n            neg_word.append(self.sample_table_word[np.random.randint(self.table_len_word)])\n        return neg_item,neg_word\n    \n    def init_sample_table(self):\n        table_size = 1e6\n        count = np.round(self.weights*table_size)\n        self.sample_table_item = []\n        for idx, x in enumerate(count):\n            self.sample_table_item += [idx]*int(x)\n        self.table_len_item = len(self.sample_table_item)\n        \n        count = np.round(self.word_weight*table_size)\n        self.sample_table_word = []\n        for idx, x in enumerate(count):\n            self.sample_table_word += [idx]*int(x)\n        self.table_len_word = len(self.sample_table_word)\n    \n    def __getitem__(self, i):\n        pos = self.train_data[i]\n        neg = self.neg_sample()\n        \n        return pos, neg\n    def get_time_data(self, time_bin, i):\n        pos = self.time_data[time_bin][i]\n        neg = self.neg_sample()\n        return pos, neg\n    def getTestItem(self, i):\n        return self.test_data[i]\n    def __len__(self):\n        return len(self.train_data)" } ]
4
supertako/WomenInPython
https://github.com/supertako/WomenInPython
8cea1802dfa5c8261e3558909331e53a49854f2d
6efb4542b87a58f6e94c0bbaea031ce75a01498a
ae84cb383573203dfbd9c9f94bd88d2c1498b802
refs/heads/master
2020-06-25T00:18:03.243936
2019-08-08T04:32:37
2019-08-08T04:32:37
199,136,780
0
0
null
2019-07-27T08:16:01
2019-07-26T07:58:45
2019-07-26T07:58:43
null
[ { "alpha_fraction": 0.5846356749534607, "alphanum_fraction": 0.618904173374176, "avg_line_length": 25.55500030517578, "blob_id": "14bda3600d14dd0e99a751a22853b161870a8b75", "content_id": "f2f81d0f926204af274e672dbb5ee6960e7ae553", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6225, "license_type": "no_license", "max_line_length": 109, "num_lines": 200, "path": "/Blackjack/PW20190808Blackjack.py", "repo_name": "supertako/WomenInPython", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jul 24 12:12:44 2019\n\n@author: tseng\n\"\"\"\n\n# The Basics\n\n'''\nData Types --> string int\nVariable\noperators symbols ---> =, ==, !=, <=, >=, <, >, and, or, not \nClass\nobject\nlist\nFor loop\nwhile loop\nif else statement\n\n'''\n\n# build blackjack Card Game\n\nsuits_name = ['Spades', 'Hearts','Diamonds', 'Clubs']\nsuits_symbols = ['♠','♥','♦','♣']\nsuits = ['\\u2660','\\u2665','\\u2666','\\u2663']\n\n# setup Playing cards\n\nclass Card:\n\n card_values = {\n 'A': 1, '2': 2, '3': 3, '4': 4,\n '5': 5, '6': 6, '7': 7,\n '8': 8, '9': 9, '10': 10,\n 'J': 10, 'Q': 10,'K': 10\n }\n \n def __init__(self, suit, rank):\n self.suit = suit\n self.rank = rank\n self.points = self.card_values[rank]\n \n def printCard(self):\n return self.rank + self.suit\n \n def getCardValue(self):\n return self.card_values[self.rank]\n\nsuits = ['\\u2660','\\u2665','\\u2666','\\u2663']\nranks = ['A','2','3','4','5','6','7','8','9','10','J','Q','K']\ndeck = []\n \n# Create and populate 1 deck of cards\nfor s in suits:\n for r in ranks:\n card = Card(s, r)\n deck.append(card)\n\n''' \nfor i in range(len(suits)):\n for j in range(len(ranks)):\n card = Card(suits[i], ranks[j])\n deck.append(card)\n'''\n\n# checking if the cards are created correctly by print out\ndef showCardList(cardlist):\n for i in range(len(cardlist)):\n if (i+1) % 13 != 0: \n print(cardlist[i].printCard(), end='')\n else:\n print(cardlist[i].printCard())\n\n#showCardList(deck)\n\ndeck = 5*deck\ntotalCards = len(deck)\n\n# Shuffle Cards\nimport random\n# Shuffle Cards\ndef shuffleCards(CardDeck):\n for i in range(totalCards): # 要交换的数量\n rCardPos1 = random.randint(0, totalCards-1) #生成第一个随机位置 (用于deck列表的位置)\n rCardPos2 = random.randint(0, totalCards-1) #生成第二个随机位置 (用于deck列表的位置)\n temp = CardDeck[rCardPos1] #创造一个临时变量 来保存第一个随机挑选的卡牌\n CardDeck[rCardPos1] = CardDeck[rCardPos2] #deck列表的第一个随机 现在可以被第二个随机位取代\n CardDeck[rCardPos2] = temp #deck列表的第二个随机位 现在可以被 临时保存的第一个随机卡牌取代\n \n return CardDeck;\n\ndeck = shuffleCards(deck)\n#showCardList(deck)\n\n# game logic\n\n''' Initialize the variables we need 初始化我们需要的变量 '''\nPlayerHand = []\nDealerHand = []\n\ncardIndex = 0 #deck中发牌之后的位置\nplayertotal = 0\ndealertotal = 0\n\n# initial round dealer get 1 card player get 2 cards\n# every time draw a card cardIndex increase by 1\n''' 第一局我们可以固定发牌顺序 '''\nPlayerHand.append(deck[cardIndex])\ncardIndex += 1 # 每发一次牌 deck中的牌位就会加 1 \nDealerHand.append(deck[cardIndex])\ncardIndex += 1\nPlayerHand.append(deck[cardIndex])\ncardIndex += 1\n\n''' 自定义功能函数 将列表中的卡牌相加 '''\ndef sumUp(list1):\n total = 0\n list1.sort(key=lambda x: x.points) # 利用排序功能 排序小到大 \n list1.reverse() # 相反功能 排序大到小\n for i in list1:\n if total < 11 and i.rank=='A': # 卡片 ‘A’ 可为 1 或 11\n total += i.getCardValue()+10\n else:\n total += i.getCardValue()\n \n return total\n\n''' 自定义功能函数 将列表中的卡牌l列印出来 '''\ndef showHand(cardlist):\n total = 0\n for card in cardlist:\n if total < 11 and card.rank=='A':\n total += card.getCardValue()+10\n else:\n 
total += card.getCardValue()\n        print(card.printCard() + \" \", end='')\n    \n    print(\"\\t--> \" + str(total))\n\nprint('Dealer: ',end='')\nshowHand(DealerHand)\nprint('Player: ',end='')\nshowHand(PlayerHand)\n\nuserCall = input(\"Hit or Stay: \")\n\n''' keep looping until the player enters 'STAY'; on 'HIT', deal one card to the player '''\nwhile userCall.upper() != \"STAY\":\n    if userCall.upper() == \"HIT\": # deal one card to the player\n        PlayerHand.append(deck[cardIndex])\n        cardIndex += 1\n    \n    playertotal = sumUp(PlayerHand)\n    if playertotal > 21: # if the player's hand exceeds 21, the player busts\n        print(\"Player busted at \" + str(playertotal) + \" !!!! Dealer Wins\")\n        break\n    \n    print('Player: ', end='')\n    showHand(PlayerHand)\n    userCall = input(\"Hit or Stay: \") # ask the player again (prevents an infinite loop)\n\n''' keep looping while the dealer's total is below 17 '''\nwhile dealertotal < 17:\n    DealerHand.append(deck[cardIndex])# deal one card to the dealer\n    cardIndex += 1\n    print('Dealer: ', end='')\n    showHand(DealerHand)\n    dealertotal = sumUp(DealerHand)\n    if dealertotal > 21: # if the total exceeds 21, the dealer busts\n        if playertotal <= 21:\n            print(\"Dealer busted at \" + str(dealertotal) + \" !!!! Player Wins\")\n        break\n\n''' if both the player and the dealer are within 21, compare who is higher '''\nif playertotal <= 21 and dealertotal <= 21:\n    if playertotal > dealertotal:\n        print(\"Player Wins\")\n    elif playertotal < dealertotal:\n        print(\"Player Loses\")\n    else:\n        print(\"Dealer: \"+str(dealertotal)+ \" Player: \"+ str(playertotal) +\" ---> Draw\")\n\n\n''' Future Upgrades examples (food for thought)\n\n1) Let the player have the option to continue playing the next round\n2) Simulate 10000 rounds; calculate the winning rate, the rate of getting 21, and the rate of getting a pair\n3) Multiple players\n4) Betting chips\n5) When the player gets 21 in the 1st round and the dealer shows an 'A', the player should have the option to choose even money;\n   when the player gets 21 in the 1st round, win the round with 1.5 times the bet\n6) Option to split if the player gets a pair\n7) Make the algorithm more efficient\n'''\n" } ]
1
SuLixian/Street_view_segementation_practices
https://github.com/SuLixian/Street_view_segementation_practices
e69cc81f01ccf4a5eb4050135b6ed60f44fc5e81
df039b5dfaf397cc9d1f6fddb49c573a4259e6a1
fa996c7189e4249407190cc665add7703b2199b6
refs/heads/main
2023-07-18T10:46:45.270585
2021-09-05T16:31:33
2021-09-05T16:31:33
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6095017194747925, "alphanum_fraction": 0.658169150352478, "avg_line_length": 22.86111068725586, "blob_id": "6173e44f6a531eb3085aad547bd34b265fb753f6", "content_id": "dd39fe638d724cdc063382879e75af420a9429ff", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 863, "license_type": "no_license", "max_line_length": 65, "num_lines": 36, "path": "/read_depth.py", "repo_name": "SuLixian/Street_view_segementation_practices", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nfrom PIL import Image\nimport numpy as np\nimport cv2\nimport matplotlib.pyplot as plt\n\ndef depth_read(filename):\n # loads depth map D from png file\n # and returns it as a numpy array,\n # for details see readme.txt\n\n depth_png = np.array(Image.open(filename), dtype=int)\n # make sure we have a proper 16bit depth map here.. not 8bit!\n # assert(np.max(depth_png) > 255)\n print (depth_png[:,512])\n depth = (depth_png.astype(np.float)-1) / 256.\n depth[depth <= 0] = 0.0\n return depth\n\ndepth = depth_read('zurich_000121_000019_disparity.png')\nprint(depth)\n\n\ndepth_new = depth.reshape(2048*1024)\ndepth_new = list(depth_new)\nprint(len(depth_new))\nnew = ''\nfor i in depth_new:\n new += (str(i) + ' ')\n# open('cityscape_01.txt', 'w').write(new)\n# cv2.imshow('dep', depth)\n# cv2.waitKey(0)\n\nplt.imshow(depth)\nplt.show()\n\n\n\n\n" }, { "alpha_fraction": 0.6416666507720947, "alphanum_fraction": 0.684374988079071, "avg_line_length": 32.28571319580078, "blob_id": "9632661cbf6d5d056c8ba217bc088b695038073e", "content_id": "8d483c9384ec82b2c69d4660746301bdd78444d2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1230, "license_type": "no_license", "max_line_length": 121, "num_lines": 28, "path": "/test.py", "repo_name": "SuLixian/Street_view_segementation_practices", "src_encoding": "UTF-8", "text": "from PIL import Image\r\nimport torchvision.transforms as T\r\nfrom models.mobilenet_master2 import MobileNet # 导入自己定义的网络模型\r\nfrom torch.autograd import Variable as V\r\nimport torch as t\r\n\r\ntrans = T.Compose([\r\n transforms.Scale(256),\r\n transforms.CenterCrop(224),\r\n transforms.ToTensor(),\r\n T.Normalize(mean=[0.485, 0.456, 0.406],\r\n std=[0.229, 0.224, 0.225])\r\n])\r\n\r\n# 读入图片\r\nimg = Image.open('Z:\\\\毕业\\\\1\\\\0_0.JPG')\r\nin_put = trans(img) # 这里经过转换后输出的input格式是[C,H,W],网络输入还需要增加一维批量大小B\r\nimg = img.unsqueeze(0) # 增加一维,输出的img格式为[1,C,H,W]\r\n\r\nmodel = MobileNet().cuda() # 导入网络模型\r\nmodel.eval()\r\nmodel.load_state_dict(t.load('Z:\\\\毕业\\\\已经训练好的Cityscapes模型?\\\\OCNet.pytorch-master\\\\run_resnet101_asp_oc.sh')) # 加载训练好的模型文件\r\n\r\nin_put = V(img.cuda())\r\nscore = model(input) # 将图片输入网络得到输出\r\nprobability = t.nn.functional.softmax(score, dim=1) # 计算softmax,即该图片属于各类的概率\r\nmax_value, index = t.max(probability, 1) # 找到最大概率对应的索引号,该图片即为该索引号对应的类别\r\nprint(index)\r\n" } ]
2
natgeosociety/django-formfield
https://github.com/natgeosociety/django-formfield
7b77f69f102d78daf63ae100107482da95645a21
de6c39876d004045e86e6e76ff45df43bcc31b8b
cd2499386fb94002bdf6269f010da162d1239624
refs/heads/master
2021-01-13T06:56:15.071863
2020-02-28T13:24:28
2020-02-28T13:24:28
81,336,132
0
0
Apache-2.0
2017-02-08T14:05:45
2017-02-08T14:05:47
2020-02-28T13:24:29
Python
[ { "alpha_fraction": 0.5927175879478455, "alphanum_fraction": 0.5930728316307068, "avg_line_length": 32.117645263671875, "blob_id": "9c356b233c759ecf8d540feb4d3f13bc699f4b66", "content_id": "0faf722a2310708730c811322c91786f4e50d3f2", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5630, "license_type": "permissive", "max_line_length": 91, "num_lines": 170, "path": "/formfield/fields.py", "repo_name": "natgeosociety/django-formfield", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport json\nimport six\n\nfrom django import forms\nfrom django.core.exceptions import ValidationError\nfrom django.core.serializers.json import DjangoJSONEncoder\nfrom django.db import models\nfrom django.utils.safestring import mark_safe\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom .widgets import FormFieldWidget\n\n\nclass JSONField(models.TextField):\n \"\"\"\n JSONField is a generic textfield that serializes/unserializes\n the data from our form fields\n \"\"\"\n default_error_messages = {\n 'invalid': _(\"'%s' is not a valid JSON string.\")\n }\n description = \"JSON object\"\n\n def __init__(self, *args, **kwargs):\n self.dump_kwargs = kwargs.pop('dump_kwargs',\n {'cls': DjangoJSONEncoder})\n self.load_kwargs = kwargs.pop('load_kwargs', {})\n\n super(JSONField, self).__init__(*args, **kwargs)\n self.validate(self.get_default(), None)\n\n def validate(self, value, model_instance):\n if not self.null and value is None:\n raise ValidationError(self.error_messages['null'])\n try:\n self.get_prep_value(value)\n except ValueError:\n raise ValidationError(self.error_messages['invalid'] % value)\n\n def to_python(self, value):\n if isinstance(value, six.string_types):\n try:\n return json.loads(value, **self.load_kwargs)\n except ValueError:\n raise ValidationError(self.error_messages['invalid'] % value)\n\n return value\n\n def get_prep_value(self, value):\n if value is None:\n if not self.null and self.blank:\n return \"\"\n return None\n return json.dumps(value, **self.dump_kwargs)\n\n def from_db_value(self, value, expression, connection, context):\n if value is None:\n return None\n return json.loads(value, **self.load_kwargs)\n\n def get_db_prep_value(self, value, *args, **kwargs):\n if isinstance(value, six.string_types):\n return value\n\n return self.get_prep_value(value)\n\n def _get_val_from_obj(self, obj):\n return self.value_from_object(obj)\n\n def value_to_string(self, obj):\n value = self._get_val_from_obj(obj)\n return self.get_db_prep_value(value)\n\n def value_from_object(self, obj):\n \"\"\"\n Returns the value of this field in the given model instance.\n \"\"\"\n if obj is not None:\n return getattr(obj, self.attname)\n else:\n return self.get_default()\n\n\nclass FormField(forms.MultiValueField):\n \"\"\"The form field we can use in forms\"\"\"\n\n def __init__(self, form, **kwargs):\n import inspect\n if inspect.isclass(form) and issubclass(form, forms.Form):\n form_class = form\n elif callable(form):\n form_class = form()\n self.form = form_class()\n elif isinstance(form, six.string_types):\n from django.utils import module_loading\n if hasattr(module_loading, 'import_by_path'):\n form_class = module_loading.import_by_path(form)\n else:\n form_class = module_loading.import_string(form)\n elif form is None:\n form_class = forms.Form\n else:\n raise ValueError(\"FormField got an unusual value for 'form': {0}\".format(form))\n self.form = form_class()\n\n # Set the 
widget and initial data\n kwargs['widget'] = FormFieldWidget([f for f in self.form])\n kwargs['initial'] = [f.field.initial for f in self.form]\n # The field it self should not be required, this allows us to\n # have optional fields in a sub form\n kwargs['required'] = False\n\n self.max_length = kwargs.pop('max_length', None)\n\n super(FormField, self).__init__(**kwargs)\n\n self.fields = [f.field for f in self.form]\n\n def compress(self, data_list):\n \"\"\"\n Return the cleaned_data of the form, everything should already be valid\n \"\"\"\n data = {}\n if data_list:\n return dict(\n (f.name, data_list[i]) for i, f in enumerate(self.form))\n return data\n\n def clean(self, value):\n \"\"\"\n Call the form is_valid to ensure every value supplied is valid\n \"\"\"\n if not value:\n raise ValidationError(\n 'Error found in Form Field: Nothing to validate')\n\n data = dict((bf.name, value[i]) for i, bf in enumerate(self.form))\n self.form = form = self.form.__class__(data)\n if not form.is_valid():\n error_dict = list(form.errors.items())\n raise ValidationError([\n ValidationError(mark_safe('{} {}'.format(\n k.title(), v)), code=k) for k, v in error_dict])\n\n # This call will ensure compress is called as expected.\n return super(FormField, self).clean(value)\n\n\nclass ModelFormField(JSONField):\n \"\"\"The json backed field we can use in our models\"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"\n This field needs to be nullable and blankable. The supplied form\n will provide the validation.\n \"\"\"\n self.form = kwargs.pop('form', None)\n\n kwargs['null'] = True\n kwargs['blank'] = True\n\n super(ModelFormField, self).__init__(*args, **kwargs)\n\n def formfield(self, form_class=FormField, **kwargs):\n # Need to supply form to FormField\n return super(ModelFormField, self).formfield(form_class=form_class,\n form=self.form, **kwargs)\n" }, { "alpha_fraction": 0.6702127456665039, "alphanum_fraction": 0.6755319237709045, "avg_line_length": 19.88888931274414, "blob_id": "c0bd31aaeb976d97ae854db2af8b7b95274b16dd", "content_id": "0924930ad23bcf071cba73332384a7e0a08bbfea", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 188, "license_type": "permissive", "max_line_length": 46, "num_lines": 9, "path": "/example/urls.py", "repo_name": "natgeosociety/django-formfield", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom django.conf.urls import url, include\n\nfrom django.contrib import admin\n\nurlpatterns = [\n url(r'^admin/', include(admin.site.urls)),\n]\n" }, { "alpha_fraction": 0.6197183132171631, "alphanum_fraction": 0.6555697917938232, "avg_line_length": 27.88888931274414, "blob_id": "1352b3ec4cfb83a799414c596f9ca218280301e5", "content_id": "99a1d8752e3829091e8ab645965b578f1253cc99", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "INI", "length_bytes": 781, "license_type": "permissive", "max_line_length": 75, "num_lines": 27, "path": "/tox.ini", "repo_name": "natgeosociety/django-formfield", "src_encoding": "UTF-8", "text": "# Tox (https://tox.readthedocs.io/) is a tool for running tests\n# in multiple virtualenvs. This configuration file will run the\n# test suite on all supported python versions. 
To use it, \"pip install tox\"\n# and then run \"tox\" from this directory.\n\n[tox]\nenvlist = py{27,36}-django{19,110,111}\n\n[testenv]\ncommands = pytest --cov=formfield \\\n --cov-report term-missing \\\n --cov-report xml \\\n --junitxml=junit-{envname}.xml \\\n --ds=example.settings {posargs}\nsetenv =\n PYTHONPATH={toxinidir}:{toxinidir}/example\ndeps =\n -rrequirements.txt\n pytest\n pytest-cov\n pytest-django\n django19: Django<1.10\n django110: Django<1.11\n django111: Django<2.0\n\n[pytest]\npython_files = tests.py **/tests.py **/tests/*.py **/tests.py\n\n" }, { "alpha_fraction": 0.652601957321167, "alphanum_fraction": 0.6540084481239319, "avg_line_length": 20.545454025268555, "blob_id": "1add627612d6524bb8bc8c4675f38f3b5dcf7816", "content_id": "35d3fa38ee1076ae4d5a61f488546037f064f53c", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 711, "license_type": "permissive", "max_line_length": 65, "num_lines": 33, "path": "/example/settings.py", "repo_name": "natgeosociety/django-formfield", "src_encoding": "UTF-8", "text": "import os\nimport sys\n\nDEBUG = True\nTEMPLATE_DEBUG = DEBUG\nAPP = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))\nPROJ_ROOT = os.path.abspath(os.path.dirname(__file__))\nsys.path.append(APP)\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': 'dev.db',\n }\n}\n\n# Make this unique, and don't share it with anybody.\nSECRET_KEY = 'g2_39yupn*6j4p*cg2%w643jiq-1n_annua*%i8+rq0dx9p=$n'\n\nROOT_URLCONF = 'example.urls'\n\nINSTALLED_APPS = (\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.sites',\n 'django.contrib.staticfiles',\n 'formfield',\n 'sample_app',\n)\n\nTEST_RUNNER = 'django.test.runner.DiscoverRunner'\n" } ]
4
nswarner/Facial-Recognition
https://github.com/nswarner/Facial-Recognition
07f81ba404ee4a7d5fa57107c592470b39cb654d
3c0a40d39fb906b990b750d362b9edc663114878
1dd0866f07ae106bfcff388a4eea645ba4c3d770
refs/heads/master
2020-07-20T12:22:13.366665
2016-09-06T07:16:55
2016-09-06T07:16:55
67,318,037
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.779411792755127, "alphanum_fraction": 0.779411792755127, "avg_line_length": 33, "blob_id": "d240b16d644fed129bddc76aae508a96322bf2db", "content_id": "1d1823c8827476a3003df24348a1f6af733e913a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 68, "license_type": "no_license", "max_line_length": 46, "num_lines": 2, "path": "/README.md", "repo_name": "nswarner/Facial-Recognition", "src_encoding": "UTF-8", "text": "# Facial-Recognition\nFace Recognition via Python, OpenCV, PCA, etc.\n" }, { "alpha_fraction": 0.5825977325439453, "alphanum_fraction": 0.5876418948173523, "avg_line_length": 41.180850982666016, "blob_id": "13a2d1856998d832b1a1ec4f0ceaec74a68e0255", "content_id": "9d889a81027af16d05e7febd70d749c652f40e3d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3965, "license_type": "no_license", "max_line_length": 109, "num_lines": 94, "path": "/face_recognizer.py", "repo_name": "nswarner/Facial-Recognition", "src_encoding": "UTF-8", "text": "import cv2, os\nimport numpy as np\nfrom PIL import Image\n\n\nclass FaceRecognizer:\n\n def __init__(self, load_previous=None, create_new=None):\n # For face recognition we will the the LBPH Face Recognizer\n self.recognizer = cv2.createLBPHFaceRecognizer()\n\n if self.old(load_previous):\n self.load_recognizer(load_previous)\n\n else:\n images, labels = self.prepare_images_and_labels(create_new)\n self.train_from_images(images, labels)\n\n def load_recognizer(self, r_path):\n self.recognizer = cv2.createLBPHFaceRecognizer()\n self.recognizer.load(r_path)\n\n def new_recognizer(self, path):\n # For face recognition we will the the LBPH Face Recognizer\n self.recognizer = cv2.createLBPHFaceRecognizer()\n images, labels = self.prepare_images_and_labels(path)\n self.train_from_images(images, labels)\n self.save_recognizer(\"./last_recognizer.xml\")\n\n def old(self, path):\n if os.path.isfile(path):\n return True\n else:\n return False\n\n def save_recognizer(self, save_file):\n self.recognizer.save(save_file)\n\n def predict2(self, image):\n nbr_predicted, conf = self.recognizer.predict(image)\n return nbr_predicted, conf\n\n def prepare_images_and_labels(self, path):\n # For face detection we will use the Haar Cascade provided by OpenCV.\n cascade_path = \"haarcascade_frontalface_default.xml\"\n face_cascade = cv2.CascadeClassifier(cascade_path)\n # Append all the absolute image paths in a list image_paths\n # We will not read the image with the .sad extension in the training set\n # Rather, we will use them to test our accuracy of the training\n #image_paths = [os.path.join(path, f) for f in os.listdir(path) if not f.endswith('.sad')]\n image_paths = [os.path.join(path, f) for f in os.listdir(path)]\n # images will contains face images\n images = []\n # labels will contains the label that is assigned to the image\n labels = []\n for image_path in image_paths:\n # Read the image and convert to grayscale\n image_pil = Image.open(image_path).convert('L')\n # Convert the image format into numpy array\n image = np.array(image_pil, 'uint8')\n # Get the label of the image\n #nbr = int(os.path.split(image_path)[1].split(\".\")[0].replace(\"subject\", \"\"))\n #nbr = os.path.split(image_path)[1].split(\".\")[0]\n if (image_path.startswith(\"./training_set\\\\g\")):\n nbr = 1\n else:\n nbr = 2\n # Detect the face in the image\n faces = face_cascade.detectMultiScale(image)\n # If face is detected, append the face to images 
and the label to labels\n for (x, y, w, h) in faces:\n # Let's verify that each photo is in the training set\n cv2.imshow(\"Adding faces to training set...\", image[y: y + h, x: x + w])\n cv2.waitKey(200)\n var = raw_input(\"[\" + image_path + \"] 'n' if this isn't a face, [enter] otherwise: \").lower()\n # Verify it's someone\n if var == \"\":\n images.append(image[y: y + h, x: x + w])\n labels.append(nbr)\n if (nbr == 1):\n print(\"Image added, recognized [Grant]\")\n else:\n print(\"Image added, recognized [Ashley]\")\n # return the images list and labels list\n return images, labels\n\n def train_from_images(self, images, labels):\n # Path to the Yale Dataset\n #path = './yalefaces'\n # Call the get_images_and_labels function and get the face images and the\n # corresponding labels\n cv2.destroyAllWindows()\n # Perform the tranining\n self.recognizer.train(images, np.array(labels))\n" }, { "alpha_fraction": 0.6537949442863464, "alphanum_fraction": 0.6693297624588013, "avg_line_length": 39.25, "blob_id": "6ca98112893cc84945017bf70eb75d05d0fec25d", "content_id": "6226fb8a5a879d8fef861b287ef2d3b645572361", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2253, "license_type": "no_license", "max_line_length": 92, "num_lines": 56, "path": "/facial_rec.py", "repo_name": "nswarner/Facial-Recognition", "src_encoding": "UTF-8", "text": "import cv2, os\nimport numpy as np\nfrom PIL import Image\n\n'''\n Much of the facial detection/recognition code was provided by Bikramjot Singh Hanzra\n Contact: [email protected]\n URL: http://hanzratech.in/2015/02/03/face-recognition-using-opencv.html\n\n Revised Todo:\n 1. Download a personal training dataset from FB\n - Set up as training_set folder\n 2. Validate the training set via display-verify\n - Display a found face, ask who's face this is, discard \"\"\n 3. Incorporate RPi's camera module to constantly scan for faces\n 4. 
If recognized face, load personalized document (browser?)\n'''\n\nimport cv2, os\n#import numpy as np\nfrom PIL import Image\nimport face_recognizer\n\n# Test if we already have a recognizer set\n\nload_previous = \"./last_recognizer.xml\"\ncreate_new = \"./training_set\"\n#create_new = \"./yalefaces\"\nrecog = face_recognizer.FaceRecognizer(load_previous, create_new)\n#recog.save_recognizer(\"./last_recognizer.xml\")\n\npath = \"./testing_set\"\n\n# Append the images with the extension .sad into image_paths\n#image_paths = [os.path.join(path, f) for f in os.listdir(path) if f.endswith('.sad')]\nimage_paths = [os.path.join(path, f) for f in os.listdir(path)]\nfor image_path in image_paths:\n predict_image_pil = Image.open(image_path).convert('L')\n predict_image = np.array(predict_image_pil, 'uint8')\n cascade_path = \"haarcascade_frontalface_default.xml\"\n face_cascade = cv2.CascadeClassifier(cascade_path)\n faces = face_cascade.detectMultiScale(predict_image)\n for (x, y, w, h) in faces:\n #nbr_predicted, conf = recognizer.predict(predict_image[y: y + h, x: x + w])\n nbr_predicted, conf = recog.predict2(predict_image[y: y + h, x: x + w])\n #nbr_actual = int(os.path.split(image_path)[1].split(\".\")[0].replace(\"subject\", \"\"))\n # Let's fix this instead of leaving it broken...\n nbr_actual = 1\n if nbr_actual == nbr_predicted:\n print \"{} is Correctly Recognized with confidence {}\".format(nbr_actual, conf)\n cv2.waitKey(1000)\n else:\n print \"{} is Incorrect Recognized as {}\".format(nbr_actual, nbr_predicted)\n cv2.waitKey(1000)\n cv2.imshow(\"Recognizing Face\", predict_image[y: y + h, x: x + w])\n cv2.waitKey(200)" } ]
3
mdbloice/Journal-Club
https://github.com/mdbloice/Journal-Club
de4134944793fa61cc40411c807a934b64900b88
f53dbda6a861ac3b13293f3969613b1dd32859f2
558b335045ebbd291537e792bba45f782a3a7dba
refs/heads/master
2020-03-29T15:38:17.688510
2018-09-24T08:51:26
2018-09-24T08:51:26
150,073,446
2
0
null
null
null
null
null
[ { "alpha_fraction": 0.5789473652839661, "alphanum_fraction": 0.5921052694320679, "avg_line_length": 11.5, "blob_id": "53c3e2382e731bf357513c60b09c0069aed45ada", "content_id": "9ca2930c73bb5c9c860a4b104a7b29f236a8d33a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 76, "license_type": "no_license", "max_line_length": 40, "num_lines": 6, "path": "/external-file.py", "repo_name": "mdbloice/Journal-Club", "src_encoding": "UTF-8", "text": "import sys\n\ndef hello():\n print(\"Hello from %s\" % sys.argv[0])\n\nhello()\n\n" }, { "alpha_fraction": 0.7824427485466003, "alphanum_fraction": 0.7938931584358215, "avg_line_length": 57.11111068725586, "blob_id": "9e94f42d732b6fdf2552cdf95b21c50dd1eab967", "content_id": "7544a7dd1341d78c3c5d95b12b826150fa9ee57a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 524, "license_type": "no_license", "max_line_length": 173, "num_lines": 9, "path": "/README.md", "repo_name": "mdbloice/Journal-Club", "src_encoding": "UTF-8", "text": "# Journal Club Jupyter Seminar\n\nThis repository contains the notebook presented in the [Journal Club](https://sites.google.com/view/biomedical-informatics-jc/) seminar on Jupyter for Winter Semester 2018. \n\n## Use\n\nTo view a static version of the notebook, just open the [Journal Club.ipynb](https://github.com/mdbloice/Journal-Club/blob/master/Journal%20Club.ipynb) file directly.\n\nTo execute the notebook locally, clone this resository and run `jupyter notebook` from the command line in the repository's root directory.\n\n" } ]
2
thflacjsak12/rnrmfgksdid12
https://github.com/thflacjsak12/rnrmfgksdid12
bdfc571414afe191d58ea72e84a55ef042833600
70d8656caf7e1c1a2a78d626b94acc03ecd17089
6a2f9442d4bb7b09e84ddf1f1a7d1fb9c163fafb
refs/heads/master
2020-09-21T01:29:19.285101
2019-11-28T11:50:32
2019-11-28T11:50:32
224,640,719
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6456257104873657, "alphanum_fraction": 0.6644518375396729, "avg_line_length": 27.129032135009766, "blob_id": "bb7a2a6369ddd52843a2aa27f5459d461dd4bbc0", "content_id": "5cd0dfb0d18b3347021244a161f0e2576972d04a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 977, "license_type": "no_license", "max_line_length": 109, "num_lines": 31, "path": "/multiple-object-detection_01-image.py", "repo_name": "thflacjsak12/rnrmfgksdid12", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Nov 19 23:50:48 2019\r\n\r\n@author: Kim\r\n\"\"\"\r\n\r\nfrom imageai.Detection import ObjectDetection\r\nimport os\r\n\r\nexecution_path = os.getcwd()\r\n# print(execution_path)\r\n\r\n# 모델 설정하기\r\ndetector = ObjectDetection()\r\ndetector.setModelTypeAsYOLOv3()\r\ndetector.setModelPath( os.path.join(execution_path , \"models/yolo.h5\"))\r\n\r\n# 모델 성능 설정하기\r\ndetector.loadModel(detection_speed=\"fast\") #fast, faster, fastest, flash\r\n\r\n# 모델에 이미지 입력 및 출력 설정하기\r\ndetections = detector.detectObjectsFromImage(\r\n input_image=os.path.join(execution_path , \"images/street.jpg\"), \r\n output_image_path=os.path.join(execution_path , \"image_out.jpg\"), \r\n minimum_percentage_probability=50)\r\n\r\n# 실행결과 출력하기\r\nfor eachObject in detections:\r\n print(eachObject[\"name\"] , \" : \", eachObject[\"percentage_probability\"], \" : \", eachObject[\"box_points\"] )\r\n print(\"--------------------------------\")\r\n" }, { "alpha_fraction": 0.6958580017089844, "alphanum_fraction": 0.7289940714836121, "avg_line_length": 29.370370864868164, "blob_id": "c331eb8b2174370a3ab18eb8c35a490a52326ae2", "content_id": "3bd15c283003db2ae6ca5e6bd2f683a27d392cb6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 845, "license_type": "no_license", "max_line_length": 94, "num_lines": 27, "path": "/multiple-object-detection_03-VIDEO.py", "repo_name": "thflacjsak12/rnrmfgksdid12", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Nov 20 00:07:55 2019\r\n\r\n@author: Kim\r\n\"\"\"\r\n\r\nfrom imageai.Detection import VideoObjectDetection\r\nimport imageai\r\nimport os\r\n\r\nexecution_path = os.getcwd()\r\n\r\n# detector = VideoObjectDetection()\r\n# detector.setModelTypeAsRetinaNet()\r\n# detector.setModelPath( os.path.join(execution_path , \"models/resnet50_coco_best_v2.0.1.h5\"))\r\ndetector = VideoObjectDetection()\r\ndetector.setModelTypeAsYOLOv3()\r\ndetector.setModelPath( os.path.join(execution_path , \"models/yolo.h5\"))\r\n\r\ndetector.loadModel(detection_speed=\"fast\") #fast, faster, fastest, flash\r\n\r\nvideo_path = detector.detectObjectsFromVideo(\r\n input_file_path=os.path.join(execution_path, \"videos/seoul_02_0.mp4\"),\r\n output_file_path=os.path.join(execution_path, \"video_out2\"),\r\n frames_per_second=20, log_progress=True)\r\nprint(video_path)" }, { "alpha_fraction": 0.7333333492279053, "alphanum_fraction": 0.8666666746139526, "avg_line_length": 14, "blob_id": "872f14c102f1eeb99373073ad299ae002daa2086", "content_id": "15186ab7116217a034e42f45d13fb85dc2f345d5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 30, "license_type": "no_license", "max_line_length": 15, "num_lines": 2, "path": "/README.md", "repo_name": "thflacjsak12/rnrmfgksdid12", "src_encoding": "UTF-8", "text": "# rnrmfgksdid12\nrnrmfgksdid12\n" } ]
3
zubrzubr/learnenjoy
https://github.com/zubrzubr/learnenjoy
38eb5aec59e1575defbd1b1210f50f25b65c03d2
853c84ed714bb942c896d7076adcc38c8378d427
1563061b915062b24cc2bd2caa637dc268a20c1d
refs/heads/master
2021-11-09T08:25:59.474738
2020-02-18T20:53:46
2020-02-18T20:53:46
167,740,276
0
0
null
2019-01-26T21:21:26
2020-02-18T20:53:50
2021-09-22T17:50:33
Python
[ { "alpha_fraction": 0.7091454267501831, "alphanum_fraction": 0.7496252059936523, "avg_line_length": 34.105262756347656, "blob_id": "a3b6975f705f7e6b4a5266f5c3155fdaccd8217b", "content_id": "efec1c879382c1ff3c2a5e2d0fccdae7b70e1c9c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 667, "license_type": "no_license", "max_line_length": 102, "num_lines": 19, "path": "/target/tests/factories.py", "repo_name": "zubrzubr/learnenjoy", "src_encoding": "UTF-8", "text": "import datetime\n\nimport factory.fuzzy\n\nfrom target.models import Target\nfrom book.tests.factories import BookFactory\nfrom reward.tests.factories import RewardFactory\n\n\nclass TargetFactory(factory.django.DjangoModelFactory):\n title = factory.Sequence(lambda n: 'title_%d' % n)\n description = factory.fuzzy.FuzzyText(length=100)\n book = factory.SubFactory(BookFactory)\n reward = factory.SubFactory(RewardFactory)\n start_date = factory.fuzzy.FuzzyDate(datetime.datetime(2018, 1, 1), datetime.datetime(2018, 2, 1))\n end_date = factory.fuzzy.FuzzyDate(datetime.datetime(2019, 1, 1), datetime.datetime(2019, 2, 1))\n\n class Meta:\n model = Target\n" }, { "alpha_fraction": 0.720812201499939, "alphanum_fraction": 0.720812201499939, "avg_line_length": 15.416666984558105, "blob_id": "a24aeffad4ba8fc06b607511380ced962664b343", "content_id": "521ba4f33652c2e763561daf16436996d91b04c6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 197, "license_type": "no_license", "max_line_length": 40, "num_lines": 12, "path": "/reward/admin.py", "repo_name": "zubrzubr/learnenjoy", "src_encoding": "UTF-8", "text": "from django.contrib import admin\nfrom reward.models import Reward\n\n\nclass RewardAdmin(admin.ModelAdmin):\n \"\"\"\n Reward admins class\n \"\"\"\n pass\n\n\nadmin.site.register(Reward, RewardAdmin)\n" }, { "alpha_fraction": 0.5928030014038086, "alphanum_fraction": 0.6123737096786499, "avg_line_length": 43, "blob_id": "6b2368738549f3c92fff660e7baab33dc502c59f", "content_id": "4c25cf414caa387941c47cc97eac90c41441bb09", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1584, "license_type": "no_license", "max_line_length": 135, "num_lines": 36, "path": "/target/migrations/0001_initial.py", "repo_name": "zubrzubr/learnenjoy", "src_encoding": "UTF-8", "text": "# Generated by Django 2.0.6 on 2018-06-23 14:52\n\nfrom django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ('reward', '0001_initial'),\n ('book', '0001_initial'),\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Target',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('title', models.CharField(max_length=255, verbose_name=\"Target's name\")),\n ('description', models.TextField(max_length=1024, verbose_name=\"Target's description\")),\n ('start_date', models.DateField(verbose_name=\"Target's start date\")),\n ('end_date', models.DateField(verbose_name=\"Target's end date\")),\n ('current_page_progress', models.PositiveIntegerField(default=0, verbose_name='Current page progress')),\n ('book', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='targets', to='book.Book')),\n ('owner', models.ForeignKey(null=True, 
on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),\n ('reward', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='targets', to='reward.Reward')),\n ],\n options={\n 'abstract': False,\n },\n ),\n ]\n" }, { "alpha_fraction": 0.7443946003913879, "alphanum_fraction": 0.7443946003913879, "avg_line_length": 17.58333396911621, "blob_id": "1fcb59886dab1bb893db2c15cbdcc9dbfd3eb7fc", "content_id": "63620471165346fe9ab364205841b2f9d2c520ea", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 223, "license_type": "no_license", "max_line_length": 48, "num_lines": 12, "path": "/custom_user/admin.py", "repo_name": "zubrzubr/learnenjoy", "src_encoding": "UTF-8", "text": "from django.contrib import admin\nfrom custom_user.models import CustomUser\n\n\nclass CustomUserAdmin(admin.ModelAdmin):\n \"\"\"\n Custom user admins class\n \"\"\"\n pass\n\n\nadmin.site.register(CustomUser, CustomUserAdmin)\n" }, { "alpha_fraction": 0.615936279296875, "alphanum_fraction": 0.615936279296875, "avg_line_length": 28.186046600341797, "blob_id": "3bfbb16507820c2b2f767f6a99d9397404f0471d", "content_id": "f2f12ce082d173b910f6402d3a42bf411ff0e908", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1255, "license_type": "no_license", "max_line_length": 118, "num_lines": 43, "path": "/custom_user/serializers.py", "repo_name": "zubrzubr/learnenjoy", "src_encoding": "UTF-8", "text": "from rest_framework import serializers\n\nfrom custom_user.models import CustomUser\n\n\nclass BaseUserSerializer(serializers.ModelSerializer):\n \"\"\"\n Serializer to present users.\n \"\"\"\n class Meta:\n model = CustomUser\n fields = (\n 'username', 'first_name', 'last_name', 'bio', 'country', 'city', 'birth_date', 'favorite_books', 'targets'\n )\n\n\nclass CreateUserSerializer(serializers.ModelSerializer):\n \"\"\"\n Used for user's registration\n \"\"\"\n password = serializers.CharField(write_only=True)\n email = serializers.EmailField(required=True)\n\n class Meta:\n model = CustomUser\n fields = (\n 'first_name', 'last_name', 'bio', 'country', 'city', 'email', 'username', 'password', 'birth_date',\n 'favorite_books', 'targets'\n )\n \n def create(self, validated_data):\n user = super(CreateUserSerializer, self).create(validated_data)\n user.set_password(validated_data['password'])\n user.save()\n return user\n\n\nclass UpdateUserSerializer(CreateUserSerializer):\n class Meta:\n model = CustomUser\n fields = (\n 'first_name', 'last_name', 'bio', 'country', 'city', 'birth_date', 'favorite_books', 'targets'\n )\n" }, { "alpha_fraction": 0.6764994859695435, "alphanum_fraction": 0.6853490471839905, "avg_line_length": 32.900001525878906, "blob_id": "516c1d30edfd8ff8f052b6b7e2bb481675d4670b", "content_id": "8dc310d43a6ac707aaddeddeffbad04b370e0553", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1017, "license_type": "no_license", "max_line_length": 114, "num_lines": 30, "path": "/learntoenjoy/urls.py", "repo_name": "zubrzubr/learnenjoy", "src_encoding": "UTF-8", "text": "from django.conf.urls import url, include\nfrom django.contrib import admin\nfrom rest_framework_swagger.views import get_swagger_view\nfrom rest_framework_simplejwt.views import (\n TokenObtainPairView,\n TokenRefreshView,\n)\n\n\nswagger_view = get_swagger_view(title='Learn to enjoy API')\n\napi_v1_pattern = r'^api/v1/'\n\napi_urls = [\n url(api_v1_pattern, 
include('book.urls')),\n url(api_v1_pattern, include('target.urls')),\n url(api_v1_pattern, include('custom_user.urls')),\n url(api_v1_pattern, include('reward.urls')),\n url('{}{}'.format(api_v1_pattern, 'login/token/$'), TokenObtainPairView.as_view(), name='token_obtain_pair'),\n url('{}{}'.format(api_v1_pattern, 'login/token/refresh/$'), TokenRefreshView.as_view(), name='token_refresh'),\n\n]\n\ncommon_urls = [\n url(r'^secret_magic_room/', admin.site.urls),\n url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')),\n url('{}{}'.format(api_v1_pattern, 'docs'), swagger_view),\n]\n\nurlpatterns = api_urls + common_urls\n" }, { "alpha_fraction": 0.7632508873939514, "alphanum_fraction": 0.7632508873939514, "avg_line_length": 20.769229888916016, "blob_id": "80cec346dbe0dd96ec1abe12e27f6dacafd912cc", "content_id": "48c8ef667497bc3e9e90aec14200d3952bd838d2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 283, "license_type": "no_license", "max_line_length": 64, "num_lines": 13, "path": "/custom_user/urls.py", "repo_name": "zubrzubr/learnenjoy", "src_encoding": "UTF-8", "text": "from django.conf.urls import url, include\nfrom rest_framework import routers\n\nfrom custom_user.views import CustomUsersViewSet\n\n\nrouter = routers.DefaultRouter()\nrouter.register(r'users', CustomUsersViewSet, base_name='users')\n\n\nurlpatterns = [\n url(r'', include(router.urls)),\n]\n" }, { "alpha_fraction": 0.5734127163887024, "alphanum_fraction": 0.6001983880996704, "avg_line_length": 31.516128540039062, "blob_id": "045c484d57b8bb8f14d8e4edc34f9836062ba7a2", "content_id": "f80b7986b5634b8e252654bf62949ab273dbc1ff", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1008, "license_type": "no_license", "max_line_length": 209, "num_lines": 31, "path": "/custom_user/migrations/0002_auto_20180623_1452.py", "repo_name": "zubrzubr/learnenjoy", "src_encoding": "UTF-8", "text": "# Generated by Django 2.0.6 on 2018-06-23 14:52\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ('custom_user', '0001_initial'),\n ('auth', '0009_alter_user_last_name_max_length'),\n ('target', '0001_initial'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='customuser',\n name='targets',\n field=models.ManyToManyField(blank=True, related_name='users_targets', to='target.Target'),\n ),\n migrations.AddField(\n model_name='customuser',\n name='user_permissions',\n field=models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions'),\n ),\n migrations.AlterUniqueTogether(\n name='customuser',\n unique_together={('email', 'username')},\n ),\n ]\n" }, { "alpha_fraction": 0.5765334963798523, "alphanum_fraction": 0.5773776173591614, "avg_line_length": 31.456621170043945, "blob_id": "c1d1dee44651267821a004911650c4d69ccf964e", "content_id": "306f6304d3df02641ad7dfbe7c5109e852d4b4df", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7108, "license_type": "no_license", "max_line_length": 118, "num_lines": 219, "path": "/custom_user/tests/test_views.py", "repo_name": "zubrzubr/learnenjoy", "src_encoding": "UTF-8", "text": "import pytest\nimport simplejson\n\nfrom django.contrib.auth import get_user_model\nfrom django.urls import reverse\n\nfrom 
custom_user.tests.factories import UserFactory\nfrom common.tests.utils import get_login_params_dict\n\n\nUserModel = get_user_model()\n\n\[email protected]_db\nclass TestCustomUsersView(object):\n def test_registration_should_be_valid_and_create_user(self, client):\n user_list_url = reverse('users-list')\n params = {\n 'username': 'test',\n 'email': '[email protected]',\n 'password': 'test'\n }\n resp = simplejson.loads(client.post(user_list_url, params).content)\n\n user = UserModel.objects.get(username=params.get('username'))\n\n assert resp.get('username') == params.get('username')\n assert resp.get('email') == params.get('email')\n\n assert params.get('username') == user.username\n\n def test_registration_should_be_invalid_if_email_with_wrong_format(self, client):\n user_list_url = reverse('users-list')\n params = {\n 'username': 'test',\n 'email': 'test....test.com',\n 'password': 'test'\n }\n resp = simplejson.loads(client.post(user_list_url, params).content)\n expected_resp = {'email': ['Enter a valid email address.']}\n assert resp == expected_resp\n\n def test_registration_should_be_invalid_if_email_is_empty(self, client):\n user_list_url = reverse('users-list')\n params = {\n 'username': 'test',\n 'email': ' ',\n 'password': 'test'\n }\n resp = simplejson.loads(client.post(user_list_url, params).content)\n expected_resp = {'email': ['This field may not be blank.']}\n\n assert resp == expected_resp\n\n def test_registration_should_be_invalid_if_empty_password(self, client):\n user_list_url = reverse('users-list')\n params = {\n 'username': 'test',\n 'email': '[email protected]',\n 'password': ' '\n }\n resp = simplejson.loads(client.post(user_list_url, params).content)\n expected_resp = {'password': ['This field may not be blank.']}\n assert resp == expected_resp\n\n def test_registration_should_be_invalid_if_empty_username(self, client):\n user_list_url = reverse('users-list')\n params = {\n 'username': ' ',\n 'email': '[email protected]',\n 'password': '123321'\n }\n resp = simplejson.loads(client.post(user_list_url, params).content)\n expected_resp = {'username': ['This field may not be blank.']}\n assert resp == expected_resp\n\n def test_should_return_three_users(self, client):\n user_list_url = reverse('users-list')\n users_count = 3\n\n UserFactory.create(password='test_1')\n UserFactory.create(password='test_2')\n UserFactory.create(password='test_3')\n\n resp = simplejson.loads(client.get(user_list_url).content)\n\n assert users_count == len(resp)\n\n def test_returned_fields(self, client):\n user_list_url = reverse('users-list')\n\n UserFactory.create(password='test_1')\n\n resp = simplejson.loads(client.get(user_list_url).content)\n \n fields = resp[0].keys()\n expected_fields = [\n 'username', 'first_name', 'last_name', 'bio', 'country', 'city', 'birth_date', 'favorite_books', 'targets'\n ]\n \n assert list(fields) == expected_fields\n\n def test_should_not_return_superuser_in_response(self, client):\n user_list_url = reverse('users-list')\n\n UserFactory.create(username='su', password='test', is_superuser=True)\n UserFactory.create(username='not_su', password='test_2', is_superuser=False)\n\n resp = simplejson.loads(client.get(user_list_url).content)\n resp_user_names = [resp_obj.get('username') for resp_obj in resp]\n\n assert 'su' not in resp_user_names\n\n\[email protected]_db\nclass TestCustomUsersDetailView(object):\n def test_city_should_be_changed_after_put_request(self, client):\n params = {\n 'username': 'test',\n 'email': '[email protected]',\n 'password': 
'test'\n }\n UserFactory.create(**params)\n user = UserModel.objects.get(username='test')\n\n user_detail = reverse('users-detail', args=[user.id])\n\n users_city_before = user.city\n\n params.pop('email')\n \n token_dict = get_login_params_dict(client, params)\n\n client.put(\n user_detail, simplejson.dumps({'city': 'New city'}), **token_dict\n )\n\n user = UserModel.objects.get(username='test')\n users_city_after = user.city\n\n assert users_city_before != users_city_after\n\n def test_email_should_not_be_changed_after_put_request(self, client):\n params = {\n 'username': 'test',\n 'email': '[email protected]',\n 'password': 'test'\n }\n\n UserFactory.create(**params)\n user = UserModel.objects.get(username='test')\n user_detail = reverse('users-detail', args=[user.id])\n\n users_email_before = user.email\n\n params.pop('email')\n\n token_dict = get_login_params_dict(client, params)\n\n client.put(\n user_detail, simplejson.dumps({'email': '[email protected]'}), **token_dict\n )\n\n user = UserModel.objects.get(username='test')\n users_email_after = user.email\n\n assert users_email_before == users_email_after\n\n def test_not_owner_can_not_change_user_profile(self, client):\n params = {\n 'username': 'test',\n 'email': '[email protected]',\n 'password': 'test'\n }\n params_1 = {\n 'username': 'test_not_owner',\n 'email': '[email protected]',\n 'password': 'test'\n }\n UserFactory.create(**params)\n UserFactory.create(**params_1)\n user = UserModel.objects.get(username='test')\n\n user_detail = reverse('users-detail', args=[user.id])\n\n params_1.pop('email')\n token_dict = get_login_params_dict(client, params_1)\n\n resp = simplejson.loads(\n client.put(\n user_detail, simplejson.dumps({'email': '[email protected]'}), **token_dict\n ).content\n )\n expected_resp = {'detail': 'You do not have permission to perform this action.'}\n\n assert resp == expected_resp\n\n def test_owner_can_change_user_profile(self, client):\n params = {\n 'username': 'test',\n 'email': '[email protected]',\n 'password': 'test',\n 'city': 'Kyiv',\n }\n new_city = 'Lviv'\n UserFactory.create(**params)\n user = UserModel.objects.get(username='test')\n\n user_detail = reverse('users-detail', args=[user.id])\n\n token_dict = get_login_params_dict(client, params)\n\n resp = simplejson.loads(\n client.put(\n user_detail, simplejson.dumps({'city': new_city}), **token_dict\n ).content\n )\n\n assert resp.get('city') == new_city\n" }, { "alpha_fraction": 0.7050359845161438, "alphanum_fraction": 0.7050359845161438, "avg_line_length": 29.40625, "blob_id": "5b863a222e0930367ebd5f812d6961860b6bd5b9", "content_id": "5eff4a4aa4e06aee205b172e15df9e31e935c549", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 973, "license_type": "no_license", "max_line_length": 113, "num_lines": 32, "path": "/target/serializers.py", "repo_name": "zubrzubr/learnenjoy", "src_encoding": "UTF-8", "text": "from rest_framework import serializers\n\nfrom reward.serializers import RewardSerializer\nfrom target.models import Target\nfrom book.serializers import BookBaseSerializer\nfrom target.services import ProgressService\n\n\nclass BaseTargetSerializer(serializers.ModelSerializer):\n \"\"\"\n Serializer for target creation.\n \"\"\"\n owner = serializers.HiddenField(default=serializers.CurrentUserDefault())\n reward = RewardSerializer(many=False, required=False)\n pages_per_day = serializers.SerializerMethodField()\n\n class Meta:\n model = Target\n fields = (\n 'title', 'description', 'book', 
'current_page_progress', 'start_date', 'end_date', 'owner', 'reward',\n 'pages_per_day',\n )\n\n def get_pages_per_day(self, obj):\n return ProgressService(obj).get_pages_daily_target()\n\n\nclass TargetSerializer(BaseTargetSerializer):\n \"\"\"\n Serializer to present all targets.\n \"\"\"\n book = BookBaseSerializer(many=False)\n" }, { "alpha_fraction": 0.6302428245544434, "alphanum_fraction": 0.631346583366394, "avg_line_length": 33.846153259277344, "blob_id": "83cf78ad5669ca3277fd3defce0ec7f5279b6fe6", "content_id": "f17bbf6e0f371a6eb3317a0ee20ba69d70de1e3e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 906, "license_type": "no_license", "max_line_length": 104, "num_lines": 26, "path": "/target/services.py", "repo_name": "zubrzubr/learnenjoy", "src_encoding": "UTF-8", "text": "import datetime\n\n\nclass ProgressService(object):\n \"\"\"\n Logic to calculate common operation for target's progress\n \"\"\"\n def __init__(self, target_instance):\n self.target_instance = target_instance\n\n def get_pages_daily_target(self):\n \"\"\"\n Calculates pages which user should read every day to achieve the goal\n :return: int count of pages per day\n \"\"\"\n end_date = self.target_instance.end_date\n today_date = datetime.datetime.today().date()\n if isinstance(end_date, datetime.datetime):\n end_date = end_date.date()\n if end_date < today_date:\n return\n date_diff = end_date - datetime.datetime.today().date()\n pages_diff = (self.target_instance.book.page_count - self.target_instance.current_page_progress)\n daily_target = pages_diff / date_diff.days\n\n return round(daily_target, 1)\n" }, { "alpha_fraction": 0.5960395932197571, "alphanum_fraction": 0.6336633563041687, "avg_line_length": 25.578947067260742, "blob_id": "6df6982b6a95a8e6b9b2144be25aee7cef79d422", "content_id": "3960f59406013e2d3be660dde640bb68ca1f0693", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 505, "license_type": "no_license", "max_line_length": 148, "num_lines": 19, "path": "/target/migrations/0002_auto_20180804_1022.py", "repo_name": "zubrzubr/learnenjoy", "src_encoding": "UTF-8", "text": "# Generated by Django 2.0.6 on 2018-08-04 10:22\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('target', '0001_initial'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='target',\n name='reward',\n field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='targets', to='reward.Reward'),\n ),\n ]\n" }, { "alpha_fraction": 0.5815916657447815, "alphanum_fraction": 0.584941029548645, "avg_line_length": 31.881057739257812, "blob_id": "5ced749a01f9d6f772ca3e5b3a2d5798cbcf40a9", "content_id": "9ea6f80a55a731ef616374f00105a30bd5e1a090", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7464, "license_type": "no_license", "max_line_length": 104, "num_lines": 227, "path": "/target/tests/test_views.py", "repo_name": "zubrzubr/learnenjoy", "src_encoding": "UTF-8", "text": "import datetime\n\nimport pytest\nimport simplejson\nfrom django.urls import reverse\n\nfrom book.tests.factories import BookFactory\nfrom common.tests.utils import get_login_params_dict\nfrom custom_user.tests.factories import UserFactory\nfrom target.models import Target\nfrom target.tests.factories import TargetFactory\n\n\[email 
protected]_db\nclass TestTargetsView(object):\n def test_targets_view_should_return_three_targets(self, client):\n targets_url = reverse('targets-list')\n\n TargetFactory.create()\n TargetFactory.create()\n TargetFactory.create()\n\n expected_len = 3\n\n resp = simplejson.loads(client.get(targets_url).content)\n\n assert len(resp) == expected_len\n\n def test_targets_returned_fields(self, client):\n targets_url = reverse('targets-list')\n TargetFactory.create()\n\n resp = simplejson.loads(client.get(targets_url).content)\n resp_keys = resp[0].keys()\n\n expected_keys = [\n 'title', 'description', 'book', 'current_page_progress', 'start_date', 'end_date', 'reward',\n 'pages_per_day',\n ]\n\n assert expected_keys == list(resp_keys)\n\n def test_not_authenticated_users_can_not_add_book(self, client):\n targets_url = reverse('targets-list')\n params = {\n 'title': 'My target bla bla bla'\n }\n\n resp = simplejson.loads(client.post(targets_url, params).content)\n expected_resp = {'detail': 'Authentication credentials were not provided.'}\n\n assert resp == expected_resp\n\n def test_authenticated_users_can_add_target(self, client):\n targets_url = reverse('targets-list')\n\n book = BookFactory.create()\n end_date = datetime.datetime.now() + datetime.timedelta(days=21)\n\n params = simplejson.dumps({\n 'title': 'Test title',\n 'description': 'test',\n 'book': book.id,\n 'start_date': datetime.datetime.now().strftime('%Y-%m-%d'),\n 'end_date': end_date.strftime('%Y-%m-%d'),\n })\n login_params = {\n 'username': 'test',\n 'email': '[email protected]',\n 'password': 'test'\n }\n\n UserFactory.create(**login_params)\n\n token_dict = get_login_params_dict(client, login_params)\n resp = simplejson.loads(client.post(targets_url, params, **token_dict).content)\n expected_resp_title = 'Test title'\n\n assert expected_resp_title == resp.get('title')\n\n\[email protected]_db\nclass TestTargetDetailView(object):\n def test_target_detail_view_get_detail_target(self, client):\n target_params = {\n 'title': 'test',\n 'description': 'test test',\n }\n\n target = TargetFactory.create(**target_params)\n\n rewards_url = reverse('targets-detail', args=[target.id])\n\n resp = simplejson.loads(client.get(rewards_url).content)\n\n assert resp['title'] == target_params['title']\n assert resp['description'] == target_params['description']\n\n def test_not_owner_can_not_change_target(self, client):\n targets_url = reverse('targets-list')\n\n user_params_owner = {\n 'username': 'test',\n 'email': '[email protected]',\n 'password': 'test'\n }\n user_params_not_owner = {\n 'username': 'test_not_owner',\n 'email': '[email protected]',\n 'password': 'test'\n }\n book = BookFactory.create()\n end_date = datetime.datetime.now() + datetime.timedelta(days=21)\n\n targets_create_params = {\n 'title': 'Test title',\n 'description': 'test',\n 'book': book.id,\n 'start_date': datetime.datetime.now().strftime('%Y-%m-%d'),\n 'end_date': end_date.strftime('%Y-%m-%d'),\n }\n\n UserFactory.create(**user_params_owner)\n UserFactory.create(**user_params_not_owner)\n\n token_dict_owner = get_login_params_dict(client, user_params_owner)\n\n client.post(targets_url, simplejson.dumps(targets_create_params), **token_dict_owner)\n\n created_target = Target.objects.get(title='Test title')\n targets_detail = reverse('targets-detail', args=[created_target.id])\n\n user_params_not_owner.pop('email')\n token_dict_not_owner = get_login_params_dict(client, user_params_not_owner)\n\n resp = simplejson.loads(\n client.put(\n targets_detail, 
simplejson.dumps({'name': 'new'}), **token_dict_not_owner\n ).content\n )\n\n expected_resp = {'detail': 'You do not have permission to perform this action.'}\n\n assert resp == expected_resp\n\n def test_owner_can_change_target(self, client):\n targets_url = reverse('targets-list')\n user_params_owner = {\n 'username': 'test',\n 'email': '[email protected]',\n 'password': 'test'\n }\n book = BookFactory.create()\n end_date = datetime.datetime.now() + datetime.timedelta(days=21)\n targets_create_params = {\n 'title': 'Test title',\n 'description': 'test',\n 'book': book.id,\n 'start_date': datetime.datetime.now().strftime('%Y-%m-%d'),\n 'end_date': end_date.strftime('%Y-%m-%d'),\n }\n\n new_title = 'test_new'\n\n UserFactory.create(**user_params_owner)\n\n token_dict_owner = get_login_params_dict(client, user_params_owner)\n client.post(targets_url, simplejson.dumps(targets_create_params), **token_dict_owner)\n\n created_target = Target.objects.get(title='Test title')\n targets_detail = reverse('targets-detail', args=[created_target.id])\n\n token_dict_owner = get_login_params_dict(client, user_params_owner)\n\n resp = simplejson.loads(\n client.patch(\n targets_detail, simplejson.dumps({'title': new_title}), **token_dict_owner\n ).content\n )\n\n assert resp.get('title') == new_title\n\n def test_detail_target_view_should_contain_pages_per_day(self, client):\n book = BookFactory.create()\n book.page_count = 700\n book.save()\n\n end_date = datetime.datetime.now() + datetime.timedelta(days=21)\n params = {\n 'title': 'Test title',\n 'description': 'test',\n 'book': book,\n 'start_date': datetime.datetime.now().strftime('%Y-%m-%d'),\n 'end_date': end_date.strftime('%Y-%m-%d'),\n }\n\n target = TargetFactory(**params)\n target.save()\n\n targets_url = reverse('targets-detail', args=[target.id])\n\n resp = simplejson.loads(client.get(targets_url).content)\n\n assert resp.get('pages_per_day') == 33.3\n\n def test_detail_target_view_should_contain_pages_per_day_and_return_int(self, client):\n book = BookFactory.create()\n book.page_count = 2100\n book.save()\n\n end_date = datetime.datetime.now() + datetime.timedelta(days=21)\n params = {\n 'title': 'Test title',\n 'description': 'test',\n 'book': book,\n 'start_date': datetime.datetime.now().strftime('%Y-%m-%d'),\n 'end_date': end_date.strftime('%Y-%m-%d'),\n }\n\n target = TargetFactory(**params)\n target.save()\n\n targets_url = reverse('targets-detail', args=[target.id])\n\n resp = simplejson.loads(client.get(targets_url).content)\n\n assert resp.get('pages_per_day') == 100\n" }, { "alpha_fraction": 0.5930232405662537, "alphanum_fraction": 0.7441860437393188, "avg_line_length": 20.5, "blob_id": "58377f359fc356e696d70121ffdae65f9adb13ce", "content_id": "9f61e8dba9aae3ccbbe6761bb47df783f557b473", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 258, "license_type": "no_license", "max_line_length": 36, "num_lines": 12, "path": "/requirements.txt", "repo_name": "zubrzubr/learnenjoy", "src_encoding": "UTF-8", "text": "djangorestframework==3.9.1\ndjango-configurations==2.1\npytest==3.4.2\npytest-django==3.1.2\nmock==2.0.0\nsimplejson==3.13.2\ndjango-rest-swagger==2.1.2\ndjangorestframework-simplejwt==3.2.3\ndjango==3.0.3\nfactory_boy==2.10.0\ngunicorn==19.9.0\npsycopg2-binary==2.7.7\n" }, { "alpha_fraction": 0.7682926654815674, "alphanum_fraction": 0.7682926654815674, "avg_line_length": 33.52631759643555, "blob_id": "7fc958a9c327716c885bf131dd73e044cf2e7ad2", "content_id": 
"d08581b0c5a9101ac80dd6b73411c6ef4938bbd2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 656, "license_type": "no_license", "max_line_length": 77, "num_lines": 19, "path": "/target/views.py", "repo_name": "zubrzubr/learnenjoy", "src_encoding": "UTF-8", "text": "from rest_framework import viewsets\nfrom rest_framework.permissions import IsAuthenticatedOrReadOnly\n\nfrom target.models import Target\nfrom target.serializers import TargetSerializer, BaseTargetSerializer\nfrom common.permissions import BaseIsOwnerOrReadOnly\n\n\nclass TargetsViewSet(viewsets.ModelViewSet):\n \"\"\"\n Model view for targets, presents: detail view, and list view for targets.\n \"\"\"\n queryset = Target.objects.all()\n permission_classes = (BaseIsOwnerOrReadOnly, IsAuthenticatedOrReadOnly)\n\n def get_serializer_class(self):\n if self.action == 'create':\n return BaseTargetSerializer\n return TargetSerializer\n" }, { "alpha_fraction": 0.5877453684806824, "alphanum_fraction": 0.5885385870933533, "avg_line_length": 29.938650131225586, "blob_id": "0b26496bb40bc1acf104a09066fe201e8f206b06", "content_id": "7a30e47f0e95deda0ebc621ce6adfee4120a5770", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5043, "license_type": "no_license", "max_line_length": 93, "num_lines": 163, "path": "/reward/tests/test_views.py", "repo_name": "zubrzubr/learnenjoy", "src_encoding": "UTF-8", "text": "import pytest\nimport simplejson\n\nfrom django.urls import reverse\n\nfrom reward.models import Reward\nfrom reward.tests.factories import RewardFactory\nfrom common.tests.utils import get_login_params_dict\nfrom custom_user.tests.factories import UserFactory\n\n\[email protected]_db\nclass TestRewardView(object):\n def test_rewards_list_view_should_return_three_rewards(self, client):\n rewards_url = reverse('rewards-list')\n RewardFactory.create()\n RewardFactory.create()\n RewardFactory.create()\n \n expected_len = 3\n resp = simplejson.loads(client.get(rewards_url).content)\n\n assert len(resp) == expected_len\n\n def test_rewards_returned_fields(self, client):\n books_url = reverse('rewards-list')\n RewardFactory.create()\n \n resp = simplejson.loads(client.get(books_url).content)\n resp_keys = resp[0].keys()\n expected_keys = ['id', 'name', 'url']\n\n assert expected_keys == list(resp_keys)\n\n def test_not_authenticated_users_can_not_add_reward(self, client):\n rewards_url = reverse('rewards-list')\n params = {\n 'name': 'laptop',\n 'url': 'https://www.test.com/laptop/'\n }\n\n resp = simplejson.loads(client.post(rewards_url, params).content)\n expected_resp = {'detail': 'Authentication credentials were not provided.'}\n\n assert resp == expected_resp\n\n def test_authenticated_users_can_add_reward(self, client):\n rewards_url = reverse('rewards-list')\n\n params = simplejson.dumps({\n 'name': 'laptop',\n 'url': 'https://www.test.com/laptop/'\n })\n login_params = {\n 'username': 'test',\n 'email': '[email protected]',\n 'password': 'test'\n }\n\n UserFactory.create(**login_params)\n\n token_dict = get_login_params_dict(client, login_params)\n\n resp = simplejson.loads(\n client.post(rewards_url, params, **token_dict).content\n ) \n\n expected_resp = {\n 'id': 1,\n 'name': 'laptop',\n 'url': 'https://www.test.com/laptop/'\n }\n\n assert expected_resp == resp\n\n\[email protected]_db\nclass TestRewardDetailView(object):\n def test_reward_detail_view_get_detail_reward(self, client):\n reward_params = {\n 'name': 'test',\n 
'url': 'https://www.test.com'\n }\n\n reward = RewardFactory.create(**reward_params)\n\n rewards_url = reverse('rewards-detail', args=[reward.id])\n\n resp = simplejson.loads(client.get(rewards_url).content)\n\n expected_result = dict(reward_params)\n expected_result['id'] = 1\n\n assert resp == expected_result\n\n def test_not_owner_can_not_change_reward(self, client):\n rewards_url = reverse('rewards-list')\n\n user_params_owner = {\n 'username': 'test',\n 'email': '[email protected]',\n 'password': 'test'\n }\n user_params_not_owner = {\n 'username': 'test_not_owner',\n 'email': '[email protected]',\n 'password': 'test'\n }\n\n rewards_create_params = {'name': 'test'}\n\n UserFactory.create(**user_params_owner)\n UserFactory.create(**user_params_not_owner)\n\n token_dict_owner = get_login_params_dict(client, user_params_owner)\n\n client.post(rewards_url, simplejson.dumps(rewards_create_params), **token_dict_owner)\n\n created_reward = Reward.objects.get(name='test')\n rewards_detail = reverse('rewards-detail', args=[created_reward.id])\n\n user_params_not_owner.pop('email')\n token_dict_not_owner = get_login_params_dict(client, user_params_not_owner)\n\n resp = simplejson.loads(\n client.put(\n rewards_detail, simplejson.dumps({'name': 'new'}), **token_dict_not_owner\n ).content\n )\n\n expected_resp = {'detail': 'You do not have permission to perform this action.'}\n\n assert resp == expected_resp\n\n def test_owner_can_change_reward(self, client):\n rewards_url = reverse('rewards-list')\n\n user_params_owner = {\n 'username': 'test',\n 'email': '[email protected]',\n 'password': 'test'\n }\n\n rewards_create_params = {'name': 'test', 'url': 'http://www.test.com'}\n new_name = 'test_new'\n\n UserFactory.create(**user_params_owner)\n\n token_dict_owner = get_login_params_dict(client, user_params_owner)\n client.post(rewards_url, simplejson.dumps(rewards_create_params), **token_dict_owner)\n\n created_reward = Reward.objects.get(name='test')\n rewards_detail = reverse('rewards-detail', args=[created_reward.id])\n\n token_dict_owner = get_login_params_dict(client, user_params_owner)\n\n resp = simplejson.loads(\n client.put(\n rewards_detail, simplejson.dumps({'name': new_name}), **token_dict_owner\n ).content\n )\n\n assert resp.get('name') == new_name\n" }, { "alpha_fraction": 0.6157480478286743, "alphanum_fraction": 0.6157480478286743, "avg_line_length": 25.45833396911621, "blob_id": "4c807657114ebb8a65f2b92a9bb9fd571e33d181", "content_id": "e15e2e415c2e02407c499400d41a27548e4e2b3d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 635, "license_type": "no_license", "max_line_length": 106, "num_lines": 24, "path": "/common/abstract_models.py", "repo_name": "zubrzubr/learnenjoy", "src_encoding": "UTF-8", "text": "from django.db import models\n\n\nclass OwnerModel(models.Model):\n \"\"\"\n Add to models owner field. 
By default on delete policy is 'set_null'\n \"\"\"\n user_on_delete_policy = 'set_null'\n\n on_delete_policy_mapping = {\n 'cascade': models.CASCADE,\n 'protect': models.PROTECT,\n 'set': models.SET,\n 'set_null': models.SET_NULL,\n 'set_default': models.SET_DEFAULT,\n 'do_nothing': models.DO_NOTHING,\n }\n\n owner = models.ForeignKey(\n 'custom_user.CustomUser', null=True, on_delete=on_delete_policy_mapping.get(user_on_delete_policy)\n )\n\n class Meta:\n abstract = True\n" }, { "alpha_fraction": 0.6465989351272583, "alphanum_fraction": 0.6661454439163208, "avg_line_length": 30.19512176513672, "blob_id": "32af7b7e83adb6239796f2729045c2feb11ae232", "content_id": "8d9e5c9f818e3fbadf99e1cb9af234aa21910d0d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1279, "license_type": "no_license", "max_line_length": 76, "num_lines": 41, "path": "/book/models.py", "repo_name": "zubrzubr/learnenjoy", "src_encoding": "UTF-8", "text": "from django.db import models\nfrom django.utils.translation import ugettext as _\n\nfrom common.abstract_models import OwnerModel\n\n\nclass Author(models.Model):\n \"\"\"\n Model to define author for books\n \"\"\"\n first_name = models.CharField(_(\"Author's first name\"), max_length=255)\n last_name = models.CharField(_(\"Author's last name\"), max_length=255)\n bio = models.TextField(_(\"Author's biography\"), max_length=1024)\n\n def __str__(self):\n return \"{} {}\".format(self.last_name, self.first_name)\n\n\nclass Genre(models.Model):\n \"\"\"\n Model to define genre for books\n \"\"\"\n title = models.CharField(_(\"Genre title\"), max_length=255)\n description = models.TextField(_(\"Genre description\"), max_length=1024)\n\n def __str__(self):\n return self.title\n\n\nclass Book(OwnerModel):\n \"\"\"\n Model to define book for user's targets\n \"\"\"\n title = models.CharField(_(\"Book's name\"), max_length=255)\n description = models.TextField(_(\"Book's description\"), max_length=1024)\n authors = models.ManyToManyField(Author, related_name=\"authors_books\")\n genres = models.ManyToManyField(Genre, related_name=\"books\")\n page_count = models.PositiveIntegerField(_(\"Count of pages\"), default=0)\n\n def __str__(self):\n return self.title\n" }, { "alpha_fraction": 0.6257088780403137, "alphanum_fraction": 0.6332703232765198, "avg_line_length": 23.045454025268555, "blob_id": "4f73af465f98e58f5c4948dfaa424149672e1944", "content_id": "0e608520b79251ae188e09a0fd129ce9ad8403d8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 529, "license_type": "no_license", "max_line_length": 73, "num_lines": 22, "path": "/learntoenjoy/settings.py", "repo_name": "zubrzubr/learnenjoy", "src_encoding": "UTF-8", "text": "from learntoenjoy.configs.common import BaseSettings\nfrom learntoenjoy.configs.mixins import DevMixin, ProdMixin, TestingMixin\n\n\nclass DevSettings(DevMixin, BaseSettings):\n DATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.postgresql',\n 'NAME': 'postgres',\n 'USER': 'postgres',\n 'HOST': 'localhost',\n 'PORT': 5432\n }\n }\n\n\nclass ProdSettings(ProdMixin, BaseSettings):\n pass\n\n\nclass TestSettings(TestingMixin, DevMixin, BaseSettings):\n pass\n" }, { "alpha_fraction": 0.7003222107887268, "alphanum_fraction": 0.7078410387039185, "avg_line_length": 37.79166793823242, "blob_id": "7005bd16dd25fd7d341c712102496df31aca0363", "content_id": "d06937db526d26364c0cae67e3c6dc4646291f40", "detected_licenses": [], "is_generated": false, 
"is_vendor": false, "language": "Python", "length_bytes": 931, "license_type": "no_license", "max_line_length": 98, "num_lines": 24, "path": "/custom_user/models.py", "repo_name": "zubrzubr/learnenjoy", "src_encoding": "UTF-8", "text": "from django.db import models\nfrom django.utils.translation import ugettext as _\nfrom django.contrib.auth.models import AbstractUser\n\nfrom book.models import Book\nfrom target.models import Target\n\n\nclass CustomUser(AbstractUser):\n \"\"\"\n Custom user model to add new attributes to django user model.\n \"\"\"\n bio = models.TextField(_(\"User's biography\"), max_length=500, blank=True)\n country = models.CharField(_(\"User's country\"), max_length=30, blank=True)\n city = models.CharField(_(\"User's city\"), max_length=30, blank=True)\n birth_date = models.DateField(_(\"User's birth date\"), null=True, blank=True)\n favorite_books = models.ManyToManyField(Book, related_name=\"users_favorite_books\", blank=True)\n targets = models.ManyToManyField(Target, related_name=\"users_targets\", blank=True)\n\n class Meta(object):\n unique_together = ('email', 'username')\n\n def __str__(self):\n return self.email\n" }, { "alpha_fraction": 0.795918345451355, "alphanum_fraction": 0.795918345451355, "avg_line_length": 97, "blob_id": "414ab90ed8fb64e6fbbbaeff1099475c043bbc2d", "content_id": "5f727f64fcf4d852594b94992b4b45d1e3ddcba1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 98, "license_type": "no_license", "max_line_length": 97, "num_lines": 1, "path": "/README.MD", "repo_name": "zubrzubr/learnenjoy", "src_encoding": "UTF-8", "text": "Project for motivation to read and to learn something new. More detail will be added a bit later.\n" }, { "alpha_fraction": 0.6368159055709839, "alphanum_fraction": 0.6389481425285339, "avg_line_length": 28.3125, "blob_id": "49f47ebc61f0e21ebb570a9dafc73b2e5484cca0", "content_id": "50013679f135bbf223a961a107c4e8bd35defc77", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1407, "license_type": "no_license", "max_line_length": 63, "num_lines": 48, "path": "/custom_user/tests/factories.py", "repo_name": "zubrzubr/learnenjoy", "src_encoding": "UTF-8", "text": "import datetime\n\nimport factory.fuzzy\nfrom django.contrib.auth.models import Group\n\nfrom custom_user.models import CustomUser\n\n\nclass GroupFactory(factory.django.DjangoModelFactory):\n name = 'test_group'\n\n class Meta:\n model = Group\n\n\nclass UserFactory(factory.django.DjangoModelFactory):\n username = factory.Sequence(lambda n: 'user_%d' % n)\n email = factory.Sequence(lambda n: 'email_%[email protected]' % n)\n bio = factory.fuzzy.FuzzyText(length=100)\n country = factory.Sequence(lambda n: 'test country %d' % n)\n city = factory.Sequence(lambda n: 'test city %d' % n)\n birth_date = factory.LazyFunction(datetime.datetime.utcnow)\n password = factory.PostGenerationMethodCall('set_password')\n\n class Meta:\n model = CustomUser\n\n @factory.post_generation\n def targets(self, create, extracted, **kwargs):\n if not create:\n # Simple build, do nothing.\n return\n\n if extracted:\n # A list of groups were passed in, use them\n for target in extracted:\n self.targets.add(target)\n\n @factory.post_generation\n def favorites_books(self, create, extracted, **kwargs):\n if not create:\n # Simple build, do nothing.\n return\n\n if extracted:\n # A list of groups were passed in, use them\n for book in extracted:\n self.favorites_books.add(book)\n" }, { 
"alpha_fraction": 0.7285180687904358, "alphanum_fraction": 0.7285180687904358, "avg_line_length": 37.238094329833984, "blob_id": "665eb758e56b01a03d72fa4c28cb5124b71ef528", "content_id": "7b876d98ba0952c5195448e4b78608eebf6cc56f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 803, "license_type": "no_license", "max_line_length": 98, "num_lines": 21, "path": "/custom_user/views.py", "repo_name": "zubrzubr/learnenjoy", "src_encoding": "UTF-8", "text": "from rest_framework import viewsets\n\nfrom custom_user.models import CustomUser\nfrom custom_user.serializers import BaseUserSerializer, CreateUserSerializer, UpdateUserSerializer\nfrom custom_user.permissions import IsRegisteredUserOwnerOrReadonly\n\n\nclass CustomUsersViewSet(viewsets.ModelViewSet):\n \"\"\"\n Model view for targets, presents: detail view, and list view for targets.\n \"\"\"\n queryset = CustomUser.objects.filter(is_superuser=False)\n permission_classes = (IsRegisteredUserOwnerOrReadonly,)\n http_method_names = ['get', 'post', 'head', 'put', 'patch']\n \n def get_serializer_class(self):\n if self.action == 'create':\n return CreateUserSerializer\n if self.action == 'update':\n return UpdateUserSerializer\n return BaseUserSerializer\n" }, { "alpha_fraction": 0.6873156428337097, "alphanum_fraction": 0.6873156428337097, "avg_line_length": 23.214284896850586, "blob_id": "beee16572ea64a25db05f0829b5b91eb1fc84078", "content_id": "cbc2e0551cde6c46d2ce9a01a7ccff8c860e12c7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 339, "license_type": "no_license", "max_line_length": 77, "num_lines": 14, "path": "/reward/serializers.py", "repo_name": "zubrzubr/learnenjoy", "src_encoding": "UTF-8", "text": "from rest_framework import serializers\n\nfrom reward.models import Reward\n\n\nclass RewardSerializer(serializers.ModelSerializer):\n \"\"\"\n Main reward's serializer\n \"\"\"\n owner = serializers.HiddenField(default=serializers.CurrentUserDefault())\n\n class Meta:\n model = Reward\n fields = ('id', 'name', 'url', 'owner')\n" }, { "alpha_fraction": 0.7027601003646851, "alphanum_fraction": 0.7027601003646851, "avg_line_length": 28.4375, "blob_id": "84f2685bc9b15b339bb8d100f592cabbd9447031", "content_id": "bc9c84b586b22112cdccf485dcfbf57ba5d62f61", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 471, "license_type": "no_license", "max_line_length": 84, "num_lines": 16, "path": "/common/tests/utils.py", "repo_name": "zubrzubr/learnenjoy", "src_encoding": "UTF-8", "text": "import simplejson\n\nfrom django.urls import reverse\n\n\ndef get_login_params_dict(client, login_params):\n \"\"\"\n Returns dict with params which needed for unit tests to login with api point\n \"\"\"\n\n user_login_url = reverse('token_obtain_pair')\n\n resp_login = simplejson.loads(client.post(user_login_url, login_params).content)\n\n token = \"Bearer {}\".format(resp_login.get('access'))\n return {'content_type': 'application/json', 'HTTP_AUTHORIZATION': token}\n" }, { "alpha_fraction": 0.720812201499939, "alphanum_fraction": 0.720812201499939, "avg_line_length": 15.416666984558105, "blob_id": "7fc666f97b144857ff149191673010961ec1c7d6", "content_id": "d15861122f8b88a9e2f231f313ae73456a7ef9c2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 197, "license_type": "no_license", "max_line_length": 40, "num_lines": 12, "path": 
"/target/admin.py", "repo_name": "zubrzubr/learnenjoy", "src_encoding": "UTF-8", "text": "from django.contrib import admin\nfrom target.models import Target\n\n\nclass TargetAdmin(admin.ModelAdmin):\n \"\"\"\n Target admins class\n \"\"\"\n pass\n\n\nadmin.site.register(Target, TargetAdmin)\n" }, { "alpha_fraction": 0.6853932738304138, "alphanum_fraction": 0.6853932738304138, "avg_line_length": 23.272727966308594, "blob_id": "71cd69a3dd956889b5703cb57a31ce514e765c98", "content_id": "f9e5d71691e0e65a1437ad7778880170ee5aa592", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 267, "license_type": "no_license", "max_line_length": 66, "num_lines": 11, "path": "/reward/tests/factories.py", "repo_name": "zubrzubr/learnenjoy", "src_encoding": "UTF-8", "text": "import factory\n\nfrom reward.models import Reward\n\n\nclass RewardFactory(factory.django.DjangoModelFactory):\n name = factory.Sequence(lambda n: 'name_%d' % n)\n url = factory.Sequence(lambda n: 'http://www.test_%d.com' % n)\n\n class Meta:\n model = Reward\n" }, { "alpha_fraction": 0.800000011920929, "alphanum_fraction": 0.800000011920929, "avg_line_length": 18.5, "blob_id": "c5f1808c51bdb7994f07159aadb97ab1b6f51b7f", "content_id": "62bef84afc29f46960b9f3030ee616d9e3c0731b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 195, "license_type": "no_license", "max_line_length": 72, "num_lines": 10, "path": "/conftest.py", "repo_name": "zubrzubr/learnenjoy", "src_encoding": "UTF-8", "text": "import os\n\nimport configurations\n\n\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'learntoenjoy.settings')\nos.environ.setdefault('DJANGO_CONFIGURATION', 'TestSettings')\n\n\nconfigurations.setup()\n" }, { "alpha_fraction": 0.6935483813285828, "alphanum_fraction": 0.7526881694793701, "avg_line_length": 15.909090995788574, "blob_id": "62467ead8a9ce6ba80b37cbd847089518d17eb7c", "content_id": "141278f2a08b1ca009c4a4de70661bde43bb39a9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Dockerfile", "length_bytes": 186, "license_type": "no_license", "max_line_length": 56, "num_lines": 11, "path": "/Dockerfile", "repo_name": "zubrzubr/learnenjoy", "src_encoding": "UTF-8", "text": "FROM ubuntu:18.04\n\nRUN apt update && apt install python3-pip python3-dev -y\n\nCOPY . 
/srv/www/learnenjoy/\n\nWORKDIR /srv/www/learnenjoy/\n\nRUN pip3 install -r requirements.txt\n\nEXPOSE 8000\n" }, { "alpha_fraction": 0.6799116730690002, "alphanum_fraction": 0.6799116730690002, "avg_line_length": 15.178571701049805, "blob_id": "79fef18d12d7567dce788846414bb2c45010b160", "content_id": "9907ca5b3c0d85040adb4279c373f5d958e5214f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 453, "license_type": "no_license", "max_line_length": 43, "num_lines": 28, "path": "/book/admin.py", "repo_name": "zubrzubr/learnenjoy", "src_encoding": "UTF-8", "text": "from django.contrib import admin\nfrom book.models import Book, Genre, Author\n\n\nclass AuthorAdmin(admin.ModelAdmin):\n    \"\"\"\n    Author admin class\n    \"\"\"\n    pass\n\n\nclass GenreAdmin(admin.ModelAdmin):\n    \"\"\"\n    Genre admin class\n    \"\"\"\n    pass\n\n\nclass BookAdmin(admin.ModelAdmin):\n    \"\"\"\n    Book admin class\n    \"\"\"\n    pass\n\n\nadmin.site.register(Author, AuthorAdmin)\nadmin.site.register(Genre, GenreAdmin)\nadmin.site.register(Book, BookAdmin)\n" }, { "alpha_fraction": 0.5414999723434448, "alphanum_fraction": 0.5615000128746033, "avg_line_length": 39.81632614135742, "blob_id": "53b8e1ffd233f29956b098044c3c8b358073178d", "content_id": "41e925174e40d7fcf90e2b05f95e27c1b9fb16e2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2000, "license_type": "no_license", "max_line_length": 114, "num_lines": 49, "path": "/book/migrations/0001_initial.py", "repo_name": "zubrzubr/learnenjoy", "src_encoding": "UTF-8", "text": "# Generated by Django 2.0.6 on 2018-06-23 14:52\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n    initial = True\n\n    dependencies = [\n    ]\n\n    operations = [\n        migrations.CreateModel(\n            name='Author',\n            fields=[\n                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n                ('first_name', models.CharField(max_length=255, verbose_name="Author's first name")),\n                ('last_name', models.CharField(max_length=255, verbose_name="Author's last name")),\n                ('bio', models.TextField(max_length=1024, verbose_name="Author's biography")),\n            ],\n        ),\n        migrations.CreateModel(\n            name='Book',\n            fields=[\n                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n                ('title', models.CharField(max_length=255, verbose_name="Book's name")),\n                ('description', models.TextField(max_length=1024, verbose_name="Book's description")),\n                ('page_count', models.PositiveIntegerField(default=0, verbose_name='Count of pages')),\n                ('authors', models.ManyToManyField(related_name='authors_books', to='book.Author')),\n            ],\n            options={\n                'abstract': False,\n            },\n        ),\n        migrations.CreateModel(\n            name='Genre',\n            fields=[\n                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n                ('title', models.CharField(max_length=255, verbose_name='Genre title')),\n                ('description', models.TextField(max_length=1024, verbose_name='Genre description')),\n            ],\n        ),\n        migrations.AddField(\n            model_name='book',\n            name='genres',\n            field=models.ManyToManyField(related_name='books', to='book.Genre'),\n        ),\n    ]\n" }, { "alpha_fraction": 0.6427789926528931, "alphanum_fraction": 0.6564551591873169, "avg_line_length": 32.85185241699219, "blob_id": "a56cbda00d87bfb5dc4f51a8c964c119d798f309", "content_id": "edb11d6fb0c1f5128b0c47e71d40b6be89743945", "detected_licenses": [], "is_generated": false, 
"is_vendor": false, "language": "Python", "length_bytes": 1828, "license_type": "no_license", "max_line_length": 97, "num_lines": 54, "path": "/target/tests/test_services.py", "repo_name": "zubrzubr/learnenjoy", "src_encoding": "UTF-8", "text": "import datetime\n\nimport pytest\n\nfrom book.tests.factories import BookFactory\nfrom target.services import ProgressService\nfrom target.tests.factories import TargetFactory\n\n\[email protected]_db\nclass TestProgressService(object):\n def test_get_goal_per_day_should_return_fifty(self):\n today_date = datetime.datetime.today()\n end_date = today_date + datetime.timedelta(days=5)\n book = BookFactory(page_count=500)\n book.save()\n target = TargetFactory(\n end_date=end_date, start_date=today_date, current_page_progress=250, book=book\n )\n target.save()\n\n goal_value = ProgressService(target).get_pages_daily_target()\n expected_result = 50\n\n assert goal_value == expected_result\n\n def test_get_goal_per_day_should_return_none_if_end_day_less_then_today(self):\n today_date = datetime.datetime.today()\n end_date = today_date - datetime.timedelta(days=5)\n book = BookFactory(page_count=500)\n book.save()\n target = TargetFactory(\n end_date=end_date, start_date=today_date, current_page_progress=250, book=book\n )\n target.save()\n\n goal_value = ProgressService(target).get_pages_daily_target()\n\n assert goal_value is None\n\n def test_get_goal_per_day_should_return_fifty_if_date_object_passed(self):\n today_date = datetime.datetime.today()\n end_date = today_date + datetime.timedelta(days=5)\n book = BookFactory(page_count=500)\n book.save()\n target = TargetFactory(\n end_date=end_date.date(), start_date=today_date, current_page_progress=250, book=book\n )\n target.save()\n\n goal_value = ProgressService(target).get_pages_daily_target()\n expected_result = 50\n\n assert goal_value == expected_result\n" }, { "alpha_fraction": 0.8015123009681702, "alphanum_fraction": 0.8015123009681702, "avg_line_length": 34.266666412353516, "blob_id": "74cc9958b1b9fec251339022f6acec7680f322c5", "content_id": "c007694e8ba6562eba33c1388c2e42afa1022c5c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 529, "license_type": "no_license", "max_line_length": 77, "num_lines": 15, "path": "/reward/views.py", "repo_name": "zubrzubr/learnenjoy", "src_encoding": "UTF-8", "text": "from rest_framework import viewsets\nfrom rest_framework.permissions import IsAuthenticatedOrReadOnly\n\nfrom reward.models import Reward\nfrom reward.serializers import RewardSerializer\nfrom common.permissions import BaseIsOwnerOrReadOnly\n\n\nclass RewardsViewSet(viewsets.ModelViewSet):\n \"\"\"\n Model view for Rewards, presents: detail view, and list view for rewards.\n \"\"\"\n queryset = Reward.objects.all()\n serializer_class = RewardSerializer\n permission_classes = (BaseIsOwnerOrReadOnly, IsAuthenticatedOrReadOnly)\n" }, { "alpha_fraction": 0.6199421882629395, "alphanum_fraction": 0.6264451146125793, "avg_line_length": 26.68000030517578, "blob_id": "c33f680e4f859bd66a0c51b1389709688ab3fe69", "content_id": "0fdc5ef194125d0c1d0b6acb65963d29271db3c4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1384, "license_type": "no_license", "max_line_length": 64, "num_lines": 50, "path": "/book/tests/factories.py", "repo_name": "zubrzubr/learnenjoy", "src_encoding": "UTF-8", "text": "import factory.fuzzy\n\nfrom book.models import Book, Author, Genre\n\n\nclass 
GenreFactory(factory.django.DjangoModelFactory):\n    title = factory.Sequence(lambda n: 'title_%d' % n)\n    description = factory.fuzzy.FuzzyText(length=100)\n\n    class Meta:\n        model = Genre\n\n\nclass AuthorFactory(factory.django.DjangoModelFactory):\n    first_name = factory.Sequence(lambda n: 'first_name_%d' % n)\n    last_name = factory.Sequence(lambda n: 'last_name_%d' % n)\n    bio = factory.fuzzy.FuzzyText(length=100)\n\n    class Meta:\n        model = Author\n\n\nclass BookFactory(factory.django.DjangoModelFactory):\n    title = factory.Sequence(lambda n: 'title_%d' % n)\n    description = factory.fuzzy.FuzzyText(length=100)\n\n    @factory.post_generation\n    def authors(self, create, extracted, **kwargs):\n        if not create:\n            # Simple build, do nothing.\n            return\n\n        if extracted:\n            # A list of authors was passed in, use them\n            for author in extracted:\n                self.authors.add(author)\n\n    @factory.post_generation\n    def genres(self, create, extracted, **kwargs):\n        if not create:\n            # Simple build, do nothing.\n            return\n\n        if extracted:\n            # A list of genres was passed in, use them\n            for genre in extracted:\n                self.genres.add(genre)\n\n    class Meta:\n        model = Book\n" }, { "alpha_fraction": 0.7097505927085876, "alphanum_fraction": 0.7188208699226379, "avg_line_length": 39.09090805053711, "blob_id": "cf397af57250a0ec984bf0d27cb3e10b7457adf1", "content_id": "f2fc17ecf19268392e9077e01f21ab751ad005cd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 882, "license_type": "no_license", "max_line_length": 111, "num_lines": 22, "path": "/target/models.py", "repo_name": "zubrzubr/learnenjoy", "src_encoding": "UTF-8", "text": "from django.db import models\nfrom django.utils.translation import ugettext as _\n\nfrom book.models import Book\nfrom reward.models import Reward\nfrom common.abstract_models import OwnerModel\n\n\nclass Target(OwnerModel):\n    \"\"\"\n    Target's model for tracking user's targets\n    \"\"\"\n    title = models.CharField(_("Target's name"), max_length=255)\n    description = models.TextField(_("Target's description"), max_length=1024)\n    book = models.ForeignKey(Book, related_name='targets', on_delete=models.PROTECT)\n    reward = models.ForeignKey(Reward, related_name='targets', on_delete=models.PROTECT, null=True, blank=True)\n    start_date = models.DateField(_("Target's start date"))\n    end_date = models.DateField(_("Target's end date"))\n    current_page_progress = models.PositiveIntegerField(_("Current page progress"), default=0)\n\n    def __str__(self):\n        return self.title\n" }, { "alpha_fraction": 0.7591241002082825, "alphanum_fraction": 0.7591241002082825, "avg_line_length": 20.076923370361328, "blob_id": "a83cb9caa71780b15dc6240378ce8d1b1389600e", "content_id": "ae24c84509e4d40587c5ec4a991af8fcf9dfd419", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 274, "license_type": "no_license", "max_line_length": 64, "num_lines": 13, "path": "/target/urls.py", "repo_name": "zubrzubr/learnenjoy", "src_encoding": "UTF-8", "text": "from django.conf.urls import url, include\nfrom rest_framework import routers\n\nfrom target.views import TargetsViewSet\n\n\nrouter = routers.DefaultRouter()\nrouter.register(r'targets', TargetsViewSet, base_name='targets')\n\n\nurlpatterns = [\n    url(r'', include(router.urls)),\n]\n" }, { "alpha_fraction": 0.6842105388641357, "alphanum_fraction": 0.6913875341415405, "avg_line_length": 26.866666793823242, "blob_id": "a6ea97fea719b411577b1f8719cd83150b462a1a", "content_id": 
"8328f2e8458c9ab7a5f99436ded32b6594da9698", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 418, "license_type": "no_license", "max_line_length": 69, "num_lines": 15, "path": "/reward/models.py", "repo_name": "zubrzubr/learnenjoy", "src_encoding": "UTF-8", "text": "from django.db import models\nfrom django.utils.translation import ugettext as _\n\nfrom common.abstract_models import OwnerModel\n\n\nclass Reward(OwnerModel):\n \"\"\"\n Rewards's model to present rewards which related to targets\n \"\"\"\n name = models.CharField(_(\"Reward's name\"), max_length=255)\n url = models.URLField(_(\"Link to reward\"), null=True, blank=True)\n\n def __str__(self):\n return self.name\n" }, { "alpha_fraction": 0.5675139427185059, "alphanum_fraction": 0.5691364407539368, "avg_line_length": 30.517045974731445, "blob_id": "04edfa6c07c0f089ec788066c563143e11494f86", "content_id": "87471018f14ef93f4eac23a8fda6526502265d62", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5547, "license_type": "no_license", "max_line_length": 91, "num_lines": 176, "path": "/book/tests/test_views.py", "repo_name": "zubrzubr/learnenjoy", "src_encoding": "UTF-8", "text": "import pytest\nimport simplejson\n\nfrom django.urls import reverse\n\nfrom book.tests.factories import BookFactory, AuthorFactory, GenreFactory\nfrom custom_user.tests.factories import UserFactory\nfrom common.tests.utils import get_login_params_dict\nfrom book.models import Book\n\n\[email protected]_db\nclass TestBooksView(object):\n def test_books_list_view_should_return_three_books(self, client):\n books_url = reverse('books-list')\n BookFactory.create()\n BookFactory.create()\n BookFactory.create()\n \n expected_len = 3\n resp = simplejson.loads(client.get(books_url).content)\n\n assert len(resp) == expected_len\n\n def test_books_returned_fields(self, client):\n books_url = reverse('books-list')\n BookFactory.create()\n \n resp = simplejson.loads(client.get(books_url).content)\n resp_keys = resp[0].keys()\n expected_keys = ['id', 'title', 'authors']\n\n assert expected_keys == list(resp_keys)\n\n def test_not_authenticated_users_can_not_add_book(self, client):\n books_url = reverse('books-list')\n params = {\n 'title': 'Lord of the rings'\n }\n\n resp = simplejson.loads(client.post(books_url, params).content)\n expected_resp = {'detail': 'Authentication credentials were not provided.'}\n\n assert resp == expected_resp\n \n def test_authenticated_users_can_add_book(self, client):\n books_url = reverse('books-list')\n\n params = simplejson.dumps({\n 'title': 'Lord of the rings'\n })\n login_params = {\n 'username': 'test',\n 'email': '[email protected]',\n 'password': 'test'\n }\n\n UserFactory.create(**login_params)\n\n token_dict = get_login_params_dict(client, login_params)\n\n resp = simplejson.loads(\n client.post(books_url, params, **token_dict).content\n ) \n expected_resp_title = 'Lord of the rings'\n\n assert expected_resp_title == resp.get('title')\n\n\[email protected]_db\nclass TestBookDetailView(object):\n def test_books_detail_view_get_detail_book(self, client):\n book_params = {\n 'title': 'test',\n 'page_count': 500,\n 'description': 'test_description'\n }\n author_params = {\n 'first_name':'John', \n 'last_name':'Dad',\n 'bio': 'test'\n }\n genre_params = {\n 'title': 'test',\n 'description': 'test'\n }\n author = AuthorFactory.create(**author_params)\n genre = GenreFactory.create(**genre_params)\n book = 
BookFactory.create(**book_params, authors=[author], genres=[genre])\n books_url = reverse('books-detail', args=[book.id])\n \n resp = simplejson.loads(client.get(books_url).content)\n expected_resp = {\n 'id': 1, \n 'title': 'test', \n 'page_count': 500, \n 'description': 'test_description', \n 'authors': [{'first_name': 'John', 'last_name': 'Dad', 'bio': 'test'}], \n 'genres': [{'title': 'test', 'description': 'test'}]\n }\n \n assert resp == expected_resp\n\n def test_not_owner_can_not_change_book(self, client):\n books_url = reverse('books-list')\n\n user_params_owner = {\n 'username': 'test',\n 'email': '[email protected]',\n 'password': 'test'\n }\n user_params_not_owner = {\n 'username': 'test_not_owner',\n 'email': '[email protected]',\n 'password': 'test'\n }\n\n books_create_params = simplejson.dumps({\n 'title': 'Lord of the rings'\n })\n\n UserFactory.create(**user_params_owner)\n UserFactory.create(**user_params_not_owner)\n\n token_dict_owner = get_login_params_dict(client, user_params_owner)\n\n client.post(books_url, books_create_params, **token_dict_owner)\n\n created_book = Book.objects.get(title='Lord of the rings')\n books_detail = reverse('books-detail', args=[created_book.id])\n\n user_params_not_owner.pop('email')\n token_dict_not_owner = get_login_params_dict(client, user_params_not_owner)\n\n resp = simplejson.loads(\n client.put(\n books_detail, simplejson.dumps({'title': 'Hobbit'}), **token_dict_not_owner\n ).content\n )\n\n expected_resp = {'detail': 'You do not have permission to perform this action.'}\n\n assert resp == expected_resp\n\n def test_owner_can_change_book(self, client):\n books_url = reverse('books-list')\n\n user_params_owner = {\n 'username': 'test',\n 'email': '[email protected]',\n 'password': 'test'\n }\n\n books_create_params = simplejson.dumps({\n 'title': 'Lord of the rings'\n })\n new_title = 'Hobbit'\n\n UserFactory.create(**user_params_owner)\n\n token_dict_owner = get_login_params_dict(client, user_params_owner)\n\n client.post(books_url, books_create_params, **token_dict_owner)\n\n created_book = Book.objects.get(title='Lord of the rings')\n books_detail = reverse('books-detail', args=[created_book.id])\n\n token_dict_owner = get_login_params_dict(client, user_params_owner)\n\n resp = simplejson.loads(\n client.put(\n books_detail, simplejson.dumps({'title': new_title}), **token_dict_owner\n ).content\n )\n\n assert resp.get('title') == new_title\n" }, { "alpha_fraction": 0.7591241002082825, "alphanum_fraction": 0.7591241002082825, "avg_line_length": 20.076923370361328, "blob_id": "cca5ccd1173aedc12d06528f190f3cc52ffc49f5", "content_id": "88c1a61fd6e51dfdbae70ff3b3a4bb46ad33a228", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 274, "license_type": "no_license", "max_line_length": 64, "num_lines": 13, "path": "/reward/urls.py", "repo_name": "zubrzubr/learnenjoy", "src_encoding": "UTF-8", "text": "from django.conf.urls import url, include\nfrom rest_framework import routers\n\nfrom reward.views import RewardsViewSet\n\n\nrouter = routers.DefaultRouter()\nrouter.register(r'rewards', RewardsViewSet, base_name='rewards')\n\n\nurlpatterns = [\n url(r'', include(router.urls)),\n]\n" }, { "alpha_fraction": 0.758571445941925, "alphanum_fraction": 0.758571445941925, "avg_line_length": 32.33333206176758, "blob_id": "7442eee27b6a37dd36ab969f3412bf6ea8ee8a0d", "content_id": "88193c3b439635b67ba04638ce01070e73ec1c0a", "detected_licenses": [], "is_generated": false, "is_vendor": false, 
"language": "Python", "length_bytes": 700, "license_type": "no_license", "max_line_length": 75, "num_lines": 21, "path": "/book/views.py", "repo_name": "zubrzubr/learnenjoy", "src_encoding": "UTF-8", "text": "from rest_framework import viewsets\nfrom rest_framework.permissions import IsAuthenticatedOrReadOnly\n\nfrom book.models import Book\nfrom book.serializers import BookBaseSerializer, BookDetailSerializer\nfrom common.permissions import BaseIsOwnerOrReadOnly\n\n\nclass BooksViewSet(viewsets.ModelViewSet):\n \"\"\"\n Model view for books, presents:\n detail view, and list view for books.\n \"\"\"\n queryset = Book.objects.all()\n serializer_class = BookDetailSerializer\n permission_classes = (BaseIsOwnerOrReadOnly, IsAuthenticatedOrReadOnly)\n\n def get_serializer_class(self):\n if self.action == 'retrieve':\n return BookDetailSerializer\n return BookBaseSerializer\n" }, { "alpha_fraction": 0.6686747074127197, "alphanum_fraction": 0.6686747074127197, "avg_line_length": 27.869565963745117, "blob_id": "34f74ed62d64f0a8c09d1883da00c1acdd40e558", "content_id": "fc1054b9d2e0fb89c0ef69fa2be6f581e7f2f0c2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1328, "license_type": "no_license", "max_line_length": 98, "num_lines": 46, "path": "/book/serializers.py", "repo_name": "zubrzubr/learnenjoy", "src_encoding": "UTF-8", "text": "from rest_framework import serializers\n\nfrom book.models import Book, Author, Genre\n\n\nclass AuthorSerializer(serializers.ModelSerializer):\n \"\"\"\n Serializer for authors. Need for books serializers to present m to m relation and for\n author's api point.\n \"\"\"\n class Meta:\n model = Author\n fields = ('first_name', 'last_name', 'bio')\n\n\nclass GenreSerializer(serializers.ModelSerializer):\n \"\"\"\n Serializer for genres. Need for books serializers to present m to m relation and for\n author's api point.\n \"\"\"\n class Meta:\n model = Genre\n fields = ('title', 'description')\n\n\nclass BookBaseSerializer(serializers.ModelSerializer):\n \"\"\"\n Base serializer for books. Presents id, title and authors.\n \"\"\"\n owner = serializers.HiddenField(default=serializers.CurrentUserDefault())\n authors = AuthorSerializer(read_only=True, many=True)\n\n class Meta:\n model = Book\n fields = ('id', 'title', 'authors', 'owner')\n\n\nclass BookDetailSerializer(BookBaseSerializer):\n \"\"\"\n Serializer for details view for books. 
Presents data with related fields (authors and genres).\n \"\"\"\n genres = GenreSerializer(read_only=True, many=True)\n\n class Meta:\n model = Book\n fields = ('id', 'title', 'page_count', 'description', 'authors', 'genres')\n" }, { "alpha_fraction": 0.75, "alphanum_fraction": 0.75, "avg_line_length": 19.30769157409668, "blob_id": "86cb8e85bb4a28a2d80878050017724f25c8bceb", "content_id": "aa2f0a508c6e98c2158512d57b7d975c9f7a397a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 264, "license_type": "no_license", "max_line_length": 58, "num_lines": 13, "path": "/book/urls.py", "repo_name": "zubrzubr/learnenjoy", "src_encoding": "UTF-8", "text": "from django.conf.urls import url, include\nfrom rest_framework import routers\n\nfrom book.views import BooksViewSet\n\n\nrouter = routers.DefaultRouter()\nrouter.register(r'books', BooksViewSet, base_name='books')\n\n\nurlpatterns = [\n url(r'', include(router.urls)),\n]\n" }, { "alpha_fraction": 0.7269076108932495, "alphanum_fraction": 0.7269076108932495, "avg_line_length": 23.899999618530273, "blob_id": "c456e7ded53ae05a9fe957c6d1445019836f39b7", "content_id": "ad86e8b65d94c154eb7e8560b3973fc23fb7c663", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 249, "license_type": "no_license", "max_line_length": 61, "num_lines": 10, "path": "/custom_user/permissions.py", "repo_name": "zubrzubr/learnenjoy", "src_encoding": "UTF-8", "text": "from common.permissions import BaseIsOwnerOrReadOnly\n\n\nclass IsRegisteredUserOwnerOrReadonly(BaseIsOwnerOrReadOnly):\n \"\"\"\n Custom permission Class to override _get_obj method\n \"\"\"\n @staticmethod\n def _get_obj(obj):\n return obj\n" }, { "alpha_fraction": 0.6428571343421936, "alphanum_fraction": 0.6428571343421936, "avg_line_length": 16.65217399597168, "blob_id": "834c9fa1d73cd02cbad9786af408556ec00292fd", "content_id": "841e60dbb0cfa3b0cbcfab180f91ed513fdeebfc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 406, "license_type": "no_license", "max_line_length": 69, "num_lines": 23, "path": "/learntoenjoy/configs/mixins.py", "repo_name": "zubrzubr/learnenjoy", "src_encoding": "UTF-8", "text": "from configurations import values\n\n\nclass DevMixin(object):\n DEBUG = values.BooleanValue(True)\n\n\nclass TestingMixin(object):\n TESTING = True\n LOGGING = {}\n CACHES = {\n 'default': {\n 'BACKEND': 'django.core.cache.backends.dummy.DummyCache',\n }\n }\n\n\nclass ProdMixin(object):\n EMAIL_HOST = values.Value(default='localhost')\n\n\nclass CeleryProdMixin(object):\n pass\n" } ]
44
fredpallen/santorini
https://github.com/fredpallen/santorini
a6ff8e63476b01f90cb7c1e587895635f097a176
555edc504f2f59117c2536cf628f7c934175062d
d760317222702e6defee74161edee50de88ba3ee
refs/heads/master
2020-12-02T22:44:54.420595
2017-08-02T15:20:22
2017-08-02T15:20:22
96,176,396
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6329113841056824, "alphanum_fraction": 0.6708860993385315, "avg_line_length": 38.5, "blob_id": "2124bae316712b8a469f05589e1746ceb84521a4", "content_id": "80d4a1ecf86357fab6d14ab4ccce7c80ce52c69f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 79, "license_type": "no_license", "max_line_length": 66, "num_lines": 2, "path": "/src/build.sh", "repo_name": "fredpallen/santorini", "src_encoding": "UTF-8", "text": "#!/bin/bash\ng++ -std=c++11 -O3 -o santorini -Wall -Wextra -Werror santorini.cc\n" }, { "alpha_fraction": 0.49539029598236084, "alphanum_fraction": 0.5470190644264221, "avg_line_length": 31.540000915527344, "blob_id": "f075ff03859d7e17e89444815e4f1efc83c3efe6", "content_id": "6c3e129b6e714b87e87e70c08f4f701d1106adfa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1627, "license_type": "no_license", "max_line_length": 79, "num_lines": 50, "path": "/src/starting_positions.py", "repo_name": "fredpallen/santorini", "src_encoding": "UTF-8", "text": "import itertools\n\nBOARD_WIDTH = 5\nCELLS = tuple([(x, y) for x in range(BOARD_WIDTH) for y in range(BOARD_WIDTH)])\n\ndef rot90(p):\n \"\"\"Rotate a position tuple (x, y) 90 degrees.\"\"\"\n x, y = p\n return (BOARD_WIDTH - 1 - y, x)\n\ndef reflect(p):\n \"\"\"Reflect a position tuple (x, y).\"\"\"\n x, y = p\n return (BOARD_WIDTH - 1 - x, BOARD_WIDTH - 1 - y)\n\ndef rot90_all(p):\n \"\"\"Rotate a tuple of pawn position tuples by 90 degrees.\"\"\"\n (p11, p12), (p21, p22) = p\n return (tuple(sorted([rot90(p11), rot90(p12)])),\n tuple(sorted([rot90(p21), rot90(p22)])))\n\ndef reflect_all(p):\n \"\"\"Reflect a tuple of pawn positions.\"\"\"\n (p11, p12), (p21, p22) = p\n return (tuple(sorted([reflect(p11), reflect(p12)])),\n tuple(sorted([reflect(p21), reflect(p22)])))\n\ngenerators = set()\npositions = set()\n\nfor p in itertools.permutations(CELLS, r=4):\n position = (\n tuple(sorted([p[0], p[1]])),\n tuple(sorted([p[2], p[3]])))\n# print 'position =', position\n# print 'generator count =', len(generators)\n if position not in positions:\n print (\n '{p[0][0][0]} {p[0][0][1]} {p[0][1][0]} {p[0][1][1]} '\n '{p[1][0][0]} {p[1][0][1]} {p[1][1][0]} {p[1][1][1]}'\n .format(p=position))\n generators.add(position)\n for i in range(4):\n# print 'position =', position\n positions.add(position)\n# print 'reflected position =', reflect_all(position)\n positions.add(reflect_all(position))\n position = rot90_all(position)\n\n#print 'Number of generators =', len(generators)\n" }, { "alpha_fraction": 0.5246913433074951, "alphanum_fraction": 0.5362217426300049, "avg_line_length": 27.65183448791504, "blob_id": "978727aa43616ba462d8921766c56b17829c5aa1", "content_id": "a9f9a7f3d930caf0ece9c0da77b926794c04e497", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 25758, "license_type": "no_license", "max_line_length": 80, "num_lines": 899, "path": "/src/santorini.cc", "repo_name": "fredpallen/santorini", "src_encoding": "UTF-8", "text": "#include <algorithm>\n#include <cassert>\n#include <chrono>\n#include <cmath>\n#include <cstdio>\n#include <cstring>\n#include <fstream>\n#include <functional>\n#include <iostream>\n#include <limits>\n#include <random>\n#include <tuple>\n#include <unordered_map>\n#include <unordered_set>\n#include <vector>\n\nusing namespace std;\n\n// The board is a 5x5 square of cells.\nconstexpr int BOARD_WIDTH = 5;\n\n// Each player has 2 
pawns.\nconstexpr int PAWN_COUNT = 2;\n\n// Maximum height for each cell.\nconstexpr int MAX_HEIGHT = 4;\n\n// Each pawn could have 8 places to move and then 8 places to build;\nconstexpr int MAX_LEGAL_MOVES = PAWN_COUNT * 8 * 8;\n\n// A vector with a maximum length of N.\n//\n// It doesn't do checks for you though, so it's up to you not to overfill it.\ntemplate <typename T, int N> class SmallVec {\npublic:\n SmallVec() : length_(0) {}\n\n void push_back(const T &val) {\n values_[length_] = val;\n ++length_;\n }\n\n T &operator[](int n) { return values_[n]; }\n const T &operator[](int n) const { return values_[n]; }\n T *begin() { return values_; }\n T *end() { return values_ + length_; }\n const T *begin() const { return values_; }\n const T *end() const { return values_ + length_; }\n int size() const { return length_; }\n\nprivate:\n int length_;\n T values_[N];\n};\n\nstruct Position {\n int x;\n int y;\n\n Position() {}\n Position(int x, int y) : x(x), y(y) {}\n\n bool operator==(const Position &that) const {\n return that.x == x && that.y == y;\n }\n\n bool operator!=(const Position &that) const { return !(*this == that); }\n};\n\nstruct State {\n int player;\n Position position[2][PAWN_COUNT]; // First index is player.\n int height[BOARD_WIDTH][BOARD_WIDTH]; // First index is y, second is x.\n\n bool operator==(const State &that) const {\n return 0 == memcmp(this, &that, sizeof(that));\n }\n\n int get_height(const Position &p) const { return height[p.y][p.x]; }\n\n int get_height(int player, int pawn) const {\n Position p = position[player][pawn];\n return height[p.y][p.x];\n }\n\n int increment_height(const Position &p) { return ++height[p.y][p.x]; }\n\n bool is_pawn_at(const Position &p) const {\n for (int player = 0; player < 2; ++player) {\n for (int pawn = 0; pawn < PAWN_COUNT; ++pawn) {\n if (position[player][pawn] == p) {\n return true;\n }\n }\n }\n return false;\n }\n\n bool is_blocked(const Position &p) const {\n return height[p.y][p.x] == MAX_HEIGHT || is_pawn_at(p);\n }\n\n bool heights_can_happen_given(const State &s) const {\n for (int y = 0; y < BOARD_WIDTH; ++y) {\n for (int x = 0; x < BOARD_WIDTH; ++x) {\n if (s.height[y][x] < height[y][x]) {\n return false;\n }\n }\n }\n return true;\n }\n};\n\nstruct Play {\n int pawn;\n Position end;\n Position build;\n\n bool operator==(const Play &that) const {\n return pawn == that.pawn && end == that.end && build == that.build;\n }\n};\n\nusing Plays = SmallVec<Play, MAX_LEGAL_MOVES>;\n\nstruct Counts {\n double wins;\n double plays;\n\n Counts() : wins(0), plays(0) {}\n Counts(double wins, double plays) : wins(wins), plays(plays) {}\n};\n\nusing Neighbors = SmallVec<Position, 8>;\n\nnamespace std {\n// We need this so we can use State as the key in an unordered_map.\ntemplate <> struct hash<State> {\n size_t operator()(const State &state) const {\n hash<int> int_hash;\n size_t result = int_hash(state.player);\n for (int player = 0; player < 2; ++player) {\n for (int pawn = 0; pawn < PAWN_COUNT; ++pawn) {\n result = (result << 1) ^ int_hash(state.position[player][pawn].x);\n result = (result << 1) ^ int_hash(state.position[player][pawn].y);\n }\n }\n for (int x = 0; x < BOARD_WIDTH; ++x) {\n for (int y = 0; y < BOARD_WIDTH; ++y) {\n result = (result << 1) ^ int_hash(state.height[y][x]);\n }\n }\n return result;\n }\n};\n} // namespace std\n\n// Finds the neighboring positions of a given position.\nNeighbors get_neighbors(const Position &p) {\n Neighbors results;\n for (int dy = -1; dy < 2; ++dy) {\n for (int dx = -1; dx < 2; 
++dx) {\n int x = p.x + dx;\n int y = p.y + dy;\n if ((dx || dy) && x >= 0 && y >= 0 && x < BOARD_WIDTH &&\n y < BOARD_WIDTH) {\n results.push_back(Position(x, y));\n }\n }\n }\n return results;\n}\n\nvoid print_state(const State &state) {\n cout << \"Next player = \" << state.player << \"\\n\";\n char screen[11][26];\n for (int y = 0; y < 11; ++y) {\n for (int x = 0; x < 26; ++x) {\n screen[y][x] = ' ';\n }\n }\n for (int y = 0; y < 5; ++y) {\n for (int x = 0; x < 5; ++x) {\n screen[2 * y][5 * x] = '+';\n screen[2 * y][5 * x + 1] = '-';\n screen[2 * y][5 * x + 2] = '-';\n screen[2 * y][5 * x + 3] = '-';\n screen[2 * y][5 * x + 4] = '-';\n screen[2 * y + 1][5 * x] = '|';\n screen[2 * y + 1][5 * x + 1] = '0' + state.height[y][x];\n }\n }\n for (int player = 0; player < 2; ++player) {\n for (int pawn = 0; pawn < 2; ++pawn) {\n Position p = state.position[player][pawn];\n screen[2 * p.y + 1][5 * p.x + 2] = ':';\n screen[2 * p.y + 1][5 * p.x + 3] = player ? 'b' : 'a';\n screen[2 * p.y + 1][5 * p.x + 4] = '0' + pawn;\n }\n }\n for (int y = 0; y < 11; ++y) {\n for (int x = 0; x < 26; ++x) {\n cout << screen[y][x];\n }\n cout << \"\\n\";\n }\n}\n\nState get_start_state() {\n State state;\n memset(&state, 0, sizeof(state));\n state.position[0][0] = Position(0, 0);\n state.position[0][1] = Position(4, 4);\n state.position[1][0] = Position(0, 4);\n state.position[1][1] = Position(4, 0);\n return state;\n}\n\nState get_next_state(const State &state, const Play &play) {\n State result(state);\n result.player = 1 - state.player;\n result.position[state.player][play.pawn] = play.end;\n result.increment_height(play.build);\n return result;\n}\n\nbool has_legal_play(const State &state) {\n for (int pawn = 0; pawn < PAWN_COUNT; ++pawn) {\n Position start = state.position[state.player][pawn];\n for (Position end : get_neighbors(start)) {\n int height_change = state.get_height(end) - state.get_height(start);\n if (state.is_blocked(end) || height_change > 1) {\n continue;\n }\n return true;\n }\n }\n return false;\n}\n\nPlays get_legal_plays(const State &state) {\n Plays plays;\n for (int pawn = 0; pawn < PAWN_COUNT; ++pawn) {\n Position start = state.position[state.player][pawn];\n for (Position end : get_neighbors(start)) {\n int height_change = state.get_height(end) - state.get_height(start);\n if (state.is_blocked(end) || height_change > 1) {\n continue;\n }\n Play play;\n play.pawn = pawn;\n play.end = end;\n play.build = start;\n plays.push_back(play);\n for (Position build : get_neighbors(end)) {\n if (state.is_blocked(build)) {\n continue;\n }\n play.build = build;\n plays.push_back(play);\n }\n }\n }\n return plays;\n}\n\nint get_winner(const State &state) {\n for (int player = 0; player < 2; ++player) {\n for (int pawn = 0; pawn < PAWN_COUNT; ++pawn) {\n Position p = state.position[player][pawn];\n if (state.get_height(p) == MAX_HEIGHT - 1) {\n return player;\n }\n }\n }\n if (!has_legal_play(state)) {\n return 1 - state.player;\n }\n return -1;\n}\n\n// Simple AI that looks ahead to the opponent's next move.\nclass SimplePlayer {\npublic:\n SimplePlayer(unsigned int seed) : rng_(seed) {}\n\n int select_move(const State &state, const Plays &plays) {\n int obvious = get_obvious_move(state, plays);\n if (obvious >= 0) {\n return obvious;\n }\n\n auto blunders = get_blunders(state, plays);\n if (blunders.size() == plays.size()) {\n // All the moves are losers, so just pick the first one,\n // you loser.\n return 0;\n }\n\n // Choose at random from among the non-losing moves.\n 
std::uniform_int_distribution<int> uni(0,\n plays.size() - blunders.size() - 1);\n int rand = uni(rng_);\n int skip = rand;\n int base = 0;\n for (int blunder : blunders) {\n if (base + skip < blunder) {\n return base + skip;\n }\n skip -= blunder - base;\n base = blunder + 1;\n }\n\n if (base + skip >= plays.size()) {\n // It shouldn't be possible to get here.\n std::fprintf(stderr,\n \"Couldn't find %d winners from \"\n \"%d moves minus %d losers\\n\",\n rand, plays.size(), blunders.size());\n std::exit(EXIT_FAILURE);\n }\n\n return base + skip;\n }\n\nprotected:\n int get_obvious_move(const State &state, const Plays &plays) {\n // First see if any of the moves wins the game.\n // If so, select that move.\n for (int i = 0; i < plays.size(); ++i) {\n Play play = plays[i];\n if (state.get_height(play.end) == MAX_HEIGHT - 1) {\n return i;\n }\n }\n\n // Check if a single move stops the other player from winning.\n for (int pawn = 0; pawn < PAWN_COUNT; ++pawn) {\n Position them = state.position[1 - state.player][pawn];\n if (state.get_height(them) == MAX_HEIGHT - 2) {\n // This pawn is at the right height to win on the next move.\n for (Position end : get_neighbors(them)) {\n if (state.get_height(end) == MAX_HEIGHT - 1) {\n // This move will win the game for the opponent,\n // so try to build here. We know we can't move here\n // because we checked that above.\n int stopper_index = -1;\n bool stopper_seen = false;\n for (int i = 0; i < plays.size(); ++i) {\n if (plays[i].build == end) {\n if (stopper_seen) {\n // More than one way to stop them, so\n // it's not obvious what to do.\n return -1;\n }\n stopper_seen = true;\n stopper_index = i;\n }\n }\n if (stopper_seen) {\n // This stops this particular winning move for\n // the opponent, but the opponent may have other\n // winning moves. In any case, we can only stop\n // one so do not bother checking for others.\n return stopper_index;\n } else {\n // The other user is going to win and we have no\n // way to stop it, so just give up.\n return 0;\n }\n }\n }\n }\n }\n // No obvious move found, return -1.\n return -1;\n }\n\n SmallVec<int, MAX_LEGAL_MOVES> get_blunders(const State &state,\n const Plays &plays) {\n SmallVec<int, MAX_LEGAL_MOVES> blunders;\n for (int i = 0; i < plays.size(); ++i) {\n Position build = plays[i].build;\n if (state.get_height(build) == MAX_HEIGHT - 2) {\n // This makes a tower of winning height,\n // make sure no opponent is nearby.\n for (int pawn = 0; pawn < 2; ++pawn) {\n Position them = state.position[1 - state.player][pawn];\n int dx = them.x - build.x;\n int dy = them.y - build.y;\n int d2 = dx * dx + dy * dy; // Squared horizontal distance.\n int height = state.get_height(them);\n if (d2 <= 2 && height == MAX_HEIGHT - 2) {\n // Opponent is close enough, vertically and\n // horizontally, so do not choose this move.\n blunders.push_back(i);\n break;\n }\n }\n }\n }\n return blunders;\n }\n\n std::mt19937 rng_;\n};\n\ntemplate <bool DO_IMMEDIATE_WIN_CHECK> class MonteCarlo {\npublic:\n MonteCarlo(chrono::milliseconds time_limit) : time_limit_(time_limit) {}\n\n int select_move(const State &state, const Plays &plays) {\n Play play = get_next_play(state);\n for (int i = 0; i < plays.size(); ++i) {\n if (play == plays[i]) {\n return i;\n }\n }\n cout << \"No valid move selected. 
Picking the first.\\n\";\n return -1;\n }\n\n Play get_next_play(const State &state) {\n max_depth_ = 0;\n Plays legal = get_legal_plays(state);\n\n if (!legal.size()) {\n Play play;\n play.pawn = -1; // Negative pawn means error.\n return play;\n } else if (legal.size() == 1) {\n return legal[0];\n }\n\n if (DO_IMMEDIATE_WIN_CHECK) {\n for (Play play : legal) {\n if (state.get_height(play.end) == MAX_HEIGHT - 1) {\n return play;\n }\n }\n }\n\n int games = 0;\n const auto start_time = chrono::steady_clock::now();\n while (chrono::steady_clock::now() - start_time < time_limit_) {\n run_simulation(state);\n games++;\n }\n\n cout << \"Game count = \" << games << \"\\n\";\n\n Play best_play;\n State best_next_state = state;\n double best_win_percent = -1;\n for (const Play &play : legal) {\n State next_state = get_next_state(state, play);\n auto iter = state_counts_.find(next_state);\n double win_percent = iter == state_counts_.end()\n ? 0.0\n : iter->second.wins / iter->second.plays;\n if (win_percent > best_win_percent) {\n best_win_percent = win_percent;\n best_play = play;\n best_next_state = next_state;\n }\n }\n cout << \"max depth = \" << max_depth_ << \"\\n\";\n cout << \"win percent = \" << best_win_percent << \"\\n\";\n erase_early_states(best_next_state);\n return best_play;\n }\n\nprivate:\n void run_simulation(const State &state) {\n unordered_set<State> visited_states;\n\n bool expand = true;\n int winner = -1;\n State this_state = state;\n for (int t = 0;; ++t) {\n Plays legal = get_legal_plays(this_state);\n\n if (DO_IMMEDIATE_WIN_CHECK) {\n // Check for an immediate winning move.\n for (int pawn = 0; pawn < 2; ++pawn) {\n Position p = this_state.position[this_state.player][pawn];\n if (this_state.get_height(p) == MAX_HEIGHT - 2) {\n // This pawn is just below the winning height.\n //\n // If it has a neighbor at winning height, then there\n // is a winning move for the current player.\n for (Position n : get_neighbors(p)) {\n if (this_state.get_height(n) == MAX_HEIGHT - 1) {\n winner = this_state.player;\n break;\n }\n }\n }\n }\n if (winner >= 0) {\n // Mark all moves ending on MAX_HEIGHT - 1 as visited.\n for (Play play : legal) {\n if (this_state.get_height(play.end) == MAX_HEIGHT - 1) {\n visited_states.emplace(get_next_state(this_state, play));\n }\n }\n break;\n }\n // If we get here, no immediate winning move was found.\n }\n\n double total = 0.0;\n bool all_seen = true;\n State next_state;\n SmallVec<Counts, MAX_LEGAL_MOVES> play_counts;\n for (const Play &play : legal) {\n next_state = get_next_state(this_state, play);\n auto iter = state_counts_.find(next_state);\n if (iter == state_counts_.end()) {\n all_seen = false;\n break;\n }\n play_counts.push_back(iter->second);\n total += iter->second.plays;\n }\n if (all_seen) {\n double log_total = log(total);\n double best_score = -1;\n for (int i = 0; i < legal.size(); ++i) {\n Play play = legal[i];\n Counts counts = play_counts[i];\n State s = get_next_state(this_state, play);\n double score =\n counts.wins / counts.plays + sqrt(2 * log_total / counts.plays);\n if (score > best_score) {\n best_score = score;\n next_state = s;\n }\n }\n }\n\n this_state = next_state;\n\n if (expand && !all_seen) {\n expand = false;\n Counts counts(0, 0);\n state_counts_.emplace(this_state, counts);\n if (t > max_depth_) {\n max_depth_ = t;\n }\n }\n\n visited_states.emplace(this_state);\n\n winner = get_winner(this_state);\n if (winner >= 0) {\n break;\n }\n }\n\n for (const State &visited_state : visited_states) {\n auto iter = 
state_counts_.find(visited_state);\n if (iter == state_counts_.end()) {\n continue;\n }\n iter->second.plays++;\n if (visited_state.player != winner) {\n iter->second.wins++;\n }\n }\n }\n\n void erase_early_states(const State &state) {\n cout << \"Before erase: state_counts_.size() == \" << state_counts_.size()\n << \".\\n\";\n for (auto iter = state_counts_.begin(); iter != state_counts_.end();) {\n if (!iter->first.heights_can_happen_given(state)) {\n iter = state_counts_.erase(iter);\n } else {\n ++iter;\n }\n }\n cout << \"After erase: state_counts_.size() == \" << state_counts_.size()\n << \".\\n\";\n }\n\n chrono::milliseconds time_limit_;\n int max_depth_;\n unordered_map<State, Counts> state_counts_;\n};\n\n// Plays the game with the state as the starting state and the scratch space.\n//\n// Returns the index of the winning player (either 0 or 1).\ntemplate <bool verbose = false, typename P0, typename P1>\nint play_game(State *state, P0 *p0, P1 *p1) {\n for (int move_number = 0;; ++move_number) {\n if (verbose) {\n printf(\"Move %2d\\n\", move_number);\n }\n Plays plays = get_legal_plays(*state);\n if (!plays.size()) {\n // Next player loses because they have no legal moves.\n if (verbose) {\n printf(\"Player %d wins because player %d has no legal moves.\\n\",\n 1 - state->player, state->player);\n }\n return 1 - state->player;\n }\n int index = state->player ? p1->select_move(*state, plays)\n : p0->select_move(*state, plays);\n Play play = plays[index];\n if (state->get_height(play.end) == MAX_HEIGHT - 1) {\n // Next player wins because they stepped to the winning height.\n int winner = state->player;\n if (verbose) {\n printf(\"Player %d wins by stepping onto (%d,%d)\\n\", state->player,\n play.end.x, play.end.y);\n *state = get_next_state(*state, play);\n print_state(*state);\n }\n return winner;\n }\n if (verbose) {\n printf(\"Player %d moves pawn %d to (%d,%d) and builds at (%d,%d)\\n\",\n state->player, play.pawn, play.end.x, play.end.y, play.build.x,\n play.build.y);\n }\n // Update board due to selected move.\n *state = get_next_state(*state, play);\n if (verbose) {\n print_state(*state);\n }\n }\n}\n\nclass SimpleRolloutPlayer : public SimplePlayer {\npublic:\n SimpleRolloutPlayer(std::chrono::milliseconds time_limit, unsigned int seed)\n : SimplePlayer(seed), time_limit_(time_limit) {}\n\n int select_move(const State &state, const Plays &plays) {\n std::chrono::system_clock clock;\n const auto start_time = clock.now();\n\n int obvious = get_obvious_move(state, plays);\n if (obvious >= 0) {\n return obvious;\n }\n\n auto blunders = get_blunders(state, plays);\n if (blunders.size() == plays.size()) {\n // All the moves are losers, so just pick the first one,\n // you loser.\n return 0;\n }\n\n // Collect moves that aren't blunders.\n SmallVec<Node, MAX_LEGAL_MOVES> nodes;\n int blunder_index = 0;\n for (int i = 0; i < plays.size(); ++i) {\n if (blunders.size() && i == blunders[blunder_index]) {\n ++blunder_index;\n continue;\n }\n Node node;\n node.index = i;\n node.wins = 0;\n node.visits = 0;\n nodes.push_back(node);\n }\n\n uniform_int_distribution<unsigned int> seed_dist;\n SimplePlayer player_object(seed_dist(rng_));\n double rollout_count = 0;\n for (int n = 0;; n = (n + 1) % nodes.size()) {\n // Keep going until time expires.\n if (clock.now() - start_time > time_limit_) {\n break;\n }\n // Play games from here using SimplePlayer for both sides.\n Play play = plays[nodes[n].index];\n State next_state = get_next_state(state, play);\n for (int trial = 0; trial < 
100;\n ++trial, ++rollout_count, ++nodes[n].visits) {\n State rollout_state = next_state;\n int winner = play_game(&rollout_state, &player_object, &player_object);\n nodes[n].wins += (winner == state.player) ? 1 : 0;\n }\n }\n std::printf(\"Rollout count = %.0f\\n\", rollout_count);\n\n int best_index = -1;\n double best_ratio = std::numeric_limits<double>::lowest();\n for (const auto &node : nodes) {\n double ratio = static_cast<double>(node.wins) / node.visits;\n if (ratio > best_ratio) {\n best_ratio = ratio;\n best_index = node.index;\n }\n }\n std::printf(\"Best ratio = %f\\n\", best_ratio);\n return best_index;\n }\n\nprivate:\n struct Node {\n int index;\n int wins;\n int visits;\n };\n\n std::chrono::milliseconds time_limit_;\n};\n\nclass HumanPlayer {\npublic:\n int select_move(const State &state, const Plays &plays) {\n // print_state(state);\n string player_label = state.player ? \"b\" : \"a\";\n string expected_pawns[] = {player_label + \"0\", player_label + \"1\"};\n int pawn;\n Position end;\n Position build;\n while (true) {\n string input;\n\n // Get pawn.\n pawn = 0;\n while (true) {\n cout << \"Which pawn will you move (\" << expected_pawns[0] << \" or \"\n << expected_pawns[1] << \")\\n> \";\n cin >> input;\n if (input != expected_pawns[0] && input != expected_pawns[1]) {\n cout << \"Invalid pawn selection, please enter \" << expected_pawns[0]\n << \" or \" << expected_pawns[1] << \".\\n\";\n continue;\n }\n if (input == expected_pawns[1]) {\n pawn = 1;\n }\n break;\n }\n bool valid_pawn = false;\n for (int i = 0; i < plays.size(); ++i) {\n if (plays[i].pawn == pawn) {\n valid_pawn = true;\n break;\n }\n }\n if (!valid_pawn) {\n cout << \"Pawn \" << expected_pawns[pawn] << \" has no valid moves, \"\n << \"please select the other pawn.\\n\";\n continue;\n }\n\n // Get end.\n Position start = state.position[state.player][pawn];\n while (true) {\n cout << \"Which direction will you move\\n> \";\n char direction;\n cin >> direction;\n end = get_new_position(start, direction);\n if (end.x < 0) {\n cout << \"Invalid move direction\\n\";\n continue;\n }\n bool valid_move = false;\n for (int i = 0; i < plays.size(); ++i) {\n if (plays[i].pawn == pawn && plays[i].end == end) {\n valid_move = true;\n break;\n }\n }\n if (!valid_move) {\n cout << \"That move is not legal for that pawn. \"\n \"Try again.\\n\";\n continue;\n }\n break;\n }\n\n // Get build.\n while (true) {\n cout << \"Which direction will you build\\n> \";\n char direction;\n cin >> direction;\n build = get_new_position(end, direction);\n if (build.x < 0) {\n cout << \"Invalid build direction\\n\";\n continue;\n }\n for (int i = 0; i < plays.size(); ++i) {\n if (plays[i].pawn == pawn && plays[i].end == end &&\n plays[i].build == build) {\n return i;\n }\n }\n cout << \"That build is not legal for that pawn \"\n << \"and that move. 
Try again.\\n\";\n }\n }\n }\n\nprivate:\n Position get_new_position(const Position &start, char entry) {\n Position end;\n switch (entry) {\n case '1': // fall-through.\n case 'z':\n end.x = start.x - 1;\n end.y = start.y + 1;\n break;\n case '2': // fall-through.\n case 'x':\n end.x = start.x;\n end.y = start.y + 1;\n break;\n case '3': // fall-through.\n case 'c':\n end.x = start.x + 1;\n end.y = start.y + 1;\n break;\n case '4': // fall-through.\n case 'a':\n end.x = start.x - 1;\n end.y = start.y;\n break;\n case '6': // fall-through.\n case 'd':\n end.x = start.x + 1;\n end.y = start.y;\n break;\n case '7': // fall-through.\n case 'q':\n end.x = start.x - 1;\n end.y = start.y - 1;\n break;\n case '8': // fall-through.\n case 'w':\n end.x = start.x;\n end.y = start.y - 1;\n break;\n case '9': // fall-through.\n case 'e':\n end.x = start.x + 1;\n end.y = start.y - 1;\n break;\n default:\n end.x = -1;\n end.y = -1;\n }\n return end;\n }\n};\n\nvoid ref_games(unsigned int seed) {\n printf(\"Seed = %u\\n\", seed);\n mt19937 rng(seed);\n\n int counts[2] = {0, 0};\n MonteCarlo<true> player0(chrono::seconds(10));\n MonteCarlo<true> player1(chrono::seconds(10));\n for (int trial = 0; trial < 1; ++trial) {\n State state = get_start_state();\n print_state(state);\n printf(\"\\n\");\n int winner = play_game<true>(&state, &player0, &player1);\n ++counts[winner];\n printf(\"Trial %3d won by player %d (%d to %d).\\n\", trial, winner, counts[0],\n counts[1]);\n }\n printf(\"\\nPlayer 0 wins %d times, player 1 wins %d times.\\n\", counts[0],\n counts[1]);\n}\n\nvoid evaluate_starting_positions() {\n fstream fs(\"starting_positions.txt\", fstream::in);\n for (int i = 0; fs; ++i) {\n State state = get_start_state();\n fs\n >> state.position[0][0].x >> state.position[0][0].y\n >> state.position[0][1].x >> state.position[0][1].y\n >> state.position[1][0].x >> state.position[1][0].y\n >> state.position[1][1].x >> state.position[1][1].y;\n\n MonteCarlo<true> player(chrono::minutes(2));\n // TODO: Get victory percentage.\n player.get_next_play(state);\n }\n}\n\nint main() {\n// random_device random_device;\n// unsigned int seed = argc > 1 ? stoul(argv[1]) : random_device();\n\n evaluate_starting_positions();\n}\n" } ]
3
jorender/midea-msmart
https://github.com/jorender/midea-msmart
62355bbc67d3d8515e94eaa57a74c701869dc8ef
151e6d43c811b3362a19995a336b520fc42c0f98
297e6c06f8f3b5b7aef8d23301af1ab5cfdd8b0b
refs/heads/master
2022-12-03T22:28:11.924825
2020-07-26T03:33:28
2020-07-26T03:33:28
282,562,573
0
0
null
2020-07-26T02:43:46
2020-07-25T12:46:45
2020-07-07T05:34:54
null
[ { "alpha_fraction": 0.4812212884426117, "alphanum_fraction": 0.555525541305542, "avg_line_length": 33.25925827026367, "blob_id": "33bd6571c432e3bbd5e6da4586f715a4952edf5f", "content_id": "646614f965b45c92de8d6aed665ca16b0b1e8a56", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3701, "license_type": "permissive", "max_line_length": 135, "num_lines": 108, "path": "/msmart/cli.py", "repo_name": "jorender/midea-msmart", "src_encoding": "UTF-8", "text": "\n# -*- coding: UTF-8 -*-\nimport click\nimport logging\nimport socket\nimport sys\nfrom msmart.security import security\nfrom msmart.device import convert_device_id_int\nfrom msmart.device import device as midea_device\n\nif sys.version_info < (3, 5):\n print(\n \"To use this script you need python 3.5 or newer, got %s\" % (\n sys.version_info,)\n )\n sys.exit(1)\n\nVERSION = '0.1.20'\n\n_LOGGER = logging.getLogger(__name__)\n\nBROADCAST_MSG = bytearray([\n 0x5a, 0x5a, 0x01, 0x11, 0x48, 0x00, 0x92, 0x00,\n 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\n 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\n 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\n 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\n 0x7f, 0x75, 0xbd, 0x6b, 0x3e, 0x4f, 0x8b, 0x76,\n 0x2e, 0x84, 0x9c, 0x6e, 0x57, 0x8d, 0x65, 0x90,\n 0x03, 0x6e, 0x9d, 0x43, 0x42, 0xa5, 0x0f, 0x1f,\n 0x56, 0x9e, 0xb8, 0xec, 0x91, 0x8e, 0x92, 0xe5\n])\n\n\[email protected]()\[email protected](\"-d\", \"--debug\", default=False, count=True)\n# @click.pass_context\ndef discover(debug: int):\n \"\"\"Discover Midea Devices with UDP Broadcast\"\"\"\n if debug:\n logging.basicConfig(level=logging.DEBUG)\n _LOGGER.info(\"Debug mode active\")\n else:\n logging.basicConfig(level=logging.INFO)\n\n _security = security()\n\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)\n sock.settimeout(5)\n found_devices = {}\n _LOGGER.info(\"msmart version: {}\".format(VERSION))\n _LOGGER.info(\n \"Discovering devices with UDP Broadcast, press CTRL-C to quit...\")\n for i in range(10):\n try:\n sock.sendto(BROADCAST_MSG, (\"255.255.255.255\", 6445))\n while True:\n data, addr = sock.recvfrom(512)\n m_ip = addr[0]\n m_id, m_type, m_sn, m_ssid = 'unknown', 'unknown', 'unknown', 'unknown'\n if len(data) >= 104 and (data[:2].hex() == '5a5a' or data[8:10].hex() == '5a5a') and m_ip not in found_devices:\n _LOGGER.info(\"Midea Local Data {} {}\".format(m_ip, data.hex()))\n if data[8:10].hex() == '5a5a':\n data = data[8:-16]\n m_id = convert_device_id_int(data[20:26].hex())\n found_devices[m_ip] = m_id\n encrypt_data = data[40:-16]\n reply = _security.aes_decrypt(encrypt_data)\n\n m_sn = reply[14:14+26].decode(\"utf-8\")\n # ssid like midea_xx_xxxx net_xx_xxxx\n m_ssid = reply[14+27:14+27+13].decode(\"utf-8\")\n m_type = m_ssid.split('_')[1]\n \n m_support = support_test(m_ip, int(m_id))\n\n _LOGGER.info(\n \"*** Found a {} '0x{}' at {} - id: {} - sn: {} - ssid: {}\".format(m_support, m_type, m_ip, m_id, m_sn, m_ssid))\n elif m_ip not in found_devices:\n _LOGGER.info(\"Maybe not midea local data {} {}\".format(m_ip, data.hex()))\n\n except socket.timeout:\n continue\n except KeyboardInterrupt:\n sys.exit(0)\n\n\ndef support_test(device_ip, device_id: int):\n _device = midea_device(device_ip, device_id)\n device = _device.setup()\n device.refresh()\n if device.support:\n return 'supported'\n else:\n return 'unsupported'\n\n\ndef 
remove_duplicates(device_list: list):\n newlist = []\n for i in device_list:\n if i not in newlist:\n newlist.append(i)\n return newlist\n\n\n# if __name__ == '__main__':\n# discover()\n" } ]
1
phil-dolgolev/bigdata_2015
https://github.com/phil-dolgolev/bigdata_2015
9d447a9e8eca2de41eab9525fe43be7dda71b007
93062936fc854b234e2bdcfc2022ec0691acf65f
d9285027958dfe34f534ac466d0df84721758eee
refs/heads/master
2021-05-30T02:55:24.931394
2015-04-30T00:11:07
2015-04-30T00:11:07
44,609,123
0
1
null
null
null
null
null
[ { "alpha_fraction": 0.602280855178833, "alphanum_fraction": 0.609408438205719, "avg_line_length": 33.24390411376953, "blob_id": "f6aa8caa90198dd5916982662a1d29ba781c6120", "content_id": "59dd6d8f72c503036ed46b47fd51c24ea6575967", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1403, "license_type": "no_license", "max_line_length": 112, "num_lines": 41, "path": "/hw04/hw04util.py", "repo_name": "phil-dolgolev/bigdata_2015", "src_encoding": "UTF-8", "text": "# encoding: utf-8\n\ndef read_vertices(vertices, filename, vertexConstructor):\n with open(filename) as f:\n for l in f:\n l = l.strip()\n if len(l) == 0:\n continue\n components = l.split(\" \")\n if len(components) < 2:\n raise Exception(\"ERROR: too few components in the vertex record (expected at least 2): %s\" % l)\n docid = int(components[0])\n vertex = vertexConstructor(docid)\n vertices[docid] = vertex\n if len(components) > 2:\n vertex.value = components[2]\n\ndef read_edges(vertices, filename):\n with open(filename) as f:\n for l in f:\n l = l.strip()\n if len(l) == 0:\n continue\n components = l.split(\" \")\n outlinks = components[1]\n if outlinks != \"==\":\n docid = int(components[0])\n if not docid in vertices:\n raise Exception(\"ERROR when creating links from %d to %s: source not found\" % (docid, outlinks))\n src = vertices[docid]\n for dst_id in [int(v) for v in outlinks.split(',')]:\n if not dst_id in vertices:\n raise Exception(\"ERROR when creating a link from %d to %d: destination not found\" % (docid, dst_id))\n dst = vertices[dst_id]\n src.out_vertices.append(dst)\n\ndef read_graph(filename, vertexConstructor):\n vertices = {}\n read_vertices(vertices, filename, vertexConstructor)\n read_edges(vertices, filename)\n return vertices" }, { "alpha_fraction": 0.5998842120170593, "alphanum_fraction": 0.6311522722244263, "avg_line_length": 23.309858322143555, "blob_id": "c06ef6edbc74ef2e565cccba95311fb5264639b8", "content_id": "041c3967be2e85eb8651af4f9e42994a0f00dd77", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1758, "license_type": "no_license", "max_line_length": 158, "num_lines": 71, "path": "/class08/mr_jaccard.py", "repo_name": "phil-dolgolev/bigdata_2015", "src_encoding": "UTF-8", "text": "\t# encoding: utf-8\nimport mincemeat\n\ndef mapfn1(k, v):\n set_id, shard_id = k\n items = v\n for i in items:\n yield i, set_id\n\ndef reducefn1(k, vs):\n return vs\n\ns = mincemeat.Server() \n\ninput0 = {}\ninput0[('set1', 'shard1')] = ['a', 'c', 'e']\ninput0[('set1', 'shard2')] = ['b', 'f', 'h']\ninput0[('set2', 'shard1')] = ['a', 'f', 'g']\ninput0[('set2', 'shard2')] = ['b', 'd', 'e']\ninput0[('set2', 'shard3')] = ['c', 'i', 'j']\ninput0[('set3', 'shard1')] = ['i', 'k', 'm']\n\n# и подаем этот список на вход мапперам\ns.map_input = mincemeat.DictMapInput(input0) \ns.mapfn = mapfn1\ns.reducefn = reducefn1\n\nresults1 = s.run_server(password=\"\") \nfor key, value in sorted(results1.items()):\n print(\"%s: %s\" % (key, value) )\n\n\ndef mapfn2(item, plist):\n for d1 in plist:\n for d2 in plist:\n yield d1, d2\n\ndef reducefn2(d1, docs):\n from collections import defaultdict\n counts = defaultdict(int)\n for d2 in docs:\n counts[d2] += 1\n return counts\n\n\ns.map_input = mincemeat.DictMapInput(results1) \ns.mapfn = mapfn2\ns.reducefn = reducefn2\n\nresults2 = s.run_server(password=\"\") \nfor key, value in sorted(results2.items()):\n print(\"%s: %s\" % (key, value) )\n\ndef mapfn3(k, v):\n set_id, 
shard_id = k\n items = v\n yield set_id, len(items)\n\ndef reducefn3(set_id, vs):\n return sum(vs)\n\ns.map_input = mincemeat.DictMapInput(input0) \ns.mapfn = mapfn3\ns.reducefn = reducefn3\n\nsizes = s.run_server(password=\"\") \n\nfor set1, intersections in results2.items():\n for set2, intersect_size in intersections.items():\n union_size = sizes[set1] + sizes[set2] - intersect_size\n print \"%s, %s: INTERSECTION = %d, UNION = %d, JACCARD_SIMILARITY=%f \" % (set1, set2, intersect_size, union_size, float(intersect_size)/float(union_size))\n" },
{ "alpha_fraction": 0.6049180030822754, "alphanum_fraction": 0.6081967353820801, "avg_line_length": 22.461538314819336, "blob_id": "91bb1358207e5c5629e6647baaaf51a873794cee", "content_id": "9234a4ee4389586179a833603160afee486fd9d7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 610, "license_type": "no_license", "max_line_length": 62, "num_lines": 26, "path": "/hw02/dfs_client.py", "repo_name": "phil-dolgolev/bigdata_2015", "src_encoding": "UTF-8", "text": "import test_dfs as dfs\n\ndef get_file_content(filename):\n chunks = []\n for f in dfs.files():\n if f.name == filename:\n chunks = f.chunks\n if len(chunks) == 0:\n return\n clocs = {}\n for c in dfs.chunk_locations():\n clocs[c.id] = c.chunkserver\n\n for chunk in chunks:\n try:\n loc = clocs[chunk]\n if loc == \"\":\n raise Exception(\"ERROR: location of chunk %s is unknown\" % chunk)\n for l in dfs.get_chunk_data(loc, chunk):\n yield l[:-1]\n except StopIteration:\n pass\n\ndef write_file(filename, content):\n chunk = dfs.create_file(filename)\n dfs.write_chunk(chunk, content)\n" },
{ "alpha_fraction": 0.6771653294563293, "alphanum_fraction": 0.7454068064689636, "avg_line_length": 43.82352828979492, "blob_id": "5a58a4df4d2cbcdc328161c9ce09f9ee302ce1db", "content_id": "c10f1096c06514a68682b5b1a34813afb3bdf58a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1222, "license_type": "no_license", "max_line_length": 133, "num_lines": 17, "path": "/hw07/README.md", "repo_name": "phil-dolgolev/bigdata_2015", "src_encoding": "UTF-8", "text": "The file `raft.py` contains a skeleton of a Raft node. The node can play the roles of leader and follower.\n\n### Starting a node\n\nThis command starts, on port 8001, a follower that is in term 1 with the log 1:H,1:E,1:L\n\n python raft.py -p 8001 -t 1 -l 1:H,1:E,1:L\n \nAnd this one starts, on port 8000, a leader that is in term 3 with the log 1:H,1:E,1:L,2:L,3:O and followers on ports 8001 and 8002\n\n python raft.py -p 8000 -t 3 -l 1:H,1:E,1:L,2:L,3:O -f 8001,8002\n \nIn the provided skeleton the follower does nothing, while the leader, on startup, sends a GET request to every follower it was given and prints their responses.\n\n### What to do\n\nYour implementation must replicate the log and, when replication finishes, print the state of each follower's log to the console\n" },
{ "alpha_fraction": 0.6365403532981873, "alphanum_fraction": 0.6448007822036743, "avg_line_length": 32.73770523071289, "blob_id": "b44254af2772e24afad25024399978bb5741ffa6", "content_id": "c9558acc6b35b868846ee5f4a5ed15b3b42f5c13", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2370, "license_type": "no_license", "max_line_length": 103, "num_lines": 61, "path": "/hw04/pagerank.py", "repo_name": "phil-dolgolev/bigdata_2015", "src_encoding": "UTF-8", "text": "# encoding: utf-8\n\"\"\"pagerank.py illustrates how to use the pregel.py library, and tests\nthat the library works.\n\n\"\"\"\n\nfrom pregel import Vertex, Pregel\nfrom hw04util import *\nimport sys\n\nvertices = {}\nnum_workers = 1\nnum_iterations = 50\n\ndef main(filename):\n global vertices\n global num_vertices\n # read the graph from the file, using the PageRankVertex constructor\n vertices = read_graph(filename, PageRankVertex)\n\n # Initialize the starting pagerank distribution\n num_vertices = len(vertices)\n for v in vertices.values():\n v.value = 1.0/num_vertices\n\n # Run the computation\n pagerank_pregel(vertices.values())\n\ndef pagerank_pregel(vertices):\n \"\"\"Computes the pagerank vector associated to vertices, using\n Pregel.\"\"\"\n p = Pregel(vertices,num_workers,num_iterations)\n p.run()\n for vertex in p.vertices:\n print \"#%s: %s\" % (vertex.id, vertex.value)\n print \"Sum=%f\" % sum(v.value for v in p.vertices)\n\nclass PageRankVertex(Vertex):\n def __init__(self, id):\n Vertex.__init__(self, id, None, [])\n\n def update(self):\n global vertices\n # On superstep zero there are no incoming messages yet, so the vertex's PR is left unchanged\n if self.superstep > 0:\n # Probability of jumping here from any page + probability of arriving from linking pages\n self.value = 0.15 / num_vertices + 0.85*sum(\n [pagerank for (vertex,pagerank) in self.incoming_messages])\n if len(self.out_vertices) == 0:\n # a dangling page sends a share of its PR to every page\n outgoing_pagerank = self.value / num_vertices\n self.outgoing_messages = [(vertex,outgoing_pagerank) \n for vertex in vertices.values()] \n else:\n # a non-dangling page only to the pages it links to\n outgoing_pagerank = self.value / len(self.out_vertices)\n self.outgoing_messages = [(vertex,outgoing_pagerank) \n for vertex in self.out_vertices]\n\nif __name__ == \"__main__\":\n main(sys.argv[1])\n" },
{ "alpha_fraction": 0.7837116122245789, "alphanum_fraction": 0.7923898696899414, "avg_line_length": 82.16666412353516, "blob_id": "adbaa0f10c7900d06c3654def5a23cf31e0cba4a", "content_id": "3be35a8ac6c8adc26004c7071598f35726d355cf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2394, "license_type": "no_license", "max_line_length": 148, "num_lines": 18, "path":
"/hw03/README.md", "repo_name": "phil-dolgolev/bigdata_2015", "src_encoding": "UTF-8", "text": "### Инструкции\n1. Поставьте питоновские модули poster, mwclient и mwparserfromhell\n2. запустите серверы DFS.\n3. создайте подкаталог tmp/plist \n4. определите константу USERNAME в скриптах `mr_posting_lists.py` и `print-posting-list.py`\n5. скачайте корпус командой `python crawl-corpus.py` (результаты будут записаны в DFS)\n6. сделайте списки вхождений, запустив map-reduce `mr_posting_lists.py`. Обратите внимание, что\nон состоит из двух шагов и соответственно запускать рабочий процесс (`python mincemeat.py localhost`) надо дважды. Результаты первого шага\nзаписываются на диск в каталог `tmp/plist`, результаты второго шага записываются в DFS\n7. посмотрите на какой-нибудь список, например для терма HANA командой `python print-posting-list.py --term HANA`\n\n### Подводные камни\n1. если у вас Windows, то некоторые файлы, создаваемые первым MR в `mr_posting_lists.py` в каталоге `tmp/plists` \nмогут превысить ограничение на длину полного пути до файла (зависит от того, где расположен репозиторий на диске)\nВ таком случае поиграйтесь с константой в функции `reducefn`: `if len(k) > 100:`\n2. файлы в DFS только наращиваются (не перетираются и не удаляются), поэтому если хотите начать процесс построения индекса \n\"с чистого листа\" то не забудьте стереть физические файлы из каталогов, используемых DFS и перезапустить DFS. `rm files data/* data2/*`, запущенный \nиз `../dfs` удалит всё, например. Это также означает, что процесс скачивания корпуса или построения списков вхождений не идемпотентный.\n\n" }, { "alpha_fraction": 0.705567479133606, "alphanum_fraction": 0.7080656886100769, "avg_line_length": 36.2933349609375, "blob_id": "a61a431837be99bbaa7c2d1cb351270cd9b6d32f", "content_id": "38b07836b2849d0997b91b6519d8e5db34ef6861", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3303, "license_type": "no_license", "max_line_length": 116, "num_lines": 75, "path": "/hw02/test_dfs.py", "repo_name": "phil-dolgolev/bigdata_2015", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n# encoding: utf8\nfrom collections import namedtuple\nimport json\nimport os.path\n\ndef _json_object_hook(d): return namedtuple('X', d.keys())(*d.values())\ndef json2obj(data): return json.loads(data, object_hook=_json_object_hook)\n\ndef save_files(files):\n with open(\"data/files\", \"w\") as f:\n f.write(json.JSONEncoder().encode(list({\"name\": f.name, \"chunks\": f.chunks} for f in files)))\n\ndef save_chunk_locations(chunk_locations):\n with open(\"data/chunk_locations\", \"w\") as f:\n f.write(json.JSONEncoder().encode(list({\"id\": cl.id, \"chunkserver\": cl.chunkserver} for cl in chunk_locations)))\n\nCHUNK_SERVER_COUNT = 4\n\n# Получает от \"мастера\" список файлов и входящих в каждый файл фрагментов\n# Возвращает список объектов с полями \"name\": String и \"chunks\": String[]\n# \"name\" - это имя файла, \"chunks\" - список строковых идентификаторов \n# фрагментов в том порядке, в котором они следуют в файле\ndef files():\n with open(\"data/files\") as f:\n return json2obj(f.read())\n\n# Получает от \"мастера\" расположение фрагмента на файловых серверах.\n# Так как репликация для нашей задачи несущественна, то файловый сервер\n# у каждого фрагмента один.\n# Возвращает список объектов с полями \"id\": String и \"chunkserver\": String\n# где \"id\" - идентификатор фрагмента, \"chunkserver\" - идентификатор файлового сервера,\n# на котором фрагмент хранится\ndef 
chunk_locations():\n with open(\"data/chunk_locations\") as f:\n return json2obj(f.read())\n\nFilesRecord = namedtuple('FilesRecord', ['name', 'chunks'])\nChunkLocationRecord = namedtuple('ChunkLocations', ['id', 'chunkserver'])\n\ndef create_file(filename):\n all_files = files()\n for f in all_files:\n \tif f.name == filename:\n \t raise Exception(\"File %s already exists\" % filename)\n\n chunkserver = \"cs%d\" % (hash(filename) % CHUNK_SERVER_COUNT)\n\n chunk_id = \"%s_chunk00\" % filename.lstrip('/')\n chunk_filename = \"data/%s/%s\" % (chunkserver, chunk_id)\n if os.path.exists(chunk_filename):\n \traise Exception(\"Chunk file %s already exists\" % chunk_filename)\n all_files.append(FilesRecord(filename, [chunk_id]))\n\n clocs = chunk_locations()\n clocs.append(ChunkLocationRecord(chunk_id, chunkserver))\n\n if not os.path.exists(\"data/\" + chunkserver):\n \tos.makedirs(\"data/\" + chunkserver)\n\n save_files(all_files)\n save_chunk_locations(clocs)\n return chunk_id\n\ndef write_chunk(chunk_id, content):\n for cl in chunk_locations():\n \tif cl.id == chunk_id:\n \t with open(\"data/%s/%s\" % (cl.chunkserver, chunk_id), \"w\") as f:\n \t \tf.write(content)\n \t \treturn\n raise Exception(\"Chunk %s does not exist. Forgot to call create_file?\" % chunk_id)\n# Возвращает содержимое указанного фрагмента с указанного файлового сервера\n# в виде потока\ndef get_chunk_data(chunk_server_id, chunk_id):\n return open(\"data/%s/%s\" % (chunk_server_id, chunk_id))\n\n\n\n\n\n" }, { "alpha_fraction": 0.6776482462882996, "alphanum_fraction": 0.6832235455513, "avg_line_length": 31.883333206176758, "blob_id": "e911cf15cd2b0ca956541627c8bf6f3a41a69c4b", "content_id": "10a8635996d15e0915d9a15fd511d850022e3060", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2565, "license_type": "no_license", "max_line_length": 104, "num_lines": 60, "path": "/hw04/maxvalue.py", "repo_name": "phil-dolgolev/bigdata_2015", "src_encoding": "UTF-8", "text": "# encoding: utf-8\n\"\"\"maxvalue.py демнострирует работу алгоритма, прекращающегося не по условию \nдостижения ограничения на число итераций\n\nАлгоритм находит для каждой вершины V графа G максимальное значение среди вершин,\nиз которых есть направленный путь в V. 
Если граф G сильно связанный, то в каждой\nвершине будет записано максимальное значение во всём графе\n\n\"\"\"\n\nfrom pregel import Vertex, Pregel\nfrom hw04util import *\nfrom random import randint\nimport sys\n\nvertices = {}\nnum_workers = 1\nmax_supersteps = 50\n\ndef main(filename):\n global vertices\n global num_vertices\n # читаем граф из файла, используя конструктор MaxValueVertex\n vertices = read_graph(filename, MaxValueVertex)\n\n # Заполняем случайными значениями\n for v in vertices.values():\n v.value = randint(1, len(vertices) * 2)\n\n # Запускаем подсчет, ограничивая количеством итераций\n p = Pregel(vertices.values(),num_workers,max_supersteps)\n p.run()\n print \"Completed in %d supersteps\" % p.superstep\n for vertex in p.vertices:\n print \"#%s: %s\" % (vertex.id, vertex.value)\n\nclass MaxValueVertex(Vertex):\n def __init__(self, id):\n Vertex.__init__(self, id, None, [])\n\n def update(self):\n global vertices\n # На нулевой итерации еще нет входящих\n if self.superstep > 0:\n # по умолчанию эта вершина станет пассивной\n self.active = False\n if len(self.incoming_messages) > 0:\n # Если входящие сообщения есть то находим максимальное значение и если оно больше, чем свое,\n # то активизируемся\n max_incoming = max(value for (_, value) in self.incoming_messages)\n if max_incoming > self.value:\n self.active = True\n self.value = max_incoming\n\n if self.active:\n # Активная вершина рассылает свое значение по исходящим дугам\n self.outgoing_messages = [(vertex,self.value) for vertex in self.out_vertices]\n\nif __name__ == \"__main__\":\n main(sys.argv[1])\n" }, { "alpha_fraction": 0.7712329030036926, "alphanum_fraction": 0.7890411019325256, "avg_line_length": 90.125, "blob_id": "d2b54114e65673d8ff5e2cbd2c409f9cdb0908de", "content_id": "e5814e23d25bfd5883cbba11d97e3e8260bf90a3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1146, "license_type": "no_license", "max_line_length": 360, "num_lines": 8, "path": "/hw04/README.md", "repo_name": "phil-dolgolev/bigdata_2015", "src_encoding": "UTF-8", "text": "### Реализация Pregel машины\nНужно унаследовать класс от `pregel.Vertex` и определить метод `update`. Смотрите пример в файле `pagerank.py`\n\n### Запуск Pregel\n\nПодсчет pagerank запускается командой `python pagerank.py <filename>` где <filename> -- имя файла с графом. Есть два графа из лекции: в файле `small_graph.txt` записан граф из 20 вершин и двух компонент связности (полный граф из 4 вершин + бинарное дерево из 16 вершин), в файле `random_1000.txt` записан сгенерированный случайным образом граф из 1000 вершин.\n\nПодсчет максимального значения запускается командой `python maxvalue.py <filename>`. 
В файле `maxvalue.txt` записан слегка модифицированный граф `small_graph`, в котором обе компоненты стали сильно связными \n" }, { "alpha_fraction": 0.36100131273269653, "alphanum_fraction": 0.5691699385643005, "avg_line_length": 43.52941131591797, "blob_id": "f87618d2090591ccaa35d604f9ff655a9090acbb", "content_id": "03172c2e52d2133d7fc6e9498783688b575a6465", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 759, "license_type": "no_license", "max_line_length": 113, "num_lines": 17, "path": "/hw07/test.sh", "repo_name": "phil-dolgolev/bigdata_2015", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nERR=\"/dev/null\"\n#ERR=\"/dev/stderr\"\npython raft.py -p 8001 -l 1:L,1:O,1:R,4:E,4:M,5:I,5:P,6:S,6:U -t 6 2>$ERR &\npython raft.py -p 8002 -l 1:L,1:O,1:R,4:E -t 4 2>$ERR &\npython raft.py -p 8003 -l 1:L,1:O,1:R,4:E,4:M,5:I,5:P,6:S,6:U,6:S,6:D -t 6 2>$ERR &\npython raft.py -p 8004 -l 1:L,1:O,1:R,4:E,4:M,5:I,5:P,6:S,6:U,6:S,7:D,7:O -t 7 2>$ERR &\npython raft.py -p 8005 -l 1:L,1:O,1:R,4:E,4:M,4:E,4:T -t 4 2>$ERR &\npython raft.py -p 8006 -l 1:L,1:O,1:R,2:S,2:I,2:T,3:D,3:U,3:R,3:U,3:M -t 3 2>$ERR &\npython raft.py -p 8007 -l \"\" -t 1 2>$ERR &\npython raft.py -p 8008 -l 1:L,1:O,1:R,4:E,4:M,5:I,5:P,7:X,7:X -t 7 &\n\n\nsleep 2s\npython raft.py -p 8000 -l 1:L,1:O,1:R,4:E,4:M,5:I,5:P,6:S,6:U,8:M -t 8 -f 8001,8002,8003,8004,8005,8006,8007,8008\nkill $(jobs -p)\n\n\n" }, { "alpha_fraction": 0.3656330704689026, "alphanum_fraction": 0.5826873183250427, "avg_line_length": 29.920000076293945, "blob_id": "508a9813cd6efb88909ba371f5b295d91db3f5f8", "content_id": "d7f296735e2652a71f65a623bcd0ca121fea776c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 774, "license_type": "no_license", "max_line_length": 100, "num_lines": 25, "path": "/hw08/mr_lsh.py", "repo_name": "phil-dolgolev/bigdata_2015", "src_encoding": "UTF-8", "text": "\t# encoding: utf-8\nimport mincemeat\n\ndef mapfn(docid, docvector):\n for v in docvector:\n yield docid, v\n\ndef reducefn(k, vs):\n return vs\n\ns = mincemeat.Server() \n\ninput0 = {}\ninput0['doc1'] = [48, 25, 69, 36, 22, 24, 88, 37, 71, 8, 68, 60, 20, 33, 96, 9, 50, 77, 30, 32]\ninput0['doc2'] = [48, 25, 69, 12, 22, 24, 45, 37, 71, 8, 68, 60, 63, 78, 12, 9, 50, 77, 30, 32]\ninput0['doc3'] = [48, 25, 69, 36, 74, 100, 94, 14, 89, 18, 100, 89, 63, 66, 96, 9, 50, 77, 30, 32]\ninput0['doc4'] = [22, 5, 34, 96, 31, 41, 14, 89, 18, 100, 89, 63, 66, 96, 78, 19, 39, 53, 83, 20]\n\ns.map_input = mincemeat.DictMapInput(input0) \ns.mapfn = mapfn\ns.reducefn = reducefn\n\nresults = s.run_server(password=\"\") \nfor key, value in sorted(results.items()):\n print(\"%s: %s\" % (key, value) )\n" }, { "alpha_fraction": 0.6357874274253845, "alphanum_fraction": 0.671379804611206, "avg_line_length": 28.71014404296875, "blob_id": "9255067f49ee64b925ef7454daf3c84428a13401", "content_id": "3baee201ebc5e7a7b1a1c037b3c394852a925c72", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2473, "license_type": "no_license", "max_line_length": 92, "num_lines": 69, "path": "/class09/mr_kmeans.py", "repo_name": "phil-dolgolev/bigdata_2015", "src_encoding": "UTF-8", "text": "# encoding: utf-8\nimport mincemeat\nimport argparse\n\n# Маппер получает список, в котором первым элементом записан список центроидов,\n# а последущими элементами являются точки исходного набора данных\n# Маппер выплевывает для каждой точки d пару (c, d) где c -- ближайший к 
точке центроид\ndef mapfn1(k, items):\n cur_centroids = items[0]\n del items[0]\n for i in items:\n min_dist = 100\n min_c = -1\n for c in cur_centroids:\n c = float(c)\n if abs(i - c) < min_dist:\n min_c = c\n min_dist = abs(i-c)\n yield str(min_c), str(i)\n\n# У свертки ключом является центроид а значением -- список точек, определённых в его кластер\n# Свёртка выплевывает новый центроид для этого кластера\ndef reducefn1(k, vs):\n print k\n print vs\n old_c = float(k)\n new_c = float(sum([int(v) for v in vs])) / len(vs)\n return new_c\n\ndef reducefn2(k, vs):\n return vs\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"-n\", help=\"Iterations count\", required = True, type = int)\nparser.add_argument(\"-c\", help=\"Comma-separated list of initial centroids\")\n\nargs = parser.parse_args()\n\n# Начальные центроиды и количество итераций принимаются параметрами\ncentroids = [int(c.strip()) for c in args.c.split(\",\")]\n\nSHARD1 = [10, 20, 25, 27, 27, 32, 41, 49, 55, 72]\nSHARD2 = [15, 16, 30, 35, 43, 44, 53, 67, 80, 81]\nfor i in xrange(1,args.n):\n s = mincemeat.Server() \n\n # На каждой \n input0 = {}\n input0['set1'] = [centroids] + SHARD1\n input0['set2'] = [centroids] + SHARD2\n s.map_input = mincemeat.DictMapInput(input0) \n s.mapfn = mapfn1\n s.reducefn = reducefn1\n\n results = s.run_server(password=\"\") \n centroids = [c for c in results.itervalues()]\n\n# На последней итерации снова собираем кластер и печатаем его\ns = mincemeat.Server() \ninput0 = {}\ninput0['set1'] = [centroids] + SHARD1\ninput0['set2'] = [centroids] + SHARD2\ns.map_input = mincemeat.DictMapInput(input0) \ns.mapfn = mapfn1\ns.reducefn = reducefn2\nresults = s.run_server(password=\"\") \nfor key, value in sorted(results.items()):\n print(\"%s: %s\" % (key, value) )\n\n" }, { "alpha_fraction": 0.6970874071121216, "alphanum_fraction": 0.7029126286506653, "avg_line_length": 27.55555534362793, "blob_id": "4c0bec433e93f8e10525c2b76c1ba4d25391007f", "content_id": "b7594cf3bb078daa70aef4a91f14225217202ca3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 515, "license_type": "no_license", "max_line_length": 68, "num_lines": 18, "path": "/hw03/crawl-corpus.py", "repo_name": "phil-dolgolev/bigdata_2015", "src_encoding": "UTF-8", "text": "import mwclient\nimport mwparserfromhell as mwparser\nimport sys\nsys.path.append(\"../dfs/\")\n\nimport client as dfs\n\nsite = mwclient.Site('en.wikipedia.org')\ncategory = site.Pages['Category:Big_data']\ncounter = 0\n\nwith dfs.file_appender(\"/wikipedia/__toc__\") as toc:\n\tfor page in category:\n\t\tpage_filename = \"/wikipedia/page%d\" % counter\n\t\twith dfs.file_appender(page_filename) as f:\n\t\t\tf.write(mwparser.parse(page.text()).strip_code().encode('utf-8'))\n\t\ttoc.write(\"%s %s\" % (page_filename, page.name))\n\t\tcounter += 1\n\n" }, { "alpha_fraction": 0.68381667137146, "alphanum_fraction": 0.6875584721565247, "avg_line_length": 27.891891479492188, "blob_id": "4824e22c093d7618167346883c396e6d9ac9c96d", "content_id": "71da81e9903fd1959732c8f5a85d086af9f2ce83", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1318, "license_type": "no_license", "max_line_length": 64, "num_lines": 37, "path": "/hw02/mr_sum_matrix.py", "repo_name": "phil-dolgolev/bigdata_2015", "src_encoding": "UTF-8", "text": "# encoding: utf-8\nfrom dfs_client import *\nimport mincemeat\n\n# маппер ожидает на входе получать ключ и значение, равные имени\n# файла, и для каждой 
строки (больше не помещается в память) \n# выплевывает номер матрицы и сумму значений\ndef mapfn(k, v):\n reduce_key = None\n for l in get_file_content(v):\n if reduce_key is None:\n matrix_num, start, end = l.split(\" \", 2)\n reduce_key = matrix_num\n continue\n values = [int(v) for v in l.split(\" \")]\n yield reduce_key, sum(values)\n\n# редьюсер суммирует значения с одинаковым ключом\ndef reducefn(k, vs):\n result = sum(vs)\n return result\n\ns = mincemeat.Server() \n\n# читаем список файлов, из которых состоят матрицы\nmatrix_files = [l for l in get_file_content(\"/matrix1\")]\nfor l in get_file_content(\"/matrix2\"):\n matrix_files.append(l)\n \n# и подаем этот список на вход мапперам\ns.map_input = mincemeat.MapInputDFSFileName(matrix_files) \ns.mapfn = mapfn\ns.reducefn = reducefn\n\nresults = s.run_server(password=\"\") \nfor key, value in sorted(results.items()):\n print(\"%s: %s\" % (key, value) )\n" }, { "alpha_fraction": 0.6095651984214783, "alphanum_fraction": 0.6182608604431152, "avg_line_length": 27.75, "blob_id": "9b635e73cd3338261ffbb5bd3ab87f07d648a4f9", "content_id": "422de35b9f597c0e74e133b5baf6ba4f69ef9f34", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1225, "license_type": "no_license", "max_line_length": 90, "num_lines": 40, "path": "/hw02/print_matrix.py", "repo_name": "phil-dolgolev/bigdata_2015", "src_encoding": "UTF-8", "text": "# encoding: utf-8\nfrom __future__ import print_function\n\nimport argparse\nfrom dfs_client import *\n\n\ndef print_matrix_chunk(filename, cols):\n row = 0\n col = 0\n for l in get_file_content(filename):\n if row == 0:\n matrix_num, start, end = l.split(\" \", 2)\n print(\"Matrix #%s Rows [%s, %s]\" % (matrix_num, start, end))\n row += 1\n continue\n \n print(l, end=' ')\n col += len(l.split(\" \"))\n if col == cols:\n print(\"\")\n row += 1\n col = 0\n return row - 1\n\ndef print_matrix(matrix_toc, rows, cols):\n print(\"Printing matrix %dx%d\" % (rows, cols))\n read_rows = 0\n for l in get_file_content(matrix_toc):\n read_rows += print_matrix_chunk(l, cols)\n if rows != read_rows:\n print(\"Что-то пошло не так: мы прочитали %d строк а надо было %d\" % (read_rows, rows))\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--num\", required = True, help = \"Номер матрицы\")\nparser.add_argument(\"--rows\", required = True, help = \"Число строк\")\nparser.add_argument(\"--cols\", required = True, help = \"Число столбцов\")\n\nargs = parser.parse_args()\nprint_matrix(\"/matrix%s\" % args.num, int(args.rows), int(args.cols))\n" }, { "alpha_fraction": 0.7229601740837097, "alphanum_fraction": 0.7286527752876282, "avg_line_length": 25.350000381469727, "blob_id": "368d1d3943991450bd1ec4a01a5ca6b30d2e2848", "content_id": "257828cb06f1b09d2fe460e149359ebd94f7bd79", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 562, "license_type": "no_license", "max_line_length": 125, "num_lines": 20, "path": "/hw03/print-posting-list.py", "repo_name": "phil-dolgolev/bigdata_2015", "src_encoding": "UTF-8", "text": "# encoding: utf-8\nimport sys\nsys.path.append(\"../dfs/\")\n\nimport client as dfs\nimport argparse\nimport json\nimport util\n\nmetadata = dfs.CachedMetadata()\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--term\", required = True)\nargs = parser.parse_args()\n\n# Ваш псевдоним в виде строковой константы\n# USERNAME=\nshard = \"\".join([l for l in metadata.get_file_content(\"/%s/posting_list/%s\" % 
(USERNAME, util.encode_term(args.term)[0:1]))])\nplists = json.JSONDecoder().decode(shard)\nprint plists[util.encode_term(args.term)]\n" }, { "alpha_fraction": 0.7401190996170044, "alphanum_fraction": 0.7541959881782532, "avg_line_length": 37.47916793823242, "blob_id": "a6378182ba7d1200128cb25ccaf922c7d2126d03", "content_id": "612a51f16dc67a8b2ff83b400afae055fd4e23a9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2625, "license_type": "no_license", "max_line_length": 87, "num_lines": 48, "path": "/hw01/task1.py", "repo_name": "phil-dolgolev/bigdata_2015", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n# encoding: utf8\n\n# Для быстрого локального тестирования используйте модуль test_dfs\nimport test_dfs as dfs\n\n# Для настоящего тестирования используйте модуль http_dfs\n#import http_dfs as dfs\n\n# Демо показывает имеющиеся в DFS файлы, расположение их фрагментов\n# и содержимое фрагмента \"partitions\" с сервера \"cs0\"\n# (не рассчитывайте, что эти две константы останутся неизменными в http_dfs. Они\n# использованы исключительно для демонстрации)\ndef demo():\n for f in dfs.files():\n print(\"File {0} consists of fragments {1}\".format(f.name, f.chunks))\n\n for c in dfs.chunk_locations():\n print(\"Chunk {0} sits on chunk server {1}\".format(c.id, c.chunkserver))\n\n # Дальнейший код всего лишь тестирует получение фрагмента, предполагая, что известно,\n # где он лежит. Не рассчитывайте, что этот фрагмент всегда будет находиться\n # на использованных тут файл-серверах\n\n # При использовании test_dfs читаем из каталога cs0\n chunk_iterator = dfs.get_chunk_data(\"cs0\", \"partitions\")\n\n # При использовании http_dfs читаем с данного сервера\n #chunk_iterator = dfs.get_chunk_data(\"104.155.8.206\", \"partitions\")\n print(\"\\nThe contents of chunk partitions:\")\n for line in chunk_iterator:\n # удаляем символ перевода строки\n print(line[:-1])\n\n# Эту функцию надо реализовать. Функция принимает имя файла и\n# возвращает итератор по его строкам.\n# Если вы не знаете ничего про итераторы или об их особенностях в Питоне,\n# погуглите \"python итератор генератор\". Вот например\n# http://0agr.ru/blog/2011/05/05/advanced-python-iteratory-i-generatory/\ndef get_file_content(filename):\n raise \"Comment out this line and write your code below\"\n\n# эту функцию надо реализовать. 
Она принимает название файла с ключами и возвращает\n# число\ndef calculate_sum(keys_filename):\n raise \"Comment out this line and write your code below\"\n\ndemo()\n" }, { "alpha_fraction": 0.6618345379829407, "alphanum_fraction": 0.6730817556381226, "avg_line_length": 30.488189697265625, "blob_id": "3dc50f2250f858370c4ed4a7f479e49b51bb18fd", "content_id": "d39919c8b5c4f72e98e6e3577bad6be22fee759b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4001, "license_type": "no_license", "max_line_length": 100, "num_lines": 127, "path": "/dfs/client.py", "repo_name": "phil-dolgolev/bigdata_2015", "src_encoding": "UTF-8", "text": "from collections import namedtuple\n#Use this import if you're using Python3\nimport urllib2\nfrom urllib2 import urlopen\n#Use this import if you're using Python2\n#from urllib2 import urlopen\nimport json\nfrom poster.encode import multipart_encode\nfrom poster.streaminghttp import register_openers\nfrom contextlib import closing\nimport argparse\n\nregister_openers()\n\n#MASTER_URL = \"bigdata-hw01.barashev.net\"\nMASTER_URL = \"localhost:8000\"\ndef _json_object_hook(d): return namedtuple('X', d.keys())(*d.values())\ndef json2obj(data): return json.loads(data, object_hook=_json_object_hook)\n\ndef files():\n resp = urlopen(url = \"http://%s/files\" % MASTER_URL, timeout=10)\n if resp.getcode() != 200:\n raise Exception(\"ERROR: can't get files from master\")\n return json2obj(resp.read().decode(encoding='UTF-8'))\n\ndef chunk_locations():\n resp = urlopen(url = \"http://%s/chunk_locations\" % MASTER_URL, timeout=10)\n if resp.getcode() != 200:\n raise Exception(\"ERROR: can't get chunk locations from master\")\n return json2obj(resp.read().decode(encoding='UTF-8'))\n\ndef get_chunk_data(chunk_server_id, chunk_id):\n resp = urlopen(url=\"http://%s/read?id=%s\" % (chunk_server_id, chunk_id), timeout=10)\n if resp.getcode() != 200:\n raise Exception(\"ERROR: can't get chunk %s from chunkserver %s\" % (chunk_id, chunk_server_id))\n for line in resp:\n yield line.decode(encoding='UTF-8')\n\ndef get_file_content(filename):\n chunks = []\n for f in files():\n if f.name == filename:\n chunks = f.chunks\n if len(chunks) == 0:\n return\n clocs = {}\n for c in chunk_locations():\n clocs[c.id] = c.chunkserver\n\n for chunk in chunks:\n try:\n loc = clocs[chunk]\n if loc == \"\":\n raise \"ERROR: location of chunk %s is unknown\" % chunk\n for l in get_chunk_data(loc, chunk):\n yield l.rstrip()\n\n except StopIteration:\n pass\n\ndef create_chunk(filename):\n resp = urlopen(url = \"http://%s/new_chunk?f=%s\" % (MASTER_URL, filename))\n if resp.getcode() != 200:\n raise Exception(\"ERROR: can't create new chunk of file=%s\" % filename)\n return resp.read().split(\" \")\n\ndef write_chunk_data(chunk_server_id, chunk_id, data):\n datagen, headers = multipart_encode({\"data\": data, \"chunk_id\": chunk_id})\n request = urllib2.Request(\"http://%s/write\" % chunk_server_id, datagen, headers)\n response = urllib2.urlopen(request)\n if response.getcode() != 200:\n raise Exception(\"ERROR: can't write chunk %s to chunkserver %s\" % (chunk_id, chunk_server_id))\n\ndef file_appender(filename):\n return closing(FileAppend(filename))\n\nclass FileAppend:\n def __init__(self, filename):\n self.filename = filename\n self.lines = []\n\n def write(self, line):\n self.lines.append(line)\n\n def close(self):\n chunkserver, chunk_id = create_chunk(self.filename)\n write_chunk_data(chunkserver, chunk_id, \"\\n\".join(self.lines))\n\nclass 
CachedMetadata:\n def __init__(self):\n self.file_chunks = {}\n for f in files():\n self.file_chunks[f.name] = f.chunks\n self.chunk_locations = {}\n for cl in chunk_locations():\n self.chunk_locations[cl.id] = cl.chunkserver\n\n def get_file_content(self, filename):\n for chunk_id in self.file_chunks[filename]:\n for l in get_chunk_data(self.chunk_locations[chunk_id], chunk_id):\n yield l\n\ndef put_file(from_file, to_file, master):\n global MASTER_URL\n MASTER_URL=master\n with open(from_file) as f, file_appender(to_file) as buf:\n for l in f:\n buf.write(l.rstrip())\n\ndef get_file(from_file, master):\n global MASTER_URL\n MASTER_URL=master\n for l in get_file_content(from_file):\n print l\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--command\", required = True)\n parser.add_argument(\"--f\")\n parser.add_argument(\"--t\")\n parser.add_argument(\"--master\", required=True, default=\"localhost:8000\")\n args = parser.parse_args()\n\n if \"put\" == args.command:\n put_file(args.f, args.t, args.master)\n elif \"get\" == args.command:\n get_file(args.f, args.master) \n" }, { "alpha_fraction": 0.8404255509376526, "alphanum_fraction": 0.8404255509376526, "avg_line_length": 93, "blob_id": "4626d4bf89df9a2675eefc18b2ba3bfff1d418df", "content_id": "c35ea530e8803d9dc734a0f590d52a021b7f01ba", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 481, "license_type": "no_license", "max_line_length": 169, "num_lines": 3, "path": "/hw08/README.md", "repo_name": "phil-dolgolev/bigdata_2015", "src_encoding": "UTF-8", "text": "Реализуйте map-reduce, или конвейер из нескольких map-reduce, применяющий LSH к набору документов, и напечатайте в ответе список пар, объявленных кандидатами в близнецы.\n\nВ заготовке даны несколько документов и реализован identity map-reduce, выполняющий идентичное преобразование.\n" }, { "alpha_fraction": 0.7395498156547546, "alphanum_fraction": 0.7427652478218079, "avg_line_length": 40.29999923706055, "blob_id": "b58947e506cd9e39a438e8b8294d79e0391a43d9", "content_id": "091f8317974fe355fbf2b14a987a9b935b7d42d5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1745, "license_type": "no_license", "max_line_length": 86, "num_lines": 30, "path": "/hw01/test_dfs.py", "repo_name": "phil-dolgolev/bigdata_2015", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n# encoding: utf8\nfrom collections import namedtuple\nimport json\n\ndef _json_object_hook(d): return namedtuple('X', d.keys())(*d.values())\ndef json2obj(data): return json.loads(data, object_hook=_json_object_hook)\n\n# Получает от \"мастера\" список файлов и входящих в каждый файл фрагментов\n# Возвращает список объектов с полями \"name\": String и \"chunks\": String[]\n# \"name\" - это имя файла, \"chunks\" - список строковых идентификаторов \n# фрагментов в том порядке, в котором они следуют в файле\ndef files():\n with open(\"data/files\") as f:\n return json2obj(f.read())\n\n# Получает от \"мастера\" расположение фрагмента на файловых серверах.\n# Так как репликация для нашей задачи несущественна, то файловый сервер\n# у каждого фрагмента один.\n# Возвращает список объектов с полями \"id\": String и \"chunkserver\": String\n# где \"id\" - идентификатор фрагмента, \"chunkserver\" - идентификатор файлового сервера,\n# на котором фрагмент хранится\ndef chunk_locations():\n with open(\"data/chunk_locations\") as f:\n return json2obj(f.read())\n\n# Возвращает 
содержимое указанного фрагмента с указанного файлового сервера\n# в виде потока\ndef get_chunk_data(chunk_server_id, chunk_id):\n return open(\"data/%s/%s\" % (chunk_server_id, chunk_id))\n\n\n\n\n\n" }, { "alpha_fraction": 0.7135636210441589, "alphanum_fraction": 0.7186183929443359, "avg_line_length": 26.581396102905273, "blob_id": "84b39931ce8ec96e05c9c886acfb8f7027bdd838", "content_id": "a28d20d910721b31049ee78ed5e017ff7d9dd7ee", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3012, "license_type": "no_license", "max_line_length": 88, "num_lines": 86, "path": "/hw03/mr_posting_lists.py", "repo_name": "phil-dolgolev/bigdata_2015", "src_encoding": "UTF-8", "text": "# encoding: utf-8\nimport mincemeat\nimport os\n\nimport sys\nsys.path.append(\"../dfs/\")\n\nimport client as dfs\n\n# Это последовательность из двух Map-Reduce\n# Диспетчер запускается командой python mr_posting_lists.py\n# Рабочий процесс запускается командой python mincemeat.py localhost \n# для каждого из Map-Reduce. То есть, когда отработает первый рабочий процесс, \n# нужно запустить эту же команду еще раз\n# \n# Этот конвейер пока что работает только на одной машине \n# (потому что результаты первого MR записываются в локальные файлы)\n\n# Первый Map-Reduce отображает терм в документ\ndef mapfn(k, v):\n\timport util\n\tfilename, pagetitle = v.split(\" \", 1)\n\tprint v\n\n\timport sys\n\tsys.path.append(\"../dfs/\")\n\n\timport client as dfs\n\twords = {}\n\tfor l in dfs.get_file_content(filename):\n\t\tfor word in l.encode(\"utf-8\").split():\n\t\t\twords[word] = True\n\tfor word in words:\n\t\tyield util.encode_term(word), filename\n\n# и записывает список документов для каждого терма во временный файл\ndef reducefn(k, vs):\n\timport util\n\tif len(k) > 100:\n\t\tprint \"Skipping posting list for term %s\" % (util.decode_term(k))\n\t\treturn {}\n\twith open(\"tmp/plist/%s\" % k, \"w\") as plist:\n\t\tplist.write(\"\\n\".join(vs))\n\treturn {}\n\ns = mincemeat.Server() \n\n# читаем оглавление корпуса википедии\nwikipedia_files = [l for l in dfs.get_file_content(\"/wikipedia/__toc__\")]\n# и подаем этот список на вход мапперам\ns.map_input = mincemeat.MapInputSequence(wikipedia_files) \ns.mapfn = mapfn\ns.reducefn = reducefn\n\nresults = s.run_server(password=\"\") \n\n# Второй Map-Reduce читает временные файлы и отображает первую букву файла в терм\ndef mapfn1(k, v):\n\tyield k[0:1], v\n\n# свертка собирает все списки вхождений для термов, начинающихся на одну и ту же букву, \n# составляет из них словарь, сериализует его и записывает в файл на DFS\ndef reducefn1(k, vs):\n\tterm_plist = {}\n\tfor term in vs:\n\t\twith open(\"tmp/plist/%s\" % term) as f:\n\t\t\tterm_plist[term] = f.read().split(\"\\n\")\n\n\timport sys\n\tsys.path.append(\"../dfs/\")\n\n\timport client as dfs\n\timport json\n\n\t# Ваш псевдоним в виде строковой константы\n\t#USERNAME=\n\twith dfs.file_appender(\"/%s/posting_list/%s\" % (USERNAME, k)) as buf:\n\t\tbuf.write(json.JSONEncoder().encode(term_plist))\n\ns = mincemeat.Server() \nplist_files = os.listdir(\"tmp/plist/\")\ns.map_input = mincemeat.MapInputSequence(plist_files) \ns.mapfn = mapfn1\ns.reducefn = reducefn1\n\nresults = s.run_server(password=\"\") \n\n" }, { "alpha_fraction": 0.7766554355621338, "alphanum_fraction": 0.7912458181381226, "avg_line_length": 45.894737243652344, "blob_id": "d51214a610a520c59ef9e27e8773066729181011", "content_id": "724d64b3e34cbeaea24cf5d3560d855adf614443", "detected_licenses": [], "is_generated": 
false, "is_vendor": false, "language": "Text", "length_bytes": 1336, "license_type": "no_license", "max_line_length": 96, "num_lines": 19, "path": "/hw02/README.txt", "repo_name": "phil-dolgolev/bigdata_2015", "src_encoding": "UTF-8", "text": "В скрипте print_matrix.py реализована программа, печатающая строки матрицы MxN,\nхранящиеся в test_dfs описанным в задании образом. У нас есть две матрицы: первая размером\n3x4, состоящая из единиц и вторая размером 4x6, состоящая из двоек. Запуск программы:\n\npython print_matrix.py --num 1 --rows 3 --cols 4\npython print_matrix.py --num 2 --rows 4 --cols 6\n\nВ файле mr_sum_matrix.py реализован map-reduce, считающий для каждой матрицы сумму ее элементов.\nЗапуск диспетчера map-reduce: \npython mr_sum_matrix.py\n\nЗапуск рабочего процесса: python mincemeat.py localhost\n\nprint_matrix.py работает как в Python 2 так и в Python 3\nmr_sum_matrix.py и mincemeat рассчитаны на Python 2\n\nОбратите внимание на то, что если вы будете в ваших функциях mapfn и reducefn использовать\nсторонние модули, вам, скорее всего, понадобится эти модули включить в mincemeat.py. \nТак сделано, например, с dfs_client.\n" }, { "alpha_fraction": 0.6784249544143677, "alphanum_fraction": 0.7005742192268372, "avg_line_length": 36.96875, "blob_id": "cdfe6ff0110b9a763e90dd4fffafec5741390265", "content_id": "ff29b59797c6990074e011e5a82a15a23cc147fe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1219, "license_type": "no_license", "max_line_length": 98, "num_lines": 32, "path": "/hw01/http_dfs.py", "repo_name": "phil-dolgolev/bigdata_2015", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n# encoding: utf8\nfrom collections import namedtuple\n#Use this import if you're using Python3\nfrom urllib.request import urlopen\n#Use this import if you're using Python2\n#from urllib2 import urlopen\nimport json\n\nMASTER_URL = \"bigdata-hw01.barashev.net\"\n\ndef _json_object_hook(d): return namedtuple('X', d.keys())(*d.values())\ndef json2obj(data): return json.loads(data, object_hook=_json_object_hook)\n\ndef files():\n resp = urlopen(url = \"http://%s/files\" % MASTER_URL, timeout=10)\n if resp.getcode() != 200:\n raise Exception(\"ERROR: can't get files from master\")\n return json2obj(resp.read().decode(encoding='UTF-8'))\n\ndef chunk_locations():\n resp = urlopen(url = \"http://%s/chunk_locations\" % MASTER_URL, timeout=10)\n if resp.getcode() != 200:\n raise Exception(\"ERROR: can't get chunk locations from master\")\n return json2obj(resp.read().decode(encoding='UTF-8'))\n\ndef get_chunk_data(chunk_server_id, chunk_id):\n resp = urlopen(url=\"http://%s/chunks/%s\" % (chunk_server_id, chunk_id), timeout=10)\n if resp.getcode() != 200:\n raise Exception(\"ERROR: can't get chunk %s from chunkserver %s\" % (chunk_id, chunk_server_id))\n for line in resp:\n yield line.decode(encoding='UTF-8')\n\n\n\n\n" }, { "alpha_fraction": 0.5955607891082764, "alphanum_fraction": 0.6035659313201904, "avg_line_length": 34.11820983886719, "blob_id": "4c36568d233124cdf6ed074aa1ecc89d1e1c4f65", "content_id": "6c59ef023fceb20ae1fa82424806296f10e4635a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10993, "license_type": "no_license", "max_line_length": 127, "num_lines": 313, "path": "/dfs/server.py", "repo_name": "phil-dolgolev/bigdata_2015", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\n\"\"\"Simple HTTP Server With Upload.\n\nThis module builds on BaseHTTPServer by 
implementing the standard GET\nand HEAD requests in a fairly straightforward manner.\n\n\"\"\"\n# encoding: utf-8\n#from __future__ import print_function\n\n\n__version__ = \"0.1\"\n__all__ = [\"SimpleHTTPRequestHandler\"]\n__author__ = \"bones7456\"\n__home_page__ = \"http://li2z.cn/\"\n\nimport os\nimport posixpath\nimport BaseHTTPServer\nimport urllib\nimport cgi\nimport shutil\nimport mimetypes\nimport re\nimport argparse\nimport json\nfrom collections import namedtuple\nimport hashlib\nfrom urlparse import urlparse, parse_qs\ntry:\n from cStringIO import StringIO\nexcept ImportError:\n from StringIO import StringIO\nfrom poster.encode import multipart_encode\nfrom poster.streaminghttp import register_openers\nimport urllib2\nimport sys\nimport cgi\nfrom threading import Timer\n\nregister_openers()\n\ndef json2obj(data): return json.JSONDecoder().decode(data)\n\ndef read_files():\n if os.path.exists(\"files\"):\n with open(\"files\") as f:\n return json2obj(f.read())\n else:\n return []\n\ndef write_files(files):\n with open(\"files\", \"w\") as f:\n f.write(json.JSONEncoder().encode(files))\n\n\nour_files = read_files()\nour_chunk_locations_ = {}\nour_chunkservers = []\n__data_dir__ = \"\"\n\ndef get_chunk_locations():\n return our_chunk_locations_\n\ndef set_chunk_locations(cl):\n global our_chunk_locations_\n our_chunk_locations_ = cl\n\ndef get_chunkserver(chunk_id):\n if id in our_chunk_locations_:\n return our_chunk_locations_[chunk_id]\n return None\n\nclass MasterRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):\n def do_POST(self):\n ctype, pdict = cgi.parse_header(self.headers.getheader('content-type'))\n if ctype == 'multipart/form-data':\n postvars = cgi.parse_multipart(self.rfile, pdict)\n elif ctype == 'application/x-www-form-urlencoded':\n length = int(self.headers.getheader('content-length'))\n postvars = cgi.parse_qs(self.rfile.read(length), keep_blank_values=1)\n if 'id' in postvars and 'chunks' in postvars:\n reporter = postvars['id'][0]\n chunks = postvars['chunks'][0]\n\n new_chunk_locations = {}\n for chunk_id, chunkserver in get_chunk_locations().iteritems():\n if chunkserver != reporter:\n new_chunk_locations[chunk_id] = chunkserver\n for chunk in chunks.split():\n new_chunk_locations[chunk] = reporter\n\n set_chunk_locations(new_chunk_locations)\n \n global our_chunkservers\n if not reporter in our_chunkservers:\n our_chunkservers.append(reporter)\n self.send_response(200)\n else:\n self.send_response(400, 'Please specify chunkserver id and chunk list')\n\n def do_GET(self):\n parsed_path = urlparse(self.path)\n if parsed_path.path == \"/new_chunk\":\n if len(our_chunkservers) == 0:\n self.send_response(404, \"No registered chunk servers. 
No one can write\")\n return\n\n query_components = parse_qs(parsed_path.query)\n if not \"f\" in query_components:\n self.send_response(400, \"Please specify 'f' argument\")\n return\n filename = query_components[\"f\"][0] \n self.send_response(200)\n self.send_header(\"Content-type\", \"text/plain\")\n self.end_headers()\n\n chunkserver = hash(filename) % len(our_chunkservers)\n\n existing_chunks = []\n for file in our_files:\n if file[\"name\"] == filename:\n existing_chunks = file[\"chunks\"]\n\n chunk_id = \"%s_%d\" % (hashlib.md5(filename).hexdigest(), len(existing_chunks))\n if len(existing_chunks) == 0: \n our_files.append({\"name\": filename, \"chunks\": existing_chunks})\n existing_chunks.append(chunk_id)\n\n write_files(our_files)\n self.wfile.write(\"%s %s\" % (our_chunkservers[chunkserver], chunk_id))\n\n if parsed_path.path == \"/files\": \n self.send_response(200)\n self.send_header(\"Content-type\", \"text/plain\")\n self.end_headers()\n self.wfile.write(json.JSONEncoder().encode(our_files))\n return\n if parsed_path.path == \"/chunk_locations\": \n self.send_response(200)\n self.send_header(\"Content-type\", \"text/plain\")\n self.end_headers()\n\n self.wfile.write(json.JSONEncoder().encode(\n [{\"id\": chunk_id, \"chunkserver\": chunkserver} for chunk_id, chunkserver in get_chunk_locations().iteritems()]))\n return \n\ndef send_heartbeat():\n try:\n list = os.listdir(__data_dir__)\n except os.error as e:\n print \"ERROR: can't list directory %s: %s\" % (__data_dir__, str(e))\n return False\n list.sort(key=lambda a: a.lower())\n datagen, headers = multipart_encode({\"chunks\": \"\\n\".join(list), \"id\": __chunkserver_url__})\n request = urllib2.Request(\"http://%s/heartbeat\" % __master_url__, datagen, headers)\n response = urllib2.urlopen(request)\n if response.getcode() != 200:\n sys.stderr.write(\"Heartbeat failed: %s\" % str(response))\n return False\n Timer(30, send_heartbeat, ()).start()\n return True\n\nclass ChunkServerRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):\n\n \"\"\"Simple HTTP request handler with GET/HEAD/POST commands.\n\n This serves files from the current directory and any of its\n subdirectories. The MIME type for files is determined by\n calling the .guess_type() method. And can reveive file uploaded\n by client.\n\n The GET/HEAD/POST requests are identical except that the HEAD\n request omits the actual contents of the file.\n\n \"\"\"\n\n server_version = \"SimpleHTTPWithUpload/\" + __version__\n\n def do_GET(self):\n \"\"\"Serve a GET request.\"\"\"\n f = self.send_head()\n if f:\n shutil.copyfileobj(f, self.wfile)\n f.close()\n\n def do_HEAD(self):\n \"\"\"Serve a HEAD request.\"\"\"\n f = self.send_head()\n if f:\n f.close()\n\n def do_POST(self):\n \"\"\"Serve a POST request.\"\"\"\n parsed_path = urlparse(self.path)\n if parsed_path.path != \"/write\":\n self.send_response(400)\n\n r, info = self.deal_post_data()\n if r:\n if send_heartbeat():\n self.send_response(200)\n else:\n self.send_response(500, \"ERROR: failed to report new chunk to master. 
Please retry writing\")\n else:\n self.send_response(500, info)\n self.end_headers()\n \n def deal_post_data(self):\n ctype, pdict = cgi.parse_header(self.headers.getheader('content-type'))\n if ctype == 'multipart/form-data':\n postvars = cgi.parse_multipart(self.rfile, pdict)\n elif ctype == 'application/x-www-form-urlencoded':\n length = int(self.headers.getheader('content-length'))\n postvars = cgi.parse_qs(self.rfile.read(length), keep_blank_values=1)\n if \"chunk_id\" in postvars and \"data\" in postvars:\n path = self.translate_path(postvars[\"chunk_id\"][0])\n try:\n out = open(path, 'wb')\n out.write(postvars[\"data\"][0])\n out.close()\n return (True, \"OK\")\n except IOError as e:\n print e\n return (False, \"Can't create file to write, do you have permission to write?\")\n \n\n def send_head(self):\n \"\"\"Common code for GET and HEAD commands.\n\n This sends the response code and MIME headers.\n\n Return value is either a file object (which has to be copied\n to the outputfile by the caller unless the command was HEAD,\n and must be closed by the caller under all circumstances), or\n None, in which case the caller has nothing further to do.\n\n \"\"\"\n parsed_path = urlparse(self.path)\n if parsed_path.path != \"/read\":\n self.send_response(400, \"Unknown action: %s\" % parsed_path.path)\n # bail out early on a malformed request instead of falling through\n return None\n\n query_components = parse_qs(parsed_path.query)\n if \"id\" not in query_components:\n self.send_response(400, \"id argument is expected\")\n return None\n\n path = self.translate_path(query_components[\"id\"][0])\n f = None\n if os.path.isdir(path):\n self.send_response(400, \"Path is a directory\")\n return None\n try:\n # Always read in binary mode. Opening files in text mode may cause\n # newline translations, making the actual size of the content\n # transmitted *less* than the content-length!\n f = open(path, 'rb')\n except IOError:\n self.send_error(404, \"File not found\")\n return None\n self.send_response(200)\n self.send_header(\"Content-type\", \"text/plain\")\n fs = os.fstat(f.fileno())\n self.send_header(\"Content-Length\", str(fs[6]))\n self.send_header(\"Last-Modified\", self.date_time_string(fs.st_mtime))\n self.end_headers()\n return f\n\n\n def translate_path(self, path):\n \"\"\"Translate a /-separated PATH to the local filename syntax.\n\n Components that mean special things to the local file system\n (e.g. drive or directory names) are ignored.
(XXX They should\n probably be diagnosed.)\n\n \"\"\"\n # abandon query parameters\n path = path.split('?',1)[0]\n path = path.split('#',1)[0]\n path = posixpath.normpath(urllib.unquote(path))\n words = path.split('/')\n words = filter(None, words)\n path = os.getcwd() + \"/\" + __data_dir__\n for word in words:\n drive, word = os.path.splitdrive(word)\n head, word = os.path.split(word)\n if word in (os.curdir, os.pardir): continue\n path = os.path.join(path, word)\n return path\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--role\", required = False, default = 'master')\nparser.add_argument(\"--chunkserver\", required = False, default = 'localhost')\nparser.add_argument(\"--master\", required = False)\nparser.add_argument(\"--port\", required = False, default = 8000)\nparser.add_argument(\"--data\", required = False, default = \"\")\nargs = parser.parse_args()\n\nserver_address = ('', int(args.port))\nif args.role == 'master':\n httpd = BaseHTTPServer.HTTPServer(server_address, MasterRequestHandler)\nelif args.role == 'chunkserver':\n if args.master is None:\n raise Exception(\"Please specify master address\")\n __master_url__ = args.master\n __chunkserver_url__ = \"%s:%d\" % (args.chunkserver, int(args.port))\n httpd = BaseHTTPServer.HTTPServer(server_address, ChunkServerRequestHandler)\n __data_dir__ = args.data\n send_heartbeat()\n Timer(30, send_heartbeat, ()).start()\n\nhttpd.serve_forever()\n\n" }, { "alpha_fraction": 0.6611570119857788, "alphanum_fraction": 0.6644628047943115, "avg_line_length": 14.868420600891113, "blob_id": "a2afbd165f934340007a949cf22e71e5da9c3791", "content_id": "f0772d2d1db3464ed3a3e9dcda34af8bf3db0f15", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 605, "license_type": "no_license", "max_line_length": 32, "num_lines": 38, "path": "/hw05/RLEList.py", "repo_name": "phil-dolgolev/bigdata_2015", "src_encoding": "UTF-8", "text": "class RLEList(object):\n\tdef __init__(self):\n\t\tpass\n\n\tdef append(self, value):\n\t\tpass\n\n\tdef insert(self, index, value):\n\t\tpass\n\n\tdef get(self, index):\n\t\tpass\n\n\tdef iterator(self):\n\t\tpass\n\nclass RLEListRefImpl(RLEList):\n\tdef __init__(self):\n\t\tself.impl = []\n\n\tdef append(self, value):\n\t\tself.impl.append(value)\n\n\tdef insert(self, index, value):\n\t\tself.impl.insert(index, value)\n\n\tdef get(self, index):\n\t\treturn self.impl[index]\n\n\tdef iterator(self):\n\t\treturn iter(self.impl)\n\ndef demo():\n\tlist = RLEListRefImpl()\n\tlist.append(\"foo\")\n\tlist.insert(0, \"bar\")\n\tprint list.iterator().next()\n\tprint list.get(1)\n\n\n" }, { "alpha_fraction": 0.652482271194458, "alphanum_fraction": 0.7234042286872864, "avg_line_length": 19.285715103149414, "blob_id": "a7d941e807a66309d309a8b023933b8a95cafd3e", "content_id": "225d751dcebd290d7d6d6bfbe0dd274ea5d1fe4d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 141, "license_type": "no_license", "max_line_length": 39, "num_lines": 7, "path": "/hw03/util.py", "repo_name": "phil-dolgolev/bigdata_2015", "src_encoding": "UTF-8", "text": "import base64\n\ndef encode_term(term):\n\treturn base64.b64encode(term, \"_-\")\n\ndef decode_term(encoded):\n\treturn base64.b64decode(encoded, \"_-\")" }, { "alpha_fraction": 0.6666666865348816, "alphanum_fraction": 0.6719858050346375, "avg_line_length": 20.730770111083984, "blob_id": "b000da2cb8b32ac791d18334ab51e5b110e1f43b", "content_id": 
"d408cdaf06687b6625543967d4cb4ccf8a5df122", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 564, "license_type": "no_license", "max_line_length": 63, "num_lines": 26, "path": "/hw03/print-corpus.py", "repo_name": "phil-dolgolev/bigdata_2015", "src_encoding": "UTF-8", "text": "import sys\nsys.path.append(\"../dfs/\")\n\nimport client as dfs\nimport argparse\n\nmetadata = dfs.CachedMetadata()\n\ndef print_file(filename):\n\tfor l1 in metadata.get_file_content(filename):\n\t\tprint l1\n\tprint \"\\n\\n\"\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--file\", required = False, default = None)\nargs = parser.parse_args()\n\nif args.file is None:\n\tfor l in metadata.get_file_content(\"/wikipedia/__toc__\"):\n\t\tfilename, pagename = l.split(\" \", 1)\n\t\tprint pagename\n\t\tprint '==========================='\n\t\tprint_file(filename)\nelse:\n\tprint_file(args.file)" }, { "alpha_fraction": 0.6160244941711426, "alphanum_fraction": 0.653239905834198, "avg_line_length": 31.154930114746094, "blob_id": "e7ee3b5c2c59c154cedeb370a55ce997b9003319", "content_id": "c46b453e4d210392cfeb277deebbcd106070c70b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2698, "license_type": "no_license", "max_line_length": 121, "num_lines": 71, "path": "/hw09/mr_kmeans.py", "repo_name": "phil-dolgolev/bigdata_2015", "src_encoding": "UTF-8", "text": "# encoding: utf-8\nimport mincemeat\nimport argparse\n\n# Маппер получает список, в котором первым элементом записан список центроидов,\n# а последущими элементами являются точки исходного набора данных\n# Маппер выплевывает для каждой точки d пару (c, d) где c -- ближайший к точке центроид\ndef mapfn1(k, items):\n import math\n\n def dist(p1, p2):\n \treturn math.sqrt((p1[0] - p2[0])**2 + (p1[1] - p2[1])**2)\n\n cur_centroids = items[0]\n del items[0]\n for i in items:\n min_dist = 100\n min_c = -1\n for c in cur_centroids:\n if dist(i, c) < min_dist:\n min_c = c\n min_dist = dist(i, c)\n yield \"%f %f\" % min_c, \"%f %f\" % i\n\n# У свертки ключом является центроид а значением -- список точек, определённых в его кластер\n# Свёртка выплевывает новый центроид для этого кластера\ndef reducefn1(k, vs):\n new_cx = float(sum([float(v.split()[0]) for v in vs])) / len(vs)\n new_cy = float(sum([float(v.split()[1]) for v in vs])) / len(vs)\n return (new_cx, new_cy)\n\ndef reducefn2(k, vs):\n return vs\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"-n\", help=\"Iterations count\", required = True, type = int)\nparser.add_argument(\"-c\", help=\"Initial centroids separated by commas and semicolons, like 1,1;2,6;6,2\", required = True)\n\nargs = parser.parse_args()\n\n# Начальные центроиды и количество итераций принимаются параметрами\ncentroids = [(float(c.split(\",\")[0]), float(c.split(\",\")[1])) for c in args.c.split(\";\")]\n\nSHARD1 = [(0,0),(0,3),(1,0),(1,1),(1,5),(1,6),(2,1),(2,2),(2,6)]\nSHARD2 = [(4,4),(3,6),(5,2),(5,3),(6,1),(6,2)]\nfor i in xrange(1,args.n):\n s = mincemeat.Server() \n\n input0 = {}\n input0['set1'] = [centroids] + SHARD1\n input0['set2'] = [centroids] + SHARD2\n s.map_input = mincemeat.DictMapInput(input0) \n s.mapfn = mapfn1\n s.reducefn = reducefn1\n\n results = s.run_server(password=\"\") \n centroids = [c for c in results.itervalues()]\n print centroids\n\n# На последней итерации снова собираем кластер и печатаем его\ns = mincemeat.Server() \ninput0 = {}\ninput0['set1'] = [centroids] + 
SHARD1\ninput0['set2'] = [centroids] + SHARD2\ns.map_input = mincemeat.DictMapInput(input0) \ns.mapfn = mapfn1\ns.reducefn = reducefn2\nresults = s.run_server(password=\"\") \nfor key, value in sorted(results.items()):\n print(\"%s: %s\" % (key, value) )\n\n" },
{ "alpha_fraction": 0.7804877758026123, "alphanum_fraction": 0.8048780560493469, "avg_line_length": 53.66666793823242, "blob_id": "1c239d4184f1928351b4087c74de85fe75b75cb6", "content_id": "da61207df39e3d5d95c853bb6a50b2a6900a6af3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 577, "license_type": "no_license", "max_line_length": 141, "num_lines": 6, "path": "/README.md", "repo_name": "phil-dolgolev/bigdata_2015", "src_encoding": "UTF-8", "text": "# bigdata_2015\nRepository for homework assignments for the 2015 course \"Technologies for Storing and Processing Large Volumes of Data\"\n\n**Do**: write to Ekaterina Lebedeva to get write access, create your own branch, and submit your solutions to that branch\n\n**Don't**: fork the repository and submit your solutions there\n" },
{ "alpha_fraction": 0.568973958492279, "alphanum_fraction": 0.5891016125679016, "avg_line_length": 23.95294189453125, "blob_id": "2e958ceeb2af1a73783694da8df313594b22d92f", "content_id": "65a9499932f1ac4bdd894582a6bf477d4f647c53", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2109, "license_type": "no_license", "max_line_length": 91, "num_lines": 85, "path": "/class04/mr_pagerank.py", "repo_name": "phil-dolgolev/bigdata_2015", "src_encoding": "UTF-8", "text": "# encoding: utf-8\nimport mincemeat\n\n\nimport sys\nsys.path.append(\"../dfs/\")\n\nimport client as dfs\n\ndef mapfn(k, v):\n import sys\n sys.path.append(\"../dfs/\")\n import client as dfs\n N = 1000\n\n for l in dfs.get_file_content(v):\n l = l.strip()\n if len(l) == 0:\n continue\n cols = l.split(\" \")\n if len(cols) != 2 and len(cols) != 4:\n sys.stderr.write(\"Malformed record len=%d: %s\" %(len(cols), str(l)))\n continue\n\n docid = cols[0]\n outlinks = [] if cols[1] == \"==\" else cols[1].split(\",\")\n if len(cols) == 4:\n rank = float(cols[2])\n iter_num = int(cols[3])\n else:\n rank = 1.0/N\n iter_num = 0\n\n if len(outlinks) == 0:\n for d in range(0, N):\n yield str(d), (\"rank\", rank/N)\n else:\n for d in outlinks:\n yield str(d), (\"rank\", rank / len(outlinks))\n yield docid, (\"outlinks\", cols[1])\n yield docid, (\"iter\", iter_num + 1)\n\n\ndef reducefn(k, vs):\n import sys\n sys.path.append(\"../dfs/\")\n import client as dfs\n N = 1000\n\n new_rank = 0.0\n iter_num = None\n for v in vs:\n if v[0] == \"rank\":\n new_rank += v[1]\n elif v[0] == \"outlinks\":\n outlinks = v[1]\n elif v[0] == \"iter\":\n iter_num = v[1]\n else:\n sys.stderr.write(\"Malformed reduce task: key=%s value=%s\" % (k, str(vs)))\n\n if iter_num is None:\n sys.stderr.write(\"Malformed reduce task (no iter num): key=%s value=%s\" % (k, str(vs)))\n return\n new_rank = 0.85 * new_rank + 0.15 / N\n out_filename = \"/class04/iter%d/%s\" % (iter_num, WORKER_NAME)\n print \"%s %s %f %d\" % (k, outlinks, new_rank, iter_num)\n\n return out_filename\n\ns = mincemeat.Server() \n\nimport argparse\n# read the list of files that make up the graph\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--toc\", required = True)\nargs = parser.parse_args()\n\ngraph_files = [l for l in dfs.get_file_content(args.toc)]\n# and feed this list to the mappers\ns.map_input =
mincemeat.MapInputSequence(graph_files) \ns.mapfn = mapfn\ns.reducefn = reducefn\n\nresults = s.run_server(password=\"\") \n" }, { "alpha_fraction": 0.6289544105529785, "alphanum_fraction": 0.6388739943504333, "avg_line_length": 31.163793563842773, "blob_id": "d890e3ed6ec103dc1a00371c81b9434cdbbb6510", "content_id": "a9f7a2aed4e0f76417fd485ea9c5aba2bd1ad067", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3730, "license_type": "no_license", "max_line_length": 167, "num_lines": 116, "path": "/hw07/raft.py", "repo_name": "phil-dolgolev/bigdata_2015", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\nfrom BaseHTTPServer import BaseHTTPRequestHandler,HTTPServer\nimport threading\nimport time\nimport argparse\nfrom urllib2 import urlopen\nfrom string import strip\nimport urlparse\n\nclass RaftHandler(BaseHTTPRequestHandler):\n def __init__(self, term, log, *args):\n \tself.term = term\n \tself.log = log\n \tBaseHTTPRequestHandler.__init__(self, *args)\n\n def do_GET(self):\n self.send_response(200)\n self.send_header('Content-type','text/plain')\n self.end_headers()\n\n request = urlparse.urlparse(self.path)\n qs = urlparse.parse_qs(request.query)\n\n if request.path == \"/replicate\":\n self.process_replicate(qs)\n return\n if request.path == \"/print\":\n print str(self.log) \n return\n\n def process_replicate(self, qs):\n prev_idx = int(qs[\"prev_idx\"][0])\n prev_term = int(qs[\"prev_term\"][0])\n if prev_idx >= len(self.log):\n self.wfile.write(\"NACK\")\n return\n\n def report_ack():\n self.wfile.write(\"ACK\")\n del(self.log[prev_idx + 1 :])\n cur_term = int(qs[\"cur_term\"][0]) \t\n cur_value = qs[\"cur_value\"][0]\n self.log.append((cur_term, cur_value))\n\n if len(self.log) == 0:\n if prev_idx == -1:\n \treport_ack()\n return\n self.wfile.write(\"NACK\") \n return\n\n if self.log[prev_idx][0] == prev_term:\n report_ack()\n else:\n self.wfile.write(\"NACK\")\n return\n\ndef handleRequestsUsing(term, log):\n\treturn lambda *args: RaftHandler(term, log, *args)\n\ndef find_baseline(term, log, port):\n for i in reversed(xrange(len(log))):\n resp = urlopen(url = \"http://localhost:%d/replicate?cur_term=%d&cur_value=%s&prev_idx=%d&prev_term=%d\" % (port, log[i][0], log[i][1], i - 1, log[i-1][0]))\n if resp.getcode() != 200:\n raise Exception(\"ERROR: can't get response from follower on port %d\" % port)\n answer = resp.read().decode(encoding='UTF-8')\n if \"ACK\" == answer:\n return i\n return -1\n\ndef append_tail(start_idx, log, follower_port):\n i = start_idx\n while i < len(log):\n resp = urlopen(url = \"http://localhost:%d/replicate?cur_term=%d&cur_value=%s&prev_idx=%d&prev_term=%d\" % (follower_port, log[i][0], log[i][1], i - 1, log[i-1][0]))\n if resp.getcode() != 200:\n raise Exception(\"ERROR: can't get response from follower on port %d\" % follower_port)\n answer = resp.read().decode(encoding='UTF-8')\n if \"NACK\" == answer:\n raise Exception(\"ERROR: when appending a log at index %d tail follower returned NACK. 
What's up?\"\t% i)\n        i += 1 \n\ndef replicate_log(term, log, follower_ports):\n    for port in follower_ports:\n        append_tail(1 + find_baseline(term, log, port), log, port)\n        urlopen(url = \"http://localhost:%d/print\" % port)\n    \ndef start_follower(term, log, port):\n    server = HTTPServer(('', port), handleRequestsUsing(term, log))\n    print 'Started httpserver on port %d' % port\n    t = threading.Thread(target=server.serve_forever)\n    t.setDaemon(True)\n    t.start()\n    return t\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"-p\", help=\"Port number\", required = True, type = int)\nparser.add_argument(\"-l\", help=\"Existing log\", required = True)\nparser.add_argument(\"-t\", help=\"Term number\", required = True, type = int)\nparser.add_argument(\"-f\", help=\"Comma-separated list of follower node ports\")\n\nargs = parser.parse_args()\n\nif len(args.l) == 0:\n    log = []\nelse:\n    log = [(int(le[0]), le[1]) for le in [log_entry.split(\":\") for log_entry in args.l.split(\",\")]]\n\nif args.f:\n    print \"Leader is starting. Log=%s\" % str(log)\n    follower_ports = [int(f.strip()) for f in args.f.split(\",\")]\n    replicate_log(args.t, log, follower_ports)\nelse:\n    start_follower(args.t, log, args.p)\n    print \"Started follower with log=%s\" % str(log)\n    while True:\n        time.sleep(1)" }, { "alpha_fraction": 0.7294944524765015, "alphanum_fraction": 0.7522597908973694, "avg_line_length": 53.29090881347656, "blob_id": "e02b5a3aa7d81b8b54d2cc05ab5625b90d1cc13c", "content_id": "fa205eb19051850b73ba63d540a35d80c4b2fd1d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 4249, "license_type": "no_license", "max_line_length": 521, "num_lines": 55, "path": "/dfs/README.md", "repo_name": "phil-dolgolev/bigdata_2015", "src_encoding": "UTF-8", "text": "### TL;DR:\n```\ncd bigdata_2015/dfs\nrm -r files data data2 \nmkdir data data2\npython server.py --role master --port 8000\npython server.py --role chunkserver --master localhost:8000 --chunkserver localhost --port 8001 --data data\npython server.py --role chunkserver --master localhost:8000 --chunkserver localhost --port 8002 --data data2\n```\n\n### Details\nThe DFS consists of several servers: a master and at least one chunk server. The master stores the metadata; the chunk servers store chunks in a directory on disk. The metadata includes our familiar `files` file -- a mapping from file names to chunk identifiers. The master receives the information about chunk locations (known to us as `chunk_locations`) from the chunk servers.\n\nThe commands above start 1 master and 2 chunk servers from a clean slate. The master runs on port 8000; the chunk servers run on ports 8001 and 8002 and store their data in the `data` and `data2` directories, respectively. \n\n### Client code\nThe `client.py` script provides the already familiar functions `files()`, `chunk_locations()`, `get_chunk_data()`, and `get_file_content()` -- they do exactly what they did in the previous assignments. The new functions `create_chunk()` and `write_chunk_data()` create a new chunk and write its contents. The helper function `file_appender()` returns an object with a `write()` method that buffers records and, on close, creates a new chunk and writes the accumulated records into it. 
It is convenient to use this object like this:\n\n```\nwith dfs.file_appender(\"/foo/bar\") as f:\n    f.write(\"Foo\")\n    f.write(\"Bar\")\n# Here we leave the with context, the f.close() method is called automatically \n# and a new chunk is created in the file /foo/bar. \n# If the file did not exist, it will appear\n```\n\nBy default the client code talks to the master listening on `localhost:8000` (the `MASTER_URL` constant in `client.py`)\n\n### Required modules\nThe DFS requires Python 2 and the `poster` module. The `poster` module is not part of the standard SDK; install it with pip install poster (possibly run as the superuser)\n\n### Pitfalls\n```\npython server.py --role chunkserver --master localhost:8000 --chunkserver localhost --port 8001 --data data\nTraceback (most recent call last):\n  File \"server.py\", line 309, in <module>\n    send_heartbeat()\n  File \"server.py\", line 153, in send_heartbeat\n    self.send_error(404, \"No permission to list directory\")\nNameError: global name 'self' is not defined\n```\n\nCreate the `data` directory\n\n```\n  File \"print-corpus.py\", line 20, in <module>\n    for l in metadata.get_file_content(\"/wikipedia/__toc__\"):\n  File \"../dfs/client.py\", line 99, in get_file_content\n    for l in get_chunk_data(self.chunk_locations[chunk_id], chunk_id):\nKeyError: u'6bf8debcb30e532944848c2329315072_0'\n```\n\nIf some key that exists in `files` cannot be found in `chunk_locations`, it means that the chunk server storing the corresponding chunk is not running, or that its `--data` argument is wrong \n### \n" }, { "alpha_fraction": 0.5512195229530334, "alphanum_fraction": 0.5654784440994263, "avg_line_length": 22.79464340209961, "blob_id": "fbeafdc750d8ac902d86f63c51ca83e63d960299", "content_id": "c9c643874ca9034ebf91e69e189b8e9477c31aad", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2847, "license_type": "no_license", "max_line_length": 101, "num_lines": 112, "path": "/hw05/control_test.py", "repo_name": "phil-dolgolev/bigdata_2015", "src_encoding": "UTF-8", "text": "# encoding: utf-8\nfrom rlelist import RLEListTrueImpl as RLEListImpl\n\n# creates a new instance of the list\ndef newInstance():\n    return RLEListImpl()\n\n# returns the length of the compressed list\ndef implLength(l):\n    return len(l.impl)\n\n# returns the compressed list as a string (used in the asserts; not needed while the tests pass)\ndef implAsString(l):\n    return \",\".join([\"%s (%d)\" % (e.value, e.count) for e in l.impl])\n\n# concatenates the list elements and returns the resulting string\ndef concat(l):\n    return \"\".join([c for c in l.iterator()])\n\ndef assertContent(expected, l):\n    for i in xrange(len(expected)):\n        assert expected[i] == l.get(i), \"get(%d): Expected:%s actual:%s\" % (i, expected[i], l.get(i))\n    assert expected == concat(l), \"Concat: Expected:%s actual:%s\" % (expected, concat(l))\n\ndef test1():\n    l = newInstance()\n    l.append(\"h\")\n    l.append(\"e\")\n    l.append(\"l\")\n    l.append(\"l\")\n    l.append(\"o\")\n    assert 4 == implLength(l)\n    assertContent(\"hello\", l)\n\ndef test2():\n    l = newInstance()\n    l.insert(0, \"o\")\n    l.insert(0, \"l\")\n    l.insert(0, \"l\")\n    l.insert(0, \"e\")\n    l.insert(0, \"h\")\n    assert 4 == implLength(l), implAsString(l)\n    assertContent(\"hello\", l)\n\ndef test3():\n    l = newInstance()\n    l.append(\"e\")\n    l.append(\"l\")\n    l.append(\"o\")\n    l.insert(1, \"l\")\n    l.insert(0, \"h\")\n    assert 4 == implLength(l), implAsString(l)\n    assertContent(\"hello\", l)\n\n    l = 
newInstance()\n l.append(\"e\")\n l.append(\"l\")\n l.append(\"o\")\n l.insert(2, \"l\")\n l.insert(0, \"h\")\n assert 4 == implLength(l), \"impl=%s\" % implAsString(l)\n assertContent(\"hello\", l)\n\ndef test4():\n l = newInstance()\n l.append(\"h\") \n l.append(\"e\") \n l.append(\"e\") \n l.append(\"e\") \n l.insert(2, \"e\")\n l.insert(2, \"E\")\n assert 4 == implLength(l), \"impl=%s\" % implAsString(l)\n assertContent(\"heEeee\", l)\n\n l = newInstance()\n l.append(\"h\") \n l.append(\"e\") \n l.append(\"e\") \n l.append(\"e\") \n l.insert(2, \"E\")\n l.insert(4, \"E\")\n\n assert 6 == implLength(l), \"impl=%s\" % implAsString(l)\n assertContent(\"heEeEe\", l)\n\ndef test5():\n l = newInstance()\n l.insert(0, \"a\")\n l.insert(0, \"a\")\n l.insert(0, \"a\")\n assert 1 == implLength(l), \"impl=%s\" % implAsString(l)\n assertContent(\"aaa\", l)\n\ndef test6():\n l = newInstance()\n l.append(\"h\")\n l.append(\"h\")\n l.append(\"h\")\n l.append(\"e\")\n l.append(\"e\")\n l.append(\"e\")\n l.insert(4, \"e\")\n assert 2 == implLength(l), \"impl=%s\" % implAsString(l)\n assertContent(\"hhheeee\", l)\n\ntest1()\ntest2()\ntest3()\ntest4()\ntest5()\ntest6()\nprint \"All passed\"\n" } ]
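The dfs/README.md in the record above describes the client API only in prose plus one write-path example; here is a minimal read-path sketch under stated assumptions (that `client.py` exposes module-level `files()` and `get_file_content()` behaving as the earlier assignments describe, and that a master plus at least one chunk server is already running on `localhost:8000`):

```python
# Minimal sketch, assuming client.py exposes module-level files() and
# get_file_content() as described in dfs/README.md, and that a master
# (plus at least one chunk server) is already running on localhost:8000.
import sys
sys.path.append("../dfs/")

import client as dfs

# files() is assumed to return the file-name -> chunk-ids mapping
for name in dfs.files():
    print(name)

# get_file_content() is assumed to yield the file's records line by line
for line in dfs.get_file_content("/foo/bar"):
    print(line)
```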
33
d-chen/twitch-emoticons
https://github.com/d-chen/twitch-emoticons
1b1630e893a09c8b5b45cc0e71cb877966015326
27e23126d0c8577ed9c25c510b24196ddc5dffa4
366a515a12febbdce7c47ce5821cc55fabbed6dc
refs/heads/master
2021-01-17T09:49:15.753842
2017-07-31T01:22:06
2017-07-31T01:22:06
35,789,124
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6305800676345825, "alphanum_fraction": 0.6396685242652893, "avg_line_length": 32.702701568603516, "blob_id": "48346f50662a014ebabfced6686cf12e119c8685", "content_id": "acb8ed47b650d1bc3b75edadf7268230acd1b743", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3741, "license_type": "no_license", "max_line_length": 96, "num_lines": 111, "path": "/download.py", "repo_name": "d-chen/twitch-emoticons", "src_encoding": "UTF-8", "text": "import collections\nfrom collections import OrderedDict\nimport json\nimport logging\nimport os\nimport requests\nimport shutil\n\nwith open('log_download.txt', 'w'):\n pass\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.INFO)\n\nhandler = logging.FileHandler('log_download.txt')\nhandler.setLevel(logging.INFO)\nlogger.addHandler(handler)\n\ndef create_json(emote_list, dir):\n my_list = []\n TEMPLATE = \"https://raw.githubusercontent.com/d-chen/twitch-emoticons/master/{1}/{0}.png\"\n REJECT = ['CougarHunt', 'EagleEye', 'RedCoat', 'StoneLightning', 'TheRinger', 'Evo2013']\n\n for emote in emote_list:\n code = emote['code']\n iid = emote['image_id']\n my_dict = {\"id\": code, \"src\": TEMPLATE.format(code, dir)}\n if not code in REJECT:\n my_list.append(my_dict)\n\n with open('global.json', 'w') as file:\n json.dump(my_list, file)\n\ndef download_emotes(emote_list, dir):\n TEMPLATE = \"http://static-cdn.jtvnw.net/emoticons/v1/{image_id}/1.0\"\n\n for emote in emote_list:\n code = emote['code']\n iid = emote['image_id']\n url = TEMPLATE.format(image_id=iid)\n path = './{dir}/{id}.png'.format(id=code, dir=dir)\n\n r = requests.get(url, stream=True)\n if r.status_code == 200:\n logger.info('Saving image to {path}'.format(path=path))\n with open(path, 'wb') as file:\n for chunk in r.iter_content():\n file.write(chunk)\n logger.info('Finished downloading emotes')\n\ndef get_emote_list():\n EMOTE_LIST_URL = \"http://twitchemotes.com/api_cache/v2/subscriber.json\"\n\n logger.info('Requesting emote list from {0}'.format(EMOTE_LIST_URL))\n resp = requests.get(EMOTE_LIST_URL)\n\n if (resp.status_code != 200):\n logger.error('Cannot get emote list. 
Status code={0}'.format(resp.status_code))\n else:\n result = json.JSONDecoder(object_pairs_hook=collections.OrderedDict).decode(resp.text)\n return result['channels']['srkevo1']['emotes']\n\n#emote_list = get_emote_list()\n#download_emotes(emote_list, \"srkevo1\")\n#create_json(emote_list, \"srkevo1\")\n\n\ndef create_global_json(emote_list):\n my_list = []\n TEMPLATE = \"https://raw.githubusercontent.com/d-chen/twitch-emoticons/master/global/{0}.png\"\n REJECT = ['CougarHunt', 'EagleEye', 'RedCoat', 'StoneLightning', 'TheRinger']\n\n for key, value in emote_list.iteritems():\n my_dict = {\"id\": key, \"src\": TEMPLATE.format(key)}\n if not key in REJECT:\n my_list.append(my_dict)\n\n with open('global.json', 'w') as file:\n json.dump(my_list, file)\n\ndef download_global_emotes(emote_list):\n TEMPLATE = \"http://static-cdn.jtvnw.net/emoticons/v1/{image_id}/1.0\"\n\n for key, value in emote_list.iteritems():\n url = TEMPLATE.format(image_id=value['id'])\n path = './global/{id}.png'.format(id=key)\n\n r = requests.get(url, stream=True)\n if r.status_code == 200:\n logger.info('Saving image to {path}'.format(path=path))\n with open(path, 'wb') as file:\n for chunk in r.iter_content():\n file.write(chunk)\n logger.info('Finished downloading emotes')\n\ndef get_global_emote_list():\n EMOTE_LIST_URL = \"https://twitchemotes.com/api_cache/v3/global.json\"\n\n logger.info('Requesting emote list from {0}'.format(EMOTE_LIST_URL))\n resp = requests.get(EMOTE_LIST_URL)\n\n if (resp.status_code != 200):\n logger.error('Cannot get emote list. Status code={0}'.format(resp.status_code))\n else:\n result = json.JSONDecoder(object_pairs_hook=collections.OrderedDict).decode(resp.text)\n return result\n\n\nemotes = get_global_emote_list()\ndownload_global_emotes(emotes)\ncreate_global_json(emotes)\n" } ]
1
Staszek1903/projekt_KWD
https://github.com/Staszek1903/projekt_KWD
753e91dece97893bedcfab54ed26f29112656d4e
080b27ecc9239a8b9c258f4a8ae35ed2d94f8242
6fe270b96b81af9be3ca1c742a26c288945b1bb7
refs/heads/master
2020-12-27T20:57:56.029083
2020-02-03T20:09:01
2020-02-03T20:09:01
238,051,598
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6974033117294312, "alphanum_fraction": 0.7095919251441956, "avg_line_length": 32.105262756347656, "blob_id": "f785bf94875082f55da7e890847fed9f9d50fd42", "content_id": "a66d9576969c84e5a08c8b3410e01a7dc524347c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1900, "license_type": "no_license", "max_line_length": 375, "num_lines": 57, "path": "/mush.py", "repo_name": "Staszek1903/projekt_KWD", "src_encoding": "UTF-8", "text": "import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom sklearn import tree, preprocessing\nfrom sklearn.datasets import load_iris\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import classification_report, accuracy_score, confusion_matrix\nfrom sklearn.tree.export import export_text\n\nfrom pprint import pprint\n\nnames = ['class', 'cap-shape', 'cap-surface', 'cap-color', 'bruises', 'odor', 'gill-attachment', 'gill-spacing', 'gil-size', 'gil-color', 'stalk-shape', 'stalk-root', 'stalk-surface-above-ring', 'stalk-surface-below-ring', 'stalk-color-above-ring', 'stalk-color-below-ring', 'veil-type', 'veil-color', 'ring-number', 'ring-type', 'spore-print-color', 'population', 'habitat']\ndata = pd.read_csv('./agaricus-lepiota.data', names=names)\n\n#pozbywamy się wartości których nie ma\ndata = data[data['stalk-root'] != '?'] \n\n#\nX = data.loc[:, data.columns != 'class']\ny = data['class'].to_frame()\n\n#wartości 0 i 1\nX_enc = pd.get_dummies(X)\nscaler = preprocessing.StandardScaler()\n#wartości ustandaryzowane\nX_std = scaler.fit_transform(X_enc)\n\nle = preprocessing.LabelEncoder()\ny_enc = le.fit_transform(y.values.ravel())\n\n#podział na zbiór uczący i testujący\nX_train, X_test, y_train, y_test = train_test_split(\n X_std,\n y_enc,\n test_size=0.99,\n stratify=y_enc,\n random_state=1001 \n)\nweights={0:5, 1:1}\n\n#wagi jak drzewo ma brać pod uwagę rozkład danych wyjściowych\nclf = tree.DecisionTreeClassifier(class_weight=weights)\nclf = clf.fit(X_train, y_train)\ny_pred = clf.predict(X_test)\n\nprint(\"Confusion Matrix: \", confusion_matrix(y_test, y_pred)) \n \nprint (\"Accuracy : \", accuracy_score(y_test,y_pred)*100) \n \nprint(\"Report : \", classification_report(y_test, y_pred)) \n\nr = export_text(clf, feature_names=list(X_enc.columns))\nprint(r)\n\n# plt.figure(dpi=250, figsize=[5.4, 3.8])\n# tree.plot_tree(clf)\n# plt.show()\n" } ]
1
wshahbaz/Fake_News
https://github.com/wshahbaz/Fake_News
b3d292eee8a6b6aa8ef6edf171f842fa0d329447
a9ce756d9b013757005649a98694b59e4ad7ea84
70aceaf97b7388a6fbe67f948a3338b1be92c123
refs/heads/main
2023-02-15T08:47:06.877684
2021-01-17T20:54:01
2021-01-17T20:54:01
330,480,542
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7947368621826172, "alphanum_fraction": 0.807894766330719, "avg_line_length": 53.28571319580078, "blob_id": "14d99e4b4d01d155f0600ec16aa3bb140cc73adc", "content_id": "1db4f665fa4fef594724df825c95bfb87b56c556", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 380, "license_type": "no_license", "max_line_length": 182, "num_lines": 7, "path": "/README.md", "repo_name": "wshahbaz/Fake_News", "src_encoding": "UTF-8", "text": "# Fake News Classifier\n\nFake News Classifier generated by building a Tfidf vectorizer on a 30MB dataset of fake and real news articles, followed by fitting Sklean's linear model PassiveAggressiveClassifier.\n\nThe model achieves approximately 92.5% accuracy, as shown by the confusion matrix for predictions on the test set.\n\n![Confusion Matrix on Test Set](./confusion_matrix.png)\n" }, { "alpha_fraction": 0.6429098844528198, "alphanum_fraction": 0.6536189317703247, "avg_line_length": 29.772727966308594, "blob_id": "00f357ee5e1635471eb26e9cf8247c651119ff94", "content_id": "fa0fe558223c8de90aa0074f24250b5ef27aa2cb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2708, "license_type": "no_license", "max_line_length": 97, "num_lines": 88, "path": "/model_pred.py", "repo_name": "wshahbaz/Fake_News", "src_encoding": "UTF-8", "text": "import numpy as np\nimport pandas as pd\nimport itertools\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.linear_model import PassiveAggressiveClassifier\nfrom sklearn.metrics import accuracy_score, confusion_matrix\n\n#Read the data\ndf=pd.read_csv('news.csv')\n\n#Get shape and head\ndf.shape\ndf.head()\n\n#Get the labels\nlabels=df.label\nlabels.head()\n\n#Split the dataset\nx_train,x_test,y_train,y_test=train_test_split(df['text'], labels, test_size=0.2, random_state=7)\n\n#Initialize a TfidfVectorizer\ntfidf_vectorizer=TfidfVectorizer(stop_words='english', max_df=0.7)\n\n#Fit and transform train set, transform test set\ntfidf_train=tfidf_vectorizer.fit_transform(x_train)\ntfidf_test=tfidf_vectorizer.transform(x_test)\n\n#Initialize a PassiveAggressiveClassifier\npac=PassiveAggressiveClassifier(max_iter=50, verbose=1)\npac.fit(tfidf_train,y_train)\n\n#Predict on the test set and calculate accuracy\ny_pred=pac.predict(tfidf_test)\nscore=accuracy_score(y_test,y_pred)\nprint()\nprint(f'Accuracy: {round(score*100,2)}%')\n\n#Build a confusion matrix\ndef plot_confusion_matrix(cm,\n target_names,\n title='Confusion matrix',\n cmap=None,\n normalize=True):\n\n import matplotlib.pyplot as plt\n import itertools\n\n accuracy = np.trace(cm) / np.sum(cm).astype('float')\n misclass = 1 - accuracy\n\n if cmap is None:\n cmap = plt.get_cmap('Blues')\n\n plt.figure(figsize=(8, 6))\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n\n if target_names is not None:\n tick_marks = np.arange(len(target_names))\n plt.xticks(tick_marks, target_names, rotation=45)\n plt.yticks(tick_marks, target_names)\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n\n\n thresh = cm.max() / 1.5 if normalize else cm.max() / 2\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n if normalize:\n plt.text(j, i, \"{:0.4f}\".format(cm[i, j]),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n else:\n plt.text(j, i, \"{:,}\".format(cm[i, j]),\n 
horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label\\naccuracy={:0.4f}; misclass={:0.4f}'.format(accuracy, misclass))\n plt.show()\n\n#plot confusion matrix\nplot_confusion_matrix(confusion_matrix(y_test,y_pred), ['FAKE','REAL'], normalize=False)\n" } ]
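model_pred.py above stops at the confusion matrix; a short, hypothetical follow-on showing how the fitted objects it builds could score one new article (the sample headline is made up, and `tfidf_vectorizer` and `pac` are assumed to still be in scope, e.g. by appending these lines to the end of that script):

```python
# Hypothetical usage sketch: classify one unseen article with the fitted
# tfidf_vectorizer and pac from model_pred.py. Note transform(), not
# fit_transform(): the vocabulary learned on the training set is reused.
sample = ["The government announced a new budget proposal on Tuesday."]
sample_tfidf = tfidf_vectorizer.transform(sample)
print(pac.predict(sample_tfidf))  # e.g. ['REAL'] or ['FAKE']
```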
2
Muertogon/pyTest
https://github.com/Muertogon/pyTest
d9ca90dc53373999f2226b3af5a9fad1d6739461
9200290b523a63f43223736836f0dc0d91dcffed
046addacc13b273db4b16e8e7bcdf469678baff3
refs/heads/master
2023-03-14T04:04:15.852009
2021-03-12T09:52:07
2021-03-12T09:52:07
347,017,685
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5942173600196838, "alphanum_fraction": 0.6291126608848572, "avg_line_length": 15.733333587646484, "blob_id": "cef7b103e8096941ea2ee55a7c04ce5de5e1bee7", "content_id": "fe7860dbe25087ddcce7ecbcefe99d84e1d826c4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1003, "license_type": "no_license", "max_line_length": 101, "num_lines": 60, "path": "/main.py", "repo_name": "Muertogon/pyTest", "src_encoding": "UTF-8", "text": "import random as rn\n\n# pirmasis\n\nne = False #bool\nfl = 24.5 #float\nmetai = 2021 #integer\nvardas = \"Kestutis\" #string\n\n# antrasis\n\nvaisiai = [\"Obuolys\", \"Bananas\", \"Persimonas\"] #list\nfor x in vaisiai:\n print(x)\nvaistai = (\"Pertusinas\", \"Valerijonas\", \"Ibuprofenas\") #tuple\nfor n in vaistai:\n print(n)\nasmuo = {\"vardas\": \"George\", \"pavarde\": \"Smith\", \"kodas\": rn.randint(10000000000, 99999999999)} #dict\nfor i in asmuo:\n print(asmuo[i])\n# trecias\n\nmanoKintamasis = 6\nmano_kintamasis = 8\ntipas = 9\n\n# ketvirtas\n\nfloatData = 4.5\n\n#penktas\n\ndef printe_liste(listo):\n if type(listo) == dict:\n for v in listo:\n print(listo[v])\n elif type(listo) == list:\n for v in listo:\n print(v)\n else:\n for v in listo:\n print(v)\n\nprinte_liste(asmuo)\n\n# sestas\n\nx = \"gas\"\ny = \"kin\"\n\n# septintas\n\n# mutable tipai: list, dict, set\n# not mutable: int, float, string, tuple\n\n#mutable objektai gali buti pakeisti po sukurimo, o immutable- ne\n\n#astuntas\n\n# 5" } ]
1
xj-m/JHU-19F-WebSecurity
https://github.com/xj-m/JHU-19F-WebSecurity
a8f426d66d86fc62f486935f66310e40416b1ebf
a97d185cab6c345580d0ba2e5d99f00ea8bb5218
3a7a7d00100235f36ac0d3622f07f453926fc5b4
refs/heads/master
2020-07-30T03:14:44.045677
2019-11-24T09:07:58
2019-11-24T09:07:58
210,067,337
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6161702275276184, "alphanum_fraction": 0.621560275554657, "avg_line_length": 36.11579132080078, "blob_id": "168604a952cde3ca1bf9c585269f02cbe8799eb5", "content_id": "faf27b5c6ea93eff7b0851b7cec91ab33c1efa0b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 3525, "license_type": "no_license", "max_line_length": 170, "num_lines": 95, "path": "/p3/project/modules/Handler.js", "repo_name": "xj-m/JHU-19F-WebSecurity", "src_encoding": "UTF-8", "text": "man = chrome.extension.getBackgroundPage().man;\n\nman.Handler = new function () {};\n\nfunction sleep(ms) {\n return new Promise(resolve => setTimeout(resolve, ms));\n}\nman.Handler.checkTabImg = async function (winId, tabId) {\n console.log(\"Handle: checkTabImg called\");\n thePromise = new Promise(resolve => {\n chrome.tabs.captureVisibleTab(winId, function (dataUrl) {\n newImgUrl = dataUrl;\n oldImgUrl = man.Store.getImg(winId, tabId);\n resolve();\n });\n });\n await thePromise;\n if (oldImgUrl == null) {\n console.log(\"Handle: oldImg is null, prepare capture and save\");\n } else {\n console.log(\"Handle: got oldImg, prepare compare\");\n result = await man.ImgHandler.compareImgUrl(oldImgUrl, newImgUrl);\n console.log(\"handler get result\" + result);\n man.Handler.highlightTab(winId, tabId, result);\n console.log(\"Handle: checkTabImg completed\");\n }\n return;\n};\n\nman.Handler.highlightTab = function (winId, tabId, diff) {\n console.log(\"Handle: highlight called, need implement\");\n if (diff.length == 0) {\n console.log(\"compare result shows no diff, so return\");\n return;\n }\n chrome.runtime.sendMessage({\n todo: 'showPageAction'\n });\n\n // get img dataUrl\n\n var imgUrl = man.ImgHandler.getHighlightImgUrl(diff);\n chrome.runtime.sendMessage({\n todo: \"addOverlay\",\n imgUrl: imgUrl\n });\n var notifOptions = {\n type: \"basic\",\n iconUrl: \"icon_48.png\",\n title: \"Alert\",\n message: \"This tab is different from previous one, be careful at possible TabNabbing attack!\"\n };\n chrome.notifications.create(\"limitNotif\", notifOptions);\n man.Handler.insertOverlay(imgUrl);\n return;\n};\nman.Handler.insertOverlay = function (imgURL) {\n chrome.tabs.executeScript({\n code: 'var div = document.createElement(\"div\");' +\n 'div.className = \"img_overlay\";' +\n 'div.style.cssText = \"position: absolute;top: 0;left: 0;width: 100%;height: 100%;background-image: url(' + imgURL + ');background-size:cover; opacity:0.3\";' +\n 'div.onclick = function() {var elements = document.getElementsByClassName(\"img_overlay\");while(elements.length > 0){elements[0].remove();}};' +\n 'document.body.appendChild(div);'\n });\n};\n\nman.Handler.handleTab = async function (winId, tabId) {\n console.log(\"Handle: handleTab start: %s %s\", winId, tabId);\n man.Store.changeCurrTab(winId, tabId);\n await man.Handler.checkTabImg(winId, tabId);\n man.Handler.keepCaptureCurrentTab(winId, tabId);\n console.log(\"Handle: onTabActivate complete\");\n return;\n};\n\nman.Handler.keepCaptureCurrentTab = async function (winId, tabId) {\n while (winId == man.Store.curWinId && tabId == man.Store.curTabId) {\n console.log(\"tab id match, capture %s %s\", winId, tabId);\n windowStore = man.Store.imgStore[String(winId)];\n if (windowStore == null) {\n man.Store.imgStore[String(winId)] = {};\n windowStore = man.Store.imgStore[String(winId)];\n }\n chrome.tabs.captureVisibleTab(winId, function (dataUrl) {\n tabStore = windowStore[String(tabId)];\n if (tabStore == null) {\n tabStore = 
{};\n            }\n            tabStore[\"imgUrl\"] = dataUrl;\n            man.Store.imgStore[String(winId)][String(tabId)] = tabStore;\n        });\n        await sleep(man.Store.speed * 1000);\n    }\n    console.log(\"now keepCapture stopped\");\n};" }, { "alpha_fraction": 0.6895522475242615, "alphanum_fraction": 0.7044776082038879, "avg_line_length": 22.928571701049805, "blob_id": "54ebeb83b0bfc507ab5b8284054b1f82d067f840", "content_id": "cc44601eab512f52cf90adf86cdee1e396e4cddd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 335, "license_type": "no_license", "max_line_length": 56, "num_lines": 14, "path": "/P1/P1_project/__init__.py", "repo_name": "xj-m/JHU-19F-WebSecurity", "src_encoding": "UTF-8", "text": "from flask import Flask\nimport os\nfrom flask_sqlalchemy import SQLAlchemy\n\nbasedir = os.path.abspath(os.path.dirname(__file__))\n\napp = Flask(__name__)\n\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + \\\n    os.path.join(basedir, 'mydb.db')\napp.config['SECRET_KEY'] = 'vnkdjnfjknfl1232#'\ndb = SQLAlchemy(app)\n\nfrom P1 import main\n" }, { "alpha_fraction": 0.7754698395729065, "alphanum_fraction": 0.7873392701148987, "avg_line_length": 111.33333587646484, "blob_id": "21dac693721814c4ca580de5a77f03e800d68e79", "content_id": "7c9394367e99eda104dbd01b71420c7d29f36bf0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1011, "license_type": "no_license", "max_line_length": 263, "num_lines": 9, "path": "/p3/project/README.md", "repo_name": "xj-m/JHU-19F-WebSecurity", "src_encoding": "UTF-8", "text": "# README\n\n1. No special requirements for installation.\n2. The time interval for taking snapshots and the number of sub-images are stored in `modules/Store.js`; they are currently set to 1 second and 20\\*20 pieces, and can always be modified\n3. Resemble is not used in this implementation; the comparison is simply based on the image dataUrls.\n4. This implementation supports tabs from multiple windows.\n5. Any area that is not the same as the last time the tab was viewed is marked in red, and a click removes all marked areas by class. (This is achieved by overlaying a div whose background is an image that carries the difference information)\n6. The permissions are minimal; I personally added the notification permission, which can be removed without hurting the core functionality.\n7. 
For \"provide a color coding in the task bar to alert the user of potential changes\", I use the `sendMessage` and `pageAction` APIs; I also added a function to send a notification to the user (but this is not supported by my computer's OS, so I didn't test it)\n" }, { "alpha_fraction": 0.6734007000923157, "alphanum_fraction": 0.7138047218322754, "avg_line_length": 34.35714340209961, "blob_id": "694f9ff5a093f19b3d730473fae74d362599414a", "content_id": "fc2dd7e06b58a09b60c4964e503723932ff47496", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1489, "license_type": "no_license", "max_line_length": 206, "num_lines": 42, "path": "/P1/P1_project/README.md", "repo_name": "xj-m/JHU-19F-WebSecurity", "src_encoding": "UTF-8", "text": "## Dependencies\n\n```shell\npip install -r requirements.txt\n```\n\nNote: if `ModuleNotFoundError: No module named 'flask_socketio'`, then run\n\n```shell\nsudo pip3 install flask-socketio\n```\n\n## Run the app\n\n```shell\nexport FLASK_APP=main.py\nflask run\n```\n\n(The command line should then show ` * Running on http://127.0.0.1:5000/ (Press CTRL+C to quit)`)\n\n* Then go to `http://127.0.0.1:5000` in the browser (the first tab will redirect to `http://127.0.0.1:5000/1`)\n* Then open another tab in the browser with the same address: `http://127.0.0.1:5000` (the second tab will redirect to `http://127.0.0.1:5000/2`)\n* Start playing\n* To restart the game, rerun the app and refresh the browser\n\n## Possible problems\n\n* I used Anaconda for the Python environment and packages\n* If `ModuleNotFoundError: No module named 'XXX'`, then the modules need to be installed manually\n\n* When I run the app in debug mode, the client-side console may show some errors, which do not affect the game\n* I used sqlite3 for data storage; the db file is `/mydb.db`. I use macOS, and the config of the path to this db file works fine on my computer; the config is located at `__init__.py`, and the config is \n\n```python\nimport os\nbasedir = os.path.abspath(os.path.dirname(__file__))\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + \\\n    os.path.join(basedir, 'mydb.db')\n```\n\n* When I run the app with the VSCode debugger, sometimes I need to restart the browser to restart the game, and sometimes may need to restart VSCode as well. 
" }, { "alpha_fraction": 0.6571125388145447, "alphanum_fraction": 0.6581740975379944, "avg_line_length": 28, "blob_id": "097371496c4333fd98e71308955c77f5738b0435", "content_id": "035ad6f0a23bfb0e8a0c81a7ac631caf0dd2cbc1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1884, "license_type": "no_license", "max_line_length": 76, "num_lines": 65, "path": "/project_backup/modules/Handler.js", "repo_name": "xj-m/JHU-19F-WebSecurity", "src_encoding": "UTF-8", "text": "man = chrome.extension.getBackgroundPage().man;\n\nman.Handler = new function () {};\n\nman.Handler.capture = function (winId, tabId) {\n console.log(\"Handle: capture for %s %s, need implement\", winId, tabId);\n // go to target page\n // capture\n img = null;\n return img;\n};\n\nman.Handler.compareImg = function (img1, img2) {\n // TODO:\n // split, compare, return result and s\n return;\n};\n\nman.Handler.saveCurImg = function(){\n curTabId = man.Store.curTabId;\n curWinId = man.Store.curWinId;\n curImg = man.Handler.capture(man.Store.curWinId, man.Store.curTabId);\n man.Store.saveImg(curWinId, curTabId, curImg);\n};\n\nman.Handler.checkTabImg = function (winId, tabId) {\n console.log(\"Handle: checkTabImg called\");\n newImg = man.Handler.capture(winId, tabId);\n oldImg = man.Store.getImg(winId, tabId);\n if (oldImg == null) {\n console.log(\"Handle: oldImg is null, prepare capture and save\");\n return;\n } else {\n console.log(\"Handle: got oldImg, prepare compare see if highlight\");\n // TODO: compare's result, how to highlight\n result = man.Handler.compareImg(oldImg, newImg);\n man.highlightTab(winId, tabId);\n }\n self.Store.saveImg(winId, tabId, newImg);\n console.log(\"Handle: checkTabImg completed\");\n return;\n};\n\nman.Handler.highlight = function (tabId) {\n console.log(\"Handle: highlight called, need implement\");\n // TODO:\n return;\n};\n\nman.Handler.splitIntoSquires = function (img) {\n console.log(\"Handle: split called, need implement\");\n return;\n};\n\nman.Handler.handleTab = function (winId, tabId) {\n console.log(\"Handle: handleTab start: %s %s\", winId, tabId);\n // save previous img\n man.Handler.saveCurImg();\n man.Store.changeCurrTab(winId, tabId);\n\n man.Handler.checkTabImg(winId, tabId);\n console.log(\"Handle: onTabActivate complete\");\n\n return;\n};" }, { "alpha_fraction": 0.6916890144348145, "alphanum_fraction": 0.6943699717521667, "avg_line_length": 40.44444274902344, "blob_id": "258d46d71884d145852095d9147575f7f20294ec", "content_id": "d5eba96988ee11ba8ce5c8caaadce15aca19219a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 373, "license_type": "permissive", "max_line_length": 78, "num_lines": 9, "path": "/p3/learn/chrome_tutorial/PageFontStyle/eventPage.js", "repo_name": "xj-m/JHU-19F-WebSecurity", "src_encoding": "UTF-8", "text": "// NOTE: content script comunicate with event page, use json\nchrome.runtime.onMessage.addListener(function(request, sender, sendResponse) {\n if (request.todo == \"showPageAction\") {\n chrome.tabs.query({ active: true, currentWindow: true }, function(tabs) {\n // NOTE: return tabs that satisfy the conditions\n chrome.pageAction.show(tabs[0].id);\n });\n }\n});\n" }, { "alpha_fraction": 0.6706214547157288, "alphanum_fraction": 0.7197740077972412, "avg_line_length": 22.289474487304688, "blob_id": "28c2e3c27e223764d68aa258f9d245d31f5c152f", "content_id": "f8779201f2aa4e18b9899df4ee53e06716a6fe5b", "detected_licenses": [], 
"is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1786, "license_type": "no_license", "max_line_length": 149, "num_lines": 76, "path": "/P2/P2.md", "repo_name": "xj-m/JHU-19F-WebSecurity", "src_encoding": "UTF-8", "text": "# P2\n\n- [part2 report](/Users/xiangjun/Library/Mobile Documents/com~apple~CloudDocs/xj_note/Academic/640/JHU-19F-WebSecurity/P2/P2_part2_report.md)\n- [requirement pdf](/Users/xiangjun/Library/Mobile Documents/com~apple~CloudDocs/xj_note/Academic/640/JHU-19F-WebSecurity/P2/proj2.pdf)\n\n## patch\n\nFor student3, code patch level6: it only uses `escape()` to process the url, so alert can be triggered by passing \"//127.0.0.1/evil.js\"\n\n## TODO\n\n- [x] Flask url classify\n\n## note\n\n- Check if virtual environment if `flask run` not working\n- Blueprint name shouldn’t be same as route function name\n- For different app, VSCode open different folder\n\n## Bugs\n\n- Cannot comment in jinja\n- The browser auto defend the level1 attack\n- Check inspector everytime\n\n## SCP\n\n- [简单例子](https://juejin.im/post/5c30c04651882524a1414406)\n- [2.0 vs 3.0](https://www.w3.org/TR/CSP3/#changes-from-level-2)\n- [tutorial and examples](https://content-security-policy.com)\n- [CSP 教程!](https://paper.seebug.org/423/)\n- [CSP doc](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy/script-src)\n\n## Workflow\n\n### Part3\n\n- [x] Research\n- [x] Rebuild and test web\n- [ ] Apply scp rule from 1 to 6\n\n## Archive\n\n### Part3\n\n#### Research\n\n1. Diff 2 and 3\n2. Post to piazza to make sure\n3. Go to other work\n\n#### Rebuild web\n\n1. Locate place\n2. Revert to old version\n3. Test vul\n\n### Part2\n\n#### start\n\n1. [Vscode open folder](/Users/xiangjun/Library/Mobile Documents/com~apple~CloudDocs/xj_note/Academic/640/JHU-19F-WebSecurity/P2/P2_project/xss_game)\n2. [Google download source](https://xss-game.appspot.com/level5)\n3. [Level](#Level)\n\n#### Level\n\n1. Local build\n2. Patch\n3. Write reports\n\n#### Local build\n\n1. Flask new blueprint test: `views`\n2. Add html, js\n3. 
Add static\n" }, { "alpha_fraction": 0.4880083501338959, "alphanum_fraction": 0.4973931312561035, "avg_line_length": 29.617021560668945, "blob_id": "959d52847cfa03842216e48a937bd629af9eef4e", "content_id": "dd4125b5bb20ce7d898bd6a283ed26efe8508dd7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 2877, "license_type": "no_license", "max_line_length": 82, "num_lines": 94, "path": "/p3/project/modules/ImgHandler.js", "repo_name": "xj-m/JHU-19F-WebSecurity", "src_encoding": "UTF-8", "text": "man = chrome.extension.getBackgroundPage().man;\n\nman.ImgHandler = new function () {}\n\nman.ImgHandler.getHighlightImgUrl = function (cuts) {\n wSize = man.Store.wSize;\n hSize = man.Store.hSize;\n var result = \"\";\n if (cuts.length > 0) {\n var canvas = document.createElement(\"canvas\");\n w = man.Store.w;\n h = man.Store.h;\n wSize = man.Store.wSize;\n hSize = man.Store.hSize;\n\n canvas.width = w * wSize;\n canvas.height = h * hSize;\n var ctx = canvas.getContext(\"2d\");\n\n for (var i = 0; i < cuts.length; i++) {\n var x = (w * cuts[i][0]) ;\n var y = (h * cuts[i][1]) ;\n ctx.beginPath();\n ctx.rect(x, y, w, h);\n ctx.fillStyle = \"red\";\n ctx.fill();\n }\n\n result = canvas.toDataURL();\n }\n return result;\n};\n\nman.ImgHandler.compareImgUrl = async function (img1Url, img2Url) {\n console.log(\"compare img start\");\n cut1 = await man.ImgHandler.cutImgUrl(img1Url);\n cut2 = await man.ImgHandler.cutImgUrl(img2Url);\n console.log(\"got two cuts\");\n // result = await man.ImgHandler.compareImgCuts(cut1, cut2);\n result = await man.ImgHandler.compareImgCuts(cut1, cut2);\n return result;\n};\n\nman.ImgHandler.compareImgCuts = async function (cut1, cut2) {\n console.log(\"start compare two cuts\");\n result = [];\n for (i = 0; i < cut1.length; i++) {\n for (j = 0; j < cut1[i].length; j++) {\n if (cut1[i][j] != cut2[i][j]) {\n result.push([i, j]);\n }\n }\n }\n return result;\n};\n\nman.ImgHandler.cutImgUrl = async function (dataUrl) {\n console.log(\"Handler: cut img start\");\n var img = new Image();\n cuts = [];\n const imageLoadPromise = new Promise(resolve => {\n img = new Image();\n //Use the onload handler, otherwise, the content will not yet be available\n //Prepare the canvas to extract image data\n img.onload = function () {\n canvas = document.createElement(\"canvas\");\n ctx = canvas.getContext(\"2d\");\n wSize = man.Store.wSize;\n hSize = man.Store.hSize;\n w = img.width / wSize;\n h = img.height / hSize;\n man.Store.w = w;\n man.Store.h = h;\n\n for (var i = 0; i < wSize; i++) {\n cuts[i] = [];\n for (var j = 0; j < hSize; j++) {\n x = (-w * i) ;\n y = (-h * j) ;\n // y = (h * i) <= h ? 
0 : -h;\n canvas.width = w;\n canvas.height = h;\n ctx.drawImage(this, x, y, w * wSize, h * hSize);\n\n cuts[i].push(canvas.toDataURL());\n }\n }\n resolve();\n };\n img.src = dataUrl;\n });\n await imageLoadPromise;\n return cuts;\n};" }, { "alpha_fraction": 0.5778103470802307, "alphanum_fraction": 0.5895276665687561, "avg_line_length": 21.2032527923584, "blob_id": "77fc2d44ad86d11802c4c81424776f4f23c0206b", "content_id": "4dffc1d017f0c9f15f909eb18f197f1be3fb6ca7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2731, "license_type": "no_license", "max_line_length": 62, "num_lines": 123, "path": "/P1/P1_project/main.py", "repo_name": "xj-m/JHU-19F-WebSecurity", "src_encoding": "UTF-8", "text": "from flask import Flask, render_template, redirect\nfrom flask import render_template, request, url_for, flash\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_socketio import SocketIO\nimport sqlite3\nfrom flask import g\nimport os\nfrom P1 import app, db\nfrom .models import Board, Status\nfrom .checker import checkWin, resetBoard\n\n\nsocketio = SocketIO(app)\n\n# init db\n\n\ndef get_pNum():\n return Status.query.filter_by(name=\"pNum\").first()\n\n\ndef get_turn():\n return Status.query.filter_by(name=\"turn\").first()\n\n\ndef get_gameOver():\n return Status.query.filter_by(name=\"gameOver\").first()\n\n\ndef get_board(x, y):\n return Board.query.filter_by(x=x, y=y).first()\n\n\ndef init_db():\n for i in range(19):\n for j in range(19):\n b = Board.query.filter_by(x=i, y=j).first()\n b.v = 0\n turn = Status.query.filter_by(name=\"turn\").first()\n turn.val = 1\n pNum = Status.query.filter_by(name=\"pNum\").first()\n pNum.val = 1\n gameOver = Status.query.filter_by(name=\"gameOver\").first()\n gameOver.val = 0\n db.session.commit()\n\n\ninit_db()\n\n\[email protected]('/chat')\ndef sessions():\n return render_template('session.html')\n\n\[email protected]('/')\ndef index():\n pNum = get_pNum()\n if(pNum.val == 1):\n pNum.val = 2\n db.session.commit()\n return redirect('/1')\n else:\n pNum.val = 1\n db.session.commit()\n return redirect('/2')\n\n\[email protected]('/1')\ndef game1():\n return render_template('index1.html')\n\n\[email protected]('/2')\ndef game2():\n return render_template('index2.html')\n\n\[email protected]('put piece')\ndef handle_put_piece(json, methods=['GET', 'POST']):\n if(get_pNum != 2):\n return\n print('client put piece:' + str(json))\n pTurn = get_turn()\n gameOver = get_gameOver()\n x, y, pNum = json['x'], json['y'], json['pNum']\n # check turn\n if pNum != pTurn.val:\n return\n # check game status\n if(gameOver.val == 1):\n return\n # check board\n boardXY = get_board(x, y)\n if (x < 19 and y < 19 and boardXY.v == 0):\n # socket draw\n boardXY.v = pNum\n db.session.commit()\n socketio.emit('put piece res', json)\n # check win\n gameOver.val = checkWin(x, y, pNum)\n if(gameOver.val == 1):\n socketio.emit('winner res', {\n 'pNum': pNum\n })\n if(pTurn.val == 1):\n pTurn.val = 2\n else:\n pTurn.val = 1\n db.session.commit()\n\n\[email protected]('restart game')\ndef restartGame(json, methods=['GET', 'POST']):\n print('client restart game')\n init_db()\n resetBoard()\n socketio.emit('restart game res', {})\n\n\nif __name__ == '__main__':\n app.run(threaded=True)\n socketio.run(app, debug=True)\n" }, { "alpha_fraction": 0.6380434632301331, "alphanum_fraction": 0.6521739363670349, "avg_line_length": 31.85714340209961, "blob_id": "c3e1001eda0396b649b3101f3b56942eae8c9147", "content_id": 
"49226b2d55fb402f33ee4c34e50e4ef59242160e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 920, "license_type": "no_license", "max_line_length": 93, "num_lines": 28, "path": "/P2/P2_project/part3 csp3/views/level1.py", "repo_name": "xj-m/JHU-19F-WebSecurity", "src_encoding": "UTF-8", "text": "import string\nimport random\nfrom flask import Flask, render_template, redirect, request, Blueprint, Markup, make_response\n\nlevel1Bp = Blueprint('level1', __name__, url_prefix=\"/level1\")\n\n\ndef randomString(stringLength=10):\n \"\"\"Generate a random string of fixed length \"\"\"\n letters = string.ascii_lowercase\n return ''.join(random.choice(letters) for i in range(stringLength))\n\n# level1CSP = \"\"\n\n\[email protected](\"/\", methods=['GET', 'POST'])\ndef level1():\n x = randomString()\n level1CSP = \"script-src 'nonce={}'\".format(x)\n if request.args.get('query') == None:\n r = make_response(render_template('level1.html', mode=\"main\", x=x))\n else:\n query = Markup(request.args.get('query'))\n r = make_response(render_template(\n 'level1.html', mode=\"msg\", msg=query, x=x))\n r.headers['Content-Security-policy'] = level1CSP\n r.headers['X-XSS-Protection'] = '0'\n return r\n" }, { "alpha_fraction": 0.7312633991241455, "alphanum_fraction": 0.7601712942123413, "avg_line_length": 21.238094329833984, "blob_id": "75611491972fb41150042e55e6d8c150f55c2a16", "content_id": "ff68a50270468b7282256a664f3bd909ad9ea714", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 934, "license_type": "no_license", "max_line_length": 146, "num_lines": 42, "path": "/p3/project3.md", "repo_name": "xj-m/JHU-19F-WebSecurity", "src_encoding": "UTF-8", "text": "# Project 3\n\n## links\n\n[attack page](http://www.azarask.in/blog/post/a-new-type-of-phishing-attack/)\n[test page location](</Users/xiangjun/Library/Mobile\\ Documents/com~apple~CloudDocs/xj_note/Academic/640/repo/p3/test>)\n\n## learn\n\ndebug pop up: right click icon, select inspect pop-up\ndebug content script: on the page\n\n[example with note](/Users/xiangjun/Library/Mobile Documents/com~apple~CloudDocs/xj_note/Academic/640/repo/p3/learn/chrome_tutorial/PageFontStyle)\n\n## steps\n\n1. when leave tab, take snapshot\n2. when come back, compare and highlight\n\n### note\n\nMinimum permission\n\n#### compare algorithm\n\nCompare snapshot: locally via JS, use `Resemble.js`\n\n[piazza about resemble](https://piazza.com/class/k02q73d937hoe?cid=77)\n\n1. split in squares\n2. compare\n3. color difference\n\n## Design\n\ncreate: change current_tab\nnew activate:\n\n1. capture current_tab\n2. change current_tab\n3. compare with old capture: resemble\n4. 
highlight\n" }, { "alpha_fraction": 0.6363636255264282, "alphanum_fraction": 0.6453055143356323, "avg_line_length": 27, "blob_id": "36a96b789e6637d4bd858155ccffe0e2a0676d71", "content_id": "1fe2419ff47667e54f2a044957a04590c7cf2554", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 671, "license_type": "no_license", "max_line_length": 74, "num_lines": 24, "path": "/project_backup/modules/Store.js", "repo_name": "xj-m/JHU-19F-WebSecurity", "src_encoding": "UTF-8", "text": "man = chrome.extension.getBackgroundPage().man;\n\nman.Store = new function () {\n this.tabs = {};\n // TODO: change back\n this.curTabId = 100;\n this.curWinId = 100;\n}\n\nman.Store.changeCurrTab = function (winId, tabId) {\n man.Store.curWinId = winId;\n man.Store.curTabId = tabId;\n console.log(\"Store: current tab changed: %s %s\", winId, tabId);\n};\nman.Store.getImg = function (winId, tabId) {\n console.log(\"Store: getImg for %s %s, need implement\", winId, tabId);\n // return null if no record\n return;\n};\n\nman.Store.saveImg = function (winId, tabId, img) {\n console.log(\"Store: saveImg for %s %s, need implement\", winId, tabId);\n return;\n};" }, { "alpha_fraction": 0.6165191531181335, "alphanum_fraction": 0.6165191531181335, "avg_line_length": 17.88888931274414, "blob_id": "c1315a434552572d5d56bb6e243322a5bb955044", "content_id": "8b393207745613cae34fbd4527e298a32bc7218c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 339, "license_type": "no_license", "max_line_length": 47, "num_lines": 18, "path": "/p3/project/modules/TabStore.js", "repo_name": "xj-m/JHU-19F-WebSecurity", "src_encoding": "UTF-8", "text": "man = chrome.extension.getBackgroundPage().man;\n\nman.Store = new function(){\n this.tabs = {};\n this.cur_tab = null;\n};\n\nman.Store.getImg = function(tabId){\n console.log(\"store getImg called\");\n // TODO:\n return;\n};\n\nman.Store.saveImg = function(tabId){\n console.log(\"store saveImg called\");\n // TODO:\n return;\n};" }, { "alpha_fraction": 0.6039978265762329, "alphanum_fraction": 0.6320907473564148, "avg_line_length": 35.29411697387695, "blob_id": "2cad5d0cea985733f419e1f9f946b49e85c59cdb", "content_id": "ba5cb96c392cd9d48dec9d2b58906cb114c14c1d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1922, "license_type": "no_license", "max_line_length": 100, "num_lines": 51, "path": "/P2/P2_project/part3 csp2/views/level4.py", "repo_name": "xj-m/JHU-19F-WebSecurity", "src_encoding": "UTF-8", "text": "from flask import Flask, render_template, redirect, request, Blueprint, make_response\nimport base64\nimport hashlib\nlevel4Bp = Blueprint('level4', __name__, url_prefix=\"/level4\")\n# level4CSP = \"script-src-elem 'sha256-l5N3PQZUTfBrAgs6Cd/8DUT5K9Xqti8830NgkUx6TNQ=' 'sha256-1by291HWq0c1xmoYrazFZul7uPqRlwJbhyMQTqWwx2o='\"\n# level4CSP = \"script-src-elem 'sha256-l5N3PQZUTfBrAgs6Cd/8DUT5K9Xqti8830NgkUx6TNQ=' 'sha256-mGkvh1o1NbqhmaNycGqxPAiOdO8oggOM0XPiaSuo7LU='\"\n# level4CSP = \"script-src-elem 'sha256-/90/sYuOXlGCyF2ApZiOOLhVW/5BB6I/37UI9/s0WDM='\"\n\n\[email protected](\"/\", methods=['GET', 'POST'])\ndef level4():\n level4CSP = \"script-src 'self'\"\n if not request.args.get('timer'):\n r = make_response(render_template(\"level4.html\"))\n else:\n timer = request.args.get('timer', 1)\n try:\n int(timer)\n except ValueError:\n timer = 1\n script = \"document.getElementById('img').onload = function() {startTimer('\" 
+ timer + \"');}\"\n level4CSP += \" \\'sha256-\" + \\\n base64.b64encode(hashlib.sha256(script.encode(\n 'utf-8')).digest()).decode(\"utf-8\") + \"\\'\"\n r = make_response(render_template(\"level4_timer.html\", timer=timer))\n r.headers['Content-Security-policy'] = level4CSP\n return r\n\n\n'''class MainPage(webapp.RequestHandler):\n \n  def render_template(self, filename, context={}):\n    path = os.path.join(os.path.dirname(__file__), filename)\n    self.response.out.write(template.render(path, context))\n \n  def get(self):\n    # Disable the reflected XSS filter for demonstration purposes\n    self.response.headers.add_header(\"X-XSS-Protection\", \"0\")\n \n    if not self.request.get('timer'):\n      # Show main timer page\n      self.render_template('index.html')\n    else:\n      # Show the results page\n      timer= self.request.get('timer', 0)\n      self.render_template('timer.html', { 'timer' : timer })\n     \n    return\n \napplication = webapp.WSGIApplication([ ('.*', MainPage), ], debug=False)\n'''\n" }, { "alpha_fraction": 0.5495391488075256, "alphanum_fraction": 0.559907853603363, "avg_line_length": 20.700000762939453, "blob_id": "539580c3e3ec4ff056a36e4706702eac24b0976f", "content_id": "9ae45a63c2b2dafadaa4db9fc9e584fb54582bff", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 868, "license_type": "no_license", "max_line_length": 53, "num_lines": 40, "path": "/P1/P1_project/models.py", "repo_name": "xj-m/JHU-19F-WebSecurity", "src_encoding": "UTF-8", "text": "from P1 import db\n\n\nclass Board(db.Model):\n x = db.Column(db.Integer, primary_key=True)\n y = db.Column(db.Integer, primary_key=True)\n v = db.Column(db.Integer)\n\n def __init__(self, x, y, v):\n self.x = x\n self.y = y\n self.v = v\n\n def __repr__(self):\n return 'board'\n\n\nclass Status(db.Model):\n name = db.Column(db.String(80), primary_key=True)\n val = db.Column(db.Integer)\n\n def __init__(self, name, val):\n self.name = name\n self.val = val\n\n def __repr__(self):\n return 'status'\n\n\nclass User(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(255))\n email = db.Column(db.String(255), unique=True)\n\n def __init__(self, name, email):\n self.name = name\n self.email = email\n\n def __repr__(self):\n return '<User %r>' % self.name\n" }, { "alpha_fraction": 0.6654309630393982, "alphanum_fraction": 0.6811862587928772, "avg_line_length": 32.6875, "blob_id": "a84627eab3c8ae36a5bf62c87566375411a1c837", "content_id": "43b6747eb6f9b16cd6a3dee1a8d69239f2d8d9a4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1079, "license_type": "no_license", "max_line_length": 102, "num_lines": 32, "path": "/P2/P2_project/part3 csp2/views/level5.py", "repo_name": "xj-m/JHU-19F-WebSecurity", "src_encoding": "UTF-8", "text": "from flask import Flask, render_template, redirect, request, Blueprint, make_response\n\nlevel5Bp = Blueprint('level5', __name__, url_prefix=\"/level5\")\nlevel5CSP = \"script-src 'self'\"\n\n\[email protected](\"/\", methods=['GET', 'POST'])\ndef level5():\n r= make_response(render_template('level5_welcome.html'))\n r.headers['Content-Security-policy'] = level5CSP\n return r\n\n\[email protected]('/signUp', methods=['GET', 'POST'])\ndef signUp():\n r = make_response(render_template('level5_signUp.html', next=request.args.get('next')))\n r.headers['Content-Security-policy'] = level5CSP\n return r\n\n\[email protected]('/confirm', methods=['GET', 
'POST'])\ndef confirm():\n next = request.args.get('next','welcome')\n r =make_response(render_template('level5_confirm.html',next = request.args.get('next','welcome')))\n r.headers['Content-Security-policy'] = level5CSP\n return r\n\[email protected]('/welcome', methods=['GET', 'POST'])\ndef welcome():\n r= make_response(render_template('level5_welcome.html'))\n r.headers['Content-Security-policy'] = level5CSP\n return r\n\n" }, { "alpha_fraction": 0.5996741056442261, "alphanum_fraction": 0.613253653049469, "avg_line_length": 20.91666603088379, "blob_id": "51eda6f70b1fe6cc06522230348e3e7e09c6a440", "content_id": "5380311612478d5a7145bc90c7c2fe093b23a5d4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1847, "license_type": "no_license", "max_line_length": 115, "num_lines": 84, "path": "/P2/P2_part2_report.md", "repo_name": "xj-m/JHU-19F-WebSecurity", "src_encoding": "UTF-8", "text": "# part 2 report\n\n[^link]: [XSS protection](https://tech.meituan.com/2018/09/27/fe-security.html)\n\n\n\n## How to run the website\n\n\n\n## Patch details\n\n### Level1\n\n* Location: `level1.html` \n\n* Instead of insert HTML elements in to page, I pass the user input as a string to the page\n\n```html\n Sorry, no results were found for <b> {{msg}} </b>\n <a href='?'>Try again</a>\t\n```\n\n### Level2 \n\n* Location: `level2.html` \n* Add the `escapeHTML` func to escape HTML element\n\n```javascript\n html += \"<blockquote>\" + \tescapeHTML(posts[i].message) + \"</blockquote>\";\n```\n\n```javascript\nfunction escapeHTML(unsafe) {\n return unsafe\n .replace(/&/g, \"&amp;\")\n .replace(/</g, \"&lt;\")\n .replace(/>/g, \"&gt;\")\n .replace(/\"/g, \"&quot;\")\n .replace(/'/g, \"&#039;\");\n}\n```\n\n### Level3\n\n* Parse the `num` into int\n\n```javascript\nhtml += \"<img src='/static/level3/cloud\" + parseInt(num) + \".jpg' />\";\n```\n\n### Level4\n\nJinja can protect from XSS attack by using double or single quotes for Jinja expressions\n\n> Refers to: https://flask.palletsprojects.com/en/1.0.x/security/\n\n```javascript\n<img src=\"/static/loading.gif\" onload=\"startTimer(\" + \" {{ timer }}\" +\");\" />\n```\n\n### Level5\n\nAt server side, I set the next param always to “welcome”, so that the client won’t be able to modify the next param\n\n```python\[email protected]('/signUp', methods=['GET', 'POST'])\ndef signUp():\n return render_template('level5_signUp.html', next = 'confirm')\n```\n\n### Level6\n\nUse regex to prevent loading the out side javascript file to run\n\n```\nif (url.match(/\\/\\//)) {\n setInnerText(document.getElementById(\"log\"),\n \"Sorry, cannot load a URL containing \\/\\/.\");\n return;\n}\n```\n\n* Second way: I can just set the java file path as always the same file, so no other code can be executed " }, { "alpha_fraction": 0.6777042150497437, "alphanum_fraction": 0.7163355350494385, "avg_line_length": 31.321428298950195, "blob_id": "c4bf6608d1a83281a0cfc2eb65d11b57ba31e22f", "content_id": "62accb91dbcd2c5e972e374eeef4b4fb02ce540e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 916, "license_type": "no_license", "max_line_length": 112, "num_lines": 28, "path": "/P1/p1.md", "repo_name": "xj-m/JHU-19F-WebSecurity", "src_encoding": "UTF-8", "text": "# P1\n\n[github project](https://github.com/xj-m/JHU-19F-WebSecurity.git)\n\n## examples\n\n[chat rooms](https://github.com/miguelgrinberg/Flask-SocketIO-Chat)\n\n[flask 
sqlite](https://github.com/uwi-info3180/flask-sqlite)\n\n## Note\n\n* [get var from render](https://stackoverflow.com/questions/37259740/passing-variables-from-flask-to-javascript)\n* [js not change](https://stackoverflow.com/questions/41144565/flask-does-not-see-change-in-js-file)\n* [add requirement.txt](https://stackoverflow.com/questions/31684375/automatically-create-requirements-txt)\n* `db.create_all()`\n* `__init__` represents the string of the app folder\n* `Range` does not include the end value\n\n- `Run app.js`\n- ` <script src=\"/static/js/add_user.js\" type='text/javascript'></script>`\n- `let postBtn = $(\"button#postBtn\");` is searched by `name`\n\n## Bug\n\n* Check `NOTE1111`\n* Add ` {{ form.csrf_token }}`\n* `Format on save` can cause an error; it may move the import to the top\n\n" }, { "alpha_fraction": 0.5339035987854004, "alphanum_fraction": 0.5468391180038452, "avg_line_length": 35.318180084228516, "blob_id": "c2a0900ac1f6c24e50b48c7e50076d6db9ec3e1f", "content_id": "04095d592e206a3cfa57e881d9df902d456febef", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 4793, "license_type": "no_license", "max_line_length": 133, "num_lines": 132, "path": "/P2/P2_project/part3 csp3/templates/index.html", "repo_name": "xj-m/JHU-19F-WebSecurity", "src_encoding": "UTF-8", "text": "<!doctype html>\n<html>\n\n<head>\n\n <link type=\"text/css\" rel=\"stylesheet\" href=\"/static/game.css\" />\n <script src=\"/static/game.js\"></script>\n <title>XSS game</title>\n\n</head>\n\n<body>\n <h1 id=\"level-title\">Warning: You are entering the XSS game area</h1>\n\n <div id=\"instructions\">\n\n <h2>Welcome, recruit!</h2>\n <a href=\"//www.google.com/about/appsecurity/learning/xss/index.html\">Cross-site\n scripting</a> (XSS) bugs are one of the most common and dangerous types of vulnerabilities in Web\n applications. These nasty buggers can allow your enemies to steal or modify user data in your apps and you must\n learn to dispatch them, pronto!\n\n <br><br> At Google, we know very well how important these bugs are. In fact, Google is so serious about finding\n and fixing XSS issues that <a href=\"//www.google.com/about/appsecurity/reward-program/index.html#rewards\">we are\n paying mercenaries up to $7,500</a> for dangerous XSS bugs discovered in our most sensitive products.\n\n <br><br> In this training program, you will learn to find and exploit XSS bugs. 
You'll use this knowledge to\n confuse and infuriate your adversaries by preventing such bugs from happening in your applications.\n\n <br><br> There will be cake at the end of the test.\n <br><br>\n\n\n <h2>Training progress:</h2>\n <table id=\"training-progress\">\n\n <tr>\n <td class=\"level-name\">Level 1:\n <a href=\"/level1\"> Hello, world of XSS</a></td>\n <td class=\"level-status\">\n\n &#x2713;\n\n </td>\n </tr>\n\n <tr>\n <td class=\"level-name\">Level 2:\n <a href=\"/level2\"> Persistence is key</a></td>\n <td class=\"level-status\">\n\n &#x2713;\n\n </td>\n </tr>\n\n <tr>\n <td class=\"level-name\">Level 3:\n <a href=\"/level3\"> That sinking feeling...</a></td>\n <td class=\"level-status\">\n\n &#x2713;\n\n </td>\n </tr>\n\n <tr>\n <td class=\"level-name\">Level 4:\n <a href=\"/level4\"> Context matters</a></td>\n <td class=\"level-status\">\n\n &#x2713;\n\n </td>\n </tr>\n\n <tr>\n <td class=\"level-name\">Level 5:\n <a href=\"/level5\"> Breaking protocol</a></td>\n <td class=\"level-status\">\n\n &#x2713;\n\n </td>\n </tr>\n\n <tr>\n <td class=\"level-name\">Level 6:\n <a href=\"/level6\"> Follow the <span id='rabbit'>&#128007;</span></a></td>\n <td class=\"level-status\">\n\n &#x2713;\n\n </td>\n </tr>\n\n </table>\n\n <br>\n <h4><a href=\"#\"\n onclick=\"this.style.display = 'none'; document.getElementById('intro-faq').style.display='block'; return false\">?</a>\n </h4>\n <div id=\"intro-faq\" style=\"display: none\">\n <h4>What's this all about?</h4>\n This security game consists of several levels resembling real-world applications which are vulnerable to XSS\n - your task will be to find the problem and attack the apps, similar to what an evil hacker might do.\n <br><br> XSS bugs are common because they have a nasty habit of popping up wherever a webapp deals with\n untrusted input. Our motivation is to highlight common coding patterns which lead to XSS to help you spot\n them in your code.\n\n <h4>Who can play?</h4>\n The game is designed primarily for developers working on Web applications who do not specialize in security.\n If you're a connoisseur of online hacking challenges you'll find the first few levels quite easy, but you\n just might learn something useful along\n the way.\n <br><br> You'll need a modern browser which supports Javascript and cookies.\n\n <h4>Is it possible to cheat at this game?</h4>\n Yes, since this is a browser-based game, you will be able to cheat by messing with the page internals in\n developer tools or editing HTTP traffic.\n <br><br> However, we're sure that you won't have to resort to that -- there are hints and source to guide\n you. 
And as your teacher once told you:\n <i>you would only be cheating yourself</i> ;-)\n\n <h4>How will I know when I'm done?</h4>\n There <b>will</b> be cake at the end of the test.\n <br><br>\n\n </div>\n</body>\n\n</html>" }, { "alpha_fraction": 0.5968992114067078, "alphanum_fraction": 0.6108527183532715, "avg_line_length": 36.94117736816406, "blob_id": "4f3cc58276e3d9d75cdbc6729664e7282284e6d2", "content_id": "7b6ad78de22707f949ce57dc5aba5075e504f399", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 645, "license_type": "no_license", "max_line_length": 92, "num_lines": 17, "path": "/P2/P2_project/rebuild/views/level1.py", "repo_name": "xj-m/JHU-19F-WebSecurity", "src_encoding": "UTF-8", "text": "from flask import Flask, render_template, redirect, request, Blueprint, Markup,make_response\n\nlevel1Bp = Blueprint('level1', __name__, url_prefix=\"/level1\")\n\n\[email protected](\"/\", methods=['GET', 'POST'])\ndef level1():\n if request.args.get('query') == None:\n r = make_response(render_template('level1.html', mode = \"main\"))\n r.headers['\"X-XSS-Protection'] = 0\n return r\n else:\n query = Markup(request.args.get('query'))\n # query = request.args.get('query')\n r = make_response(render_template('level1.html', mode = \"msg\" , msg = query))\n r.headers['X-XSS-Protection'] = '0'\n return r\n" }, { "alpha_fraction": 0.6592471599578857, "alphanum_fraction": 0.6805237531661987, "avg_line_length": 21.13768196105957, "blob_id": "98135b2507e5ed363239aa48a570520dd8333c22", "content_id": "bb32512bb0d65271954f76e8781c3aa4dc61baf0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3067, "license_type": "no_license", "max_line_length": 232, "num_lines": 138, "path": "/P2/P2_part1_report.md", "repo_name": "xj-m/JHU-19F-WebSecurity", "src_encoding": "UTF-8", "text": "# Part 1 report\n\n[^links]: [csdn solutions](https://blog.csdn.net/abc_12366/article/details/82054946) [github solutions](https://gist.github.com/pbssubhash/2f99644a4f24e8fe6b3e)\n\n## Level 1\n\n### Vulnerable source code location\n\nIt’s locate at line 45,\n\n```javascript\nmessage = \"Sorry, no results were found for <b>\" + query + \"</b>.\";\n```\n\nThe server directly return whatever typed by user\n\n### how to trigger the vulnerability\n\nType following in the search bar\n\n```js\n<script>alert(1)</script>\n```\n\n## Level 2\n\n- [ ] What’s the meaning of hint3?\n\n### Vulnerable source code location\n\nLine 30 in index.html\n\n```html\nhtml += \"\n<blockquote>\" + posts[i].message + \"</blockquote>\n\";\n```\n\nIt can filter script tag, but can be escaped by other js object\n\n### how to trigger the vulnerability\n\nPost this\n\n```javascript\n<img src='' onerror=alert(1)>\n```\n\n## Level 3\n\n- [ ] What’s the meaning of this line?: `$('#tabContent').html(html)`;\n\n### Vulnerable source code location\n\n- Line 37\n\n```javascript\nwindow.onload = function() {\n chooseTab(unescape(self.location.hash.substr(1)) || \"1\");\n};\n```\n\nThis means the web will read the url to get the location info, and pass this to `chooseTab` function\n\n- Line 17\n\n```javascript\nhtml += \"<img src='/static/level3/cloud\" + num + \".jpg' />\";\n$(\"#tabContent\").html(html);\n```\n\nThe chooseTab will insert the `num`, which is read from url, into img element\n\n### how to trigger the vulnerability\n\n- Append `'/><script>alert(1)</script>` to the url and visit\n- Another way: append `'onerror='alert(\"xss\")'>`\n\n## Level 
4\n\n### Vulnerable source code location\n\ntimer.html, line 21\n\n```html\n<img src=\"/static/loading.gif\" onload=\"startTimer('{{ timer }}');\" />\n```\n\nThe user input is insert into `onload = “startTimer('...')”`\n\n### how to trigger the vulnerability\n\nSet the input like `1');alert(1);('`\n\nHTML will find the closest `‘` `)` and pair them up, so this can add a new line of code into HTML\n\n## Level 5\n\n### Vulnerable source code location\n\nSignup.html, line 15\n\n```html\n<a href=\"{{ next }}\">Next >></a>\n```\n\nThe `next` is read from url, and next will be insert into `<a>`\n\n### how to trigger the vulnerability\n\nGo to url `....signup?next=javascript:alert(1)`\n\nThen hit next to activate `<a>` ,and `javascript:alert(1)` will be executed\n\n## Level 6\n\n### Vulnerable source code location\n\nindex.html, line 51\n\n```javascript\nwindow.addEventListener(\n \"message\",\n function(event) {\n if (event.source == parent) {\n includeGadget(getGadgetName());\n }\n },\n false\n);\n```\n\nHere `includeGadeget` create and `<script>` object from `getGadgetName` and append it into the page, however, the `getGadgetName` will get the source of the javascript from the url, so we can insert javascript and execute it at url.\n\n### how to trigger the vulnerability\n\n- I host the local server in my computer and put a js file named `test.js`, and write `alert(1)` in this file, then I append `HTTP:127.0.0.1/test.js` to the url, then hit enter, the alert follows\n- Second way: append `#data:text/plain,alert(1)` to the url and visit\n" }, { "alpha_fraction": 0.6875, "alphanum_fraction": 0.703125, "avg_line_length": 31, "blob_id": "1680f1896b65f8c0bcc48009c70c1a4cadbbd052", "content_id": "8f5f2f4a204d0b26c9accbb9c9914be3b33ce436", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 64, "license_type": "no_license", "max_line_length": 51, "num_lines": 2, "path": "/P2/bonus_point_student_3/rebuild/static/gadget.js", "repo_name": "xj-m/JHU-19F-WebSecurity", "src_encoding": "UTF-8", "text": "/* This is a completely awesome invisible gadget */\nalert(\"1\");\n" }, { "alpha_fraction": 0.6712328791618347, "alphanum_fraction": 0.6931506991386414, "avg_line_length": 32.181819915771484, "blob_id": "89b193e243e9add61106bae1157f758cf12b4418", "content_id": "bbc464f440917cd02b7536c260589cdebb91a768", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 365, "license_type": "no_license", "max_line_length": 85, "num_lines": 11, "path": "/P2/P2_project/part3 csp3/views/level6.py", "repo_name": "xj-m/JHU-19F-WebSecurity", "src_encoding": "UTF-8", "text": "from flask import Flask, render_template, redirect, request, Blueprint, make_response\n\nlevel6Bp = Blueprint('level6', __name__, url_prefix=\"/level6\")\nlevel6CSP = \"default-src 'self'\"\n\n\[email protected](\"/\", methods=['GET', 'POST'])\ndef level6():\n r = make_response(render_template('level6.html'))\n r.headers['Content-Security-policy'] = level6CSP\n return r\n" }, { "alpha_fraction": 0.6447368264198303, "alphanum_fraction": 0.6710526347160339, "avg_line_length": 24.33333396911621, "blob_id": "4ea7b12ef83834cc4b2f5422cc4af451dfab7be1", "content_id": "2648e4e0b47e162916d60757eeeb21b5d3a2fb3e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 228, "license_type": "no_license", "max_line_length": 54, "num_lines": 9, "path": "/P2/bonus_point_student_3/part3 
csp2/static/level3_onclick.js", "repo_name": "xj-m/JHU-19F-WebSecurity", "src_encoding": "UTF-8", "text": "document.getElementById(\"tab1\").onclick = function() {\n chooseTab(\"1\");\n};\ndocument.getElementById(\"tab2\").onclick = function() {\n chooseTab(\"2\");\n};\ndocument.getElementById(\"tab3\").onclick = function() {\n chooseTab(\"3\");\n};\n" }, { "alpha_fraction": 0.6324582099914551, "alphanum_fraction": 0.6491646766662598, "avg_line_length": 33.91666793823242, "blob_id": "6bbe5a95008a4ff3def80dec5dba459b15199d6d", "content_id": "fe16c96e7affe5b6ebc7fd3edda730e5327029f3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 419, "license_type": "no_license", "max_line_length": 70, "num_lines": 12, "path": "/P2/bonus_point_student_3/part2/views/level1.py", "repo_name": "xj-m/JHU-19F-WebSecurity", "src_encoding": "UTF-8", "text": "from flask import Flask, render_template, redirect, request, Blueprint\n\nlevel1Bp = Blueprint('level1', __name__, url_prefix=\"/level1\")\n\n\[email protected](\"/\", methods=['GET', 'POST'])\ndef level1():\n if request.args.get('query') == None:\n return render_template('level1.html', mode=\"main\")\n else:\n query = request.args.get('query')\n return render_template('level1.html', mode=\"msg\", msg=query)\n" }, { "alpha_fraction": 0.6703296899795532, "alphanum_fraction": 0.692307710647583, "avg_line_length": 32.09090805053711, "blob_id": "0728c37a3694687d210b1801429b02edb5fab20b", "content_id": "e83749b09c8fa847b23243b23bef33d3de09a079", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 364, "license_type": "no_license", "max_line_length": 85, "num_lines": 11, "path": "/P2/P2_project/part3 csp2/views/level2.py", "repo_name": "xj-m/JHU-19F-WebSecurity", "src_encoding": "UTF-8", "text": "from flask import Flask, render_template, redirect, request, Blueprint, make_response\n\nlevel2Bp = Blueprint('level2', __name__, url_prefix=\"/level2\")\nlevel2CSP = \"script-src 'self'\"\n\n\[email protected](\"/\", methods=['GET', 'POST'])\ndef level2():\n r = make_response(render_template(\"level2.html\"))\n r.headers['Content-Security-policy'] = level2CSP\n return r\n" }, { "alpha_fraction": 0.5817757248878479, "alphanum_fraction": 0.5922897458076477, "avg_line_length": 41.849998474121094, "blob_id": "4810a0200d838f791d3c4dedab39b8650da3cb36", "content_id": "59cbd7e79c03a6c72333e4d079eaa66b467a8cc9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 856, "license_type": "no_license", "max_line_length": 143, "num_lines": 20, "path": "/p3/project/content.js", "repo_name": "xj-m/JHU-19F-WebSecurity", "src_encoding": "UTF-8", "text": "// chrome.runtime.onMessage.addListener(function (request, sender, sendResponse) {\n// console.log(\"content here!\");\n// if (request.todo == \"addOverlay\") {\n// var div = document.createElement(\"div\");\n// div.className = \"img_overlay\";\n// imgUrl = request.imgUrl;\n// div.style.cssText = \".overlay {position: absolute;top: 0;left: 0;width: 100%;height: 100%;background: url(\"+imgUrl+\") repeat;}\";\n// document.body.appendChild(div);\n// }\n// });\n\nchrome.runtime.onMessage.addListener(function(message,sender,sendResponse){\n alert(1);\n console.log(\"content activated\");\n if(message.todo === 'showPageAction'){\n chrome.pageAction.show(sender.tab.id);\n console.log(\"tab colored!\");\n chrome.browserAction.setBadgeText({\"text\": \"!\"});\n }\n});" }, { 
"alpha_fraction": 0.7058823704719543, "alphanum_fraction": 0.7058823704719543, "avg_line_length": 22.375, "blob_id": "00c018f153bd57dbca02c0636183730a4979c00d", "content_id": "97dcc0093b09d8c77d48427d088bf1f43671c13b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 187, "license_type": "no_license", "max_line_length": 70, "num_lines": 8, "path": "/P2/P2_project/part2/views/index.py", "repo_name": "xj-m/JHU-19F-WebSecurity", "src_encoding": "UTF-8", "text": "from flask import Flask, render_template, redirect, request, Blueprint\n\nindexBp = Blueprint('index', __name__)\n\n\[email protected](\"/\")\ndef index():\n return render_template('index.html')\n" }, { "alpha_fraction": 0.4947839081287384, "alphanum_fraction": 0.5225409865379333, "avg_line_length": 22.752212524414062, "blob_id": "0503f1c7baaab0c8d37619bdfcee7153ae94b218", "content_id": "1013960f7daa5981c3002c680ead72d4c687b5cf", "detected_licenses": [ "MIT", "LicenseRef-scancode-generic-cla" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 5368, "license_type": "permissive", "max_line_length": 69, "num_lines": 226, "path": "/examples/vscode_flask_tutorial/hello_app/static/index.js", "repo_name": "xj-m/JHU-19F-WebSecurity", "src_encoding": "UTF-8", "text": "initialization();\n\nwindow.onload = function() {\n //initialization for count and player\n var canvas = document.getElementById(\"board\");\n\n //if it is player1's turn\n var Player1 = true;\n //update after each move to check if game's over\n var gameover = false;\n\n //initialization for chess board, 0:available, 1:player1, 2:player2\n var board = [];\n for (var i = 0; i < 19; i++) {\n board[i] = [];\n for (var j = 0; j < 19; j++) {\n board[i][j] = 0;\n }\n }\n //console.log(board);\n\n var Player1 = true;\n var c = document.getElementById(\"myCanvas\");\n var ctx = c.getContext(\"2d\");\n\n canvas.addEventListener(\n \"click\",\n function(event) {\n if (gameover) {\n return;\n }\n\n var cx = event.offsetX,\n cy = event.offsetY,\n x = Math.round(cx / 50),\n y = Math.round(cy / 50);\n console.log(\"X: \" + x + \" Y: \" + y);\n //draw if within boundary and board coordinates usable\n if (x < 19 && y < 19 && board[x][y] == 0) {\n drawCircle(50 * x, 50 * y, Player1);\n //update the pieces on board\n board[x][y] = Player1 ? 1 : 2;\n console.log(board[x][y]);\n }\n\n gameover = checkWin(x, y, Player1);\n if (!gameover) {\n Player1 = !Player1;\n } else {\n var name = Player1 ? \"Player1\" : \"Player2\";\n alert(\"Game Over \" + name + \" won\");\n }\n },\n false\n );\n\n //check if any player wins after the new movement\n function checkWin(x, y, Player1) {\n return checkH(x, y, Player1) ||\n checkV(x, y, Player1) ||\n checkDL(x, y, Player1) ||\n checkDR(x, y, Player1)\n ? true\n : false;\n }\n\n //check horizontally\n function checkH(x, y, Player1) {\n var count = 0;\n var val = Player1 ? 1 : 2;\n var lowerBound = x - 4 >= 0 ? x - 4 : 0;\n var higherBound = x + 4 < 19 ? x + 4 : 0;\n for (var i = lowerBound; i <= higherBound; i++) {\n if (board[i][y] != val) {\n count = 0;\n } else {\n count++;\n if (count == 5) {\n return true;\n }\n }\n }\n return false;\n }\n\n //check vertically\n function checkV(x, y, Player1) {\n var count = 0;\n var val = Player1 ? 1 : 2;\n var lowerBound = y - 4 >= 0 ? y - 4 : 0;\n var higherBound = y + 4 < 19 ? 
y + 4 : 0;\n for (var i = lowerBound; i <= higherBound; i++) {\n if (board[x][i] != val) {\n count = 0;\n } else {\n count++;\n if (count == 5) {\n return true;\n }\n }\n }\n return false;\n }\n\n //check diagonally to the right-down\n function checkDR(x, y, Player1) {\n var count = 0;\n var val = Player1 ? 1 : 2;\n var lowerBoundX = 0,\n lowerBoundY = 0,\n higherBoundX = 18,\n higherBoundY = 18;\n //find the smaller value in lower bound x,y coordinates\n var lowerBound = x < y ? x : y;\n //define lower bound points\n if (lowerBound - 4 < 0) {\n lowerBoundX = x - lowerBound;\n lowerBoundY = y - lowerBound;\n } else {\n lowerBoundX = x - 4;\n lowerBoundY = y - 4;\n }\n var higherBound = x < y ? y : x;\n if (higherBound + 4 > 18) {\n higherBoundX = x + 18 - higherBound;\n higherBoundY = y + 18 - higherBound;\n } else {\n higherBoundX = x + 4;\n higherBoundY = y + 4;\n }\n\n var i = lowerBoundX;\n var j = lowerBoundY;\n while (i <= higherBoundX) {\n if (board[i][j] != val) {\n count = 0;\n } else {\n count++;\n if (count == 5) {\n return true;\n }\n }\n i++;\n j++;\n }\n return false;\n }\n\n //check diagonally to the left-up\n function checkDL(x, y, Player1) {\n var count = 0;\n var val = Player1 ? 1 : 2;\n var lowerBoundX = 0,\n lowerBoundY = 0,\n higherBoundX = 18,\n higherBoundY = 18;\n\n //find the coordinates for lower and higher bound\n diff_xr = 18 - x;\n diff_yu = y;\n diff_xl = x;\n diff_yd = 18 - y;\n right_up = diff_xr < diff_yu ? diff_xr : diff_yu;\n left_down = diff_xl < diff_yd ? diff_xl : diff_yd;\n lowerBoundX = x - left_down;\n lowerBoundY = y + left_down;\n higherBoundX = x + right_up;\n higherBoundY = y - right_up;\n\n //check five in a row\n var i = lowerBoundX;\n var j = lowerBoundY;\n while (i <= higherBoundX) {\n if (board[i][j] != val) {\n count = 0;\n } else {\n count++;\n if (count == 5) {\n return true;\n }\n }\n i++;\n j--;\n }\n return false;\n }\n};\n//initialize the 10x10 board\nfunction initialization() {\n var c = document.getElementById(\"myCanvas\");\n var width = 900;\n var height = 900;\n console.log(width);\n var ctx = c.getContext(\"2d\");\n for (var i = 0; i < 18; i++) {\n cx = (i * width) / 18;\n cy = (i * height) / 18;\n ctx.moveTo(0, cy);\n ctx.lineTo(width, cy);\n ctx.moveTo(cx, 0);\n ctx.lineTo(cx, height);\n ctx.strokeStyle = \"#cc9966\";\n ctx.stroke();\n }\n}\n/* Draw the pices on the board.\n * isBlack is the boolean type that checks if the user is black side\n */\nfunction drawCircle(x, y, isBlack) {\n if (isBlack) {\n var color = \"black\";\n } else {\n var color = \"white\";\n }\n\n var c = document.getElementById(\"myCanvas\");\n var ctx = c.getContext(\"2d\");\n ctx.beginPath();\n ctx.arc(x, y, 20, 0, 2 * Math.PI);\n ctx.stroke();\n ctx.fillStyle = color;\n ctx.fill();\n ctx.lineWidth = 3;\n ctx.strokeStyle = color;\n ctx.stroke();\n}\n" }, { "alpha_fraction": 0.42442476749420166, "alphanum_fraction": 0.4538052976131439, "avg_line_length": 17.34415626525879, "blob_id": "b16a12d205ce7a757143254aac35f83862819539", "content_id": "b8034a87789f65596c60983eae91e814cb69858b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2825, "license_type": "no_license", "max_line_length": 97, "num_lines": 154, "path": "/P1/P1_project/checker.py", "repo_name": "xj-m/JHU-19F-WebSecurity", "src_encoding": "UTF-8", "text": "from P1 import db\nfrom .models import Board\n\nglobal board\nboard = []\n# for i in range(19):\n# board.append([])\n# for j in range(19):\n# 
board[i].append(Board.query.filter_by(x=i, y=j).first())\n\n\ndef resetBoard():\n for i in range(19):\n board.append([])\n for j in range(19):\n board[i].append(0)\n\n\nresetBoard()\n\n\ndef checkWin(x, y, pNum):\n # NOTE1111: bug!\n board[x][y] = pNum\n return checkH(x, y, pNum) or checkV(x, y, pNum) or checkDL(x, y, pNum) or checkDR(x, y, pNum)\n\n\ndef checkH(x, y, pNum):\n count = 0\n # NOTE: replace this\n if(x-4 >= 0):\n lBound = x-4\n else:\n lBound = 0\n if(x+4 < 19):\n hBound = x+4\n else:\n hBound = 0\n for i in range(lBound, hBound+1):\n if (board[i][y] != pNum):\n count = 0\n else:\n count += 1\n if(count == 5):\n return True\n return False\n\n\ndef checkV(x, y, pNum):\n count = 0\n # NOTE: replace this\n if(y-4 >= 0):\n lBound = y-4\n else:\n lBound = 0\n if(y+4 < 19):\n hBound = y+4\n else:\n hBound = 0\n for i in range(lBound, hBound+1):\n if (board[x][i] != pNum):\n count = 0\n else:\n count += 1\n if(count == 5):\n return True\n return False\n\n\ndef checkDL(x, y, pNum):\n count = 0\n lBoundX = 0\n lBoundY = 0,\n hBoundX = 18\n hBoundY = 18\n\n diff_xr = 18-x\n diff_yu = y\n diff_xl = x\n diff_yd = 18-y\n\n if(diff_xr < diff_yu):\n right_up = diff_xr\n else:\n right_up = diff_yu\n\n if(diff_xl < diff_yd):\n left_down = diff_xl\n else:\n left_down = diff_yd\n\n lBoundX = x - left_down\n lBoundY = y + left_down\n hBoundX = x + right_up\n hBoundY = y - right_up\n\n i = lBoundX\n j = lBoundY\n while(i <= hBoundX):\n if(board[i][j] != pNum):\n count = 0\n else:\n count += 1\n if(count == 5):\n return True\n i += 1\n j -= 1\n return False\n\n\ndef checkDR(x, y, pNum):\n count = 0\n lBoundX = 0\n lBoundY = 0\n hBoundX = 18\n hBoundY = 18\n\n if(x < y):\n lBound = x\n else:\n lBound = y\n\n if(lBound-4 < 0):\n lBoundX = x - lBound\n lBoundY = y - lBound\n else:\n lBoundX = x-4\n lBoundY = y-4\n\n if(x < y):\n hBound = y\n else:\n hBound = x\n\n if(hBound+4 > 18):\n hBoundX = x+18-hBound\n hBoundY = y+18 - hBound\n else:\n hBoundX = x + 4\n hBoundY = y+4\n\n i = lBoundX\n j = lBoundY\n\n while(i <= hBoundX):\n if(board[i][j] != pNum):\n count = 0\n else:\n count += 1\n if(count == 5):\n return True\n i += 1\n j += 1\n return False\n" }, { "alpha_fraction": 0.5793573260307312, "alphanum_fraction": 0.5920155644416809, "avg_line_length": 27.55555534362793, "blob_id": "7d635201a5342709571d6f594cd8376446f5d9aa", "content_id": "4954916e627e1fd5ace032241ac5f75a868f55d8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1027, "license_type": "no_license", "max_line_length": 74, "num_lines": 36, "path": "/p3/project/modules/Store.js", "repo_name": "xj-m/JHU-19F-WebSecurity", "src_encoding": "UTF-8", "text": "man = chrome.extension.getBackgroundPage().man;\n\nman.Store = new function () {\n this.speed = 1; // in seconds\n this.wSize = 20;\n this.hSize = 20;\n\n this.curTabId = 100;\n this.curWinId = 100;\n this.imgStore = {};\n this.cut1 = [];\n this.cut2 = [];\n this.w = null;\n this.h = null;\n};\n\nman.Store.changeCurrTab = function (winId, tabId) {\n man.Store.curWinId = winId;\n man.Store.curTabId = tabId;\n console.log(\"Store: current tab changed: %s %s\", winId, tabId);\n};\nman.Store.getImg = function (winId, tabId) {\n windowStore = man.Store.imgStore[winId];\n if(windowStore == null || windowStore[tabId] == null){\n // NOTE: take note \n return null;\n }\n console.log(\"Store: getImg for %s %s\", winId, tabId);\n // return null if no record\n return man.Store.imgStore[winId][tabId][\"imgUrl\"];\n};\n\nman.Store.saveImg = 
function (winId, tabId, img) {\n console.log(\"Store: saveImg for %s %s, need implement\", winId, tabId);\n return;\n};" }, { "alpha_fraction": 0.6485084295272827, "alphanum_fraction": 0.6575875282287598, "avg_line_length": 31.125, "blob_id": "64ea8d2bc3ad3aa81c094e4bf15485040d0b0033", "content_id": "e8f054ec84ab1b7fab8adae667db0c35ab0475f8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1613, "license_type": "no_license", "max_line_length": 85, "num_lines": 48, "path": "/P2/P2_project/part3 csp3/views/level4.py", "repo_name": "xj-m/JHU-19F-WebSecurity", "src_encoding": "UTF-8", "text": "from flask import Flask, render_template, redirect, request, Blueprint, make_response\nimport string\nimport random\nlevel4Bp = Blueprint('level4', __name__, url_prefix=\"/level4\")\n\n\ndef randomString(stringLength=10):\n \"\"\"Generate a random string of fixed length \"\"\"\n letters = string.ascii_lowercase\n return ''.join(random.choice(letters) for i in range(stringLength))\n\n\[email protected](\"/\", methods=['GET', 'POST'])\ndef level4():\n x = randomString()\n level4CSP = \"script-src 'nonce-{}'\".format(x)\n if not request.args.get('timer'):\n r = make_response(render_template(\"level4.html\", x=x))\n else:\n timer = request.args.get('timer', 0)\n r = make_response(render_template(\n \"level4_timer.html\", timer=timer, x=x))\n r.headers['Content-Security-policy'] = level4CSP\n return r\n\n\n'''class MainPage(webapp.RequestHandler):\n \n  def render_template(self, filename, context={}):\n    path = os.path.join(os.path.dirname(__file__), filename)\n    self.response.out.write(template.render(path, context))\n \n  def get(self):\n    # Disable the reflected XSS filter for demonstration purposes\n    self.response.headers.add_header(\"X-XSS-Protection\", \"0\")\n \n    if not self.request.get('timer'):\n      # Show main timer page\n      self.render_template('index.html')\n    else:\n      # Show the results page\n      timer= self.request.get('timer', 0)\n      self.render_template('timer.html', { 'timer' : timer })\n     \n    return\n \napplication = webapp.WSGIApplication([ ('.*', MainPage), ], debug=False)\n'''\n" }, { "alpha_fraction": 0.6680216789245605, "alphanum_fraction": 0.6842818260192871, "avg_line_length": 31.04347801208496, "blob_id": "75d8d1d461e21176f6a95e11feb0ed3d386862c5", "content_id": "49cd202dd1624ad07f4920fc57bb2b6b41040d4e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 738, "license_type": "no_license", "max_line_length": 91, "num_lines": 23, "path": "/P2/P2_project/rebuild/views/level5.py", "repo_name": "xj-m/JHU-19F-WebSecurity", "src_encoding": "UTF-8", "text": "from flask import Flask, render_template, redirect, request, Blueprint, make_response\n\nlevel5Bp = Blueprint('level5', __name__, url_prefix=\"/level5\")\n\n\[email protected](\"/\", methods=['GET', 'POST'])\ndef level5():\n return render_template('level5_welcome.html')\n\n\[email protected]('/signUp', methods=['GET', 'POST'])\ndef signUp():\n return render_template('level5_signUp.html', next=request.args.get('next'))\n\n\[email protected]('/confirm', methods=['GET', 'POST'])\ndef confirm():\n next = request.args.get('next','welcome')\n return render_template('level5_confirm.html',next = request.args.get('next','welcome'))\n\[email protected]('/welcome', methods=['GET', 'POST'])\ndef welcome():\n return render_template('level5_welcome.html')\n\n" }, { "alpha_fraction": 0.6655573844909668, 
"alphanum_fraction": 0.6755407452583313, "avg_line_length": 25.130434036254883, "blob_id": "99d149d54eb254483fa276a5480059d3bfa7456b", "content_id": "6a3d65427d2a2a821fd5c35cebd81756bb3d26d1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 601, "license_type": "no_license", "max_line_length": 72, "num_lines": 23, "path": "/P2/bonus_point_student_3/part2/views/level3.py", "repo_name": "xj-m/JHU-19F-WebSecurity", "src_encoding": "UTF-8", "text": "from flask import Flask, render_template, redirect, request, Blueprint\n\nlevel3Bp = Blueprint('level3', __name__, url_prefix=\"/level3\")\n\n\[email protected](\"/\", methods=['GET', 'POST'])\ndef level3():\n return render_template('level3.html')\n\n\n'''\nclass MainPage(webapp.RequestHandler):\n \n \n def render_template(self, filename, context={}):\n path = os.path.join(os.path.dirname(__file__), filename)\n self.response.out.write(template.render(path, context))\n \n def get(self):\n self.render_template('index.html')\n \napplication = webapp.WSGIApplication([ ('.*', MainPage), ], debug=False)\n'''\n" }, { "alpha_fraction": 0.6052631735801697, "alphanum_fraction": 0.6052631735801697, "avg_line_length": 18.5, "blob_id": "ac80d051cde35c7db6e1e6051eac29c2b1287504", "content_id": "d9371970a25cc8e5882629a48527f583458ff737", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 38, "license_type": "no_license", "max_line_length": 24, "num_lines": 2, "path": "/p3/project/background/init_man.js", "repo_name": "xj-m/JHU-19F-WebSecurity", "src_encoding": "UTF-8", "text": "var man = {};\nconsole.log(\"init man\");" }, { "alpha_fraction": 0.6630753874778748, "alphanum_fraction": 0.680705189704895, "avg_line_length": 29.939393997192383, "blob_id": "087157e47280014c9adfe62ad5f35284f3b848ee", "content_id": "734510e3320cf46a32a0e3893ba09f4a31218165", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1021, "license_type": "no_license", "max_line_length": 87, "num_lines": 33, "path": "/P2/P2_project/part3 csp3/views/level2.py", "repo_name": "xj-m/JHU-19F-WebSecurity", "src_encoding": "UTF-8", "text": "# from flask import Flask, render_template, redirect, request, Blueprint, make_response\n\n# level2Bp = Blueprint('level2', __name__, url_prefix=\"/level2\")\n# level2CSP = \"script-src 'nonce-x'\"\n\n\n# @level2Bp.route(\"/\", methods=['GET', 'POST'])\n# def level2():\n# r = make_response(render_template(\"level2.html\"))\n# r.headers['Content-Security-policy'] = level2CSP\n# return r\n\n\nimport string\nimport random\nfrom flask import Flask, render_template, redirect, request, Blueprint, make_response\n\nlevel2Bp = Blueprint('level2', __name__, url_prefix=\"/level2\")\n\n\ndef randomString(stringLength=10):\n \"\"\"Generate a random string of fixed length \"\"\"\n letters = string.ascii_lowercase\n return ''.join(random.choice(letters) for i in range(stringLength))\n\n\[email protected](\"/\", methods=['GET', 'POST'])\ndef level2():\n x = randomString()\n level2CSP = \"script-src 'nonce-{}'\".format(x)\n r = make_response(render_template(\"level2.html\", x=x))\n r.headers['Content-Security-policy'] = level2CSP\n return r\n" }, { "alpha_fraction": 0.6724891066551208, "alphanum_fraction": 0.6972343325614929, "avg_line_length": 44.79999923706055, "blob_id": "78a9da9cf1db357d8a3322554e9518f42f5ec582", "content_id": "79b7de43495e1d9580fb481ef90c96c4cfdbe0a6", "detected_licenses": [], "is_generated": 
false, "is_vendor": false, "language": "Python", "length_bytes": 687, "license_type": "no_license", "max_line_length": 86, "num_lines": 15, "path": "/P2/bonus_point_student_3/part3 csp2/views/level3.py", "repo_name": "xj-m/JHU-19F-WebSecurity", "src_encoding": "UTF-8", "text": "from flask import Flask, render_template, redirect, request, Blueprint, make_response\n\nlevel3Bp = Blueprint('level3', __name__, url_prefix=\"/level3\")\n# level3CSP = \"script-src-elem 'sha256-+zlx0LZhdO64maLakjkdyjWHBiGjmz+ND3coSjJWW6E=' ajax.googleapis.com/\"\n# level3CSP = \"script-src http://localhost:5000/static/level3.js ajax.googleapis.com/\"\nlevel3CSP = \"script-src 'self'; script-src-elem 'self' ajax.googleapis.com/\"\n# level3CSP = \"script-src-elem 'sha256-rEl3vo2eeGsIK5QdBfnVWMffvMkYoFv/HHBRwsnkBgg=''sha256-5GwV0JsBobqZcbVCKSlLFeHE0He4GTeXJrc+b6OOEGs=''sha256-AnDVymxFQ3SNPFvGTp6Xbh2Cef+9yLo4n/nJAMA7daI=' 'sha256-+zlx0LZhdO64maLakjkdyjWHBiGjmz+ND3coSjJWW6E=' ajax.googleapis.com/\"\n\n\[email protected](\"/\", methods=['GET', 'POST'])\ndef level3():\n # return render_template('level3.html')\n r = make_response(render_template(\"level3.html\"))\n r.headers['Content-Security-policy'] = level3CSP\n return r\n" }, { "alpha_fraction": 0.6769230961799622, "alphanum_fraction": 0.7692307829856873, "avg_line_length": 31.75, "blob_id": "6078db1596f6fd29059712ee807271f23c7f3796", "content_id": "a34dabc669fd18da941e8e899a62dbb06154f2c6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 130, "license_type": "no_license", "max_line_length": 74, "num_lines": 4, "path": "/P2/bonus_point_student_3/part3 csp2/sha256.py", "repo_name": "xj-m/JHU-19F-WebSecurity", "src_encoding": "UTF-8", "text": "import hashlib, base64\n\ndef sha256_string(hash_string):\n return base64.b64encode(hashlib.sha256(hash_string.encode()).digest())" }, { "alpha_fraction": 0.6717850565910339, "alphanum_fraction": 0.6823416352272034, "avg_line_length": 27.94444465637207, "blob_id": "6c50eed02a0f066c133643ac26437d74bd14c231", "content_id": "fae3fea66c4ac3cc17bd59ff31c332194d6326aa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1042, "license_type": "no_license", "max_line_length": 85, "num_lines": 36, "path": "/P2/bonus_point_student_3/part3 csp3/views/level3.py", "repo_name": "xj-m/JHU-19F-WebSecurity", "src_encoding": "UTF-8", "text": "import string\nimport random\nfrom flask import Flask, render_template, redirect, request, Blueprint, make_response\n\nlevel3Bp = Blueprint('level3', __name__, url_prefix=\"/level3\")\n\n\ndef randomString(stringLength=10):\n \"\"\"Generate a random string of fixed length \"\"\"\n letters = string.ascii_lowercase\n return ''.join(random.choice(letters) for i in range(stringLength))\n\n\[email protected](\"/\", methods=['GET', 'POST'])\ndef level3():\n x = randomString()\n level3CSP = \"script-src 'nonce-{}'\".format(x)\n # return render_template('level3.html')\n r = make_response(render_template(\"level3.html\", x=x))\n r.headers['Content-Security-policy'] = level3CSP\n return r\n\n\n'''\nclass MainPage(webapp.RequestHandler):\n \n \n def render_template(self, filename, context={}):\n path = os.path.join(os.path.dirname(__file__), filename)\n self.response.out.write(template.render(path, context))\n \n def get(self):\n self.render_template('index.html')\n \napplication = webapp.WSGIApplication([ ('.*', MainPage), ], debug=False)\n'''\n" }, { "alpha_fraction": 0.6613138914108276, "alphanum_fraction": 
0.6776642203330994, "avg_line_length": 25.346153259277344, "blob_id": "3760d89b1373e133ca789ce6af99b67a5fbcd4a5", "content_id": "265c4a9e124ba57bb076c38ac97464ba96571ba3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3437, "license_type": "no_license", "max_line_length": 183, "num_lines": 130, "path": "/P2/P3_report.md", "repo_name": "xj-m/JHU-19F-WebSecurity", "src_encoding": "UTF-8", "text": "# part 3 report\n\n## How to run the website\n\n```shell\nflask run\n```\n\n## CSP2.0\n\n### Level1\n\nMy rule is:\n\n```python\nlevel1CSP = \"script-src 'none'\"\n```\n\nThen render it into the header\n\n```python\[email protected](\"/\", methods=['GET', 'POST'])\ndef level1():\n    if request.args.get('query') == None:\n        r = make_response(render_template('level1.html', mode = \"main\"))\n        r.headers['X-XSS-Protection'] = '0'\n        r.headers['Content-Security-policy'] = level1CSP\n        return r\n    else:\n        query = Markup(request.args.get('query'))\n        # query = request.args.get('query')\n        r = make_response(render_template('level1.html', mode = \"msg\" , msg = query))\n        r.headers['Content-Security-policy'] = level1CSP\n        r.headers['X-XSS-Protection'] = '0'\n        return r\n\n```\n\nLevel 1's page shouldn't contain any JavaScript, so simply banning all JavaScript prevents attacks; here I also set the XSS protection header to 0 to disable the browser's automatic defences\n\n### level2\n\n```python\nlevel2CSP = \"script-src 'self'\"\n```\n\nNote that I moved the javascript to a separate file.\n\nThen render the header; this prevents inline scripts but allows running .js files within the domain, which maintains the original functionality\n\n### level3\n\n```python\nlevel3CSP = \"script-src 'self'; script-src-elem 'self' ajax.googleapis.com/\"\n```\n\nNote that I moved the javascript to a separate file, as well as the `onclick` event\n\nSimilar to level2, all JavaScript that maintains the functionality is included in a .js file, and an attacker's inline JavaScript won't be executed.\n\nNote that Ajax needs to be whitelisted\n\n### level4\n\nSince the user input can be any int, we have to make sure every input can trigger the JavaScript function, so we need to whitelist the JavaScript depending on the user input.\n\nSo here I checked the user input\n\n```python\n    timer = request.args.get('timer', 1)\n    try:\n        int(timer)\n    except ValueError:\n        timer = 1\n```\n\nSince `nonce` is not allowed here, use a hash to whitelist the JavaScript\n\n```python\n    level4CSP = \"script-src 'self'\"\n    script = \"document.getElementById('img').onload = function() {startTimer('\" + timer + \"');}\"\n    level4CSP += \" \\'sha256-\" + base64.b64encode(hashlib.sha256(script.encode('utf-8')).digest()).decode(\"utf-8\") + \"\\'\"\n```\n\nNote that I modified the html file and moved the `onclick` out of the image element\n\n### level5\n\n```python\nlevel5CSP = \"script-src 'self'\"\n```\n\nSimilar to level2\n\n### level6\n\n```python\nlevel6CSP = \"default-src 'self' filesystem\"\n```\n\nThis only allows scripts from the filesystem, which prevents injecting JavaScript from the URL\n\n## CSP3.0\n\n### Level 1 and 6\n\nThe same as CSP2.0, since there is no need to use a nonce\n\n### For level 2-5\n\nFirst, a function to generate a random string\n\n```python\ndef randomString(stringLength=10):\n    \"\"\"Generate a random string of fixed length \"\"\"\n    letters = string.ascii_lowercase\n    return ''.join(random.choice(letters) for i in range(stringLength))\n```\n\nThen set the nonce every time a page needs to be rendered, 
take level2 as an example\n\n```python\n x = randomString()\n level2CSP = \"script-src 'nonce-{}'\".format(x)\n r = make_response(render_template(\"level2.html\", x=x))\n r.headers['Content-Security-policy'] = level2CSP\n return r\n```\n\nThen in the `script` tag, put the `nonce=‘{{x}}’` for the javascript that is needed for the logic functionality\n" }, { "alpha_fraction": 0.6418439745903015, "alphanum_fraction": 0.6507092118263245, "avg_line_length": 29.486486434936523, "blob_id": "1a34dcd9cd2fb688037f25776d3845c38b3098cd", "content_id": "2773f812ef1737078e9526e409256e9662fcd0c9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1199, "license_type": "no_license", "max_line_length": 72, "num_lines": 37, "path": "/P2/P2_project/rebuild/views/level4.py", "repo_name": "xj-m/JHU-19F-WebSecurity", "src_encoding": "UTF-8", "text": "from flask import Flask, render_template, redirect, request, Blueprint\n\nlevel4Bp = Blueprint('level4', __name__, url_prefix=\"/level4\")\n\n\[email protected](\"/\", methods=['GET', 'POST'])\ndef level4():\n # NOTE:\n if not request.args.get('timer'):\n return render_template('level4.html')\n else:\n timer = request.args.get('timer', 0)\n return render_template('level4_timer.html', timer=timer)\n\n\n'''class MainPage(webapp.RequestHandler):\n \n  def render_template(self, filename, context={}):\n    path = os.path.join(os.path.dirname(__file__), filename)\n    self.response.out.write(template.render(path, context))\n \n  def get(self):\n    # Disable the reflected XSS filter for demonstration purposes\n    self.response.headers.add_header(\"X-XSS-Protection\", \"0\")\n \n    if not self.request.get('timer'):\n      # Show main timer page\n      self.render_template('index.html')\n    else:\n      # Show the results page\n      timer= self.request.get('timer', 0)\n      self.render_template('timer.html', { 'timer' : timer })\n     \n    return\n \napplication = webapp.WSGIApplication([ ('.*', MainPage), ], debug=False)\n'''\n" }, { "alpha_fraction": 0.7884615659713745, "alphanum_fraction": 0.8230769038200378, "avg_line_length": 29.58823585510254, "blob_id": "ebb2f30a1ecd010d1601f3c185afdc125077b895", "content_id": "3ace19d235bbf456e0fa74da73fac3c45be14a02", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 520, "license_type": "no_license", "max_line_length": 34, "num_lines": 17, "path": "/P2/P2_project/part2/__init__.py", "repo_name": "xj-m/JHU-19F-WebSecurity", "src_encoding": "UTF-8", "text": "from flask import Flask\nfrom .views.index import indexBp\nfrom .views.level1 import level1Bp\nfrom .views.level2 import level2Bp\nfrom .views.level3 import level3Bp\nfrom .views.level4 import level4Bp\nfrom .views.level5 import level5Bp\nfrom .views.level6 import level6Bp\n\napp = Flask(__name__)\napp.register_blueprint(indexBp)\napp.register_blueprint(level1Bp)\napp.register_blueprint(level2Bp)\napp.register_blueprint(level3Bp)\napp.register_blueprint(level4Bp)\napp.register_blueprint(level5Bp)\napp.register_blueprint(level6Bp)\n" } ]
42
msun0106/Python-for-GIS
https://github.com/msun0106/Python-for-GIS
a11d29544e73ac441f0aa6a664afec6e29c7b60f
4581b3df440e811ea78174b1fd0ae4d511333c7e
c9c71bcecd0a7e6714c375f75c8832b1d866caf5
refs/heads/master
2018-11-09T11:26:10.381973
2017-11-28T20:31:40
2017-11-28T20:31:40
94,367,467
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.670265793800354, "alphanum_fraction": 0.6789867281913757, "avg_line_length": 35.40909194946289, "blob_id": "b23accb1bab957e8bdea7aa8e59436113aecf22b", "content_id": "816984c6eb432816c9c4472662d39660e46739b5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2408, "license_type": "no_license", "max_line_length": 165, "num_lines": 66, "path": "/DwellingCountTool.py", "repo_name": "msun0106/Python-for-GIS", "src_encoding": "UTF-8", "text": " # -------------------------------------------------------------------------------\n # Purpose: Prompt the user for the building type and the box number.\n # Count the buildings in the selected box zone and store the results. \n # And store the results in the attribute table.\n #\n #---------------------------------------------------------------------------------\ntry:\n #Import the modules\n import arcpy\n from arcpy import env\n \n #Set up environment\n env.workspace = r\"H:\\EsriPress\\GISTPython\\Data\\City of Oleander.gdb\"\n \n #Get input from the user\n # The first will be the box number to act upon -- index 0\n # The second will be the building type to count -- index 1\n boxNumber = arcpy.GetParameterAsText(0)\n buildingType = arcpy.GetParameterAsText(1)\n \n #Make feature layers from the user input\n boxLayer = arcpy.MakeFeatureLayer_management (r\"H:\\EsriPress\\GISTPython\\Data\\City of Oleander.gdb\\FireBoxMap_\" + str(boxNumber))\n buildLayer = arcpy.MakeFeatureLayer_management (r\"H:\\EsriPress\\GISTPython\\Data\\City of Oleander.gdb\\Planimetrics\\BldgFootprints\",\"\\\"UseCode\\\"='\" +buildingType+ \"'\")\n \n #Use the specified file of box zone to select specified type of building\n arcpy.SelectLayerByLocation_management(buildLayer,\"HAVE_THEIR_CENTER_IN\", boxLayer)\n \n #Count the selected features\n bldgCount=int(arcpy.GetCount_management(buildLayer).getOutput(0))\n \n #Dispaly the results in the geoprocessing results window\n arcpy.AddMessage(\"The count of buildings is\" + str(bldgCount)+\".\")\n \n #Create a field to store the results\n #1=Single Family (SFCount)\n #2=Multi-Family (MFCount)\n #3=Commercial (ComCount)\n #4=Industrial (IndCount)\n #5=City Property (CityCount)\n #6=Storage Sheds (ShedCount)\n #7=Schools (SchCount)\n #8=Church (ChurCount)\n if buildingType == 1:\n newField = \"SFCount\"\n elif buildingType == 2:\n newField = \"MFCount\"\n elif buildingType == 3:\n newField = \"ComCount\"\n elif buildingType == 4:\n newField = \"IndCount\"\n elif buildingType == 5:\n newField = \"CityCount\"\n elif buildingType == 6:\n newField = \"ShedCount\"\n elif buildingType ==7:\n newField = \"SchCount\"\n else:\n newField = \"ChurCount\"\n arcpy.AddField_management(boxLayer, newField, \"Long\")\n #Store the results in the field\n arcpy.CalculateField_management(boxLayer, newField,bldgCount)\n\nexcept arcpy.ExecuteError:\n print arcpy.GetMessages(2)\nexcept:\n print \"Process did not complete.\"\n \n \n" }, { "alpha_fraction": 0.6613767743110657, "alphanum_fraction": 0.6628779768943787, "avg_line_length": 43.70192337036133, "blob_id": "63e6a8c1bb22ad6df29fcb7c456b26a006559fb6", "content_id": "f898357f2a5230de01f17f9612efa39ecd3c5467", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4665, "license_type": "no_license", "max_line_length": 178, "num_lines": 104, "path": "/Process Fire Data.py", "repo_name": "msun0106/Python-for-GIS", "src_encoding": "UTF-8", "text": "try:\n # Import the modules\n 
import arcpy\n from arcpy import env\n\n\n # Set up the environment\n env.workspace = r\"H:\\EsriPress\\GISTPython\\MyExercises\\\\\"\n env.overwriteOutput = True\n \n # Prompt user for the input table / accept user input into a variable named inTable\n inTable = arcpy.GetParameterAsText(0)\n # When this is set up as a cursor tool, set the input to tables only \n # Get the fields from the input\n fields = arcpy.ListFields(inTable)\n # Create a fieldinfo object\n fieldinfo = arcpy.FieldInfo()\n \n # Define a fieldinfo object to bring only certain fields into the view\n # inci_no, alm_date, alm_time, arv_date\n # descript, station, shift, city\n # number, st_prefix, street, st_type, st_suffix\n # (can not add new fields to a table view, so reuse a discarded one)\n # Change the name of addr_2 to GeoAddress in the output table\n # Code was copied and modified from the Help screen\n \n # Iterate through the fields, and set them to fieldinfo\n for field in fields:\n if field.name == \"inci_no\":\n fieldinfo.addField(field.name, field.name, \"VISIBLE\", \"\")\n elif field.name == \"alm_date\":\n fieldinfo.addField(field.name, field.name, \"VISIBLE\", \"\")\n elif field.name == \"alm_time\":\n fieldinfo.addField(field.name, field.name, \"VISIBLE\", \"\")\n elif field.name == \"arv_date\":\n fieldinfo.addField(field.name, field.name, \"VISIBLE\", \"\")\n elif field.name == \"arv_time\":\n fieldinfo.addField(field.name, field.name, \"VISIBLE\", \"\") \n elif field.name == \"inci_type\":\n fieldinfo.addField(field.name, field.name, \"VISIBLE\", \"\")\n elif field.name == \"descript\":\n fieldinfo.addField(field.name, field.name, \"VISIBLE\", \"\")\n elif field.name == \"station\":\n fieldinfo.addField(field.name, field.name, \"VISIBLE\", \"\")\n elif field.name == \"shift\":\n fieldinfo.addField(field.name, field.name, \"VISIBLE\", \"\") \n elif field.name == \"city\":\n fieldinfo.addField(field.name, field.name, \"VISIBLE\", \"\")\n elif field.name == \"number\":\n fieldinfo.addField(field.name, field.name, \"VISIBLE\", \"\")\n elif field.name == \"st_prefix\":\n fieldinfo.addField(field.name, field.name, \"VISIBLE\", \"\")\n elif field.name == \"street\":\n fieldinfo.addField(field.name, field.name, \"VISIBLE\", \"\") \n elif field.name == \"st_type\":\n fieldinfo.addField(field.name, field.name, \"VISIBLE\", \"\")\n elif field.name == \"st_suffix\":\n fieldinfo.addField(field.name, field.name, \"VISIBLE\", \"\")\n elif field.name == \"addr_2\":\n fieldinfo.addField(field.name, \"GeoAddress\", \"VISIBLE\", \"\")\n else:\n fieldinfo.addField(field.name, field.name, \"Hidden\", \"\") \n \n # Create a table view of the input table\n # The created fire_view table view will have fields as set in fieldinfo object\n arcpy.MakeTableView_management(inTable, \"fire_view\", \"\", \"\", fieldinfo)\n \n # Do the address formatting into GeoAddress for the whole table\n # Concatenate number + st_prefix + street + st_type + st_suffix and remove spaces\n arcpy.CalculateField_management(\"fire_view\",\"GeoAddress\",\"str(!number!)+' '+!st_prefix!.strip()+' '+!street!.strip()+' ' +!st_type!.strip()+' ' + !st_suffix!.strip()\",\"PYTHON\")\n \n # Create a new geodatabase to store results for the year (\"Fire_File_For_\" + last 4 digits of the file name)\n gdbName = \"Fire_File_For_\" + inTable[-8:-4]\n arcpy.CreateFileGDB_management(\"H:\\\\EsriPress\\\\GISTPython\\\\MyExercises\",gdbName)\n \n # Use cursor to find each unique city name, and add it to a list\n # City names included may differ from 
file to file\n # Set up a list to hold unique city names\n cityList = []\n fireCursor = arcpy.da.SearchCursor(inTable, [\"city\"])\n # Start cursor iteration\n for row in fireCursor:\n cityName = row[0]\n if cityName not in cityList:\n cityList.append(cityName)\n # Result is a list object to select records\n del row\n arcpy.AddWarning(\"Made the list of city names.\")\n \n # Use the names in the list object to select records\n for name in cityList:\n cityQuery = '\"city\"= \\'' + name + '\\''\n arcpy.SelectLayerByAttribute_management(\"fire_view\",\"NEW_SELECTION\",cityQuery)\n newTable = \"H:\\\\EsriPress\\\\GISTPython\\\\MyExercises\\\\\" + gdbName + \".gdb\\\\\" + name.replace(\" \",\"_\")\n arcpy.CopyRows_management(\"fire_view\", newTable)\n itemCount = int(arcpy.GetCount_management(\"fire_view\").getOutput(0))\n arcpy.AddWarning(\"A table called \" + newTable + \" was created with \" + str(itemCount) + \" rows. \")\n # Repeat for all names in the list\n\nexcept arcpy.ExecuteError:\n print arcpy.GetMessages(2)\nexcept:\n print \"Process did not complete.\"\n\n# Use the script to create a script tool\n# Add validation code to the script tool\n \n \n" }, { "alpha_fraction": 0.6803921461105347, "alphanum_fraction": 0.6823529601097107, "avg_line_length": 35.42856979370117, "blob_id": "e141e9e8d4f36ab06737ae752377eecb8042255a", "content_id": "6d12718023e2599065ecc113a3cc0ae840f95d3f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1020, "license_type": "no_license", "max_line_length": 93, "num_lines": 28, "path": "/LanemilesCount.py", "repo_name": "msun0106/Python-for-GIS", "src_encoding": "UTF-8", "text": "try:\n import arcpy\n from arcpy import env\n\n # Set up the environment \n env.workspace = r\"H:\\EsriPress\\GISTPython\\Data\\FireDepartment.gdb\"\n env.overwriteOutput=True\n fcBoxZones = arcpy.ListFeatureClasses(\"FireBoxMap*\")\n \n #Start a for statement to iterate through the files\n for fc in fcBoxZones:\n #Get the first file - it's stored in fc\n print fc\n arcpy.AddField_management(fc, \"LaneMiles\",\"Long\")\n arcpy.MakeFeatureLayer_management(\"Street Centerlines\",\"centerline_lyr\")\n print \"Made feature layers\"\n arcpy.SelectLayerByLocation_management(\"centerline_lyr\", \"HAVE_THEIR_CENTER_IN\", fc)\n print \"Made selection\"\n laneCount = int(arcpy.GetCount_management(\"centerline_lyr\").getOutput(0))\n print str(laneCount)\n \n arcpy.CalculateField_management(fc,\"LaneMiles\",laneCount)\n print \"Updated the LaneMiles\"\n \nexcept arcpy.ExecuteError:\n print arcpy.GetMessages(2)\nexcept:\n print \"Process did not complete\"\n" }, { "alpha_fraction": 0.7945945858955383, "alphanum_fraction": 0.800000011920929, "avg_line_length": 96.88235473632812, "blob_id": "1d916cfbd60ed34f6deba735ab32295510686cbd", "content_id": "554479640a4a3df0ba9525ce4f95a54b9964cddf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1665, "license_type": "no_license", "max_line_length": 449, "num_lines": 17, "path": "/README.md", "repo_name": "msun0106/Python-for-GIS", "src_encoding": "UTF-8", "text": "# Python-for-GIS\nUsing Python to execute GIS analysis based on the ArcGIS environment\n\n1. Bookmobile Analysis\n\n2. DwellingCount.py:\n scenario: the fire department needs a count of all the single-family homes and multifamily structures in each of the 44 fire response zones. The geodatabase contains a polygon feature class for each response zone, or box. The idea is to add two fields to each box: one for the single-family count and one for the multifamily count. \n\n3. DwellingCountTool.py\n scenario: based on DwellingCount.py, create a script tool which allows users to choose the input and output parameters.\n \n4. 
DwellingCountTool.py\n senario: based on DwellingCount.py, create a script tool which allow users to choose the input and output parameters.\n \n4. Process Fire Data.py \n senario: In any given month, the Oleander Fire Department responds to several hundred calls around the region. Since Oleander has mutual-aid agreements with 16 neighboring cities, the call location may be in any one of these locations. The department needs to split the fire run data that represents each call for service into multiple file with one table for Oleander calls and another table for each of the other cities in which they responded.\n \n5. DrainageshedAnalysis.py\n senario: The Oleander Public Works Department need to know the characteristics of the drainage system for the watershed connceted to the particular outfall at the monitoring station. The descriptions include an inventory of the fixtures attached to the watershed system, along with a summary of pipe sizes. \n The tool is to automate the process will have the user select one outfall, and then the tool will trace the connected pipes until all are selected. This tool will select and form an inventory of the fixtures attached to these pipes. \n" }, { "alpha_fraction": 0.6605744361877441, "alphanum_fraction": 0.6710183024406433, "avg_line_length": 46.40625, "blob_id": "7bd692bb280e29bfcd6d928f42ebc7aced929a97", "content_id": "3a12ae9a5390a782a63b055526f3eb81dab8e39d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1532, "license_type": "no_license", "max_line_length": 181, "num_lines": 32, "path": "/Bookmobile Analysis.py", "repo_name": "msun0106/Python-for-GIS", "src_encoding": "UTF-8", "text": "\ntry:\n #Import the modules\n import arcpy\n from arcpy import env\n\n #Set up the environment\n env.workspace = r\"H:\\EsriPress\\GISTPython\\Data\\City of Oleander.gdb\\\\\"\n env.overwriteOutput = True\n\n #Set up cursor for the bookmobile sites\n arcpy.MakeFeatureLayer_management(\"BookmobileLocations\",\"Locations_lyr\")\n arcpy.MakeFeatureLayer_management(\"Parcels\",\"Parcels_lyr\",'\"DU\"=1')\n siteCursor = arcpy.da.SearchCursor(\"Locations_lyr\",\"Marker\")\n for row in siteCursor:\n siteName=row[0]\n \n \n arcpy.Select_analysis (\"Locations_lyr\", r\"H:\\EsriPress\\GISTPython\\MyExercises\\Scratch\\Temporary Storage.gdb\\SiteTemp\",'\"Marker\"= \\''+siteName+\"\\'\")\n arcpy.SelectLayerByLocation_management(\"Parcels_lyr\",\"WITHIN_A_DISTANCE\",r\"H:\\EsriPress\\GISTPython\\MyExercises\\Scratch\\Temporary Storage.gdb\\SiteTemp\",\"150\",\"NEW_SELECTION\")\n \n #Start a while statement until number of dwelling units exceeds 200\n parcelCount = int(arcpy.GetCount_management(\"Parcels_lyr\").getOutput(0))\n \n \n while parcelCount < 200:\n arcpy.SelectLayerByLocation_management(\"Parcels_lyr\",\"WITHIN_A_DISTANCE\",\"Parcels_lyr\",\"150\",\"ADD_TO_SELECTION\")\n parcelCount = int(arcpy.GetCount_management(\"Parcels_lyr\").getOutput(0))\n print parcelCount \n \n arcpy.CopyFeatures_management(\"Parcels_lyr\", r\"H:\\EsriPress\\GISTPython\\MyExercises\\MyAnswers.gdb\\\\\"+siteName.replace(\" \",\"_\"))\n \n print siteName + \"Output OK!\"\n\n \n \n \n" }, { "alpha_fraction": 0.6700890064239502, "alphanum_fraction": 0.6789869666099548, "avg_line_length": 47.66666793823242, "blob_id": "f95e9b3defc69c58b27625c63273ac58d7cbfc24", "content_id": "4cb969fe7511fa986813022df6fdec39a9118b32", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1461, 
"license_type": "no_license", "max_line_length": 177, "num_lines": 30, "path": "/Bookmobile_pipe.py", "repo_name": "msun0106/Python-for-GIS", "src_encoding": "UTF-8", "text": "\ntry:\n\n #Import the modules\n import arcpy\n from arcpy import env\n\n #Set up the environment\n env.workspace = r\"H:\\EsriPress\\GISTPython\\Data\\City of Oleander.gdb\\\\\"\n env.overwriteOutput = True\n \n #Set up cursor for the bookmobile sites\n arcpy.MakeFeatureLayer_management(\"SamplingStations\",\"SSLocation_lyr\", \"'Status'= Operational\")\n arcpy.MakeFeatureLayer_management(\"DistLateral\",\"DL_lyr\")\n siteCursor=arcpy.da.SearchCursor(\"SSLocation_lyr\",\"Desc\")\n for row in siteCursor:\n siteName=row[0]\n arcpy.Select_analysis(\"SSLocation_lyr\",r\"H:\\EsriPress\\GISTPython\\MyExercises\\Scratch\\Temporary Storage.gdb\\psiteTemp\", '\"Desc\"= \\''+siteName+\"\\'\")\n arcpy.SelectLayerByLocation_management(\"DL_lyr\",\"WITHIN_DISTANCE\", r\"H:\\EsriPress\\GISTPython\\MyExercises\\Scratch\\Temporary Storage.gdb\\psiteTemp\",\"100\", \"NEW_SELECTION\")\n \n #start a while statement until number of pipe unit exceeds 10\n pipeCount = int(arcpy.GetCount_management(\"DL_Lyr\").getOutput(0))\n \n while pipeCount <10:\n arcpy.SelectLayerByLocation_management(\"DL_Lyr\", \"WITHIN_DISTANCE\", \"DL_Lyr\",\"100\",\"ADD_TO_SELECTION\")\n pipeCount=int(arcpy.GetCount_management(\"DL_Lyr\").getOutput(0))\n print pipeCount\n \n arcpy.CopyFeatures_management(\"DL_Lyr\",r\"H:\\EsriPress\\GISTPython\\MyExercises\\MyAnswers.gdb\\\\\"+siteName.replace(\" \",\"_\"))\n \n print siteName + \"Output OK!\"\n" }, { "alpha_fraction": 0.6772152185440063, "alphanum_fraction": 0.6825705766677856, "avg_line_length": 43.5217399597168, "blob_id": "a87e3f36e883cfc71d782a11f8aa0a224be1078f", "content_id": "8b05d884407d6ac9ebbf99242f97d76dbc3ac6bb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2054, "license_type": "no_license", "max_line_length": 111, "num_lines": 46, "path": "/DwellingCount.py", "repo_name": "msun0106/Python-for-GIS", "src_encoding": "UTF-8", "text": "\ntry:\n import arcpy\n from arcpy import env\n \n # Set up the environment \n env.workspace = r\"H:\\EsriPress\\GISTPython\\Data\\FireDepartment.gdb\"\n env.overwriteOutput=True\n fcBoxZones = arcpy.ListFeatureClasses(\"FireBoxMap*\")\n \n #Start a for statement to iterate through the files\n for fc in fcBoxZones:\n #Get the first file - it's stored in fc\n print fc\n \n #Add two fields to hold the results\n arcpy.AddField_management(fc, \"SFCount\",\"Long\")\n arcpy.AddField_management(fc,\"MFCount\",\"Long\")\n \n #Select the single-family housing units (centroid within polygon)\n arcpy.MakeFeatureLayer_management(\"BldgFootprints\",\"Buildings_lyr\",\"\\\"UseCode\\\"=1\")\n arcpy.MakeFeatureLayer_management(\"BldgFootprints\", \"Builgins_lyr2\", \"\\\"UseCode\\\"=2\")\n print \"Made feature layers\"\n arcpy.SelectLayerByLocation_management (\"Buildings_lyr\", \"HAVE_THEIR_CENTER_IN\", fc)\n arcpy.SelectLayerByLocation_management (\"Buildings_lyr2\", \"HAVE_THEIR_CENTER_IN\", fc)\n print \"Made selectoin\"\n #Count the single_family dwellings\n bldgCount=int(arcpy.GetCount_management(Buildings_lyr).getOutput(0))\n bldgCount2=int(arcpy.GetCount_management(Buildings_lyr2).getOutput(0))\n print str(bldgCount)\n print str(bldgCount2)\n # use \"HAVE THEIR CENTER IN\" make sure that for the overlay type parameter, no buidling \n # will appear in more than one box\n \n #Update he field\n #CalculateField_management 
(in_table, field, expression, {expression_type},{code_block})\n arcpy.CalculateField_management(fc,\"SFCount\",bldgCount)\n arcpy.CalculateField_management(fc,\"MFCount\",bldgCount2)\n print \"Updated the SFCount and MFCount field\"\n \nexcept arcpt.ExecuteError:\n print arcpy.GetMessages(2)\nexcept:\n print \"Process did not complete\"\n \n#The special arcpy.Execute object will trap any errors raised bt Arcpy statements. The second except statement \n#will trap any other errors\n \n" }, { "alpha_fraction": 0.7288092374801636, "alphanum_fraction": 0.7336747646331787, "avg_line_length": 58.06060791015625, "blob_id": "74c7ec478116d09d8f39c5b74455bbe235ffc444", "content_id": "919f89aff4e6e0f878d2d4e82b0c6600b9b6fc93", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3905, "license_type": "no_license", "max_line_length": 126, "num_lines": 66, "path": "/DrainageshedAnalysis.py", "repo_name": "msun0106/Python-for-GIS", "src_encoding": "UTF-8", "text": "\n# Check to make sure that only one feature is selected before continuing\nfixCount = int(arcpy.GetCount_management(\"Fixtures\").getOutput(0))\narcpy.AddWarning(\"The number of featres seleceted is \"+ str(fixCount)+\".\")\nif fixCount<>1:\n arcpy.AddError (\"The number of selected features MUST be only one.\" + \\\n \"Prepare a new selection and try again.\")\n raise exception\nelse:\n arcpy.AddWarning (\"Only one feature is selected and the script is continuing...\")\n\n# Set up a cursor to iterate through the selected row of Fixtures\n# arcpy.da.SearchCursor(in_table, field_names, {where_clause},{spatial_reference},{explode_to_points},{sql_clause})\n# By only specifying one field name, the value of row[0] contains that field value.\nfixCursor = arcpy.da.SearchCursor(\"Fixtures\",\"System\")\nfor row in fixCursor:\n systemName=row[0]\n arcpy.AddWarning(\"You are working on the \"+ systemName + \" system.\")\n \n# Selections can only be made on deature layers, \n# so create one for the lines..\nstormLinesLayer = arcpy.MakeFeatureLayer_management(\"MainLat\")\n# .. 
and one for the fixtures\nfisturesLayer = arcpy.MakeFeatureLayer_management(r\"H:\\EsriPress\\GISTPython\\Data\\StormDrainUtility.gdb\\Storm_Drains\\Fixtures\")\n# Note that the feature layer was made by referencing the data's location\n# in the geodatabase and not in the map document\n# Referencing the data in the map document would only get the currently selected features\n\n# Use the selected outfall to select the first lines connected to it\n# Selection type will be \"Create new selection\"\n# SelectLayerByLocation_management((in_layer, {overlap_type},{select_features},{search_distance},{selection_type})\narcpy.SelectLayerByLocation_management(stormLinesLayer,\"INTERSECT\",\"Fixtures\",\"\", \"NEW_SELECTION\")\n\n# With the first line feature selected, use it to select the other lines \n# Set up two feature count variables\n# The first will be the current selection\nlineCount1 = int(arcpy.GetCount_management(stormLinesLayer).getOutput(0))\n# The second will be the new selection (initialize outside the while loop)\nlineCount2 = 0\n\n# Set up a while statement - it will end when the current selection equals the new selection\nwhile lineCount1 <> lineCount2:\n lineCount1 = lineCount2\n # Use selected features to select intersecting features\n # Selection type will be \"Add to selection\"\n arcpy.SelectLayerByLocation_management(stormLinesLayer, \"INTERSECT\", stormLinesLayer, \"\", \"ADD_TO_SELECTION\")\n # Get count and match against previous count to see if the entire drainage shed system is selected\n lineCount2 = int(arcpy.GetCount_management(stormLinesLayer).getOutput(0))\n # Message to keep track of selections\n arcpy.AddWarning(\"previous set = \" + str(lineCount1)+\" and new set = \" + str(lineCount2))\n \n # Message to show end of line selections\n arcpy.AddWarning(\"Finished selecting: \" + str(lineCount1) + \" = \" + str(lineCount2))\n \n # Select only the fixtures that intersect the selected line features\n arcpy.SelectLayerByLocation_management(fixtureLayer, \"INTERSECT\", stormLinesLayer, \"\" \"NEW_SELECTION\")\n fixCount = int(arcpy.GetCount_management(fixturesLayer).getOutput(0))\n arcpy.AddWarning(\"The count of fixtures is \" + str(fixCount) + \".\")\n \n # Perform a summary statistics process on the storm drain lines\n # Statistics_analysis (in_table, out_table, statistics_fields, {case_field})\n arcpy.Statistics_analysis(stormLinesLayer, r\"H:\\EsriPress\\GISTPython\\MyExercises\\MyAnswers.gdb\\\\\" + \\\n systemName.replace(\"-\", \"_\") + \"_StormLineSummary\", [[\"Shape_Length\", \"SUM\"]], \"PipeSize\")\n \n # Perform a summary statistics process on the fixtures \n arcpy.Statistics_analysis(fixturesLayer, r\"H:\\EsriPress\\GISTPython\\MyExercises\\MyAnswers.gdb\\\\\" + \\\n systemName.replace(\"-\", \"_\")+ \"_StormFixtureSummary\", [[\"Type\", \"COUNT\"]], \"Type\")\n\n \n" } ]
8
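The DrainageshedAnalysis.py script in the row above grows a selection with repeated SelectLayerByLocation calls until the feature count stops changing, which is a fixed-point iteration. The sketch below is illustrative only: it restates that loop as plain Python over an adjacency dict so the termination logic can be tested without ArcGIS. The pipe graph is invented sample data, not anything from the repo.

```python
# Illustrative only: the "grow the selection until the count stops changing"
# fixed point from DrainageshedAnalysis.py, over a plain adjacency dict.
def trace_connected(adjacency, start):
    """Return every node reachable from start by repeatedly expanding
    the selected set until its size stops changing."""
    selected = {start}
    previous_count = -1
    while previous_count != len(selected):  # same exit test as lineCount1 != lineCount2
        previous_count = len(selected)
        frontier = set()
        for node in selected:
            frontier.update(adjacency.get(node, ()))  # "select by location: INTERSECT"
        selected |= frontier                          # "ADD_TO_SELECTION"
    return selected

pipes = {"outfall": ["p1"], "p1": ["p2", "p3"], "p2": ["p4"], "p3": [], "p4": []}
print(sorted(trace_connected(pipes, "outfall")))
# ['outfall', 'p1', 'p2', 'p3', 'p4']
```

The loop always terminates because the selected set only grows and is bounded by the total number of features.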
Corters22/Python_Analysis
https://github.com/Corters22/Python_Analysis
837dc02998b1c63a5ced6045162436d8aa2a43bf
61ca56b4d22b28083771ebcbf534a5097c87fd43
66f417273f91461f3f1b87899e0f78e75821adea
refs/heads/main
2023-02-20T18:41:30.991607
2021-01-25T00:06:13
2021-01-25T00:06:13
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7765957713127136, "alphanum_fraction": 0.7765957713127136, "avg_line_length": 130.60000610351562, "blob_id": "323f4aa5aec05add3aca8a685cc53a4c9688be04", "content_id": "510e0fda68e56e4ca1526bbb0aa2c9cf195a1b99", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 658, "license_type": "no_license", "max_line_length": 410, "num_lines": 5, "path": "/README.md", "repo_name": "Corters22/Python_Analysis", "src_encoding": "UTF-8", "text": "# Python Challenge\nWithin this repository, there are two python codes. The first is \"PyBank\" which analyzes a simple csv file of banking data. The second is \"PyPoll\" which analyzes voting/poll information for a small town.\n\n## Coding Instructions\n For both directories, you can find the code in the file \"main.py\". When running the code, you should be in the \"PyBank\" or \"PyPoll\" directory for the import/export paths to work. For reference, you can review the initial csv data file in the \"Resources\" file under their perspective directories. The new text file, \"results.txt,\" that is created when running the code, will be found in the \"Analysis\" folder.\n" }, { "alpha_fraction": 0.6131846904754639, "alphanum_fraction": 0.6316044330596924, "avg_line_length": 29.352941513061523, "blob_id": "17e73530733513f10e16a7fca3fc317f1e31385a", "content_id": "343cf231fa437a4e5da351bdf0eece8f2f207bef", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2063, "license_type": "no_license", "max_line_length": 110, "num_lines": 68, "path": "/PyPoll/main.py", "repo_name": "Corters22/Python_Analysis", "src_encoding": "UTF-8", "text": "import os\nimport csv\n\n#connecting csv file to code\ncsvpath = os.path.join(\"Resources\", \"election_data.csv\")\nwith open(csvpath) as csvfile:\n csvreader = csv.reader(csvfile, delimiter=',')\n csv_header = next(csvreader)\n\n #list of candidates voted for and count of total votes\n candidates = []\n total_votes = 0\n for row in csvreader:\n total_votes = total_votes + 1\n candidates.append(row[2])\n\n #pulling out unique values of candidates\n set_uni = set(candidates)\n unique_cand = list(set_uni)\n \n #counting how many votes each candidate recieved\n total_cand_vote = []\n for each in unique_cand:\n num_of_votes = 0\n for vote in candidates:\n if each == vote:\n num_of_votes += 1\n total_cand_vote.append(num_of_votes)\n\n#zipping lists of #of votes with candidate name\nvote_results = []\nfinal_vote = zip(unique_cand, total_cand_vote)\n#calculating vote percentage, and winner\nfor row in final_vote:\n vote_results.append(row[0] + \": \" + f'{((row[1]/total_votes) *100):.3f}' + \"% \" + \"(\" + str(row[1]) + \")\")\n if row[1] == max(total_cand_vote):\n winner = row[0] \n\n#printing final results\nprint(\"Election Results\")\nprint(\"-\" * 20)\nprint(\"Total Votes: \" + str(total_votes))\nprint(\"-\" * 20)\nprint(vote_results[0])\nprint(vote_results[1])\nprint(vote_results[2])\nprint(vote_results[3])\nprint(\"-\" * 20)\nprint(\"Winner: \" + winner)\nprint(\"-\" * 20)\n \n\n#open new text file\noutput_path = os.path.join(\"Analysis\", \"results.txt\")\nwith open(output_path, 'w', newline='') as textfile:\n \n #print to text file \n print(\"Election Results\", file=textfile)\n print(\"-\" * 20, file=textfile)\n print(\"Total Votes: \" + str(total_votes), file=textfile)\n print(\"-\" * 20, file=textfile)\n print(vote_results[0], file=textfile)\n print(vote_results[1], file=textfile)\n 
print(vote_results[2], file=textfile)\n print(vote_results[3], file=textfile)\n print(\"-\" * 20, file=textfile)\n print(\"Winner: \" + winner, file=textfile)\n print(\"-\" * 20, file=textfile)" }, { "alpha_fraction": 0.6145584583282471, "alphanum_fraction": 0.6217184066772461, "avg_line_length": 36.52238845825195, "blob_id": "ea42386412c6c10cba352e2e9ed458c17cabbba3", "content_id": "df95f02c9f719c6d89e978aa4b3512429176de5b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2514, "license_type": "no_license", "max_line_length": 119, "num_lines": 67, "path": "/PyBank/main.py", "repo_name": "Corters22/Python_Analysis", "src_encoding": "UTF-8", "text": "import csv\nimport os\n\n#joining csv file with code\ncsvpath = os.path.join(\"Resources\", 'budget_data.csv')\nwith open(csvpath) as csvfile:\n csvreader = csv.reader(csvfile, delimiter=',')\n csv_header = next(csvreader)\n \n #calculations and making new lists\n period_change_list = []\n date = []\n prof_loss = []\n row_count = 0\n total = 0\n profit_losses = 0\n for row in csvreader:\n #calculating count of rows\n row_count += 1\n #calculation total profits/losses\n total += int(row[1])\n #calculating changes between two date periods\n #making new lists\n #period_change = int(row[1]) - profit_losses\n period_change_list.append(int(row[1]) - profit_losses)\n date.append(row[0])\n prof_loss.append(row[1]) \n #resetting profit loss value\n profit_losses = int(row[1]) \n\n#zipping lists to get max/min and coordinating dates\nzippy = zip(date, prof_loss, period_change_list) \nfor row in zippy:\n if row[2] == max(period_change_list):\n great_inc_per = row[0]\n elif row[2] == min(period_change_list):\n great_dec_per = row[0]\n \n#calculating the total of the profit/loss changes \ntotal_change = 0\ndel period_change_list[0]\nfor value in period_change_list:\n total_change = total_change + value\n#calculating avg of profit/loss changes \navg_change = total_change/ (len(period_change_list))\n\n \n#print to terminal\nprint(\"Financial Analysis\")\nprint(\"--------------------------\")\nprint(\"Total Months: \" + str(row_count))\nprint(\"Total: $\" + str(total))\nprint(\"Average Change: $\" + f'{avg_change:.2f}')\nprint(\"Greatest Increase in Profits: \" + great_inc_per + ' ($' + str(max(period_change_list)) + ')')\nprint(\"Greatest Decrease in Profits: \" + great_dec_per + ' ($' + str(min(period_change_list)) + ')')\n\n#open new text file\noutput_path = os.path.join(\"Analysis\", \"results.txt\")\nwith open(output_path, 'w', newline='') as textfile:\n #print to text file\n print(\"Financial Analysis\", file=textfile)\n print(\"--------------------------\", file=textfile)\n print(\"Total Months: \" + str(row_count), file=textfile)\n print(\"Total: $\" + str(total), file=textfile)\n print(\"Average Change: $\" + f'{avg_change:.2f}', file=textfile)\n print(\"Greatest Increase in Profits: \" + great_inc_per + ' ($' + str(max(period_change_list)) + ')', file=textfile)\n print(\"Greatest Decrease in Profits: \" + great_dec_per + ' ($' + str(min(period_change_list)) + ')', file=textfile)\n" }, { "alpha_fraction": 0.5891398191452026, "alphanum_fraction": 0.598270058631897, "avg_line_length": 34.879310607910156, "blob_id": "ae3864519151bdee4ea87d6d2b4721863d6007f8", "content_id": "23e2fabce97301f2f8219144140b1dc41079ba3b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2081, "license_type": "no_license", "max_line_length": 119, "num_lines": 58, 
"path": "/PyBank/Resources/PyBankCode.py", "repo_name": "Corters22/Python_Analysis", "src_encoding": "UTF-8", "text": "import csv\nimport os\n\ncsvpath = os.path.join('budget_data.csv')\nwith open(csvpath) as csvfile:\n csvreader = csv.reader(csvfile, delimiter=',')\n csv_header = next(csvreader)\n \n period_change_list = []\n date = []\n prof_loss = []\n row_count = 0\n total = 0\n profit_losses = 0\n for row in csvreader:\n row_count = row_count + 1\n total = total + int(row[1])\n profit_losses_2 = int(row[1])\n period_change = profit_losses_2 - profit_losses\n period_change_list.append(period_change)\n date.append(row[0])\n prof_loss.append(row[1]) \n profit_losses = int(row[1]) \n\nzippy = zip(date, prof_loss, period_change_list) \nfor row in zippy:\n if row[2] == max(period_change_list):\n great_inc_per = row[0]\n elif row[2] == min(period_change_list):\n great_dec_per = row[0]\n \n \ntotal_change = 0\ndel period_change_list[0]\nfor value in period_change_list:\n total_change = total_change + value\n \navg_change = total_change/ (len(period_change_list))\n\n \n\nprint(\"Financial Analysis\")\nprint(\"--------------------------\")\nprint(\"Total Months: \" + str(row_count))\nprint(\"Total: $\" + str(total))\nprint(\"Average Change: $\" + f'{avg_change:.2f}')\nprint(\"Greatest Increase in Profits: \" + great_inc_per + ' ($' + str(max(period_change_list)) + ')')\nprint(\"Greatest Decrease in Profits: \" + great_dec_per + ' ($' + str(min(period_change_list)) + ')')\n\noutput_path = os.path.join(\"../Analysis\", \"results.txt\")\nwith open(output_path, 'w', newline='') as textfile:\n print(\"Financial Analysis\", file=textfile)\n print(\"--------------------------\", file=textfile)\n print(\"Total Months: \" + str(row_count), file=textfile)\n print(\"Total: $\" + str(total), file=textfile)\n print(\"Average Change: $\" + f'{avg_change:.2f}', file=textfile)\n print(\"Greatest Increase in Profits: \" + great_inc_per + ' ($' + str(max(period_change_list)) + ')', file=textfile)\n print(\"Greatest Decrease in Profits: \" + great_dec_per + ' ($' + str(min(period_change_list)) + ')', file=textfile)\n" } ]
4
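PyPoll's tally in the row above counts each candidate's votes with a nested loop over the unique names, which is O(candidates x votes). A compact standard-library alternative is collections.Counter; the ballot list below is invented sample data, not the repo's election_data.csv.

```python
from collections import Counter

votes = ["Khan", "Correy", "Khan", "Li", "Khan", "Correy"]  # made-up ballots
tally = Counter(votes)                                      # candidate -> vote count
total = sum(tally.values())

for name, count in tally.most_common():                     # sorted by count, descending
    print(f"{name}: {count / total:.3%} ({count})")

winner, _ = tally.most_common(1)[0]
print("Winner: " + winner)
```

This also removes the hardcoded vote_results[0] through vote_results[3] indexing, since the loop works for any number of candidates.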
fact-project/shower_animation
https://github.com/fact-project/shower_animation
7a216a21508e9d3152fdd5496c9552ea4f128975
4310536a764b6cde225c70785a3974553c870606
b86842b86f34d218514e98b4861785c9603f9346
refs/heads/master
2020-12-11T04:12:26.241440
2016-09-21T10:02:17
2016-09-21T10:02:17
68,806,122
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.6093088984489441, "alphanum_fraction": 0.6403384804725647, "avg_line_length": 25.754716873168945, "blob_id": "7e0ee83aac5297664d13fc4dde6b64df24d1a37a", "content_id": "839446ddf312a741a5144636bfd421d9d67e3155", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1418, "license_type": "no_license", "max_line_length": 89, "num_lines": 53, "path": "/animate_shower.py", "repo_name": "fact-project/shower_animation", "src_encoding": "UTF-8", "text": "from argparse import ArgumentParser\nfrom astropy.io import fits\nfrom matplotlib.animation import FuncAnimation\nfrom fact.plotting import camera\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\nparser = ArgumentParser()\nparser.add_argument('inputfile')\nparser.add_argument('eventnum', type=int)\nparser.add_argument('outputfile')\nparser.add_argument(\n '--first', dest='first', type=int, default=20,\n help='first slice to plot',\n)\nparser.add_argument(\n '--last', dest='last', type=int, default=200,\n help='last slice to plot',\n)\n\n\ndef main():\n args = parser.parse_args()\n\n with fits.open(args.inputfile) as f:\n data = f[1].data[args.eventnum]['DataCalibrated']\n\n img = data.reshape((1440, 300))\n img = img[:, args.first:args.last]\n\n fig = plt.figure(figsize=(10, 10))\n ax = fig.add_axes([0, 0, 1, 1])\n ax.set_axis_off()\n\n vmin = np.nanpercentile(img, 1)\n vmax = np.nanpercentile(img, 99)\n c = camera(img[:, 0], cmap='inferno', vmin=vmin, vmax=vmax)\n template = '$t = {: 3.1f}\\,\\mathrm{{ns}}$'\n t = plt.text(120, 189, template.format(0), size=30, va='top')\n\n def update(i):\n c.set_array(img[:, i])\n t.set_text(template.format(i / 2))\n return c, t\n\n ani = FuncAnimation(fig, update, np.arange(0, img.shape[1]), interval=100, blit=True)\n\n ani.save(args.outputfile, writer='imagemagick', dpi=40)\n\n\nif __name__ == '__main__':\n main()\n" } ]
1
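animate_shower.py in the row above computes vmin/vmax once, from the 1st and 99th percentiles of the whole event, and reuses them for every frame. A small numpy illustration of why that helps: per-frame min/max scaling makes the colormap flicker, and a single hot pixel would compress the rest of the range. The array here is random demo data, not FACT camera output.

```python
import numpy as np

rng = np.random.default_rng(0)
frames = rng.normal(size=(1440, 300))  # fake (pixel, time slice) image
frames[0, 0] = 1e6                     # one outlier ruins plain min/max scaling

vmin = np.nanpercentile(frames, 1)     # robust lower bound
vmax = np.nanpercentile(frames, 99)    # robust upper bound
print(f"raw min/max: {frames.min():.2f} / {frames.max():.2f}")
print(f"1% / 99%:    {vmin:.2f} / {vmax:.2f}")
# every frame is then drawn with the same limits, as in
# camera(img[:, i], cmap='inferno', vmin=vmin, vmax=vmax)
```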
Maymonah98/trivia-app
https://github.com/Maymonah98/trivia-app
302b9ea03f47efff8e8a7b90b210d1ea6c600016
9e01d49cc70a3dc3962ad2e48501bfec10addb2f
6b34bfeab0056d812f8d8799c4271a2d17066a2d
refs/heads/master
2023-07-02T07:00:33.975528
2021-08-07T15:45:38
2021-08-07T15:45:38
389,224,259
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6388064622879028, "alphanum_fraction": 0.6494503617286682, "avg_line_length": 26.661836624145508, "blob_id": "c24136be9fced325f0610623297afa4a4dd63ae9", "content_id": "4a96e644bf7237aff82c4fe7700af47fee607405", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5731, "license_type": "no_license", "max_line_length": 131, "num_lines": 207, "path": "/backend/flaskr/__init__.py", "repo_name": "Maymonah98/trivia-app", "src_encoding": "UTF-8", "text": "import os\nfrom flask import Flask, json, request, abort, jsonify\nfrom flask.globals import current_app\nfrom flask.helpers import total_seconds\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_cors import CORS , cross_origin\nimport random\n\nfrom sqlalchemy.sql.elements import Null\n\nfrom models import setup_db, Question, Category\n\nQUESTIONS_PER_PAGE = 10\n\ndef paginate_questions(request,selection):\n page = request.args.get('page', 1, type=int)\n start = (page - 1) * QUESTIONS_PER_PAGE\n end = start + QUESTIONS_PER_PAGE\n \n questions=[question.format() for question in selection]\n current_questions= questions[start:end]\n\n return current_questions\n\ndef create_app(test_config=None):\n # create and configure the app\n app = Flask(__name__)\n setup_db(app)\n \n \n CORS(app, resources={r\"/api/*\" : {'origins': '*'}})\n \n @app.after_request\n def after_request(response):\n response.headers.add('Access-Control-Allow-Headers', 'Content-Type,Authorization,true')\n response.headers.add('Access-Control-Allow-Methods', 'GET,PUT,POST,DELETE,OPTIONS')\n return response\n \n @app.route('/categories')\n def get_categories():\n categories= Category.query.all()\n formatted_categories={}\n for category in categories:\n formatted_categories[category.id]=category.type\n\n if len(categories)==0:\n abort(404)\n\n return jsonify({\n 'success':True,\n 'categories': formatted_categories,\n })\n\n \n @app.route('/questions')\n def get_questions():\n categories= Category.query.order_by(Category.id).all()\n formatted_categories={}\n current_category=True\n for category1 in categories:\n formatted_categories[category1.id]=category1.type \n\n selection=Question.query.order_by(Question.id).all()\n current_questions= paginate_questions(request,selection)\n\n if len(current_questions) ==0:\n abort(404)\n\n return jsonify({\n 'success':True,\n 'questions': current_questions,\n 'total_questions': len(Question.query.all()),\n 'categories':formatted_categories,\n 'current_category': current_category\n })\n\n \n @app.route('/questions/<int:question_id>',methods=['DELETE'])\n def delete_question(question_id):\n try:\n question=Question.query.filter(Question.id==question_id).one_or_none()\n\n if question is None:\n abort(404)\n \n question.delete()\n selection=Question.query.order_by(Question.id).all()\n current_questions= paginate_questions(request,selection)\n\n return jsonify({\n 'success':True,\n 'deleted': question_id,\n 'questions':current_questions,\n 'total_questions': len(Question.query.all())\n })\n except:\n abort(400)\n \n\n @app.route('/questions',methods=['POST'])\n def create_question():\n body=request.get_json()\n new_question=body.get('question',None)\n new_ansewr=body.get('answer',None)\n new_category=body.get('category',None)\n new_difficulty=body.get('difficulty',None)\n search=body.get('searchTerm',None)\n\n try:\n if search:\n selection= Question.query.order_by(Question.id).filter(Question.question.ilike('%{}%'.format(search)))\n 
current_questions=paginate_questions(request,selection)\n\n return jsonify({\n 'success':True,\n 'questions':current_questions,\n 'total_questions':len(selection.all()),\n 'current_category':''\n })\n else:\n question=Question(question=new_question,answer=new_ansewr,category=new_category,difficulty=new_difficulty)\n question.insert()\n\n selection=Question.query.order_by(Question.id).all()\n current_questions=paginate_questions(request,selection)\n\n return jsonify({\n 'success':True,\n 'created':question.id,\n 'questions':current_questions,\n 'total_questions':len(selection)\n })\n\n except:\n abort(422)\n\n \n @app.route('/categories/<int:category_id>/questions')\n def get_questions_based_on_category(category_id):\n selection=Question.query.filter(Question.category==category_id).all()\n current_questions=paginate_questions(request,selection)\n if len(current_questions)==0:\n abort(404)\n \n return jsonify({\n 'success':True,\n 'questions':current_questions,\n 'total_questions':len(Question.query.all()),\n 'current_category':category_id\n })\n \n \n @app.route('/quizzes',methods=['POST'])\n def play_the_quiz():\n body=request.get_json()\n previous_question=body.get('previous_questions')\n quiz_category=body.get('quiz_category')\n\n if quiz_category['id']==0:\n questions=Question.query.filter(Question.id.notin_((previous_question))).all()\n total=len(questions)\n else:\n questions=Question.query.filter(Question.category==quiz_category['id']).filter(Question.id.notin_((previous_question))).all()\n total=len(questions)\n \n if total > 0 :\n current_question = questions[random.randrange(0, len(questions))].format() \n else: \n return jsonify({\n 'success': True\n }) \n \n if quiz_category is None :\n abort(400)\n\n return jsonify({\n 'success': True,\n 'question':current_question,\n 'previous_questions':previous_question,\n })\n \n \n @app.errorhandler(404)\n def not_found(error):\n return jsonify({\n 'success': False,\n 'error':404,\n 'message': 'Not found'\n }),404\n\n @app.errorhandler(400)\n def bad_request(error):\n return jsonify({\n 'success': False,\n 'error':400,\n 'message': 'Bad request'\n }),400\n\n @app.errorhandler(422)\n def unprocessable(error):\n return jsonify({\n 'success': False,\n 'error':422,\n 'message': 'Unprocessable'\n }),422\n\n return app\n\n " }, { "alpha_fraction": 0.5938475131988525, "alphanum_fraction": 0.6184797286987305, "avg_line_length": 27.4761905670166, "blob_id": "51fc84e52398f4087006241801cfc23bf98b9b59", "content_id": "7a5376f4308e4f2195645f3bf6fc5667f52675e5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 8972, "license_type": "no_license", "max_line_length": 387, "num_lines": 315, "path": "/README.md", "repo_name": "Maymonah98/trivia-app", "src_encoding": "UTF-8", "text": "# Full Stack API Final Project\n\n\n## Full Stack Trivia\n\nUdacity is invested in creating bonding experiences for its employees and students. 
A bunch of team members got the idea to hold trivia on a regular basis and created a webpage to manage the trivia app and play the game, but their API experience is limited and still needs to be built out.\n\nCompleting this trivia app gave me the ability to structure plan, implement, and test an API - skills essential for enabling my future applications to communicate with others.\n\n## Getting started\n### Pre-requisites and Local Development \nDevelopers using this project should already have Python3, pip and node installed on their local machines.\n\n#### Backend\n\nFrom the backend folder run `pip install requirements.txt`. All required packages are included in the requirements file. \n\nTo run the application run the following commands: \n```\nexport FLASK_APP=flaskr\nexport FLASK_ENV=development\nflask run\n```\n\nThese commands put the application in development and directs our application to use the `__init__.py` file in our flaskr folder. Working in development mode shows an interactive debugger in the console and restarts the server whenever changes are made. If running locally on Windows, look for the commands in the [Flask documentation](http://flask.pocoo.org/docs/1.0/tutorial/factory/).\n\nThe application is run on `http://127.0.0.1:5000/` by default and is a proxy in the frontend configuration. \n\n#### Frontend\n\nFrom the frontend folder, run the following commands to start the client: \n```\nnpm install // only once to install dependencies\nnpm start \n```\n\nBy default, the frontend will run on localhost:3000. \n\n### Tests\nIn order to run tests navigate to the backend folder and run the following commands: \n\n```\ndropdb trivia_test\ncreatedb trivia_test\npsql trivia_test < trivia.psql\npython test_flaskr.py\n```\n\nThe first time you run the tests, omit the dropdb command. \n\nAll tests are kept in that file and should be maintained as updates are made to app functionality. \n\n\n## API Reference\n\n### Getting Started\n- Base URL: At present this app can only be run locally and is not hosted as a base URL. The backend app is hosted at the default, `http://127.0.0.1:5000/`, which is set as a proxy in the frontend configuration. \n- Authentication: This version of the application does not require authentication or API keys. \n\n### Error Handling\nErrors are returned as JSON objects in the following format:\n```\n{\n \"success\": False, \n \"error\": 400,\n \"message\": \"bad request\"\n}\n```\nThe API will return three error types when requests fail:\n- 400: Bad Request\n- 404: Resource Not Found\n- 422: Not Processable \n\n### Endpoints \n#### GET /categories\n\n- General:\n - Returns a list of categories , success value. \n- Sample: `curl http://127.0.0.1:5000/categories`\n\n```\n{\n \"categories\": {\n \"1\": \"Science\", \n \"2\": \"Art\", \n \"3\": \"Geography\", \n \"4\": \"History\", \n \"5\": \"Entertainment\", \n \"6\": \"Sports\"\n }, \n \"success\": true\n}\n```\n#### GET /questions\n- General:\n - Returns a list of question objects based on category, categories, current category, success value, and total number of questions\n - Results are paginated in groups of 10. Include a request argument to choose page number, starting from 1. 
\n- Sample: `curl http://127.0.0.1:5000/questions`\n\n```\n{\n \"categories\": [\n {\n \"1\": \"Science\", \n \"2\": \"Art\", \n \"3\": \"Geography\", \n \"4\": \"History\", \n \"5\": \"Entertainment\", \n \"6\": \"Sports\"\n }\n ], \n \"current_category\": \"Sports\", \n \"questions\": [\n {\n \"answer\": \"Brazil\", \n \"category\": 6, \n \"difficulty\": 3, \n \"id\": 10, \n \"question\": \"Which is the only team to play in every soccer World Cup tournament?\"\n }, \n {\n \"answer\": \"Uruguay\", \n \"category\": 6, \n \"difficulty\": 4, \n \"id\": 11, \n \"question\": \"Which country won the first ever soccer World Cup in 1930?\"\n }\n ], \n \"success\": true, \n \"total_questions\": 19\n}\n```\n#### GET /categories/{id}/questions\n- General:\n - Returns a list of question objects based on category, current category, success value, and total number of questions\n - Results are paginated in groups of 10. Include a request argument to choose page number, starting from 1. \n- Sample: `curl http://127.0.0.1:5000/categories/2/questions`\n\n```\n{\n \"currentCategory\": 2, \n \"questions\": [\n {\n \"answer\": \"Escher\", \n \"category\": 2, \n \"difficulty\": 1, \n \"id\": 16, \n \"question\": \"Which Dutch graphic artist\\u2013initials M C was a creator of optical illusions?\"\n }, \n {\n \"answer\": \"Mona Lisa\", \n \"category\": 2, \n \"difficulty\": 3, \n \"id\": 17, \n \"question\": \"La Giaconda is better known as what?\"\n }, \n {\n \"answer\": \"One\", \n \"category\": 2, \n \"difficulty\": 4, \n \"id\": 18, \n \"question\": \"How many paintings did Van Gogh sell in his lifetime?\"\n }, \n {\n \"answer\": \"Jackson Pollock\", \n \"category\": 2, \n \"difficulty\": 2, \n \"id\": 19, \n \"question\": \"Which American artist was a pioneer of Abstract Expressionism, and a leading exponent of action painting?\"\n }, \n {\n \"answer\": \"yellow and blue\", \n \"category\": 2, \n \"difficulty\": 1, \n \"id\": 24, \n \"question\": \"what is the mix that produce green color?\"\n }\n ], \n \"success\": true, \n \"totalQuestions\": 19\n}\n```\n\n#### DELETE /questions/{question_id}\n- General:\n - Deletes the question of the given ID if it exists. Returns the id of the deleted question, success value, total questions, and question list based on current page number . 
\n- `curl -X DELETE http://127.0.0.1:5000/questions/16?page=2`\n\n```\n{\n \"questions\": [\n {\n \"answer\": \"Maya Angelou\", \n \"category\": 4, \n \"difficulty\": 2, \n \"id\": 5, \n \"question\": \"Whose autobiography is entitled 'I Know Why the Caged Bird Sings'?\"\n }, \n {\n \"answer\": \"Muhammad Ali\", \n \"category\": 4, \n \"difficulty\": 1, \n \"id\": 9, \n \"question\": \"What boxer's original name is Cassius Clay?\"\n }, \n {\n \"answer\": \"Apollo 13\", \n \"category\": 5, \n \"difficulty\": 4, \n \"id\": 2, \n \"question\": \"What movie earned Tom Hanks his third straight Oscar nomination, in 1996?\"\n }, \n {\n \"answer\": \"Tom Cruise\", \n \"category\": 5, \n \"difficulty\": 4, \n \"id\": 4, \n \"question\": \"What actor did author Anne Rice first denounce, then praise in the role of her beloved Lestat?\"\n }, \n {\n \"answer\": \"Edward Scissorhands\", \n \"category\": 5, \n \"difficulty\": 3, \n \"id\": 6, \n \"question\": \"What was the title of the 1990 fantasy directed by Tim Burton about a young man with multi-bladed appendages?\"\n }, \n {\n \"answer\": \"Brazil\", \n \"category\": 6, \n \"difficulty\": 3, \n \"id\": 10, \n \"question\": \"Which is the only team to play in every soccer World Cup tournament?\"\n }, \n {\n \"answer\": \"Uruguay\", \n \"category\": 6, \n \"difficulty\": 4, \n \"id\": 11, \n \"question\": \"Which country won the first ever soccer World Cup in 1930?\"\n }, \n {\n \"answer\": \"George Washington Carver\", \n \"category\": 4, \n \"difficulty\": 2, \n \"id\": 12, \n \"question\": \"Who invented Peanut Butter?\"\n }, \n {\n \"answer\": \"Lake Victoria\", \n \"category\": 3, \n \"difficulty\": 2, \n \"id\": 13, \n \"question\": \"What is the largest lake in Africa?\"\n }, \n {\n \"answer\": \"The Palace of Versailles\", \n \"category\": 3, \n \"difficulty\": 3, \n \"id\": 14, \n \"question\": \"In which royal palace would you find the Hall of Mirrors?\"\n }\n ],\n \"deleted\": 16,\n \"success\": true,\n \"total_questions\": 18\n}\n```\n\n\n#### POST /questions\n\n- General:\n - Creates a new question using the submitted question, answer, category and difficulty. Returns the id of the created question, success value, total questions, and questions list based on current page number to update the frontend. \n - Sends a post request in order to search for a specific question by search term \n\n- `curl http://127.0.0.1:5000/questions?page=3 -X POST -H \"Content-Type: application/json\" -d '{\"question\":\"This is a question\", \"answer\":\"This is an answer\",\"category\":\"5\", \"difficulty\":\"5\"}'`\n\n```\n{\n 'questions': [\n {\n 'id': 20,\n 'question': 'This is a question',\n 'answer': 'This is an answer', \n 'difficulty': 5,\n 'category': 5\n },\n ],\n 'total_questions': 100,\n 'current_category': 'Entertainment'\n}\n```\n\n#### POST /quizzes\n- Sends a post request in order to get the next question\n- Returns a single new question object\n``` \n{\n 'question': {\n 'id': 1,\n 'question': 'This is a question',\n 'answer': 'This is an answer', \n 'difficulty': 5,\n 'category': 4\n }\n}\n```\n\n## Deployment N/A\n\n## Authors\nUdacity team , Maymonah Althunayan\n\n## Acknowledgements \nThe awesome team at Udacity and all of the students, soon to be full stack extraordinaires! \n\n" } ]
2
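The trivia backend above slices each endpoint's results with paginate_questions. The slice arithmetic stands alone; here it is with plain integers in place of Question objects (sample data, not the trivia database):

```python
QUESTIONS_PER_PAGE = 10

def paginate(items, page):
    # page numbers start at 1, matching request.args.get('page', 1, type=int)
    start = (page - 1) * QUESTIONS_PER_PAGE
    end = start + QUESTIONS_PER_PAGE
    return items[start:end]

questions = list(range(1, 26))   # 25 stand-in questions
print(paginate(questions, 1))    # [1, ..., 10]
print(paginate(questions, 3))    # [21, ..., 25], a short final page
print(paginate(questions, 4))    # [], which the endpoints answer with abort(404)
```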
roocell/micindicator
https://github.com/roocell/micindicator
4315ea79b25b00f6d77252f67b30ce0c8a3cb0d0
62232c85028f2593a96e39d6d6e4ab8c888f317a
504a49abf7abb9ce4606addf38d51feb3c99f1c2
refs/heads/main
2023-03-01T22:30:16.314891
2021-02-07T19:19:33
2021-02-07T19:19:33
336,397,533
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.649393618106842, "alphanum_fraction": 0.6934950351715088, "avg_line_length": 22.230770111083984, "blob_id": "1fc71eeaa2fed24b8273fabeb6f4fca630972cfc", "content_id": "90a9fb52dbd4a004b130be83f711f28dc9a9ad44", "detected_licenses": [ "Unlicense" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 907, "license_type": "permissive", "max_line_length": 50, "num_lines": 39, "path": "/RPi_Keyboard_Example.py", "repo_name": "roocell/micindicator", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\nNULL_CHAR = chr(0)\n\ndef write_report(report):\n with open('/dev/hidg0', 'rb+') as fd:\n fd.write(report.encode())\n\n# Press a\nwrite_report(NULL_CHAR*2+chr(4)+NULL_CHAR*5)\n# Release keys\nwrite_report(NULL_CHAR*8)\n# Press SHIFT + a = A\nwrite_report(chr(32)+NULL_CHAR+chr(4)+NULL_CHAR*5)\n\n# Press b\nwrite_report(NULL_CHAR*2+chr(5)+NULL_CHAR*5)\n# Release keys\nwrite_report(NULL_CHAR*8)\n# Press SHIFT + b = B\nwrite_report(chr(32)+NULL_CHAR+chr(5)+NULL_CHAR*5)\n\n# Press SPACE key\nwrite_report(NULL_CHAR*2+chr(44)+NULL_CHAR*5)\n\n# Press c key\nwrite_report(NULL_CHAR*2+chr(6)+NULL_CHAR*5)\n# Press d key\nwrite_report(NULL_CHAR*2+chr(7)+NULL_CHAR*5)\n\n# Press RETURN/ENTER key\nwrite_report(NULL_CHAR*2+chr(40)+NULL_CHAR*5)\n\n# Press e key\nwrite_report(NULL_CHAR*2+chr(8)+NULL_CHAR*5)\n# Press f key\nwrite_report(NULL_CHAR*2+chr(9)+NULL_CHAR*5)\n\n# Release all keys\nwrite_report(NULL_CHAR*8)\n\n" }, { "alpha_fraction": 0.6796610355377197, "alphanum_fraction": 0.6830508708953857, "avg_line_length": 19.071428298950195, "blob_id": "4bdc88f74523a63cb98b2a5d899165bbb62b348c", "content_id": "1b416d1a43465fcee893c28fb68c24c400f984e1", "detected_licenses": [ "Unlicense" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 590, "license_type": "permissive", "max_line_length": 94, "num_lines": 28, "path": "/app.py", "repo_name": "roocell/micindicator", "src_encoding": "UTF-8", "text": "#!/usr/bin/python3\r\n\r\n# pizero as bluetooth keyboard\r\n\r\n#https://thanhle.me/make-raspberry-pi3-as-an-emulator-bluetooth-keyboard/\r\n#https://mtlynch.io/key-mime-pi/\r\n\r\n\r\nimport RPi.GPIO as GPIO\r\nimport os, time, sys, datetime\r\nimport logging\r\n\r\n# create logger\r\nlog = logging.getLogger(__file__)\r\nlog.setLevel(logging.DEBUG)\r\nch = logging.StreamHandler()\r\nch.setLevel(logging.DEBUG)\r\nformatter = logging.Formatter('%(asctime)s %(levelname)s %(filename)s:%(lineno)d %(message)s')\r\nch.setFormatter(formatter)\r\nlog.addHandler(ch)\r\n\r\n\r\n\r\ndef main():\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n" }, { "alpha_fraction": 0.7316906452178955, "alphanum_fraction": 0.7600958347320557, "avg_line_length": 43.65625, "blob_id": "f3825d23a490918fba892e118613125cae31eead", "content_id": "9391dca34c2faee6d6cb3f0c2236ddd1fe4acaf2", "detected_licenses": [ "Unlicense" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2922, "license_type": "permissive", "max_line_length": 111, "num_lines": 64, "path": "/README.md", "repo_name": "roocell/micindicator", "src_encoding": "UTF-8", "text": "# micindicator\r\nthis project came about because I saw this indiegogo campaign for a\r\n$35 USB device that was nothing but a mute button with LEDs.\r\nI thought \"I could make that\" - let's see.\r\n\r\nthe mic architecture in windows seems overlay complicated.\r\nsurprisingly there is not default hotkey to toggle your mic.\r\noriginally i thought having a raspi mimic a keyboard and send a key 
to the PC\r\nwould replicate this device.\r\n\r\nwhile it's a good opportunity to learn bluetooth. it does make more\r\nsense to do it over USB so it can power the pizero as well. we could emulate a\r\nkeyboard over USB or bluetooth.\r\n\r\nafter experimenting with different solutions (with some suggestions from other),\r\nrealized a raspi isn't required at all. It could be done just using the\r\nautohotkey app with scripts to do the work (including toggling a Hue bulb).\r\nThis may be a good solution for most people\r\n\r\nhttps://www.autohotkey.com/boards/viewtopic.php?t=15509\r\nhttps://www.howtogeek.com/319428/how-to-control-your-philips-hue-lights-with-keyboard-shortcuts/ \r\n\r\nso with that done. let's get back to replicating the indiegogo device\r\n\r\n# turn raspi into a bluetooth keyboard and mouse\r\n# https://github.com/quangthanh010290/keyboard_mouse_emulate_on_raspberry\r\n# https://thanhle.me/emulate-bluetooth-mouse-with-raspberry-pi/\r\n\r\n# emulate a USB keyboard\r\nhttps://randomnerdtutorials.com/raspberry-pi-zero-usb-keyboard-hid/\r\nhttps://makerhacks.com/usb-keyboard-emulation-with-the-raspberry-pi-zero/\r\nhttp://www.isticktoit.net/?p=1383\r\n\r\nbe sure to use a OTG USB cable!\r\ni.e. some USB cables won't allow pizero to be detected\r\n\r\nsudo vi /usr/bin/isticktoit_usb\r\nthis file gets run at startup (in /etc/rc.local)\r\n\r\nThe simplest way to send keystrokes is by echoing HID packets to the device file:\r\nsudo echo -ne \"\\0\\0\\x4\\0\\0\\0\\0\\0\" > /dev/hidg0 #press the A-button\r\nsudo echo -ne \"\\0\\0\\0\\0\\0\\0\\0\\0\" > /dev/hidg0 #release all keys\r\n\r\n# MSFT usb telephony spec\r\n# this is probably the proper way to do it - but requires commerical device\r\n# and licenses, etc.\r\nhttp://download.microsoft.com/download/1/6/1/161ba512-40e2-4cc9-843a-923143f3456c/usbtelephony-v091.doc\r\n\r\n# craziness - there isn't a standard hotkey to mute your microphone\r\n# need to run an app triggered by a hotkey to do it\r\n# autohotkey\r\n# this is very responsive and works perfectly\r\n# you just need to figure out your device number using the findmic.ahk script\r\n# then modify the togglemute.ahk script with it\r\nhttps://www.autohotkey.com/\r\nhttps://www.autohotkey.com/boards/viewtopic.php?t=15509\r\n\r\n# haha. don't even need a pi\r\nhttps://www.howtogeek.com/319428/how-to-control-your-philips-hue-lights-with-keyboard-shortcuts/ \r\n\r\n# hotkey to run app\r\n# nirsoft stuff takes forever (up to 7 seconds) to run\r\nhttps://www.howtogeek.com/howto/windows-vista/create-a-shortcut-or-hotkey-to-mute-the-system-volume-in-windows/\r\nhttps://www.nirsoft.net/utils/sound_volume_view.html\r\n" } ]
3
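The micindicator README above sends keystrokes by echoing 8-byte HID reports (modifier byte, reserved byte, six keycode slots) into /dev/hidg0. Below is a sketch of typing a whole lowercase word the same way; it assumes the USB gadget from the isticktoit setup exists and the script runs as root. HID usage IDs for 'a' through 'z' are 4 through 29 per the USB HID usage tables.

```python
NULL = chr(0)

def keycode(ch):
    # 'a' -> 4, 'b' -> 5, ... 'z' -> 29 (USB HID keyboard usage page)
    return chr(ord(ch) - ord('a') + 4)

def type_string(text, device='/dev/hidg0'):
    # assumes the HID gadget device exists, as configured by isticktoit_usb
    with open(device, 'rb+') as fd:
        for ch in text:
            fd.write((NULL * 2 + keycode(ch) + NULL * 5).encode())  # key down
            fd.write((NULL * 8).encode())                           # release all keys

type_string('hello')
```

Releasing between presses matters: without the all-zero report, repeated letters such as the 'll' in 'hello' would register only once.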
jys000415/tryPyCharm
https://github.com/jys000415/tryPyCharm
92af089f5bd1d9885dbd86cefeb58bcfe07500a5
ecb2cb30807176097218674dd2e5f1103ecb12c3
c0e9252e1902f5b174777c73f8bd7b63b4a8724a
refs/heads/master
2023-02-23T11:28:47.833341
2021-01-26T10:25:44
2021-01-26T10:25:44
333,046,607
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7166666388511658, "alphanum_fraction": 0.7166666388511658, "avg_line_length": 19, "blob_id": "be86450304361623b8ac366c52f0ebafb5e60b92", "content_id": "e026204fb521e6e5c51ea440c538a251dba4b610", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 60, "license_type": "no_license", "max_line_length": 22, "num_lines": 3, "path": "/main.py", "repo_name": "jys000415/tryPyCharm", "src_encoding": "UTF-8", "text": "print('Hello World')\nprint('Goodbye World')\nprint('Upload')\n" } ]
1
iosamuel/PhoneBook
https://github.com/iosamuel/PhoneBook
5e21fb977c2498a2d7c014cd7e9145d0ac1e5296
de06108df4fc02486cb1330a7c3128e41d3c0a10
11e5bd00ac85d523b55238f89043bb54e8e86a58
refs/heads/master
2021-01-25T08:42:30.427169
2013-05-01T00:24:55
2013-05-01T00:24:55
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7090300917625427, "alphanum_fraction": 0.739130437374115, "avg_line_length": 29, "blob_id": "f9c041ac2e9daeaa86cb06641b02b22ef19cea69", "content_id": "350f12837bca096a9091d703aefd147b755c60a0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 299, "license_type": "no_license", "max_line_length": 49, "num_lines": 10, "path": "/phonebook/principal/models.py", "repo_name": "iosamuel/PhoneBook", "src_encoding": "UTF-8", "text": "from django.db import models\n\nclass Datos(models.Model):\n\tnombres = models.CharField(max_length=150)\n\tapellidos = models.CharField(max_length=150)\n\tnumero = models.IntegerField()\n\tdireccion = models.CharField(max_length=200)\n\n\tdef __unicode__(self):\n\t\treturn \"%s %s\" % (self.nombres, self.apellidos)" }, { "alpha_fraction": 0.7376543283462524, "alphanum_fraction": 0.7376543283462524, "avg_line_length": 31.5, "blob_id": "248be9965b361ea2b8f9f697cdaf61d8053cb5ec", "content_id": "d6981a482935f3753828b8eb8aa092258fd160e0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 324, "license_type": "no_license", "max_line_length": 57, "num_lines": 10, "path": "/phonebook/principal/views.py", "repo_name": "iosamuel/PhoneBook", "src_encoding": "UTF-8", "text": "from django.shortcuts import render_to_response\nfrom principal.models import Datos\n\ndef index(request):\n\tdatos = Datos.objects.all().order_by('-id')\n\treturn render_to_response(\"index.html\", {\"datos\":datos})\n\ndef detalle(request, id):\n\tdato = Datos.objects.get(pk=id)\n\treturn render_to_response(\"detalle.html\", {\"dato\":dato})" }, { "alpha_fraction": 0.8421052694320679, "alphanum_fraction": 0.8421052694320679, "avg_line_length": 23, "blob_id": "02e097e9c435c24053828797d839b4a23bf3337e", "content_id": "f4da5c165f48603e308482bc3f6b11ebfc074e13", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 95, "license_type": "no_license", "max_line_length": 34, "num_lines": 4, "path": "/phonebook/principal/admin.py", "repo_name": "iosamuel/PhoneBook", "src_encoding": "UTF-8", "text": "from django.contrib import admin\nfrom principal.models import Datos\n\nadmin.site.register(Datos)" }, { "alpha_fraction": 0.5826446413993835, "alphanum_fraction": 0.5826446413993835, "avg_line_length": 33.57143020629883, "blob_id": "1c31e98b585d644d5859b76267d63de4b87cd380", "content_id": "45a116c7156afb15ae7420a5d16949df7c684da4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 242, "license_type": "no_license", "max_line_length": 76, "num_lines": 7, "path": "/README.md", "repo_name": "iosamuel/PhoneBook", "src_encoding": "UTF-8", "text": "# PhoneBook hecho en Python/Django\n----------------------------------\nEste es el proyecto realizado para el webcast #pythonIO de Desarrolloweb.com\n\n## Desarrollado por\n--------------------\nSamuel Burbano Ramos - [http://samuelbr.com](http://samuelbr.com/)\n" } ]
4
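The PhoneBook views above (index, and detalle, which takes an id) imply a URLconf that is not part of this dump. A hypothetical urls.py wiring them up, assuming a Django version around 1.8 to 2.x where both url() and the repo's render_to_response are available:

```python
# Hypothetical -- this file is not in the repository snapshot.
from django.conf.urls import url
from principal import views

urlpatterns = [
    url(r'^$', views.index),                        # contact list
    url(r'^detalle/(?P<id>\d+)/$', views.detalle),  # one contact's detail page
]
```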
ab7/auto-release-notes
https://github.com/ab7/auto-release-notes
4b634ca1401db2c81d234ea5104202954d84f6eb
66187bcb72971fc39a1eac8c2ff6f3a16e89bfd3
cf60e87f73e6ec6de69084c027ce4f2c2e7769e5
refs/heads/master
2022-12-13T01:46:41.780814
2020-04-26T23:48:20
2020-04-26T23:48:20
226,570,499
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6567491292953491, "alphanum_fraction": 0.6661446690559387, "avg_line_length": 32.96808624267578, "blob_id": "23f236b2a2c17b927ca964871ecfce55b04cd685", "content_id": "045658fd4599844df941bd4ad6a4dd8fe13cd9a3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3193, "license_type": "no_license", "max_line_length": 91, "num_lines": 94, "path": "/main.py", "repo_name": "ab7/auto-release-notes", "src_encoding": "UTF-8", "text": "import os\nimport base64\n\nimport semver\nfrom github import Github\nfrom google.cloud import kms\n\nfrom github_auto_release_notes.validation import GithubRequestValidator\nfrom github_auto_release_notes.exceptions import (\n GithubRequestException,\n GithubPullRequestNoAction,\n)\n\n\n# https://dev.to/googlecloud/using-secrets-in-google-cloud-functions-5aem\nkms_client = kms.KeyManagementServiceClient()\nGITHUB_ACCESS_TOKEN = kms_client.decrypt(\n os.environ['GITHUB_ACCESS_TOKEN_RESOURCE'],\n base64.b64decode(os.environ['GITHUB_ACCESS_TOKEN']),\n).plaintext.decode('utf-8')\nGITHUB_WEBHOOK_SECRET = kms_client.decrypt(\n os.environ['GITHUB_WEBHOOK_SECRET_RESOURCE'],\n base64.b64decode(os.environ['GITHUB_WEBHOOK_SECRET']),\n).plaintext.decode('utf-8')\nGITHUB_REPO = os.environ['GITHUB_REPO']\n\n\ndef update_release_notes(payload):\n PR_CLOSED = 'closed'\n DEFAULT_BRANCH = 'master'\n TAG_INITIAL = '0.0.1'\n TAG_PREFIX = 'v'\n RELEASE_NOTE_FORMAT = '* {message}. ({url})'\n\n # https://developer.github.com/v3/activity/events/types/#pullrequestevent\n try:\n action = payload['action']\n merged = payload['pull_request']['merged']\n url = payload['pull_request']['html_url']\n title = payload['pull_request']['title']\n base = payload['pull_request']['base']['ref']\n except KeyError:\n message = f'Unexpected webhook payload: {payload}'\n raise GithubRequestException(message)\n\n merged_into_default = action == PR_CLOSED and merged is True and base == DEFAULT_BRANCH\n if not merged_into_default:\n message = f'PR not merged into default branch: '\\\n f'action:{action}, merged:{merged}, base:{base}'\n raise GithubPullRequestNoAction(message)\n\n g = Github(GITHUB_ACCESS_TOKEN)\n repo = g.get_repo(GITHUB_REPO)\n releases = repo.get_releases()\n\n try:\n latest = releases[0]\n except IndexError:\n # must be the first release\n tag = f'{TAG_PREFIX}{TAG_INITIAL}'\n body = RELEASE_NOTE_FORMAT.format(message=title, url=url)\n repo.create_git_release(tag, tag, body, draft=True)\n\n if latest.draft is True:\n new_note = RELEASE_NOTE_FORMAT.format(message=title, url=url)\n new_notes = f'{latest.body}\\n{new_note}'\n latest.update_release(latest.title, new_notes, draft=True)\n else:\n tag = latest.tag_name.replace(TAG_PREFIX, '')\n new_tag = f'{TAG_PREFIX}{str(semver.parse_version_info(tag).bump_patch())}'\n body = RELEASE_NOTE_FORMAT.format(message=title, url=url)\n repo.create_git_release(new_tag, new_tag, body, draft=True)\n\n return latest\n\n\ndef webhook_handler(request):\n try:\n GithubRequestValidator(GITHUB_WEBHOOK_SECRET).validate_webhook(request)\n except GithubRequestException as e:\n print(f'Webhook validation failed: {str(e)}')\n return '', 302\n\n payload = request.get_json()\n try:\n update_release_notes(payload)\n except GithubPullRequestNoAction as e:\n print(f'No action: {str(e)}')\n return '', 200\n except GithubRequestException as e:\n print(f'Failed to update release notes: {str(e)}')\n return '', 502\n\n return '', 200\n" }, { "alpha_fraction": 0.7416452169418335, "alphanum_fraction": 
0.7416452169418335, "avg_line_length": 31.41666603088379, "blob_id": "2f406cd2b80c871873b3fc3ea0b89a42323b3400", "content_id": "5e6cbef405fed0f6182bb5bb394a091a16f87f0e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 778, "license_type": "no_license", "max_line_length": 64, "num_lines": 24, "path": "/tests/test_exceptions.py", "repo_name": "ab7/auto-release-notes", "src_encoding": "UTF-8", "text": "import unittest\n\nfrom github_auto_release_notes.exceptions import (\n GithubRequestException,\n GithubPullRequestNoAction\n)\n\n\nclass TestGithubRequestException(unittest.TestCase):\n\n def test_github_request_exception(self):\n expected_message = 'test'\n with self.assertRaises(GithubRequestException) as cm:\n raise GithubRequestException(expected_message)\n self.assertEqual(str(cm.exception), expected_message)\n\n\nclass TestGithubPullRequestNoAction(unittest.TestCase):\n\n def test_github_pull_request_no_action(self):\n expected_message = 'test'\n with self.assertRaises(GithubPullRequestNoAction) as cm:\n raise GithubPullRequestNoAction(expected_message)\n self.assertEqual(str(cm.exception), expected_message)\n" }, { "alpha_fraction": 0.6287263035774231, "alphanum_fraction": 0.631436288356781, "avg_line_length": 30.628570556640625, "blob_id": "12b17a24bddfc65639c38851a77009779711097f", "content_id": "2581b4db55d784e5c47c30703637c044fc0b408f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1107, "license_type": "no_license", "max_line_length": 84, "num_lines": 35, "path": "/github_auto_release_notes/validation.py", "repo_name": "ab7/auto-release-notes", "src_encoding": "UTF-8", "text": "import hmac\nimport hashlib\n\nfrom .exceptions import GithubRequestException\n\n\nclass GithubRequestValidator:\n\n def __init__(self, secret):\n self.secret = secret\n\n @staticmethod\n def _check_method(method):\n if method != 'POST':\n message = f'Method not allowed: {method}'\n raise GithubRequestException(message)\n\n def _check_signature(self, signature, raw_data):\n try:\n digest = signature.split('=')[1]\n except IndexError:\n message = f'Unexpected signature format: {signature}'\n raise GithubRequestException(message)\n digest_maker = hmac.new(bytes(self.secret, 'utf-8'), raw_data, hashlib.sha1)\n if digest != digest_maker.hexdigest():\n message = f'Invalid signature: {signature}'\n raise GithubRequestException(message)\n\n def validate_webhook(self, request):\n method = request.method\n signature = request.headers.get('X-Hub-Signature', '')\n raw_data = request.get_data()\n\n self._check_method(method)\n self._check_signature(signature, raw_data)\n" }, { "alpha_fraction": 0.7904762029647827, "alphanum_fraction": 0.7904762029647827, "avg_line_length": 16.5, "blob_id": "95220918c087379c4eea43f34815b1943b160ef4", "content_id": "5534d603c2b36dcec8e7a6d6a6b4d3043cd55ebf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 105, "license_type": "no_license", "max_line_length": 43, "num_lines": 6, "path": "/github_auto_release_notes/exceptions.py", "repo_name": "ab7/auto-release-notes", "src_encoding": "UTF-8", "text": "class GithubRequestException(Exception):\n pass\n\n\nclass GithubPullRequestNoAction(Exception):\n pass\n" }, { "alpha_fraction": 0.6959051489830017, "alphanum_fraction": 0.7106735110282898, "avg_line_length": 45.072166442871094, "blob_id": "c05ab3720efc91a66e66a6b627ea50a44229068c", "content_id": 
"c9e1bd2eeed13948018fdb57716252066825e377", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4469, "license_type": "no_license", "max_line_length": 86, "num_lines": 97, "path": "/tests/test_validation.py", "repo_name": "ab7/auto-release-notes", "src_encoding": "UTF-8", "text": "import unittest\nfrom unittest.mock import MagicMock\n\nfrom github_auto_release_notes.validation import GithubRequestValidator\nfrom github_auto_release_notes.exceptions import GithubRequestException\n\n\nclass TestGithubRequestValidator(unittest.TestCase):\n\n GITHUB_WEBHOOK_SECRET = 'secret'\n DATA = {\"test\": \"data\"}\n RAW_DATA = bytes(str(DATA), 'utf-8')\n VALID_SIGNATURE = 'sha1=e12c95dc17e8b0ebe3a581a19eb18195cfb5e9bb'\n BAD_FORMAT_SIGNATURE = 'e12c95dc17e8b0ebe3a581a19eb18195cfb5e9bb'\n INVALID_SIGNATURE = 'sha1=5a19eb18195cfb5e9bbb0ebe39eb18117e8b0eb5e'\n VALID_REQUEST = MagicMock(\n method='POST',\n headers={'X-Hub-Signature': VALID_SIGNATURE},\n get_data=MagicMock(return_value=RAW_DATA)\n )\n BAD_FORMAT_REQUEST = MagicMock(\n method='POST',\n headers={'X-Hub-Signature': BAD_FORMAT_SIGNATURE},\n get_data=MagicMock(return_value=RAW_DATA)\n )\n NO_HEADER_REQUEST = MagicMock(\n method='POST',\n headers={},\n get_data=MagicMock(return_value=RAW_DATA)\n )\n INVALID_REQUEST = MagicMock(\n method='POST',\n headers={'X-Hub-Signature': INVALID_SIGNATURE},\n get_data=MagicMock(return_value=RAW_DATA)\n )\n\n def test_check_method_is_valid(self):\n result = GithubRequestValidator._check_method('POST')\n self.assertEqual(result, None)\n\n def test_check_method_raises_method_error(self):\n with self.assertRaises(GithubRequestException) as cm:\n GithubRequestValidator._check_method('GET')\n expected_message = 'Method not allowed: GET'\n self.assertEqual(str(cm.exception), expected_message)\n\n def test_check_signature_valid(self):\n validator = GithubRequestValidator(self.GITHUB_WEBHOOK_SECRET)\n result = validator._check_signature(self.VALID_SIGNATURE, self.RAW_DATA)\n self.assertEqual(result, None)\n\n def test_check_signature_raises_unexpected_error(self):\n validator = GithubRequestValidator(self.GITHUB_WEBHOOK_SECRET)\n with self.assertRaises(GithubRequestException) as cm:\n validator._check_signature(self.BAD_FORMAT_SIGNATURE, self.RAW_DATA)\n expected_message = f'Unexpected signature format: {self.BAD_FORMAT_SIGNATURE}'\n self.assertEqual(str(cm.exception), expected_message)\n\n def test_check_signature_with_missing_header(self):\n validator = GithubRequestValidator(self.GITHUB_WEBHOOK_SECRET)\n with self.assertRaises(GithubRequestException) as cm:\n validator._check_signature('', self.RAW_DATA)\n expected_message = 'Unexpected signature format: '\n self.assertEqual(str(cm.exception), expected_message)\n\n def test_check_signature_with_invalid_signature(self):\n validator = GithubRequestValidator(self.GITHUB_WEBHOOK_SECRET)\n with self.assertRaises(GithubRequestException) as cm:\n validator._check_signature(self.INVALID_SIGNATURE, self.RAW_DATA)\n expected_message = f'Invalid signature: {self.INVALID_SIGNATURE}'\n self.assertEqual(str(cm.exception), expected_message)\n\n def test_validate_webhook_valid(self):\n validator = GithubRequestValidator(self.GITHUB_WEBHOOK_SECRET)\n result = validator.validate_webhook(self.VALID_REQUEST)\n self.assertEqual(result, None)\n\n def test_validate_webhook_raises_unexpected_error(self):\n validator = GithubRequestValidator(self.GITHUB_WEBHOOK_SECRET)\n with self.assertRaises(GithubRequestException) as cm:\n 
validator.validate_webhook(self.BAD_FORMAT_REQUEST)\n expected_message = f'Unexpected signature format: {self.BAD_FORMAT_SIGNATURE}'\n self.assertEqual(str(cm.exception), expected_message)\n\n def test_validate_webhook_with_missing_header(self):\n validator = GithubRequestValidator(self.GITHUB_WEBHOOK_SECRET)\n with self.assertRaises(GithubRequestException) as cm:\n validator.validate_webhook(self.NO_HEADER_REQUEST)\n expected_message = 'Unexpected signature format: '\n self.assertEqual(str(cm.exception), expected_message)\n\n def test_validate_webhook_with_invalid_signature(self):\n validator = GithubRequestValidator(self.GITHUB_WEBHOOK_SECRET)\n with self.assertRaises(GithubRequestException) as cm:\n validator.validate_webhook(self.INVALID_REQUEST)\n expected_message = f'Invalid signature: {self.INVALID_SIGNATURE}'\n self.assertEqual(str(cm.exception), expected_message)\n" } ]
5
krisspnet/l1ktools
https://github.com/krisspnet/l1ktools
d54582d368c6cf70a65fd13dd889fe16f76fd441
8d739dd05f0563219eeceacf723bf90d75e1751f
636a1443221219e0df47628131ae5adf87ff63bd
refs/heads/master
2021-01-11T18:36:58.238118
2017-01-20T00:04:14
2017-01-20T00:04:14
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.4149397611618042, "alphanum_fraction": 0.5190361738204956, "avg_line_length": 42.53146743774414, "blob_id": "c08ca7c48c1bf45eb3e8c8e3eedf49b3cc0eef65", "content_id": "2582cafd0d86361fa8c6d8cea37b5be6bf4218d2", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6225, "license_type": "permissive", "max_line_length": 96, "num_lines": 143, "path": "/python/broadinstitute_cmap/io/pandasGEXpress/test_write_gct.py", "repo_name": "krisspnet/l1ktools", "src_encoding": "UTF-8", "text": "import unittest\nimport logging\nimport setup_GCToo_logger as setup_logger\nimport os\nimport numpy as np\nimport pandas as pd\nimport parse_gct\nimport write_gct as wg\n\nFUNCTIONAL_TESTS_PATH = \"functional_tests\"\n\nlogger = logging.getLogger(setup_logger.LOGGER_NAME)\n\nclass TestWriteGCT(unittest.TestCase):\n def test_write_version_and_dims(self):\n # Write\n fname = \"test_file.gct\"\n f = open(fname, \"wb\")\n wg.write_version_and_dims(\"1.3\", [\"1\", \"2\", \"3\", \"4\"], f)\n f.close()\n\n # Read and then remove\n f = open(fname, \"r\")\n version_string = f.readline().strip()\n dims = f.readline().strip().split(\"\\t\")\n f.close()\n os.remove(fname)\n\n # Check that it was written correctly\n self.assertEqual(version_string, \"#1.3\")\n self.assertEqual(dims, [\"1\", \"2\", \"3\", \"4\"])\n\n def test_assemble_full_df(self):\n row_meta_df = pd.DataFrame([[\"Analyte 11\", 11, \"dp52\"],\n [\"Analyte 12\", 12, \"dp52\"]],\n index=[\"200814_at\", \"218597_s_at\"],\n columns=[\"pr_analyte_id\", \"pr_analyte_num\", \"pr_bset_id\"])\n col_meta_df = pd.DataFrame([[8.38, np.nan, \"DMSO\", \"24 h\"],\n [7.7, np.nan, \"DMSO\", \"24 h\"],\n [8.18, np.nan, \"DMSO\", \"24 h\"]],\n index=[\"LJP005_A375_24H_X1_B19:A03\",\n \"LJP005_A375_24H_X1_B19:A04\",\n \"LJP005_A375_24H_X1_B19:A05\"],\n columns=[\"qc_iqr\", \"pert_idose\", \"pert_iname\", \"pert_itime\"])\n data_df = pd.DataFrame([[11.3819, 11.3336, 11.4486],\n [10.445, 10.445, 10.3658]],\n index=[\"200814_at\", \"218597_s_at\"],\n columns=[\"LJP005_A375_24H_X1_B19:A03\",\n \"LJP005_A375_24H_X1_B19:A04\",\n \"LJP005_A375_24H_X1_B19:A05\"])\n e_df = pd.DataFrame(\n [['qc_iqr', '-666', '-666', '-666', '8.38', '7.7', '8.18'],\n ['pert_idose', '-666', '-666', '-666', '-666', '-666', '-666'],\n ['pert_iname', '-666', '-666', '-666', 'DMSO', 'DMSO', 'DMSO'],\n ['pert_itime', '-666', '-666', '-666', '24 h', '24 h', '24 h'],\n ['200814_at', 'Analyte 11', '11', 'dp52', 11.3819, 11.3336, 11.4486],\n ['218597_s_at', 'Analyte 12', '12', 'dp52', 10.445, 10.445, 10.3658]],\n columns=['id', 'pr_analyte_id', 'pr_analyte_num', 'pr_bset_id',\n 'LJP005_A375_24H_X1_B19:A03', 'LJP005_A375_24H_X1_B19:A04',\n 'LJP005_A375_24H_X1_B19:A05'])\n full_df = wg.assemble_full_df(row_meta_df, col_meta_df, data_df, \"NaN\", \"-666\", \"-666\")\n\n self.assertTrue(full_df.equals(e_df))\n self.assertEqual(full_df.columns.values[0], \"id\")\n self.assertEqual(full_df.iloc[4, 0], \"200814_at\")\n self.assertEqual(full_df.ix[2, \"pr_bset_id\"], \"-666\")\n\n def test_write_full_df(self):\n full_df = pd.DataFrame(\n [['qc_iqr', '-666', '-666', '-666', '8.38', '7.7', '8.18'],\n ['pert_idose', '-666', '-666', '-666', '-666', '-666', '-666'],\n ['pert_iname', '-666', '-666', '-666', 'DMSO', 'DMSO', 'DMSO'],\n ['pert_itime', '-666', '-666', '-666', '24 h', '24 h', '24 h'],\n ['200814_at', 'Analyte 11', '11', 'dp52', 11.3819, 11.3336, 11.4486],\n ['218597_s_at', 'Analyte 12', '12', 'dp52', 9.5063, 10.445, 
10.3658]],\n columns=['id', 'pr_analyte_id', 'pr_analyte_num', 'pr_bset_id',\n 'LJP005_A375_24H_X1_B19:A03', 'LJP005_A375_24H_X1_B19:A04',\n 'LJP005_A375_24H_X1_B19:A05'])\n\n fname = \"test_file.gct\"\n f = open(fname, \"wb\")\n wg.write_full_df(full_df, f, \"NaN\", None)\n f.close()\n os.remove(fname)\n\n f2 = open(fname, \"wb\")\n f2.write(\"#1.3\\n\")\n f2.write(\"1\\t2\\t3\\t4\\n\")\n wg.write_full_df(full_df, f2, \"NaN\", None)\n f2.close()\n os.remove(fname)\n\n def test_append_dims_and_file_extension(self):\n data_df = pd.DataFrame([[1, 2], [3, 4]])\n fname_no_gct = \"a/b/file\"\n fname_gct = \"a/b/file.gct\"\n e_fname = \"a/b/file_n2x2.gct\"\n\n fname_out = wg.append_dims_and_file_extension(fname_no_gct, data_df)\n self.assertEqual(fname_out, e_fname)\n\n fname_out = wg.append_dims_and_file_extension(fname_gct, data_df)\n self.assertEqual(fname_out, e_fname)\n\n def test_l1000_functional(self):\n l1000_in_path = os.path.join(FUNCTIONAL_TESTS_PATH, \"test_l1000.gct\")\n l1000_out_path = os.path.join(FUNCTIONAL_TESTS_PATH, \"test_l1000_writing.gct\")\n\n # Read in original gct file\n l1000_in_gct = parse_gct.parse(l1000_in_path)\n\n # Read in new gct file\n wg.write(l1000_in_gct, l1000_out_path)\n l1000_out_gct = parse_gct.parse(l1000_out_path)\n\n self.assertTrue(l1000_in_gct.data_df.equals(l1000_out_gct.data_df))\n self.assertTrue(l1000_in_gct.row_metadata_df.equals(l1000_out_gct.row_metadata_df))\n self.assertTrue(l1000_in_gct.col_metadata_df.equals(l1000_out_gct.col_metadata_df))\n\n # Clean up\n os.remove(l1000_out_path)\n\n def test_p100_functional(self):\n p100_in_path = os.path.join(FUNCTIONAL_TESTS_PATH, \"test_p100.gct\")\n p100_out_path = os.path.join(FUNCTIONAL_TESTS_PATH, \"test_p100_writing.gct\")\n\n # Read in original gct file\n p100_in_gct = parse_gct.parse(p100_in_path)\n\n # Read in new gct file\n wg.write(p100_in_gct, p100_out_path)\n p100_out_gct = parse_gct.parse(p100_out_path)\n\n self.assertTrue(p100_in_gct.data_df.equals(p100_out_gct.data_df))\n self.assertTrue(p100_in_gct.row_metadata_df.equals(p100_out_gct.row_metadata_df))\n self.assertTrue(p100_in_gct.col_metadata_df.equals(p100_out_gct.col_metadata_df))\n\n # Clean up\n os.remove(p100_out_path)\n\nif __name__ == \"__main__\":\n setup_logger.setup(verbose=True)\n unittest.main()\n" }, { "alpha_fraction": 0.48975086212158203, "alphanum_fraction": 0.5198675394058228, "avg_line_length": 38.39751434326172, "blob_id": "fe4eb731bf15c2e3d6ba8e85d4d55e51764545c3", "content_id": "6c35c0ed841541239a8b5045ca8a1e3635bcdd2b", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6342, "license_type": "permissive", "max_line_length": 92, "num_lines": 161, "path": "/python/broadinstitute_cmap/io/pandasGEXpress/test_concat_gctoo.py", "repo_name": "krisspnet/l1ktools", "src_encoding": "UTF-8", "text": "import os\nimport unittest\nimport logging\nimport numpy as np\nimport pandas as pd\n\nimport setup_GCToo_logger as setup_logger\nimport concat_gctoo as cg\nimport parse_gct as pg\n\nlogger = logging.getLogger(setup_logger.LOGGER_NAME)\n\nFUNCTIONAL_TESTS_DIR = \"functional_tests\"\n\nclass TestConcatGCToo(unittest.TestCase):\n def test_left_right(self):\n # Verify that concatenation replicates the output file\n left_gct_path = os.path.join(FUNCTIONAL_TESTS_DIR, \"test_merge_left.gct\")\n right_gct_path = os.path.join(FUNCTIONAL_TESTS_DIR, \"test_merge_right.gct\")\n expected_gct_path = os.path.join(FUNCTIONAL_TESTS_DIR, \"test_merged_left_right.gct\")\n\n 
left_gct = pg.parse(left_gct_path)\n right_gct = pg.parse(right_gct_path)\n expected_gct = pg.parse(expected_gct_path)\n\n # Merge left and right\n concated_gct = cg.hstack([left_gct, right_gct], None, False, False)\n\n self.assertTrue(expected_gct.data_df.equals(concated_gct.data_df), (\n \"\\nconcated_gct.data_df:\\n{}\\nexpected_gct.data_df:\\n{}\".format(\n concated_gct.data_df, expected_gct.data_df)))\n self.assertTrue(expected_gct.row_metadata_df.equals(concated_gct.row_metadata_df))\n self.assertTrue(expected_gct.col_metadata_df.equals(concated_gct.col_metadata_df))\n\n def test_top_bottom(self):\n # Verify that concatenation replicates the output file\n top_gct_path = os.path.join(FUNCTIONAL_TESTS_DIR, \"test_merge_top.gct\")\n bottom_gct_path = os.path.join(FUNCTIONAL_TESTS_DIR, \"test_merge_bottom.gct\")\n expected_gct_path = os.path.join(FUNCTIONAL_TESTS_DIR, \"test_merged_top_bottom.gct\")\n\n top_gct = pg.parse(top_gct_path)\n bottom_gct = pg.parse(bottom_gct_path)\n expected_gct = pg.parse(expected_gct_path)\n\n # TODO: Merge top and bottom\n concated_gct = cg.hstack([top_gct, bottom_gct], None, False, False)\n\n self.assertTrue(expected_gct.data_df.equals(concated_gct.data_df), (\n \"\\nconcated_gct.data_df:\\n{}\\nexpected_gct.data_df:\\n{}\".format(\n concated_gct.data_df, expected_gct.data_df)))\n self.assertTrue(expected_gct.row_metadata_df.equals(concated_gct.row_metadata_df))\n self.assertTrue(expected_gct.col_metadata_df.equals(concated_gct.col_metadata_df))\n\n def test_concat_row_meta(self):\n meta1 = pd.DataFrame(\n [[\"r1_1\", \"r1_2\", \"r1_3\"],\n [\"r2_1\", \"r2_2\", \"r2_3\"],\n [\"r3_1\", \"r3_2\", \"r3_3\"]],\n index=[\"r1\", \"r2\", \"r3\"],\n columns=[\"rhd1\", \"rhd2\", \"rhd3\"])\n meta2 = pd.DataFrame(\n [[\"r1_1\", \"r1_2\", \"r1_3\"],\n [\"r2_1\", \"r2_2\", \"r2_3\"],\n [\"r3_1\", \"r3_2\", \"r3_33\"]],\n index=[\"r1\", \"r2\", \"r3\"],\n columns=[\"rhd1\", \"rhd2\", \"rhd3\"])\n e_meta = pd.DataFrame(\n [[\"r1_1\", \"r1_2\"],\n [\"r2_1\", \"r2_2\"],\n [\"r3_1\", \"r3_2\"]],\n index=[\"r1\", \"r2\", \"r3\"],\n columns=[\"rhd1\", \"rhd2\"])\n\n with self.assertRaises(AssertionError) as e:\n _ = cg.concat_row_meta([meta1, meta2], None, False)\n self.assertIn(\"rids are duplicated\", str(e.exception))\n\n # happy path, using fields_to_remove\n out_meta_df = cg.concat_row_meta([meta1, meta2], [\"rhd3\"], False)\n\n self.assertTrue(out_meta_df.equals(e_meta), (\n \"\\nout_meta_df:\\n{}\\ne_meta:\\n{}\".format(out_meta_df, e_meta)))\n\n def test_concat_col_meta(self):\n meta1 = pd.DataFrame(\n [[\"a\", \"b\"], [\"c\", \"d\"]],\n index=[\"c1\", \"c2\"],\n columns=[\"hd1\", \"hd2\"])\n meta2 = pd.DataFrame(\n [[\"e\", \"f\", \"g\"], [\"h\", \"i\", \"j\"]],\n index=[\"c2\", \"c3\"],\n columns=[\"hd1\", \"hd2\", \"hd3\"])\n e_concated = pd.DataFrame(\n [[\"a\", \"b\", np.nan], [\"c\", \"d\", np.nan],\n [\"e\", \"f\", \"g\"], [\"h\", \"i\", \"j\"]],\n index=[\"c1\", \"c2\", \"c2\", \"c3\"],\n columns=[\"hd1\", \"hd2\", \"hd3\"])\n\n out_concated = cg.concat_col_meta([meta1, meta2])\n self.assertTrue(out_concated.equals(e_concated), (\n \"\\nout_concated:\\n{}\\ne_concated:\\n{}\".format(\n out_concated, e_concated)))\n\n def test_concat_data(self):\n df1 = pd.DataFrame(\n [[1, 2, 3], [4, 5, 6]],\n index=[\"a\", \"b\"],\n columns=[\"s1\", \"s2\", \"s3\"])\n df2 = pd.DataFrame(\n [[7, 8, 9], [10, 11, 12]],\n index=[\"a\", \"b\"],\n columns=[\"s4\", \"s5\", \"s6\"])\n e_concated = pd.DataFrame(\n [[1, 2, 3, 7, 8, 9], [4, 5, 6, 10, 11, 12]],\n index=[\"a\", 
\"b\"],\n columns=[\"s1\", \"s2\", \"s3\", \"s4\", \"s5\", \"s6\"])\n\n out_concated = cg.concat_data([df1, df2])\n self.assertTrue(out_concated.equals(e_concated), (\n \"\\nout_concated:\\n{}\\ne_concated:\\n{}\".format(\n out_concated, e_concated)))\n\n def test_do_reset_ids(self):\n col_df = pd.DataFrame(\n [[1, 2], [3, 4], [5, 6]],\n index=[\"s1\", \"s2\", \"s1\"],\n columns=[\"hd1\", \"hd2\"])\n data_df = pd.DataFrame(\n [[1, 2, 3], [4, 5, 6]],\n index=[\"a\", \"b\"],\n columns=[\"s1\", \"s2\", \"s1\"])\n inconsistent_data_df = pd.DataFrame(\n [[1, 2, 3], [4, 5, 6]],\n index=[\"a\", \"b\"],\n columns=[\"s1\", \"s2\", \"s3\"])\n e_col_df = pd.DataFrame(\n [[\"s1\", 1, 2], [\"s2\", 3, 4], [\"s1\", 5, 6]],\n index=[0, 1, 2],\n columns=[\"old_cid\", \"hd1\", \"hd2\"])\n e_data_df = pd.DataFrame(\n [[1, 2, 3], [4, 5, 6]],\n index=[\"a\", \"b\"],\n columns=[0, 1, 2])\n\n # Check the assert statement\n with self.assertRaises(AssertionError) as e:\n (_, _) = cg.do_reset_ids(col_df, inconsistent_data_df)\n self.assertIn(\"do not agree\", str(e.exception))\n\n # Happy path\n (out_col_df, out_data_df) = cg.do_reset_ids(col_df, data_df)\n self.assertTrue(out_col_df.equals(e_col_df), (\n \"\\nout_col_df:\\n:{}\\ne_col_df:\\n{}\".format(out_col_df, e_col_df)))\n self.assertTrue(out_data_df.equals(e_data_df), (\n \"\\nout_data_df:\\n:{}\\ne_data_df:\\n{}\".format(out_data_df, e_data_df)))\n\n\nif __name__ == \"__main__\":\n setup_logger.setup(verbose=True)\n\n unittest.main()" }, { "alpha_fraction": 0.6713998913764954, "alphanum_fraction": 0.6732450723648071, "avg_line_length": 37.8317756652832, "blob_id": "616031b6a21de5facfbb6bb03da3c148e1abd031", "content_id": "033a2c1f27d22ece873d926e637a014908e87b75", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12465, "license_type": "permissive", "max_line_length": 113, "num_lines": 321, "path": "/python/broadinstitute_cmap/io/pandasGEXpress/concat_gctoo.py", "repo_name": "krisspnet/l1ktools", "src_encoding": "UTF-8", "text": "\"\"\"\nconcat_gctoo.py\n\nThis function is for concatenating gct(x) files together. You can tell it to\nfind gct files using the file_wildcard argument, or you can tell it exactly\nwhich files you want to concatenate using the list_of_gct_paths argument. The\nmeat of this function are the hstack (i.e. horizontal concatenation of gcts)\nand vstack (i.e. vertical concatenation).\n\nTerminology: 'Common' metadata refers to the metadata that is shared between\nthe gcts. For example, if horizontally concatenating, the 'common' metadata is\nthe row metadata. 'New' metadata is the other one; it's the 'new' metadata that\neach gct brings. For example, if horizontally concatenating, the 'new' metadata\nis the column metadata.\n\nThere are 3 arguments that allow you to work around certain obstacles\nof concatenation.\n\n1) If the 'common' metadata contains fields that are not the same in\nall files, then you can remove these fields using the fields_to_remove argument.\n\n2) If the 'common' metadata fields are all the same between different files but\nnot in the same order, you will have to sort them using the sort_headers\nargument.\n\n3) If the 'new' metadata ids are not unique between different files, and you\ntry to concatenate the files, an invalid GCToo would be formed (duplicate ids).\nTo overcome this, use the reset_sample_ids argument. 
This will move the 'new'\nmetadata ids to a new metadata field and replace the original ids with unique\nintegers.\n\n\n\"\"\"\n\nimport argparse\nimport os\nimport sys\nimport glob\nimport logging\nimport setup_GCToo_logger as setup_logger\nimport pandas as pd\n\nimport GCToo \nimport parse\nimport write_gct \nimport write_gctx\n\n__author__ = \"Lev Litichevskiy\"\n__email__ = \"lev@broadinstitute.org\"\n\nlogger = logging.getLogger(setup_logger.LOGGER_NAME)\n\n\ndef build_parser():\n    parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\n    # Required args\n    mutually_exclusive_group = parser.add_mutually_exclusive_group()\n    mutually_exclusive_group.add_argument(\"--list_of_gct_paths\", \"-lop\", nargs=\"+\",\n        help=\"full paths to gct files to be concatenated\")\n    mutually_exclusive_group.add_argument(\"--file_wildcard\", \"-w\", type=str,\n        help=(\"wildcard specifying where files should be found \" +\n            \"(make sure to surround in quotes if calling from command line!)\"))\n\n    # Optional args\n    out_type_group = parser.add_mutually_exclusive_group()\n    out_type_group.add_argument('-gctx', action='store_const', dest='out_type', const=\"gctx\")\n    out_type_group.add_argument('-gct', action='store_const', dest='out_type', const=\"gct\")\n    parser.set_defaults(out_type=\"gctx\")\n\n    parser.add_argument(\"--full_out_name\", \"-o\", type=str, default=\"concated.gctx\",\n        help=\"what to name the output file (full path)\")\n    parser.add_argument(\"--fields_to_remove\", \"-ftr\", nargs=\"+\",\n        help=\"fields to remove from the common metadata\")\n    parser.add_argument(\"--sort_headers\", \"-sh\", action=\"store_true\", default=False,\n        help=\"whether to sort the common metadata headers\")\n    parser.add_argument(\"--reset_ids\", \"-rsi\", action=\"store_true\", default=False,\n        help=\"whether to reset sample ids (use this flag if sample ids are not unique)\")\n\n    parser.add_argument(\"-data_null\", type=str, default=\"NA\",\n        help=\"how to represent missing values in the data\")\n    parser.add_argument(\"-metadata_null\", type=str, default=\"NA\",\n        help=\"how to represent missing values in the metadata\")\n    parser.add_argument(\"-filler_null\", type=str, default=\"NA\",\n        help=\"what value to use for filling the top-left filler block of a .gct\")\n    parser.add_argument(\"-verbose\", \"-v\", action=\"store_true\", default=False,\n        help=\"whether to print a bunch of output\")\n\n    return parser\n\n\ndef main(args):\n\n    # Get files directly\n    if args.list_of_gct_paths is not None:\n        files = args.list_of_gct_paths\n\n    # Or find them\n    else:\n        files = get_file_list(args.file_wildcard)\n\n    assert len(files) > 0, \"No files were found. args.file_wildcard: {}\".format(\n        args.file_wildcard)\n\n    # Parse each file and append to a list\n    gctoos = []\n    for f in files:\n        gctoos.append(parse.parse(f))\n\n    # Create concatenated gctoo object\n    out_gctoo = hstack(gctoos, args.fields_to_remove, args.reset_ids, args.sort_headers)\n\n    # Write out_gctoo to file\n    logger.info(\"Write to file...\")\n    if args.out_type == \"gctx\":\n        write_gctx.write(out_gctoo, args.full_out_name) \n    elif args.out_type == \"gct\":\n        write_gct.write(out_gctoo, args.full_out_name,\n            filler_null=args.filler_null,\n            metadata_null= args.metadata_null,\n            data_null=args.data_null)\n\ndef get_file_list(wildcard):\n    \"\"\"Search for files to be concatenated. 
Currently very basic, but could\n expand to be more sophisticated.\n Args:\n wildcard (regular expression string)\n Returns:\n files (list of full file paths)\n \"\"\"\n files = glob.glob(os.path.expanduser(wildcard))\n return files\n\n\ndef hstack(gctoos, fields_to_remove=None, reset_ids=False, sort_headers=False):\n \"\"\"Horizontally concatenate gctoos.\n Args:\n gctoos (list of gctoo objects)\n fields_to_remove (list of strings): can specify certain fields to remove\n from row metadata in order to allow rows to line up\n reset_ids (bool): set to True if sample ids are not unique\n sort_headers (bool): set to True in order to sort the headers of each\n row_metadata_df\n Return:\n concated (gctoo object)\n \"\"\"\n\n # Separate each gctoo into its component dfs\n row_meta_dfs = []\n col_meta_dfs = []\n data_dfs = []\n for g in gctoos:\n row_meta_dfs.append(g.row_metadata_df)\n col_meta_dfs.append(g.col_metadata_df)\n data_dfs.append(g.data_df)\n\n # Concatenate row metadata\n all_row_metadata_df = concat_row_meta(row_meta_dfs, fields_to_remove, sort_headers)\n\n # Concatenate col metadata\n all_col_metadata_df = concat_col_meta(col_meta_dfs)\n\n # Concatenate the data_dfs\n all_data_df = concat_data(data_dfs)\n\n # Make sure df shapes are correct\n assert all_data_df.shape[0] == all_row_metadata_df.shape[0], \"Number of rows is incorrect.\"\n assert all_data_df.shape[1] == all_col_metadata_df.shape[0], \"Number of columns is incorrect.\"\n \n # If requested, assign unique integer as new sample id and move old sample\n # id into the column metadata\n if reset_ids:\n (all_col_metadata_df, all_data_df) = do_reset_ids(\n all_col_metadata_df, all_data_df)\n\n logger.info(\"build GCToo of all...\")\n concated = GCToo.GCToo(row_metadata_df=all_row_metadata_df,\n col_metadata_df=all_col_metadata_df,\n data_df=all_data_df)\n\n return concated\n\ndef concat_row_meta(row_meta_dfs, fields_to_remove, sort_headers):\n \"\"\"Concatenate the row metadata dfs together and sort the index.\n Args:\n row_meta_dfs (list of pandas dfs)\n fields_to_remove (list of strings): metadata fields to drop from all dfs\n before concatenating\n sort_headers (bool): set to True in order to sort the headers of each\n row_metadata_df\n Returns:\n all_row_meta_df (pandas df)\n \"\"\"\n # Remove any metadata columns that will prevent probes from being identical between plates\n if fields_to_remove is not None:\n for df in row_meta_dfs:\n df.drop(fields_to_remove, axis=1, inplace=True)\n\n # Sort metadata headers in case the order is different between dfs\n if sort_headers:\n for df in row_meta_dfs:\n df.sort_index(axis=1, inplace=True)\n\n # Concat all row_meta_df and then remove duplicate rows (slow...but it works)\n all_row_meta_df_dups = pd.concat(row_meta_dfs, axis=0)\n logger.debug(\"all_row_meta_df_dups.shape: {}\".format(all_row_meta_df_dups.shape))\n all_row_meta_df = all_row_meta_df_dups.drop_duplicates()\n logger.debug(\"all_row_meta_df.shape: {}\".format(all_row_meta_df.shape))\n\n # Verify that there are no longer any duplicate rids\n duplicate_rids = all_row_meta_df.index.duplicated(keep=False)\n assert all_row_meta_df.index.is_unique, (\n (\"The following rids are duplicated because the metadata between \" +\n \"different files does not agree.\\nTry excluding more metadata \" +\n \"fields using the fields_to_remove argument.\\n\"\n \"all_row_meta_df.index[duplicate_rids]:\\n{}\").format(\n all_row_meta_df.index[duplicate_rids]))\n\n # Finally, re-sort the index\n all_row_metadata_df_sorted = 
all_row_meta_df.sort_index()\n\n return all_row_metadata_df_sorted\n\ndef concat_col_meta(col_meta_dfs):\n \"\"\"Concatenate the column metadata dfs together.\n Args:\n col_meta_dfs (list of pandas dfs)\n Returns:\n all_col_meta_df (pandas df)\n \"\"\"\n # Concatenate the col_meta_dfs\n all_col_meta_df = pd.concat(col_meta_dfs, axis=0)\n\n # Sanity check: the number of rows in all_col_metadata_df should correspond\n # to the sum of the number of rows in the input dfs\n n_rows = all_col_meta_df.shape[0]\n n_rows_cumulative = sum([df.shape[0] for df in col_meta_dfs])\n assert n_rows == n_rows_cumulative\n\n logger.debug(\"all_col_meta_df.shape[0]: {}\".format(n_rows))\n\n return all_col_meta_df\n\n\ndef concat_data(data_dfs):\n \"\"\"Concatenate the data dfs together.\n Args:\n data_dfs (list of pandas dfs)\n Returns:\n all_data_df_sorted (pandas df)\n \"\"\"\n # Concatenate the data_dfs\n all_data_df = pd.concat(data_dfs, axis=1)\n\n # Sort the index\n all_data_df_sorted = all_data_df.sort_index()\n\n # Sanity check: the number of columns in all_data_df_sorted should correspond\n # to the sum of the number of columns in the input dfs\n n_cols = all_data_df_sorted.shape[1]\n n_cols_cumulative = sum([df.shape[1] for df in data_dfs])\n assert n_cols == n_cols_cumulative\n\n logger.debug(\"all_data_df_sorted.shape[1]: {}\".format(n_cols))\n\n return all_data_df_sorted\n\n\ndef do_reset_ids(all_col_metadata_df, all_data_df):\n \"\"\"Rename sample ids in both metadata and data dfs to unique integers.\n Note that the dataframes are modified in-place.\n In order to save the output as a proper gct file, the sample ids need to be\n unique. If sample ids are not unique, then this function will error out.\n However, if you want to concatenate files anyway, you can use the flag\n reset_ids to move the cids to a new metadata field and assign a unique\n integer index for each sample.\n Args:\n all_col_metadata_df (pandas df)\n all_data_df (pandas df)\n Returns:\n all_col_metadata_df (pandas df): updated\n all_data_df (pandas df): updated\n \"\"\"\n # See how many samples are repeated before resetting\n logger.debug(\"num samples: {}\".format(all_col_metadata_df.shape[0]))\n logger.debug(\"num unique samples before reset: {}\".format(\n len(all_col_metadata_df.index.unique())))\n\n # First, make sure sample ids agree between data df and col_meta_df\n assert all_col_metadata_df.index.equals(all_data_df.columns), (\n \"Sample ids in all_col_metadata_df do not agree with the sample ids in data_df.\")\n\n # Change index name so that the column that it becomes will be\n # appropriately named\n all_col_metadata_df.index.name = \"old_cid\"\n\n # Reset index\n all_col_metadata_df = all_col_metadata_df.reset_index()\n\n # Change the index name back to cid\n all_col_metadata_df.index.name = \"cid\"\n\n # Replace sample ids in data_df with the new ones from col_meta_df (just an\n # array of unique integers, zero-indexed)\n all_data_df.columns = pd.Index(all_col_metadata_df.index.values)\n\n # Assert that the number of unique samples now equals the number of samples\n logger.debug(\"num unique samples after reset: {}\".format(\n len(all_col_metadata_df.index.unique())))\n assert all_col_metadata_df.shape[0] == len(all_col_metadata_df.index.unique()), (\n \"The sample ids in all_col_metadata_df still are not unique! Not good! 
\" +\n \"\\nall_col_metadata_df.index.values:\\n{}\").format(all_col_metadata_df.index.values)\n\n return all_col_metadata_df, all_data_df\n\n\nif __name__ == \"__main__\":\n args = build_parser().parse_args(sys.argv[1:])\n setup_logger.setup(verbose=args.verbose)\n\n main(args)\n" }, { "alpha_fraction": 0.577616810798645, "alphanum_fraction": 0.5875922441482544, "avg_line_length": 37.11458206176758, "blob_id": "28654f86b42fc12b25a7ad2062045ad89ecad6a0", "content_id": "ad0c81577e7b7b5ea07d0a19dd4f90269248b751", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7318, "license_type": "permissive", "max_line_length": 126, "num_lines": 192, "path": "/python/broadinstitute_cmap/io/pandasGEXpress/write_gct.py", "repo_name": "krisspnet/l1ktools", "src_encoding": "UTF-8", "text": "import logging\nimport setup_GCToo_logger as setup_logger\nimport pandas as pd\nimport numpy as np\nimport os.path\n\n\n__author__ = \"Lev Litichevskiy\"\n__email__ = \"[email protected]\"\n\n\"\"\" Writes a GCToo object to a gct file.\n\nThe main method is write. write_version_and_dims writes the first two\nlines of the gct file, assemble_full_df assembles 3 component dfs\ninto a df of the correct form for a gct file, and write_full_df writes\nthe full_df into the gct file as lines 3 to the end.\nappend_dims_and_file_extension is a utility function that can be used to\nappend the matrix dimensions and .gct extension to the output filename.\n\nExample GCT:\n#1.3\n96 36 9 15\n\n96 = number of data rows\n36 = number of data columns\n9 = number of row metadata fields (+1 for the 'id' column -- first column)\n15 = number of col metadata fields (+1 for the 'id' row -- first row)\n---------------------------------------------------\n|id| rhd | cid |\n---------------------------------------------------\n| | | |\n|c | | |\n|h | (blank) | col_metadata |\n|d | | |\n| | | |\n---------------------------------------------------\n| | | |\n|r | | |\n|i | row_metadata | data |\n|d | | |\n| | | |\n---------------------------------------------------\n\"\"\"\n\nlogger = logging.getLogger(setup_logger.LOGGER_NAME)\n\n# Only writes GCT v1.3\nVERSION = \"1.3\"\n\n\ndef write(gctoo, out_fname, data_null=\"NaN\", metadata_null=\"-666\", filler_null=\"-666\", data_float_format=None):\n \"\"\"Write a gctoo object to a gct file.\n\n Args:\n gctoo (gctoo object)\n out_fname (string): filename for output gct file\n data_null (string): how to represent missing values in the data (default = \"NaN\")\n metadata_null (string): how to represent missing values in the metadata (default = \"-666\")\n filler_null (string): what value to fill the top-left filler block with (default = \"-666\")\n data_float_format (string): how many decimal points to keep in representing data (default = None will keep all digits)\n Returns:\n nothing\n \"\"\"\n # Create handle for output file\n if not out_fname.endswith(\".gct\"):\n out_fname = out_fname + \".gct\"\n f = open(out_fname, \"wb\")\n\n # Write first two lines\n dims_ints = [gctoo.data_df.shape[0], gctoo.data_df.shape[1],\n gctoo.row_metadata_df.shape[1], gctoo.col_metadata_df.shape[1]]\n dims = [str(dim) for dim in dims_ints]\n write_version_and_dims(VERSION, dims, f)\n\n # Convert 3 component dataframes into correct form\n full_df = assemble_full_df(\n gctoo.row_metadata_df, gctoo.col_metadata_df, gctoo.data_df,\n data_null, metadata_null, filler_null)\n\n # Write remainder of gct\n write_full_df(full_df, f, data_null, data_float_format)\n 
f.close()\n\n    logger.info(\"GCT has been written to {}\".format(out_fname))\n\n\ndef write_version_and_dims(version, dims, f):\n    \"\"\"Write first two lines of gct file.\n\n    Args:\n        version (string): 1.3 by default\n        dims (list of strings): length = 4\n        f (file handle): handle of output file\n    Returns:\n        nothing\n    \"\"\"\n    f.write((\"#\" + version + \"\\n\"))\n    f.write((dims[0] + \"\\t\" + dims[1] + \"\\t\" + dims[2] + \"\\t\" + dims[3] + \"\\n\"))\n\n\ndef assemble_full_df(row_metadata_df, col_metadata_df, data_df, data_null, metadata_null, filler_null):\n    \"\"\"Assemble 3 component dataframes into the correct form for gct files.\n\n    Args:\n        row_metadata_df (pandas df)\n        col_metadata_df (pandas df)\n        data_df (pandas df)\n        data_null (string): how to represent missing values in the data\n        metadata_null (string): how to represent missing values in the metadata\n        filler_null (string): what value to fill the top-left filler block with\n\n    Returns:\n        full_df (pandas df): shape = (n_chd + n_rid, 1 + n_rhd + n_cid),\n            header will become the 3rd line of the gct file\n    \"\"\"\n    # Convert metadata to strings\n    row_metadata_df = row_metadata_df.astype(str)\n    col_metadata_df = col_metadata_df.astype(str)\n\n    # Replace missing values in metadata with metadata_null\n    row_metadata_df.replace(\"nan\", value=metadata_null, inplace=True)\n    col_metadata_df.replace(\"nan\", value=metadata_null, inplace=True)\n\n    # TOP ROW: horz concatenate \"id\", rhd, and cid\n    rhd_and_cid = np.hstack((row_metadata_df.columns.values, data_df.columns.values))\n    top_row = np.hstack((\"id\", rhd_and_cid))\n\n    # Check that it has correct length\n    assert(len(top_row) == (1 + row_metadata_df.shape[1] + data_df.shape[1]))\n\n    # Create nan array to fill the blank top-left quadrant\n    filler = np.full((col_metadata_df.shape[1], row_metadata_df.shape[1]),\n                     filler_null, dtype=\"S8\")\n\n    # TOP HALF: horz concatenate chd, filler, and col_metadata, which must be transposed\n    filler_and_col_metadata = np.hstack((filler, col_metadata_df.T.values))\n    top_half = np.column_stack((col_metadata_df.columns.values, filler_and_col_metadata))\n\n    # BOTTOM HALF: horz concatenate rid, row_metadata, and data\n    row_metadata_and_data = np.hstack((row_metadata_df.values, data_df.values))\n    bottom_half = np.column_stack((data_df.index.values, row_metadata_and_data))\n\n    # Vert concatenate the two halves\n    full_df_values = np.vstack((top_half, bottom_half))\n\n    # Stitch together full_df\n    full_df = pd.DataFrame(full_df_values, columns=top_row)\n\n    # Check that it has correct dims\n    assert (full_df.shape == (\n        (col_metadata_df.shape[1] + data_df.shape[0]),\n        (1 + row_metadata_df.shape[1] + data_df.shape[1])))\n    return full_df\n\n\ndef write_full_df(full_df, f, data_null, data_float_format):\n    \"\"\"Write the full_df to the gct file.\n\n    Args:\n        full_df (pandas df): data and metadata arranged correctly\n        f (file handle): handle for output file\n        data_null (string): how to represent missing values in the data\n        data_float_format (string): how many decimal points to keep in representing data\n    Returns:\n        nothing\n    \"\"\"\n    full_df.to_csv(f, header=True, index=False,\n                   sep=\"\\t\",\n                   na_rep=data_null,\n                   float_format=data_float_format)\n\n\ndef append_dims_and_file_extension(fname, data_df):\n    \"\"\"Append dimensions and file extension to output filename.\n    N.B. 
Dimensions are cols x rows.\n\n Args:\n fname (string): output filename\n data_df (pandas df)\n Returns:\n out_fname (string): output filename with matrix dims and .gct appended\n \"\"\"\n # If there's no .gct at the end of output file name, add the dims and .gct\n if not fname.endswith(\".gct\"):\n out_fname = '{0}_n{1}x{2}.gct'.format(fname, data_df.shape[1], data_df.shape[0])\n return out_fname\n\n # Otherwise, only add the dims\n else:\n basename = os.path.splitext(fname)[0]\n out_fname = '{0}_n{1}x{2}.gct'.format(basename, data_df.shape[1], data_df.shape[0])\n return out_fname\n" } ]
4
ArthurCST/hyperparametros
https://github.com/ArthurCST/hyperparametros
30f35337c838eb5bfaf288ec6063263b899fa333
045ed46548a2eadb2a2fd3a5ad2d46112f99297b
6f8be284f9aedf8c4694d4a2bd582ee61a2577f3
refs/heads/master
2020-09-20T10:16:44.045974
2019-12-08T21:42:39
2019-12-08T21:42:39
224,448,545
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.5904284715652466, "alphanum_fraction": 0.6160266995429993, "avg_line_length": 37.25531768798828, "blob_id": "4f04f03b02dcba8751d2515f7cdc8a9e9b5c4717", "content_id": "bf5eecc2273889e596b2bfeeeddfbae49a03f399", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1797, "license_type": "no_license", "max_line_length": 117, "num_lines": 47, "path": "/segmentation loss.py", "repo_name": "ArthurCST/hyperparametros", "src_encoding": "UTF-8", "text": "import tensorflow as tf\nfrom keras.losses import binary_crossentropy\nimport keras.backend as K\n\ndef dice_loss(y_true, y_pred):\n numerator = 2 * tf.reduce_sum(y_true * y_pred, axis=(1,2,3))\n denominator = tf.reduce_sum(y_true + y_pred, axis=(1,2,3))\n\n return 1 - numerator / denominator\n\ndef focal_loss(alpha=0.25, gamma=2):\n def focal_loss_with_logits(logits, targets, alpha, gamma, y_pred):\n weight_a = alpha * (1 - y_pred) ** gamma * targets\n weight_b = (1 - alpha) * y_pred ** gamma * (1 - targets)\n return (tf.log1p(tf.exp(-tf.abs(logits))) + tf.nn.relu(-logits)) * (weight_a + weight_b) + logits * weight_b \n\n def loss(y_true, y_pred):\n y_pred = tf.clip_by_value(y_pred, tf.keras.backend.epsilon(), 1 - tf.keras.backend.epsilon())\n logits = tf.log(y_pred / (1 - y_pred))\n\n loss = focal_loss_with_logits(logits=logits, targets=y_true, alpha=alpha, gamma=gamma, y_pred=y_pred)\n # or reduce_sum and/or axis=-1\n return tf.reduce_mean(loss)\n return loss\n\ndef CrossDice_loss(y_true, y_pred):\n def dice_loss(y_true, y_pred):\n numerator = 2 * tf.reduce_sum(y_true * y_pred, axis=(1,2,3))\n denominator = tf.reduce_sum(y_true + y_pred, axis=(1,2,3))\n\n return tf.reshape(1 - numerator / denominator, (-1, 1, 1))\n\n return binary_crossentropy(y_true, y_pred) + dice_loss(y_true, y_pred)\n\ndef tversky_loss(beta=0.8):\n def loss(y_true, y_pred):\n numerator = tf.reduce_sum(y_true * y_pred, axis=-1)\n denominator = y_true * y_pred + beta * (1 - y_true) * y_pred + (1 - beta) * y_true * (1 - y_pred)\n\n return 1 - (numerator + 1) / (tf.reduce_sum(denominator, axis=-1) + 1)\n\n return loss\n\ndef focal_tversky(y_true,y_pred):\n pt_1 = tversky_loss()\n gamma = 0.75\n return K.pow((1-pt_1), gamma)" }, { "alpha_fraction": 0.5087527632713318, "alphanum_fraction": 0.5344638824462891, "avg_line_length": 31.851852416992188, "blob_id": "1914e48ca4e018e6b9b7a9f0da9f896aecfa2dd0", "content_id": "843167bf666283b3ba113231f3b3b63087110067", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1830, "license_type": "no_license", "max_line_length": 128, "num_lines": 54, "path": "/activations.py", "repo_name": "ArthurCST/hyperparametros", "src_encoding": "UTF-8", "text": "from keras import backend as K\r\n\r\nimport tensorflow as tf\r\n\r\ndef ISRLU(x, alpha=1.0):\r\n '''\r\n Applies the ISRLU function element-wise:\r\n .. math::\r\n ISRLU(x)=\\\\left\\\\{\\\\begin{matrix} x, x\\\\geq 0 \\\\\\\\ x * (\\\\frac{1}{\\\\sqrt{1 + \\\\alpha*x^2}}), x <0 \\\\end{matrix}\\\\right.\r\n Plot:\r\n .. 
figure:: _static/isrlu.png\r\n        :align: center\r\n    Shape:\r\n        - Input: (N, *) where * means, any number of additional\r\n          dimensions\r\n        - Output: (N, *), same shape as the input\r\n    Arguments:\r\n        - alpha: hyperparameter α controls the value to which an ISRLU saturates for negative inputs (default = 1)\r\n    References:\r\n        - ISRLU paper: https://arxiv.org/pdf/1710.09967.pdf\r\n    '''\r\n    # K.dot(K.pow(K.sqrt(K.update_add(K.dot(alpha, K.pow(x, 2)), 1.0)), -1), x)\r\n    # print(tf.shape(x))\r\n    return tf.where(K.greater_equal(x, K.zeros(shape=tf.shape(x))),\r\n                    x,\r\n                    x / K.sqrt(1 +(alpha*K.pow(x, 2))))\r\n\r\ndef ISRU(x, alpha=1.0):\r\n    '''\r\n    Applies the ISRU function element-wise:\r\n    .. math::\r\n        ISRU(x)=x\\\\left (\\\\frac{1}{\\sqrt{1+\\\\alpha x^{2}}} \\\\right )\r\n    Plot:\r\n    .. figure:: _static/isrlu.png\r\n        :align: center\r\n    Shape:\r\n        - Input: (N, *) where * means, any number of additional\r\n          dimensions\r\n        - Output: (N, *), same shape as the input\r\n    Arguments:\r\n        - alpha: hyperparameter α controls the value to which an ISRU saturates for negative inputs (default = 1)\r\n    References:\r\n        - ISRLU paper: https://arxiv.org/pdf/1710.09967.pdf\r\n    '''\r\n    return x / K.sqrt(1 +(alpha*K.pow(x, 2)))\r\n\r\ndef bentID(x):\r\n    '''\r\n    Bent Identity\r\n    .. math::\r\n        bentID(x)=\\\\frac{\\\\sqrt{x^{2}+1}-1}{2}+x\r\n    '''\r\n\r\n    return ((K.sqrt(K.pow(x,2)+1)-1)/2)+x\r\n" } ]
2
ltnormalcoder/first
https://github.com/ltnormalcoder/first
df8dfb225fb3685d54876b9717f57ab1342a3d0f
535457d09ba5528d3c97f5d460c02689f59843d8
6d619dd387a7b8ff54221a629b4f31afba6317dd
refs/heads/master
2020-06-21T16:05:26.321821
2019-07-18T02:39:09
2019-07-18T02:39:09
197,497,752
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5697140693664551, "alphanum_fraction": 0.5834804177284241, "avg_line_length": 29.47311782836914, "blob_id": "7e6ffcc43b8ae903cc4e42e2cd2fac4b2e4ac638", "content_id": "cd33bc423bd1aadd46a2e8bdebbaf9cbf2c9e5f5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2963, "license_type": "no_license", "max_line_length": 116, "num_lines": 93, "path": "/verifyemail.py", "repo_name": "ltnormalcoder/first", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n# -*- coding: UTF-8 -*-\n\nimport random\nimport smtplib\nimport logging\nimport time\nimport re\nimport iptable\nimport dns.resolver\n\n#logging.basicConfig(level=logging.DEBUG,\n # format='%(asctime)s - %(filename)s [line:%(lineno)d] - %(levelname)s: %(message)s')\n\n#logger = logging.getLogger()\n\n\ndef fetch_mx(host):\n '''\n 解析服务邮箱\n :param host:\n :return:\n '''\n #logger.info('正在查找邮箱服务器')\n answers = dns.resolver.query(host, 'MX')\n res = [str(rdata.exchange)[:-1] for rdata in answers]\n #logger.info('查找结果为:%s' % res)\n return res\n\n\ndef verify_istrue(email,email_path):\n email_list = []\n email_obj = []\n email_ok = []\n final_res = {}\n #firstverify(email,email_path)\n email_obj=iptable.somelist(email_path)\n all_num=len(email_obj)\n s=connect_email()\n for need_verify in email_obj: \n totol=email_obj.index(need_verify)\n send_from = login_email(s).docmd('RCPT TO:<%s>' % need_verify)\n if send_from[0] == 250 or send_from[0] == 451:\n final_res[need_verify] = 'True' # 存在\n record_process = '正在检查 第'+str(totol)+' 个用户邮箱 剩余 '+str(all_num-totol)+' 尝试成功! sendto '+need_verify+'\\n'\n print '正在检查 第'+str(totol)+' 个用户邮箱 剩余 '+str(all_num-totol)+' 个'+need_verify+'结果:'+final_res[need_verify]\n with open('res/static/progress.txt','a+') as f:\n f.write(record_process)\n with open('res/gress.txt','a+') as f:\n f.write(need_verify+'\\n')\n s.close()\ndef firstverify(email,email_path):\n if isinstance(email, str) or isinstance(email, bytes):\n email_list.append(email)\n else:\n email_list = email\n email_obj=[]\n for em in email_list:\n if re.match(r'^[0-9a-zA-Z_]{0,19}@qq\\.[com,cn,net]{1,3}$',em): \n email_obj.append(em)\n print '添加'+em\n else:\n print '忽视'+em\n filter(None, email_obj)\n email_obj = list(set(email_obj))\n with open(email_path,'w') as f:\n f.write('\\n'.join( email_obj))\ndef connect_email():\n try:\n s = smtplib.SMTP('smtp.qq.com', timeout=100)\n except Exception as e:\n time.sleep(1)\n s=connect_email()\n else:\n return s\ndef login_email(si):\n try:\n s=si\n s.login('[email protected]', 'qgmulruqictnbbcg')\n s.docmd('HELO we'+str(random.randint(100000,100000000))+'.cn')\n s.docmd('MAIL FROM:<'+'[email protected]'+'>')\n except Exception as e:\n print '意外断开!'\n time.sleep(600)\n s=login_email(si)\n else:\n return s\ndef verifyemail():\n useremails=iptable.somelist('res/useremail.txt')\n final_list = verify_istrue(useremails,'res/useremail.txt')\ndef verifyapiemail():\n useremails=iptable.somelist('res/apiuseremail.txt')\n final_list = verify_istrue(useremails,'res/apiuseremail.txt')" }, { "alpha_fraction": 0.5707560181617737, "alphanum_fraction": 0.5815563797950745, "avg_line_length": 32.786407470703125, "blob_id": "cbbc829fd8f14cb25b170c579270e7391bed8dac", "content_id": "48898a979f5aac69220830bc74cd1e81d6aab8b4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3901, "license_type": "no_license", "max_line_length": 141, "num_lines": 103, "path": "/apiemail.py", "repo_name": "ltnormalcoder/first", 
"src_encoding": "UTF-8", "text": "\r\n# coding:utf-8\r\n \r\nimport json\r\nfrom urlparse import parse_qs\r\nfrom wsgiref.simple_server import make_server\r\nimport MySQLdb\r\nimport sys \r\nimport time\r\nimport iptable\r\n\r\ndef db_config():\r\n\tdb_config={}\r\n\tdb_config['host']=\"127.0.0.1\"\r\n\tdb_config['username']=\"ewomail\"\r\n\tdb_config['passwords']=\"GGHheZdnkcdTFvVu\"\r\n\tdb_config['db_name']=\"apiemail\"\r\n\treturn db_config\r\n\r\ndef msq_select(condition,table=''):\r\n db = MySQLdb.connect(db_config()['host'], db_config()['username'], db_config()['passwords'], db_config()['db_name'], charset='utf8')\r\n cursor = db.cursor()\r\n #user=msq_select('order by id asc limit 1')[0]\r\n table=check_table(table)\r\n sql='select * from '+table+' '+condition\r\n # 使用cursor()方法获取操作游标 \r\n cursor = db.cursor()\r\n # 使用execute方法执行SQL语句\r\n cursor.execute(sql)\r\n datalist=[]\r\n for x in cursor:\r\n datalist.append({'id':x[0],'useremail':x[1],'status':x[2]})\r\n return datalist\r\ndef msq_update(condition,table=''):\r\n #msq_update('status =1 where id='+str(user['id']))\r\n db = MySQLdb.connect(db_config()['host'], db_config()['username'], db_config()['passwords'], db_config()['db_name'], charset='utf8' )\r\n cursor = db.cursor()\r\n table=check_table(table)\r\n sql='update '+table+' set '+condition\r\n try:\r\n # 执行SQL语句\r\n cursor.execute(sql)\r\n # 提交到数据库执行\r\n db.commit()\r\n except:\r\n # 发生错误时回滚\r\n db.rollback()\r\n \r\ndef msq_insert(email='',table=''):\r\n db = MySQLdb.connect(db_config()['host'], db_config()['username'], db_config()['passwords'], db_config()['db_name'], charset='utf8' )\r\n cursor = db.cursor()\r\n table=check_table(table)\r\n if email=='all':\r\n useremail=''\r\n for x in iptable.somelist(\"res/apiuseremail.txt\"):\r\n useremail=useremail+'(\"'+x+'\")'+','\r\n else:\r\n useremail='(\"'+email+'\")'\r\n sql = 'insert into '+table+'(useremail) values'+useremail[:-1] \r\n try:\r\n # 执行SQL语句\r\n cursor.execute(sql)\r\n # 提交到数据库执行\r\n db.commit()\r\n except:\r\n # 发生错误时回滚\r\n db.rollback()\r\ndef check_table(table):\r\n if table=='':\r\n table='useremails'\r\n return table\r\n\r\n# 定义函数,参数是函数的两个参数,都是python本身定义的,默认就行了。\r\ndef application(environ, start_response):\r\n # 打开数据库连接\r\n # 定义文件请求的类型和当前请求成功的code\r\n start_response('200 OK', [('Content-Type', 'text/html')])\r\n # 获取当前get请求的所有数据,返回是string类型\r\n params = parse_qs(environ['QUERY_STRING'])\r\n # 获取get中key为name的值\r\n useremail = params.get('useremail', [''])[0]\r\n status= params.get('status', [''])[0]\r\n #请求的两个参数邮箱和状态\r\n if useremail=='':\r\n try:\r\n user=msq_select('where status=0 order by id asc limit 1')[0]\r\n msq_update('status =1 where useremail=\"'+user['useremail']+'\"')\r\n except Exception as e:\r\n dic = {'useremail': '','status':''}\r\n else:\r\n dic = {'useremail': user['useremail'],'status': user['status']}\r\n else :\r\n msq_update('status ='+status+' where useremail=\"'+useremail+'\"')\r\n db = MySQLdb.connect(db_config()['host'], db_config()['username'], db_config()['passwords'], db_config()['db_name'], charset='utf8' )\r\n cursor = db.cursor()\r\n cursor.execute('select count(*) from useremails where status=0')\r\n left_num=cursor.fetchall()[0][0]\r\n dic = {'useremail': useremail,'status': status,'left':left_num}\r\n return [json.dumps(dic)]\r\ndef start_useremail_api():\r\n port = 5088\r\n httpd = make_server(\"0.0.0.0\", port, application)\r\n print \"serving http on port {0}...\".format(str(port))\r\n httpd.serve_forever() \r\n\r\n\r\n\r\n\r\n \r\n\r\n\r\n\r\n\r\n \r\n" }, { 
"alpha_fraction": 0.6494312286376953, "alphanum_fraction": 0.6535677313804626, "avg_line_length": 38.125, "blob_id": "2aa245ea5b407991c0f023849edd2afe03d0219d", "content_id": "f53d92c51af0458d9816378b3e5a5abd224ae010", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 981, "license_type": "no_license", "max_line_length": 128, "num_lines": 24, "path": "/opendkim.py", "repo_name": "ltnormalcoder/first", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\r\n# -*- coding: utf-8 -*-\r\nimport sys#sys.argv\r\nimport os \r\nimport iptable\r\n\r\n\r\ndef opendkim():\r\n\tweb = iptable.somelist(\"res/web.txt\")\r\n\tos.system('echo '' > /etc/opendkim/KeyTable')\r\n\tos.system('echo '' > /etc/opendkim/SigningTable')\r\n\tos.system('echo localhost > /etc/opendkim/TrustedHosts')\r\n\tfor w in web :\r\n\t\t\tos.system(\"mkdir /etc/opendkim/keys/\"+w)\r\n\t\t\tos.system('opendkim-genkey --domain='+w+' --directory=/etc/opendkim/keys/'+w+'/')\r\n\t\t\tos.system('echo \"default._domainkey.'+w+' '+w+':default:/etc/opendkim/keys/'+w+'/default.private\" >> /etc/opendkim/KeyTable')\r\n\t\t\tos.system('echo \"*@'+w+' default._domainkey.'+w+'\" >> /etc/opendkim/SigningTable')\r\n\t\t\tos.system('echo \"'+w+'\" >> /etc/opendkim/TrustedHosts')\r\n\t\t\tprint(w+'创建密匙成功!')\r\n\r\n\tos.system('chown opendkim:opendkim -R /etc/opendkim/')\r\n\tos.system('chmod -R 700 /etc/opendkim')\r\n\tos.system('systemctl restart opendkim.service')\r\n\tos.system('systemctl restart postfix.service')\r\n\r\n\r\n" }, { "alpha_fraction": 0.6261904835700989, "alphanum_fraction": 0.6363095045089722, "avg_line_length": 31.939393997192383, "blob_id": "28258361f4f42550263b5a36dff3deae86f22d20", "content_id": "105e08c4a6f464491614a5efd3709ce82eea41d8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3426, "license_type": "no_license", "max_line_length": 177, "num_lines": 99, "path": "/deal_email_data.py", "repo_name": "ltnormalcoder/first", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\r\n# -*- coding: UTF-8 -*-\r\n \r\nimport smtplib,email,dkim \r\nfrom email.mime.text import MIMEText\r\nfrom email.header import Header\r\nimport dns.resolver \r\nimport iptable\r\nimport random\r\nimport time\r\nimport re \r\nimport os\r\nimport sys#sys.argv\r\n\r\n \r\nreload(sys)\r\nsys.setdefaultencoding('utf8')\r\n\r\ndef senderemail():\r\n\tweb = random.choice(iptable.somelist(\"res/web.txt\"))\r\n\tname = iptable.clear(random.choice(iptable.somelist(\"res/static/name.txt\")))\r\n\tsender=name+'@'+web\r\n\treturn sender\r\n\r\ndef sendemail():\r\n os.system('rm -rf /var/log/maillog')\r\n os.system('chmod 777 -R /var/log')\r\n os.system('service rsyslog restart')\r\n user_emails=iptable.somelist(\"res/useremail.txt\")\r\n all_num=len(user_emails)\r\n time_now=time.time()\r\n ip_list_num=len(iptable.iplist())\r\n donum=0\r\n for useremail in user_emails:\r\n iptable_reset = int(open(\"res/static/iptable_reset.txt\",\"r\").read())\r\n if iptable_reset % ip_list_num==0:\r\n iptable.iptable()\r\n iptable_reset=0\r\n donum=mail(useremail,donum,user_emails.index(useremail),all_num,time_now)\r\n iptable_reset=iptable_reset+1\r\n with open('res/static/iptable_reset.txt','w') as f:\r\n f.write(str(iptable_reset))\r\n time.sleep(random.randint(12,21))\r\n\r\ndef mail(useremail,donum,dealnum,all_num,time_now):\r\n try:\r\n os.system('hostname '+iptable.hostwebone())\r\n sender=senderemail()\r\n web=sender.split('@')[1]\r\n 
message_str=iptable.mail_detail(sender,useremail)\r\n sig = dkim.sign(message_str, 'default',web, open(os.path.join(\"/etc/opendkim/keys/\"+web+'/', 'default.private')).read())\r\n message_str='DKIM-Signature: '+sig[len(\"DKIM-Signature: \"):]+message_str\r\n except :\r\n with open('res/useremail.txt','a+') as f:\r\n f.write('\\n'+useremail)\r\n else:\r\n try:\r\n sendmaildeal(sender,useremail,message_str)\r\n message_detail = sender+' sendto '+useremail\r\n timeuse=time.time()-time_now\r\n except smtplib.SMTPException as e:\r\n print e\r\n if e[0]==550:\r\n with open('res/useremail.txt','a+') as f:\r\n f.write('\\n'+useremail)\r\n else:\r\n with open('res/useremail.txt','a+') as f:\r\n f.write('\\n'+useremail)\r\n except :\r\n with open('res/useremail.txt','a+') as f:\r\n f.write('\\n'+useremail)\r\n else:\r\n\t\tdonum=donum+1\r\n\t\tleft=all_num-donum\r\n\t\ttimeper=timeuse/(dealnum+1)\r\n\t\toneday_num=str(int((3600*24)/int(timeper)))\r\n\t\trecord_process = str(int(timeper))+'秒/每封 '+' 处理 '+str(dealnum+1)+' 成功 '+str(donum)+' 剩余 '+str(left)+' 耗时 '+str(int(timeuse/60))+'分钟 '+oneday_num+'封/天 '+message_detail+'\\n'\r\n\t\twith open('res/static/progress.txt','a+') as f:\r\n\t\t\tf.write(record_process)\r\n\t\tprint record_process.replace(\"\\n\", \"\")\r\n return donum\r\ndef sendmaildeal(mailfrom, mailto, msg): \r\n domain = email.Utils.unquote(mailto).split(\"@\")[1]\r\n host = dns.resolver.query(domain, \"MX\")[0].exchange\r\n smtp = smtplib.SMTP(str(host))\r\n smtp.sendmail(mailfrom, [mailto], msg)\r\n smtp.quit()\r\n\r\ndef mail_record():\r\n with open('res/static/progress.txt','r') as fr:\r\n record_one= fr.readlines()[-1]\r\n record_mail = iptable.clear(record_one.split('sendto ')[1])\r\n try:\r\n undomail=open(\"res/useremail.txt\",\"r\").read().split(record_mail)[1]\r\n with open('res/useremail.txt','w') as f:\r\n f.write(\"\\n\"+undomail)\r\n print '记录进度成功!'+record_one\r\n except :\r\n print \"记录失败请稍后再试!\"\r\n" }, { "alpha_fraction": 0.6339285969734192, "alphanum_fraction": 0.7232142686843872, "avg_line_length": 26.25, "blob_id": "393b421c870043330f3dff6dae04621c3ac21951", "content_id": "d055e1db51c45e452177bbbffebd8fbf4929f8a0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 224, "license_type": "no_license", "max_line_length": 47, "num_lines": 8, "path": "/test.py", "repo_name": "ltnormalcoder/first", "src_encoding": "UTF-8", "text": "import requests\r\nimport json\r\nurl = r'http://23.228.74.242:5088'\r\nparams1 = {\"q\":\"4354\"}\r\nresponse = requests.get(url=url,params=params1)\r\nprint(response.status_code)\r\ndata=json.loads(response.text)\r\nprint(data['useremail'])" }, { "alpha_fraction": 0.5311740636825562, "alphanum_fraction": 0.5846154093742371, "avg_line_length": 28.170732498168945, "blob_id": "50ec79fbfb5f49671bf67ceb79c5dc1a6e2c3763", "content_id": "2ea0a16834f870a05be1ad430f5746017f4373df", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1235, "license_type": "no_license", "max_line_length": 123, "num_lines": 41, "path": "/verifyweb.py", "repo_name": "ltnormalcoder/first", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\r\n# -*- coding: utf-8 -*-\r\nimport sys#sys.argv\r\nimport subprocess\r\nimport re\r\nimport iptable\r\n\r\ndef verifyweb():\r\n\tiplist = iptable.iplist()\r\n\twebs = iptable.somelist(\"res/web.txt\")\r\n\twebok=[]\r\n\tfor web in 
webs:\r\n\t\tl=re.findall(r'\\b(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\b',getIP(web))\r\n\t\tif l:\r\n\t\t\tif l[0] in iplist:\r\n\t\t\t\twebok.append(web)\r\n\t\t\t\tprint 'join '+web\r\n\twith open('res/web.txt','w') as f:\r\n f.write('\\n'.join(webok))\r\n\r\ndef getIP(domain):\r\n\trun_watch=subprocess.Popen('ping -c 1 -w 1 %s'%domain,shell=True,stdout=subprocess.PIPE,stderr=subprocess.STDOUT)\r\n\toutput,err_msg=run_watch.communicate()\r\n\trun_watch.returncode\r\n\treturn output\r\ndef delweb(web):\r\n\tiplist = iptable.iplist()\r\n\twebs = open(\"res/web.txt\",\"r\").read().replace(web, \"\")\r\n\twith open('res/web.txt','w') as f:\r\n f.write(webs)\r\n\r\ndef verifyhostweb():\r\n\twebs = iptable.hostweblist()\r\n\twebok=[]\r\n\tfor web in webs:\r\n\t\tl=re.findall(r'\\b(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\b',getIP(web))\r\n\t\tif l:\r\n\t\t\twebok.append(web)\r\n\t\t\tprint 'join '+web\r\n\twith open('res/static/hostweb.txt','w') as f:\r\n f.write('\\n'.join(webok))" }, { "alpha_fraction": 0.5295815467834473, "alphanum_fraction": 0.5916305780410767, "avg_line_length": 47.5, "blob_id": "0bdd1f90d1f43942aed47db102427cb3d7554095", "content_id": "1f8ec0709f2a1e2d33e0b646e00397cbf68a7f08", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 733, "license_type": "no_license", "max_line_length": 125, "num_lines": 14, "path": "/statistics.py", "repo_name": "ltnormalcoder/first", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\r\n# -*- coding: utf-8 -* \r\n#__author__ = 'Administrat \r\n#coding=utf-8 \r\nimport sys\r\nimport os \r\n\r\ndef statistics():\r\n\tsend_num = os.popen('grep -E -o \".{0,0}.status=sent.{0,19}\" /var/log/maillog | grep -c \"sen\" ' ).read()\r\n\twrong550_num = os.popen('grep -E -o \".{0,0}.said: 550.{0,19}\" /var/log/maillog | grep -c \"550\" ' ).read()\r\n\tBlocked_num= os.popen('grep -E -o \".{0,0}.Blocked: 550.{0,19}|.{0,0}.IP: .{0,13}\" /var/log/maillog | grep -c \"IP\" ' ).read()\r\n\twith open('res/static/progress.txt','r') as f:\r\n\t\trecord_mail = f.readlines()[-1].split('deal')[1].split('message')[0]\r\n\tprint('总投递数:'+record_mail+'\\n成功投递数:'+send_num+'550错误数:'+wrong550_num+'封禁ip次数:'+Blocked_num)\r\n" }, { "alpha_fraction": 0.6653033494949341, "alphanum_fraction": 0.6782549619674683, "avg_line_length": 32.34090805053711, "blob_id": "b9b395e97fb1e64b0e37f6c1dd34d9da0983d08c", "content_id": "b8f2ce2ace98b4bd2c6dfac401736ffb4144cfc4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1475, "license_type": "no_license", "max_line_length": 87, "num_lines": 44, "path": "/install.sh", "repo_name": "ltnormalcoder/first", "src_encoding": "UTF-8", "text": "#!/bin/bash\nwget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo;\nyum -y install epel-release -y;\nrpm -Uvh https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm;\nyum clean all;\nyum makecache --skip-broken;\nyum install python -y;\nyum install screen -y;\nyum install python-requests -y;\nyum install opendkim -y;\nyum install postfix -y;\nyum install lrzsz -y;\nrm -rf /etc/postfix/main.cf;\nrm -rf /etc/postfix/header_checks; \nrm -rf /etc/postfix/master.cf;\ncp linux/main.cf /etc/postfix/main.cf; \ncp linux/header_checks\t /etc/postfix/header_checks; \ncp linux/master.cf \t/etc/postfix/master.cf;\ncat > /etc/opendkim.conf<<EOF\nCanonicalization 
relaxed/relaxed\nExternalIgnoreList refile:/etc/opendkim/TrustedHosts\nInternalHosts refile:/etc/opendkim/TrustedHosts\nKeyTable refile:/etc/opendkim/KeyTable\nLogWhy Yes\nMinimumKeyBits 1024\nMode sv\nPidFile /var/run/opendkim/opendkim.pid\nSigningTable refile:/etc/opendkim/SigningTable\nSocket inet:[email protected]\nSyslog Yes\nSyslogSuccess Yes\nTemporaryDirectory /var/tmp\nUMask 022\nUserID opendkim:opendkim\nEOF\nservice dkim restart;\nservice postfix restart;\nyum -y install python-pip;\npip install dkimpy;\npip install sh;\npip install pytz;\npip install tldextract;\npython do.py;\necho '安装成功';\n" }, { "alpha_fraction": 0.5823184251785278, "alphanum_fraction": 0.598556637763977, "avg_line_length": 44.224491119384766, "blob_id": "7f79b8f7ccce9147f6c9d1e91c11c8bd3fb362f4", "content_id": "ec42fbf0b67b894ed8894a6d1ac70cbdd64dfa20", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2323, "license_type": "no_license", "max_line_length": 142, "num_lines": 49, "path": "/dnspod.py", "repo_name": "ltnormalcoder/first", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\nimport requests\nimport sys#sys.argv\nimport json\nimport re\nimport iptable\n#105749,e2d2941a2acb7c1cd6add114a11804ea'\ndef dnspod(typename):\n\tl=raw_input(\"\\n\\033[1;32;40m请输入dnspod api账号密码 格式 xxxxxx,xxxxxxxxxxx :\")\n\tif l=='':\n\t\tprint '请输入 api账号密码!'\n\t\texit()\n\tip = iptable.iplist()\n\tweb = iptable.somelist(\"res/web.txt\")\n\tip_list=ip+ip+ip+ip\n\tcount=0\t\t\n\tfor w in web :\t\n\t\t\twww=w\n\t\t\t#print(w+ip_list[web.index(w)])\n\t\t\tif typename=='添加全部域名解析':\n\t\t\t\t\tcount+=1\n\t\t\t\t\treq_data = {'login_token':l,'format':'json','domain':w}\n\t\t\t\t\trequrl = \"https://dnsapi.cn/Domain.Create\"\n\t\t\t\t\ttry:\n\t\t\t\t\t\tr = json.loads(requests.post(requrl, data=req_data).text)\n\t\t\t\t\t\tdkim_origin=open('/etc/opendkim/keys/'+www+'/default.txt',\"r\").read()\n\t\t\t\t\t\tdkim=iptable.clear(re.findall(re.compile(r'[(](.*)[)]', re.S), dkim_origin)[0])\n\t\t\t\t\t\tprint (www+'添加域名 ->'+r['status']['message'])\n\t\t\t\t\t\tif 'exists' not in r['status']['message']:\n\t\t\t\t\t\t\treq_data=\t[\n\t\t\t\t\t\t\t\t\t\t{'login_token':l,'format':'json','domain':w,'sub_domain':'@','record_line':'默认','record_type':'A','value':ip_list[web.index(w)]},\n\t\t\t\t\t\t\t\t\t\t{'login_token':l,'format':'json','domain':w,'sub_domain':'mail','record_line':'默认','record_type':'A','value':ip_list[web.index(w)]},\n\t\t\t\t\t\t\t\t\t\t{'login_token':l,'format':'json','domain':w,'sub_domain':'*','record_line':'默认','record_type':'CNAME','value':'mail.'+w},\n\t\t\t\t\t\t\t\t\t\t{'login_token':l,'format':'json','domain':w,'sub_domain':'@','record_line':'默认','record_type':'MX','value':'mail.'+w},\n\t\t\t\t\t\t\t\t\t\t{'login_token':l,'format':'json','domain':w,'sub_domain':'default._domainkey','record_line':'默认','record_type':'TXT','value':dkim}\n\t\t\t\t\t\t\t\t\t\t]\n\t\t\t\t\t\t\trequrl = \"https://dnsapi.cn/Record.Create\"\n\t\t\t\t\t\t\tfor req_data_one in req_data:\n\t\t\t\t\t\t\t\t\tr = json.loads(requests.post(requrl, data=req_data_one).text)\n\t\t\t\t\t\t\t\t\tprint (www+'添加'+req_data_one['record_type']+'记录 ->'+r['status']['message'])\n\t\t\t\t\texcept :\n\t \t\t\t\t\t print r['status']['message']\n\t\t\telif typename=='删除全部域名解析':\n\t\t\t\t\tcount+=1\n\t\t\t\t\treq_data = {'login_token':l,'format':'json','domain':w}\n\t\t\t\t\trequrl = \"https://dnsapi.cn/Domain.Remove\"\n\t\t\t\t\tr = json.loads(requests.post(requrl, 
data=req_data).text)\n\t\t\t\t\tprint (www+'删除'+' ->'+r['status']['message'])\n\n" }, { "alpha_fraction": 0.6941081285476685, "alphanum_fraction": 0.7021791934967041, "avg_line_length": 26.204545974731445, "blob_id": "313dee84b7f618388e61fc75c1a4f73e23a2c6f2", "content_id": "621330f966bf7d6ee8e8f9be988977130367a937", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1665, "license_type": "no_license", "max_line_length": 95, "num_lines": 44, "path": "/do.py", "repo_name": "ltnormalcoder/first", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\r\n# -*- coding: UTF-8 -*-\r\n \r\nimport deal_email_data\r\nimport os\r\nimport sys#sys.argv\r\nimport dnspod\r\nimport opendkim\r\nimport statistics\r\nimport iptable\r\nimport verifyemail\r\nimport verifyweb\r\nimport apiemail\r\n\r\nreload(sys)\r\nsys.setdefaultencoding('utf8')\r\n\r\nprint('————发邮件准备步骤,从上到下依次完成!,打错字按crtl+删除键回删!————\\n生成密匙 \\n删除全部域名解析\\n添加全部域名解析\\n检查邮件\\n检查域名\\nip轮询')\r\nprint('————发邮件开始步骤————\\n记录和查看进度\\n群发邮件\\n发件统计')\r\nprint('————windows多服务器开启api步骤(从上到下依次输入即可)————\\n检查api客户邮件\\n导入客户邮箱到api\\n开启客户邮箱api接口')\r\ninput_words= raw_input(\"\"\"\\n\\033[1;32;40m请输入执行动作名字:\"\"\")\r\n\r\nif input_words=='记录和查看进度':\t\r\n\t\tdeal_email_data.mail_record()\r\nelif input_words=='群发邮件':\r\n\t\tdeal_email_data.sendemail()\r\nelif input_words=='检查邮件':\r\n\t\tverifyemail.verifyemail()\r\nelif input_words=='检查域名':\r\n\t\tverifyweb.verifyweb()\r\nelif input_words=='删除全部域名解析' or input_words=='添加全部域名解析':\r\n\t dnspod.dnspod(input_words)\r\nelif input_words=='生成密匙':\r\n opendkim.opendkim()\r\nelif input_words=='发件统计':\r\n statistics.statistics()\r\nelif input_words=='ip轮询':\r\n iptable.iptable()\r\nelif input_words=='检查api邮件':\r\n verifyemail.verifyapiemail()\r\nelif input_words=='导入客户邮箱到api':\r\n apiemail.msq_insert('all')\r\nelif input_words=='开启客户邮箱api接口':\r\n apiemail.start_useremail_api()" } ]
10
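The dnspod.py record in the row above drives the DNSPod HTTP API with bare requests.post calls against https://dnsapi.cn (Domain.Create, Record.Create). Below is a minimal sketch of that request pattern: the endpoint and the login_token / format / domain / record fields come from the archived file, while TOKEN, DOMAIN, and the sample values are hypothetical placeholders, not part of the repo.

# -*- coding: utf-8 -*-
# Minimal sketch of the DNSPod request pattern used in dnspod.py above.
# TOKEN and DOMAIN are hypothetical placeholders; endpoint and parameter
# names (login_token, format, domain, sub_domain, record_line, record_type,
# value) are taken from the archived file.
import json
import requests

TOKEN = "id,token"       # hypothetical DNSPod API token, "id,token" format
DOMAIN = "example.com"   # hypothetical domain

def add_a_record(sub_domain, ip):
    """POST one A record to DNSPod and return the API status message."""
    payload = {
        "login_token": TOKEN,
        "format": "json",
        "domain": DOMAIN,
        "sub_domain": sub_domain,
        "record_line": u"默认",  # the "default" routing line, as in dnspod.py
        "record_type": "A",
        "value": ip,
    }
    resp = json.loads(
        requests.post("https://dnsapi.cn/Record.Create", data=payload).text)
    return resp["status"]["message"]

if __name__ == "__main__":
    print(add_a_record("mail", "192.0.2.1"))  # 192.0.2.1: documentation IP

Isolating one call per function also makes failures visible per request: dnspod.py wraps its whole record loop in a bare except and can only print the last status message, whereas returning status.message from each call reports exactly which request failed.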
circleacid/car_sale
https://github.com/circleacid/car_sale
cb26953e688750de6784562b54991ec3de18d776
2a3ac1747a52ae8169dff14d0d6154b7485d6b6c
ebbe5e3d1b4c0c18897e0eeee01cd404b8d79491
refs/heads/master
2021-08-29T00:54:39.625945
2021-08-24T04:00:34
2021-08-24T04:00:34
249,142,118
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5866666436195374, "alphanum_fraction": 0.6053333282470703, "avg_line_length": 9.13513469696045, "blob_id": "f31dd64eb60a08e190659e95904a6dba93263e8e", "content_id": "3072683f84b1fcf4f8685ad34d9d7975b4d9f59b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 683, "license_type": "permissive", "max_line_length": 44, "num_lines": 37, "path": "/README.md", "repo_name": "circleacid/car_sale", "src_encoding": "UTF-8", "text": "# 轿车营销系统\n\n## 数据库课程设计\n\n> 本系统采用Python+MySQL语言进行设计\n\n### 先前准备\n\n- Python环境\n\n - Python 3.7.2\n\n > 本系统运行在3.7.2版本下\n\n- 使用模块\n - pymysql\n - xlwt\n - wx\n\n- MySQL\n\n - MySQL 8\n\n - 建立数据库\n\n > 即**car_sale.sql**文件\n\n### 运行方式\n\n- 将文件解压至同一目录,运行 **test.py**\n\n- 如需实现 **登录** 操作请在数据库中建立相应的操作\n- 连接数据库代码在 **mydb.py**\n\n### 注意事项\n此系统写的很烂,各部分之间重复代码很多,不遵循软件工程 **低耦合,高内聚** 的原则\\\n运行过程中可能出现 Bug,还请自行解决\n" }, { "alpha_fraction": 0.6043772101402283, "alphanum_fraction": 0.635154664516449, "avg_line_length": 33.284183502197266, "blob_id": "047ce16eb648ad0e86f3affaf5d043db25d200fe", "content_id": "d031577a29a064f2c37f8eab5315d653c9d3fabf", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 14729, "license_type": "permissive", "max_line_length": 86, "num_lines": 373, "path": "/EOperation.py", "repo_name": "circleacid/car_sale", "src_encoding": "UTF-8", "text": "import wx\r\nimport wx.grid\r\nfrom mydb import Sql_operation\r\nclass EOperation(wx.Frame):\r\n\t'''\r\n\t操作界面\r\n\t'''\r\n\tdef __init__(self,*args,**kw):\r\n\t\t# ensure the parent's __init__ is called\r\n\t\tsuper(EOperation,self).__init__(*args, **kw)\r\n\t\t#设置窗口屏幕居中\r\n\t\tself.Center()\r\n\t\t#创建窗口\r\n\t\tself.pnl = wx.Panel(self)\r\n\t\t#调用操作界面函数\r\n\t\tself.OperationInterface()\r\n\r\n\tdef OperationInterface(self):\r\n\t\t#创建垂直方向box布局管理器\r\n\t\tself.vbox = wx.BoxSizer(wx.VERTICAL)\r\n\t\t#################################################################################\r\n\t\t#创建logo静态文本,设置字体属性\r\n\t\tlogo = wx.StaticText(self.pnl,label=\"员工信息管理\")\r\n\t\tfont = logo.GetFont()\r\n\t\tfont.PointSize += 30\r\n\t\tfont = font.Bold()\r\n\t\tlogo.SetFont(font)\r\n\t\t#添加logo静态文本到vbox布局管理中\r\n\t\tself.vbox.Add(logo,proportion=0,flag=wx.FIXED_MINSIZE | wx.TOP | wx.CENTER,border=5)\r\n\t\t#################################################################################\r\n\t\t#创建左侧的静态框\r\n\t\ttext_frame = wx.StaticBox(self.pnl,label=\"选择操作\")\r\n\t\t#创建垂直方向box布局管理器\r\n\t\tvbox_button = wx.StaticBoxSizer(text_frame,wx.VERTICAL)\r\n\t\t#创建操作按钮、绑定事件处理\r\n\t\tcheck_button = wx.Button(self.pnl,id=10,label=\"查看员工信息\",size=(150,50))\r\n\t\tadd_button = wx.Button(self.pnl,id=11,label=\"添加员工信息\",size=(150,50))\r\n\t\tdelete_button = wx.Button(self.pnl,id=12,label=\"删除员工信息\",size=(150,50))\r\n\t\tupdate_button = wx.Button(self.pnl,id=14,label=\"修改员工信息\",size=(150,50))\r\n\t\tquit_button = wx.Button(self.pnl,id=13,label=\"退出系统\",size=(150,50))\r\n\t\tself.Bind(wx.EVT_BUTTON,self.ClickButton,id=10,id2=14)\r\n\t\t#添加操作按钮到vbox布局管理器\r\n\t\tvbox_button.Add(check_button,0,wx.EXPAND | wx.BOTTOM,40)\r\n\t\tvbox_button.Add(add_button,0,wx.EXPAND | wx.BOTTOM,40)\r\n\t\tvbox_button.Add(delete_button,0,wx.EXPAND | wx.BOTTOM,40)\r\n\t\tvbox_button.Add(update_button,0,wx.EXPAND | wx.BOTTOM,40)\r\n\t\tvbox_button.Add(quit_button,0,wx.EXPAND | wx.BOTTOM,200)\r\n\t\t#创建右侧静态框\r\n\t\tsb_show_operation = wx.StaticBox(self.pnl,label=\"显示/操作窗口\",size=(800,450))\r\n\t\t#创建垂直方向box布局管理器\r\n\t\tself.vbox_showop 
= wx.StaticBoxSizer(sb_show_operation,wx.VERTICAL)\r\n\t\t#创建水平方向box布局管理器\r\n\t\thbox = wx.BoxSizer()\r\n\t\thbox.Add(vbox_button,0,wx.EXPAND | wx.BOTTOM,5)\r\n\t\thbox.Add(self.vbox_showop,0,wx.EXPAND | wx.BOTTOM,5)\r\n\t\t#将hbox添加到垂直box\r\n\t\tself.vbox.Add(hbox,proportion=0,flag=wx.CENTER)\r\n\t\t#################################################################################\r\n\t\tself.pnl.SetSizer(self.vbox)\r\n #通过对应的按钮进行事件的跳转\r\n\tdef ClickButton(self,event):\r\n\t\tBid = event.GetId()\r\n\t\tif Bid == 10:\r\n\t\t\tprint(\"查询操作!\")\r\n\t\t\tinquire_button = InquireOp(None,title=\"员工管理系统\",size=(1024,720))\r\n\t\t\tinquire_button.Show()\r\n\t\t\tself.Close(True)\r\n\t\telif Bid == 11:\r\n\t\t\tprint(\"添加操作!\")\r\n\t\t\tadd_button = AddOp(None,title=\"员工管理系统\",size=(1024,720))\r\n\t\t\tadd_button.Show()\r\n\t\t\tself.Close(True)\r\n\t\telif Bid == 12:\r\n\t\t\tprint(\"删除操作!\")\r\n\t\t\tdel_button = DelOp(None,title=\"员工管理系统\",size=(1024,720))\r\n\t\t\tdel_button.Show()\r\n\t\t\tself.Close(True)\r\n\t\telif Bid == 13:\r\n\t\t\tself.Close(True)\r\n\t\telif Bid == 14:\r\n\t\t\tprint(\"修改操作!\")\r\n\t\t\tupdate_button=UpdateOp(None,title=\"员工管理系统\",size=(1024,668))\r\n\t\t\tupdate_button.Show()\r\n\t\t\tself.Close(True)\r\n\r\n#继承EOperation类,实现初始化操作界面\r\n\"\"\"\r\n进行数据库的查询操作\r\n\"\"\"\r\nclass InquireOp(EOperation):\r\n\tdef __init__(self,*args,**kw):\r\n\t\t# ensure the parent's __init__ is called\r\n\t\tsuper(InquireOp,self).__init__(*args, **kw)\r\n\t\t#创建员工信息网格\r\n\t\tself.cgrid = self.CreateGrid()\r\n\t\tself.cgrid.Bind(wx.grid.EVT_GRID_LABEL_LEFT_CLICK,self.OnLabelleftClick)\r\n\t\t#添加到vbox_showop布局管理器\r\n\t\tself.vbox_showop.Add(self.cgrid,0,wx.CENTER | wx.TOP | wx.FIXED_MINSIZE,30)\r\n #此处的ClickButton用于事件之间的跳转\r\n\tdef ClickButton(self,event):\r\n\t\tBid = event.GetId()\r\n\t\tif Bid == 10:\r\n\t\t\tpass\r\n\t\telif Bid == 11:\r\n\t\t\tprint(\"添加操作!\")\r\n\t\t\tadd_button = AddOp(None,title=\"员工管理系统\",size=(1024,720))\r\n\t\t\tadd_button.Show()\r\n\t\t\tself.Close(True)\r\n\t\telif Bid == 12:\r\n\t\t\tprint(\"删除操作!\")\r\n\t\t\tdel_button = DelOp(None,title=\"员工管理系统\",size=(1024,720))\r\n\t\t\tdel_button.Show()\r\n\t\t\tself.Close(True)\r\n\t\telif Bid == 13:\r\n\t\t\tself.Close(True)\r\n\t\telif Bid == 14:\r\n\t\t\tprint(\"修改操作!\")\r\n\t\t\tupdate_button=UpdateOp(None,title=\"员工管理系统\",size=(1024,668))\r\n\t\t\tupdate_button.Show()\r\n\t\t\tself.Close(True)\r\n #创建用于显示数据的表格\r\n\tdef CreateGrid(self):\r\n\t\t#连接car_sale数据库\r\n\t\top = Sql_operation(\"car_sale\")\r\n\t\t#获取car表中的学生信息,返回为二维元组\r\n\t\tnp = op.FindAll(\"employ\")\r\n\t\tcolumn_names = (\"员工编号\",\"员工姓名\",\"年龄\",\"性别\",\"籍贯\",\"学历\")\r\n\t\tcgrid = wx.grid.Grid(self.pnl)\r\n\t\t#CreateGrid(行数,列数)\r\n\t\tcgrid.CreateGrid(len(np),len(np[0])-1)\r\n\t\tfor row in range(len(np)):\r\n #表格横向为对应表中的属性,纵向为首个属性的数据\r\n\t\t\tcgrid.SetRowLabelValue(row,str(np[row][0]))\r\n\t\t\tfor col in range(1,len(np[row])):\r\n\t\t\t\tcgrid.SetColLabelValue(col-1,column_names[col])\r\n\t\t\t\tcgrid.SetCellValue(row,col-1,str(np[row][col]))\r\n\t\tcgrid.AutoSize()\r\n\t\treturn cgrid\r\n\r\n\tdef OnLabelleftClick(self,event):\r\n\t\t#连接car_sale数据库\r\n\t\top = Sql_operation(\"car_sale\")\r\n\t\tnp = op.FindAll(\"employ\")\r\n\t\tprint(\"RowIdx: {0}\".format(event.GetRow()))\r\n\t\tprint(\"ColIdx: {0}\".format(event.GetRow()))\r\n\t\tprint(np[event.GetRow()])\r\n\t\tevent.Skip()\r\n\r\n#继承EOperation类,实现初始化操作界面\r\n\"\"\"\r\n数据库插入操作\r\n\"\"\"\r\nclass AddOp(EOperation):\r\n\tdef __init__(self,*args,**kw):\r\n\t\tsuper(AddOp,self).__init__(*args, 
**kw)\r\n\t\t#创建表中属性文本框\r\n\t\tself.eno = wx.TextCtrl(self.pnl,size = (210,25))\r\n\t\tself.ename = wx.TextCtrl(self.pnl,size = (210,25))\r\n\t\tself.eage = wx.TextCtrl(self.pnl,size = (210,25))\r\n\t\tself.esex = wx.TextCtrl(self.pnl,size = (210,25))\r\n\t\tself.ehome = wx.TextCtrl(self.pnl,size = (210,25))\r\n\t\tself.edu=wx.TextCtrl(self.pnl,size=(210,25))\r\n\t\tself.add_affirm = wx.Button(self.pnl,label=\"添加\",size=(80,25))\r\n\t\t#为添加按钮组件绑定事件处理\r\n\t\tself.add_affirm.Bind(wx.EVT_BUTTON,self.AddAffirm)\r\n\r\n #创建静态框\r\n\t\ttext_no = wx.StaticBox(self.pnl,label=\"员工编号\")\r\n\t\ttext_name = wx.StaticBox(self.pnl,label=\"员工姓名\")\r\n\t\ttext_age = wx.StaticBox(self.pnl,label=\"年 龄\")\r\n\t\ttext_sex = wx.StaticBox(self.pnl,label=\"性 别\")\r\n\t\ttext_home = wx.StaticBox(self.pnl,label=\"籍 贯\")\r\n\t\ttext_edu=wx.StaticBox(self.pnl,label=\"学 历\")\r\n\t\t#创建水平方向box布局管理器\r\n\t\thbox_no = wx.StaticBoxSizer(text_no,wx.HORIZONTAL)\r\n\t\thbox_name = wx.StaticBoxSizer(text_name,wx.HORIZONTAL)\r\n\t\thbox_age = wx.StaticBoxSizer(text_age,wx.HORIZONTAL)\r\n\t\thbox_sex = wx.StaticBoxSizer(text_sex,wx.HORIZONTAL)\r\n\t\thbox_home = wx.StaticBoxSizer(text_home,wx.HORIZONTAL)\r\n\t\thbox_edu=wx.StaticBoxSizer(text_edu,wx.HORIZONTAL)\r\n\t\t#添加到hsbox布局管理器\r\n\t\thbox_no.Add(self.eno,0,wx.EXPAND | wx.BOTTOM,5)\r\n\t\thbox_name.Add(self.ename,0,wx.EXPAND | wx.BOTTOM,5)\r\n\t\thbox_age.Add(self.eage,0,wx.EXPAND | wx.BOTTOM,5)\r\n\t\thbox_sex.Add(self.esex,0,wx.EXPAND | wx.BOTTOM,5)\r\n\t\thbox_home.Add(self.ehome,0,wx.EXPAND | wx.BOTTOM,5)\r\n\t\thbox_edu.Add(self.edu,0,wx.EXPAND | wx.BOTTOM,5)\r\n\t\t#################################################################################\r\n\t\t#添加到vbox_showop布局管理器\r\n\t\tself.vbox_showop.Add(hbox_no,0,wx.CENTER | wx.TOP | wx.FIXED_MINSIZE,5)\r\n\t\tself.vbox_showop.Add(hbox_name,0,wx.CENTER | wx.TOP | wx.FIXED_MINSIZE,5)\r\n\t\tself.vbox_showop.Add(hbox_age,0,wx.CENTER | wx.TOP | wx.FIXED_MINSIZE,5)\r\n\t\tself.vbox_showop.Add(hbox_sex,0,wx.CENTER | wx.TOP | wx.FIXED_MINSIZE,5)\r\n\t\tself.vbox_showop.Add(hbox_home,0,wx.CENTER | wx.TOP | wx.FIXED_MINSIZE,5)\r\n\t\tself.vbox_showop.Add(hbox_edu,0,wx.CENTER | wx.TOP | wx.FIXED_MINSIZE,5)\r\n\t\tself.vbox_showop.Add(self.add_affirm,0,wx.CENTER | wx.TOP | wx.FIXED_MINSIZE,5)\r\n\r\n\tdef ClickButton(self,event):\r\n\t\tBid = event.GetId()\r\n\t\tif Bid == 10:\r\n\t\t\tprint(\"查询操作!\")\r\n\t\t\tinquire_button = InquireOp(None,title=\"员工管理系统\",size=(1024,720))\r\n\t\t\tinquire_button.Show()\r\n\t\t\tself.Close(True)\r\n\t\telif Bid == 11:\r\n\t\t\tpass\r\n\t\telif Bid == 12:\r\n\t\t\tprint(\"删除操作!\")\r\n\t\t\tdel_button = DelOp(None,title=\"员工管理系统\",size=(1024,720))\r\n\t\t\tdel_button.Show()\r\n\t\t\tself.Close(True)\r\n\t\telif Bid == 13:\r\n\t\t\tself.Close(True)\r\n\t\telif Bid == 14:\r\n\t\t\tprint(\"修改操作!\")\r\n\t\t\tupdate_button=UpdateOp(None,title=\"员工管理系统\",size=(1024,668))\r\n\t\t\tupdate_button.Show()\r\n\t\t\tself.Close(True)\r\n\tdef AddAffirm(self,event):\r\n\t\t#连接car_sale数据库\r\n\t\top = Sql_operation(\"car_sale\")\r\n\t\teno = self.eno.GetValue()\r\n\t\tprint(eno)\r\n\t\tename = self.ename.GetValue()\r\n\t\tprint(ename)\r\n\t\teage = self.eage.GetValue()\r\n\t\tprint(eage)\r\n\t\tesex = self.esex.GetValue()\r\n\t\tprint(esex)\r\n\t\tehome = self.ehome.GetValue()\r\n\t\tprint(ehome)\r\n\t\tedu=self.edu.GetValue()\r\n\t\tprint(edu)\r\n\t\tnp = op.EInsert(eno,ename,eage,esex,ehome,edu)\r\nclass UpdateOp(EOperation):\r\n\tdef __init__(self,*args,**kw):\r\n\t\tsuper(UpdateOp,self).__init__(*args, 
**kw)\r\n\t\t#创建表中属性文本框\r\n\t\tself.eno = wx.TextCtrl(self.pnl,size = (210,25))\r\n\t\tself.ename = wx.TextCtrl(self.pnl,size = (210,25))\r\n\t\tself.eage = wx.TextCtrl(self.pnl,size = (210,25))\r\n\t\tself.esex = wx.TextCtrl(self.pnl,size = (210,25))\r\n\t\tself.ehome = wx.TextCtrl(self.pnl,size = (210,25))\r\n\t\tself.edu=wx.TextCtrl(self.pnl,size=(210,25))\r\n\t\tself.update_affirm = wx.Button(self.pnl,label=\"修改\",size=(80,25))\r\n\t\t#为添加按钮组件绑定事件处理\r\n\t\tself.update_affirm.Bind(wx.EVT_BUTTON,self.UpdateAffirm)\r\n\r\n #创建静态框\r\n\t\ttext_no = wx.StaticBox(self.pnl,label=\"员工编号\")\r\n\t\ttext_name = wx.StaticBox(self.pnl,label=\"员工姓名\")\r\n\t\ttext_age = wx.StaticBox(self.pnl,label=\"年 龄\")\r\n\t\ttext_sex = wx.StaticBox(self.pnl,label=\"性 别\")\r\n\t\ttext_home = wx.StaticBox(self.pnl,label=\"籍 贯\")\r\n\t\ttext_edu=wx.StaticBox(self.pnl,label=\"学 历\")\r\n\t\t#创建水平方向box布局管理器\r\n\t\thbox_no = wx.StaticBoxSizer(text_no,wx.HORIZONTAL)\r\n\t\thbox_name = wx.StaticBoxSizer(text_name,wx.HORIZONTAL)\r\n\t\thbox_age = wx.StaticBoxSizer(text_age,wx.HORIZONTAL)\r\n\t\thbox_sex = wx.StaticBoxSizer(text_sex,wx.HORIZONTAL)\r\n\t\thbox_home = wx.StaticBoxSizer(text_home,wx.HORIZONTAL)\r\n\t\thbox_edu=wx.StaticBoxSizer(text_edu,wx.HORIZONTAL)\r\n\t\t#添加到hsbox布局管理器\r\n\t\thbox_no.Add(self.eno,0,wx.EXPAND | wx.BOTTOM,5)\r\n\t\thbox_name.Add(self.ename,0,wx.EXPAND | wx.BOTTOM,5)\r\n\t\thbox_age.Add(self.eage,0,wx.EXPAND | wx.BOTTOM,5)\r\n\t\thbox_sex.Add(self.esex,0,wx.EXPAND | wx.BOTTOM,5)\r\n\t\thbox_home.Add(self.ehome,0,wx.EXPAND | wx.BOTTOM,5)\r\n\t\thbox_edu.Add(self.edu,0,wx.EXPAND | wx.BOTTOM,5)\r\n\t\t#################################################################################\r\n\t\t#添加到vbox_showop布局管理器\r\n\t\tself.vbox_showop.Add(hbox_no,0,wx.CENTER | wx.TOP | wx.FIXED_MINSIZE,5)\r\n\t\tself.vbox_showop.Add(hbox_name,0,wx.CENTER | wx.TOP | wx.FIXED_MINSIZE,5)\r\n\t\tself.vbox_showop.Add(hbox_age,0,wx.CENTER | wx.TOP | wx.FIXED_MINSIZE,5)\r\n\t\tself.vbox_showop.Add(hbox_sex,0,wx.CENTER | wx.TOP | wx.FIXED_MINSIZE,5)\r\n\t\tself.vbox_showop.Add(hbox_home,0,wx.CENTER | wx.TOP | wx.FIXED_MINSIZE,5)\r\n\t\tself.vbox_showop.Add(hbox_edu,0,wx.CENTER | wx.TOP | wx.FIXED_MINSIZE,5)\r\n\t\tself.vbox_showop.Add(self.update_affirm,0,wx.CENTER | wx.TOP | wx.FIXED_MINSIZE,5)\r\n\tdef ClickButton(self,event):\r\n\t\tBid = event.GetId()\r\n\t\tif Bid == 10:\r\n\t\t\tprint(\"查询操作!\")\r\n\t\t\tinquire_button = InquireOp(None,title=\"员工管理系统\",size=(1024,668))\r\n\t\t\tinquire_button.Show()\r\n\t\t\tself.Close(True)\r\n\t\telif Bid == 11:\r\n\t\t\tprint(\"添加操作!\")\r\n\t\t\tadd_button=AddOp(None,title=\"员工管理系统\",size=(1024,668))\r\n\t\t\tadd_button.Show()\r\n\t\telif Bid == 12:\r\n\t\t\tprint(\"删除操作!\")\r\n\t\t\tdel_button = DelOp(None,title=\"员工管理系统\",size=(1024,668))\r\n\t\t\tdel_button.Show()\r\n\t\t\tself.Close(True)\r\n\t\telif Bid == 13:\r\n\t\t\tself.Close(True)\r\n\t\telif Bid == 14:\r\n\t\t\tpass\r\n\tdef UpdateAffirm(self,event):\r\n\t\t#连接car_sale数据库\r\n\t\top = Sql_operation(\"car_sale\")\r\n\t\teno = self.eno.GetValue()\r\n\t\tprint(eno)\r\n\t\tename = self.ename.GetValue()\r\n\t\tprint(ename)\r\n\t\teage = self.eage.GetValue()\r\n\t\tprint(eage)\r\n\t\tesex = self.esex.GetValue()\r\n\t\tprint(esex)\r\n\t\tehome = self.ehome.GetValue()\r\n\t\tprint(ehome)\r\n\t\tedu=self.edu.GetValue()\r\n\t\tprint(edu)\r\n\t\tnp = op.EUpdate(eno,ename,eage,esex,ehome,edu)\r\n#继承InquireOp类,实现初始化操作界面\r\nclass DelOp(InquireOp):\r\n\tdef __init__(self,*args,**kw):\r\n\t\t# ensure the parent's __init__ is 
called\r\n\t\tsuper(DelOp,self).__init__(*args, **kw)\r\n\t\t#创建删除员工输入框、删除按钮\r\n\t\tself.del_id = wx.TextCtrl(self.pnl,pos = (407,400),size = (210,25))\r\n\t\tself.del_affirm = wx.Button(self.pnl,label=\"删除\",pos=(625,400),size=(80,25))\r\n\t\t#为删除按钮组件绑定事件处理\r\n\t\tself.del_affirm.Bind(wx.EVT_BUTTON,self.DelAffirm)\r\n\t\t#################################################################################\r\n\t\t#创建静态框\r\n\t\ttext_del = wx.StaticBox(self.pnl,label=\"请选择需要删除的员工编号\")\r\n\t\t#创建水平方向box布局管理器\r\n\t\thbox_del = wx.StaticBoxSizer(text_del,wx.HORIZONTAL)\r\n\t\t#添加到hbox_name布局管理器\r\n\t\thbox_del.Add(self.del_id,0,wx.EXPAND | wx.BOTTOM,5)\r\n\t\t#添加到vbox_showop布局管理器\r\n\t\tself.vbox_showop.Add(hbox_del,0,wx.CENTER | wx.TOP | wx.FIXED_MINSIZE,5)\r\n\t\tself.vbox_showop.Add(self.del_affirm,0,wx.CENTER | wx.TOP | wx.FIXED_MINSIZE,5)\r\n\r\n\tdef ClickButton(self,event):\r\n\t\tBid = event.GetId()\r\n\t\tif Bid == 10:\r\n\t\t\tprint(\"查询操作!\")\r\n\t\t\tinquire_button = InquireOp(None,title=\"员工管理系统\",size=(1024,720))\r\n\t\t\tinquire_button.Show()\r\n\t\t\tself.Close(True)\r\n\t\telif Bid == 11:\r\n\t\t\tprint(\"添加操作!\")\r\n\t\t\tadd_button = AddOp(None,title=\"员工管理系统\",size=(1024,720))\r\n\t\t\tadd_button.Show()\r\n\t\t\tself.Close(True)\r\n\t\telif Bid == 12:\r\n\t\t\tpass\r\n\t\telif Bid == 13:\r\n\t\t\tself.Close(True)\r\n\t\telif Bid == 14:\r\n\t\t\tprint(\"修改操作!\")\r\n\t\t\tupdate_button=UpdateOp(None,title=\"员工管理系统\",size=(1024,668))\r\n\t\t\tupdate_button.Show()\r\n\t\t\tself.Close(True)\r\n\tdef DelAffirm(self,event):\r\n\t\t#连接car_sale数据库\r\n\t\top = Sql_operation(\"car_sale\")\r\n\t\tdel_id = self.del_id.GetValue()\r\n\t\tprint(del_id)\r\n\t\tnp = op.EDel(int(del_id))\r\n\r\n\t\tdel_button = DelOp(None,title=\"员工管理系统\",size=(1024,720))\r\n\t\tdel_button.Show()\r\n\t\tself.Close(True)\r\n\"\"\"\r\nif __name__ == '__main__':\r\n\tapp = wx.App()\r\n\tlogin = EOperation(None,title=\"CSDN学生信息管理系统\",size=(1024,668))\r\n\tlogin.Show()\r\n\tapp.MainLoop()\r\n\"\"\"" }, { "alpha_fraction": 0.47000226378440857, "alphanum_fraction": 0.48834049701690674, "avg_line_length": 34.82500076293945, "blob_id": "5265c870c5b08aaabbed40a20d3d0c595e8ece0f", "content_id": "f3c96210c0e47d5503d31091e4a605a0a94ea5d6", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4879, "license_type": "permissive", "max_line_length": 89, "num_lines": 120, "path": "/CustomManer.py", "repo_name": "circleacid/car_sale", "src_encoding": "UTF-8", "text": "import wx\r\nimport wx.grid\r\nfrom mydb import Sql_operation\r\n\r\n\r\nclass CustomManer(wx.Frame):\r\n def __init__(self, *args, **kw):\r\n super(CustomManer, self).__init__(*args, **kw)\r\n self.Center\r\n self.pnl = wx.Panel(self)\r\n self.CustomUI()\r\n\r\n def CustomUI(self):\r\n self.vbox = wx.BoxSizer(wx.VERTICAL)\r\n #################################################################################\r\n #创建logo静态文本,设置字体属性\r\n #\"\"\"\r\n logo = wx.StaticText(self.pnl, label=\"XX销售公司欢迎您!\")\r\n font = logo.GetFont()\r\n font.PointSize += 20\r\n font = font.Bold()\r\n logo.SetFont(font)\r\n self.vbox.Add(logo,\r\n proportion=0,\r\n flag=wx.FIXED_MINSIZE | wx.TOP | wx.CENTER,\r\n border=5)\r\n #\"\"\"\r\n #################################################################################\r\n #创建左侧的静态框\r\n text_frame = wx.StaticBox(self.pnl, label=\"选择操作\")\r\n #创建垂直方向box布局管理器\r\n vbox_button = wx.StaticBoxSizer(text_frame, wx.VERTICAL)\r\n #创建操作按钮、绑定事件处理\r\n check_button = wx.Button(self.pnl,\r\n id=10,\r\n 
label=\"查看车辆信息\",\r\n size=(150, 50))\r\n quit_button = wx.Button(self.pnl, id=11, label=\"退出系统\", size=(150, 50))\r\n self.Bind(wx.EVT_BUTTON, self.ClickButton, id=10, id2=11)\r\n #添加操作按钮到vbox布局管理器\r\n vbox_button.Add(check_button, 0, wx.EXPAND | wx.BOTTOM, 40)\r\n vbox_button.Add(quit_button, 0, wx.EXPAND | wx.BOTTOM, 200)\r\n #创建右侧静态框\r\n right_showop = wx.StaticBox(self.pnl, label=\"显示/操作窗口\", size=(800, 500))\r\n #创建垂直方向box布局管理器\r\n self.vbox_showop = wx.StaticBoxSizer(right_showop, wx.VERTICAL)\r\n #创建水平方向box布局管理器\r\n hbox = wx.BoxSizer()\r\n hbox.Add(vbox_button, 0, wx.EXPAND | wx.BOTTOM, 5)\r\n hbox.Add(self.vbox_showop, 0, wx.EXPAND | wx.BOTTOM, 5)\r\n #将hbox添加到垂直box\r\n self.vbox.Add(hbox, proportion=0, flag=wx.CENTER)\r\n #################################################################################\r\n self.pnl.SetSizer(self.vbox)\r\n\r\n def ClickButton(self, event):\r\n Bid = event.GetId()\r\n if Bid == 10:\r\n print(\"查询操作!\")\r\n inquire_button = ViewOp(None, title=\"车辆管理系统\", size=(1024, 720))\r\n inquire_button.Show()\r\n self.Close(True)\r\n elif Bid == 11:\r\n self.Close(True)\r\n\r\n\r\nclass ViewOp(CustomManer):\r\n def __init__(self, *args, **kw):\r\n super(ViewOp, self).__init__(*args, **kw)\r\n self.cgrid = self.CreateGrid()\r\n self.cgrid.Bind(wx.grid.EVT_GRID_LABEL_LEFT_CLICK,\r\n self.OnLabelleftClick)\r\n #添加到vbox_showop布局管理器\r\n self.vbox_showop.Add(self.cgrid, 0,\r\n wx.CENTER | wx.TOP | wx.FIXED_MINSIZE, 30)\r\n\r\n def ClickButton(self, event):\r\n Bid = event.GetId()\r\n if Bid == 10:\r\n print(\"查询操作!\")\r\n inquire_button = ViewOp(None, title=\"车辆管理系统\", size=(1024, 720))\r\n inquire_button.Show()\r\n self.Close(True)\r\n elif Bid == 11:\r\n self.Close(True)\r\n\r\n def CreateGrid(self):\r\n #连接car_sale数据库\r\n op = Sql_operation(\"car_sale\")\r\n #获取car表中的学生信息,返回为二维元组\r\n np = op.FindAll(\"car\")\r\n column_names = (\"车辆编号\", \"型号\", \"颜色\", \"生产厂商\", \"出厂日期\", \"价格\")\r\n cgrid = wx.grid.Grid(self.pnl)\r\n #CreateGrid(行数,列数)\r\n cgrid.CreateGrid(len(np), len(np[0]) - 1)\r\n for row in range(len(np)):\r\n #表格横向为对应表中的属性,纵向为首个属性的数据\r\n cgrid.SetRowLabelValue(row, str(np[row][0]))\r\n for col in range(1, len(np[row])):\r\n cgrid.SetColLabelValue(col - 1, column_names[col])\r\n cgrid.SetCellValue(row, col - 1, str(np[row][col]))\r\n cgrid.AutoSize()\r\n return cgrid\r\n\r\n def OnLabelleftClick(self, event):\r\n #连接car_sale数据库\r\n op = Sql_operation(\"car_sale\")\r\n np = op.FindAll(\"car\")\r\n print(\"RowIdx: {0}\".format(event.GetRow()))\r\n print(\"ColIdx: {0}\".format(event.GetRow()))\r\n print(np[event.GetRow()])\r\n event.Skip()\r\n\r\n\"\"\"\r\nif __name__ == '__main__':\r\n app = wx.App()\r\n login = CustomManer(None, title=\"车辆信息查看界面\", size=(1024, 720))\r\n login.Show()\r\n app.MainLoop()\r\n\"\"\"" }, { "alpha_fraction": 0.6044857501983643, "alphanum_fraction": 0.6350421905517578, "avg_line_length": 32.49595642089844, "blob_id": "368d9c4639cb0fd8993d535687cde74b6b1f942a", "content_id": "83e548f958392af2217dca89d28c574e210f10a3", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 14436, "license_type": "permissive", "max_line_length": 86, "num_lines": 371, "path": "/COperation.py", "repo_name": "circleacid/car_sale", "src_encoding": "UTF-8", "text": "import wx\r\nimport wx.grid\r\nfrom mydb import Sql_operation\r\n#跳转至管理界面\r\nclass COperation(wx.Frame):\r\n\t'''\r\n\t操作界面\r\n\t'''\r\n\tdef __init__(self,*args,**kw):\r\n\t\t# ensure the parent's __init__ is 
called\r\n\t\tsuper(COperation,self).__init__(*args, **kw)\r\n\t\t#设置窗口屏幕居中\r\n\t\tself.Center()\r\n\t\t#创建窗口\r\n\t\tself.pnl = wx.Panel(self)\r\n\t\t#调用操作界面函数\r\n\t\tself.OperationInterface()\r\n\r\n\tdef OperationInterface(self):\r\n\t\t#创建垂直方向box布局管理器\r\n\t\tself.vbox = wx.BoxSizer(wx.VERTICAL)\r\n\t\t#################################################################################\r\n\t\t#创建logo静态文本,设置字体属性\r\n\t\tlogo = wx.StaticText(self.pnl,label=\"信息管理\")\r\n\t\tfont = logo.GetFont()\r\n\t\tfont.PointSize += 30\r\n\t\tfont = font.Bold()\r\n\t\tlogo.SetFont(font)\r\n\t\t#添加logo静态文本到vbox布局管理中\r\n\t\tself.vbox.Add(logo,proportion=0,flag=wx.FIXED_MINSIZE | wx.TOP | wx.CENTER,border=5)\r\n\t\t#################################################################################\r\n\t\t#创建左侧的静态框\r\n\t\ttext_frame = wx.StaticBox(self.pnl,label=\"选择操作\")\r\n\t\t#创建垂直方向box布局管理器\r\n\t\tvbox_button = wx.StaticBoxSizer(text_frame,wx.VERTICAL)\r\n\t\t#创建操作按钮、绑定事件处理\r\n\t\tcheck_button = wx.Button(self.pnl,id=10,label=\"查看客户信息\",size=(150,50))\r\n\t\tadd_button = wx.Button(self.pnl,id=11,label=\"添加客户信息\",size=(150,50))\r\n\t\tdelete_button = wx.Button(self.pnl,id=12,label=\"删除客户信息\",size=(150,50))\r\n\t\tupdate_button = wx.Button(self.pnl,id=14,label=\"修改客户信息\",size=(150,50))\r\n\t\tquit_button = wx.Button(self.pnl,id=13,label=\"退出系统\",size=(150,50))\r\n\t\tself.Bind(wx.EVT_BUTTON,self.ClickButton,id=10,id2=14)\r\n\t\t#添加操作按钮到vbox布局管理器\r\n\t\tvbox_button.Add(check_button,0,wx.EXPAND | wx.BOTTOM,40)\r\n\t\tvbox_button.Add(add_button,0,wx.EXPAND | wx.BOTTOM,40)\r\n\t\tvbox_button.Add(delete_button,0,wx.EXPAND | wx.BOTTOM,40)\r\n\t\tvbox_button.Add(update_button,0,wx.EXPAND | wx.BOTTOM,40)\r\n\t\tvbox_button.Add(quit_button,0,wx.EXPAND | wx.BOTTOM,200)\r\n\t\t#创建右侧静态框\r\n\t\tsb_show_operation = wx.StaticBox(self.pnl,label=\"显示/操作窗口\",size=(800,500))\r\n\t\t#创建垂直方向box布局管理器\r\n\t\tself.vbox_showop = wx.StaticBoxSizer(sb_show_operation,wx.VERTICAL)\r\n\t\t#创建水平方向box布局管理器\r\n\t\thbox = wx.BoxSizer()\r\n\t\thbox.Add(vbox_button,0,wx.EXPAND | wx.BOTTOM,5)\r\n\t\thbox.Add(self.vbox_showop,0,wx.EXPAND | wx.BOTTOM,5)\r\n\t\t#将hbox添加到垂直box\r\n\t\tself.vbox.Add(hbox,proportion=0,flag=wx.CENTER)\r\n\t\t#################################################################################\r\n\t\tself.pnl.SetSizer(self.vbox)\r\n #通过对应的按钮进行事件的跳转\r\n\tdef ClickButton(self,event):\r\n\t\tBid = event.GetId()\r\n\t\tif Bid == 10:\r\n\t\t\tprint(\"查询操作!\")\r\n\t\t\tinquire_button = InquireOp(None,title=\"用户管理系统\",size=(1024,668))\r\n\t\t\tinquire_button.Show()\r\n\t\t\tself.Close(True)\r\n\t\telif Bid == 11:\r\n\t\t\tprint(\"添加操作!\")\r\n\t\t\tadd_button = AddOp(None,title=\"用户管理系统\",size=(1024,668))\r\n\t\t\tadd_button.Show()\r\n\t\t\tself.Close(True)\r\n\t\telif Bid == 12:\r\n\t\t\tprint(\"删除操作!\")\r\n\t\t\tdel_button = DelOp(None,title=\"用户管理系统\",size=(1024,668))\r\n\t\t\tdel_button.Show()\r\n\t\t\tself.Close(True)\r\n\t\telif Bid == 13:\r\n\t\t\tself.Close(True)\r\n\t\telif Bid == 14:\r\n\t\t\tprint(\"修改操作!\")\r\n\t\t\tupdate_button=UpdateOp(None,title=\"用户管理系统\",size=(1024,668))\r\n\t\t\tupdate_button.Show()\r\n\t\t\tself.Close(True)\r\n\r\n#继承COperation类,实现初始化操作界面\r\n\"\"\"\r\n进行数据库的查询操作\r\n\"\"\"\r\nclass InquireOp(COperation):\r\n\tdef __init__(self,*args,**kw):\r\n\t\t# ensure the parent's __init__ is called\r\n\t\tsuper(InquireOp,self).__init__(*args, **kw)\r\n\t\t#创建学生信息网格\r\n\t\tself.cgrid = 
self.CreateGrid()\r\n\t\tself.cgrid.Bind(wx.grid.EVT_GRID_LABEL_LEFT_CLICK,self.OnLabelleftClick)\r\n\t\t#添加到vbox_showop布局管理器\r\n\t\tself.vbox_showop.Add(self.cgrid,0,wx.CENTER | wx.TOP | wx.FIXED_MINSIZE,30)\r\n #此处的ClickButton用于事件之间的跳转\r\n\tdef ClickButton(self,event):\r\n\t\tBid = event.GetId()\r\n\t\tif Bid == 10:\r\n\t\t\tpass\r\n\t\telif Bid == 11:\r\n\t\t\tprint(\"添加操作!\")\r\n\t\t\tadd_button = AddOp(None,title=\"用户管理系统\",size=(1024,668))\r\n\t\t\tadd_button.Show()\r\n\t\t\tself.Close(True)\r\n\t\telif Bid == 12:\r\n\t\t\tprint(\"删除操作!\")\r\n\t\t\tdel_button = DelOp(None,title=\"用户管理系统\",size=(1024,668))\r\n\t\t\tdel_button.Show()\r\n\t\t\tself.Close(True)\r\n\t\telif Bid == 13:\r\n\t\t\tself.Close(True)\r\n\t\telif Bid == 14:\r\n\t\t\tprint(\"修改操作!\")\r\n\t\t\tupdate_button=UpdateOp(None,title=\"用户管理系统\",size=(1024,668))\r\n\t\t\tupdate_button.Show()\r\n\t\t\tself.Close(True)\r\n #创建用于显示数据的表格\r\n\tdef CreateGrid(self):\r\n\t\t#连接car_sale数据库\r\n\t\top = Sql_operation(\"car_sale\")\r\n\t\t#获取custom表中的学生信息,返回为二维元组\r\n\t\tnp = op.FindAll(\"custom\")\r\n\t\tcolumn_names = (\"客户姓名\",\"年龄\",\"性别\",\"联系方式\",\"业务记录\")\r\n\t\tcgrid = wx.grid.Grid(self.pnl)\r\n\t\tcgrid.CreateGrid(len(np),len(np[0]))\r\n\t\tfor row in range(len(np)):\r\n #表格横向为对应表中的属性,纵向为首个属性的数据\r\n\t\t\tcgrid.SetRowLabelValue(row,str(np[row][0]))\r\n\t\t\tfor col in range(1,len(np[row])):\r\n\t\t\t\tcgrid.SetColLabelValue(col-1,column_names[col])\r\n\t\t\t\tcgrid.SetCellValue(row,col-1,str(np[row][col]))\r\n\t\tcgrid.AutoSize()\r\n\t\treturn cgrid\r\n\r\n\tdef OnLabelleftClick(self,event):\r\n\t\t#连接car_sale数据库\r\n\t\top = Sql_operation(\"car_sale\")\r\n\t\tnp = op.FindAll(\"custom\")\r\n\t\tprint(\"RowIdx: {0}\".format(event.GetRow()))\r\n\t\tprint(\"ColIdx: {0}\".format(event.GetRow()))\r\n\t\tprint(np[event.GetRow()])\r\n\t\tevent.Skip()\r\n\r\n#继承COperation类,实现初始化操作界面\r\n\"\"\"\r\n数据库插入操作\r\n\"\"\"\r\nclass AddOp(COperation):\r\n\tdef __init__(self,*args,**kw):\r\n\t\tsuper(AddOp,self).__init__(*args, **kw)\r\n\t\t#创建表中属性文本框\r\n\t\tself.cname = wx.TextCtrl(self.pnl,size = (210,25))\r\n\t\tself.cage = wx.TextCtrl(self.pnl,size = (210,25))\r\n\t\tself.csex = wx.TextCtrl(self.pnl,size = (210,25))\r\n\t\tself.ctel = wx.TextCtrl(self.pnl,size = (210,25))\r\n\t\tself.Brecord = wx.TextCtrl(self.pnl,size = (210,25))\r\n\t\tself.add_affirm = wx.Button(self.pnl,label=\"添加\",size=(80,25))\r\n\t\t#为添加按钮组件绑定事件处理\r\n\t\tself.add_affirm.Bind(wx.EVT_BUTTON,self.AddAffirm)\r\n\r\n #创建静态框\r\n\t\ttext_name = wx.StaticBox(self.pnl,label=\"客户姓名\")\r\n\t\ttext_age = wx.StaticBox(self.pnl,label=\"年 龄\")\r\n\t\ttext_sex = wx.StaticBox(self.pnl,label=\"性 别\")\r\n\t\ttext_tel = wx.StaticBox(self.pnl,label=\"联系方式\")\r\n\t\ttext_record = wx.StaticBox(self.pnl,label=\"业务记录\")\r\n\t\t#创建水平方向box布局管理器\r\n\t\thbox_name = wx.StaticBoxSizer(text_name,wx.HORIZONTAL)\r\n\t\thbox_age = wx.StaticBoxSizer(text_age,wx.HORIZONTAL)\r\n\t\thbox_sex = wx.StaticBoxSizer(text_sex,wx.HORIZONTAL)\r\n\t\thbox_tel = wx.StaticBoxSizer(text_tel,wx.HORIZONTAL)\r\n\t\thbox_record = wx.StaticBoxSizer(text_record,wx.HORIZONTAL)\r\n\t\t#添加到hsbox布局管理器\r\n\t\thbox_name.Add(self.cname,0,wx.EXPAND | wx.BOTTOM,5)\r\n\t\thbox_age.Add(self.cage,0,wx.EXPAND | wx.BOTTOM,5)\r\n\t\thbox_sex.Add(self.csex,0,wx.EXPAND | wx.BOTTOM,5)\r\n\t\thbox_tel.Add(self.ctel,0,wx.EXPAND | wx.BOTTOM,5)\r\n\t\thbox_record.Add(self.Brecord,0,wx.EXPAND | 
wx.BOTTOM,5)\r\n\t\t#################################################################################\r\n\t\t#添加到vbox_showop布局管理器\r\n\t\tself.vbox_showop.Add(hbox_name,0,wx.CENTER | wx.TOP | wx.FIXED_MINSIZE,5)\r\n\t\tself.vbox_showop.Add(hbox_age,0,wx.CENTER | wx.TOP | wx.FIXED_MINSIZE,5)\r\n\t\tself.vbox_showop.Add(hbox_sex,0,wx.CENTER | wx.TOP | wx.FIXED_MINSIZE,5)\r\n\t\tself.vbox_showop.Add(hbox_tel,0,wx.CENTER | wx.TOP | wx.FIXED_MINSIZE,5)\r\n\t\tself.vbox_showop.Add(hbox_record,0,wx.CENTER | wx.TOP | wx.FIXED_MINSIZE,5)\r\n\t\tself.vbox_showop.Add(self.add_affirm,0,wx.CENTER | wx.TOP | wx.FIXED_MINSIZE,5)\r\n\r\n\tdef ClickButton(self,event):\r\n\t\tBid = event.GetId()\r\n\t\tif Bid == 10:\r\n\t\t\tprint(\"查询操作!\")\r\n\t\t\tinquire_button = InquireOp(None,title=\"用户管理系统\",size=(1024,668))\r\n\t\t\tinquire_button.Show()\r\n\t\t\tself.Close(True)\r\n\t\telif Bid == 11:\r\n\t\t\tpass\r\n\t\telif Bid == 12:\r\n\t\t\tprint(\"删除操作!\")\r\n\t\t\tdel_button = DelOp(None,title=\"用户管理系统\",size=(1024,668))\r\n\t\t\tdel_button.Show()\r\n\t\t\tself.Close(True)\r\n\t\telif Bid == 13:\r\n\t\t\tself.Close(True)\r\n\t\telif Bid == 14:\r\n\t\t\tprint(\"修改操作!\")\r\n\t\t\tupdate_button=UpdateOp(None,title=\"用户管理系统\",size=(1024,668))\r\n\t\t\tupdate_button.Show()\r\n\t\t\tself.Close(True)\r\n\r\n\tdef AddAffirm(self,event):\r\n\t\t#连接car_sale数据库\r\n\t\top = Sql_operation(\"car_sale\")\r\n\t\t#向stu_information表添加学生信息\r\n\t\tcname = self.cname.GetValue()\r\n\t\tprint(cname)\r\n\t\tcage = self.cage.GetValue()\r\n\t\tprint(cage)\r\n\t\tcsex = self.csex.GetValue()\r\n\t\tprint(csex)\r\n\t\tctel = self.ctel.GetValue()\r\n\t\tprint(ctel)\r\n\t\tBrecord = self.Brecord.GetValue()\r\n\t\tprint(Brecord)\r\n\t\tnp = op.CInsert(cname,cage,csex,ctel,Brecord)\r\n\r\nclass UpdateOp(COperation):\r\n\tdef __init__(self,*args,**kw):\r\n\t\tsuper(UpdateOp,self).__init__(*args, **kw)\r\n\t\t#创建表中属性文本框\r\n\t\tself.cname = wx.TextCtrl(self.pnl,size = (210,25))\r\n\t\tself.cage = wx.TextCtrl(self.pnl,size = (210,25))\r\n\t\tself.csex = wx.TextCtrl(self.pnl,size = (210,25))\r\n\t\tself.ctel = wx.TextCtrl(self.pnl,size = (210,25))\r\n\t\tself.Brecord = wx.TextCtrl(self.pnl,size = (210,25))\r\n\t\tself.update_affirm = wx.Button(self.pnl,label=\"修改\",size=(80,25))\r\n\t\t#为添加按钮组件绑定事件处理\r\n\t\tself.update_affirm.Bind(wx.EVT_BUTTON,self.UpdateAffirm)\r\n\r\n #创建静态框\r\n\t\ttext_name = wx.StaticBox(self.pnl,label=\"客户姓名\")\r\n\t\ttext_age = wx.StaticBox(self.pnl,label=\"年 龄\")\r\n\t\ttext_sex = wx.StaticBox(self.pnl,label=\"性 别\")\r\n\t\ttext_tel = wx.StaticBox(self.pnl,label=\"联系方式\")\r\n\t\ttext_record = wx.StaticBox(self.pnl,label=\"业务记录\")\r\n\t\t#创建水平方向box布局管理器\r\n\t\thbox_name = wx.StaticBoxSizer(text_name,wx.HORIZONTAL)\r\n\t\thbox_age = wx.StaticBoxSizer(text_age,wx.HORIZONTAL)\r\n\t\thbox_sex = wx.StaticBoxSizer(text_sex,wx.HORIZONTAL)\r\n\t\thbox_tel = wx.StaticBoxSizer(text_tel,wx.HORIZONTAL)\r\n\t\thbox_record = wx.StaticBoxSizer(text_record,wx.HORIZONTAL)\r\n\t\t#添加到hsbox布局管理器\r\n\t\thbox_name.Add(self.cname,0,wx.EXPAND | wx.BOTTOM,5)\r\n\t\thbox_age.Add(self.cage,0,wx.EXPAND | wx.BOTTOM,5)\r\n\t\thbox_sex.Add(self.csex,0,wx.EXPAND | wx.BOTTOM,5)\r\n\t\thbox_tel.Add(self.ctel,0,wx.EXPAND | wx.BOTTOM,5)\r\n\t\thbox_record.Add(self.Brecord,0,wx.EXPAND | wx.BOTTOM,5)\r\n\t\t#################################################################################\r\n\t\t#添加到vbox_showop布局管理器\r\n\t\tself.vbox_showop.Add(hbox_name,0,wx.CENTER | wx.TOP | wx.FIXED_MINSIZE,5)\r\n\t\tself.vbox_showop.Add(hbox_age,0,wx.CENTER | wx.TOP | 
wx.FIXED_MINSIZE,5)\r\n\t\tself.vbox_showop.Add(hbox_sex,0,wx.CENTER | wx.TOP | wx.FIXED_MINSIZE,5)\r\n\t\tself.vbox_showop.Add(hbox_tel,0,wx.CENTER | wx.TOP | wx.FIXED_MINSIZE,5)\r\n\t\tself.vbox_showop.Add(hbox_record,0,wx.CENTER | wx.TOP | wx.FIXED_MINSIZE,5)\r\n\t\tself.vbox_showop.Add(self.update_affirm,0,wx.CENTER | wx.TOP | wx.FIXED_MINSIZE,5)\r\n\tdef ClickButton(self,event):\r\n\t\tBid = event.GetId()\r\n\t\tif Bid == 10:\r\n\t\t\tprint(\"查询操作!\")\r\n\t\t\tinquire_button = InquireOp(None,title=\"用户管理系统\",size=(1024,668))\r\n\t\t\tinquire_button.Show()\r\n\t\t\tself.Close(True)\r\n\t\telif Bid == 11:\r\n\t\t\tprint(\"添加操作!\")\r\n\t\t\tadd_button = AddOp(None,title=\"用户管理系统\",size=(1024,668))\r\n\t\t\tadd_button.Show()\r\n\t\t\tself.Close(True)\r\n\t\telif Bid == 12:\r\n\t\t\tprint(\"删除操作!\")\r\n\t\t\tdel_button = DelOp(None,title=\"用户管理系统\",size=(1024,668))\r\n\t\t\tdel_button.Show()\r\n\t\t\tself.Close(True)\r\n\t\telif Bid == 13:\r\n\t\t\tself.Close(True)\r\n\t\telif Bid == 14:\r\n\t\t\tprint(\"修改操作!\")\r\n\t\t\tupdate_button=UpdateOp(None,title=\"用户管理系统\",size=(1024,668))\r\n\t\t\tupdate_button.Show()\r\n\t\t\tself.Close(True)\r\n\tdef UpdateAffirm(self,event):\r\n\t\t#连接car_sale数据库\r\n\t\top = Sql_operation(\"car_sale\")\r\n\t\t#向stu_information表添加学生信息\r\n\t\tcname = self.cname.GetValue()\r\n\t\tprint(cname)\r\n\t\tcage = self.cage.GetValue()\r\n\t\tprint(cage)\r\n\t\tcsex = self.csex.GetValue()\r\n\t\tprint(csex)\r\n\t\tctel = self.ctel.GetValue()\r\n\t\tprint(ctel)\r\n\t\tBrecord = self.Brecord.GetValue()\r\n\t\tprint(Brecord)\r\n\t\tnp = op.CUpdate(cname,cage,csex,ctel,Brecord)\r\n\r\n#继承InquireOp类,实现初始化操作界面\r\nclass DelOp(InquireOp):\r\n\tdef __init__(self,*args,**kw):\r\n\t\t# ensure the parent's __init__ is called\r\n\t\tsuper(DelOp,self).__init__(*args, **kw)\r\n\t\t#创建删除学员信息输入框、删除按钮\r\n\t\tself.del_id = wx.TextCtrl(self.pnl,pos = (407,78),size = (210,25))\r\n\t\tself.del_affirm = wx.Button(self.pnl,label=\"删除\",pos=(625,78),size=(80,25))\r\n\t\t#为删除按钮组件绑定事件处理\r\n\t\tself.del_affirm.Bind(wx.EVT_BUTTON,self.DelAffirm)\r\n\t\t#################################################################################\r\n\t\t#创建静态框\r\n\t\ttext_del = wx.StaticBox(self.pnl,label=\"请选择需要删除的用户姓名\")\r\n\t\t#创建水平方向box布局管理器\r\n\t\thbox_del = wx.StaticBoxSizer(text_del,wx.HORIZONTAL)\r\n\t\t#添加到hbox_name布局管理器\r\n\t\thbox_del.Add(self.del_id,0,wx.EXPAND | wx.BOTTOM,5)\r\n\t\t#添加到vbox_showop布局管理器\r\n\t\tself.vbox_showop.Add(hbox_del,0,wx.CENTER | wx.TOP | wx.FIXED_MINSIZE,5)\r\n\t\tself.vbox_showop.Add(self.del_affirm,0,wx.CENTER | wx.TOP | wx.FIXED_MINSIZE,5)\r\n\r\n\r\n\tdef ClickButton(self,event):\r\n\t\tBid = event.GetId()\r\n\t\tif Bid == 10:\r\n\t\t\tprint(\"查询操作!\")\r\n\t\t\tinquire_button = InquireOp(None,title=\"用户管理系统\",size=(1024,668))\r\n\t\t\tinquire_button.Show()\r\n\t\t\tself.Close(True)\r\n\t\telif Bid == 11:\r\n\t\t\tprint(\"添加操作!\")\r\n\t\t\tadd_button = AddOp(None,title=\"用户管理系统\",size=(1024,668))\r\n\t\t\tadd_button.Show()\r\n\t\t\tself.Close(True)\r\n\t\telif Bid == 12:\r\n\t\t\tpass\r\n\t\telif Bid == 13:\r\n\t\t\tself.Close(True)\r\n\t\telif Bid == 14:\r\n\t\t\tprint(\"修改操作!\")\r\n\t\t\tupdate_button = UpdateOp(None,title=\"用户管理系统\",size=(1024,668))\r\n\t\t\tupdate_button.Show()\r\n\t\t\tself.Close(True)\r\n\r\n\tdef DelAffirm(self,event):\r\n\t\t#连接car_sale数据库\r\n\t\top = Sql_operation(\"car_sale\")\r\n\t\t#向stu_information表添加学生信息\r\n\t\tdel_id = self.del_id.GetValue()\r\n\t\tprint(del_id)\r\n\t\tnp = op.CDel(str(del_id))\r\n\r\n\t\tdel_button = 
DelOp(None,title=\"用户管理系统\",size=(1024,668))\r\n\t\tdel_button.Show()\r\n\t\tself.Close(True)\r\n\"\"\"\r\nif __name__ == '__main__':\r\n\tapp = wx.App()\r\n\tlogin = COperation(None,title=\"用户管理系统\",size=(1024,668))\r\n\tlogin.Show()\r\n\tapp.MainLoop()\r\n\"\"\"" }, { "alpha_fraction": 0.6143944263458252, "alphanum_fraction": 0.643766462802887, "avg_line_length": 34.62239456176758, "blob_id": "4fc70317d079bd2d50b98ae8c781a0672b817c89", "content_id": "a81dc3298e4d4b3cb2d5add919b11d93c7c87e45", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 15705, "license_type": "permissive", "max_line_length": 86, "num_lines": 384, "path": "/SOperation.py", "repo_name": "circleacid/car_sale", "src_encoding": "UTF-8", "text": "import wx\r\nimport wx.grid\r\nfrom mydb import Sql_operation\r\nclass SOperation(wx.Frame):\r\n\t'''\r\n\t操作界面\r\n\t'''\r\n\tdef __init__(self,*args,**kw):\r\n\t\t# ensure the parent's __init__ is called\r\n\t\tsuper(SOperation,self).__init__(*args, **kw)\r\n\t\t#设置窗口屏幕居中\r\n\t\tself.Center()\r\n\t\t#创建窗口\r\n\t\tself.pnl = wx.Panel(self)\r\n\t\t#调用操作界面函数\r\n\t\tself.OperationInterface()\r\n\r\n\tdef OperationInterface(self):\r\n\t\t#创建垂直方向box布局管理器\r\n\t\tself.vbox = wx.BoxSizer(wx.VERTICAL)\r\n\t\t#################################################################################\r\n\t\t#创建logo静态文本,设置字体属性\r\n\t\tlogo = wx.StaticText(self.pnl,label=\"销售信息管理\")\r\n\t\tfont = logo.GetFont()\r\n\t\tfont.PointSize += 30\r\n\t\tfont = font.Bold()\r\n\t\tlogo.SetFont(font)\r\n\t\t#添加logo静态文本到vbox布局管理中\r\n\t\tself.vbox.Add(logo,proportion=0,flag=wx.FIXED_MINSIZE | wx.TOP | wx.CENTER,border=5)\r\n\t\t#################################################################################\r\n\t\t#创建左侧的静态框\r\n\t\ttext_frame = wx.StaticBox(self.pnl,label=\"选择操作\")\r\n\t\t#创建垂直方向box布局管理器\r\n\t\tvbox_button = wx.StaticBoxSizer(text_frame,wx.VERTICAL)\r\n\t\t#创建操作按钮、绑定事件处理\r\n\t\tcheck_button = wx.Button(self.pnl,id=10,label=\"查看销售信息\",size=(150,50))\r\n\t\tadd_button = wx.Button(self.pnl,id=11,label=\"添加销售信息\",size=(150,50))\r\n\t\txls_button = wx.Button(self.pnl,id=12,label=\"导出数据\",size=(150,50))\r\n\t\tupdate_button = wx.Button(self.pnl,id=14,label=\"修改销售信息\",size=(150,50))\r\n\t\tquit_button = wx.Button(self.pnl,id=13,label=\"退出系统\",size=(150,50))\r\n\t\tself.Bind(wx.EVT_BUTTON,self.ClickButton,id=10,id2=14)\r\n\t\t#添加操作按钮到vbox布局管理器\r\n\t\tvbox_button.Add(check_button,0,wx.EXPAND | wx.BOTTOM,40)\r\n\t\tvbox_button.Add(add_button,0,wx.EXPAND | wx.BOTTOM,40)\r\n\t\tvbox_button.Add(xls_button,0,wx.EXPAND | wx.BOTTOM,40)\r\n\t\tvbox_button.Add(update_button,0,wx.EXPAND | wx.BOTTOM,40)\r\n\t\tvbox_button.Add(quit_button,0,wx.EXPAND | wx.BOTTOM,200)\r\n\t\t#创建右侧静态框\r\n\t\tsb_show_operation = wx.StaticBox(self.pnl,label=\"显示/操作窗口\",size=(800,450))\r\n\t\t#创建垂直方向box布局管理器\r\n\t\tself.vbox_showop = wx.StaticBoxSizer(sb_show_operation,wx.VERTICAL)\r\n\t\t#创建水平方向box布局管理器\r\n\t\thbox = wx.BoxSizer()\r\n\t\thbox.Add(vbox_button,0,wx.EXPAND | wx.BOTTOM,5)\r\n\t\thbox.Add(self.vbox_showop,0,wx.EXPAND | wx.BOTTOM,5)\r\n\t\t#将hbox添加到垂直box\r\n\t\tself.vbox.Add(hbox,proportion=0,flag=wx.CENTER)\r\n\t\t#################################################################################\r\n\t\tself.pnl.SetSizer(self.vbox)\r\n #通过对应的按钮进行事件的跳转\r\n\tdef ClickButton(self,event):\r\n\t\tBid = event.GetId()\r\n\t\tif Bid == 10:\r\n\t\t\tprint(\"查询操作!\")\r\n\t\t\tinquire_button = 
InquireOp(None,title=\"销售管理系统\",size=(1024,720))\r\n\t\t\tinquire_button.Show()\r\n\t\t\tself.Close(True)\r\n\t\telif Bid == 11:\r\n\t\t\tprint(\"添加操作!\")\r\n\t\t\tadd_button = AddOp(None,title=\"销售管理系统\",size=(1024,720))\r\n\t\t\tadd_button.Show()\r\n\t\t\tself.Close(True)\r\n\t\telif Bid == 12:\r\n\t\t\tprint(\"导出数据\")\r\n\t\t\txls_button = ExcelOp(None,title=\"销售管理系统\",size=(1024,720))\r\n\t\t\txls_button.Show()\r\n\t\t\tself.Close(True)\r\n\t\telif Bid == 13:\r\n\t\t\tself.Close(True)\r\n\t\telif Bid == 14:\r\n\t\t\tprint(\"修改操作!\")\r\n\t\t\tupdate_button=UpdateOp(None,title=\"销售管理系统\",size=(1024,668))\r\n\t\t\tupdate_button.Show()\r\n\t\t\tself.Close(True)\r\n\r\n#继承SOperation类,实现初始化操作界面\r\n\"\"\"\r\n进行数据库的查询操作\r\n\"\"\"\r\nclass InquireOp(SOperation):\r\n\tdef __init__(self,*args,**kw):\r\n\t\t# ensure the parent's __init__ is called\r\n\t\tsuper(InquireOp,self).__init__(*args, **kw)\r\n\t\t#创建销售信息网格\r\n\t\tself.cgrid = self.CreateGrid()\r\n\t\tself.cgrid.Bind(wx.grid.EVT_GRID_LABEL_LEFT_CLICK,self.OnLabelleftClick)\r\n\t\t#添加到vbox_showop布局管理器\r\n\t\tself.vbox_showop.Add(self.cgrid,0,wx.CENTER | wx.TOP | wx.FIXED_MINSIZE,30)\r\n #此处的ClickButton用于事件之间的跳转\r\n\tdef ClickButton(self,event):\r\n\t\tBid = event.GetId()\r\n\t\tif Bid == 10:\r\n\t\t\tpass\r\n\t\telif Bid == 11:\r\n\t\t\tprint(\"添加操作!\")\r\n\t\t\tadd_button = AddOp(None,title=\"销售管理系统\",size=(1024,720))\r\n\t\t\tadd_button.Show()\r\n\t\t\tself.Close(True)\r\n\t\telif Bid == 12:\r\n\t\t\tprint(\"导出数据\")\r\n\t\t\txls_button = ExcelOp(None,title=\"销售管理系统\",size=(1024,720))\r\n\t\t\txls_button.Show()\r\n\t\t\tself.Close(True)\r\n\t\telif Bid == 13:\r\n\t\t\tself.Close(True)\r\n\t\telif Bid == 14:\r\n\t\t\tprint(\"修改操作!\")\r\n\t\t\tupdate_button=UpdateOp(None,title=\"销售管理系统\",size=(1024,668))\r\n\t\t\tupdate_button.Show()\r\n\t\t\tself.Close(True)\r\n #创建用于显示数据的表格\r\n\tdef CreateGrid(self):\r\n\t\t#连接car_sale数据库\r\n\t\top = Sql_operation(\"car_sale\")\r\n\t\t#获取car表中的学生信息,返回为二维元组\r\n\t\tnp = op.FindAll(\"sale\")\r\n\t\tcolumn_types = (\"销售编号\",\"车辆编号\",\"销售车型\",\"车辆颜色\",\"销售日期\",\"销售数量\",\"销售员\",\"买家\")\r\n\t\tcgrid = wx.grid.Grid(self.pnl)\r\n\t\t#CreateGrid(行数,列数)\r\n\t\tcgrid.CreateGrid(len(np),len(np[0])-1)\r\n\t\tfor row in range(len(np)):\r\n #表格横向为对应表中的属性,纵向为首个属性的数据\r\n\t\t\t#row[0]要显示的为第一列\r\n\t\t\tcgrid.SetRowLabelValue(row,str(np[row][0]))\r\n\t\t\tfor col in range(1,len(np[row])):\r\n\t\t\t\tcgrid.SetColLabelValue(col-1,column_types[col])\r\n\t\t\t\tcgrid.SetCellValue(row,col-1,str(np[row][col]))\r\n\t\tcgrid.AutoSize()\r\n\t\treturn cgrid\r\n\r\n\tdef OnLabelleftClick(self,event):\r\n\t\t#连接car_sale数据库\r\n\t\top = Sql_operation(\"car_sale\")\r\n\t\tnp = op.FindAll(\"sale\")\r\n\t\tprint(\"RowIdx: {0}\".format(event.GetRow()))\r\n\t\tprint(\"ColIdx: {0}\".format(event.GetRow()))\r\n\t\tprint(np[event.GetRow()])\r\n\t\tevent.Skip()\r\n\r\n#继承SOperation类,实现初始化操作界面\r\n\"\"\"\r\n数据库插入操作\r\n\"\"\"\r\nclass AddOp(SOperation):\r\n\tdef __init__(self,*args,**kw):\r\n\t\tsuper(AddOp,self).__init__(*args, **kw)\r\n\t\t#创建表中属性文本框\r\n\t\tself.sale_no=wx.TextCtrl(self.pnl,size=(210,25))\r\n\t\tself.sale_car = wx.TextCtrl(self.pnl,size = (210,25))\r\n\t\tself.sale_type = wx.TextCtrl(self.pnl,size = (210,25))\r\n\t\tself.sale_color = wx.TextCtrl(self.pnl,size = (210,25))\r\n\t\tself.sale_date = wx.TextCtrl(self.pnl,size = (210,25))\r\n\t\tself.sale_num = wx.TextCtrl(self.pnl,size = (210,25))\r\n\t\tself.sale_man=wx.TextCtrl(self.pnl,size=(210,25))\r\n\t\tself.add_affirm = 
wx.Button(self.pnl,label=\"添加\",size=(80,25))\r\n\t\t#为添加按钮组件绑定事件处理\r\n\t\tself.add_affirm.Bind(wx.EVT_BUTTON,self.AddAffirm)\r\n\r\n #创建静态框\r\n\t\ttext_no=wx.StaticBox(self.pnl,label=\"销售编号\")\r\n\t\ttext_car = wx.StaticBox(self.pnl,label=\"车辆编号\")\r\n\t\ttext_type = wx.StaticBox(self.pnl,label=\"销售车型\")\r\n\t\ttext_color = wx.StaticBox(self.pnl,label=\"车辆颜色\")\r\n\t\ttext_date = wx.StaticBox(self.pnl,label=\"销售日期\")\r\n\t\ttext_num = wx.StaticBox(self.pnl,label=\"销售数量\")\r\n\t\ttext_man=wx.StaticBox(self.pnl,label=\"销售员\")\r\n\t\t#创建水平方向box布局管理器\r\n\t\thbox_no=wx.StaticBoxSizer(text_no,wx.HORIZONTAL)\r\n\t\thbox_car = wx.StaticBoxSizer(text_car,wx.HORIZONTAL)\r\n\t\thbox_type = wx.StaticBoxSizer(text_type,wx.HORIZONTAL)\r\n\t\thbox_color = wx.StaticBoxSizer(text_color,wx.HORIZONTAL)\r\n\t\thbox_date = wx.StaticBoxSizer(text_date,wx.HORIZONTAL)\r\n\t\thbox_num = wx.StaticBoxSizer(text_num,wx.HORIZONTAL)\r\n\t\thbox_man=wx.StaticBoxSizer(text_man,wx.HORIZONTAL)\r\n\t\t#添加到hsbox布局管理器\r\n\t\thbox_no.Add(self.sale_no,0,wx.EXPAND | wx.BOTTOM,5)\r\n\t\thbox_car.Add(self.sale_car,0,wx.EXPAND | wx.BOTTOM,5)\r\n\t\thbox_type.Add(self.sale_type,0,wx.EXPAND | wx.BOTTOM,5)\r\n\t\thbox_color.Add(self.sale_color,0,wx.EXPAND | wx.BOTTOM,5)\r\n\t\thbox_date.Add(self.sale_date,0,wx.EXPAND | wx.BOTTOM,5)\r\n\t\thbox_num.Add(self.sale_num,0,wx.EXPAND | wx.BOTTOM,5)\r\n\t\thbox_man.Add(self.sale_man,0,wx.EXPAND | wx.BOTTOM,5)\r\n\t\t#################################################################################\r\n\t\t#添加到vbox_showop布局管理器\r\n\t\tself.vbox_showop.Add(hbox_no,0,wx.CENTER | wx.TOP | wx.FIXED_MINSIZE,5)\r\n\t\tself.vbox_showop.Add(hbox_car,0,wx.CENTER | wx.TOP | wx.FIXED_MINSIZE,5)\r\n\t\tself.vbox_showop.Add(hbox_type,0,wx.CENTER | wx.TOP | wx.FIXED_MINSIZE,5)\r\n\t\tself.vbox_showop.Add(hbox_color,0,wx.CENTER | wx.TOP | wx.FIXED_MINSIZE,5)\r\n\t\tself.vbox_showop.Add(hbox_date,0,wx.CENTER | wx.TOP | wx.FIXED_MINSIZE,5)\r\n\t\tself.vbox_showop.Add(hbox_num,0,wx.CENTER | wx.TOP | wx.FIXED_MINSIZE,5)\r\n\t\tself.vbox_showop.Add(hbox_man,0,wx.CENTER | wx.TOP | wx.FIXED_MINSIZE,5)\r\n\t\tself.vbox_showop.Add(self.add_affirm,0,wx.CENTER | wx.TOP | wx.FIXED_MINSIZE,5)\r\n\r\n\tdef ClickButton(self,event):\r\n\t\tBid = event.GetId()\r\n\t\tif Bid == 10:\r\n\t\t\tprint(\"查询操作!\")\r\n\t\t\tinquire_button = InquireOp(None,title=\"销售管理系统\",size=(1024,720))\r\n\t\t\tinquire_button.Show()\r\n\t\t\tself.Close(True)\r\n\t\telif Bid == 11:\r\n\t\t\tpass\r\n\t\telif Bid == 12:\r\n\t\t\tprint(\"导出数据\")\r\n\t\t\txls_button = ExcelOp(None,title=\"销售管理系统\",size=(1024,720))\r\n\t\t\txls_button.Show()\r\n\t\t\tself.Close(True)\r\n\t\telif Bid == 13:\r\n\t\t\tself.Close(True)\r\n\t\telif Bid == 14:\r\n\t\t\tprint(\"修改操作!\")\r\n\t\t\tupdate_button=UpdateOp(None,title=\"销售管理系统\",size=(1024,668))\r\n\t\t\tupdate_button.Show()\r\n\t\t\tself.Close(True)\r\n\tdef AddAffirm(self,event):\r\n\t\t#连接car_sale数据库\r\n\t\top = Sql_operation(\"car_sale\")\r\n\t\tsale_car = self.sale_car.GetValue()\r\n\t\tprint(sale_car)\r\n\t\tsale_type = self.sale_type.GetValue()\r\n\t\tprint(sale_type)\r\n\t\tsale_color = self.sale_color.GetValue()\r\n\t\tprint(sale_color)\r\n\t\tsale_date = self.sale_date.GetValue()\r\n\t\tprint(sale_date)\r\n\t\tsale_num = self.sale_num.GetValue()\r\n\t\tprint(sale_num)\r\n\t\tsale_man=self.sale_man.GetValue()\r\n\t\tprint(sale_man)\r\n\t\tnp = op.SInsert(sale_car,sale_type,sale_color,sale_date,sale_num,sale_man)\r\nclass UpdateOp(SOperation):\r\n\tdef 
__init__(self,*args,**kw):\r\n\t\tsuper(UpdateOp,self).__init__(*args, **kw)\r\n\t\t#创建表中属性文本框\r\n\t\tself.sale_no = wx.TextCtrl(self.pnl,size = (210,25))\r\n\t\tself.sale_car = wx.TextCtrl(self.pnl,size = (210,25))\r\n\t\tself.sale_type = wx.TextCtrl(self.pnl,size = (210,25))\r\n\t\tself.sale_color = wx.TextCtrl(self.pnl,size = (210,25))\r\n\t\tself.sale_date = wx.TextCtrl(self.pnl,size = (210,25))\r\n\t\tself.sale_num = wx.TextCtrl(self.pnl,size = (210,25))\r\n\t\tself.sale_man=wx.TextCtrl(self.pnl,size=(210,25))\r\n\t\tself.update_affirm = wx.Button(self.pnl,label=\"修改\",size=(80,25))\r\n\t\t#为添加按钮组件绑定事件处理\r\n\t\tself.update_affirm.Bind(wx.EVT_BUTTON,self.UpdateAffirm)\r\n\r\n #创建静态框\r\n\t\ttext_no = wx.StaticBox(self.pnl,label=\"销售编号\")\r\n\t\ttext_car = wx.StaticBox(self.pnl,label=\"车辆编号\")\r\n\t\ttext_type = wx.StaticBox(self.pnl,label=\"销售车型\")\r\n\t\ttext_color = wx.StaticBox(self.pnl,label=\"车辆颜色\")\r\n\t\ttext_date = wx.StaticBox(self.pnl,label=\"销售日期\")\r\n\t\ttext_num = wx.StaticBox(self.pnl,label=\"销售数量\")\r\n\t\ttext_man=wx.StaticBox(self.pnl,label=\"销售员\")\r\n\t\t#创建水平方向box布局管理器\r\n\t\thbox_no = wx.StaticBoxSizer(text_car,wx.HORIZONTAL)\r\n\t\thbox_car = wx.StaticBoxSizer(text_car,wx.HORIZONTAL)\r\n\t\thbox_type = wx.StaticBoxSizer(text_type,wx.HORIZONTAL)\r\n\t\thbox_color = wx.StaticBoxSizer(text_color,wx.HORIZONTAL)\r\n\t\thbox_date = wx.StaticBoxSizer(text_date,wx.HORIZONTAL)\r\n\t\thbox_num = wx.StaticBoxSizer(text_num,wx.HORIZONTAL)\r\n\t\thbox_man=wx.StaticBoxSizer(text_man,wx.HORIZONTAL)\r\n\t\t#添加到hsbox布局管理器\r\n\t\thbox_no.Add(self.sale_car,0,wx.EXPAND | wx.BOTTOM,5)\r\n\t\thbox_car.Add(self.sale_car,0,wx.EXPAND | wx.BOTTOM,5)\r\n\t\thbox_type.Add(self.sale_type,0,wx.EXPAND | wx.BOTTOM,5)\r\n\t\thbox_color.Add(self.sale_color,0,wx.EXPAND | wx.BOTTOM,5)\r\n\t\thbox_date.Add(self.sale_date,0,wx.EXPAND | wx.BOTTOM,5)\r\n\t\thbox_num.Add(self.sale_num,0,wx.EXPAND | wx.BOTTOM,5)\r\n\t\thbox_man.Add(self.sale_man,0,wx.EXPAND | wx.BOTTOM,5)\r\n\t\t#################################################################################\r\n\t\t#添加到vbox_showop布局管理器\r\n\t\tself.vbox_showop.Add(hbox_no,0,wx.CENTER | wx.TOP | wx.FIXED_MINSIZE,5)\r\n\t\tself.vbox_showop.Add(hbox_car,0,wx.CENTER | wx.TOP | wx.FIXED_MINSIZE,5)\r\n\t\tself.vbox_showop.Add(hbox_type,0,wx.CENTER | wx.TOP | wx.FIXED_MINSIZE,5)\r\n\t\tself.vbox_showop.Add(hbox_color,0,wx.CENTER | wx.TOP | wx.FIXED_MINSIZE,5)\r\n\t\tself.vbox_showop.Add(hbox_date,0,wx.CENTER | wx.TOP | wx.FIXED_MINSIZE,5)\r\n\t\tself.vbox_showop.Add(hbox_num,0,wx.CENTER | wx.TOP | wx.FIXED_MINSIZE,5)\r\n\t\tself.vbox_showop.Add(hbox_man,0,wx.CENTER | wx.TOP | wx.FIXED_MINSIZE,5)\r\n\t\tself.vbox_showop.Add(self.update_affirm,0,wx.CENTER | wx.TOP | wx.FIXED_MINSIZE,5)\r\n\tdef ClickButton(self,event):\r\n\t\tBid = event.GetId()\r\n\t\tif Bid == 10:\r\n\t\t\tprint(\"查询操作!\")\r\n\t\t\tinquire_button = InquireOp(None,title=\"销售管理系统\",size=(1024,668))\r\n\t\t\tinquire_button.Show()\r\n\t\t\tself.Close(True)\r\n\t\telif Bid == 11:\r\n\t\t\tprint(\"添加操作!\")\r\n\t\t\tupdate_buttom=UpdateOp(None,title=\"销售管理系统\",size=(1024,668))\r\n\t\t\tupdate_buttom.Show()\r\n\t\t\tself.Close(True)\r\n\t\telif Bid == 12:\r\n\t\t\tprint(\"导出数据\")\r\n\t\t\txls_button = ExcelOp(None,title=\"销售管理系统\",size=(1024,668))\r\n\t\t\txls_button.Show()\r\n\t\t\tself.Close(True)\r\n\t\telif Bid == 13:\r\n\t\t\tself.Close(True)\r\n\t\telif Bid == 14:\r\n\t\t\tpass\r\n\tdef UpdateAffirm(self,event):\r\n\t\t#连接car_sale数据库\r\n\t\top = Sql_operation(\"car_sale\")\r\n\t\tsale_car = 
self.sale_car.GetValue()\r\n\t\tprint(sale_car)\r\n\t\tsale_type = self.sale_type.GetValue()\r\n\t\tprint(sale_type)\r\n\t\tsale_color = self.sale_color.GetValue()\r\n\t\tprint(sale_color)\r\n\t\tsale_date = self.sale_date.GetValue()\r\n\t\tprint(sale_date)\r\n\t\tsale_num = self.sale_num.GetValue()\r\n\t\tprint(sale_num)\r\n\t\tsale_man=self.sale_man.GetValue()\r\n\t\tprint(sale_man)\r\n\t\tnp = op.SUpdate(sale_car,sale_type,sale_color,sale_date,sale_num,sale_man)\r\n#继承InquireOp类,实现初始化操作界面\r\nclass ExcelOp(InquireOp):\r\n\tdef __init__(self,*args,**kw):\r\n\t\t# ensure the parent's __init__ is called\r\n\t\tsuper(ExcelOp,self).__init__(*args, **kw)\r\n\t\t#创建删除销售输入框、删除按钮\r\n\t\tself.excel_affrim = wx.Button(self.pnl,label=\"导出\",pos=(625,400),size=(80,25))\r\n\t\t#为删除按钮组件绑定事件处理\r\n\t\tself.excel_affrim.Bind(wx.EVT_BUTTON,self.ExcelAffrim)\r\n\t\t#################################################################################\r\n\t\t#创建静态框\r\n\t\ttext_xls = wx.StaticBox(self.pnl,label=\"导出销售数据\")\r\n\t\t#创建水平方向box布局管理器\r\n\t\thbox_xls = wx.StaticBoxSizer(text_xls,wx.HORIZONTAL)\r\n\t\t#添加到hbox_type布局管理器\r\n\t\thbox_xls.Add(self.excel_affrim,0,wx.EXPAND | wx.BOTTOM,5)\r\n\t\t#添加到vbox_showop布局管理器\r\n\t\tself.vbox_showop.Add(hbox_xls,0,wx.CENTER | wx.TOP | wx.FIXED_MINSIZE,5)\r\n\t\tself.vbox_showop.Add(self.excel_affrim,0,wx.CENTER | wx.TOP | wx.FIXED_MINSIZE,5)\r\n\r\n\tdef ClickButton(self,event):\r\n\t\tBid = event.GetId()\r\n\t\tif Bid == 10:\r\n\t\t\tprint(\"查询操作!\")\r\n\t\t\tinquire_button = InquireOp(None,title=\"销售管理系统\",size=(1024,720))\r\n\t\t\tinquire_button.Show()\r\n\t\t\tself.Close(True)\r\n\t\telif Bid == 11:\r\n\t\t\tprint(\"添加操作!\")\r\n\t\t\tadd_button = AddOp(None,title=\"销售管理系统\",size=(1024,720))\r\n\t\t\tadd_button.Show()\r\n\t\t\tself.Close(True)\r\n\t\telif Bid == 12:\r\n\t\t\tpass\r\n\t\telif Bid == 13:\r\n\t\t\tself.Close(True)\r\n\t\telif Bid == 14:\r\n\t\t\tprint(\"修改操作!\")\r\n\t\t\tupdate_button=UpdateOp(None,title=\"销售管理系统\",size=(1024,668))\r\n\t\t\tupdate_button.Show()\r\n\t\t\tself.Close(True)\r\n\tdef ExcelAffrim(self,event):\r\n\t\t#连接car_sale数据库\r\n\t\top = Sql_operation(\"car_sale\")\r\n\r\n\t\tprint(\"导出销售数据\")\r\n\t\tnp = op.SExcel()\r\n\r\n\t\txls_button = ExcelOp(None,title=\"销售管理系统\",size=(1024,720))\r\n\t\txls_button.Show()\r\n\t\tself.Close(True)\r\n\"\"\"\r\nif __name__ == '__main__':\r\n\tapp = wx.App()\r\n\tlogin = SOperation(None,title=\"CSDN学生信息管理系统\",size=(1024,668))\r\n\tlogin.Show()\r\n\tapp.MainLoop()\r\n\"\"\"" }, { "alpha_fraction": 0.6108906269073486, "alphanum_fraction": 0.6406525373458862, "avg_line_length": 34.00529098510742, "blob_id": "0295ec66e613479b6ca307061b8c597f7f078da8", "content_id": "86253891b0c63560dcf0b514e32c5b191cae4874", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 15232, "license_type": "permissive", "max_line_length": 86, "num_lines": 378, "path": "/CarOperation.py", "repo_name": "circleacid/car_sale", "src_encoding": "UTF-8", "text": "import wx\r\nimport wx.grid\r\nfrom mydb import Sql_operation\r\n#跳转至管理界面\r\nclass CarOperation(wx.Frame):\r\n\t'''\r\n\t操作界面\r\n\t'''\r\n\tdef __init__(self,*args,**kw):\r\n\t\t# ensure the parent's __init__ is called\r\n\t\tsuper(CarOperation,self).__init__(*args, **kw)\r\n\t\t#设置窗口屏幕居中\r\n\t\tself.Center()\r\n\t\t#创建窗口\r\n\t\tself.pnl = wx.Panel(self)\r\n\t\t#调用操作界面函数\r\n\t\tself.OperationInterface()\r\n\r\n\tdef OperationInterface(self):\r\n\t\t#创建垂直方向box布局管理器\r\n\t\tself.vbox = 
wx.BoxSizer(wx.VERTICAL)\r\n\t\t#################################################################################\r\n\t\t#创建logo静态文本,设置字体属性\r\n\t\tlogo = wx.StaticText(self.pnl,label=\"车辆信息管理\")\r\n\t\tfont = logo.GetFont()\r\n\t\tfont.PointSize += 30\r\n\t\tfont = font.Bold()\r\n\t\tlogo.SetFont(font)\r\n\t\t#添加logo静态文本到vbox布局管理中\r\n\t\tself.vbox.Add(logo,proportion=0,flag=wx.FIXED_MINSIZE | wx.TOP | wx.CENTER,border=5)\r\n\t\t#################################################################################\r\n\t\t#创建左侧的静态框\r\n\t\ttext_frame = wx.StaticBox(self.pnl,label=\"选择操作\")\r\n\t\t#创建垂直方向box布局管理器\r\n\t\tvbox_button = wx.StaticBoxSizer(text_frame,wx.VERTICAL)\r\n\t\t#创建操作按钮、绑定事件处理\r\n\t\tcheck_button = wx.Button(self.pnl,id=10,label=\"查看车辆信息\",size=(150,50))\r\n\t\tadd_button = wx.Button(self.pnl,id=11,label=\"添加车辆信息\",size=(150,50))\r\n\t\tdelete_button = wx.Button(self.pnl,id=12,label=\"删除车辆信息\",size=(150,50))\r\n\t\tquit_button = wx.Button(self.pnl,id=13,label=\"退出系统\",size=(150,50))\r\n\t\tupdate_button=wx.Button(self.pnl,id=14,label=\"修改车辆信息\",size=(150,50))\r\n\t\tself.Bind(wx.EVT_BUTTON,self.ClickButton,id=10,id2=14)\r\n\t\t#添加操作按钮到vbox布局管理器\r\n\t\tvbox_button.Add(check_button,0,wx.EXPAND | wx.BOTTOM,40)\r\n\t\tvbox_button.Add(add_button,0,wx.EXPAND | wx.BOTTOM,40)\r\n\t\tvbox_button.Add(delete_button,0,wx.EXPAND | wx.BOTTOM,40)\r\n\t\tvbox_button.Add(update_button,0,wx.EXPAND | wx.BOTTOM,40)\r\n\t\tvbox_button.Add(quit_button,0,wx.EXPAND | wx.BOTTOM,200)\r\n\t\t#创建右侧静态框\r\n\t\tright_showop = wx.StaticBox(self.pnl,label=\"显示/操作窗口\",size=(800,500))\r\n\t\t#创建垂直方向box布局管理器\r\n\t\tself.vbox_showop = wx.StaticBoxSizer(right_showop,wx.VERTICAL)\r\n\t\t#创建水平方向box布局管理器\r\n\t\thbox = wx.BoxSizer()\r\n\t\thbox.Add(vbox_button,0,wx.EXPAND | wx.BOTTOM,5)\r\n\t\thbox.Add(self.vbox_showop,0,wx.EXPAND | wx.BOTTOM,5)\r\n\t\t#将hbox添加到垂直box\r\n\t\tself.vbox.Add(hbox,proportion=0,flag=wx.CENTER)\r\n\t\t#################################################################################\r\n\t\tself.pnl.SetSizer(self.vbox)\r\n #通过对应的按钮进行事件的跳转\r\n\tdef ClickButton(self,event):\r\n\t\tBid = event.GetId()\r\n\t\tif Bid == 10:\r\n\t\t\tprint(\"查询操作!\")\r\n\t\t\tinquire_button = InquireOp(None,title=\"车辆管理系统\",size=(1024,720))\r\n\t\t\tinquire_button.Show()\r\n\t\t\tself.Close(True)\r\n\t\telif Bid == 11:\r\n\t\t\tprint(\"添加操作!\")\r\n\t\t\tadd_button = AddOp(None,title=\"车辆管理系统\",size=(1024,720))\r\n\t\t\tadd_button.Show()\r\n\t\t\tself.Close(True)\r\n\t\telif Bid == 12:\r\n\t\t\tprint(\"删除操作!\")\r\n\t\t\tdel_button = DelOp(None,title=\"车辆管理系统\",size=(1024,720))\r\n\t\t\tdel_button.Show()\r\n\t\t\tself.Close(True)\r\n\t\telif Bid == 13:\r\n\t\t\tself.Close(True)\r\n\t\telif Bid == 14:\r\n\t\t\tprint(\"修改操作!\")\r\n\t\t\tupdate_button=UpdateOp(None,title=\"用户管理系统\",size=(1024,668))\r\n\t\t\tupdate_button.Show()\r\n\t\t\tself.Close(True)\r\n\r\n#继承CarOperation类,实现初始化操作界面\r\n\"\"\"\r\n进行数据库的查询操作\r\n\"\"\"\r\nclass InquireOp(CarOperation):\r\n\tdef __init__(self,*args,**kw):\r\n\t\t# ensure the parent's __init__ is called\r\n\t\tsuper(InquireOp,self).__init__(*args, **kw)\r\n\t\t#创建学生信息网格\r\n\t\tself.cgrid = self.CreateGrid()\r\n\t\tself.cgrid.Bind(wx.grid.EVT_GRID_LABEL_LEFT_CLICK,self.OnLabelleftClick)\r\n\t\t#添加到vbox_showop布局管理器\r\n\t\tself.vbox_showop.Add(self.cgrid,0,wx.CENTER | wx.TOP | wx.FIXED_MINSIZE,30)\r\n #此处的ClickButton用于事件之间的跳转\r\n\tdef ClickButton(self,event):\r\n\t\tBid = event.GetId()\r\n\t\tif Bid == 10:\r\n\t\t\tpass\r\n\t\telif Bid == 11:\r\n\t\t\tprint(\"添加操作!\")\r\n\t\t\tadd_button = 
AddOp(None,title=\"车辆管理系统\",size=(1024,720))\r\n\t\t\tadd_button.Show()\r\n\t\t\tself.Close(True)\r\n\t\telif Bid == 12:\r\n\t\t\tprint(\"删除操作!\")\r\n\t\t\tdel_button = DelOp(None,title=\"车辆管理系统\",size=(1024,720))\r\n\t\t\tdel_button.Show()\r\n\t\t\tself.Close(True)\r\n\t\telif Bid == 13:\r\n\t\t\tself.Close(True)\r\n\t\telif Bid == 14:\r\n\t\t\tprint(\"修改操作!\")\r\n\t\t\tupdate_button=UpdateOp(None,title=\"用户管理系统\",size=(1024,668))\r\n\t\t\tupdate_button.Show()\r\n\t\t\tself.Close(True)\r\n    #创建用于显示数据的表格\r\n\tdef CreateGrid(self):\r\n\t\t#连接car_sale数据库\r\n\t\top = Sql_operation(\"car_sale\")\r\n\t\t#获取car表中的车辆信息,返回为二维元组\r\n\t\tnp = op.FindAll(\"car\")\r\n\t\tcolumn_names = (\"车辆编号\",\"型号\",\"颜色\",\"生产厂商\",\"出厂日期\",\"价格\")\r\n\t\tcgrid = wx.grid.Grid(self.pnl)\r\n\t\t#CreateGrid(行数,列数)\r\n\t\tcgrid.CreateGrid(len(np),len(np[0])-1)\r\n\t\tfor row in range(len(np)):\r\n            #表格横向为对应表中的属性,纵向为首个属性的数据\r\n\t\t\tcgrid.SetRowLabelValue(row,str(np[row][0]))\r\n\t\t\tfor col in range(1,len(np[row])):\r\n\t\t\t\tcgrid.SetColLabelValue(col-1,column_names[col])\r\n\t\t\t\tcgrid.SetCellValue(row,col-1,str(np[row][col]))\r\n\t\tcgrid.AutoSize()\r\n\t\treturn cgrid\r\n\r\n\tdef OnLabelleftClick(self,event):\r\n\t\t#连接car_sale数据库\r\n\t\top = Sql_operation(\"car_sale\")\r\n\t\tnp = op.FindAll(\"car\")\r\n\t\tprint(\"RowIdx: {0}\".format(event.GetRow()))\r\n\t\tprint(\"ColIdx: {0}\".format(event.GetCol()))\r\n\t\tprint(np[event.GetRow()])\r\n\t\tevent.Skip()\r\n\r\n#继承CarOperation类,实现初始化操作界面\r\n\"\"\"\r\n数据库插入操作\r\n\"\"\"\r\nclass AddOp(CarOperation):\r\n\tdef __init__(self,*args,**kw):\r\n\t\tsuper(AddOp,self).__init__(*args, **kw)\r\n\t\t#创建表中属性文本框\r\n\t\tself.car_no = wx.TextCtrl(self.pnl,size = (210,25))\r\n\t\tself.car_type = wx.TextCtrl(self.pnl,size = (210,25))\r\n\t\tself.car_color = wx.TextCtrl(self.pnl,size = (210,25))\r\n\t\tself.car_maner = wx.TextCtrl(self.pnl,size = (210,25))\r\n\t\tself.car_date = wx.TextCtrl(self.pnl,size = (210,25))\r\n\t\tself.car_price=wx.TextCtrl(self.pnl,size=(210,25))\r\n\t\tself.add_affirm = wx.Button(self.pnl,label=\"添加\",size=(80,25))\r\n\t\t#为添加按钮组件绑定事件处理\r\n\t\tself.add_affirm.Bind(wx.EVT_BUTTON,self.AddAffirm)\r\n\r\n        #创建静态框\r\n\t\ttext_no = wx.StaticBox(self.pnl,label=\"车辆编号\")\r\n\t\ttext_type = wx.StaticBox(self.pnl,label=\"型 号\")\r\n\t\ttext_color = wx.StaticBox(self.pnl,label=\"颜 色\")\r\n\t\ttext_maner = wx.StaticBox(self.pnl,label=\"生产厂商\")\r\n\t\ttext_date = wx.StaticBox(self.pnl,label=\"出厂日期\")\r\n\t\ttext_price=wx.StaticBox(self.pnl,label=\"价 格\")\r\n\t\t#创建水平方向box布局管理器\r\n\t\thbox_no = wx.StaticBoxSizer(text_no,wx.HORIZONTAL)\r\n\t\thbox_type = wx.StaticBoxSizer(text_type,wx.HORIZONTAL)\r\n\t\thbox_color = wx.StaticBoxSizer(text_color,wx.HORIZONTAL)\r\n\t\thbox_maner = wx.StaticBoxSizer(text_maner,wx.HORIZONTAL)\r\n\t\thbox_date = wx.StaticBoxSizer(text_date,wx.HORIZONTAL)\r\n\t\thbox_price=wx.StaticBoxSizer(text_price,wx.HORIZONTAL)\r\n\t\t#添加到hsbox布局管理器\r\n\t\thbox_no.Add(self.car_no,0,wx.EXPAND | wx.BOTTOM,5)\r\n\t\thbox_type.Add(self.car_type,0,wx.EXPAND | wx.BOTTOM,5)\r\n\t\thbox_color.Add(self.car_color,0,wx.EXPAND | wx.BOTTOM,5)\r\n\t\thbox_maner.Add(self.car_maner,0,wx.EXPAND | wx.BOTTOM,5)\r\n\t\thbox_date.Add(self.car_date,0,wx.EXPAND | wx.BOTTOM,5)\r\n\t\thbox_price.Add(self.car_price,0,wx.EXPAND | wx.BOTTOM,5)\r\n\t\t#################################################################################\r\n\t\t#添加到vbox_showop布局管理器\r\n\t\tself.vbox_showop.Add(hbox_no,0,wx.CENTER | wx.TOP | 
wx.FIXED_MINSIZE,5)\r\n\t\tself.vbox_showop.Add(hbox_type,0,wx.CENTER | wx.TOP | wx.FIXED_MINSIZE,5)\r\n\t\tself.vbox_showop.Add(hbox_color,0,wx.CENTER | wx.TOP | wx.FIXED_MINSIZE,5)\r\n\t\tself.vbox_showop.Add(hbox_maner,0,wx.CENTER | wx.TOP | wx.FIXED_MINSIZE,5)\r\n\t\tself.vbox_showop.Add(hbox_date,0,wx.CENTER | wx.TOP | wx.FIXED_MINSIZE,5)\r\n\t\tself.vbox_showop.Add(hbox_price,0,wx.CENTER | wx.TOP | wx.FIXED_MINSIZE,5)\r\n\t\tself.vbox_showop.Add(self.add_affirm,0,wx.CENTER | wx.TOP | wx.FIXED_MINSIZE,5)\r\n\r\n\tdef ClickButton(self,event):\r\n\t\tBid = event.GetId()\r\n\t\tif Bid == 10:\r\n\t\t\tprint(\"查询操作!\")\r\n\t\t\tinquire_button = InquireOp(None,title=\"车辆管理系统\",size=(1024,720))\r\n\t\t\tinquire_button.Show()\r\n\t\t\tself.Close(True)\r\n\t\telif Bid == 11:\r\n\t\t\tpass\r\n\t\telif Bid == 12:\r\n\t\t\tprint(\"删除操作!\")\r\n\t\t\tdel_button = DelOp(None,title=\"车辆管理系统\",size=(1024,720))\r\n\t\t\tdel_button.Show()\r\n\t\t\tself.Close(True)\r\n\t\telif Bid == 13:\r\n\t\t\tself.Close(True)\r\n\t\telif Bid == 14:\r\n\t\t\tprint(\"修改操作!\")\r\n\t\t\tupdate_button=UpdateOp(None,title=\"用户管理系统\",size=(1024,668))\r\n\t\t\tupdate_button.Show()\r\n\t\t\tself.Close(True)\r\n\tdef AddAffirm(self,event):\r\n\t\t#连接car_sale数据库\r\n\t\top = Sql_operation(\"car_sale\")\r\n\t\t#向stu_information表添加学生信息\r\n\t\tcar_no = self.car_no.GetValue()\r\n\t\tprint(car_no)\r\n\t\tcar_type = self.car_type.GetValue()\r\n\t\tprint(car_type)\r\n\t\tcar_color = self.car_color.GetValue()\r\n\t\tprint(car_color)\r\n\t\tcar_maner = self.car_maner.GetValue()\r\n\t\tprint(car_maner)\r\n\t\tcar_date = self.car_date.GetValue()\r\n\t\tprint(car_date)\r\n\t\tcar_price=self.car_price.GetValue()\r\n\t\tprint(car_price)\r\n\t\tnp = op.CarInsert(car_no,car_type,car_color,car_maner,car_date,car_price)\r\n\r\n#继承InquireOp类,实现初始化操作界面\r\nclass DelOp(InquireOp):\r\n\tdef __init__(self,*args,**kw):\r\n\t\t# ensure the parent's __init__ is called\r\n\t\tsuper(DelOp,self).__init__(*args, **kw)\r\n\t\t#创建删除车辆输入框、删除按钮\r\n\t\tself.del_id = wx.TextCtrl(self.pnl,pos = (407,400),size = (210,25))\r\n\t\tself.del_affirm = wx.Button(self.pnl,label=\"删除\",pos=(625,400),size=(80,25))\r\n\t\t#为删除按钮组件绑定事件处理\r\n\t\tself.del_affirm.Bind(wx.EVT_BUTTON,self.DelAffirm)\r\n\t\t#################################################################################\r\n\t\t#创建静态框\r\n\t\ttext_del = wx.StaticBox(self.pnl,label=\"请选择需要删除的车辆编号\")\r\n\t\t#创建水平方向box布局管理器\r\n\t\thbox_del = wx.StaticBoxSizer(text_del,wx.HORIZONTAL)\r\n\t\t#添加到hbox_name布局管理器\r\n\t\thbox_del.Add(self.del_id,0,wx.EXPAND | wx.BOTTOM,5)\r\n\t\t#添加到vbox_showop布局管理器\r\n\t\tself.vbox_showop.Add(hbox_del,0,wx.CENTER | wx.TOP | wx.FIXED_MINSIZE,5)\r\n\t\tself.vbox_showop.Add(self.del_affirm,0,wx.CENTER | wx.TOP | wx.FIXED_MINSIZE,5)\r\n\r\n\tdef ClickButton(self,event):\r\n\t\tBid = event.GetId()\r\n\t\tif Bid == 10:\r\n\t\t\tprint(\"查询操作!\")\r\n\t\t\tinquire_button = InquireOp(None,title=\"车辆管理系统\",size=(1024,720))\r\n\t\t\tinquire_button.Show()\r\n\t\t\tself.Close(True)\r\n\t\telif Bid == 11:\r\n\t\t\tprint(\"添加操作!\")\r\n\t\t\tadd_button = AddOp(None,title=\"车辆管理系统\",size=(1024,720))\r\n\t\t\tadd_button.Show()\r\n\t\t\tself.Close(True)\r\n\t\telif Bid == 12:\r\n\t\t\tpass\r\n\t\telif Bid == 13:\r\n\t\t\tself.Close(True)\r\n\t\telif Bid == 14:\r\n\t\t\tprint(\"修改操作!\")\r\n\t\t\tupdate_button=UpdateOp(None,title=\"用户管理系统\",size=(1024,668))\r\n\t\t\tupdate_button.Show()\r\n\t\t\tself.Close(True)\r\n\tdef DelAffirm(self,event):\r\n\t\t#连接car_sale数据库\r\n\t\top = 
Sql_operation(\"car_sale\")\r\n\t\tdel_id = self.del_id.GetValue()\r\n\t\tprint(del_id)\r\n\t\tnp = op.CarDel(int(del_id))\r\n\r\n\t\tdel_button = DelOp(None,title=\"车辆管理系统\",size=(1024,720))\r\n\t\tdel_button.Show()\r\n\t\tself.Close(True)\r\nclass UpdateOp(CarOperation):\r\n\tdef __init__(self,*args,**kw):\r\n\t\tsuper(UpdateOp,self).__init__(*args, **kw)\r\n\t\t#创建表中属性文本框\r\n\t\tself.car_no = wx.TextCtrl(self.pnl,size = (210,25))\r\n\t\tself.car_type = wx.TextCtrl(self.pnl,size = (210,25))\r\n\t\tself.car_color = wx.TextCtrl(self.pnl,size = (210,25))\r\n\t\tself.car_maner = wx.TextCtrl(self.pnl,size = (210,25))\r\n\t\tself.car_date = wx.TextCtrl(self.pnl,size = (210,25))\r\n\t\tself.car_price=wx.TextCtrl(self.pnl,size=(210,25))\r\n\t\tself.update_affirm = wx.Button(self.pnl,label=\"修改\",size=(80,25))\r\n\t\t#为添加按钮组件绑定事件处理\r\n\t\tself.update_affirm.Bind(wx.EVT_BUTTON,self.UpdateAffirm)\r\n\r\n #创建静态框\r\n\t\ttext_no = wx.StaticBox(self.pnl,label=\"车辆编号\")\r\n\t\ttext_type = wx.StaticBox(self.pnl,label=\"型 号\")\r\n\t\ttext_color = wx.StaticBox(self.pnl,label=\"颜 色\")\r\n\t\ttext_maner = wx.StaticBox(self.pnl,label=\"生产厂商\")\r\n\t\ttext_date = wx.StaticBox(self.pnl,label=\"出厂日期\")\r\n\t\ttext_price=wx.StaticBox(self.pnl,label=\"价 格\")\r\n\t\t#创建水平方向box布局管理器\r\n\t\thbox_no = wx.StaticBoxSizer(text_no,wx.HORIZONTAL)\r\n\t\thbox_type = wx.StaticBoxSizer(text_type,wx.HORIZONTAL)\r\n\t\thbox_color = wx.StaticBoxSizer(text_color,wx.HORIZONTAL)\r\n\t\thbox_maner = wx.StaticBoxSizer(text_maner,wx.HORIZONTAL)\r\n\t\thbox_date = wx.StaticBoxSizer(text_date,wx.HORIZONTAL)\r\n\t\thbox_price=wx.StaticBoxSizer(text_price,wx.HORIZONTAL)\r\n\t\t#添加到hsbox布局管理器\r\n\t\thbox_no.Add(self.car_no,0,wx.EXPAND | wx.BOTTOM,5)\r\n\t\thbox_type.Add(self.car_type,0,wx.EXPAND | wx.BOTTOM,5)\r\n\t\thbox_color.Add(self.car_color,0,wx.EXPAND | wx.BOTTOM,5)\r\n\t\thbox_maner.Add(self.car_maner,0,wx.EXPAND | wx.BOTTOM,5)\r\n\t\thbox_date.Add(self.car_date,0,wx.EXPAND | wx.BOTTOM,5)\r\n\t\thbox_price.Add(self.car_price,0,wx.EXPAND | wx.BOTTOM,5)\r\n\t\t#################################################################################\r\n\t\t#添加到vbox_showop布局管理器\r\n\t\tself.vbox_showop.Add(hbox_no,0,wx.CENTER | wx.TOP | wx.FIXED_MINSIZE,5)\r\n\t\tself.vbox_showop.Add(hbox_type,0,wx.CENTER | wx.TOP | wx.FIXED_MINSIZE,5)\r\n\t\tself.vbox_showop.Add(hbox_color,0,wx.CENTER | wx.TOP | wx.FIXED_MINSIZE,5)\r\n\t\tself.vbox_showop.Add(hbox_maner,0,wx.CENTER | wx.TOP | wx.FIXED_MINSIZE,5)\r\n\t\tself.vbox_showop.Add(hbox_date,0,wx.CENTER | wx.TOP | wx.FIXED_MINSIZE,5)\r\n\t\tself.vbox_showop.Add(hbox_price,0,wx.CENTER | wx.TOP | wx.FIXED_MINSIZE,5)\r\n\t\tself.vbox_showop.Add(self.update_affirm,0,wx.CENTER | wx.TOP | wx.FIXED_MINSIZE,5)\r\n\tdef ClickButton(self,event):\r\n\t\tBid = event.GetId()\r\n\t\tif Bid == 10:\r\n\t\t\tprint(\"查询操作!\")\r\n\t\t\tinquire_button = InquireOp(None,title=\"用户管理系统\",size=(1024,668))\r\n\t\t\tinquire_button.Show()\r\n\t\t\tself.Close(True)\r\n\t\telif Bid == 11:\r\n\t\t\tprint(\"添加操作!\")\r\n\t\t\tadd_button=AddOp(None,title=\"用户管理系统\",size=(1024,668))\r\n\t\t\tadd_button.Show()\r\n\t\t\tself.Close(True)\r\n\t\telif Bid == 12:\r\n\t\t\tprint(\"删除操作!\")\r\n\t\t\tdel_button = DelOp(None,title=\"用户管理系统\",size=(1024,668))\r\n\t\t\tdel_button.Show()\r\n\t\t\tself.Close(True)\r\n\t\telif Bid == 13:\r\n\t\t\tself.Close(True)\r\n\t\telif Bid == 14:\r\n\t\t\tpass\r\n\tdef UpdateAffirm(self,event):\r\n\t\t#连接car_sale数据库\r\n\t\top = Sql_operation(\"car_sale\")\r\n\t\t#向stu_information表添加学生信息\r\n\t\tcar_no = 
self.car_no.GetValue()\r\n\t\tprint(car_no)\r\n\t\tcar_type = self.car_type.GetValue()\r\n\t\tprint(car_type)\r\n\t\tcar_color = self.car_color.GetValue()\r\n\t\tprint(car_color)\r\n\t\tcar_maner = self.car_maner.GetValue()\r\n\t\tprint(car_maner)\r\n\t\tcar_date = self.car_date.GetValue()\r\n\t\tprint(car_date)\r\n\t\tcar_price=self.car_price.GetValue()\r\n\t\tprint(car_price)\r\n\t\tnp = op.CarUpdate(car_no,car_type,car_color,car_maner,car_date,car_price)\r\n\"\"\"\r\nif __name__ == '__main__':\r\n\tapp = wx.App()\r\n\tlogin = CarOperation(None,title=\"车辆管理系统\",size=(1024,720))\r\n\tlogin.Show()\r\n\tapp.MainLoop()\r\n\"\"\"" }, { "alpha_fraction": 0.6461305022239685, "alphanum_fraction": 0.6473444700241089, "avg_line_length": 27.03083610534668, "blob_id": "f1d6a08c24be815e272911a5092f1b6e3a9e1ab6", "content_id": "6dc755c30062f29bebaf5df602457db1a24be682", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7256, "license_type": "permissive", "max_line_length": 174, "num_lines": 227, "path": "/mydb.py", "repo_name": "circleacid/car_sale", "src_encoding": "UTF-8", "text": "import pymysql\r\nimport sys\r\nimport xlwt\r\n#创建数据库操作类\r\nclass Sql_operation(object):\r\n\t'''\r\n\t数据库操作\r\n\t'''\r\n\t#用构造函数实现数据库连接,并引入mydb参数,实现调用不同的数据库\r\n\tdef __init__(self,mydb):\r\n\t\t#实例变量\r\n\t\tself.mydb = mydb\r\n\t\t#打开数据库连接\r\n\t\tself.db = pymysql.connect(host = \"\",user = \"\",password = \"\",db = \"car_sale\",charset = \"utf8\")\r\n\t\t#创建游标对象\r\n\t\tself.cursor = self.db.cursor()\r\n\r\n\t#定义查看数据表信息函数,并引入table_field、table_name参数,实现查看不同数据表的建表语句\r\n\tdef FindAll(self,table_name):\r\n\t\t#实例变量\r\n\t\tself.table_name = table_name\r\n\t\t#定义SQL语句\r\n\t\tsql = \"select * from %s\"%(self.table_name)\r\n\t\ttry:\r\n\t\t\t#执行数据库操作\r\n\t\t\tself.cursor.execute(sql)\r\n\t\t\t#处理结果\r\n\t\t\tdata = self.cursor.fetchall()\r\n\t\t\treturn data\r\n\t\texcept Exception as err:\r\n\t\t\tprint(\"SQL执行错误,原因:\",err)\r\n\r\n\t#定义添加表数据函数\r\n\tdef CInsert(self,cname,cage,csex,ctel,Brecord):\r\n\t\t#实例变量\r\n\t\tself.cname = cname\r\n\t\tself.cage = cage\r\n\t\tself.csex = csex\r\n\t\tself.ctel = ctel\r\n\t\tself.Brecord = Brecord\r\n\t\t#定义SQL语句\r\n\t\tsql = \"insert into custom(cname,cage,csex,ctel,Brecord) values('%s','%s','%s','%s','%s')\"%(self.cname,self.cage,self.csex,self.ctel,self.Brecord)\r\n\t\ttry:\r\n\t\t\t#执行数据库操作\r\n\t\t\tself.cursor.execute(sql)\r\n\t\t\t#事务提交\r\n\t\t\tself.db.commit()\r\n\t\texcept Exception as err:\r\n\t\t\t#事务回滚\r\n\t\t\tself.db.rollback()\r\n\t\t\tprint(\"SQL执行错误,原因:\",err)\r\n\tdef CarInsert(self,car_no, car_type, car_color, car_maner, car_date,car_price):\r\n\t\tself.car_no=car_no\r\n\t\tself.car_type=car_type\r\n\t\tself.car_color=car_color\r\n\t\tself.car_maner=car_maner\r\n\t\tself.car_date=car_date\r\n\t\tself.car_price=car_price\r\n\t\tsql=\"insert into car(car_no,car_type,car_color,car_maner,car_date,car_price) values('%s','%s','%s','%s','%s','%s')\"%(self.car_no,self.car_type,self.car_color,\r\n\t\tself.car_maner,self.car_date,self.car_price)\r\n\t\ttry:\r\n\t\t\tself.cursor.execute(sql)\r\n\t\t\tself.db.commit()\r\n\t\texcept Exception as err:\r\n\t\t\tself.db.rollback()\r\n\t\t\tprint(\"执行错误,原因:\",err)\r\n\tdef EInsert(self,eno, ename, eage, esex, ehome, edu):\r\n\t\tself.eno=eno\r\n\t\tself.ename=ename\r\n\t\tself.eage=eage\r\n\t\tself.esex=esex\r\n\t\tself.ehome=ehome\r\n\t\tself.edu=edu\r\n\t\tsql = \"insert into employ values('%s','%s','%s','%s','%s','%s')\"%(self.eno, self.ename, self.eage,\r\n\t\tself.esex, 
self.ehome, self.edu)\r\n\t\ttry:\r\n\t\t\tself.cursor.execute(sql)\r\n\t\t\tself.db.commit()\r\n\t\texcept Exception as err:\r\n\t\t\tself.db.rollback()\r\n\t\t\tprint(\"执行错误,原因:\",err)\r\n\r\n\tdef SInsert(self,sale_car,sale_type,sale_color,sale_date,sale_num,sale_man):\r\n\t\tself.sale_car=sale_car\r\n\t\tself.sale_type=sale_type\r\n\t\tself.sale_color=sale_color\r\n\t\tself.sale_date=sale_date\r\n\t\tself.sale_num=sale_num\r\n\t\tself.sale_man=sale_man\r\n\t\tsql=\"insert into sale values('%s','%s','%s','%s','%s','%s')\"%(self.sale_car, self.sale_type, self.sale_color,\r\n\t\tself.sale_date,self.sale_num,self.sale_man)\r\n\t\ttry:\r\n\t\t\tself.cursor.execute(sql)\r\n\t\t\tself.db.commit()\r\n\t\texcept Exception as err:\r\n\t\t\tself.db.rollback()\r\n\t\t\tprint(\"执行错误,原因:\",err)\r\n\r\n\r\n\t#定义删除表数据函数\r\n\tdef CDel(self,cname):\r\n\t\t#实例变量\r\n\t\tself.cname = cname\r\n\t\t#定义SQL语句\r\n\t\tsql = \"delete from custom where cname='%s'\"%(self.cname)\r\n\t\ttry:\r\n\t\t\t#执行数据库操作\r\n\t\t\tself.cursor.execute(sql)\r\n\t\t\t#事务提交\r\n\t\t\tself.db.commit()\r\n\t\texcept Exception as err:\r\n\t\t\t#事务回滚\r\n\t\t\tself.db.rollback()\r\n\t\t\tprint(\"SQL执行错误,原因:\",err)\r\n\tdef CarDel(self,car_no):\r\n\t\tself.car_no=car_no\r\n\t\tsql=\"delete from car where car_no=%d\"%(self.car_no)\r\n\t\ttry:\r\n\t\t\tself.cursor.execute(sql)\r\n\t\t\tself.db.commit()\r\n\t\texcept Exception as err:\r\n\t\t\tself.db.rollback()\r\n\t\t\tprint(\"执行错误,原因:\",err)\r\n\tdef EDel(self,eno):\r\n\t\tself.eno=eno\r\n\t\tsql=\"delete from employ where eno=%d\"%(self.eno)\r\n\t\ttry:\r\n\t\t\tself.cursor.execute(sql)\r\n\t\t\tself.db.commit()\r\n\t\texcept Exception as err:\r\n\t\t\tself.db.rollback()\r\n\t\t\tprint(\"执行错误,原因:\",err)\r\n\r\n\tdef CUpdate(self,cname,cage,csex,ctel,Brecord):\r\n\t\t#实例变量\r\n\t\tself.cname = cname\r\n\t\tself.cage = cage\r\n\t\tself.csex = csex\r\n\t\tself.ctel = ctel\r\n\t\tself.Brecord = Brecord\r\n\t\t#定义SQL语句\r\n\t\tsql = \"update custom set cname='%s',cage='%s',csex='%s',ctel='%s',Brecord='%s' where cname='%s'\"%(self.cname,\r\n\t\tself.cage,self.csex,self.ctel,self.Brecord,self.cname)\r\n\t\ttry:\r\n\t\t\t#执行数据库操作\r\n\t\t\tself.cursor.execute(sql)\r\n\t\t\t#事务提交\r\n\t\t\tself.db.commit()\r\n\t\texcept Exception as err:\r\n\t\t\t#事务回滚\r\n\t\t\tself.db.rollback()\r\n\t\t\tprint(\"SQL执行错误,原因:\",err)\r\n\tdef CarUpdate(self,car_no, car_type, car_color, car_maner, car_date,car_price):\r\n\t\t#实例变量\r\n\t\tself.car_no=car_no\r\n\t\tself.car_type=car_type\r\n\t\tself.car_color=car_color\r\n\t\tself.car_maner=car_maner\r\n\t\tself.car_date=car_date\r\n\t\tself.car_price=car_price\r\n\t\t#定义SQL语句\r\n\t\tsql = \"update car set car_no='%s',car_type='%s',car_color='%s',car_maner='%s',car_date='%s',car_price='%s' where car_no='%s'\"%(self.car_no,\r\n\t\tself.car_type,self.car_color,self.car_maner,self.car_date,self.car_price,self.car_no)\r\n\t\ttry:\r\n\t\t\t#执行数据库操作\r\n\t\t\tself.cursor.execute(sql)\r\n\t\t\t#事务提交\r\n\t\t\tself.db.commit()\r\n\t\texcept Exception as err:\r\n\t\t\t#事务回滚\r\n\t\t\tself.db.rollback()\r\n\t\t\tprint(\"SQL执行错误,原因:\",err)\r\n\tdef EUpdate(self,eno, ename, eage, esex, ehome, edu):\r\n\t\tself.eno=eno\r\n\t\tself.ename=ename\r\n\t\tself.eage=eage\r\n\t\tself.esex=esex\r\n\t\tself.ehome=ehome\r\n\t\tself.edu=edu\r\n\t\tsql = \"update employ set eno='%s',ename='%s',eage='%s',esex='%s',ehome='%s',edu='%s' where eno='%s'\"%(self.eno, self.ename, self.eage,\r\n\t\tself.esex, self.ehome, 
self.edu,self.eno)\r\n\t\ttry:\r\n\t\t\tself.cursor.execute(sql)\r\n\t\t\tself.db.commit()\r\n\t\texcept Exception as err:\r\n\t\t\tself.db.rollback()\r\n\t\t\tprint(\"执行错误,原因:\",err)\r\n\tdef SUpdate(self,sale_car,sale_type,sale_color,sale_date,sale_num,sale_man):\r\n\t\tself.sale_car=sale_car\r\n\t\tself.sale_type=sale_type\r\n\t\tself.sale_color=sale_color\r\n\t\tself.sale_date=sale_date\r\n\t\tself.sale_num=sale_num\r\n\t\tself.sale_man=sale_man\r\n\t\tsql=\"update sale set sale_type='%s',sale_color='%s',sale_date='%s',sale_num='%s',sale_man='%s' where sale_car='%s'\"%(self.sale_type,self.sale_color,\r\n\t\tself.sale_date,self.sale_num,self.sale_man,self.sale_car)\r\n\t\ttry:\r\n\t\t\tself.cursor.execute(sql)\r\n\t\t\tself.db.commit()\r\n\t\texcept Exception as err:\r\n\t\t\tself.db.rollback()\r\n\t\t\tprint(\"执行错误,原因:\",err)\r\n\tdef SExcel(self):\r\n\t\tsql=\"select * from sale\"\r\n\t\tself.cursor.execute(sql)\r\n\t\tdata=self.cursor.fetchall()\r\n\t\tself.cursor.scroll(0,mode='absolute')\r\n\t\tfields=self.cursor.description\r\n\t\tworkbook=xlwt.Workbook()\r\n\t\tsheet=workbook.add_sheet('sheet1', cell_overwrite_ok=True)\r\n\r\n\t\tfor field in range(len(fields)):\r\n\t\t\tsheet.write(0,field,fields[field][0])\r\n\t\t#结果写入excel\r\n\t\tfor row in range(1,len(data)+1):\r\n\t\t\tfor col in range(len(fields)):\r\n\t\t\t\tsheet.write(row,col,data[row-1][col])\r\n\t\tself.cursor.close()\r\n\t\tself.db.commit()\r\n\t\tself.db.close()\r\n\t\tworkbook.save(r'C:\\Users\\wky\\Documents\\Programa\\Car_sale\\Sale_table.xls')\r\n\r\n\t#用析构函数实现数据库关闭\r\n\tdef __del__(self):\r\n\t\t#关闭数据库连接(SExcel执行后连接已关闭时跳过,避免重复关闭报错)\r\n\t\tif self.db.open:\r\n\t\t\tself.db.close()\r\n" }, { "alpha_fraction": 0.4228629171848297, "alphanum_fraction": 0.5833080410957336, "avg_line_length": 53.84745788574219, "blob_id": "a7fd0e83f1749ac9fed4a954f5213740acc6fb99", "content_id": "2f023bf39776fc1b49dc7c3b55c5baeeed27ab00", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 11007, "license_type": "permissive", "max_line_length": 86, "num_lines": 177, "path": "/car_sale.sql", "repo_name": "circleacid/car_sale", "src_encoding": "UTF-8", "text": "/*\r\n Navicat Premium Data Transfer\r\n\r\n Source Server : mysql\r\n Source Server Type : MySQL\r\n Source Server Version : 80019\r\n Source Host : localhost:3306\r\n Source Schema : car_sale\r\n\r\n Target Server Type : MySQL\r\n Target Server Version : 80019\r\n File Encoding : 65001\r\n\r\n Date: 09/03/2020 17:26:55\r\n*/\r\n\r\nSET NAMES utf8mb4;\r\nSET FOREIGN_KEY_CHECKS = 0;\r\n\r\n-- ----------------------------\r\n-- Table structure for car\r\n-- ----------------------------\r\nDROP TABLE IF EXISTS `car`;\r\nCREATE TABLE `car` (\r\n `car_no` char(5) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL,\r\n `car_type` char(10) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL,\r\n `car_color` char(8) CHARACTER SET utf8 COLLATE utf8_bin NULL DEFAULT NULL,\r\n `car_maner` varchar(20) CHARACTER SET utf8 COLLATE utf8_bin NULL DEFAULT NULL,\r\n `car_date` date NOT NULL,\r\n `car_price` decimal(9, 2) NOT NULL,\r\n PRIMARY KEY (`car_no`) USING BTREE\r\n) ENGINE = InnoDB CHARACTER SET = utf8 COLLATE = utf8_bin ROW_FORMAT = Dynamic;\r\n\r\n-- ----------------------------\r\n-- Records of car\r\n-- ----------------------------\r\nINSERT INTO `car` VALUES ('00010', '新朗逸', '白色', '上汽大众', '2018-02-12', 100000.00);\r\nINSERT INTO `car` VALUES ('00011', '新朗逸', '黑色', '上汽大众', '2018-02-12', 99800.00);\r\nINSERT 
INTO `car` VALUES ('00012', '新朗逸', '蓝色', '上汽大众', '2018-02-12', 129800.00);\r\nINSERT INTO `car` VALUES ('00013', '新朗逸', '金色', '上汽大众', '2018-02-12', 161900.00);\r\nINSERT INTO `car` VALUES ('00014', '新朗逸', '红色', '上汽大众', '2018-02-12', 143000.00);\r\nINSERT INTO `car` VALUES ('00020', '轩逸', '白色', '东风日产', '2018-07-06', 99800.00);\r\nINSERT INTO `car` VALUES ('00021', '轩逸', '橙色', '东风日产', '2018-07-06', 118600.00);\r\nINSERT INTO `car` VALUES ('00022', '轩逸', '黑色', '东风日产', '2018-07-06', 101800.00);\r\nINSERT INTO `car` VALUES ('00023', '轩逸', '蓝色', '东风日产', '2018-07-06', 139800.00);\r\nINSERT INTO `car` VALUES ('00024', '轩逸', '红色', '东风日产', '2018-07-06', 139800.00);\r\nINSERT INTO `car` VALUES ('00025', '轩逸', '金色', '东风日产', '2018-07-06', 14300.00);\r\nINSERT INTO `car` VALUES ('00030', '卡罗拉', '红色', '一汽丰田', '2018-07-08', 130800.00);\r\nINSERT INTO `car` VALUES ('00031', '卡罗拉', '白色', '一汽丰田', '2018-07-08', 119800.00);\r\nINSERT INTO `car` VALUES ('00032', '卡罗拉', '黑色', '一汽丰田', '2018-07-08', 129800.00);\r\nINSERT INTO `car` VALUES ('00033', '卡罗拉', '银色', '一汽丰田', '2018-07-08', 159800.00);\r\nINSERT INTO `car` VALUES ('00040', '速腾', '白色', '一汽大众', '2018-06-09', 128900.00);\r\nINSERT INTO `car` VALUES ('00041', '速腾', '黑色', '一汽大众', '2018-06-09', 130000.00);\r\nINSERT INTO `car` VALUES ('00042', '速腾', '银色', '一汽大众', '2018-06-09', 138900.00);\r\nINSERT INTO `car` VALUES ('00043', '速腾', '橙色', '一汽大众', '2018-06-09', 145900.00);\r\nINSERT INTO `car` VALUES ('00050', '新宝来', '白色', '一汽大众', '2018-03-15', 98800.00);\r\nINSERT INTO `car` VALUES ('00051', '新宝来', '黑色', '一汽大众', '2018-03-15', 108800.00);\r\nINSERT INTO `car` VALUES ('00052', '新宝来', '棕色', '一汽大众', '2018-03-15', 113000.00);\r\nINSERT INTO `car` VALUES ('00053', '新宝来', '橙色', '一汽大众', '2018-03-15', 120000.00);\r\nINSERT INTO `car` VALUES ('00054', '新宝来', '金色', '一汽大众', '2018-03-15', 156000.00);\r\nINSERT INTO `car` VALUES ('00060', '帕萨特', '黑色', '一汽大众 ', '2018-02-19', 184900.00);\r\nINSERT INTO `car` VALUES ('00061', '帕萨特', '白色', '一汽大众 ', '2018-02-19', 184900.00);\r\nINSERT INTO `car` VALUES ('00062', '帕萨特', '蓝色', '一汽大众 ', '2018-02-19', 206900.00);\r\nINSERT INTO `car` VALUES ('00063', '帕萨特', '金色', '一汽大众 ', '2018-02-19', 237900.00);\r\nINSERT INTO `car` VALUES ('00070', '英朗', '白色', '上汽通用', '2018-04-05', 115900.00);\r\nINSERT INTO `car` VALUES ('00071', '英朗', '黑色', '上汽通用', '2018-04-05', 118900.00);\r\nINSERT INTO `car` VALUES ('00072', '英朗', '金色', '上汽通用', '2018-04-05', 143900.00);\r\nINSERT INTO `car` VALUES ('00080', '桑塔纳', '黑色', '上汽大众 ', '2018-07-31', 56500.00);\r\nINSERT INTO `car` VALUES ('00090', '北汽EU', '白色', '北汽新能源 ', '2018-05-27', 129800.00);\r\nINSERT INTO `car` VALUES ('00100', '雅阁', '黑色', '广汽本田 ', '2018-09-15', 144800.00);\r\nINSERT INTO `car` VALUES ('00101', '雅阁', '白色', '广汽本田 ', '2018-09-15', 144800.00);\r\nINSERT INTO `car` VALUES ('00102', '雅阁', '银色', '广汽本田 ', '2018-09-15', 179800.00);\r\nINSERT INTO `car` VALUES ('00103', '雅阁', '蓝色', '广汽本田 ', '2018-09-15', 154800.00);\r\nINSERT INTO `car` VALUES ('00104', '雅阁', '橙色', '广汽本田 ', '2018-09-15', 189800.00);\r\nINSERT INTO `car` VALUES ('00105', '雅阁', '红色', '广汽本田 ', '2018-09-15', 209800.00);\r\nINSERT INTO `car` VALUES ('00112', 'car', 'blue', 'uknow', '2020-03-04', 999999.00);\r\nINSERT INTO `car` VALUES ('00113', 'car', 'pink', 'unknow', '2000-03-03', 3333333.00);\r\nINSERT INTO `car` VALUES ('00114', 'car', 'pink', 'unknow', '2000-03-03', 3333333.00);\r\n\r\n-- ----------------------------\r\n-- Table structure for custom\r\n-- ----------------------------\r\nDROP TABLE IF EXISTS `custom`;\r\nCREATE TABLE 
`custom` (\r\n `cname` char(10) CHARACTER SET utf8 COLLATE utf8_general_ci NOT NULL,\r\n `cage` int(0) NULL DEFAULT NULL,\r\n `csex` char(2) CHARACTER SET utf8 COLLATE utf8_general_ci NOT NULL,\r\n `ctel` varchar(20) CHARACTER SET utf8 COLLATE utf8_general_ci NOT NULL,\r\n `Brecord` tinyint(0) NULL DEFAULT NULL,\r\n PRIMARY KEY (`cname`) USING BTREE\r\n) ENGINE = InnoDB CHARACTER SET = utf8 COLLATE = utf8_general_ci ROW_FORMAT = Dynamic;\r\n\r\n-- ----------------------------\r\n-- Records of custom\r\n-- ----------------------------\r\nINSERT INTO `custom` VALUES ('wky', 20, '男', '123456', 0);\r\nINSERT INTO `custom` VALUES ('丰星', 53, '男', '13605404466', 0);\r\nINSERT INTO `custom` VALUES ('侯贞', 42, '女', '13202738457', 0);\r\nINSERT INTO `custom` VALUES ('况诚伦', 46, '男', '15704541508', 1);\r\nINSERT INTO `custom` VALUES ('包安', 45, '男', '13207385160', 0);\r\nINSERT INTO `custom` VALUES ('单彪顺', 49, '男', '15704081212', 1);\r\nINSERT INTO `custom` VALUES ('厍绍', 41, '男', '13000506666', 0);\r\nINSERT INTO `custom` VALUES ('奚泰旭', 37, '男', '15303655609', 1);\r\nINSERT INTO `custom` VALUES ('姜俊', 31, '男', '13804594050', 0);\r\nINSERT INTO `custom` VALUES ('宗俊启', 56, '男', '13504217626', 1);\r\nINSERT INTO `custom` VALUES ('寿羽', 35, '女', '13305126415', 0);\r\nINSERT INTO `custom` VALUES ('帅眉凝', 36, '女', '13504882278', 0);\r\nINSERT INTO `custom` VALUES ('平琰彩', 42, '女', '15905081963', 1);\r\nINSERT INTO `custom` VALUES ('桑承良', 28, '男', '13700051161', 0);\r\nINSERT INTO `custom` VALUES ('沃裕峰', 52, '男', '13702257875', 1);\r\nINSERT INTO `custom` VALUES ('涂昭珊', 29, '女', '13705051777', 1);\r\nINSERT INTO `custom` VALUES ('琴浩', 34, '男', '15203660790', 0);\r\nINSERT INTO `custom` VALUES ('相聪春', 52, '女', '15705312114', 1);\r\nINSERT INTO `custom` VALUES ('福娟', 38, '女', '13900278568', 0);\r\nINSERT INTO `custom` VALUES ('郗东有', 41, '男', '13205815165', 0);\r\nINSERT INTO `custom` VALUES ('雷建时', 50, '男', '13308510434', 1);\r\n\r\n-- ----------------------------\r\n-- Table structure for employ\r\n-- ----------------------------\r\nDROP TABLE IF EXISTS `employ`;\r\nCREATE TABLE `employ` (\r\n `eno` char(5) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL,\r\n `ename` char(10) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL,\r\n `eage` int(0) NULL DEFAULT NULL,\r\n `esex` char(2) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL,\r\n `ehome` varchar(15) CHARACTER SET utf8 COLLATE utf8_bin NULL DEFAULT NULL,\r\n `edu` char(8) CHARACTER SET utf8 COLLATE utf8_bin NULL DEFAULT NULL,\r\n PRIMARY KEY (`eno`) USING BTREE\r\n) ENGINE = InnoDB CHARACTER SET = utf8 COLLATE = utf8_bin ROW_FORMAT = Dynamic;\r\n\r\n-- ----------------------------\r\n-- Records of employ\r\n-- ----------------------------\r\nINSERT INTO `employ` VALUES ('001', '隗梅娟', 31, '女', '贵州', '本科');\r\nINSERT INTO `employ` VALUES ('002', '许达平', 21, '男', '辽宁', '本科');\r\nINSERT INTO `employ` VALUES ('003', '诸利克', 27, '男', '浙江', '本科');\r\nINSERT INTO `employ` VALUES ('004', '弓影', 21, '女', '云南', '本科');\r\nINSERT INTO `employ` VALUES ('005', '贝谦', 23, '男', '上海', '本科');\r\nINSERT INTO `employ` VALUES ('006', '赏秋洁', 25, '女', '河北', '本科');\r\nINSERT INTO `employ` VALUES ('007', '闻佳雁', 24, '女', '广东', '本科');\r\nINSERT INTO `employ` VALUES ('008', '奚咏丹', 30, '女', '上海', '本科');\r\nINSERT INTO `employ` VALUES ('009', '汪蓓彩', 35, '女', '山西', '本科');\r\nINSERT INTO `employ` VALUES ('010', '梁纨', 24, '女', '广东', '本科');\r\n\r\n-- ----------------------------\r\n-- Table structure for sale\r\n-- ----------------------------\r\nDROP TABLE IF EXISTS `sale`;\r\nCREATE TABLE `sale` (\r\n 
`sale_car` char(5) CHARACTER SET utf8 COLLATE utf8_general_ci NOT NULL,\r\n `sale_type` char(10) CHARACTER SET utf8 COLLATE utf8_general_ci NOT NULL,\r\n `sale_color` char(8) CHARACTER SET utf8 COLLATE utf8_general_ci NOT NULL,\r\n `sale_date` datetime(0) NOT NULL,\r\n `sale_num` int(0) NOT NULL,\r\n `sale_man` char(10) CHARACTER SET utf8 COLLATE utf8_general_ci NULL DEFAULT NULL,\r\n PRIMARY KEY (`sale_car`) USING BTREE\r\n) ENGINE = InnoDB CHARACTER SET = utf8 COLLATE = utf8_general_ci ROW_FORMAT = Dynamic;\r\n\r\n-- ----------------------------\r\n-- Records of sale\r\n-- ----------------------------\r\nINSERT INTO `sale` VALUES ('00010', '新朗逸', '白色', '2019-05-15 10:35:35', 1, '闻佳雁');\r\nINSERT INTO `sale` VALUES ('00011', '新朗逸', '黑色', '2019-06-25 08:12:06', 1, '贝谦');\r\nINSERT INTO `sale` VALUES ('00023', '轩逸', '蓝色', '2019-03-08 12:36:21', 1, '许达平');\r\nINSERT INTO `sale` VALUES ('00032', '卡罗拉', '黑色', '2019-04-26 09:07:05', 1, '隗梅娟');\r\nINSERT INTO `sale` VALUES ('00041', '速腾', '黑色', '2019-04-28 15:35:39', 1, '隗梅娟');\r\nINSERT INTO `sale` VALUES ('00052', '新宝来', '棕色', '2019-02-23 13:34:38', 1, '奚咏丹');\r\nINSERT INTO `sale` VALUES ('00062', '帕萨特', '蓝色', '2019-05-02 13:19:26', 1, '弓影');\r\nINSERT INTO `sale` VALUES ('00071', '英朗', '黑色', '2019-09-02 11:05:48', 1, '弓影');\r\nINSERT INTO `sale` VALUES ('00080', '桑塔纳', '黑色', '2019-04-15 14:52:12', 1, '梁纨');\r\nINSERT INTO `sale` VALUES ('00090', '北汽EU', '白色', '2019-07-26 11:15:14', 1, '诸利克');\r\nINSERT INTO `sale` VALUES ('00104', '雅阁', '橙色', '2019-06-09 17:08:47', 1, '汪蓓彩');\r\nINSERT INTO `sale` VALUES ('00112', 'car', 'blue', '2019-03-04 19:21:00', 1, '');\r\nINSERT INTO `sale` VALUES ('00113', 'car', 'blue', '2019-03-04 19:21:00', 1, NULL);\r\n\r\nSET FOREIGN_KEY_CHECKS = 1;\r\n" }, { "alpha_fraction": 0.5001439452171326, "alphanum_fraction": 0.5231788158416748, "avg_line_length": 37.25423812866211, "blob_id": "ba0aa3ab2c69d6098434d21bdb7639d71260adae", "content_id": "531553a238b7071f70744dd64557f92fead7fc50", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7632, "license_type": "permissive", "max_line_length": 106, "num_lines": 177, "path": "/test.py", "repo_name": "circleacid/car_sale", "src_encoding": "UTF-8", "text": "import wx\r\nimport wx.grid\r\nfrom mydb import Sql_operation\r\nfrom EOperation import EOperation\r\nfrom CarOperation import CarOperation\r\nfrom COperation import COperation\r\nfrom CarOperation import InquireOp\r\nfrom SOperation import SOperation\r\nfrom CustomManer import CustomManer\r\n#创建系统登录界面\r\nclass UserLogin(wx.Frame):\r\n '''\r\n\t登录界面\r\n\t'''\r\n\r\n #初始化登录界面\r\n def __init__(self, *args, **kw):\r\n # ensure the parent's __init__ is called\r\n super(UserLogin, self).__init__(*args, **kw)\r\n #设置窗口屏幕居中\r\n self.Center()\r\n #创建窗口\r\n self.pnl = wx.Panel(self)\r\n #调用登录界面函数\r\n self.LoginInterface()\r\n\r\n def LoginInterface(self):\r\n #创建垂直方向box布局管理器\r\n vbox = wx.BoxSizer(wx.VERTICAL)\r\n #################################################################################\r\n #创建logo静态文本,设置字体属性\r\n logo = wx.StaticText(self.pnl, label=\"轿车信息管理系统\")\r\n font = logo.GetFont()\r\n font.PointSize += 30\r\n font = font.Bold()\r\n logo.SetFont(font)\r\n #添加logo静态文本到vbox布局管理器\r\n vbox.Add(logo,\r\n proportion=0,\r\n flag=wx.FIXED_MINSIZE | wx.TOP | wx.CENTER,\r\n border=180)\r\n #################################################################################\r\n #创建静态框\r\n sb_username = wx.StaticBox(self.pnl, label=\"用户名\")\r\n sb_password = 
wx.StaticBox(self.pnl, label=\"密 码\")\r\n #创建水平方向box布局管理器\r\n hbox_username = wx.StaticBoxSizer(sb_username, wx.HORIZONTAL)\r\n hbox_password = wx.StaticBoxSizer(sb_password, wx.HORIZONTAL)\r\n #创建用户名、密码输入框\r\n self.user_name = wx.TextCtrl(self.pnl, size=(210, 25))\r\n self.user_password = wx.TextCtrl(self.pnl,\r\n size=(210, 25),\r\n style=wx.TE_PASSWORD)\r\n self.showinfo=wx.TextCtrl(self.pnl,style=wx.TE_MULTILINE | wx.HSCROLL,size=(150,150))\r\n #添加用户名和密码输入框到hbox布局管理器\r\n hbox_username.Add(self.user_name, 0, wx.EXPAND | wx.BOTTOM, 5)\r\n hbox_password.Add(self.user_password, 0, wx.EXPAND | wx.BOTTOM, 5)\r\n #将水平box添加到垂直box\r\n vbox.Add(hbox_username, proportion=0, flag=wx.CENTER)\r\n vbox.Add(hbox_password, proportion=0, flag=wx.CENTER)\r\n #################################################################################\r\n #创建水平方向box布局管理器\r\n hbox = wx.BoxSizer()\r\n #创建登录按钮、绑定事件处理\r\n login_button = wx.Button(self.pnl, label=\"登录\", size=(80, 25))\r\n login_button.Bind(wx.EVT_BUTTON, self.LoginButton)\r\n #添加登录按钮到hbox布局管理器\r\n hbox.Add(login_button, 0, flag=wx.EXPAND | wx.TOP, border=5)\r\n #将水平box添加到垂直box\r\n vbox.Add(hbox, proportion=0, flag=wx.CENTER)\r\n vbox.Add(self.showinfo, proportion=0, flag = wx.EXPAND | wx.LEFT | wx.BOTTOM | wx.RIGHT, border=5)\r\n #################################################################################\r\n #设置面板的布局管理器vbox\r\n self.pnl.SetSizer(vbox)\r\n\r\n def LoginButton(self, event):\r\n #连接cara_sale数据库\r\n op = Sql_operation(\"car_sale\")\r\n #获取users表中的用户名和密码信息,返回为二维元组\r\n np = op.FindAll(\"user\")\r\n #匹配标记\r\n login_sign = 0\r\n #匹配用户名和密码\r\n for i in np:\r\n if (i[1] == self.user_name.GetValue()) and (\r\n i[2] == self.user_password.GetValue()):\r\n login_sign = 1\r\n if i[1]=='custom':\r\n login_sign=2\r\n print(login_sign)\r\n print(i[1])\r\n print(i[2])\r\n break\r\n if login_sign == 0:\r\n self.showinfo.AppendText(\"用户名或密码错误!\")\r\n elif login_sign == 1:\r\n print(\"登录成功!\")\r\n operation = Maner(None, title=\"信息管理\", size=(1024, 668))\r\n operation.Show()\r\n self.Close(True)\r\n elif login_sign==2:\r\n print(\"登陆成功\")\r\n operation=CustomManer(None,title=\"客户界面\",size=(1024,668))\r\n operation.Show()\r\n self.Close(True)\r\nclass Maner(wx.Frame):\r\n def __init__(self, *args, **kw):\r\n super(Maner, self).__init__(*args, **kw)\r\n self.Center()\r\n self.pnl = wx.Panel(self)\r\n self.ManerBoard()\r\n\r\n def ManerBoard(self):\r\n #创建垂直方向box布局管理器\r\n self.vbox = wx.BoxSizer(wx.VERTICAL)\r\n #添加文本logo\r\n logo = wx.StaticText(self.pnl, label=\"信息管理界面\")\r\n font = logo.GetFont()\r\n font.PointSize += 30\r\n font = font.Bold()\r\n logo.SetFont(font)\r\n #将logo添加进self.vbox\r\n self.vbox.Add(logo,\r\n proportion=0,\r\n flag=wx.FIXED_MINSIZE | wx.TOP | wx.CENTER,\r\n border=5)\r\n stext=wx.StaticBox(self.pnl,label=\"选择操作\",size=(800,600))\r\n vbox_center=wx.StaticBoxSizer(stext,wx.VERTICAL)\r\n CButton = wx.Button(self.pnl, id=5, label=\"用户管理\", size=(150, 40))\r\n CButton.Bind(wx.EVT_BUTTON, self.Click)\r\n EButton = wx.Button(self.pnl, id=6, label=\"员工管理\", size=(150, 40))\r\n EButton.Bind(wx.EVT_BUTTON, self.Click)\r\n CarButton = wx.Button(self.pnl, id=7, label=\"车辆管理\", size=(150, 40))\r\n CarButton.Bind(wx.EVT_BUTTON, self.Click)\r\n SaleButton = wx.Button(self.pnl, id=8, label=\"销售信息管理\", size=(150, 40))\r\n SaleButton.Bind(wx.EVT_BUTTON,self.Click)\r\n #self.Bind(wx.wxEVT_BUTTON, self.Click, id=5, id2=7)\r\n vbox_center.Add(CButton, proportion=0, flag=wx.CENTER | wx.BOTTOM,border=40)\r\n vbox_center.Add(EButton, proportion=0, 
flag=wx.CENTER | wx.BOTTOM,border=40)\r\n vbox_center.Add(CarButton, proportion=0, flag=wx.CENTER | wx.BOTTOM,border=40)\r\n vbox_center.Add(SaleButton, proportion=0, flag=wx.CENTER | wx.BOTTOM,border=40)\r\n hbox=wx.BoxSizer()\r\n hbox.Add(vbox_center,proportion=0,flag=wx.EXPAND | wx.BOTTOM,border=5)\r\n self.vbox.Add(hbox,proportion=0,flag=wx.CENTER)\r\n self.pnl.SetSizer(self.vbox)\r\n\r\n def Click(self, event):\r\n Bid = event.GetId()\r\n if Bid == 5:\r\n User = COperation(None, title=\"客户管理界面\", size=(1024, 668))\r\n User.Show()\r\n self.Close(True)\r\n if Bid == 6:\r\n Employ=EOperation(None,title=\"员工管理界面\",size=(1024,668))\r\n Employ.Show()\r\n self.Close(True)\r\n if Bid == 7:\r\n Car=CarOperation(None,title=\"车辆管理界面\",size=(1024,668))\r\n Car.Show()\r\n self.Close(True)\r\n if Bid == 8:\r\n Sale=SOperation(None,title=\"销售信息管理界面\",size=(1024,668))\r\n Sale.Show()\r\n self.Close(True)\r\n\r\nif __name__ == '__main__':\r\n app = wx.App()\r\n login = UserLogin(None, title=\"轿车营销管理系统\", size=(1024, 668))\r\n login.Show()\r\n app.MainLoop()\r\n\"\"\"\r\nif __name__ == '__main__':\r\n app = wx.App()\r\n Userm = Maner(None, title=\"信息管理\", size=(1024, 668))\r\n Userm.Show()\r\n app.MainLoop()\r\n\"\"\"" } ]
9
characal/Sudoku-Solver
https://github.com/characal/Sudoku-Solver
eb2da0ae49fb3abdf150014d1cdd6569a67d9aaf
fc4f9609d964ca4a4477be96335bb288e6dce1e5
143c38a31b9f2bd496a1c63df0c10e5a7f995191
refs/heads/main
2023-03-05T06:38:54.953932
2021-02-09T21:14:22
2021-02-09T21:14:22
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7579505443572998, "alphanum_fraction": 0.7579505443572998, "avg_line_length": 69.75, "blob_id": "d80ea64e0a3d735a5fb696b7c11e10de7461efe6", "content_id": "2c52696fbc5534a4a9227997082c5b883e5623ba", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 566, "license_type": "no_license", "max_line_length": 359, "num_lines": 8, "path": "/README.md", "repo_name": "characal/Sudoku-Solver", "src_encoding": "UTF-8", "text": "# Sudoku-Solver\n## Purpose\n----------\nThis program can solve any sudoku puzzle, It was written in python using the colorama module for printing colours to the terminal\n\n## Modus Operandi:\n-----------------\nThe algorithm it uses is called recursive backtracking, which is where the program works out all possible solutions for the first part of the problem and then moves on to the second part using the first as a base, when it hits a roadblock it immediately moves back to the last available point on the tree. this process is repeated until it comes to an answer.\n" }, { "alpha_fraction": 0.5107311606407166, "alphanum_fraction": 0.5532920956611633, "avg_line_length": 18.920289993286133, "blob_id": "3fa63aab447f0021106b49faab3cfb0657ef94e2", "content_id": "af83f2a6a5e51a58165aa2e7eef79480dff1ea9c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2749, "license_type": "no_license", "max_line_length": 71, "num_lines": 138, "path": "/sudoku.py", "repo_name": "characal/Sudoku-Solver", "src_encoding": "UTF-8", "text": "import os\nfrom colorama import Fore\n\nboard = [\n\t[0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t[0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t[0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t[0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t[0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t[0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t[0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t[0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t[0, 0, 0, 0, 0, 0, 0, 0, 0],\n]\n\ncolours = [\nFore.WHITE,\nFore.RED,\nFore.LIGHTYELLOW_EX,\nFore.YELLOW,\nFore.GREEN,\nFore.CYAN,\nFore.BLUE,\nFore.MAGENTA,\nFore.WHITE,\n]\nc_float = 82.0\n\ndef print_board(_board):\n\tprint(\"- \" * 12)\n\ty_line = 0\n\tfor y, row in enumerate(_board):\n\t\tx_line = 0\n\t\tprint(Fore.WHITE+\"| \", end=\"\")\n\t\tfor x, num in enumerate(row):\n\t\t\tc_num = int(x / 2) + int(y / 2)\n\t\t\tif c_num >= 9:\n\t\t\t\tc_num = c_num % 8\n\t\t\tcolour = colours[c_num]\n\t\t\tif num != 0:\n\t\t\t\tprint(f\"{colour}{num} \", end=\"\")\n\t\t\telse:\n\t\t\t\tprint(f\"{colour} \", end=\"\")\n\t\t\tx_line += 1\n\t\t\tif not x_line % 3:\n\t\t\t\tprint(Fore.WHITE+\"| \", end=\"\")\n\t\tprint()\n\t\ty_line += 1\n\t\tif not y_line % 3:\n\t\t\t\tprint(Fore.WHITE+\"- \" * 13)\n\ndef fill_board(_board):\n\tfor y, row in enumerate(_board):\n\t\t_row = input(\"enter row: \")\n\t\tos.system(\"clear\")\n\t\tfor x, num in enumerate(_row):\n\t\t\ttry:\n\t\t\t\t_board[y][x] = int(num)\n\t\t\texcept ValueError:\n\t\t\t\t_board[y][x] = 0\n\n\t\tprint_board(_board)\n\ndef get_squares(x, y, _board):\n\tx = int(x / 3) * 3\n\ty = int(y / 3) * 3\n\tsquares = []\n\tfor _x in range(3):\n\t\tfor _y in range(3):\n\t\t\tif _x != x and _y != y:\n\t\t\t\tsquares.append(_board[y + _y][x + _x])\n\n\treturn squares\n\ndef get_lines(x, y, board):\n\tx_line = []\n\tfor _x in range(9):\n\t\tif _x != x:\n\t\t\tx_line.append(board[y][_x])\n\n\ty_line = []\n\tfor _y in range(9):\n\t\tif _y != y:\n\t\t\ty_line.append(board[_y][x])\n\n\treturn x_line + y_line\n\ndef get_next_square(x, y, _board):\n\treached = False\n\tfor _y in 
range(9):\n\t\tfor _x in range(9):\n\t\t\tif reached:\n\t\t\t\tif _board[_y][_x] == 0:\n\t\t\t\t\treturn _x, _y\n\t\t\telif _x == x and _y == y:\n\t\t\t\treached = True\n\treturn None\n\ndef get_nums(x, y, _board):\n\tused_nums = get_squares(x, y, _board) + get_lines(x, y, _board)\n\tavailable_nums = [i for i in list(range(1, 10)) if i not in used_nums]\n\ttry:\n\t\tavailable_nums.remove(0)\n\texcept:\n\t\tpass\n\treturn available_nums\n\ndef get_legal(x, y, _board):\n\tused_nums = get_squares(x, y, _board) + get_lines(x, y, _board)\n\treturn _board[y][x] in used_nums\n\ndef fin(_board):\n\tprint_board(_board)\n\tquit()\n\ndef copy_board(_board):\n\treturn_board = []\n\tfor ls in _board:\n\t\treturn_board.append(ls[:])\n\treturn return_board\n\ndef solve_board(x, y, _board):\n\tnew_board = copy_board(_board)\n\tnums = get_nums(x, y, new_board)\n\tif not nums:\n\t\treturn\n\tfor num in nums:\n\t\tnew_board[y][x] = num\n\t\ttry:\n\t\t\tnx, ny = get_next_square(x, y, new_board)\n\t\texcept:\n\t\t\tfin(new_board)\n\t\tsolve_board(nx, ny, new_board)\n\n\nif __name__ == \"__main__\":\n\tfill_board(board)\n\tsolve_board(0, 0, board)\n" } ]
2
ihzarizkyk/Sample-Flask
https://github.com/ihzarizkyk/Sample-Flask
0692d27b763617618d0434cf984786f2d2ab18d3
7abd0e4a785d7a524c5e78afa8e9b9cb0fc6ac35
3248fce7afa2e8756a9e8822dda6ecd76c1a41e2
refs/heads/main
2023-01-28T02:31:51.061379
2020-12-06T23:40:12
2020-12-06T23:40:12
319,155,656
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7036144733428955, "alphanum_fraction": 0.7060241103172302, "avg_line_length": 24.18181800842285, "blob_id": "12390885946bf0debe0c2169848e52cb992299bc", "content_id": "c4d80ce899000afeb7d9ed7c0ba5cc7fb76da64d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 830, "license_type": "no_license", "max_line_length": 59, "num_lines": 33, "path": "/app.py", "repo_name": "ihzarizkyk/Sample-Flask", "src_encoding": "UTF-8", "text": "# import flask\nfrom flask import (Flask, render_template, url_for, abort)\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_migrate import Migrate\n\n# buat variabel app\napp = Flask(__name__)\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///user.db'\ndb = SQLAlchemy(app)\nmigrate = Migrate(app,db)\n\n# buat class untuk membuat struktur table\nclass User(db.Model):\n\tid = db.Column(db.Integer, primary_key=True)\n\tnama = db.Column(db.String(50),nullable=False)\n\n\tdef __repr__(self):\n\t\treturn \"<User %r>\" % self.id\n\n\tdef __init__(self,nama):\n\t\tself.nama = nama\n# buat route untuk url (\"/\") sebagai index / baseurl\[email protected](\"/\")\ndef index():\n\treturn render_template(\"index.html\")\n\[email protected](\"/tentang\")\ndef tentang():\n\treturn render_template(\"tentang.html\")\n\[email protected](\"/kontak\")\ndef kontak():\n\treturn render_template(\"kontak.html\")" }, { "alpha_fraction": 0.6673004031181335, "alphanum_fraction": 0.6768060922622681, "avg_line_length": 17.172412872314453, "blob_id": "fc3f536b19f2c8fe7775628e5e6c7e02c3471c8c", "content_id": "188b416b01a3cb08f23205cb8c66261717412363", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 526, "license_type": "no_license", "max_line_length": 51, "num_lines": 29, "path": "/README.md", "repo_name": "ihzarizkyk/Sample-Flask", "src_encoding": "UTF-8", "text": "# Sample Flask\n\n**Install Virtual Environment (pake pip / pip3) :**\n`$ pip3 install virtualenv`\n\n**Buat Virtual Environment :**\n`$ virtualenv env`\n\n**Aktifkan Virtual Environment (Linux) :**\n`$ source env/bin/activate`\n\n**Aktifkan Debug Mode Flask :**\n`$ export FLASK_DEBUG=1`\n\n**Menjalankan Server Localhost Flask :**\n`$ flask run`\n\n**Install Flask :**\n`$ pip3 install flask`\n\n**Install Flask-Migrate :**\n`$ pip3 install Flask-Migrate`\n\n**Migrate :**\n```\n$ flask db init\n$ flask db migrate -m \"Migrate\"\n$ flask db upgrade\n```" } ]
2
Sharptsa/canardpc-base
https://github.com/Sharptsa/canardpc-base
e5249dd8d0455e05f2e1e8b51ca587aeee445b55
61e20908a26f68549a20cf8a04066480b30e3d47
e0a5ce49e703c26a11e31fb5c9ca592033322dca
refs/heads/master
2019-04-20T00:19:45.631199
2015-05-03T17:36:27
2015-05-03T17:36:27
25,120,159
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.44252872467041016, "alphanum_fraction": 0.4479565918445587, "avg_line_length": 39.68831253051758, "blob_id": "038528cd7b3cc52521f7676eae355748b024976f", "content_id": "af9f138aee3e9a690eb7a1da1a7795f96d05e1a9", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 3134, "license_type": "permissive", "max_line_length": 110, "num_lines": 77, "path": "/canardpc-tests/src/templates/blocks/stats_block.html", "repo_name": "Sharptsa/canardpc-base", "src_encoding": "UTF-8", "text": "{% if reviewer.stat %}\n<div id=\"myModalScore\" class=\"modal hide\" tabindex=\"-1\" role=\"dialog\" aria-labelledby=\"myModalScoreLabel\"\n aria-hidden=\"true\">\n <div class=\"modal-header\">\n <button type=\"button\" class=\"close\" data-dismiss=\"modal\" aria-hidden=\"true\">×</button>\n <h3 id=\"myModalScoreLabel\">R&eacute;partition par note</h3>\n </div>\n <div class=\"modal-body\">\n <div class=\"review-chart\">\n <table>\n <tbody>\n <tr>\n <td></td>\n {% set max_score_occurrence = reviewer.stat.byScore.max %}\n {% set list_total_score = reviewer.stat.byScore.totalByScore %}\n {% for score, total in list_total_score %}\n <td style=\"vertical-align: bottom;\">\n {{ total }}\n\n <div class=\"bar\" style=\"height:{{ (total * 100)/ max_score_occurrence }}px;\"></div>\n </td>\n {% endfor %}\n </tr>\n <tr>\n <td><strong>Note / 10</strong> :</td>\n {% for score, total in reviewer.stat.byScore.totalByScore %}\n <td>{{score}}</td>\n {% endfor %}\n </tr>\n </tbody>\n </table>\n </div>\n </div>\n <div class=\"modal-footer\">\n <button class=\"btn\" data-dismiss=\"modal\" aria-hidden=\"true\">Close</button>\n </div>\n</div>\n<div id=\"myModalGenre\" class=\"modal hide\" tabindex=\"-1\" role=\"dialog\" aria-labelledby=\"myModalGenreLabel\"\n aria-hidden=\"true\">\n <div class=\"modal-header\">\n <button type=\"button\" class=\"close\" data-dismiss=\"modal\" aria-hidden=\"true\">×</button>\n <h3 id=\"myModalGenreLabel\">R&eacute;partition par genre</h3>\n </div>\n <div class=\"modal-body\">\n <div class=\"review-chart\">\n <table>\n <tbody>\n <tr>\n <td></td>\n {% set max_genre_occurrence = reviewer.stat.byGenre.max %}\n {% set list_total_genre = reviewer.stat.byGenre.totalByGenre %}\n {% for genre, total in list_total_genre %}\n {% if total > 1 %}\n <td style=\"vertical-align: bottom;\">\n {{ total }}\n <div class=\"bar\" style=\"height:{{ (total * 100)/ max_genre_occurrence }}px;\"></div>\n </td>\n {% endif %}\n {% endfor %}\n </tr>\n <tr>\n <td>&nbsp;</td>\n {% for genre, total in list_total_genre %}\n {% if total > 1 %}\n <td><a href=\"/genre/{{ genre }}\"><span style=\"font-size: 8pt;\">{{ genre }}</span></a></td>\n {% endif %}\n {% endfor %}\n </tr>\n </tbody>\n </table>\n </div>\n </div>\n <div class=\"modal-footer\">\n <button class=\"btn\" data-dismiss=\"modal\" aria-hidden=\"true\">Close</button>\n </div>\n</div>\n{% endif %}" }, { "alpha_fraction": 0.5858403444290161, "alphanum_fraction": 0.5928599834442139, "avg_line_length": 39.209678649902344, "blob_id": "788140f07909f183acb72f6eaf9ed1ab6f967c9e", "content_id": "77c6bb11bfbac5bca70e3d9cef5b486ff00d7a1e", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4986, "license_type": "permissive", "max_line_length": 120, "num_lines": 124, "path": "/canardpc-tests/test/data_migration_utils.py", "repo_name": "Sharptsa/canardpc-base", "src_encoding": "UTF-8", "text": "import csv\nimport json\nfrom my_mongo_client 
import MyMongoClientReader\nimport utils\nimport mongo_utils\n\nmongo_util = mongo_utils.MongoUtils(MyMongoClientReader(env='test'))\n\n\ndef generate_reviews_json_data(magazine_filename, review_filename):\n magazines = {}\n with open(\"prod/%s.json\" % magazine_filename) as file_magazine:\n\n data_magazine = file_magazine.read()\n json_data_magazine = json.loads(data_magazine)\n for item in json_data_magazine:\n magazines[item['issueNumber']] = item['issueDate']\n\n reviewer = {}\n with open(\"prod/reviewer.json\") as file_reviewer:\n data_review = file_reviewer.read()\n json_data_review = json.loads(data_review)\n for item in json_data_review:\n reviewer[item['name']] = item['reviewerTnUrl']\n\n with open(\"prod/%s.csv\" % review_filename, 'rb') as file_input:\n\n reader = csv.DictReader(file_input, delimiter=';', fieldnames=[\n 'title', 'subTitle', 'year', 'issue', 'reviewer', 'score', 'displayedGenre', 'primaryGenre',\n 'secondaryGenre', 'studio', 'publisher', 'coverTnUrl', 'plateform', 'critic'\n ])\n\n all_row = []\n\n studio_names = []\n\n for row in reader:\n row['year'] = int(row['year'])\n\n if row['primaryGenre'] == '':\n row['primaryGenre'] = row['displayedGenre']\n\n if row['score'].isdigit():\n row['score'] = int(row['score'])\n else:\n row['otherScore'] = row['score']\n row['score'] = -1\n\n studio_and_country = utils.extractStudioAndCountry(row['studio'])\n if studio_and_country:\n row['studioName'] = studio_and_country[0]\n row['country'] = studio_and_country[1]\n else:\n row['studioName'] = row['studio']\n\n if row['studioName'] not in studio_names:\n studio_names.append(row['studioName'])\n\n if row['publisher'] == '':\n row['publisher'] = row['studioName']\n\n row['year'] = int(row['year'])\n row['issue'] = {'issueNumber': int(row['issue']),\n 'coverTnUrl': 'http://www.canardpc.com/img/couv/couv_Canard_PC_%s_169.jpg' % row['issue'],\n 'issueDate': magazines[int(row['issue'])]}\n\n row['reviewer'] = {'name': row['reviewer'], 'reviewerTnUrl': reviewer[row['reviewer']]}\n if row['coverTnUrl'] is None or row['coverTnUrl'] == '':\n row['coverTnUrl'] = \"../static/images/pas_de_jaquette.png\"\n all_row.append(row)\n\n with open(\"prod/%s.json\" % review_filename, 'wb') as review_json_file_output:\n review_json_file_output.write(json.dumps(all_row))\n\n with open(\"prod/studio.json\", 'wb') as studio_json_file_output:\n studios = [{'name': elem, 'studioTnUrl': '../static/images/no-image_106px.png'} for elem in studio_names]\n studio_json_file_output.write(json.dumps(studios))\n\n\ndef generate_magazine_json_data(magazine_filename):\n with open(\"prod/%s.csv\" % magazine_filename, 'rb') as file_input:\n reader = csv.DictReader(file_input, delimiter=';', fieldnames=['issueNumber', 'issueDate', 'title', 'subTitle'])\n\n all_row = []\n for row in reader:\n row['issueNumber'] = int(row['issueNumber'])\n row['coverUrl'] = \"http://www.canardpc.com/img/couv/couv_Canard_PC_%s.jpg\" % row['issueNumber']\n row['coverTnUrl'] = \"http://www.canardpc.com/img/couv/couv_Canard_PC_%s_169.jpg\" % row['issueNumber']\n all_row.append(row)\n\n with open(\"prod/%s.json\" % magazine_filename, 'wb') as file_output:\n file_output.write(json.dumps(all_row))\n\n\ndef generate_genre_json_data():\n with open('prod/genre.csv', 'rb') as genre_file_input:\n reader = csv.DictReader(genre_file_input, delimiter=';', fieldnames=['genre'])\n all_genres = []\n for row in reader:\n all_genres.append(row['genre'])\n\n print sorted(list(set(all_genres)))\n\n\ndef extract_studios_from_review_collection():\n with 
open(\"prod/studio.json\", 'wb') as file_output:\n studios = [{'name': studio_name, \"studioTnUrl\": \"../static/images/no-image_106px.png\"}\n for studio_name in mongo_util.database['review'].distinct('studioName')]\n\n file_output.write(json.dumps(studios))\n\n\nif __name__ == '__main__':\n generate_magazine_json_data('magazine_1_280')\n generate_reviews_json_data('magazine_1_280', 'review_1_200')\n\n mongo_util.insertJsonFile('review', 'review_1_200.json', 'title', parent_folder='prod')\n\n mongo_util.insertJsonFile('magazine', 'magazine_1_280.json', 'issueNumber', parent_folder='prod')\n\n mongo_util.insertJsonFile('reviewer', 'reviewer.json', 'name', parent_folder='prod')\n\n extract_studios_from_review_collection()\n mongo_util.insertJsonFile('studio', 'studio.json', 'name', parent_folder='prod')\n" }, { "alpha_fraction": 0.6817693710327148, "alphanum_fraction": 0.6865540742874146, "avg_line_length": 28.5982666015625, "blob_id": "0995aa94148765b85db65c2ab4a0d1b7f762d7dd", "content_id": "a1120c1aa4b17cf412b56f8fbc9823ea85b58436", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10242, "license_type": "permissive", "max_line_length": 112, "num_lines": 346, "path": "/canardpc-tests/src/web.py", "repo_name": "Sharptsa/canardpc-base", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nimport sys\nimport os\nimport json\nfrom flask.ext.security import LoginForm\nimport admin\n\nfrom bson import json_util\nfrom flask import render_template, Flask, request, jsonify, redirect, session\nfrom flask_login import LoginManager, login_user, logout_user, login_required, current_user\nfrom my_mongo_client import MyMongoClientReader\n\nfrom security_access import login_serializer, hash_pass, SecurityDaoReader\nfrom data_access import MagazineDao, ReviewDao, ReviewerDao, RecipeDao, ToolDao\nfrom my_exceptions import MissingMongoConfiguration\n\n\napp = Flask(__name__)\n\nlogin_manager = LoginManager()\n\nlogin_manager.init_app(app)\n\nif 'SECRET_KEY' in os.environ:\n app.config['SECRET_KEY'] = os.environ['SECRET_KEY']\n\napp.config['SECURITY_REGISTERABLE'] = False\n\ndebug_mode = False\nif 'DEBUG' in os.environ:\n app.config['DEBUG'] = True\n\nenv = \"test\"\nif 'ENV' in os.environ:\n env = os.environ['ENV']\n\nmongo_client_reader = MyMongoClientReader(env)\n\napp.register_blueprint(admin.bp, url_prefix='/admin')\n\n\ndef format_datetime_fr(date):\n return u'le {day} à {hour}'.format(day=date.strftime('%d/%m/%Y'), hour=date.strftime('%H:%M:%S'))\n\napp.jinja_env.filters['datetime_fr'] = format_datetime_fr\n\n@login_manager.user_loader\ndef load_user(userid):\n return security_dao(mongo_client_reader).get_user(userid)\n\n\n@login_manager.token_loader\ndef load_token(token):\n data = login_serializer.loads(token)\n\n user = security_dao(mongo_client_reader).get_user(data[0])\n\n if user and data[1] == user.password:\n return user\n\n\[email protected]('/')\ndef index():\n user_id = (current_user.get_id() or None)\n return render_template('index.html', user_id=user_id)\n\n\[email protected](\"/login\", methods=[\"GET\", \"POST\"])\ndef login_page():\n form = LoginForm()\n if request.method == 'POST':\n email = form.email.data\n password = form.password.data\n user = security_dao(mongo_client_reader).get_user(email)\n\n if user and hash_pass(password) == user.password:\n login_user(user, remember=True)\n session['role'] = user.role\n return redirect(\"/admin/canardeurs.html\")\n\n return render_template('login.html', 
form=form)\n\n\[email protected]('/logout')\n@login_required\ndef logout():\n logout_user()\n return redirect('/')\n\n\[email protected]('/search.html', methods=['GET'])\ndef search():\n search_value = request.args.get('searchInput', '')\n user_id = (current_user.get_id() or None)\n reviews = review_dao(mongo_client_reader).get_all_review_by_title(search_value)\n return render_template('search.html', reviews=reviews, search_value=search_value, user_id=user_id)\n\n\[email protected]('/advancedSearch.html', methods=['GET'])\ndef advanced_search_page():\n user_id = (current_user.get_id() or None)\n return render_template('advanced_search.html', user_id=user_id)\n\n\ndef build_search_criterii(args):\n result = {}\n if len(args) == 0:\n return result\n\n title = request.args.get('title', None)\n if title and len(title) > 0:\n result['title'] = title\n\n reviewer = request.args.get('reviewer', None)\n if reviewer:\n result['reviewer.name'] = reviewer\n\n year = request.args.get('year', None)\n if year and len(year) > 0 and year.isdigit():\n result['year'] = int(year)\n\n genre = request.args.get('genre', None)\n if genre and len(genre) > 0:\n result['primaryGenre'] = genre\n\n plateform = request.args.get('plateform', None)\n if plateform and len(plateform) > 0:\n result['plateform'] = plateform\n\n score = request.args.get('score', None)\n if score and len(score) > 0 and score.isdigit():\n result['score'] = int(score)\n\n return result\n\n\[email protected]('/advancedSearch', methods=['GET'])\ndef advanced_search_results():\n search_criterii = build_search_criterii(request.args)\n reviews = review_dao(mongo_client_reader).get_all_review_by(search_filter=search_criterii)\n user_id = (current_user.get_id() or None)\n return render_template('advanced_search_result.html', user_id=user_id, reviews=reviews)\n\n\[email protected]('/gameTitles')\ndef all_game_titles():\n result = review_dao(mongo_client_reader).get_all_game_titles()\n return json_util.dumps(result)\n\n\[email protected]('/magazines.html')\ndef magazines_html():\n magazine_years = magazine_dao(mongo_client_reader).get_all_magazine_years()\n\n year = request.args.get('year', None)\n if year is None:\n year = magazine_years[-1]\n\n magazines = magazine_dao(mongo_client_reader).get_all_magazines_by_year(year)\n user_id = (current_user.get_id() or None)\n return render_template('magazines.html', magazine_years=magazine_years, magazines=magazines, year=int(year),\n user_id=user_id)\n\n\[email protected]('/magazines')\ndef magazines():\n magazines = magazine_dao(mongo_client_reader).get_all_magazines(group_by_year=False)\n return json_util.dumps(magazines)\n\n\[email protected]('/magazine/<issue_number>.html')\ndef magazine(issue_number):\n magazine = magazine_dao(mongo_client_reader).get_magazine_by_issue_number(int(issue_number))\n user_id = (current_user.get_id() or None)\n return render_template('magazine.html', magazine=magazine, user_id=user_id)\n\n\[email protected]('/scores.html')\ndef score_html():\n scores = review_dao(mongo_client_reader).get_all_scores()\n user_id = (current_user.get_id() or None)\n return render_template('scores.html', scores=scores, user_id=user_id)\n\n\[email protected]('/reviews.html')\ndef reviews_html():\n reviews = review_dao(mongo_client_reader).get_all_review_titles()\n user_id = (current_user.get_id() or None)\n return render_template('reviews.html', reviews=reviews, user_id=user_id)\n\n\[email protected]('/review/<review_title>', methods=['GET'])\ndef review(review_title):\n review = 
review_dao(mongo_client_reader).get_review_by_title(review_title)\n user_id = (current_user.get_id() or None)\n return render_template('review.html', review=review, user_id=user_id)\n\n\[email protected]('/review/<review_id>.html', methods=['GET'])\ndef review_by_id(review_id):\n review = review_dao(mongo_client_reader).get_review_by_oid(review_id)\n user_id = (current_user.get_id() or None)\n return render_template('review.html', review=review, user_id=user_id)\n\n\[email protected]('/reviewers')\ndef reviewers():\n reviewers = reviewer_dao(mongo_client_reader).get_all_reviewers()\n return json_util.dumps(reviewers)\n\n\[email protected]('/reviewers.html')\ndef reviewers_html():\n reviewers = reviewer_dao(mongo_client_reader).get_all_reviewers()\n user_id = (current_user.get_id() or None)\n return render_template('reviewers.html', reviewers=reviewers, user_id=user_id)\n\n\[email protected]('/reviewer/<reviewer_name>.html')\ndef reviewer_html(reviewer_name):\n reviewer = reviewer_dao(mongo_client_reader).get_reviewer_by_name(reviewer_name)\n reviewer['stat'] = reviewer_dao(mongo_client_reader).get_stats_for_a_reviewer(reviewer_name)\n user_id = (current_user.get_id() or None)\n return render_template('reviewer.html', reviewer=reviewer, user_id=user_id)\n\n\[email protected]('/reviewer/<reviewer_name>/reviews')\ndef reviewer_review(reviewer_name):\n reviewer = review_dao(mongo_client_reader).get_reviews_by_reviewer(reviewer_name)\n return json_util.dumps(reviewer)\n\n\[email protected]('/genres.html')\ndef genre_html():\n genres = review_dao(mongo_client_reader).get_all_genres()\n user_id = (current_user.get_id() or None)\n return render_template('genres.html', genres=genres, user_id=user_id)\n\n\[email protected]('/genres')\ndef genre():\n genres = review_dao(mongo_client_reader).get_all_genres_name()\n return json_util.dumps(genres)\n\n\[email protected]('/genre/<genre_name>.html')\ndef genres(genre_name):\n genre = review_dao(mongo_client_reader).get_genre_by_name(genre_name)\n user_id = (current_user.get_id() or None)\n return render_template('genre.html', genre=genre, user_id=user_id)\n\n\[email protected]('/studio/<studio_name>')\ndef studio(studio_name):\n studio = review_dao(mongo_client_reader).get_studio_by_name(studio_name)\n user_id = (current_user.get_id() or None)\n return render_template('studio.html', studio=studio, user_id=user_id)\n\n\[email protected]('/publisher/<publisher_name>')\ndef publisher(publisher_name):\n publisher = review_dao(mongo_client_reader).get_publisher_by_name(publisher_name)\n user_id = (current_user.get_id() or None)\n return render_template('publisher.html', publisher=publisher, user_id=user_id)\n\n\[email protected]('/year/<year>.html')\ndef year_html(year):\n user_id = (current_user.get_id() or None)\n try:\n year_int = int(year)\n year_data = review_dao(mongo_client_reader).get_review_by_year(year_int)\n return render_template('year.html', year=year_data, user_id=user_id)\n except ValueError as ex:\n return render_template('year.html', user_id=user_id)\n\n\[email protected]('/contact.html')\ndef contact():\n user_id = (current_user.get_id() or None)\n return render_template('contact.html', user_id=user_id)\n\n\[email protected]('/about.html')\ndef about():\n user_id = (current_user.get_id() or None)\n return render_template('about.html', user_id=user_id)\n\n\[email protected]('/google2cbe3d01e021f3ed.html')\ndef google():\n return render_template('google2cbe3d01e021f3ed.html')\n\n\[email protected]('/robots.txt')\ndef robots():\n return 
render_template('robots.txt')\n\n\n@app.errorhandler(404)\ndef not_found(error):\n    return render_template('404.html'), 404\n\n\n@app.errorhandler(500)\ndef internal_error(error):\n    return render_template('500.html'), 500\n\n\ndef review_dao(mongo_client):\n    return ReviewDao(mongo_client)\n\n\ndef reviewer_dao(mongo_client):\n    return ReviewerDao(mongo_client)\n\n\ndef magazine_dao(mongo_client):\n    return MagazineDao(mongo_client)\n\n\ndef recipe_dao(mongo_client):\n    return RecipeDao(mongo_client)\n\n\ndef tool_dao(mongo_client):\n    return ToolDao(mongo_client)\n\n\ndef security_dao(mongo_client):\n    return SecurityDaoReader(mongo_client)\n\n\ndef display_usage():\n    print \"Usage: python web.py <env=[test|prod]> <debug=[True|False]>\"\n\n\nif __name__ == '__main__':\n    try:\n        print \"Running on env='%s' and debug_mode='%s'\" % (env, debug_mode)\n        app.run(port=4000)\n    except MissingMongoConfiguration as ex:\n        print ex\n        mongo_client_reader.close()\n        sys.exit(-1)\n" }, { "alpha_fraction": 0.6025825142860413, "alphanum_fraction": 0.6054519414901733, "avg_line_length": 21.419355392456055, "blob_id": "ffe5a4493b3e66bf567035bcfe9497585834bcc8", "content_id": "b104d5c23057a744a2d6204dc56cc50f90dbe262", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 697, "license_type": "permissive", "max_line_length": 99, "num_lines": 31, "path": "/canardpc-tests/src/templates/search.html", "repo_name": "Sharptsa/canardpc-base", "src_encoding": "UTF-8", "text": "{% extends \"layout.html\" %}\n\n{% block page_title %}R&eacute;sultats de la recherche &apos;{{ search_value }}&apos;{% endblock %}\n\n{% block content %}\n\n\n\n{% include \"blocks/search_block.html\" %}\n\n\n<div class=\"search_results\">\n\t<h3>R&eacute;sultats de la recherche &apos;{{ search_value }}&apos;</h3>\n\n\t{% for review in reviews %}\n\t<div class=\"row pas review-block\">\n\t    {% set enableYear = True %}\n\t    {% set enableGenre = True %}\n\t    {% set enableStudio = True %}\n\t    {% set enablePublisher = True %}\n\n\n\t    {% include \"blocks/game_info_block.html\" %}\n\n\t    {% include \"blocks/reviewer_block.html\" %}\n\n\t    {% include \"blocks/issue_block.html\" %}\n\t</div>\n\t{% endfor %}\n</div>\n{% endblock %}\n\n\n" }, { "alpha_fraction": 0.502894937992096, "alphanum_fraction": 0.5062034726142883, "avg_line_length": 26.5, "blob_id": "73ddc37751f99b4fc03daf64e2dd05de2cb68a26", "content_id": "f9cbe9b20c160459a56ae6f3c5d05fb1464a3a52", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 1209, "license_type": "permissive", "max_line_length": 87, "num_lines": 44, "path": "/canardpc-tests/src/templates/magazine.html", "repo_name": "Sharptsa/canardpc-base", "src_encoding": "UTF-8", "text": "{% extends \"layout.html\" %}\n\n{% block page_title %}Magazine n&deg;{{ magazine.issueNumber }}{% endblock %}\n\n{% block content %}\n{% if magazine %}\n<div class=\"text-left\">\n    <div class=\"row-fluid\">\n        <div class=\"span3 text-center\">\n            <a href=\"{{ magazine.coverUrl }}\"><img src=\"{{ magazine.coverTnUrl }}\"></a>\n\n            <p>\n                N&deg; {{ magazine.issueNumber }}\n                <br>\n                Paru le {{ magazine.issueDate }}\n            </p>\n        </div>\n        <div class=\"span9\">\n            <p><span class=\"magazine-title\">{{ magazine.title }}</span></p>\n\n            <p><span class=\"magazine-subtitle\">{{ magazine.subTitle }}</span></p>\n        </div>\n    </div>\n    <h2 class=\"text-left\">Jeux test&eacute;s</h2>\n\n    <div class=\"row\">\n        {% for review in magazine.review %}\n        <div class=\"row-fluid 
review-block\">\n\n {% set enableYear = True %}\n {% set enableGenre = True %}\n {% set enableStudio = True %}\n {% set enablePublisher = True %}\n\n {% include \"blocks/game_info_block.html\" %}\n\n {% include \"blocks/reviewer_block.html\" %}\n </div>\n\n {% endfor %}\n </div>\n</div>\n{% endif %}\n{% endblock %}" }, { "alpha_fraction": 0.4386584162712097, "alphanum_fraction": 0.5110326409339905, "avg_line_length": 44.29999923706055, "blob_id": "f53dcb2ac48b725a78c86f11e8f5cf755313c15b", "content_id": "2487c523a0f6b871415b1a9361fe77dc134f184c", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2266, "license_type": "permissive", "max_line_length": 140, "num_lines": 50, "path": "/canardpc-tests/test/utils_test.py", "repo_name": "Sharptsa/canardpc-base", "src_encoding": "UTF-8", "text": "from unittest import TestCase\nimport utils\n\n\nclass TestUtils(TestCase):\n def test_extractStudioAndCountry(self):\n self.assertEqual(('Ubisoft', 'France'), utils.extractStudioAndCountry('Ubisoft (France)'))\n self.assertIsNone(utils.extractStudioAndCountry('Ubisoft'))\n\n\n def test_extract_year_of_issue(self):\n self.assertEqual(2006, utils.extract_year_of_issue({'issueDate': '15/02/2006'}))\n\n\n def test_group_by_alphabet(self):\n data = [{'pouet': 'Zombi U'}, {'pouet': 'cossacks'}, {'pouet': 'call of duty'}, {'pouet': 'anno 1410'}, {'pouet': 'Alpha protocol'}]\n\n self.assertEqual([('A', [{'pouet': 'Alpha protocol'}, {'pouet': 'anno 1410'}]),\n ('C', [{'pouet': 'call of duty'}, {'pouet': 'cossacks'}]),\n ('Z', [{'pouet': 'Zombi U'}])], utils.group_by_alphabet(data, key='pouet'))\n\n\n def test_group_by_year(self):\n data = [\n {'issueNumber': 5, 'issueDate': '26/11/2003'},\n {'issueNumber': 1, 'issueDate': '26/04/2003'},\n {'issueNumber': 15, 'issueDate': '26/10/2004'},\n {'issueNumber': 20, 'issueDate': '26/11/2005'},\n {'issueNumber': 50, 'issueDate': '26/11/2006'},\n {'issueNumber': 45, 'issueDate': '26/05/2006'},\n ]\n self.maxDiff = None\n self.assertEqual([(2003, [{'issueNumber': 5, 'issueDate': '26/11/2003'},\n {'issueNumber': 1, 'issueDate': '26/04/2003'}]),\n (2004, [{'issueNumber': 15, 'issueDate': '26/10/2004'}]),\n (2005, [{'issueNumber': 20, 'issueDate': '26/11/2005'}]),\n (2006, [{'issueNumber': 50, 'issueDate': '26/11/2006'},\n {'issueNumber': 45, 'issueDate': '26/05/2006'}])], utils.group_by_year(data))\n\n\n def test_group_by(self):\n self.assertEqual({3: ['foo', 'bar'],\n 4: ['toto']},\n utils.group_by(len, ['foo', 'bar', 'toto']))\n\n\n def test_convert_list_of_pair_to_list_of_dict(self):\n list_of_pair = [(0, 10), (5, 5)]\n self.assertEqual([{'0': 10}, {'5': 5}],\n utils.convert_list_of_pair_to_list_of_dict(list_of_pair))\n\n" }, { "alpha_fraction": 0.5347092151641846, "alphanum_fraction": 0.5440900325775146, "avg_line_length": 23.272727966308594, "blob_id": "9f356f5f46bb3ea872f808ccdfbf9affec16e665", "content_id": "32ccc362128ae23c41b66a996dc2c83bb8a9c29a", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 533, "license_type": "permissive", "max_line_length": 77, "num_lines": 22, "path": "/canardpc-tests/src/templates/publishers.html", "repo_name": "Sharptsa/canardpc-base", "src_encoding": "UTF-8", "text": "{% extends \"layout.html\" %}\n\n{% block page_title %}Tous des &eacute;diteurs{% endblock %}\n\n{% block content %}\n\n<h2>Tous des &eacute;diteurs</h2>\n{% if publishers %}\n<div class=\"controls-row text-left\">\n{% for letter, 
publisher_list in publishers %}\n <div class=\"span3\">\n <h3>{{ letter }}</h3>\n <ul>\n {% for publisher in publisher_list %}\n <li><a href=\"/publisher/{{ publisher }}\">{{ publisher }}</a></li>\n {% endfor %}\n </ul>\n </div>\n{% endfor %}\n</div>\n{% endif %}\n{% endblock %}" }, { "alpha_fraction": 0.5138121843338013, "alphanum_fraction": 0.6933701634407043, "avg_line_length": 15.454545021057129, "blob_id": "a0370ffed590bb4c047201d8dd86dcafec7482fb", "content_id": "df6f1594a4fe56efd9ed714dcf017a6bb2b9c52c", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 362, "license_type": "permissive", "max_line_length": 24, "num_lines": 22, "path": "/requirements.txt", "repo_name": "Sharptsa/canardpc-base", "src_encoding": "UTF-8", "text": "Flask==0.9\nFlask-Login==0.2.4\nFlask-Mail==0.9.0\nFlask-Principal==0.4.0\nFlask-Security==1.6.6\nFlask-WTF==0.8.3\nJinja2==2.6\nWTForms==1.0.4\nWerkzeug==0.8.3\nargparse==1.2.1\nblinker==1.2\nflask-mongoengine==0.7.0\ngunicorn==0.17.4\nitsdangerous==0.21\nmongoengine==0.8.2\npasslib==1.6.1\npyasn1==0.1.7\npycrypto==2.6\npymongo==2.7.1\nrsa==3.1.1\nwsgiref==0.1.2\nMarkdown==2.4.1\n" } ]
8
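A note on the `web.py` record above: Flask invokes `errorhandler` callbacks with the raised error object, which is why `not_found` and `internal_error` must accept an `error` parameter even though they ignore it; a zero-argument handler raises a `TypeError` the moment an error page is rendered. A minimal self-contained sketch of that shape (the handler names and templates mirror the file; the `app` object stands in for the module's Flask instance):

```python
from flask import Flask, render_template

app = Flask(__name__)

@app.errorhandler(404)
def not_found(error):
    # Flask passes the error object in; accept it even when unused.
    return render_template('404.html'), 404

@app.errorhandler(500)
def internal_error(error):
    return render_template('500.html'), 500
```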
ihoromi4/travian4-bot
https://github.com/ihoromi4/travian4-bot
29bb7ff1a16eefd011488f0b9358fae9809d4cde
c1b6ba7a705b22034555092e0762abfedb42ff4e
efc14cba0121ca21d05cf886aaba596cf271608d
refs/heads/master
2021-05-11T02:46:29.282304
2017-05-22T16:19:57
2017-05-22T16:19:57
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6936936974525452, "alphanum_fraction": 0.707207202911377, "avg_line_length": 12, "blob_id": "7f6af6513c7196c83294e9381fa343062863408c", "content_id": "86742ec8288e838c8e7c0bf22fe268b0b5fc79e8", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 222, "license_type": "permissive", "max_line_length": 38, "num_lines": 17, "path": "/readme.md", "repo_name": "ihoromi4/travian4-bot", "src_encoding": "UTF-8", "text": "# Bot for Travian Legends browser game\n\n## Dependings\n* travianapi\n* pattern-observer\n* pyqt5\n* pyside\n* htmlPy\n* cx_freeze\n\n## Running\nrun python script:\n`python3 bot_gui_qt5.py`\n\n## Documentation\nPlease read:\n doc/*\n\n" }, { "alpha_fraction": 0.8333333134651184, "alphanum_fraction": 0.8333333134651184, "avg_line_length": 34, "blob_id": "8d1ae6d27961a3ea7893b41a8e0692ab3507c32d", "content_id": "b89b20862c146e7595757f47c0cc42e0781d88c5", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 36, "license_type": "permissive", "max_line_length": 34, "num_lines": 1, "path": "/travianbot/controller/__init__.py", "repo_name": "ihoromi4/travian4-bot", "src_encoding": "UTF-8", "text": "\nfrom .controller import Controller\n" }, { "alpha_fraction": 0.6443418264389038, "alphanum_fraction": 0.6605080962181091, "avg_line_length": 18.68181800842285, "blob_id": "d7306b2dfb0eafd564f26385a89e6edb73006a2c", "content_id": "88562ea01009958740508d73957aa50b8d51ab84", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 866, "license_type": "permissive", "max_line_length": 53, "num_lines": 44, "path": "/travianbot/model/farmer.py", "repo_name": "ihoromi4/travian4-bot", "src_encoding": "UTF-8", "text": "import time\nimport configparser\nimport json\n\nfrom travianapi import account\n\nconfig = configparser.ConfigParser()\nconfig.read('config.ini')\n\nurl = 'http://ts70.travian.com/'\nname = 'bro'\npassword = '2bd384f'\n\nuser_agent = config['HEADERS']['user_agent']\nheaders = {'User-Agent': user_agent}\n\nacc = account.Account(url, name, password, headers)\n\n\ndef bot_attack_raid():\n import time\n\n from botlib import farmservice\n\n village = acc.get_village_by_id(69437)\n print('Village:', village.name)\n\n with open('data/servers/ts70/farm.json') as file:\n json_data = json.load(file)\n\n farms = json_data['farms']\n\n farming = farmservice.FarmService(village, farms)\n\n while True:\n farming.update()\n time.sleep(1)\n\nwhile True:\n try:\n bot_attack_raid()\n except BaseException as e:\n print(e)\n time.sleep(20 * 60)\n" }, { "alpha_fraction": 0.5274488925933838, "alphanum_fraction": 0.5296016931533813, "avg_line_length": 31.034482955932617, "blob_id": "8d28a9e6e2210241b70f25d82ee9b03e86e03fa9", "content_id": "4f63a40122d6eb4c1a470403c47a7da396c6c487", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 929, "license_type": "permissive", "max_line_length": 105, "num_lines": 29, "path": "/travianbot/model/overwatch.py", "repo_name": "ihoromi4/travian4-bot", "src_encoding": "UTF-8", "text": "from .web import push\n\n\nclass Overwatch:\n def __init__(self, account):\n self.account = account\n self.push_bullet = None\n self.attacks = dict()\n\n def on_attack(self):\n pass\n\n def add_pushbullet(self, api_key):\n push.init(api_key)\n self.push_bullet = True\n\n def inspect(self):\n for 
village in self.account.villages:\n movements = village.troops.get_movements()\n if 'in-attack' in movements:\n in_attack = movements['in-attack']\n number = in_attack['number']\n if self.attacks.get(village.id, 0) < number:\n if self.push_bullet:\n push.send('Incoming attack!', 'Begin attack on village: {}'.format(village.name))\n self.on_attack()\n self.attacks[village.id] = number\n else:\n self.attacks[village.id] = 0\n" }, { "alpha_fraction": 0.800000011920929, "alphanum_fraction": 0.8545454740524292, "avg_line_length": 8.166666984558105, "blob_id": "250fed7fb246ceafa1ae658cd9a400052b2c5c82", "content_id": "324cad732dc19a506a5114524934acf8f4e428ad", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 55, "license_type": "permissive", "max_line_length": 16, "num_lines": 6, "path": "/requirements.txt", "repo_name": "ihoromi4/travian4-bot", "src_encoding": "UTF-8", "text": "requests\nbs4\nhtml5lib\nPyQt5\npattern-observer\ncx_freeze\n" }, { "alpha_fraction": 0.7777777910232544, "alphanum_fraction": 0.7777777910232544, "avg_line_length": 25, "blob_id": "3dbfd6f14fb1f5a34f3ddc132ddb55dc00454221", "content_id": "809a15d9be5434abbf7b9eceb8d828e5200b74a3", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 27, "license_type": "permissive", "max_line_length": 25, "num_lines": 1, "path": "/travianbot/model/__init__.py", "repo_name": "ihoromi4/travian4-bot", "src_encoding": "UTF-8", "text": "\nfrom .facade import Model\n" }, { "alpha_fraction": 0.6666666865348816, "alphanum_fraction": 0.6666666865348816, "avg_line_length": 18.941177368164062, "blob_id": "9efabf62b69beaece48c9f511f3fe18c2d72af3b", "content_id": "d16ac7954f26bc50e19fbeb52791c65aa13cd9d2", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 339, "license_type": "permissive", "max_line_length": 44, "num_lines": 17, "path": "/travianbot/model/web/push.py", "repo_name": "ihoromi4/travian4-bot", "src_encoding": "UTF-8", "text": "try:\n import pushbullet\n is_work = True\nexcept ImportError:\n print('pushbullet module not installed')\n is_work = False\n\n\ndef init(api_key: str) -> None:\n global pushobj\n pushobj = pushbullet.Pushbullet(api_key)\n\n\ndef send(title: str, body: str) -> None:\n push = pushobj.push_note(title, body)\n\n#print(pb.get_pushes())\n" }, { "alpha_fraction": 0.6025640964508057, "alphanum_fraction": 0.6043956279754639, "avg_line_length": 23.81818199157715, "blob_id": "ed76b40f4d8966d325c496952d0dfd286e13b3be", "content_id": "639260557d6c3f8a1710f191888a838eb49a26c4", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 546, "license_type": "permissive", "max_line_length": 57, "num_lines": 22, "path": "/travianbot/jsonconf.py", "repo_name": "ihoromi4/travian4-bot", "src_encoding": "UTF-8", "text": "import json\n\n\nclass JSONConf:\n def __init__(self, filename: str):\n self.filename = filename\n\n with open(filename) as file:\n self.configuration = json.load(file)\n\n def save(self):\n with open(self.filename, 'w') as file:\n json.dump(self.configuration, file, indent=4)\n\n def __getitem__(self, item):\n return self.configuration[item]\n\n def __setitem__(self, key, value):\n self.configuration[key] = value\n\n def __contains__(self, item):\n return item in self.configuration\n" }, { "alpha_fraction": 0.6179159283638, "alphanum_fraction": 
0.6288848519325256, "avg_line_length": 26.299999237060547, "blob_id": "32cf95f66b682da37d2e23fd930abbdcf2ca4c69", "content_id": "5726ba09ae88084ecfc6efad19d74da3a62f73ca", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 547, "license_type": "permissive", "max_line_length": 49, "num_lines": 20, "path": "/travianbot/model/web/interaction.py", "repo_name": "ihoromi4/travian4-bot", "src_encoding": "UTF-8", "text": "import json\nimport requests\n\ndef verification(url: str, version: tuple):\n url = url + '/api/verification'\n data = {'version': version}\n json_data = json.dumps(data)\n response = requests.post(url, data=json_data)\n if response.status_code == 200:\n data = response.json()\n return data['version_ok']\n return False\n\ndef load_accounts(url: str):\n url = url + '/api/accounts'\n response = requests.get(url)\n if response.status_code == 200:\n data = response.json()\n return data\n raise Exception()\n\n" }, { "alpha_fraction": 0.6140350699424744, "alphanum_fraction": 0.6140350699424744, "avg_line_length": 27.5, "blob_id": "935e47e24f06db208f0fe0b76f7b421639193789", "content_id": "f54e2c5666a116bcbbd9934b56c528745b0022a9", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 399, "license_type": "permissive", "max_line_length": 64, "num_lines": 14, "path": "/travianbot/model/farmservice.py", "repo_name": "ihoromi4/travian4-bot", "src_encoding": "UTF-8", "text": "from travianbot.model import farmvillage\n\n\nclass FarmService:\n def __init__(self, village, farms_positions: list):\n self.village = village\n self.farms = []\n for pos, troops in farms_positions:\n farm = farmvillage.FarmVillage(village, pos, troops)\n self.farms.append(farm)\n\n def update(self):\n for farm in self.farms:\n farm.update()\n" }, { "alpha_fraction": 0.75, "alphanum_fraction": 0.75, "avg_line_length": 22, "blob_id": "b28ddf2d895900cbdb696dd10f9a2a9d35585b89", "content_id": "3991fe43fb0e43395716a1e2a20b7b0688339088", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 24, "license_type": "permissive", "max_line_length": 22, "num_lines": 1, "path": "/travianbot/view/__init__.py", "repo_name": "ihoromi4/travian4-bot", "src_encoding": "UTF-8", "text": "\nfrom .view import View\n" }, { "alpha_fraction": 0.6128500699996948, "alphanum_fraction": 0.6317957043647766, "avg_line_length": 25.39130401611328, "blob_id": "7e2886fa8e37a1de433d3c3719f6e5fc4ba577f4", "content_id": "18d0834d94fca091971d3c33056dfb6fbb021338", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1250, "license_type": "permissive", "max_line_length": 83, "num_lines": 46, "path": "/travianbot/controller/controller.py", "repo_name": "ihoromi4/travian4-bot", "src_encoding": "UTF-8", "text": "from ..model import Model\nfrom ..view import View\n\nurl = 'http://ts5.travian.ru/'\nusername = 'broo'\npassword = 'wA4iN_tYR'\n\nuser_agent = 'Mozilla/5.0 (X11; Linux x86_64; rv:45.0) Gecko/20100101 Firefox/45.0'\nheaders = {'User-Agent': user_agent}\n\naccount_settings = {\n 'url': url,\n 'username': username,\n 'password': password,\n 'headers': headers\n}\n\n\nclass Controller:\n \"\"\" Контроллер. 
Отвечает за ввод пользователя \"\"\"\n\n def __init__(self, model: Model, view: View):\n self.model = model\n self.view = view\n\n self.view.on_open_profile.on(self.on_open_profile)\n self.view.on_new_account.on(self.on_new_account)\n self.view.on_account_bot_start.on(self.on_bot_start)\n\n def on_open_profile(self, config):\n self.load_accounts(config['accounts'])\n\n def on_new_account(self):\n self.view.new_account_card()\n\n def on_bot_start(self):\n print('start bot')\n self.model.start_service()\n\n def open_service(self):\n self.model.open_service(account_settings)\n\n def load_accounts(self, accounts_config: list):\n for config in accounts_config:\n self.open_service()\n self.view.add_account_card(config)\n" }, { "alpha_fraction": 0.6317135691642761, "alphanum_fraction": 0.644501268863678, "avg_line_length": 31.58333396911621, "blob_id": "2d0f9e02f00b2025d4a85d5544381f10103991a2", "content_id": "88e945be242b005b01db05f9e2ea73018d8ad8b4", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 782, "license_type": "permissive", "max_line_length": 77, "num_lines": 24, "path": "/travianbot/model/farmvillage.py", "repo_name": "ihoromi4/travian4-bot", "src_encoding": "UTF-8", "text": "import random\nimport time\n\nfrom travianbot.model import statemachine\n\n\nclass FarmVillage(statemachine.StateMachine):\n def __init__(self, village, pos, troops_number=10):\n statemachine.StateMachine.__init__(self)\n self.village = village\n self.pos = pos\n self.troops_number = troops_number\n self.attack_period = 8 * 60 + 180 * random.random()\n self.last_attack_time = 0\n self.set_state(self.state_wait)\n\n def state_wait(self):\n if time.time() - self.last_attack_time > self.attack_period:\n self.set_state(self.state_raid)\n\n def state_raid(self):\n self.last_attack_time = time.time()\n self.village.troops.attack_raid(self.pos, {'t4': self.troops_number})\n self.set_state(self.state_wait)\n" }, { "alpha_fraction": 0.5898641347885132, "alphanum_fraction": 0.5914315581321716, "avg_line_length": 25.58333396911621, "blob_id": "1e660614900cf76adb05bc958bd5926ff0d0d4a4", "content_id": "d32685fd18d318b223a8863b9a5819441172d625", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1914, "license_type": "permissive", "max_line_length": 88, "num_lines": 72, "path": "/travianbot/view/signindialog.py", "repo_name": "ihoromi4/travian4-bot", "src_encoding": "UTF-8", "text": "import sys\n\nfrom PyQt5.QtWidgets import QDialog\nfrom PyQt5 import uic\nimport observer\n\n\nclass SignUpDialog(QDialog):\n def __init__(self, parrent, settings: dict, profiles_config: dict):\n super(QDialog, self).__init__(parrent)\n\n uic.loadUi(settings['ui_signindialog'], self)\n\n self.profiles_config = profiles_config\n self.any_password = '******'\n\n self.setModal(True)\n\n self.on_open_profile = observer.Event()\n\n self.button_exit.clicked.connect(self.exit)\n self.button_ok.clicked.connect(self.ok)\n\n def get_email(self):\n return self.combobox_emails.currentText()\n email = property(get_email)\n\n def get_password(self):\n try:\n config = next((i for i in self.profiles_config if i['email'] == self.email))\n if self.edit_password.text() == self.any_password:\n return config['password_sha1']\n else:\n return self.edit_password.text()\n except StopIteration:\n return self.edit_password.text()\n password = property(get_password)\n\n def get_config(self):\n try:\n return next((i for i in 
self.profiles_config if i['email'] == self.email))\n except StopIteration:\n return {}\n config = property(get_config)\n\n def validate_password(self, password: str):\n return True\n\n def open_dialog(self):\n password = '******'\n\n for profile in self.profiles_config:\n self.combobox_emails.addItem(profile['email'])\n\n self.edit_password.setText(password)\n\n self.show()\n self.exec()\n\n self.on_open_profile.trigger(self.config)\n\n return {\n 'email': self.email,\n 'password': self.password\n }\n\n def exit(self):\n sys.exit()\n\n def ok(self):\n if self.validate_password(self.password):\n super(QDialog, self).close()\n" }, { "alpha_fraction": 0.6441803574562073, "alphanum_fraction": 0.6466270685195923, "avg_line_length": 28.494844436645508, "blob_id": "081daf1908858bd49df59d0abf3de10746254d10", "content_id": "161fd76c689463396df09b4a308f76db65256b05", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2861, "license_type": "permissive", "max_line_length": 86, "num_lines": 97, "path": "/travianbot/view/view.py", "repo_name": "ihoromi4/travian4-bot", "src_encoding": "UTF-8", "text": "import sys\n\nfrom PyQt5.QtWidgets import QApplication\nfrom PyQt5.QtWidgets import QHBoxLayout\nimport observer\n\nfrom .mainwindow import MainWindow\nfrom .signindialog import SignUpDialog\nfrom .newaccountcard import NewAccountCard\nfrom .accountcard import AccountCard\n\n\nclass View:\n def __init__(self, ui_config: dict, profiles_config: dict):\n self.ui_config = ui_config\n self.profiles_config = profiles_config\n self._new_card = None\n\n # events\n self.on_open_profile = observer.Event()\n\n self.on_new_account = observer.Event()\n self.on_account_bot_start = observer.Event()\n\n self.init_gui()\n\n # connected actions\n action = lambda: self.on_new_account.trigger()\n self.mainwindow.button_new_account.clicked.connect(action)\n\n def init_gui(self):\n self.app = QApplication(sys.argv)\n self.mainwindow = MainWindow(self.ui_config)\n\n def show(self):\n self.mainwindow.show()\n\n dialog = SignUpDialog(self.mainwindow, self.ui_config, self.profiles_config)\n dialog.on_open_profile.on(lambda config: self.on_open_profile.trigger(config))\n result = dialog.open_dialog()\n\n print('email:', result['email'])\n print('password:', result['password'])\n\n sys.exit(self.app.exec())\n\n def new_account_card(self):\n if self._new_card:\n return\n\n widget_cards_id = 0\n parrent = self.mainwindow.stacked_widget.widget(widget_cards_id)\n\n ui = self.ui_config['ui_newplayingcard']\n\n card = self._new_card = NewAccountCard(parrent, ui)\n card.on_save.on(self.save_new_account_card)\n\n layout = parrent.findChild(QHBoxLayout)\n items_count = layout.count()\n offset = 2\n layout.insertWidget(items_count - offset, card)\n\n self.mainwindow.button_new_account.hide()\n #card.show()\n\n def save_new_account_card(self):\n widget_cards_id = 0\n parrent = self.mainwindow.stacked_widget.widget(widget_cards_id)\n\n layout = parrent.findChild(QHBoxLayout)\n layout.removeWidget(self._new_card)\n\n account_config = self._new_card.get_account_config()\n\n self._new_card.hide()\n self._new_card.destroy()\n self._new_card = None\n\n self.add_account_card(account_config)\n\n self.mainwindow.button_new_account.show()\n\n def add_account_card(self, account_config: dict):\n widget_cards_id = 0\n parrent = self.mainwindow.stacked_widget.widget(widget_cards_id)\n\n ui = self.ui_config['ui_playingcard']\n\n card = AccountCard(parrent, ui, account_config)\n\n 
card.on_account_bot_start.on(self.on_account_bot_start.trigger)\n\n layout = parrent.findChild(QHBoxLayout)\n items_count = layout.count()\n offset = 2\n layout.insertWidget(items_count - offset, card)\n" }, { "alpha_fraction": 0.6241700053215027, "alphanum_fraction": 0.6254979968070984, "avg_line_length": 24.525423049926758, "blob_id": "dff54e6643f105237a8706bcf15d5c53061a5f62", "content_id": "82d939fab1c3ee07a4607f19acbcec5fc3c954ca", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1646, "license_type": "permissive", "max_line_length": 88, "num_lines": 59, "path": "/travianbot/model/botservice.py", "repo_name": "ihoromi4/travian4-bot", "src_encoding": "UTF-8", "text": "import threading\nimport queue\n\nimport observer\n\nfrom .statemachine import StateMachine\n\nfrom travianapi import account\nfrom . import restransfer\nfrom . import service\n\nQUEUE_LIMIT = 0\n\n\nclass BotService(service.Service, StateMachine):\n \"\"\" Обьединяет функции управления отдельным аккаунтом \"\"\"\n\n def __init__(self, settings: dict):\n service.Service.__init__(self)\n super(StateMachine, self).__init__()\n\n self.is_open = True\n\n self.settings = settings\n self.url = settings['url']\n self.name = settings['username']\n self.password = settings['password']\n self.headers = settings['headers']\n\n self.account = None\n self.resource_transfer = None\n\n self.start_service_thread(self.run)\n\n self.f()\n\n def close(self) -> None:\n \"\"\" Закрывает сервис - останавливает управление аккаунтом \"\"\"\n\n self.is_open = False\n print('service', id(self), 'close')\n\n @service.transmitter\n def f(self):\n print('some func')\n\n def run(self) -> None:\n \"\"\" Функция выполняется в новом потоке. Управляет аккаунтом \"\"\"\n\n from time import sleep\n\n self.account = account.Account(self.url, self.name, self.password, self.headers)\n self.resource_transfer = restransfer.ResourceTransferNet(self.account, {})\n\n while self.is_open:\n print('Bot service', id(self), 'step')\n self.handle_orders()\n self.resource_transfer.update()\n sleep(3)\n" }, { "alpha_fraction": 0.7161749601364136, "alphanum_fraction": 0.7182095646858215, "avg_line_length": 27.08571434020996, "blob_id": "9ac12342545397746638301a34b9ef4ef693a186", "content_id": "03fe7359bb61a7679904e20855ce99a02ef8baa3", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 983, "license_type": "permissive", "max_line_length": 88, "num_lines": 35, "path": "/bot_gui_qt5.py", "repo_name": "ihoromi4/travian4-bot", "src_encoding": "UTF-8", "text": "import os\nimport logging\nimport json\n\nfrom travianbot import jsonconf\n\nif not os.path.isdir('log'):\n os.makedirs('log')\n\nlog_format = '%(filename)s[LINE:%(lineno)d]# %(levelname)-8s [%(asctime)s] %(message)s'\nlog_file = None # 'log/log.log'\nlogging.basicConfig(format=log_format,\n level=logging.DEBUG,\n filename=log_file)\n\nfrom travianbot.model import Model\nfrom travianbot.controller import Controller\nfrom travianbot.view import View\n\nconfig_path = 'config.json'\n\nconfig = jsonconf.JSONConf(config_path)\n\nui_config_path = os.path.join(config['config_dir'], config['ui_config'])\nview_config = jsonconf.JSONConf(ui_config_path)\n\nprofiles_config_path = os.path.join(config['config_dir'], config['profiles_config'])\nprofiles_config = jsonconf.JSONConf(profiles_config_path)\n\nmodel = Model()\nview = View(view_config, profiles_config)\ncontroller = Controller(model, 
view)\n# controller.load_accounts(profiles_config[0]['accounts'])\n\nview.show()\n" }, { "alpha_fraction": 0.6305969953536987, "alphanum_fraction": 0.6604477763175964, "avg_line_length": 27.210525512695312, "blob_id": "0e1a3a94621d8655beb961f0d388e4d1931f6d79", "content_id": "4d37cae8c8fd07adb4583aa76aea00ba6258b5cc", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1072, "license_type": "permissive", "max_line_length": 72, "num_lines": 38, "path": "/setup.py", "repo_name": "ihoromi4/travian4-bot", "src_encoding": "UTF-8", "text": "from cx_Freeze import setup, Executable\n\n# Dependencies are automatically detected, but it might need\n# fine tuning.\nproduct_name = 'Travian Legends Bot'\nbdist_msi_options = {\n 'upgrade_code': '{66620F3A-DC3A-11E2-B341-002219E9B01E}',\n 'add_to_path': False,\n 'initial_target_dir': r'[ProgramFilesFolder]\\%s' % (product_name),\n}\n\ninclude_files = ['config.json', 'data']\n\nbuildOptions = dict(\n packages=['travianapi', 'travianbot'],\n excludes=[],\n includes=['travianapi', 'travianbot', 'queue'],\n include_files=include_files)\n\nimport sys\n\nbase = 'Win32GUI' if sys.platform == 'win32' else None\ntargetName = 'travianbot'\nif sys.platform == 'win32':\n targetName += '.exe'\n\nexecutables = [\n Executable('bot_gui_qt5.py',\n base=base,\n targetName=targetName,\n icon=\"data/images/icon.ico\")\n]\n\nsetup(name='Travian Legends Bot',\n version='0.1.0',\n description='Bot for Travian Legends Browser Game',\n options=dict(bdist_msi=bdist_msi_options, build_exe=buildOptions),\n executables=executables)\n" }, { "alpha_fraction": 0.574679970741272, "alphanum_fraction": 0.574679970741272, "avg_line_length": 32.380950927734375, "blob_id": "a48b8d86734433fbce8835a06ef13e5dcc5959a5", "content_id": "929cc0437d2ab828464a147430271313a576955c", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 703, "license_type": "permissive", "max_line_length": 69, "num_lines": 21, "path": "/travianbot/model/statemachine.py", "repo_name": "ihoromi4/travian4-bot", "src_encoding": "UTF-8", "text": "\n\nclass StateMachine:\n def __init__(self):\n self.active_state = None\n self.transitions = {}\n\n def set_state(self, state):\n self.active_state = state\n\n def add_transition(self, state, condition, new_state):\n transitions = self.transitions.get(state, [])\n transitions.append((condition, new_state))\n self.transitions[state] = transitions\n\n def update(self):\n if self.active_state:\n transitions = self.transitions.get(self.active_state, [])\n self.active_state()\n for condition, new_state in transitions:\n if condition():\n self.active_state = new_state\n break\n" }, { "alpha_fraction": 0.49450549483299255, "alphanum_fraction": 0.5274725556373596, "avg_line_length": 44, "blob_id": "78615d3769a63c0639fa0a4e9d0efdc22ab0eb4d", "content_id": "47ca7c862667757965034ed4e8df675874c8e8ae", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 91, "license_type": "permissive", "max_line_length": 59, "num_lines": 2, "path": "/travianbot/__init__.py", "repo_name": "ihoromi4/travian4-bot", "src_encoding": "UTF-8", "text": "__version_tuple__ = (0, 1, 0)\n__version__ = '.'.join((str(i) for i in __version_tuple__))\n\n" }, { "alpha_fraction": 0.5764706134796143, "alphanum_fraction": 0.5764706134796143, "avg_line_length": 24.97222137451172, "blob_id": 
"0e1d6ee1445f9cf0582d8b7247eadf04a906c902", "content_id": "f0e537cc683fc70f9d733a536bb793e0c44fb43f", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 935, "license_type": "permissive", "max_line_length": 59, "num_lines": 36, "path": "/travianbot/model/service.py", "repo_name": "ihoromi4/travian4-bot", "src_encoding": "UTF-8", "text": "import queue\nimport threading\n\n\ndef transmitter(func):\n def shell(self, *args, **kwargs):\n if self.is_thread():\n func(self, *args, **kwargs)\n else:\n self.put_order(shell, args, kwargs)\n\n return shell\n\n\nclass Service:\n def __init__(self):\n self._queue = queue.Queue()\n self._thread = None\n self._ident = None\n\n def start_service_thread(self, thread_func):\n self._thread = threading.Thread(target=thread_func)\n self._thread.daemon = True\n self._thread.start()\n self._ident = self._thread.ident\n\n def is_thread(self):\n return self._ident == threading.get_ident()\n\n def put_order(self, func, args: list, kwargs: dict):\n self._queue.put((func, (self,) + args, kwargs))\n\n def handle_orders(self):\n while not self._queue.empty():\n func, args, kwargs = self._queue.get()\n func(*args, **kwargs)\n" }, { "alpha_fraction": 0.6294642686843872, "alphanum_fraction": 0.6294642686843872, "avg_line_length": 28.217391967773438, "blob_id": "dae7dc1de7e1816d149c225f1bb622cc09d9dca3", "content_id": "14fdad47f6ac7a4e3c1cfa62a1d5688e68beb8a3", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 792, "license_type": "permissive", "max_line_length": 70, "num_lines": 23, "path": "/travianbot/model/facade.py", "repo_name": "ihoromi4/travian4-bot", "src_encoding": "UTF-8", "text": "from . 
import botservice\n\n\nclass Model:\n \"\"\" Обьединяет функции управления аккаунтом и обработку данных \"\"\"\n\n def __init__(self):\n self.services = []\n self.current_service = None\n\n def open_service(self, settings: dict) -> None:\n \"\"\" Открывает новый сервис - управление новым аккаунтом \"\"\"\n service = botservice.BotService(settings)\n self.services.append(service)\n\n def close_service(self, service: botservice.BotService) -> None:\n \"\"\" Закрывает указанный сервис \"\"\"\n service.close()\n if service == self.current_service:\n self.current_service = None\n\n def start_service(self):\n pass\n" }, { "alpha_fraction": 0.6483660340309143, "alphanum_fraction": 0.6601307392120361, "avg_line_length": 30.875, "blob_id": "f043b098c11f0602bbdc074d4d4f433f73ab6952", "content_id": "8512df4b68971f563007d824ca3fb0980890ae4e", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1530, "license_type": "permissive", "max_line_length": 96, "num_lines": 48, "path": "/travianbot/view/mainwindow.py", "repo_name": "ihoromi4/travian4-bot", "src_encoding": "UTF-8", "text": "import PyQt5.QtWidgets as qtwidgets\nfrom PyQt5.QtWidgets import QApplication, qApp\nfrom PyQt5.QtWidgets import QMainWindow, QWidget, QLabel, QAction, QPushButton\nfrom PyQt5.QtWidgets import QHBoxLayout, QVBoxLayout\nfrom PyQt5.QtGui import QIcon\nfrom PyQt5 import uic\n\n\nclass MainWindow(QMainWindow):\n def __init__(self, settings: dict):\n super().__init__()\n\n self.settings = settings\n uic.loadUi(settings['ui_mainwindow'], self)\n\n widget_cards_id = 0\n self.cards_widget = self.stacked_widget.widget(widget_cards_id)\n\n self.button_new_account = self.cards_widget.findChild(QPushButton, 'button_new_account')\n\n self.setGeometry(130, 22, 500, 500)\n self.setWindowTitle(settings['mainwindow_title'])\n\n icon = QIcon(settings['icon_path'])\n self.setWindowIcon(icon)\n\n self.statusbar = self.statusBar()\n self.status = \"Ready to work!\"\n # self.init_menubar()\n\n def get_status(self) -> str:\n return ''\n\n def set_status(self, message: str) -> None:\n self.statusbar.showMessage(message)\n status = property(get_status, set_status)\n\n def init_menubar(self):\n icon = QIcon(self.settings['icon_path'])\n\n exit_action = QAction(icon, '&Exit', self)\n exit_action.setShortcut('Ctrl+Q')\n exit_action.setStatusTip('Exit application')\n exit_action.triggered.connect(qApp.exit)\n\n menubar = self.menuBar()\n file_menu = menubar.addMenu('&File')\n file_menu.addAction(exit_action)\n" }, { "alpha_fraction": 0.5691382884979248, "alphanum_fraction": 0.5961923599243164, "avg_line_length": 31.6143798828125, "blob_id": "71676384f0194db4861bde49e589a6848edb797e", "content_id": "b413b9006b59091a9e4528d4ddf8bb949a2d1dac", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5364, "license_type": "permissive", "max_line_length": 101, "num_lines": 153, "path": "/travianbot/model/restransfer.py", "repo_name": "ihoromi4/travian4-bot", "src_encoding": "UTF-8", "text": "import logging\nimport random\nimport time\n\nfrom travianapi import account\n\n# Типы поселений (битовые маски):\nIGNORE = 0 # поселение игнорируется\n\nDEVELOPMENT = 2**0 # поселение с ресурсными полями не максимального уровня\n# в такие деревни поставляются ресурсы для застройки\n\nBARRACKS = 2**1 # деревня производящая войска (нуждается в зерне)\n\nRESOURCES = 2**2 # деревня производит ресурсы\nCROP = 2**3 # дереня производит 
зерно\n\nSOURCE = RESOURCES | CROP # деревня производит и ресурсы и зерно\n\nTARGET = DEVELOPMENT | BARRACKS # требует ресурсов для построек и войск\n\nsettings = {\n 79385: {'type': TARGET, 'priory': 1}, # 1.\n 84821: {'type': TARGET, 'priory': 2}, # 2.\n 86917: {'type': SOURCE, 'priory': 10}, # 3.\n 88380: {'type': SOURCE, 'priory': 10}, # 4.\n 89791: {'type': SOURCE, 'priory': 10}, # 5.\n 90902: {'type': SOURCE, 'priory': 10}, # 6.\n 91726: {'type': SOURCE, 'priory': 10}, # 7.\n 92436: {'type': SOURCE, 'priory': 10}, # 8.\n 93405: {'type': TARGET, 'priory': 3}, # 9.\n 94524: {'type': TARGET, 'priory': 3}, # 10.\n 95147: {'type': TARGET, 'priory': 3}, # 11.\n 95965: {'type': TARGET, 'priory': 0}, # 12.\n 96741: {'type': TARGET, 'priory': 0} # 13.\n}\n\n\nclass ResourceTransferNode:\n def __init__(self, village, setting: dict = {}):\n self.village = village\n self.type = setting['type']\n self.priory = setting['priory']\n\n def get_marketplace(self):\n return self.village.get_building('marketplace')\n\n marketplace = property(get_marketplace)\n\n def get_tradeoffice(self):\n return self.village.get_building('tradeoffice')\n\n tradeoffice = property(get_tradeoffice)\n\n def get_able_carry(self):\n if self.tradeoffice:\n return self.tradeoffice.able_carry\n else:\n return 500\n\n able_carry = property(get_able_carry)\n\n def need_resources(self):\n max_resource = self.village.warehouse\n max_crop = self.village.granary\n resources = self.village.resources\n production = self.village.production\n production_time = 1\n if self.marketplace:\n moves = self.marketplace.get_merchants_moves()\n moves_incoming = moves['incoming']\n move_resources = [move['resources'] for move in moves_incoming]\n else:\n return [0] * 4\n\n max_resources = [max_resource] * 3 + [max_crop]\n needs = [(max_resources[i] - resources[i]) for i in range(4)]\n\n needs = [(needs[i] - max(0, production[i]) * production_time) for i in range(4)]\n\n for move in move_resources:\n needs = [(needs[i] - move[i]) for i in range(4)]\n\n needs = [max(0, int(r)) for r in needs]\n\n print('needs:', needs)\n\n return needs\n\n def is_need_resources(self):\n return sum(self.need_resources()) > 0\n\n def send(self, target):\n if not self.marketplace:\n # в деревне нет рынка\n return\n if self.marketplace.free_merchants == 0:\n # на рынке нет свободных торговцев\n logging.debug('на рынке нет свободных торговцев')\n return\n\n capacity = self.able_carry * self.marketplace.free_merchants\n warehouse = self.village.warehouse\n granary = self.village.granary\n limit_percent = 0.1\n max_resource = [warehouse] * 3 + [granary]\n resources = self.village.resources\n # resources[3] = 0\n free_resources = [max(0.0, resources[i] - max_resource[i] * limit_percent) for i in range(4)]\n\n if capacity < sum(free_resources):\n factor = capacity / sum(free_resources)\n free_resources = [int(r * factor) for r in free_resources]\n\n need_to_send = target.need_resources()\n print('need resources:', need_to_send)\n\n free_resources = [min(need_to_send[i], free_resources[i]) for i in range(4)]\n\n transfer_target = target.village.pos\n transfer_task = free_resources\n\n self.marketplace.send_resources(transfer_target, transfer_task)\n\n logging.debug('ресурсы отправлены')\n\n return True\n\n def send_to(self, targets):\n for node in targets:\n if node.is_need_resources():\n print('need resources')\n if self.send(node):\n print('send resources')\n return True\n\n\nclass ResourceTransferNet:\n def __init__(self, account: account.Account, settings: dict = {}):\n 
self.nodes = []\n for id in settings:\n village = account.get_village_by_id(id)\n node = ResourceTransferNode(village, settings[id])\n self.nodes.append(node)\n\n def update(self):\n targets = [node for node in self.nodes if node.type == TARGET]\n key = lambda x: x.priory\n targets.sort(key=key)\n sources = [node for node in self.nodes if node.type == SOURCE]\n for node in sources:\n node.send_to(targets)\n time.sleep(3.0)\n" }, { "alpha_fraction": 0.6006768345832825, "alphanum_fraction": 0.6040608882904053, "avg_line_length": 23.625, "blob_id": "c8f488cd0d66e518573c2631a29c5aecdc9f3d3e", "content_id": "8d7a948ac6544cf4475903c5d98d85a8ba6aa5d7", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 591, "license_type": "permissive", "max_line_length": 57, "num_lines": 24, "path": "/travianbot/view/newaccountcard.py", "repo_name": "ihoromi4/travian4-bot", "src_encoding": "UTF-8", "text": "from PyQt5.QtWidgets import QFrame\nfrom PyQt5 import uic\nimport observer\n\n\nclass NewAccountCard(QFrame):\n def __init__(self, parrent, ui: str):\n QFrame.__init__(self, parrent)\n\n uic.loadUi(ui, self)\n\n self.on_save = observer.Event()\n\n self.button_save.clicked.connect(self._on_save)\n\n def _on_save(self):\n self.on_save.trigger()\n\n def get_account_config(self):\n return {\n 'server': self.combobox_server.currentText(),\n 'username': self.edit_username.text(),\n 'password': self.edit_password.text()\n }\n" }, { "alpha_fraction": 0.5208333134651184, "alphanum_fraction": 0.5282257795333862, "avg_line_length": 31.34782600402832, "blob_id": "8b30ad59c24941d1bea8631819d97168a14a30e4", "content_id": "0d2d88bbc84acff3855d5d1130677a578bd3f391", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2976, "license_type": "permissive", "max_line_length": 88, "num_lines": 92, "path": "/travianbot/model/resfieldsbuilder.py", "repo_name": "ihoromi4/travian4-bot", "src_encoding": "UTF-8", "text": "import time\nimport random\n\nfrom travlib import account\nfrom travlib.village.buildings import resourcefield\n\nBUILDINGS_TYPES = [\n resourcefield.Woodcutter,\n resourcefield.Claypit,\n resourcefield.Ironmine,\n resourcefield.Cropland\n]\n\n\nclass ResourceBuilder:\n def __init__(self, account: account.Account):\n self.account = account\n\n def error_test(self):\n village = self.account.villages[0]\n print(village.name)\n print(village.builds)\n print('sleep')\n time.sleep(600)\n print(village.builds)\n\n @staticmethod\n def get_low_level_build(village, building_type=None):\n buildings = village.outer.buildings\n building = None\n for b in buildings:\n if not building:\n if not building_type:\n building = b\n elif type(b) is building_type:\n building = b\n else:\n if type(b) is building_type:\n if b.level < building.level:\n building = b\n return building\n\n def resource_balance_builder(self):\n village = self.account.villages[0]\n while True:\n if not village.builds:\n resources = village.resources\n min_res = min(resources[:3])\n min_res_index = resources.index(min_res)\n building_type = BUILDINGS_TYPES[min_res_index]\n if village.free_crop >= 5:\n building = self.get_low_level_build(village, building_type)\n building.build()\n else:\n building = self.get_low_level_build(village, resourcefield.Cropland)\n building.build()\n print('sleep')\n time.sleep(300 + 300 * random.random())\n\n def outside_build(self):\n village = self.account.villages[0]\n print(village.name)\n 
print(village.builds)\n\n def get_low_level_build():\n buildings = village.outer.buildings\n building = buildings[0]\n for b in buildings:\n if not type(b) is resourcefield.Cropland:\n if b.level < building.level:\n building = b\n return building\n\n def get_low_level_cropland():\n buildings = village.outer.buildings\n building = buildings[0]\n for b in buildings:\n if type(b) is resourcefield.Cropland:\n if b.level < building.level:\n building = b\n return building\n\n while True:\n if not village.builds:\n if village.free_crop >= 5:\n building = get_low_level_build()\n building.build()\n else:\n building = get_low_level_cropland()\n building.build()\n print('sleep')\n time.sleep(60 + 240 * random.random())\n" }, { "alpha_fraction": 0.6672694683074951, "alphanum_fraction": 0.6708860993385315, "avg_line_length": 26.649999618530273, "blob_id": "381d42b697fdc7411b9655d04e3d149b018c9b5e", "content_id": "9a26d17b456f70adb0ea4efc1abe6a23bec9c366", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 553, "license_type": "permissive", "max_line_length": 64, "num_lines": 20, "path": "/travianbot/view/accountcard.py", "repo_name": "ihoromi4/travian4-bot", "src_encoding": "UTF-8", "text": "from PyQt5.QtWidgets import QFrame\nfrom PyQt5 import uic\nimport observer\n\n\nclass AccountCard(QFrame):\n def __init__(self, parrent, ui: str, account_config: dict):\n QFrame.__init__(self, parrent)\n\n uic.loadUi(ui, self)\n\n self.label_server.setText(account_config['server'])\n self.label_username.setText(account_config['username'])\n\n self.on_account_bot_start = observer.Event()\n\n self.button_bot_start.clicked.connect(self.on_bot_start)\n\n def on_bot_start(self):\n self.on_account_bot_start.trigger()\n" } ]
27
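The `StateMachine` in `travianbot/model/statemachine.py` above is the glue for `FarmVillage` and `BotService`: states are bound methods, `add_transition(state, condition, new_state)` registers guarded edges per state, and each `update()` runs the active state and then follows the first transition whose condition holds. A minimal usage sketch; the `Blinker` machine is hypothetical and only the `StateMachine` API comes from the repo:

```python
from travianbot.model.statemachine import StateMachine

class Blinker(StateMachine):
    # Hypothetical two-state machine exercising the repo's API.
    def __init__(self):
        StateMachine.__init__(self)
        # Unconditional edges: each update() flips to the other state.
        self.add_transition(self.state_on, lambda: True, self.state_off)
        self.add_transition(self.state_off, lambda: True, self.state_on)
        self.set_state(self.state_on)

    def state_on(self):
        print('on')

    def state_off(self):
        print('off')

machine = Blinker()
for _ in range(4):
    machine.update()  # prints on, off, on, off
```

Note that `BotService` in the same package calls `super(StateMachine, self).__init__()`, which in its MRO resolves past `StateMachine` and never runs `StateMachine.__init__`, leaving `active_state` and `transitions` unset; the direct `statemachine.StateMachine.__init__(self)` call used by `FarmVillage` avoids that.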
TeoTwawki/zsnes-archive
https://github.com/TeoTwawki/zsnes-archive
8f29f5ec399be880f5c9a56e9079cf85066c24b2
0f6916aa5fe311a833e150d6c483bf981fc9b4e5
7e4c51837425d67a6d3658026877dfdeea030bb3
refs/heads/master
2016-08-12T11:23:21.695860
2015-06-09T23:09:35
2015-06-09T23:09:35
51,671,308
4
2
null
null
null
null
null
[ { "alpha_fraction": 0.6855713129043579, "alphanum_fraction": 0.700583815574646, "avg_line_length": 19.672412872314453, "blob_id": "99ef4acb284300bf6e9ef0dfb649c3c8ceaa69c8", "content_id": "9bfd915129189093d97882c84e5378eb8452f7ec", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1199, "license_type": "no_license", "max_line_length": 72, "num_lines": 58, "path": "/src/debugger/zthread.cpp", "repo_name": "TeoTwawki/zsnes-archive", "src_encoding": "UTF-8", "text": "/*\nCopyright (C) 1997-2008 ZSNES Team ( zsKnight, _Demo_, pagefault, Nach )\n\nhttp://www.zsnes.com\nhttp://sourceforge.net/projects/zsnes\nhttps://zsnes.bountysource.com\n\nThis program is free software; you can redistribute it and/or\nmodify it under the terms of the GNU General Public License\nversion 2 as published by the Free Software Foundation.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with this program; if not, write to the Free Software\nFoundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.\n*/\n\n#include \"zthread.h\"\n#include \"load.h\"\n\nextern \"C\" { void zstart(); }\n\nZSNESThread::ZSNESThread() : running(false)\n{\n}\n\nvoid ZSNESThread::run()\n{\n if (!running)\n {\n if (!setjmp(jump))\n {\n running = true;\n zstart();\n }\n }\n}\n\nvoid ZSNESThread::done()\n{\n if (running)\n {\n running = false;\n longjmp(jump, 1);\n }\n}\n\nvoid ZSNESThread::prepare_close()\n{\n if (running)\n {\n debugger_quit = true;\n }\n}\n" }, { "alpha_fraction": 0.44183382391929626, "alphanum_fraction": 0.4808022975921631, "avg_line_length": 22.58108139038086, "blob_id": "64d7a6f352033dbf57461325cf96d0784397c0d8", "content_id": "b52a2cf2eb7cf4d77523eddfa50dbaa634fa2419", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1745, "license_type": "no_license", "max_line_length": 93, "num_lines": 74, "path": "/src/tools/emutools/spcdasm.py", "repo_name": "TeoTwawki/zsnes-archive", "src_encoding": "UTF-8", "text": "import array\n\ntry:\n import psyco\nexcept:\n pass\n\ndef uint8(n):\n n &= 255\n if n >= 128:\n n -= 256\n return n\n\n\nrules = { \"NOP\": lambda op, addr, n: [n],\n \"CSWAP\": lambda op, addr, n: [(n >> 8) & 255, n & 255],\n \"CREL\": lambda op, addr, n: [(n & 255), addr + op.bytes + uint8(n >> 8)],\n \"R1\": lambda op, addr, n: [addr + op.bytes + uint8(n)]\n }\n\nclass Op:\n def __init__(self, mnemonic, args, opcode, bytes, rule, klass, argShift=\"0\", argOr=\"00\"):\n self.mnemonic = mnemonic\n if args == '\"\"':\n self.args = ''\n else:\n self.args = args\n self.opcode = int(opcode, 16)\n self.bytes = int(bytes)\n self.rule = rules[rule]\n self.klass = klass\n self.argShift = int(argShift)\n self.argOr = int(argOr, 16)\n\nops = [None]*256\n\nfor line in file('tools/emutools/spc_asm.tab'):\n if line[0].isalnum():\n op = Op(*line.split())\n ops[op.opcode] = op\n \n\ndef disasm_op(mem, pc):\n def readint(addr, length):\n n = 0\n for i in range(length)[::-1]:\n n = n*256 + mem[addr+i]\n return n\n\n op = ops[mem[pc]]\n args = op.rule(op, pc, readint(pc+1, op.bytes-1))\n\n s = '%s ' % op.mnemonic\n\n for c in op.args:\n if c == '*':\n s += \"$%x\" % args.pop(0)\n else:\n s += c\n \n return (s, pc+op.bytes)\n\ndef disasm(mem, start, stop):\n pc = 
start\n while pc < stop:\n (op, newpc) = disasm_op(mem, pc)\n print \"%04x: %-6s\" % (pc, op)\n pc = newpc\n\n\nif __name__ == '__main__':\n import sys\n mem = array.array('B', file(sys.argv[1], 'rb').read()[0x100:0x10100])\n disasm(mem, 0x0000, 0x10000)\n" }, { "alpha_fraction": 0.5557065010070801, "alphanum_fraction": 0.602717399597168, "avg_line_length": 34.72815704345703, "blob_id": "75de565b971eacb4c95fad6c29e88cd3fa135b90", "content_id": "2a75747b27f9b5e63123e7469b22c8603a0bb7d1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 3680, "license_type": "no_license", "max_line_length": 85, "num_lines": 103, "path": "/src/chips/dsp4emu.h", "repo_name": "TeoTwawki/zsnes-archive", "src_encoding": "UTF-8", "text": "#ifndef DSP4EMU_H\n#define DSP4EMU_H\n\n#include <stdint.h>\n#include <stdbool.h>\n\nstruct DSP4_t\n{\n bool waiting4command;\n bool half_command;\n uint16_t command;\n uint32_t in_count;\n uint32_t in_index;\n uint32_t out_count;\n uint32_t out_index;\n uint8_t parameters[512];\n uint8_t output[512];\n};\n\nextern struct DSP4_t DSP4;\n\nstruct DSP4_vars_t\n{\n // op control\n int8_t DSP4_Logic; // controls op flow\n\n\n // projection format\n int16_t lcv; // loop-control variable\n int16_t distance; // z-position into virtual world\n int16_t raster; // current raster line\n int16_t segments; // number of raster lines drawn\n\n // 1.15.16 or 1.15.0 [sign, integer, fraction]\n int32_t world_x; // line of x-projection in world\n int32_t world_y; // line of y-projection in world\n int32_t world_dx; // projection line x-delta\n int32_t world_dy; // projection line y-delta\n int16_t world_ddx; // x-delta increment\n int16_t world_ddy; // y-delta increment\n int32_t world_xenv; // world x-shaping factor\n int16_t world_yofs; // world y-vertical scroll\n\n int16_t view_x1; // current viewer-x\n int16_t view_y1; // current viewer-y\n int16_t view_x2; // future viewer-x\n int16_t view_y2; // future viewer-y\n int16_t view_dx; // view x-delta factor\n int16_t view_dy; // view y-delta factor\n int16_t view_xofs1; // current viewer x-vertical scroll\n int16_t view_yofs1; // current viewer y-vertical scroll\n int16_t view_xofs2; // future viewer x-vertical scroll\n int16_t view_yofs2; // future viewer y-vertical scroll\n int16_t view_yofsenv; // y-scroll shaping factor\n int16_t view_turnoff_x; // road turnoff data\n int16_t view_turnoff_dx; // road turnoff delta factor\n\n\n // drawing area\n\n int16_t viewport_cx; // x-center of viewport window\n int16_t viewport_cy; // y-center of render window\n int16_t viewport_left; // x-left of viewport\n int16_t viewport_right; // x-right of viewport\n int16_t viewport_top; // y-top of viewport\n int16_t viewport_bottom; // y-bottom of viewport\n\n\n // sprite structure\n\n int16_t sprite_x; // projected x-pos of sprite\n int16_t sprite_y; // projected y-pos of sprite\n int16_t sprite_attr; // obj attributes\n bool sprite_size; // sprite size: 8x8 or 16x16\n int16_t sprite_clipy; // visible line to clip pixels off\n int16_t sprite_count;\n\n // generic projection variables designed for\n // two solid polygons + two polygon sides\n\n int16_t poly_clipLf[2][2]; // left clip boundary\n int16_t poly_clipRt[2][2]; // right clip boundary\n int16_t poly_ptr[2][2]; // HDMA structure pointers\n int16_t poly_raster[2][2]; // current raster line below horizon\n int16_t poly_top[2][2]; // top clip boundary\n int16_t poly_bottom[2][2]; // bottom clip boundary\n int16_t poly_cx[2][2]; // center for left/right 
points\n int16_t poly_start[2]; // current projection points\n int16_t poly_plane[2]; // previous z-plane distance\n\n\n // OAM\n int16_t OAM_attr[16]; // OAM (size,MSB) data\n int16_t OAM_index; // index into OAM table\n int16_t OAM_bits; // offset into OAM table\n\n int16_t OAM_RowMax; // maximum number of tiles per 8 aligned pixels (row)\n int16_t OAM_Row[32]; // current number of tiles per row\n};\n\nextern struct DSP4_vars_t DSP4_vars;\n\n#endif\n" }, { "alpha_fraction": 0.5900059938430786, "alphanum_fraction": 0.624623715877533, "avg_line_length": 22.230770111083984, "blob_id": "6172e0b4458d941a633fa71e45c8f24f6e5550ce", "content_id": "8abbf1de7f90a2b1696f0f43650616b92aee6dde", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 3322, "license_type": "no_license", "max_line_length": 91, "num_lines": 143, "path": "/src/jma/rngcoder.h", "repo_name": "TeoTwawki/zsnes-archive", "src_encoding": "UTF-8", "text": "/*\nCopyright (C) 2002 Andrea Mazzoleni ( http://advancemame.sf.net )\nCopyright (C) 2001-4 Igor Pavlov ( http://www.7-zip.org )\n\nThis library is free software; you can redistribute it and/or\nmodify it under the terms of the GNU Lesser General Public\nLicense version 2.1 as published by the Free Software Foundation.\n\nThis library is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU\nLesser General Public License for more details.\n\nYou should have received a copy of the GNU Lesser General Public\nLicense along with this library; if not, write to the Free Software\nFoundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA\n*/\n\n#ifndef __COMPRESSION_RANGECODER_H\n#define __COMPRESSION_RANGECODER_H\n\n#include \"inbyte.h\"\n\nnamespace NCompression {\nnamespace NArithmetic {\n\nconst UINT32 kNumTopBits = 24;\nconst UINT32 kTopValue = (1 << kNumTopBits);\n\nclass CRangeDecoder\n{\npublic:\n NStream::CInByte m_Stream;\n UINT32 m_Range;\n UINT32 m_Code;\n UINT32 m_Word;\n void Normalize()\n {\n while (m_Range < kTopValue)\n {\n m_Code = (m_Code << 8) | m_Stream.ReadByte();\n m_Range <<= 8;\n }\n }\n\n void Init(ISequentialInStream *aStream)\n {\n m_Stream.Init(aStream);\n m_Code = 0;\n m_Range = UINT32(-1);\n for(int i = 0; i < 5; i++)\n m_Code = (m_Code << 8) | m_Stream.ReadByte();\n }\n\n UINT32 GetThreshold(UINT32 aTotal)\n {\n return (m_Code) / ( m_Range /= aTotal);\n }\n\n void Decode(UINT32 aStart, UINT32 aSize, UINT32)\n {\n m_Code -= aStart * m_Range;\n m_Range *= aSize;\n Normalize();\n }\n\n /*\n UINT32 DecodeDirectBitsDiv(UINT32 aNumTotalBits)\n {\n m_Range >>= aNumTotalBits;\n UINT32 aThreshold = m_Code / m_Range;\n m_Code -= aThreshold * m_Range;\n\n Normalize();\n return aThreshold;\n }\n\n UINT32 DecodeDirectBitsDiv2(UINT32 aNumTotalBits)\n {\n if (aNumTotalBits <= kNumBottomBits)\n return DecodeDirectBitsDiv(aNumTotalBits);\n UINT32 aResult = DecodeDirectBitsDiv(aNumTotalBits - kNumBottomBits) << kNumBottomBits;\n return (aResult | DecodeDirectBitsDiv(kNumBottomBits));\n }\n */\n\n UINT32 DecodeDirectBits(UINT32 aNumTotalBits)\n {\n UINT32 aRange = m_Range;\n UINT32 aCode = m_Code;\n UINT32 aResult = 0;\n for (UINT32 i = aNumTotalBits; i > 0; i--)\n {\n aRange >>= 1;\n /*\n aResult <<= 1;\n if (aCode >= aRange)\n {\n aCode -= aRange;\n aResult |= 1;\n }\n */\n UINT32 t = (aCode - aRange) >> 31;\n aCode -= aRange & (t - 1);\n // aRange = aRangeTmp + ((aRange & 1) & (1 - 
t));\n aResult = (aResult << 1) | (1 - t);\n\n if (aRange < kTopValue)\n {\n aCode = (aCode << 8) | m_Stream.ReadByte();\n aRange <<= 8;\n }\n }\n m_Range = aRange;\n m_Code = aCode;\n return aResult;\n }\n\n UINT32 DecodeBit(UINT32 aSize0, UINT32 aNumTotalBits)\n {\n UINT32 aNewBound = (m_Range >> aNumTotalBits) * aSize0;\n UINT32 aSymbol;\n if (m_Code < aNewBound)\n {\n aSymbol = 0;\n m_Range = aNewBound;\n }\n else\n {\n aSymbol = 1;\n m_Code -= aNewBound;\n m_Range -= aNewBound;\n }\n Normalize();\n return aSymbol;\n }\n\n UINT64 GetProcessedSize() {return m_Stream.GetProcessedSize(); }\n};\n\n}}\n\n#endif\n" }, { "alpha_fraction": 0.5591248869895935, "alphanum_fraction": 0.5702021718025208, "avg_line_length": 22.147436141967773, "blob_id": "e047cf9e1c476a86f1f1e2691f546713fb0caec1", "content_id": "c2463a2d78e859b26abe913b1d13d00ad477e389", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 3611, "license_type": "no_license", "max_line_length": 109, "num_lines": 156, "path": "/src/win/safelib.c", "repo_name": "TeoTwawki/zsnes-archive", "src_encoding": "UTF-8", "text": "/*\nCopyright (C) 1997-2008 ZSNES Team ( zsKnight, _Demo_, pagefault, Nach )\n\nhttp://www.zsnes.com\nhttp://sourceforge.net/projects/zsnes\nhttps://zsnes.bountysource.com\n\nThis program is free software; you can redistribute it and/or\nmodify it under the terms of the GNU General Public License\nversion 2 as published by the Free Software Foundation.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with this program; if not, write to the Free Software\nFoundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.\n*/\n\n\n#include <windows.h>\n#include <process.h>\n#include <io.h>\n#define _POSIX_\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n#include <fcntl.h>\n\n#include \"safelib.h\"\n#include \"../argv.h\"\n\n//These are here because I don't believe in MSVC's prefixing affixation\n#define dup _dup\n#define dup2 _dup2\n#define pipe _pipe\n#define flushall _flushall\n#define cwait _cwait\n\n\n//Introducing a popen which doesn't return until it knows for sure of program launched or couldn't open -Nach\n\n#define READ_FD 0\n#define WRITE_FD 1\n\nstatic struct fp_pid_link\n{\n FILE *fp;\n int pid;\n struct fp_pid_link *next;\n} fp_pids = { 0, 0, 0 };\n\nFILE *safe_popen(char *command, const char *mode)\n{\n FILE *ret = 0;\n char **argv = build_argv(command);\n if (argv)\n {\n int filedes[2];\n\n if (mode && (*mode == 'r' || *mode == 'w') &&\n !pipe(filedes, 512, (mode[1] == 'b' ? 
O_BINARY : O_TEXT) | O_NOINHERIT))\n {\n int fd_original;\n FILE *fp;\n\n if (*mode == 'r')\n {\n fd_original = dup(STDOUT_FILENO);\n dup2(filedes[WRITE_FD], STDOUT_FILENO);\n close(filedes[WRITE_FD]);\n if (!(fp = fdopen(filedes[READ_FD], mode)))\n {\n close(filedes[READ_FD]);\n }\n }\n else\n {\n fd_original = dup(STDIN_FILENO);\n dup2(filedes[READ_FD], STDIN_FILENO);\n close(filedes[READ_FD]);\n if (!(fp = fdopen(filedes[WRITE_FD], mode)))\n {\n close(filedes[WRITE_FD]);\n }\n }\n\n if (fp)\n {\n intptr_t childpid;\n flushall();\n\n childpid = spawnvp(P_NOWAIT, argv[0], (const char* const*)argv);\n if (childpid > 0)\n {\n struct fp_pid_link *link = &fp_pids;\n while (link->next)\n {\n link = link->next;\n }\n\n link->next = (struct fp_pid_link *)malloc(sizeof(struct fp_pid_link));\n if (link->next)\n {\n link->next->fp = fp;\n link->next->pid = childpid;\n link->next->next = 0;\n ret = fp;\n }\n else\n {\n fclose(fp);\n TerminateProcess((HANDLE)childpid, 0);\n cwait(0, childpid, WAIT_CHILD);\n }\n }\n else\n {\n fclose(fp);\n }\n }\n\n if (*mode == 'r')\n {\n dup2(fd_original, STDOUT_FILENO);\n }\n else\n {\n dup2(fd_original, STDIN_FILENO);\n }\n close(fd_original);\n }\n free(argv);\n }\n return(ret);\n}\n\nvoid safe_pclose(FILE *fp)\n{\n struct fp_pid_link *link = &fp_pids;\n\n while (link->next && link->next->fp != fp)\n {\n link = link->next;\n }\n if (link->next->fp == fp)\n {\n struct fp_pid_link *dellink = link->next;\n fclose(fp);\n cwait(0, link->next->pid, WAIT_CHILD);\n link->next = link->next->next;\n free(dellink);\n }\n}\n" }, { "alpha_fraction": 0.6911295652389526, "alphanum_fraction": 0.7038927674293518, "avg_line_length": 22.388059616088867, "blob_id": "b56de27d6dff05e4ac3dc8e5556448d7bcd774a2", "content_id": "198bd38a8e6da8060bdd67170569c2854143cd14", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1567, "license_type": "no_license", "max_line_length": 88, "num_lines": 67, "path": "/src/debugger/load.cpp", "repo_name": "TeoTwawki/zsnes-archive", "src_encoding": "UTF-8", "text": "/*\nCopyright (C) 1997-2008 ZSNES Team ( zsKnight, _Demo_, pagefault, Nach )\n\nhttp://www.zsnes.com\nhttp://sourceforge.net/projects/zsnes\nhttps://zsnes.bountysource.com\n\nThis program is free software; you can redistribute it and/or\nmodify it under the terms of the GNU General Public License\nversion 2 as published by the Free Software Foundation.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with this program; if not, write to the Free Software\nFoundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.\n*/\n\n#include <QApplication>\n#include \"load.h\"\n#include \"ui.h\"\n#include \"zthread.h\"\n\nstatic bool debugger_running = false;\nstatic int app_exit_num = 0;\nstatic ZSNESThread zthread;\n\nunsigned char debugger_quit = false;\n\nvoid debug_main()\n{\n if (!debugger_running)\n {\n debugger_running = true;\n\n int argc = 1;\n char *argv[] = { \"debug\" };\n QApplication app(argc, argv);\n QObject::connect(&app, SIGNAL(lastWindowClosed()), &zthread, SLOT(prepare_close()));\n\n QtDebugger::showQtDebugger(0);\n\n zthread.start();\n app.exec();\n zthread.wait();\n QtDebugger::destroyQtDebugger();\n exit(app_exit_num);\n }\n}\n\n\nvoid debug_exit(int exit_num)\n{\n if (debugger_running)\n {\n app_exit_num = exit_num;\n qApp->quit();\n zthread.done();\n }\n else\n {\n exit(exit_num);\n }\n}\n" }, { "alpha_fraction": 0.686170220375061, "alphanum_fraction": 0.6914893388748169, "avg_line_length": 12.428571701049805, "blob_id": "3d851d2e530f8654be2865cf28bfd41bb024f929", "content_id": "c8ed01139f58c06d969c68ae568beaf42841e7a5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 188, "license_type": "no_license", "max_line_length": 43, "num_lines": 14, "path": "/src/gui/gui.h", "repo_name": "TeoTwawki/zsnes-archive", "src_encoding": "UTF-8", "text": "#include \"ui_zsnes.h\"\n\nclass ZSNESMainForm : public QMainWindow\n{\n Q_OBJECT\n\npublic:\n ZSNESMainForm(QMainWindow *parent = 0);\n\nprivate slots:\n\nprivate:\n Ui::ZSNESMainForm ui;\n };\n" }, { "alpha_fraction": 0.7282809615135193, "alphanum_fraction": 0.7406038045883179, "avg_line_length": 20.639999389648438, "blob_id": "559499b6409fff1039e95af75ba8af10911c1273", "content_id": "60e40e74ac237f5211738dc35d3a19c42fa4010d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1623, "license_type": "no_license", "max_line_length": 72, "num_lines": 75, "path": "/src/zdir.h", "repo_name": "TeoTwawki/zsnes-archive", "src_encoding": "UTF-8", "text": "/*\nCopyright (C) 1997-2008 ZSNES Team ( zsKnight, _Demo_, pagefault, Nach )\n\nhttp://www.zsnes.com\nhttp://sourceforge.net/projects/zsnes\nhttps://zsnes.bountysource.com\n\nThis program is free software; you can redistribute it and/or\nmodify it under the terms of the GNU General Public License\nversion 2 as published by the Free Software Foundation.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with this program; if not, write to the Free Software\nFoundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.\n*/\n\n#ifndef ZDIR_H\n#define ZDIR_H\n\nstruct dirent_info\n{\n char *name;\n mode_t mode;\n off_t size;\n#ifdef __UNIXSDL__\n uid_t uid;\n gid_t gid;\n#endif\n};\n\n#ifndef __UNIXSDL__\n#include <stdint.h>\n#include <windows.h>\n#include <io.h>\n\n//Avoid clashing with DJGPP and MinGW extras\n\nstruct z_dirent\n{\n char d_name[256];\n};\n\ntypedef struct\n{\n intptr_t find_first_handle;\n struct _finddata_t fileinfo;\n struct z_dirent entry;\n} z_DIR;\n\nz_DIR *z_opendir(const char *path);\nstruct z_dirent *z_readdir(z_DIR *dir);\nint z_closedir(z_DIR *dir);\n\n#ifndef NO_ZDIR_TYPEDEF\n#define dirent z_dirent\ntypedef z_DIR DIR;\n#define opendir z_opendir\n#define readdir z_readdir\n#define closedir z_closedir\n#endif\n\n#else\n#include <dirent.h>\ntypedef DIR z_DIR;\n#endif\n\nstruct dirent_info *readdir_info(z_DIR *dir);\nint dirent_access(struct dirent_info *entry, int mode);\n\n#endif\n" }, { "alpha_fraction": 0.4851182699203491, "alphanum_fraction": 0.571179211139679, "avg_line_length": 18.58461570739746, "blob_id": "f2b49f5c97b39d68d77e8a3b508f5099c680d24c", "content_id": "08dd4eafb823e89b4ff3e33ce346bcea0af5283c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 11457, "license_type": "no_license", "max_line_length": 103, "num_lines": 585, "path": "/src/chips/seta11.c", "repo_name": "TeoTwawki/zsnes-archive", "src_encoding": "UTF-8", "text": "// ST-011 SNES DSP adapted from Morita Shogi 64\n//\n// notes:\n// - the SNES uses DMA to/from 60:0000 and maybe 68:0xxx\n// - some code redundancy (helper subroutines for movement projection)\n//\n// - OPS04/05 have unknown output values (!)\n// - OPS06/07 have unknown purposes\n//\n// - plenty of missing opcodes that don't show up in the only known binary log (st011-demo)\n// (play the game until captured/promoted pieces, king checked, endgame)\n// - minus emulation cycles (busy signals), bit-perfect to 'st011-demo'\n\n//#define DEBUG_DSP\n\n#ifdef DEBUG_DSP\n#include <stdio.h>\nint debug1, debug2;\nint line_count;\n#endif\n\n\nvoid (*RunST011)();\nvoid ST011_Command();\n\nunsigned char ST011_DR;\nunsigned char ST011_SR;\n\nint ST011_input_length;\n\n#define ST011_ram setaramdata\n\nextern unsigned char *setaramdata;\n\n#define ST011_board ( ST011_ram+0x130 )\n\nint ST011_dma_count;\nint ST011_dma_index;\n\nint ST011_king1;\nint ST011_king2;\n\n// (x,y)\n#define MOVE_UUL -1,-20\n#define MOVE_UL\t\t -1,-10\n#define MOVE_ULAll -9,- 9\n#define MOVE_U\t\t 0,-10\n#define MOVE_UAll 0,- 9\n#define MOVE_UR\t\t 1,-10\n#define MOVE_URAll 9,- 9\n#define MOVE_UUR 1,-20\n\n#define MOVE_L\t\t -1, 0\n#define MOVE_LAll\t -9, 0\n#define MOVE_R\t\t 1, 0\n#define MOVE_RAll\t 9, 0\n\n#define MOVE_DDL\t -1, 20\n#define MOVE_DL\t\t -1, 10\n#define MOVE_DLAll -9, 9\n#define MOVE_D\t\t 0, 10\n#define MOVE_DAll 0, 9\n#define MOVE_DR\t\t 1, 10\n#define MOVE_DRAll 9, 9\n#define MOVE_DDR\t 1, 20\n\n#define MOVE_STOP 127,127\n#define MOVE_NOP\t 0, 0\nconst int ST011_move_table[8*2][9*2] =\n{\n\t// Pawn: one step forward\n\t// - Promoted: same as Gold\n\t{ MOVE_D, MOVE_STOP, MOVE_NOP, MOVE_NOP, MOVE_NOP, MOVE_NOP, MOVE_NOP, MOVE_NOP, MOVE_NOP },\n\t{ MOVE_DR, MOVE_D, MOVE_DL, MOVE_R, MOVE_L, MOVE_U, MOVE_STOP, MOVE_NOP, MOVE_NOP },\n\n\t// Lance: all steps forward\n\t// - 
Promoted: same as Gold\n\t{ MOVE_DAll, MOVE_STOP, MOVE_NOP, MOVE_NOP, MOVE_NOP, MOVE_NOP, MOVE_NOP, MOVE_NOP, MOVE_NOP },\n\t{ MOVE_DR, MOVE_D, MOVE_DL, MOVE_R, MOVE_L, MOVE_U, MOVE_STOP, MOVE_NOP, MOVE_NOP },\n\n\t// Knight: one step side, two forward\n\t// - Promoted: same as Gold\n\t{ MOVE_DDR, MOVE_DDL, MOVE_STOP, MOVE_NOP, MOVE_NOP, MOVE_NOP, MOVE_NOP, MOVE_NOP, MOVE_NOP },\n\t{ MOVE_DR, MOVE_D, MOVE_DL, MOVE_R, MOVE_L, MOVE_U, MOVE_STOP, MOVE_NOP, MOVE_NOP },\n\n\t// Silver general: one any diagonal, one step forward\n\t// - Promoted: same as Gold\n\t{ MOVE_DR, MOVE_D, MOVE_DL, MOVE_UR, MOVE_UL, MOVE_STOP, MOVE_NOP, MOVE_NOP, MOVE_NOP },\n\t{ MOVE_DR, MOVE_D, MOVE_DL, MOVE_R, MOVE_L, MOVE_U, MOVE_STOP, MOVE_NOP, MOVE_NOP },\n\n\t// Gold general: one any forward, one sideways or one backward\n\t// - Promoted: N/A\n\t{ MOVE_DR, MOVE_D, MOVE_DL, MOVE_R, MOVE_L, MOVE_U, MOVE_STOP, MOVE_NOP, MOVE_NOP },\n\t{ MOVE_STOP, MOVE_NOP, MOVE_NOP, MOVE_NOP, MOVE_NOP, MOVE_NOP, MOVE_NOP, MOVE_NOP, MOVE_NOP },\n\n\t// Bishop: any diagonal\n\t// - Promoted: Bishop + King\n\t{ MOVE_DRAll, MOVE_DLAll, MOVE_URAll, MOVE_ULAll, MOVE_STOP, MOVE_NOP, MOVE_NOP, MOVE_NOP, MOVE_NOP },\n\t{ MOVE_DRAll, MOVE_D, MOVE_DLAll, MOVE_R, MOVE_L, MOVE_URAll, MOVE_U, MOVE_ULAll, MOVE_STOP },\n\n\t// Rook: any vertical, horizontal\n\t// - Promoted: Rook + King\n\t{ MOVE_DAll, MOVE_RAll, MOVE_LAll, MOVE_UAll, MOVE_STOP, MOVE_NOP, MOVE_NOP, MOVE_NOP, MOVE_NOP },\n\t{ MOVE_DR, MOVE_DAll, MOVE_DL, MOVE_RAll, MOVE_LAll, MOVE_UR, MOVE_UAll, MOVE_UL, MOVE_STOP },\n\n\t// King: one any direction\n\t// - Promoted: N/A\n\t{ MOVE_DR, MOVE_D, MOVE_DL, MOVE_R, MOVE_L, MOVE_UR, MOVE_U, MOVE_UL, MOVE_STOP },\n\t{ MOVE_STOP, MOVE_NOP, MOVE_NOP, MOVE_NOP, MOVE_NOP, MOVE_NOP, MOVE_NOP, MOVE_NOP, MOVE_NOP },\n};\n\n\nvoid ST011_Reset()\n{\n RunST011 = &ST011_Command;\n ST011_SR = 0xc4;\n}\n\n\nvoid ST011_OP01_A()\n{\n\tif( ST011_dma_count-- )\n\t{\n\t\tST011_board[ ST011_dma_index++ ] = ST011_DR;\n\t}\n\n\tif( ST011_dma_count == 0 )\n\t{\n#ifdef DEBUG_DSP\n\t\tint lcv1, lcv2;\n#endif\n\t\tint lcv;\n\n\t\tfor( lcv = 0; lcv < 11; lcv++ )\n\t\t{\n\t\t\tST011_board[ lcv ] = 0;\n\t\t}\n\t\tfor( lcv = 11; lcv < 21; lcv++ )\n\t\t{\n\t\t\tST011_board[ lcv ] = 0x80;\n\t\t}\n\n\t\tST011_king1 = ST011_board[ 126+21 ];\n\t\tST011_king2 = ST011_board[ 127+21 ];\n\n\t\tRunST011 = &ST011_Command;\n\t\tST011_SR = 0xc4;\n\n#ifdef DEBUG_DSP\n\t\t// Debug\n\t\tprintf( \"OP01\\n\" );\n\t\tfor( lcv1 = 0; lcv1 < 9; lcv1++ )\n\t\t{\n\t\t\tfor( lcv2 = 0; lcv2 < 10; lcv2++ )\n\t\t\t{\n\t\t\t\tprintf( \"%02x \", ST011_board[ lcv1*10 + lcv2 + 21 ] );\n\t\t\t}\n\n\t\t\tprintf( \"\\n\" );\n\t\t}\n\t\tprintf( \"OP01 END\\n\\n\" );\n#endif\n\t}\n}\n\nvoid ST011_OP01()\n{\n\tST011_dma_count = 128;\n\tST011_dma_index = 0+21;\n\n\tRunST011 = &ST011_OP01_A;\n\tST011_SR = 0xa4;\n}\n\n\nvoid ST011_OP02_A()\n{\n\tif( ST011_dma_count-- )\n\t{\n\t\tST011_DR = ST011_ram[ ST011_dma_index-- ];\n\t}\n\n\tif( ST011_dma_count == 0 )\n\t{\n#ifdef DEBUG_DSP\n\t\tint lcv1, lcv2;\n#endif\n\n\t\tRunST011 = &ST011_Command;\n\t\tST011_SR = 0xc4;\n\n#ifdef DEBUG_DSP\n\t\t// Debug\n#define OP02_ROW 10\n\n\t\tprintf( \"OP02\\n\" );\n\t\tfor( lcv1 = 0; lcv1 < 0x83 / OP02_ROW; lcv1++ )\n\t\t{\n\t\t\tfor( lcv2 = 0; lcv2 < OP02_ROW; lcv2++ )\n\t\t\t{\n\t\t\t\tprintf( \"%02x \", ST011_ram[ debug1 - lcv1 * OP02_ROW - lcv2 ] );\n\t\t\t}\n\n\t\t\tprintf( \"\\n\" );\n\t\t}\n\t\tprintf( \"OP02 END\\n\\n\" );\n#endif\n\t}\n}\n\nvoid ST011_OP02()\n{\n\tswitch( ST011_input_length-- )\n\t{\n\t\tcase 4: 
ST011_dma_index = ST011_DR;\tbreak;\n\t\tcase 3: ST011_dma_index |= ST011_DR << 8;\tbreak;\n\t\tcase 2: ST011_dma_count = ST011_DR;\tbreak;\n\t\tcase 1:\n\t\t\tST011_dma_count |= ST011_DR << 8;\n\n#ifdef DEBUG_DSP\n\t\t\tdebug1 = ST011_dma_index;\n\t\t\tdebug2 = 0;\n#endif\n\n\t\t\tRunST011 = &ST011_OP02_A;\n\t\t\tST011_SR = 0xa4;\n\t\t\tbreak;\n\t}\n}\n\n\nvoid ST011_Project_Moves( int color )\n{\n\tint row, col, lcv, index;\n\tint dir;\n\n\tindex = 0x121;\n\tfor( lcv = 0; lcv < 0x83; lcv++ )\n\t{\n\t\tST011_ram[ index-- ] = 0;\n\t}\n\tindex = 0x121 - 21;\n\n\tif( color == 0x20 )\n\t{\n\t\tdir = 1;\n\t}\n\telse\n\t{\n\t\tdir = -1;\n\t}\n\n\tfor( row = 0; row < 9; row++ )\n\t{\n\t\tfor( col = 0; col < 10; col++ )\n\t\t{\n\t\t\tint shogi_piece;\n\t\t\tint piece_id;\n\t\t\tint lcv_steps, lcv_move;\n\t\t\tint move_list[ 9*2 ];\n\n\t\t\tshogi_piece = ST011_board[ row*10+col+21 ];\n\t\t\tpiece_id = shogi_piece & 0x1f;\n\n\t\t\tif( col == 9 ) continue;\n\t\t\tif( shogi_piece == 0x00 ) continue;\n\t\t\tif( ( shogi_piece & ~0x1f ) != color ) continue;\n\n\t\t\tfor( lcv = 0; lcv < 9*2; lcv++ )\n\t\t\t{\n\t\t\t\tmove_list[ lcv ] = ST011_move_table[ piece_id >> 1 ][ lcv ];\n\t\t\t}\n\n\t\t\tlcv_move = 0;\n\t\t\twhile( move_list[ lcv_move ] != 0x7f )\n\t\t\t{\n\t\t\t\tint pos_x, pos_y;\n\n\t\t\t\tlcv_steps = 1;\n\t\t\t\tif( move_list[ lcv_move ] == 9 || move_list[ lcv_move ] == -9 )\n\t\t\t\t{\n\t\t\t\t\tlcv_steps = 9;\n\t\t\t\t\tif( move_list[ lcv_move ] == 9 )\n\t\t\t\t\t{\n\t\t\t\t\t\tmove_list[ lcv_move ] = 1;\n\t\t\t\t\t}\n\t\t\t\t\telse\n\t\t\t\t\t{\n\t\t\t\t\t\tmove_list[ lcv_move ] = -1;\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tif( move_list[ lcv_move+1 ] == 9 || move_list[ lcv_move+1 ] == -9 )\n\t\t\t\t{\n\t\t\t\t\tlcv_steps = 9;\n\t\t\t\t\tif( move_list[ lcv_move+1 ] == 9 )\n\t\t\t\t\t{\n\t\t\t\t\t\tmove_list[ lcv_move+1 ] = 1;\n\t\t\t\t\t}\n\t\t\t\t\telse\n\t\t\t\t\t{\n\t\t\t\t\t\tmove_list[ lcv_move+1 ] = -1;\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\telse\n\t\t\t\t{\n\t\t\t\t\tmove_list[ lcv_move+1 ] /= 10;\n\t\t\t\t}\n\n\t\t\t\tpos_x = col;\n\t\t\t\tpos_y = row;\n\t\t\t\twhile( lcv_steps-- )\n\t\t\t\t{\n\t\t\t\t\tpos_x += move_list[ lcv_move+0 ];\n\t\t\t\t\tpos_y += ( move_list[ lcv_move+1 ] * dir );\n\n\t\t\t\t\tST011_ram[ index - pos_y*10 - pos_x ] = 0x80;\n\n\t\t\t\t\tif( ST011_board[ pos_y*10 + pos_x + 21 ] ) break;\n\t\t\t\t}\n\n\t\t\t\tlcv_move += 2;\n\t\t\t}\n\t\t} // end col\n\t} // end row\n}\n\n\nint ST011_Project_Valid_Moves( int color )\n{\n\tint row, col, lcv, index;\n\tint dir;\n\n\tindex = 0x556;\n\n\tif( color == 0x20 )\n\t{\n\t\tdir = 1;\n\t}\n\telse\n\t{\n\t\tdir = -1;\n\t}\n\n\tfor( row = 0; row < 9; row++ )\n\t{\n\t\tfor( col = 0; col < 10; col++ )\n\t\t{\n\t\t\tint shogi_piece;\n\t\t\tint piece_id;\n\t\t\tint lcv_steps, lcv_move;\n\t\t\tint move_list[ 9*2 ];\n\n\t\t\tshogi_piece = ST011_board[ row*10+col+21 ];\n\t\t\tpiece_id = shogi_piece & 0x1f;\n\n\t\t\tif( col == 9 ) continue;\n\t\t\tif( shogi_piece == 0x00 ) continue;\n\t\t\tif( ( shogi_piece & ~0x1f ) != color ) continue;\n\n\t\t\tfor( lcv = 0; lcv < 9*2; lcv++ )\n\t\t\t{\n\t\t\t\tmove_list[ lcv ] = ST011_move_table[ piece_id >> 1 ][ lcv ];\n\t\t\t}\n\n\t\t\tlcv_move = 0;\n\t\t\twhile( move_list[ lcv_move ] != 0x7f )\n\t\t\t{\n\t\t\t\tint pos_x, pos_y;\n\n\t\t\t\tlcv_steps = 1;\n\t\t\t\tif( move_list[ lcv_move ] == 9 || move_list[ lcv_move ] == -9 )\n\t\t\t\t{\n\t\t\t\t\tlcv_steps = 9;\n\t\t\t\t\tif( move_list[ lcv_move ] == 9 )\n\t\t\t\t\t{\n\t\t\t\t\t\tmove_list[ lcv_move ] = 
1;\n\t\t\t\t\t}\n\t\t\t\t\telse\n\t\t\t\t\t{\n\t\t\t\t\t\tmove_list[ lcv_move ] = -1;\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tif( move_list[ lcv_move+1 ] == 9 || move_list[ lcv_move+1 ] == -9 )\n\t\t\t\t{\n\t\t\t\t\tlcv_steps = 9;\n\t\t\t\t\tif( move_list[ lcv_move+1 ] == 9 )\n\t\t\t\t\t{\n\t\t\t\t\t\tmove_list[ lcv_move+1 ] = 1;\n\t\t\t\t\t}\n\t\t\t\t\telse\n\t\t\t\t\t{\n\t\t\t\t\t\tmove_list[ lcv_move+1 ] = -1;\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\telse\n\t\t\t\t{\n\t\t\t\t\tmove_list[ lcv_move+1 ] /= 10;\n\t\t\t\t}\n\n\t\t\t\tpos_x = col;\n\t\t\t\tpos_y = row;\n\t\t\t\twhile( lcv_steps-- )\n\t\t\t\t{\n\t\t\t\t\tpos_x += move_list[ lcv_move+0 ];\n\t\t\t\t\tpos_y += ( move_list[ lcv_move+1 ] * dir );\n\n\t\t\t\t\tif( pos_x < 0 ) break;\n\t\t\t\t\tif( pos_x > 8 ) break;\n\t\t\t\t\tif( pos_y < 0 ) break;\n\t\t\t\t\tif( pos_y > 8 ) break;\n\t\t\t\t\tif( ( ST011_board[ pos_y*10 + pos_x + 21 ] & ~0x1f ) == color ) break;\n\n\t\t\t\t\tST011_ram[ index + 0x000 ] = 21 + row*10 + col;\n\t\t\t\t\tST011_ram[ index + 0x001 ] = 0;\n\t\t\t\t\tST011_ram[ index + 0x418 ] = 21 + pos_y*10 + pos_x;\n\t\t\t\t\tST011_ram[ index + 0x419 ] = 0;\n\n\t\t\t\t\tif( pos_y >= 6 )\n\t\t\t\t\t{\n\t\t\t\t\t\tST011_ram[ index + 0x418 ] |= 0x80;\n\t\t\t\t\t}\n\t\t\t\t\tindex += 2;\n\n\t\t\t\t\tif( ST011_board[ pos_y*10+pos_x+21 ] ) break;\n\t\t\t\t}\n\n\t\t\t\tlcv_move += 2;\n\t\t\t}\n\t\t} // end col\n\t} // end row\n\n\treturn ( index-0x556 ) >> 1;\n}\n\n\nvoid ST011_OP04()\n{\n\tST011_Project_Moves( 0x40 );\n\n\t// unknown outputs\n\tST011_ram[ 0x12c ] = 0;\n\tST011_ram[ 0x12d ] = 0;\n\tST011_ram[ 0x12e ] = 0;\n\tST011_ram[ 0x12f ] = 0;\n\n\tRunST011 = &ST011_Command;\n\tST011_SR = 0xc4;\n}\n\n\nvoid ST011_OP05()\n{\n\tST011_Project_Moves( 0x20 );\n\n\t// unknown outputs\n\tST011_ram[ 0x12c ] = 0;\n\tST011_ram[ 0x12d ] = 0;\n\tST011_ram[ 0x12e ] = 0;\n\tST011_ram[ 0x12f ] = 0;\n\n\tRunST011 = &ST011_Command;\n\tST011_SR = 0xc4;\n}\n\n\nvoid ST011_OP0E()\n{\n\tint valid_moves;\n\n\tvalid_moves = ST011_Project_Valid_Moves( 0x20 );\n\n\tST011_ram[ 0x12c ] = valid_moves & 0xff;\n\tST011_ram[ 0x12d ] = ( valid_moves >> 8 ) & 0xff;\n\n\tRunST011 = &ST011_Command;\n\tST011_SR = 0xc4;\n}\n\n\nvoid ST011_Command()\n{\n#ifdef DEBUG_DSP\n\tprintf( \"OP%02X @ line %d\\n\", ST011_DR, line_count );\n#endif\n\n\t// busy\n\tST011_SR = 0x84;\n\n\tswitch( ST011_DR )\n\t{\n\t\t// Download shogi playboard to on-board memory\n\t\tcase 0x01:\n\t\t\tST011_OP01();\n\t\t\tbreak;\n\n\t\t// Upload shogi analysis data to outside memory\n\t\tcase 0x02:\n\t\t\tST011_input_length = 4;\n\t\t\tRunST011 = ST011_OP02;\n\t\t\tbreak;\n\n\t\t// Project all moves of player color $40\n\t\tcase 0x04:\n\t\t\tST011_OP04();\n\t\t\tbreak;\n\n\t\t// Project all moves of player color $20\n\t\tcase 0x05:\n\t\t\tST011_OP05();\n\t\t\tbreak;\n\n\t\t// Unknown - seems to set flags $00,$20,$40,..$e0 for restricted movement lists\n\t\tcase 0x06:\n\t\t\t//ST011_OP06();\n\t\t\tST011_SR = 0xc4;\n\t\t\tbreak;\n\n\t\t// Unknown - seems to set flags $00,$20,$40,..$e0 for restricted movement lists\n\t\tcase 0x07:\n\t\t\t//ST011_OP07();\n\t\t\tST011_SR = 0xc4;\n\t\t\tbreak;\n\n\t\t// List valid moves of player color $20\n\t\tcase 0x0E:\n\t\t\tST011_OP0E();\n\t\t\tbreak;\n\n\t\tdefault:\n#ifdef DEBUG_DSP\n\t\t\tprintf( \"Unknown OP @ line %d\\n\", line_count );\n#endif\n\t\t\tbreak;\n\t}\n}\n\n\nunsigned short seta11_address;\nunsigned char seta11_byte;\n\nvoid ST011_MapR_68()\n{\n if (seta11_address < 0x1000)\n {\n ST011_DR = ST011_ram[seta11_address & 0xfff];\n }\n seta11_byte = 
ST011_DR;\n}\n\nvoid ST011_MapW_68()\n{\n ST011_DR = seta11_byte;\n\n if (seta11_address < 0x1000)\n {\n ST011_ram[seta11_address & 0xfff] = ST011_DR;\n }\n}\n\nvoid ST011_MapR_60()\n{\n if (seta11_address == 0)\n {\n RunST011();\n }\n if (seta11_address == 1)\n {\n seta11_byte = ST011_SR;\n return;\n }\n seta11_byte = ST011_DR;\n}\n\nvoid ST011_MapW_60()\n{\n ST011_DR = seta11_byte;\n\n if (seta11_address == 0)\n {\n RunST011();\n }\n}\n" }, { "alpha_fraction": 0.3683854341506958, "alphanum_fraction": 0.4223846197128296, "avg_line_length": 43.512046813964844, "blob_id": "7eea9153a27e6454e65583a7562f409f30d71191", "content_id": "fdf77de0f18e7e7c5c6d49ac17bd5b8f79d61e21", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 7389, "license_type": "no_license", "max_line_length": 88, "num_lines": 166, "path": "/src/cpu/zspc/disasm.c", "repo_name": "TeoTwawki/zsnes-archive", "src_encoding": "UTF-8", "text": "// http://www.slack.net/~ant/\n\n#include \"disasm.h\"\n\n#include <stdio.h>\n\n/* Copyright (C) 2005-2007 by Shay Green. Permission is hereby granted, free of\ncharge, to any person obtaining a copy of this software module and associated\ndocumentation files (the \"Software\"), to deal in the Software without\nrestriction, including without limitation the rights to use, copy, modify,\nmerge, publish, distribute, sublicense, and/or sell copies of the Software, and\nto permit persons to whom the Software is furnished to do so, subject to the\nfollowing conditions: The above copyright notice and this permission notice\nshall be included in all copies or substantial portions of the Software. THE\nSOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,\nINCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\nPARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR\nCOPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER\nIN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\nCONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*/\n\nstatic char const op_lens [256] =\n{ //0 1 2 3 4 5 6 7 8 9 A B C D E F\n 1,1,2,3,2,3,1,2,2,3,3,2,3,1,3,1,// 0\n 2,1,2,3,2,3,3,2,3,1,2,2,1,1,3,3,// 1\n 1,1,2,3,2,3,1,2,2,3,3,2,3,1,3,2,// 2\n 2,1,2,3,2,3,3,2,3,1,2,2,1,1,2,3,// 3\n 1,1,2,3,2,3,1,2,2,3,3,2,3,1,3,2,// 4\n 2,1,2,3,2,3,3,2,3,1,2,2,1,1,3,3,// 5\n 1,1,2,3,2,3,1,2,2,3,3,2,3,1,3,1,// 6\n 2,1,2,3,2,3,3,2,3,1,2,2,1,1,2,1,// 7\n 1,1,2,3,2,3,1,2,2,3,3,2,3,2,1,3,// 8\n 2,1,2,3,2,3,3,2,3,1,2,2,1,1,1,1,// 9\n 1,1,2,3,2,3,1,2,2,3,3,2,3,2,1,1,// A\n 2,1,2,3,2,3,3,2,3,1,2,2,1,1,1,1,// B\n 1,1,2,3,2,3,1,2,2,3,3,2,3,2,1,1,// C\n 2,1,2,3,2,3,3,2,2,2,2,2,1,1,3,1,// D\n 1,1,2,3,2,3,1,2,2,3,3,2,3,1,1,1,// E\n 2,1,2,3,2,3,3,2,2,2,3,2,1,1,2,1 // F\n};\n\nint spc_disasm_len( int opcode ) { return op_lens [opcode]; }\n\nstatic const char op_names [0x100] [16] =\n{\n\"NOP\", \"TCALL 0\", \"SET1 d.0\", \"BBS d.0, r\",\n\"OR A, d\", \"OR A, !a\", \"OR A, (X)\", \"OR A, [d+X]\",\n\"OR A, #i\", \"OR dd, ds\", \"OR1 C, m.b\", \"ASL d\",\n\"ASL !a\", \"PUSH PSW\", \"TSET1 !a\", \"BRK\",\n\"BPL r\", \"TCALL 1\", \"CLR1 d.0\", \"BBC d.0, r\",\n\"OR A, d+X\", \"OR A, !a+X\", \"OR A, !a+Y\", \"OR A, [d]+Y\",\n\"OR d, #i\", \"OR (X), (Y)\", \"DECW d\", \"ASL d+X\",\n\"ASL A\", \"DEC X\", \"CMP X, !a\", \"JMP [!a+X]\",\n\"CLRP\", \"TCALL 2\", \"SET1 d.1\", \"BBS d.1, r\",\n\"AND A, d\", \"AND A, !a\", \"AND A, (X)\", \"AND A, [d+X]\",\n\"AND A, #i\", \"AND dd, ds\", \"OR1 C, /m.b\", \"ROL d\",\n\"ROL !a\", \"PUSH A\", \"CBNE d, r\", \"BRA r\",\n\"BMI r\", \"TCALL 3\", \"CLR1 d.1\", \"BBC d.1, r\",\n\"AND A, d+X\", \"AND A, !a+X\", \"AND A, !a+Y\", \"AND A, [d]+Y\",\n\"AND d, #i\", \"AND (X), (Y)\", \"INCW d\", \"ROL d+X\",\n\"ROL A\", \"INC X\", \"CMP X, d\", \"CALL !a\",\n\"SETP\", \"TCALL 4\", \"SET1 d.2\", \"BBS d.2, r\",\n\"EOR A, d\", \"EOR A, !a\", \"EOR A, (X)\", \"EOR A, [d+X]\",\n\"EOR A, #i\", \"EOR dd, ds\", \"AND1 C, m.b\", \"LSR d\",\n\"LSR !a\", \"PUSH X\", \"TCLR1 !a\", \"PCALL u\",\n\"BVC r\", \"TCALL 5\", \"CLR1 d.2\", \"BBC d.2, r\",\n\"EOR A, d+X\", \"EOR A, !a+X\", \"EOR A, !a+Y\", \"EOR A, [d]+Y\",\n\"EOR d, #i\", \"EOR (X), (Y)\", \"CMPW YA, d\", \"LSR d+X\",\n\"LSR A\", \"MOV X, A\", \"CMP Y, !a\", \"JMP !a\",\n\"CLRC\", \"TCALL 6\", \"SET1 d.3\", \"BBS d.3, r\",\n\"CMP A, d\", \"CMP A, !a\", \"CMP A, (X)\", \"CMP A, [d+X]\",\n\"CMP A, #i\", \"CMP dd, ds\", \"AND1 C, /m.b\", \"ROR d\",\n\"ROR !a\", \"PUSH Y\", \"DBNZ d, r\", \"RET\",\n\"BVS r\", \"TCALL 7\", \"CLR1 d.3\", \"BBC d.3, r\",\n\"CMP A, d+X\", \"CMP A, !a+X\", \"CMP A, !a+Y\", \"CMP A, [d]+Y\",\n\"CMP d, #i\", \"CMP (X), (Y)\", \"ADDW YA, d\", \"ROR d+X\",\n\"ROR A\", \"MOV A, X\", \"CMP Y, d\", \"RET1\",\n\"SETC\", \"TCALL 8\", \"SET1 d.4\", \"BBS d.4, r\",\n\"ADC A, d\", \"ADC A, !a\", \"ADC A, (X)\", \"ADC A, [d+X]\",\n\"ADC A, #i\", \"ADC dd, ds\", \"EOR1 C, m.b\", \"DEC d\",\n\"DEC !a\", \"MOV Y, #i\", \"POP PSW\", \"MOV d, #i\",\n\"BCC r\", \"TCALL 9\", \"CLR1 d.4\", \"BBC d.4, r\",\n\"ADC A, d+X\", \"ADC A, !a+X\", \"ADC A, !a+Y\", \"ADC A, [d]+Y\",\n\"ADC d, #i\", \"ADC (X), (Y)\", \"SUBW YA, d\", \"DEC d+X\",\n\"DEC A\", \"MOV X, SP\", \"DIV YA, X\", \"XCN A\",\n\"EI\", \"TCALL 10\", \"SET1 d.5\", \"BBS d.5, r\",\n\"SBC A, d\", \"SBC A, !a\", \"SBC A, (X)\", \"SBC A, [d+X]\",\n\"SBC A, #i\", \"SBC dd, ds\", \"MOV1 C, m.b\", \"INC d\",\n\"INC !a\", \"CMP Y, #i\", \"POP A\", \"MOV (X)+, A\",\n\"BCS r\", \"TCALL 11\", \"CLR1 d.5\", \"BBC d.5, r\",\n\"SBC A, d+X\", \"SBC A, !a+X\", \"SBC A, !a+Y\", \"SBC A, [d]+Y\",\n\"SBC d, #i\", \"SBC (X), (Y)\", \"MOVW YA, d\", \"INC 
d+X\",\n\"INC A\", \"MOV SP, X\", \"DAS A\", \"MOV A, (X)+\",\n\"DI\", \"TCALL 12\", \"SET1 d.6\", \"BBS d.6, r\",\n\"MOV d, A\", \"MOV !a, A\", \"MOV (X), A\", \"MOV [d+X], A\",\n\"CMP X, #i\", \"MOV !a, X\", \"MOV1 m.b, C\", \"MOV d, Y\",\n\"MOV !a, Y\", \"MOV X, #i\", \"POP X\", \"MUL YA\",\n\"BNE r\", \"TCALL 13\", \"CLR1 d.6\", \"BBC d.6, r\",\n\"MOV d+X, A\", \"MOV !a+X, A\", \"MOV !a+Y, A\", \"MOV [d]+Y, A\",\n\"MOV d, X\", \"MOV d+Y, X\", \"MOVW d, YA\", \"MOV d+X, Y\",\n\"DEC Y\", \"MOV A, Y\", \"CBNE d+X, r\", \"DAA A\",\n\"CLRV\", \"TCALL 14\", \"SET1 d.7\", \"BBS d.7, r\",\n\"MOV A, d\", \"MOV A, !a\", \"MOV A, (X)\", \"MOV A, [d+X]\",\n\"MOV A, #i\", \"MOV X, !a\", \"NOT1 m.b\", \"MOV Y, d\",\n\"MOV Y, !a\", \"NOTC\", \"POP Y\", \"SLEEP\",\n\"BEQ r\", \"TCALL 15\", \"CLR1 d.7\", \"BBC d.7, r\",\n\"MOV A, d+X\", \"MOV A, !a+X\", \"MOV A, !a+Y\", \"MOV A, [d]+Y\",\n\"MOV X, d\", \"MOV X, d+Y\", \"MOV dd, ds\", \"MOV Y, d+X\",\n\"INC Y\", \"MOV Y, A\", \"DBNZ Y, r\", \"STOP\"\n};\n\nconst char* spc_disasm_form( int opcode ) { return op_names [opcode]; }\n\nint spc_disasm( unsigned addr, int opcode, int x, int y, char* out )\n{\n\t// Interpret opcode format\n\tconst char* in = op_names [opcode];\n\twhile ( (*out = *in) != 0 )\n\t{\n\t\tswitch ( *in++ )\n\t\t{\n\t\tcase 'i': // #i\n\t\t\tout += sprintf( out, \"$%02X\", x );\n\t\t\tbreak;\n\n\t\tcase 'u': // PCALL u\n\t\t\tout += sprintf( out, \"$FF%02X\", x );\n\t\t\tbreak;\n\n\t\tcase 'd': { // d, dd, ds\n\t\t\tint n = y;\n\t\t\tif ( *in != 'd' )\n\t\t\t{\n\t\t\t\tn = x;\n\t\t\t\tx = y;\n\t\t\t}\n\t\t\tif ( *in == 'd' || *in == 's' )\n\t\t\t\t++in;\n\n\t\t\tout += sprintf( out, \"$%02X\", n );\n\t\t\tbreak;\n\t\t}\n\n\t\tcase 'a': // !a\n\t\t\tout += sprintf( out, \"$%04X\", y * 0x100 + x );\n\t\t\tbreak;\n\n\t\tcase 'm': // m.b\n\t\t\tout += sprintf( out, \"$%04X\", (y * 0x100 + x) & 0x1FFF );\n\t\t\tbreak;\n\n\t\tcase 'b': // m.b\n\t\t\tout += sprintf( out, \"%d\", y >> 5 );\n\t\t\tbreak;\n\n\t\tcase 'r': // branch r\n\t\t\tout += sprintf( out, \"$%04X\", (addr + op_lens [opcode] + (signed char) x) & 0xFFFF );\n\t\t\tbreak;\n\n\t\tdefault:\n\t\t\t++out;\n\t\t}\n\t}\n\n\treturn op_lens [opcode];\n}\n" }, { "alpha_fraction": 0.5178095102310181, "alphanum_fraction": 0.5980965495109558, "avg_line_length": 22.214414596557617, "blob_id": "4226593746c8942051c96d6015f9f2a5863221b3", "content_id": "a4c44d222a239c7259a6f2d9979186ffb3ed8068", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 64095, "license_type": "no_license", "max_line_length": 109, "num_lines": 2761, "path": "/src/initc.c", "repo_name": "TeoTwawki/zsnes-archive", "src_encoding": "UTF-8", "text": "/*\nCopyright (C) 1997-2008 ZSNES Team ( zsKnight, _Demo_, pagefault, Nach )\n\nhttp://www.zsnes.com\nhttp://sourceforge.net/projects/zsnes\nhttps://zsnes.bountysource.com\n\nThis program is free software; you can redistribute it and/or\nmodify it under the terms of the GNU General Public License\nversion 2 as published by the Free Software Foundation.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with this program; if not, write to the Free Software\nFoundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.\n*/\n\n\n\n#ifdef __UNIXSDL__\n#include \"gblhdr.h\"\n#include \"linux/audio.h\"\n#define DIR_SLASH \"/\"\n#else\n#include <stdio.h>\n#include <stdlib.h>\n#include <ctype.h>\n#include <string.h>\n#include <sys/stat.h>\n#include <stdbool.h>\n#define DIR_SLASH \"\\\\\"\n#endif\n#include \"asm_call.h\"\n#include \"cfg.h\"\n#include \"input.h\"\n#include \"zpath.h\"\n#include \"cpu/memtable.h\"\n#include \"cpu/zspc/zspc.h\"\n#include <stdint.h>\n\n#ifdef QT_DEBUGGER\n#include \"debugger/load.h\"\n#endif\n\n#define NUMCONV_FR4\n#include \"numconv.h\"\n\n#ifndef __GNUC__\n#define strcasecmp stricmp\n#define strncasecmp strnicmp\n#endif\n\n//NSRT Goodness\n#define Lo 0x7FC0\n#define Hi 0xFFC0\n#define EHi 0x40FFC0\n\n#define MB_bytes 0x100000\n#define Mbit_bytes 0x20000\n\n//Offsets to add to infoloc start to reach particular variable\n#define BankOffset 21 //Contains Speed as well\n#define TypeOffset 22\n#define ROMSizeOffset 23\n#define SRAMSizeOffset 24\n#define CountryOffset 25\n#define CompanyOffset 26\n#define VersionOffset 27\n#define InvCSLowOffset 28\n#define InvCSHiOffset 29\n#define CSLowOffset 30\n#define CSHiOffset 31\n//Additional defines for the BS header\n#define BSYearOffset 21 //Not sure how to calculate year yet\n#define BSMonthOffset 22\n#define BSDayOffset 23\n#define BSBankOffset 24\n#define BSSizeOffset 25 //Contains Type as well\n//26 - 31 is the same\n#define ResetLoOffset 60\n#define ResetHiOffset 61\n\n\n// Some archaic code from an unfinished Dynarec\nextern uint32_t curexecstate;\nextern bool spcon;\n\nvoid procexecloop()\n{\n curexecstate &= 0xFFFFFF00;\n\n if (spcon) { curexecstate += 3; }\n else { curexecstate += 1; }\n}\n\nvoid Debug_WriteString(char *str)\n{\n FILE *fp = 0;\n fp = fopen_dir(ZCfgPath, \"zsnes.dbg\", \"w\");\n if (!fp) { return; }\n fputs(str, fp);\n fclose(fp);\n}\n\n//I want to port over the more complicated\n//functions from init.asm, or replace with\n//better versions from NSRT. 
-Nach\n\n//init.asm goodness\nextern uint32_t NumofBanks;\nextern uint32_t NumofBytes;\nextern uint8_t *romdata;\nextern uint8_t romtype;\nextern uint8_t Interleaved;\n\nuint32_t maxromspace;\nuint32_t curromspace;\nuint32_t infoloc;\nuint32_t ramsize;\nuint32_t ramsizeand;\n\nbool SplittedROM;\nuint32_t addOnStart;\nuint32_t addOnSize;\n\n\n//Deinterleave functions\nbool validChecksum(uint8_t *ROM, int32_t BankLoc)\n{\n if (ROM[BankLoc + InvCSLowOffset] + (ROM[BankLoc + InvCSHiOffset] << 8) +\n ROM[BankLoc + CSLowOffset] + (ROM[BankLoc + CSHiOffset] << 8) == 0xFFFF)\n {\n return(true);\n }\n return(false);\n}\n\nbool valid_normal_bank(uint8_t bankbyte)\n{\n switch (bankbyte)\n {\n case 32: case 33: case 48: case 49:\n return(true);\n break;\n }\n return(false);\n}\n\nbool EHiHeader(uint8_t *ROM, int32_t BankLoc)\n{\n if (validChecksum(ROM, BankLoc) && (ROM[BankLoc+BankOffset] == 53 || ROM[BankLoc+BankOffset] == 37))\n {\n return(true);\n }\n return(false);\n}\n\nvoid SwapData(uint32_t *loc1, uint32_t *loc2, uint32_t amount)\n{\n uint32_t temp;\n while (amount--)\n {\n temp = *loc1;\n *loc1++ = *loc2;\n *loc2++ = temp;\n }\n}\n\nvoid swapBlocks(uint8_t *blocks)\n{\n uint_fast32_t i, j;\n for (i = 0; i < NumofBanks; i++)\n {\n for (j = 0; j < NumofBanks; j++)\n {\n if (blocks[j] == (int8_t)i)\n {\n int8_t b;\n SwapData(((uint32_t *)romdata + blocks[i]*0x2000), ((uint32_t *)romdata + blocks[j]*0x2000), 0x2000);\n b = blocks[j];\n blocks[j] = blocks[i];\n blocks[i] = b;\n break;\n }\n }\n }\n}\n\nvoid deintlv1()\n{\n uint8_t blocks[256];\n int_fast32_t i;\n int32_t numblocks = NumofBanks/2;\n for (i = 0; i < numblocks; i++)\n {\n blocks[i * 2] = i + numblocks;\n blocks[i * 2 + 1] = i;\n }\n swapBlocks(blocks);\n}\n\nvoid CheckIntl1(uint8_t *ROM)\n{\n uint32_t ROMmidPoint = NumofBytes / 2;\n if (validChecksum(ROM, ROMmidPoint + Lo) &&\n !validChecksum(ROM, Lo) &&\n ROM[ROMmidPoint+Lo+CountryOffset] < 14) //Country Code\n {\n deintlv1();\n Interleaved = true;\n }\n else if (validChecksum(ROM, Lo) && !validChecksum(ROM, Hi) &&\n ROM[Lo+CountryOffset] < 14 && //Country code\n //Rom make up\n (ROM[Lo+BankOffset] == 33 || ROM[Lo+BankOffset] == 49 ||\n ROM[Lo+BankOffset] == 53 || ROM[Lo+BankOffset] == 58))\n {\n if (ROM[Lo+20] == 32 ||//Check that Header name did not overflow\n !(ROM[Lo+BankOffset] == ROM[Lo+20] || ROM[Lo+BankOffset] == ROM[Lo+19] ||\n ROM[Lo+BankOffset] == ROM[Lo+18] || ROM[Lo+BankOffset] == ROM[Lo+17]))\n {\n deintlv1();\n Interleaved = true;\n }\n }\n}\n\nvoid CheckIntlEHi(uint8_t *ROM)\n{\n if (EHiHeader(ROM, Lo))\n {\n uint32_t oldNumBanks = NumofBanks;\n\n //Swap 4MB ROM with the other one\n SwapData((uint32_t *)romdata, ((uint32_t *)romdata+((NumofBytes-0x400000)/4)), 0x100000);\n\n //Deinterleave the 4MB ROM first\n NumofBanks = 128;\n deintlv1();\n\n //Now the other one\n NumofBanks = oldNumBanks - 128;\n romdata += 0x100000; //Ofset pointer\n deintlv1();\n\n //Now fix the data and we're done\n NumofBanks = oldNumBanks;\n romdata -= 0x100000;\n\n Interleaved = true;\n }\n}\n\n//ROM loading functions, which some strangly enough were in guiload.inc\nbool AllASCII(unsigned char *b, int32_t size)\n{\n int_fast32_t i;\n for (i = 0; i < size; i++)\n {\n if (b[i] && (b[i] < 32 || b[i] > 126))\n {\n return(false);\n }\n }\n return(true);\n}\n\n//Code to detect if opcode sequence is a valid and popular one for an SNES ROM\n//Code by Cowering\nstatic bool valid_start_sequence(uint8_t opcode1, uint8_t opcode2, uint8_t opcode3)\n{\n switch (opcode1)\n {\n case 0x78: case 0x5c: case 
0x18: case 0xad:\n return(true);\n break;\n case 0x4b:\n if (opcode2 == 0xab && (opcode3 == 0x18 || opcode3 == 0x20))\n {\n return(true);\n }\n break;\n case 0x4c:\n if ((opcode2 == 0x00 || opcode2 == 0xc0) && opcode3 == 0x84)\n {\n return(true);\n }\n if (opcode2 == 0x6d && opcode3 == 0x86)\n {\n return(true);\n }\n if (opcode2 == 0x00 && opcode3 == 0x80)\n {\n return(true);\n }\n break;\n case 0xc2:\n if (opcode2 == 0x30 && opcode3 == 0xa9)\n {\n return(true);\n }\n break;\n case 0x20:\n if ((opcode2 == 0x16 || opcode2 == 0x06) && opcode3 == 0x80)\n {\n return(true);\n }\n break;\n case 0x80:\n if ((opcode2 == 0x16 && opcode3 == 0x4c) ||\n (opcode2 == 0x07 && opcode3 == 0x82))\n {\n return(true);\n }\n break;\n case 0x9c:\n if (opcode2 == 0x00 && opcode3 == 0x21)\n {\n return(true);\n }\n break;\n case 0xa2:\n if (opcode2 == 0xff && opcode3 == 0x86)\n {\n return(true);\n }\n break;\n case 0xa9:\n if ((opcode2 == 0x00 && (opcode3 = 0x48 || opcode3 == 0x4b)) ||\n (opcode2 == 0x8f && opcode3 == 0x8d) ||\n (opcode2 == 0x20 && opcode3 == 0x4b) ||\n (opcode2 == 0x1f && opcode3 == 0x4b))\n {\n return(true);\n }\n break;\n }\n return(false);\n}\n\nstatic int16_t valid_reset(uint8_t *Buffer)\n{\n uint8_t *ROM = romdata;\n uint16_t Reset = Buffer[ResetLoOffset] | ((uint16_t)Buffer[ResetHiOffset] << 8);\n if ((Reset != 0xFFFF) && (Reset & 0x8000))\n {\n uint8_t opcode1 = ROM[(Reset+0) & 0x7FFF];\n uint8_t opcode2 = ROM[(Reset+1) & 0x7FFF];\n uint8_t opcode3 = ROM[(Reset+2) & 0x7FFF];\n\n if (valid_start_sequence(opcode1, opcode2, opcode3))\n {\n return(10);\n }\n return(2);\n }\n return(-4);\n}\n\nint32_t InfoScore(uint8_t *Buffer)\n{\n int32_t score = valid_reset(Buffer);\n if (validChecksum(Buffer, 0)) { score += 5; }\n if (Buffer[CompanyOffset] == 0x33) { score += 3; }\n if (!Buffer[ROMSizeOffset]) { score += 2; }\n if ((1 << (Buffer[ROMSizeOffset] - 7)) > 48) { score -= 2; }\n if ((8 << Buffer[SRAMSizeOffset]) > 1024) { score -= 2; }\n if (Buffer[CountryOffset] < 14) { score += 2; }\n if (!AllASCII(Buffer, 20)) { score -= 2; }\n if (valid_normal_bank(Buffer[BSBankOffset])) { score += 2; }\n return(score);\n}\n\nextern uint8_t ForceHiLoROM;\nextern uint8_t forceromtype;\n\nvoid BankCheck()\n{\n uint8_t *ROM = romdata;\n infoloc = 0;\n Interleaved = false;\n\n if (NumofBytes < Lo)\n {\n romtype = 1;\n infoloc = 1; //Whatever, we just need a valid location\n }\n\n if (NumofBytes < Hi)\n {\n romtype = 1;\n infoloc = Lo;\n }\n\n if (NumofBytes >= 0x500000)\n {\n //Deinterleave if neccesary\n CheckIntlEHi(ROM);\n\n if (EHiHeader(ROM, EHi))\n {\n romtype = 2;\n infoloc = EHi;\n }\n }\n\n if (!infoloc)\n {\n static bool CommandLineForce2 = false;\n int32_t loscore, hiscore;\n\n //Deinterleave if neccesary\n CheckIntl1(ROM);\n\n loscore = InfoScore(ROM+Lo);\n hiscore = InfoScore(ROM+Hi);\n\n switch(ROM[Lo + BankOffset])\n {\n case 32: case 35: case 48: case 50:\n loscore += 3;\n break;\n }\n switch(ROM[Hi + BankOffset])\n {\n case 33: case 49: case 53: case 58:\n hiscore += 3;\n break;\n }\n\n /*\n Force code.\n ForceHiLoROM is from the GUI.\n forceromtype is from Command line, we have a static var\n to prevent forcing a secong game loaded from the GUI when\n the first was loaded from the command line with forcing.\n */\n if (ForceHiLoROM == 1 ||\n (forceromtype == 1 && !CommandLineForce2))\n {\n CommandLineForce2 = true;\n loscore += 50;\n }\n else if (ForceHiLoROM == 2 ||\n (forceromtype == 2 && !CommandLineForce2))\n {\n CommandLineForce2 = true;\n hiscore += 50;\n }\n\n if (hiscore > loscore)\n {\n 
romtype = 2;\n infoloc = Hi;\n }\n else\n {\n romtype = 1;\n infoloc = Lo;\n }\n }\n}\n\n//Chip detection functions\nbool CHIPBATT, BSEnable, C4Enable, DSP1Enable, DSP2Enable, DSP3Enable;\nbool DSP4Enable, OBCEnable, RTCEnable, SA1Enable, SDD1Enable, SFXEnable;\nbool SETAEnable; //ST010 & 11\nbool SGBEnable, SPC7110Enable, ST18Enable;\n\nvoid chip_detect()\n{\n uint8_t *ROM = romdata;\n\n C4Enable = RTCEnable = SA1Enable = SDD1Enable = OBCEnable = CHIPBATT = false;\n SGBEnable = ST18Enable = DSP1Enable = DSP2Enable = DSP3Enable = false;\n DSP4Enable = SPC7110Enable = BSEnable = SFXEnable = SETAEnable = false;\n\n //DSP Family\n if (ROM[infoloc+TypeOffset] == 3)\n {\n if (ROM[infoloc+BankOffset] == 48) { DSP4Enable = true; }\n else { DSP1Enable = true; }\n return;\n }\n\n if (ROM[infoloc+TypeOffset] == 5)\n {\n CHIPBATT = true;\n if (ROM[infoloc+BankOffset] == 32) { DSP2Enable = true; }\n else if (ROM[infoloc+BankOffset] == 48 && ROM[infoloc+CompanyOffset] == 0xB2) //Bandai\n { DSP3Enable = true; }\n else { DSP1Enable = true; }\n return;\n }\n\n switch((uint16_t)ROM[infoloc+BankOffset] | (ROM[infoloc+TypeOffset] << 8))\n {\n case 0x1320: //Mario Chip 1\n case 0x1420: //GSU-x\n SFXEnable = true;\n return;\n break;\n\n case 0x1520: //GSU-x + Battery\n case 0x1A20: //GSU-1 + Battery + Start in 21MHz\n SFXEnable = true;\n CHIPBATT = true;\n return;\n break;\n\n case 0x2530:\n OBCEnable = true;\n CHIPBATT = true;\n return;\n break;\n\n case 0x3423:\n SA1Enable = true;\n return;\n break;\n\n case 0x3223: //One sample game seems to use this for some reason\n case 0x3523:\n SA1Enable = true;\n CHIPBATT = true;\n return;\n break;\n\n case 0x4332:\n SDD1Enable = true;\n return;\n break;\n\n case 0x4532:\n SDD1Enable = true;\n CHIPBATT = true;\n return;\n break;\n\n case 0x5535:\n RTCEnable = true;\n CHIPBATT = true;\n return;\n break;\n\n case 0xE320:\n SGBEnable = true;\n return;\n break;\n\n case 0xF320:\n C4Enable = true;\n return;\n break;\n\n case 0xF530:\n ST18Enable = true;\n CHIPBATT = true; //Check later if this should be removed\n return;\n break;\n\n case 0xF53A:\n SPC7110Enable = true;\n CHIPBATT = true;\n return;\n break;\n\n case 0xF630:\n SETAEnable = true;\n CHIPBATT = true;\n return;\n break;\n\n case 0xF93A:\n SPC7110Enable = true;\n RTCEnable = true;\n CHIPBATT = true;\n return;\n break;\n }\n\n //BS Dump\n if ((ROM[infoloc+CompanyOffset] == 0x33 || ROM[infoloc+CompanyOffset] == 0xFF) &&\n (!ROM[infoloc+BSYearOffset] || (ROM[infoloc+BSYearOffset] & 131) == 128) &&\n valid_normal_bank(ROM[infoloc+BSBankOffset]))\n {\n uint8_t m = ROM[infoloc+BSMonthOffset];\n if (!m && !ROM[infoloc+BSDayOffset])\n {\n //BS Add-on cart\n return;\n }\n if ((m == 0xFF && ROM[infoloc+BSDayOffset] == 0xFF) ||\n (!(m & 0xF) && ((m >> 4) - 1 < 12)))\n {\n BSEnable = true;\n return;\n }\n }\n}\n\n//Checksum functions\nuint16_t sum(uint8_t *array, size_t size)\n{\n uint16_t theSum = 0;\n uint_fast32_t i;\n\n //Prevent crashing by reading too far (needed for messed up ROMs)\n if (array + size > romdata + maxromspace)\n {\n return(0xFFFF);\n }\n\n for (i = 0; i < size; i++)\n {\n theSum += array[i];\n }\n return(theSum);\n}\n\nstatic uint16_t Checksumvalue;\nvoid CalcChecksum()\n{\n uint8_t *ROM = romdata;\n\n if (SplittedROM)\n {\n Checksumvalue = sum(ROM+addOnStart, addOnSize);\n Checksumvalue -= sum(ROM+infoloc+addOnStart-16, 48);\n }\n else if (SPC7110Enable)\n {\n Checksumvalue = sum(ROM, curromspace);\n }\n else\n {\n Checksumvalue = sum(ROM, curromspace);\n if (NumofBanks > 128 && maxromspace == 
6*MB_bytes)\n {\n Checksumvalue += sum(ROM+4*MB_bytes, 2*MB_bytes);\n }\n if (BSEnable)\n {\n Checksumvalue -= sum(&ROM[infoloc - 16], 48); //Fix for BS Dumps\n }\n }\n}\n\nstatic void rom_memcpy(uint8_t *dest, uint8_t *src, size_t len)\n{\n uint8_t *endrom = romdata+maxromspace;\n while (len-- && (dest < endrom) && (src < endrom))\n {\n *dest++ = *src++;\n }\n}\n\n//This will mirror up non power of two ROMs to powers of two\nstatic uint32_t mirror_rom(uint8_t *start, size_t length)\n{\n uint32_t mask = 0x800000;\n while (!(length & mask)) { mask >>= 1; }\n\n length -= mask;\n if (length)\n {\n start += mask;\n length = mirror_rom(start, length);\n\n while (length != mask)\n {\n rom_memcpy(start+length, start, length);\n length += length;\n }\n }\n\n return(length+mask);\n}\n\n//Misc functions\nvoid MirrorROM(uint8_t *ROM)\n{\n uint32_t ROMSize, StartMirror = 0;\n if (!SPC7110Enable)\n {\n curromspace = mirror_rom(romdata, curromspace);\n }\n else if (curromspace == 0x300000)\n {\n memcpy(romdata+curromspace, romdata, curromspace);\n curromspace += curromspace;\n }\n\n if (curromspace > maxromspace)\n {\n curromspace = maxromspace;\n }\n NumofBanks = curromspace >> 15;\n\n //This will mirror (now) full sized ROMs through the ROM buffer\n ROMSize = curromspace;\n while (ROMSize < maxromspace)\n {\n ROM[ROMSize++] = ROM[StartMirror++];\n }\n\n //If ROM was too small before, but now decent size with mirroring, adjust location\n if (infoloc < Lo)\n {\n infoloc = Lo;\n }\n}\n\n\nvoid SetupSramSize()\n{\n uint8_t *ROM = romdata;\n if (BSEnable)\n {\n ramsize = 0;\n }\n else if (SFXEnable)\n {\n if (ROM[infoloc+CompanyOffset] == 0x33) //Extended header\n {\n ramsize = 8 << ((uint32_t)ROM[infoloc-3]);\n }\n else\n {\n ramsize = 256;\n }\n }\n else if (SETAEnable)\n {\n ramsize = 32;\n }\n else if (!strncmp((char *)ROM, \"BANDAI SFC-ADX\", 14))\n { // For the Sufami Turbo\n ramsize = 8 << ((uint32_t)ROM[0x100032]);\n }\n else\n {\n ramsize = ((ROM[infoloc+SRAMSizeOffset]) ? (8 << ((uint32_t)ROM[infoloc+SRAMSizeOffset])) : 0);\n }\n\n //Fix if some ROM goes nuts on size\n if (ramsize > 1024)\n {\n ramsize = 1024;\n }\n\n //Convert from Kb to bytes;\n ramsize *= 128;\n ramsizeand = ramsize-1;\n}\n\n//File loading code\nbool Header512;\n\nchar CSStatus[41], CSStatus2[41], CSStatus3[41], CSStatus4[41];\n\nvoid DumpROMLoadInfo()\n{\n extern char *ZVERSION, *VERSION_DATE, *VERSION_PORT;\n\n FILE *fp = 0;\n\n if (RomInfo) //rominfo.txt info dumping enabled?\n {\n fp = fopen_dir(ZCfgPath, \"rominfo.txt\", \"w\");\n if (!fp) { return; }\n fprintf(fp, \"This is the info for the last game you ran.\\n\\nZSNES v%s - %s - %s\\n\",\n ZVERSION, VERSION_DATE, VERSION_PORT);\n fputs(\"File: \", fp);\n fputs(ZCartName, fp);\n fputs(\" Header: \", fp);\n fputs(Header512 ? 
\"Yes\\n\" : \"No\\n\", fp);\n fputs(CSStatus, fp);\n fputs(\"\\n\", fp);\n fputs(CSStatus2, fp);\n fputs(\"\\n\", fp);\n fputs(CSStatus3, fp);\n fputs(\"\\n\", fp);\n fputs(CSStatus4, fp);\n fputs(\"\\n\", fp);\n fclose(fp);\n }\n}\n\nvoid loadFile(char *filename)\n{\n bool multifile = false;\n char *incrementer = 0;\n uint8_t *ROM = romdata;\n\n if (strlen(filename) >= 3) //Char + \".1\"\n {\n char *ext = filename+strlen(filename)-2;\n if (!strcmp(ext, \".1\") || !strcasecmp(ext, \".A\"))\n {\n incrementer = ext + 1;\n multifile = true;\n }\n }\n\n for (;;)\n {\n struct stat stat_results;\n stat_dir(ZRomPath, filename, &stat_results);\n\n if ((uint32_t)stat_results.st_size <= maxromspace+512-curromspace)\n {\n FILE *fp = 0;\n fp = fopen_dir(ZRomPath, filename, \"rb\");\n\n if (!fp) { return; }\n\n if (curromspace && ((stat_results.st_size & 0x7FFF) == 512))\n {\n stat_results.st_size -= 512;\n fseek(fp, 512, SEEK_SET);\n }\n\n fread(ROM+curromspace, stat_results.st_size, 1, fp);\n fclose(fp);\n\n curromspace += stat_results.st_size;\n\n if (!multifile) { return; }\n\n (*incrementer)++;\n }\n else\n {\n return;\n }\n }\n}\n\nvoid loadGZipFile(char *filename)\n{\n //Open file for size reading\n FILE *fp = fopen_dir(ZRomPath, filename, \"rb\");\n if (fp)\n {\n uint32_t fsize, gzsize;\n gzFile GZipFile;\n\n fseek(fp, -4, SEEK_END);\n gzsize = fread4(fp);\n fsize = ftell(fp);\n rewind(fp);\n\n //Open GZip file for decompression, use existing file handle\n if ((GZipFile = gzdopen(fileno(fp), \"rb\")))\n {\n uint32_t len = gzdirect(GZipFile) ? fsize : gzsize;\n if (len && (len <= maxromspace+512) && ((uint32_t)gzread(GZipFile, romdata, len) == len))\n {\n curromspace = len; //Success\n }\n gzclose(GZipFile);\n }\n fclose(fp);\n }\n}\n\nvoid loadZipFile(char *filename)\n{\n int err, fileSize;\n uint8_t *ROM = romdata;\n bool multifile = false, NSS = false;\n char *incrementer = 0;\n\n unzFile zipfile = unzopen_dir(ZRomPath, filename); //Open zip file\n int cFile = unzGoToFirstFile(zipfile); //Set cFile to first compressed file\n unz_file_info cFileInfo; //Create variable to hold info for a compressed file\n\n int LargestGoodFile = 0; //To keep track of largest file\n\n //Variables for the file we pick\n char ourFile[256];\n ourFile[0] = '\\n';\n\n while(cFile == UNZ_OK) //While not at end of compressed file list\n {\n //Temporary char array for file name\n char cFileName[256];\n\n //Gets info on current file, and places it in cFileInfo\n unzGetCurrentFileInfo(zipfile, &cFileInfo, cFileName, 256, NULL, 0, NULL, 0);\n\n //Get the file's size\n fileSize = cFileInfo.uncompressed_size;\n\n //Find split files\n if (strlen(cFileName) >= 3) //Char + \".1\"\n {\n char *ext = cFileName+strlen(cFileName)-2;\n if (!strcmp(ext, \".1\") || !strcasecmp(ext, \".A\"))\n {\n strcpy(ourFile, cFileName);\n incrementer = ourFile+strlen(ourFile)-1;\n multifile = true;\n break;\n }\n }\n\n //Find Nintendo Super System ROMs\n if (strlen(cFileName) >= 5) //Char + \".IC2\"\n {\n char *ext = cFileName+strlen(cFileName)-4;\n if (!strncasecmp(ext, \".IC\", 3))\n {\n strcpy(ourFile, cFileName);\n incrementer = ourFile+strlen(ourFile)-1;\n *incrementer = '7';\n NSS = true;\n break;\n }\n }\n\n //Check for valid ROM based on size\n if (((intmax_t)fileSize <= maxromspace+512) &&\n (fileSize > LargestGoodFile))\n {\n strcpy(ourFile, cFileName);\n LargestGoodFile = fileSize;\n }\n\n //Go to next file in zip file\n cFile = unzGoToNextFile(zipfile);\n }\n\n //No files found\n if (ourFile[0] == '\\n')\n {\n unzClose(zipfile);\n 
return;\n }\n\n for (;;)\n {\n //Sets current file to the file we liked before\n if (unzLocateFile(zipfile, ourFile, 1) != UNZ_OK)\n {\n if (NSS)\n {\n (*incrementer)--;\n continue;\n }\n unzClose(zipfile);\n return;\n }\n\n //Gets info on current file, and places it in cFileInfo\n unzGetCurrentFileInfo(zipfile, &cFileInfo, ourFile, 256, NULL, 0, NULL, 0);\n\n //Get the file's size\n fileSize = cFileInfo.uncompressed_size;\n\n //Too big?\n if (curromspace + fileSize > maxromspace+512)\n {\n unzClose(zipfile);\n return;\n }\n\n //Open file\n unzOpenCurrentFile(zipfile);\n\n //Read file into memory\n err = unzReadCurrentFile(zipfile, ROM+curromspace, fileSize);\n\n //Close file\n unzCloseCurrentFile(zipfile);\n\n //Encountered error?\n if (err != fileSize)\n {\n unzClose(zipfile);\n return;\n }\n\n if (curromspace && ((fileSize & 0x7FFF) == 512))\n {\n fileSize -= 512;\n memmove(ROM+curromspace, ROM+curromspace+512, fileSize);\n }\n\n curromspace += fileSize;\n\n if (NSS)\n {\n if (!*incrementer) { return; }\n (*incrementer)--;\n continue;\n }\n\n if (!multifile)\n {\n unzClose(zipfile);\n return;\n }\n (*incrementer)++;\n }\n}\n\nvoid load_file_fs(char *path)\n{\n uint8_t *ROM = romdata;\n\n if (isextension(path, \"jma\"))\n {\n #ifdef NO_JMA\n puts(\"This binary was built without JMA support.\");\n #else\n load_jma_file_dir(ZRomPath, path);\n #endif\n }\n if (isextension(path, \"zip\"))\n {\n loadZipFile(path);\n }\n if (isextension(path, \"gz\"))\n {\n loadGZipFile(path);\n }\n else\n {\n loadFile(path);\n }\n\n if ((curromspace & 0x7FFF) == 512)\n {\n memmove(ROM, ROM+512, addOnStart);\n curromspace -= 512;\n }\n}\n\nchar *STCart2 = 0;\nuint8_t *sram2;\nextern uint8_t *sram;\n\nvoid SplitSetup(char *basepath, char *basefile, uint32_t MirrorSystem)\n{\n uint8_t *ROM = romdata;\n\n curromspace = 0;\n if (maxromspace < addOnStart+addOnSize) { return; }\n memmove(ROM+addOnStart, ROM, addOnSize);\n\n if (!*basepath)\n {\n load_file_fs(basefile);\n }\n else\n {\n load_file_fs(basepath);\n }\n\n if (!curromspace) { return; }\n\n switch (MirrorSystem)\n {\n case 1:\n memcpy(ROM+0x100000, ROM, 0x100000); //Mirror 8 to 16\n break;\n\n case 2:\n memcpy(ROM+0x180000, ROM+0x100000, 0x80000); //Mirrors 12 to 16\n memcpy(ROM+0x200000, ROM+0x400000, 0x80000); //Copy base over\n memset(ROM+0x280000, 0, 0x180000); //Blank out rest\n break;\n\n case 3:\n memcpy(ROM+0x40000, ROM, 0x40000);\n memcpy(ROM+0x80000, ROM, 0x80000);\n break;\n }\n\n curromspace = addOnStart+addOnSize;\n SplittedROM = true;\n}\n\nvoid SplitSupport()\n{\n char *ROM = (char *)romdata;\n SplittedROM = false;\n\n //Same Game add on\n if (((curromspace == 0x60000) || (curromspace == 0x80000)) && ROM[Hi+CompanyOffset] == 0x33 &&\n !ROM[Hi+BankOffset] && !ROM[Hi+BSMonthOffset] && !ROM[Hi+BSDayOffset])\n {\n addOnStart = 0x200000;\n addOnSize = 0x80000;\n SplitSetup(SGPath, \"SAMEGAME.ZIP\", 1);\n }\n\n //SD Gundam G-Next add on\n if (curromspace == 0x80000 && ROM[Lo+CompanyOffset] == 0x33 &&\n !ROM[Lo+BankOffset] && !ROM[Lo+BSMonthOffset] && !ROM[Lo+BSDayOffset] && !strncmp(ROM+Lo, \"GNEXT\", 5))\n {\n addOnStart = 0x400000;\n addOnSize = 0x80000;\n SplitSetup(GNextPath, \"G-NEXT.ZIP\", 2);\n addOnStart = 0x200000; //Correct for checksum calc\n }\n\n //Sufami Turbo\n if (!strncmp(ROM, \"BANDAI SFC-ADX\", 14))\n {\n if (!STCart2)\n {\n addOnStart = 0x100000;\n addOnSize = curromspace;\n SplitSetup(STPath, \"STBIOS.ZIP\", 3);\n }\n else if (maxromspace >= (curromspace<<2)+0x100000)\n {\n memcpy(ROM+curromspace+curromspace, ROM, 
curromspace);\n memcpy(ROM+curromspace*3, ROM, curromspace);\n curromspace = 0;\n load_file_fs(STCart2);\n memcpy(ROM+curromspace, ROM, curromspace);\n SwapData((uint32_t *)romdata, ((uint32_t *)romdata+(curromspace>>1)), curromspace>>1);\n addOnSize = curromspace<<2;\n addOnStart = 0x100000;\n SplitSetup(STPath, \"STBIOS.ZIP\", 3);\n addOnSize = (curromspace-addOnStart) >> 2; //Correct for checksum calc\n sram2 = sram+65536;\n }\n }\n}\n\nbool NSRTHead(uint8_t *ROM)\n{\n uint8_t *NSRTHead = ROM + 0x1D0; //NSRT Header Location\n\n if (!strncmp(\"NSRT\", (char*)&NSRTHead[24],4) && NSRTHead[28] == 22)\n {\n if ((sum(NSRTHead, 32) & 0xFF) != NSRTHead[30] ||\n NSRTHead[30] + NSRTHead[31] != 255 ||\n (NSRTHead[0] & 0x0F) > 13 ||\n ((NSRTHead[0] & 0xF0) >> 4) > 3 ||\n ((NSRTHead[0] & 0xF0) >> 4) == 0)\n {\n return(false); //Corrupt\n }\n return(true); //NSRT header\n }\n return(false); //None\n}\n\nvoid calculate_state_sizes(), InitRewindVars(), zst_init();\nbool findZipIPS(char *, char *);\nbool PatchUsingIPS(char *);\nextern bool EMUPause;\nextern uint8_t device1, device2;\nextern bool IPSPatched;\nuint8_t lorommapmode2, curromsize, snesinputdefault1, snesinputdefault2;\nbool input1gp, input1mouse, input2gp, input2mouse, input2scope, input2just;\n\nvoid loadROM()\n{\n bool isCompressed = false, isZip = false;\n\n zst_init();\n\n EMUPause = false;\n curromspace = 0;\n\n if (isextension(ZCartName, \"jma\"))\n {\n #ifdef NO_JMA\n puts(\"This binary was built without JMA support.\");\n return;\n #else\n isCompressed = true;\n load_jma_file_dir(ZRomPath, ZCartName);\n #endif\n }\n else if (isextension(ZCartName, \"zip\"))\n {\n isCompressed = true;\n isZip = true;\n loadZipFile(ZCartName);\n }\n else if (isextension(ZCartName, \"gz\"))\n {\n isCompressed = true;\n loadGZipFile(ZCartName);\n }\n\n if (!isCompressed) { loadFile(ZCartName); }\n\n Header512 = false;\n\n if (!curromspace) { return; }\n\n if (!strncmp(\"GAME DOCTOR SF 3\", (char *)romdata, 16) ||\n !strncmp(\"SUPERUFO\", (char *)romdata+8, 8))\n {\n Header512 = true;\n }\n else\n {\n int32_t HeadRemain = (curromspace & 0x7FFF);\n switch(HeadRemain)\n {\n case 0:\n break;\n\n case 512:\n Header512 = true;\n break;\n\n default:\n {\n uint8_t *ROM = romdata;\n\n //SMC/SWC header\n if (ROM[8] == 0xAA && ROM[9]==0xBB && ROM[10]== 4)\n {\n Header512 = true;\n }\n //FIG header\n else if ((ROM[4] == 0x77 && ROM[5] == 0x83) ||\n (ROM[4] == 0xDD && ROM[5] == 0x82) ||\n (ROM[4] == 0xDD && ROM[5] == 2) ||\n (ROM[4] == 0xF7 && ROM[5] == 0x83) ||\n (ROM[4] == 0xFD && ROM[5] == 0x82) ||\n (ROM[4] == 0x00 && ROM[5] == 0x80) ||\n (ROM[4] == 0x47 && ROM[5] == 0x83) ||\n (ROM[4] == 0x11 && ROM[5] == 2))\n {\n Header512 = true;\n }\n break;\n }\n }\n }\n\n device1 = 0;\n device2 = 0;\n input1gp = true;\n input1mouse = true;\n input2gp = true;\n input2mouse = true;\n input2scope = true;\n input2just = true;\n\n if (Header512)\n {\n uint8_t *ROM = romdata;\n if (NSRTHead(ROM))\n {\n switch (ROM[0x1ED] & 0xF0) //Port 1\n {\n case 0x00: //Gamepad\n input1mouse = false;\n break;\n\n case 0x10: //Mouse port 1\n device1 = 1;\n input1gp = false;\n break;\n\n case 0x20: //Mouse or Gamepad port 1\n device1 = 1;\n break;\n\n case 0x90: //Lasabirdie - not yet supported\n input1gp = false;\n input1mouse = false;\n break;\n }\n\n switch (ROM[0x1ED] & 0x0F) //Port 1\n {\n case 0x00: //Gamepad\n input2mouse = false;\n input2scope = false;\n input2just = false;\n break;\n\n case 0x01: //Mouse port 2\n device2 = 1;\n input2gp = false;\n input2scope = false;\n input2just = 
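/*
 * Editor's illustrative sketch, not part of the archived source: the
 * Lo/Hi/EHi bases and the *Offset constants used throughout this loader
 * refer to the SNES internal header, conventionally at 0x7FC0 (LoROM),
 * 0xFFC0 (HiROM) or 0x40FFC0 (ExHiROM) in a headerless image. The
 * field offsets below are the standard layout; that they match this
 * codebase's named constants is my inference from how they are used
 * (e.g. infoloc+0x19 for the PAL country test, infoloc+0x1E for the
 * checksum word), not something the archive states directly.
 */
enum {
  SNES_HDR_LOROM = 0x7FC0,
  SNES_HDR_HIROM = 0xFFC0,
  SNES_HDR_EXHI  = 0x40FFC0,
  HDR_TITLE      = 0x00,  /* 21-byte game title                      */
  HDR_MAP_MODE   = 0x15,  /* map mode / FastROM speed byte           */
  HDR_ROM_TYPE   = 0x16,  /* cartridge (co-processor) type           */
  HDR_ROM_SIZE   = 0x17,  /* log2 size code                          */
  HDR_SRAM_SIZE  = 0x18,
  HDR_COUNTRY    = 0x19,  /* codes 2..12 are PAL regions             */
  HDR_COMPANY    = 0x1A,  /* 0x33 means an extended header precedes  */
  HDR_CHECKSUM_C = 0x1C,  /* checksum complement, little-endian      */
  HDR_CHECKSUM   = 0x1E   /* checksum, little-endian                 */
};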
false;\n break;\n\n case 0x02: //Mouse or Gamepad port 2\n device1 = 2;\n input2just = false;\n input2scope = false;\n break;\n\n case 0x03: //Super Scope port 2\n device2 = 2;\n input2gp = false;\n input2mouse = false;\n input2just = false;\n break;\n\n case 0x04: //Super Scope or Gamepad port 2\n device2 = 2;\n input2mouse = false;\n input2just = false;\n break;\n\n case 0x05: //Justifier (Lethal Enforcer gun) port 2\n device2 = 3;\n input2mouse = false;\n input2scope = false;\n break;\n\n case 0x06: //Multitap port 2\n input2gp = false;\n input2mouse = false;\n input2just = false;\n input2scope = false;\n break;\n\n case 0x07: //Mouse or Gamepad port 1, Mouse, Super Scope, or Gamepad port 2\n input2just = false;\n break;\n\n case 0x08: //Mouse or Multitap port 2\n device2 = 1;\n input2just = false;\n input2scope = false;\n break;\n\n case 0x09: //Lasabirdie - not yet supported\n input2gp = false;\n input2mouse = false;\n input2just = false;\n input2scope = false;\n break;\n\n case 0x0A: //Barcode Battler - not yet supported\n input2gp = false;\n input2mouse = false;\n input2just = false;\n input2scope = false;\n break;\n }\n }\n curromspace -= 512;\n memmove(romdata, romdata+512, curromspace);\n }\n\n snesinputdefault1 = device1;\n snesinputdefault2 = device2;\n\n SplitSupport();\n\n if (isZip)\n {\n int_fast8_t i;\n char ext[4];\n\n strcpy(ext, \"ips\");\n for (i = 0; findZipIPS(ZCartName, ext); i++)\n {\n if (i > 9) { break; }\n ext[2] = i+'0';\n }\n }\n\n if (curromspace)\n {\n uint8_t *ROM = romdata;\n\n if (!IPSPatched)\n {\n int_fast8_t i;\n char ext[4];\n\n strcpy(ext, \"ips\");\n for (i = 0; PatchUsingIPS(ext); i++)\n {\n if (i > 9) { break; }\n ext[2] = i+'0';\n }\n }\n\n NumofBytes = curromspace;\n NumofBanks = curromspace >> 15;\n BankCheck();\n curromsize = ROM[infoloc+ROMSizeOffset];\n chip_detect();\n SetupSramSize();\n calculate_state_sizes();\n InitRewindVars();\n }\n}\n\n//Memory Setup functions\nextern uint8_t wramdataa[65536];\nextern uint8_t ram7fa[65536];\nextern uint8_t regptra[49152];\nextern uint8_t regptwa[49152];\nextern uint8_t vidmemch2[4096];\nextern uint8_t vidmemch4[4096];\nextern uint8_t vidmemch8[4096];\nextern uint8_t pal16b[1024];\nextern uint8_t pal16bcl[1024];\nextern uint8_t pal16bclha[1024];\nextern uint8_t pal16bxcl[256];\n//extern uint8_t SPCRAM[65472];\n\nextern uint8_t *vidbuffer;\nextern uint8_t *vram;\nextern uint8_t *vcache2b;\nextern uint8_t *vcache4b;\nextern uint8_t *vcache8b;\n\n#if 0\nvoid clearSPCRAM()\n{\n /*\n SPC RAM is filled with alternating 0x00 and 0xFF for 0x20 bytes.\n\n Basically the SPCRAM is initialized as follows:\n xx00 - xx1f: $00\n xx20 - xx3f: $ff\n xx40 - xx5f: $00\n xx60 - xx7f: $ff\n xx80 - xx9f: $00\n xxa0 - xxbf: $ff\n xxc0 - xxdf: $00\n xxe0 - xxff: $ff\n */\n uint_fast32_t i;\n for (i = 0; i < 65472; i += 0x40)\n {\n memset(SPCRAM+i, 0, 0x20);\n memset(SPCRAM+i+0x20, 0xFF, 0x20);\n }\n}\n#endif\n\nvoid clearmem2()\n{\n memset(sram, 0xFF, 65536);\n memset(vram, 0, 65536);\n memset(vidmemch2, 1, 4096);\n memset(vidmemch4, 1, 4096);\n memset(vidmemch8, 1, 4096);\n //clearSPCRAM();\n}\n\nvoid clearmem()\n{\n int_fast32_t i;\n\n memset(vidbuffer, 0, 131072);\n memset(wramdataa, 0, 65536);\n memset(ram7fa, 0, 65536);\n memset(sram, 0, 65536*2);\n memset(regptra, 0, 49152);\n memset(regptwa, 0, 49152);\n memset(vcache2b, 0, 262144+256);\n memset(vcache4b, 0, 131072+256);\n memset(vcache8b, 0, 65536+256);\n memset(pal16b, 0, 1024);\n memset(pal16bcl, 0, 1024);\n memset(pal16bclha, 0, 1024);\n for (i=0 ; i<1024 ; i+=4)\n 
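/*
 * Editor's illustrative sketch, not part of the archived source: the
 * patch loops in loadROM() apply "name.ips" first, then "name.ip0"
 * through "name.ip9", by rewriting the third character of the
 * extension in place and stopping at the first patch that fails to
 * apply. Spelled out with a hypothetical apply_patch() helper standing
 * in for PatchUsingIPS()/findZipIPS():
 */
#include <stdbool.h>

bool apply_patch(const char *ext);  /* assumed: true if a patch applied */

static void apply_patch_chain(void)
{
  char ext[4] = "ips";
  int i;
  for (i = 0; apply_patch(ext); i++)
  {
    if (i > 9) { break; }           /* "ips", then "ip0" .. "ip9" */
    ext[2] = (char)('0' + i);
  }
}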
{\n memset(pal16bxcl+i, 255, 2);\n memset(pal16bxcl+i+2, 0, 2);\n }\n memset(romdata, 0xFF, maxromspace+32768);\n clearmem2();\n}\n\nextern uint8_t BRRBuffer[32];\nextern uint8_t echoon0;\nextern uint32_t PHdspsave;\nextern uint32_t PHdspsave2;\nuint8_t echobuf[90000];\nextern uint8_t *spcBuffera;\nextern uint8_t DSPMem[256];\n\nvoid clearvidsound()\n{\n// memset(BRRBuffer, 0, PHdspsave);\n// memset(&echoon0, 0, PHdspsave2);\n memset(echobuf, 0, 90000);\n memset(spcBuffera, 0, 65536*4+4096);\n// memset(DSPMem, 0, 256);\n}\n\n/*\n\n--------------Caution Hack City--------------\n\nWould be nice to trash this section in the future\n*/\n\nextern uint8_t cycpb268, cycpb358, cycpbl2, cycpblt2, cycpbl;\nextern uint8_t cycpblt, opexec268, opexec358, opexec268b, opexec358b;\nextern uint8_t opexec268cph, opexec358cph, opexec268cphb, opexec358cphb;\n\nvoid headerhack()\n{\n char *RomData = (char *)romdata;\n\n if (curromspace < Lo || HacksDisable) { return; }\n\n //Super Famista (J)\n //Shows black screen after one screen.\n if (!strncmp((RomData+Lo),\"\\xbd\\xb0\\xca\\xdf\\xb0\\xcc\\xa7\\xd0\\xbd\\xc0 \", 12))\n {\n RomData[0x2762F] = 0xEA;\n RomData[0x27630] = 0xEA;\n }\n\n //Super Famista 2 (J)\n //Shows black screen after loading the ROM.\n if (!strncmp((RomData+Lo),\"\\xbd\\xb0\\xca\\xdf\\xb0\\xcc\\xa7\\xd0\\xbd\\xc0 2\", 12))\n {\n //Skip a check for value FF at 2140 when spc not initialized yet?!?\n RomData[0x6CED] = 0xEA;\n RomData[0x6CEE] = 0xEA;\n //Skip a check for value FF at 2140 when spc not initialized yet?!?\n RomData[0x6CF9] = 0xEA;\n RomData[0x6CFA] = 0xEA;\n }\n\n //Deae Tonosama Appare Ichiban (J)\n //Shows some screen and hangs there.\n if (!strncmp((RomData+Lo),\"\\xc3\\xde\\xb1\\xb4\\xc4\\xc9\\xbb\\xcf\", 8))\n {\n RomData[0x17837C] = 0xEA;\n RomData[0x17837D] = 0xEA;\n }\n\n //Human Grand Prix III - F1 Triple Battle (J)\n //Shows black screen after loading the ROM.\n if (!strncmp((RomData+Lo),\"HUMAN GRANDPRIX 3 \", 20))\n {\n cycpb268 = 135;\n cycpb358 = 157;\n cycpbl2 = 125;\n cycpblt2 = 125;\n cycpbl = 125;\n cycpblt = 125;\n }\n\n //Accele Brid (J)\n //Hangs after some time in the first level.\n if (!strncmp((RomData+Lo),\"ACCELEBRID \", 12))\n {\n RomData[0x34DA2] = 0;\n RomData[0x34DA3] = 0;\n }\n\n //Home Alone (J/E/U)\n //Hangs after starting a new game.\n if (!strncmp((RomData+Lo),\"HOME ALONE \", 12))\n {\n RomData[0x666B] = 0xEE;\n RomData[0x666C] = 0xBC;\n }\n\n //Rendering Ranger R2\n //Shows black screen after loading the ROM.\n if (!strncmp((RomData+Lo),\"REND\", 4))\n {\n cycpb268 = 157;\n cycpb358 = 157;\n cycpbl2 = 157;\n cycpblt2 = 157;\n cycpbl = 157;\n cycpblt = 157;\n }\n\n //Tuff E Nuff (U/E), Dead Dance (J),\n //Cyber Knight II - Tikyu Teikoku no Yabou (J)\n //Shows black screen after loading the ROM. (Tuff E Nuff, Dead Dance)\n //Shows black screen after two screens. 
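/*
 * Editor's illustrative sketch, not part of the archived source: most
 * entries in headerhack() overwrite two ROM bytes with 0xEA, which is
 * the 65816 NOP opcode, so the effect is to skip one problematic
 * two-byte instruction (typically a branch or an APU-port poll the
 * timing emulation cannot yet satisfy). A tiny helper expressing that
 * intent:
 */
#include <stdint.h>
#include <stddef.h>

#define OP_NOP_65816 0xEA

static void nop_out(uint8_t *rom, size_t offset, size_t count)
{
  while (count--) { rom[offset++] = OP_NOP_65816; }  /* patch to NOPs */
}
/* e.g. the Super Famista fix above amounts to nop_out(romdata, 0x2762F, 2); */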
(Cyber Knight II)\n if (!strncmp((RomData+Lo),\"CYBER KNIGHT 2 \", 16) ||\n !strncmp((RomData+Lo),\"DEAD\", 4) ||\n !strncmp((RomData+Lo),\"TUFF\", 4))\n {\n cycpb268 = 75;\n cycpb358 = 77;\n cycpbl2 = 75;\n cycpblt2 = 75;\n cycpbl = 75;\n cycpblt = 75;\n }\n\n //Addams Family Values (U/E)\n //Restarts or shows a black screen after starting a new game.\n if (!strncmp((RomData+Lo), \"ADDAMS FAMILY VALUES\", 20))\n {\n opexec268 = 120;\n opexec358 = 100;\n }\n\n //Front Mission\n //Flickering worldmap and statusbar.\n if (!strncmp((RomData+Hi), \"\\xcc\\xdb\\xdd\\xc4\\xd0\\xaf\\xbc\\xae\", 8) ||\n !strncmp((RomData+Hi), \"FRONT MI\", 8))\n {\n opexec268 = 226;\n opexec358 = 226;\n opexec268cph = 80;\n opexec358cph = 80;\n }\n}\n\nvoid Setper2exec()\n{\n if (per2exec != 100)\n { // Decrease standard % of execution by 5% to replace branch and 16bit\n // cycle deductions\n opexec268b = (unsigned char)((opexec268 * 95 * per2exec) / 10000);\n opexec358b = (unsigned char)((opexec358 * 87 * per2exec) / 10000); // 82\n opexec268cphb = (unsigned char)((opexec268cph * 95 * per2exec) / 10000);\n opexec358cphb = (unsigned char)((opexec358cph * 87 * per2exec) / 10000); // 82\n printf(\"%d %d %d %d\", opexec268b, opexec358b, opexec268cphb, opexec358cphb);\n }\n}\n\nextern uint32_t MsgCount, MessageOn;\nextern char *Msgptr;\nuint32_t CRC32;\n\nuint32_t showinfogui()\n{\n uint_fast32_t i;\n uint8_t *ROM = romdata;\n\n strcpy(CSStatus, \" TYPE: \");\n strcpy(CSStatus2, \"INTERLEAVED: CHKSUM: \");\n strcpy(CSStatus3, \"VIDEO: BANK: CRC32: \");\n strcpy(CSStatus4, \" \");\n\n for (i=0 ; i<21 ; i++)\n { CSStatus[i] = (ROM[infoloc + i]) ? ROM[infoloc + i] : 32; }\n\n if (Interleaved)\n {\n memcpy(CSStatus2+12, \"Yes \", 4);\n memcpy(CSStatus4+10, \"PLEASE DEINTERLEAVE ROM\", 23);\n }\n else\n {\n memcpy(CSStatus2+12, \"No \", 4);\n memset(CSStatus4+10, ' ', 23);\n }\n\n memcpy(CSStatus2+20, (IPSPatched) ? \"IPS \":\" \", 4);\n memcpy(CSStatus3+6, (ROM[infoloc + 25] < 2 || ROM[infoloc + 25] > 12) ? \"NTSC\":\"PAL \", 4);\n\n if (infoloc == EHi) { memcpy(CSStatus3+19, \"EHi \", 4); }\n else { memcpy(CSStatus3+19, (romtype == 2) ? \"Hi \":\"Lo \", 4); }\n\n memcpy(CSStatus+31, \"NORMAL \", 9);\n if (SA1Enable) { memcpy(CSStatus+31, \"SA-1 \", 9); }\n if (RTCEnable) { memcpy(CSStatus+31, \"RTC \", 9); }\n if (SPC7110Enable) { memcpy(CSStatus+31, \"SPC7110 \", 9); }\n if (SFXEnable) { memcpy(CSStatus+31, \"SUPER FX \", 9); }\n if (C4Enable) { memcpy(CSStatus+31, \"C4 \", 9); }\n if (DSP1Enable) { memcpy(CSStatus+31, \"DSP-1 \", 9); }\n if (DSP2Enable) { memcpy(CSStatus+31, \"DSP-2 \", 9); }\n if (DSP3Enable) { memcpy(CSStatus+31, \"DSP-3 \", 9); }\n if (DSP4Enable) { memcpy(CSStatus+31, \"DSP-4 \", 9); }\n if (SDD1Enable) { memcpy(CSStatus+31, \"S-DD1 \", 9); }\n if (OBCEnable) { memcpy(CSStatus+31, \"OBC1 \", 9); }\n if (SETAEnable) { memcpy(CSStatus+31, \"SETA DSP \", 9); }\n if (ST18Enable) { memcpy(CSStatus+31, \"ST018 \", 9); }\n if (SGBEnable) { memcpy(CSStatus+31, \"SGB \", 9); }\n if (BSEnable) { memcpy(CSStatus+31, \"BROADCAST\", 9);\n // dummy out date so CRC32 matches\n ROM[infoloc+BSMonthOffset] = 0x42;\n ROM[infoloc+BSDayOffset] = 0x00; }\n // 42 is the answer, and the uCONSRT standard\n\n // calculate CRC32 for the whole ROM, or Add-on ROM only\n CRC32 = (SplittedROM) ? crc32(0, ROM+addOnStart, addOnSize) : crc32(0, ROM, NumofBytes);\n // place CRC32 on line\n sprintf(CSStatus3+32, \"%08X\", (unsigned int)CRC32);\n\n i = (SplittedROM) ? 
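/*
 * Editor's illustrative sketch, not part of the archived source: the
 * CRC32 shown on the info line comes straight from zlib. zlib's crc32()
 * is incremental -- seed it with crc32(0L, Z_NULL, 0) (or plain 0, as
 * above) and feed data in one call or chunk by chunk -- which is what
 * makes hashing only the add-on part of a split image trivial:
 */
#include <zlib.h>
#include <stdint.h>
#include <stddef.h>

static uint32_t rom_crc32(const uint8_t *data, size_t len)
{
  uLong crc = crc32(0L, Z_NULL, 0);   /* canonical initial value */
  crc = crc32(crc, data, (uInt)len);  /* repeat per chunk if streaming */
  return (uint32_t)crc;
}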
infoloc + 0x1E + addOnStart: infoloc + 0x1E;\n\n if ((ROM[i] == (Checksumvalue & 0xFF)) && (ROM[i+1] == (Checksumvalue >> 8)))\n { memcpy(CSStatus2+36, \"OK \", 4); }\n else\n {\n memcpy(CSStatus2+36, \"FAIL\", 4);\n if (!IPSPatched) { memcpy(CSStatus4, \"BAD ROM \",8); }\n else { memset(CSStatus4, ' ', 7); }\n }\n\n DumpROMLoadInfo();\n\n MessageOn = 300;\n Msgptr = CSStatus;\n return (MsgCount);\n}\n\nextern uint32_t nmiprevaddrl, nmiprevaddrh, nmirept, nmiprevline, nmistatus;\nextern uint8_t yesoutofmemory;\nextern uint8_t NextLineCache, sramsavedis, sndrot, regsbackup[3019];\n/*\nextern uint32_t Voice0Freq, Voice1Freq, Voice2Freq, Voice3Freq;\nextern uint32_t Voice4Freq, Voice5Freq, Voice6Freq, Voice7Freq;\nextern uint32_t dspPAdj;\nextern uint16_t Voice0Pitch, Voice1Pitch, Voice2Pitch, Voice3Pitch;\nextern uint16_t Voice4Pitch, Voice5Pitch, Voice6Pitch, Voice7Pitch;*/\nvoid outofmemfix(), GUIDoReset();\n\nvoid initpitch()\n{\n/* Voice0Pitch = DSPMem[2+0*0x10];\n Voice0Freq = ((((Voice0Pitch & 0x3FFF) * dspPAdj) >> 8) & 0xFFFFFFFF);\n Voice1Pitch = DSPMem[2+1*0x10];\n Voice1Freq = ((((Voice1Pitch & 0x3FFF) * dspPAdj) >> 8) & 0xFFFFFFFF);\n Voice2Pitch = DSPMem[2+2*0x10];\n Voice2Freq = ((((Voice2Pitch & 0x3FFF) * dspPAdj) >> 8) & 0xFFFFFFFF);\n Voice3Pitch = DSPMem[2+3*0x10];\n Voice3Freq = ((((Voice3Pitch & 0x3FFF) * dspPAdj) >> 8) & 0xFFFFFFFF);\n Voice4Pitch = DSPMem[2+4*0x10];\n Voice4Freq = ((((Voice4Pitch & 0x3FFF) * dspPAdj) >> 8) & 0xFFFFFFFF);\n Voice5Pitch = DSPMem[2+5*0x10];\n Voice5Freq = ((((Voice5Pitch & 0x3FFF) * dspPAdj) >> 8) & 0xFFFFFFFF);\n Voice6Pitch = DSPMem[2+6*0x10];\n Voice6Freq = ((((Voice6Pitch & 0x3FFF) * dspPAdj) >> 8) & 0xFFFFFFFF);\n Voice7Pitch = DSPMem[2+7*0x10];\n Voice7Freq = ((((Voice7Pitch & 0x3FFF) * dspPAdj) >> 8) & 0xFFFFFFFF);*/\n}\n\nextern uint32_t SfxR1, SfxR2, SetaCmdEnable, SfxSFR, SfxSCMR;\nextern uint8_t disablespcclr, *sfxramdata, SramExists;\nextern uint8_t *setaramdata, *wramdata, *SA1RAMArea, cbitmode;\nextern uint8_t ForcePal, ForceROMTiming, romispal, MovieWaiting, DSP1Type;\nextern uint16_t totlines;\nvoid SetAddressingModes(), GenerateBank0Table();\nvoid SetAddressingModesSA1(), GenerateBank0TableSA1();\nvoid InitDSP(), InitDSP2(), InitDSP3(), InitDSP4(), InitOBC1(), InitFxTables();\nvoid initregr(), initregw();\n\nvoid CheckROMType()\n{\n char *ROM = (char *)romdata;\n\n //Do this before mirroring\n lorommapmode2 = 0;\n //24 Mbit ROMs with the following game code have a BS-X slot on board and need\n //different mapping for them. Known matches are Derby Stallion 96 and Sound Novel Tsukuru.\n //Note, in 4 character game codes, Z generally means there is a BS-X slot, but some games\n //abuse this. So we check specifically for B1, as B1 is a non abuser.\n if ((curromspace == 0x300000) && !strncmp(ROM+Lo-16, \"B1Z\", 3) && ROM[Lo+CompanyOffset] == 0x33)\n { lorommapmode2 = 1; }\n\n if (!MovieWaiting)\n {\n MirrorROM(romdata);\n CalcChecksum();\n }\n\n // Setup memmapping\n SetAddressingModes();\n GenerateBank0Table();\n\n disablespcclr = (memcmp(ROM+Hi, \"BS Z\", 4)) ? 
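/*
 * Editor's illustrative sketch, not part of the archived source: the
 * comparison above checks the little-endian checksum word at header
 * offset 0x1E against the sum computed over the image. A stricter
 * validation -- the one header-location heuristics usually score Lo/Hi
 * candidates with -- also requires the complement word at 0x1C to pair
 * with it so that checksum + complement == 0xFFFF:
 */
#include <stdint.h>
#include <stdbool.h>

static bool header_checksum_ok(const uint8_t *hdr, uint16_t computed)
{
  uint16_t comp = (uint16_t)(hdr[0x1C] | (hdr[0x1D] << 8));
  uint16_t sum  = (uint16_t)(hdr[0x1E] | (hdr[0x1F] << 8));
  return sum == computed && (uint16_t)(sum + comp) == 0xFFFF;
}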
0 : 1;\n\n if (!strncmp(ROM, \"BANDAI SFC-ADX\", 14))\n {\n map_mem(0x60, &stbanka, 0x08);\n if (STCart2)\n {\n map_mem(0x70, &stbankb, 0x08);\n }\n }\n else if (romtype == 1) // LoROM SRAM mapping\n { // banks 70 - 77\n map_mem(0x70, &srambank, 0x08);\n\n if (!BSEnable)\n { // banks 78 - 7D (not for BS)\n map_mem(0x78, &srambank, 0x06);\n }\n\n if (!SDD1Enable)\n { // banks F0 - FF (not for S-DD1)\n map_mem(0xF0, &srambank, 0x10);\n }\n }\n\n // Setup DSP-X stuff\n DSP1Type = 0;\n\n if (DSP1Enable)\n {\n InitDSP();\n\n if (romtype == 1)\n {\n DSP1Type = 1;\n map_mem(0x30, &dsp1bank, 0x10);\n map_mem(0xB0, &dsp1bank, 0x10);\n map_mem(0xE0, &dsp1bank, 0x10);\n }\n else\n {\n DSP1Type = 2;\n }\n }\n\n if (DSP2Enable)\n {\n asm_call(InitDSP2);\n map_mem(0x3F, &dsp2bank, 1);\n }\n\n if (DSP3Enable)\n {\n InitDSP3();\n\n // DSP-3 mapping, banks 20 - 3F\n map_mem(0x20, &dsp3bank, 0x20);\n }\n\n if (DSP4Enable)\n {\n InitDSP4();\n\n // DSP-4 mapping, banks 30 - 3F\n map_mem(0x30, &dsp4bank, 0x10);\n }\n\n if (OBCEnable)\n {\n InitOBC1();\n\n map_mem(0x00, &obc1bank, 0x40);\n map_mem(0x80, &obc1bank, 0x40);\n }\n\n if (C4Enable)\n {\n map_mem(0x00, &c4bank, 0x40);\n map_mem(0x80, &c4bank, 0x40);\n }\n\n if (SFXEnable)\n {\n // Setup SuperFX stuff\n if (maxromspace >= 0x600000)\n {\n //SuperFX mapping, banks 70 - 73\n map_mem(0x70, &sfxbank, 1);\n map_mem(0x71, &sfxbankb, 1);\n map_mem(0x72, &sfxbankc, 1);\n map_mem(0x73, &sfxbankd, 1);\n\n //SRAM mapping, banks 78 - 79\n map_mem(0x78, &sramsbank, 2);\n\n SfxR1 = 0;\n SfxR2 = 0;\n memset(sfxramdata, 0, 262144); // clear 256kB SFX ram\n\n if (SramExists)\n {\n memcpy(sfxramdata, sram, 65536); // proper SFX sram area\n }\n\n asm_call(InitFxTables);\n }\n else\n {\n yesoutofmemory = 1;\n }\n }\n\n if (SETAEnable)\n {\n if (strncmp(ROM+Lo, \"2DAN MORITA SHOUGI\", 18))\n {\n //Setup Seta 10 stuff\n\n //Really banks 68h-6Fh:0000-7FFF are all mapped the same by the chip but\n //F1 ROC II only uses bank 68h\n map_mem(0x68, &setabank, 1);\n\n //Control register (and some status?) is in banks 60h-67h:0000-3FFF\n map_mem(0x60, &setabanka, 1);\n\n SetaCmdEnable = 0x00000080; // 60:0000\n }\n else\n {\n void ST011_Reset();\n ST011_Reset();\n map_mem(0x68, &seta11bank, 1);\n map_mem(0x60, &seta11banka, 1);\n }\n memset(setaramdata, 0, 4096); // clear 4kB SETA ram\n\n // proper SETA sram area\n if (SramExists)\n {\n memcpy(setaramdata, sram, 4096);\n }\n }\n\n // General stuff all mixed together [... 
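/*
 * Editor's illustrative sketch, not part of the archived source: the
 * map_mem(bank, handler, count) calls above plug a special-chip handler
 * into `count` consecutive 64 KB banks of the 24-bit SNES address
 * space (e.g. map_mem(0x30, &dsp1bank, 0x10) covers banks 30h-3Fh).
 * The underlying idea, with a hypothetical 256-entry dispatch table of
 * read handlers:
 */
#include <stdint.h>

typedef uint8_t (*bank_read_fn)(uint32_t addr);

static bank_read_fn bank_read[256];   /* one handler per 64 KB bank */

static void map_banks(unsigned first, bank_read_fn fn, unsigned count)
{
  while (count-- && first < 256) { bank_read[first++] = fn; }
}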
wouldn't it be cool to clean that]\n SfxSFR = 0;\n SfxSCMR &= 0xFFFFFF00;\n asm_call(initregr);\n asm_call(initregw);\n\n if (SA1Enable)\n {\n SA1RAMArea = (uint8_t *)ROM + 4096*1024;\n\n GenerateBank0TableSA1();\n SetAddressingModesSA1();\n\n if (CHIPBATT) // proper SA-1 sram area\n {\n memset(SA1RAMArea, 0, 131072);\n if (SramExists) { memcpy(SA1RAMArea, sram, 131072); }\n }\n }\n\n wramdata = wramdataa;\n}\n\nextern uint16_t copv, brkv, abortv, nmiv, nmiv2, irqv, irqv2, resetv;\nextern uint16_t copv8, brkv8, abortv8, nmiv8, irqv8;\n\nvoid SetIRQVectors()\n{ // get vectors (NMI & reset)\n uint8_t *ROM = romdata;\n\n if (!memcmp(ROM+infoloc+36+24, \"\\0xFF\\0xFF\", 2)) // if reset error\n {\n memcpy(ROM+infoloc+36+6, \"\\0x9C\\0xFF\", 2);\n memcpy(ROM+infoloc+36+24, \"\\0x80\\0xFF\", 2);\n }\n\n memcpy(&copv, ROM+infoloc+0x24, 2);\n memcpy(&brkv, ROM+infoloc+0x26, 2);\n memcpy(&abortv, ROM+infoloc+0x28, 2);\n memcpy(&nmiv, ROM+infoloc+0x2A, 2);\n memcpy(&nmiv2, ROM+infoloc+0x2A, 2);\n memcpy(&irqv, ROM+infoloc+0x2E, 2);\n memcpy(&irqv2, ROM+infoloc+0x2E, 2);\n\n // 8-bit and reset\n memcpy(&copv8, ROM+infoloc+0x34, 2);\n memcpy(&abortv8, ROM+infoloc+0x38, 2);\n memcpy(&nmiv8, ROM+infoloc+0x3A, 2);\n memcpy(&resetv, ROM+infoloc+0x3C, 2);\n memcpy(&brkv8, ROM+infoloc+0x3E, 2);\n memcpy(&irqv8, ROM+infoloc+0x3E, 2);\n\n if (yesoutofmemory) // failed ?\n {\n resetv = 0x8000;\n memcpy(ROM+0x0000, \"\\0x80\\0xFE\", 2);\n memcpy(ROM+0x8000, \"\\0x80\\0xFE\", 2);\n }\n}\n\nvoid SetupROM()\n{\n static bool CLforce = false;\n uint8_t *ROM = romdata;\n\n CheckROMType();\n SetIRQVectors();\n\n /* get timing (pal/ntsc)\n ForceROMTiming is from the GUI.\n ForcePal is from Command line, we have a static var\n to prevent forcing a secong game loaded from the GUI when\n the first was loaded from the command line with forcing.\n */\n if (ForcePal && !CLforce) { CLforce = true; }\n else { ForcePal = ForceROMTiming; }\n\n switch (ForcePal)\n {\n case 1:\n romispal = 0;\n break;\n case 2:\n romispal = (!BSEnable);\n break;\n default:\n romispal = ((!BSEnable) && (ROM[infoloc+CountryOffset] > 1) && (ROM[infoloc+CountryOffset] < 0xD));\n }\n\n if (romispal)\n {\n totlines = 314;\n MsgCount = 100;\n }\n else\n {\n totlines = 263;\n MsgCount = 120;\n }\n}\n\nextern int32_t NumComboLocl;\nextern uint8_t ComboHeader[23];\nextern int8_t CombinDataLocl[3300];\nextern bool romloadskip;\n\nvoid SaveCombFile()\n{\n if (!romloadskip)\n {\n FILE *fp;\n\n setextension(ZSaveName, \"cmb\");\n\n if (NumComboLocl)\n {\n ComboHeader[22] = NumComboLocl;\n\n if ((fp = fopen_dir(ZComboPath, ZSaveName, \"wb\")))\n {\n fwrite(ComboHeader, 1, 23, fp);\n fwrite(CombinDataLocl, 1, NumComboLocl*66, fp);\n fclose(fp);\n }\n }\n }\n}\n\nvoid OpenCombFile()\n{\n FILE *fp;\n\n setextension(ZSaveName, \"cmb\");\n NumComboLocl = 0;\n\n if ((fp = fopen_dir(ZComboPath, ZSaveName, \"rb\")))\n {\n fread(ComboHeader, 1, 23, fp);\n NumComboLocl = ComboHeader[22];\n\n if (NumComboLocl)\n {\n fread(CombinDataLocl, 1, NumComboLocl*66, fp);\n }\n\n fclose(fp);\n }\n}\n\nuint8_t SFXCounter, SfxAC, ForceNewGfxOff;\n\nvoid preparesfx()\n{\n char *ROM = (char *)romdata;\n int_fast8_t i;\n\n SFXCounter = SfxAC = 0;\n\n if (!strncmp(ROM+Lo, \"FX S\", 4) ||\n !strncmp(ROM+Lo, \"DIRT\", 4))\n {\n SFXCounter = 1;\n }\n else if (!strncmp(ROM+Lo, \"Stun\", 4))\n {\n ForceNewGfxOff=1;\n }\n\n for (i=63;i>=0;i--)\n {\n memcpy((int32_t *)romdata+i*0x4000 ,(int32_t *)romdata+i*0x2000,0x8000);\n memcpy((int32_t *)romdata+i*0x4000+0x2000,(int32_t 
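/*
 * Editor's illustrative sketch, not part of the archived source: the
 * offsets read in SetIRQVectors() are the standard 65816 vector table,
 * which sits directly after the internal header (header base + 0x24 is
 * $FFE4 for a header at $FFC0). All entries are little-endian 16-bit
 * addresses in bank 0; a reset vector of 0xFFFF is the "unprogrammed
 * ROM" case the loader guards against (the intended two-byte pattern
 * in C escape syntax is "\xFF\xFF"). For reference:
 */
#include <stdint.h>

struct snes_vectors {        /* header-relative offset / CPU address */
  uint16_t native_cop;       /* +0x24  $FFE4                         */
  uint16_t native_brk;       /* +0x26  $FFE6                         */
  uint16_t native_abort;     /* +0x28  $FFE8                         */
  uint16_t native_nmi;       /* +0x2A  $FFEA                         */
  uint16_t native_unused;    /* +0x2C                                */
  uint16_t native_irq;       /* +0x2E  $FFEE                         */
  uint16_t emu_pad[2];       /* +0x30 .. +0x33                       */
  uint16_t emu_cop;          /* +0x34  $FFF4                         */
  uint16_t emu_unused;       /* +0x36                                */
  uint16_t emu_abort;        /* +0x38  $FFF8                         */
  uint16_t emu_nmi;          /* +0x3A  $FFFA                         */
  uint16_t emu_reset;        /* +0x3C  $FFFC                         */
  uint16_t emu_irqbrk;       /* +0x3E  $FFFE, IRQ and BRK shared     */
};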
*)romdata+i*0x2000,0x8000);\n }\n}\n\nvoid map_set(void **dest, uint8_t *src, size_t count, size_t step)\n{\n while (count--)\n {\n *dest = src;\n dest++;\n src += step;\n }\n}\n\nextern uint8_t MultiType;\nextern void *snesmmap[256];\nextern void *snesmap2[256];\n\nuint32_t cromptradd;\nextern uint8_t MultiTap;\nextern uint32_t SfxR0, SfxR1, SfxR2, SfxR3, SfxR4, SfxR5, SfxR6, SfxR7,\n SfxR8, SfxR9, SfxR10, SfxR11, SfxR12, SfxR13, SfxR14, SfxR15;\nextern void *ram7f;\n\nvoid map_lorom()\n{\n uint8_t *ROM = romdata;\n uint_fast8_t x;\n\n // set addresses 8000-FFFF\n // set banks 00-7F (80h x 32KB ROM banks @ 8000h)\n map_set(snesmmap,ROM-0x8000,0x80,0x8000);\n\n // set banks 80-FF (80h x 32KB ROM banks @ 8000h)\n if (lorommapmode2)\n {\n map_set(snesmmap+0x80,ROM+0x1F8000,0x20,0x8000);\n map_set(snesmmap+0xA0,ROM+0x0F8000,0x60,0x8000);\n }\n else\n {\n map_set(snesmmap+0x80,ROM-0x8000,0x80,0x8000);\n }\n\n // set addresses 0000-7FFF\n // set banks 00-3F (40h x WRAM)\n map_set(snesmap2,wramdata,0x40,0);\n\n // set banks 40-7F (40h x 32KB ROM banks @ 8000h)\n map_set(snesmap2+0x40,ROM+0x200000,0x40,0x8000);\n\n // set banks 80-BF (40h x WRAM)\n map_set(snesmap2+0x80,wramdata,0x40,0);\n\n // set banks C0-FF (40h x 32KB ROM banks @ 8000h)\n map_set(snesmap2+0xC0,ROM+0x200000,0x40,0x8000);\n\n // set banks 70-77 (07h x SRAM)\n for (x = 0x70; x <= 0x77; x++) { snesmap2[x] = sram; }\n\n // set banks 7E/7F (WRAM)\n snesmmap[0x7E] = snesmap2[0x7E] = wramdata;\n snesmmap[0x7F] = snesmap2[0x7F] = ram7f;\n}\n\nvoid map_hirom()\n{\n uint8_t *ROM = romdata;\n uint_fast8_t x;\n\n // set addresses 8000-FFFF\n // set banks 00-3F (40h x 64KB ROM banks @10000h)\n map_set(snesmmap,ROM,0x40,0x10000);\n\n // set banks 40-7F (40h x 64KB ROM banks @10000h)\n map_set(snesmmap+0x40,ROM,0x40,0x10000);\n\n // set banks 80-BF (40h x 64KB ROM banks @10000h)\n map_set(snesmmap+0x80,ROM,0x40,0x10000);\n\n // set banks C0-FF (40h x 64KB ROM banks @10000h)\n map_set(snesmmap+0xC0,ROM,0x40,0x10000);\n\n // set addresses 0000-7FFF\n // set banks 00-3F (40h x WRAM)\n map_set(snesmap2,wramdata,0x40,0);\n\n // set banks 40-7F (40h x 64KB ROM banks @10000h)\n map_set(snesmap2+0x40,ROM,0x40,0x10000);\n\n // set banks 80-BF (40h x WRAM)\n map_set(snesmap2+0x80,wramdata,0x40,0);\n\n // set banks C0-FF (40h x 64KB ROM banks @10000h)\n map_set(snesmap2+0xC0,ROM,0x40,0x10000);\n\n // set banks 70-77 (07h x SRAM)\n for (x = 0x70; x <= 0x77; x++) { snesmap2[x] = sram; }\n\n // set banks 7E/7F (WRAM)\n snesmmap[0x7E] = snesmap2[0x7E] = wramdata;\n snesmmap[0x7F] = snesmap2[0x7F] = ram7f;\n}\n\nvoid map_ehirom()\n{\n uint8_t *ROM = romdata;\n uint_fast8_t x;\n\n // set addresses 8000-FFFF\n // set banks 00-3F (40h x 32KB ROM banks @ 10000h)\n map_set(snesmmap,ROM+0x400000,0x20,0x10000);\n map_set(snesmmap+0x20,ROM+0x400000,0x20,0x10000);\n\n // set banks 40-7F (40h x 32KB ROM banks @ 10000h)\n map_set(snesmmap+0x40,ROM+0x400000,0x20,0x10000);\n map_set(snesmmap+0x60,ROM+0x400000,0x20,0x10000);\n\n // set banks 80-BF (40h x 32KB ROM banks @10000h)\n map_set(snesmmap+0x80,ROM+0x400000,0x20,0x10000);\n map_set(snesmmap+0xA0,ROM+0x400000,0x20,0x10000);\n\n // set banks C0-FF (40h x 64KB ROM banks @10000h)\n map_set(snesmmap+0xC0,ROM,0x40,0x10000);\n\n // set addresses 0000-7FFF\n // set banks 00-3F (40h x WRAM)\n map_set(snesmap2,wramdata,0x40,0);\n\n // set banks 40-7F (40h x 32KB ROM banks @ 8000h)\n map_set(snesmap2+0x40,ROM+0x400000,0x20,0x10000);\n map_set(snesmap2+0x60,ROM+0x400000,0x20,0x10000);\n\n // set banks 80-BF (40h x WRAM)\n 
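/*
 * Editor's illustrative sketch, not part of the archived source: the
 * ROM-0x8000 base in map_lorom() works because LoROM maps each 32 KB
 * ROM chunk into the upper half ($8000-$FFFF) of a 64 KB bank; biasing
 * the base pointer by -0x8000 lets the emulator index
 * snesmmap[bank] + addr16 directly, with no per-access subtraction.
 * The equivalent file-offset arithmetic, written out:
 */
#include <stdint.h>

static uint32_t lorom_offset(uint8_t bank, uint16_t addr) /* addr >= 0x8000 */
{
  return ((uint32_t)(bank & 0x7F) << 15) | (addr & 0x7FFF);
}

static uint32_t hirom_offset(uint8_t bank, uint16_t addr) /* full 64 KB bank */
{
  return ((uint32_t)(bank & 0x3F) << 16) | addr;
}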
map_set(snesmap2+0x80,wramdata,0x40,0);\n\n // set banks C0-FF (40h x 64KB ROM banks @10000h)\n map_set(snesmap2+0xC0,ROM,0x40,0x10000);\n\n // set banks 70-77 (07h x SRAM)\n for(x = 0x70; x <= 0x77; x++) { snesmap2[x] = sram; }\n\n // set banks 7E/7F (WRAM)\n snesmmap[0x7E] = snesmap2[0x7E] = wramdata;\n snesmmap[0x7F] = snesmap2[0x7F] = ram7f;\n}\n\nvoid map_sfx()\n{\n uint8_t *ROM = romdata;\n\n // Clear SFX registers\n SfxR0 = SfxR1 = SfxR2 = SfxR3 = SfxR4 = SfxR5 = SfxR6 = SfxR7 = 0;\n SfxR8 = SfxR9 = SfxR10 = SfxR11 = SfxR12 = SfxR13 = SfxR14 = SfxR15 = 0;\n\n // set addresses 8000-FFFF\n // set banks 00-3F (40h x 64KB ROM banks @10000h)\n map_set(snesmmap,ROM,0x40,0x10000);\n\n // set banks 40-7F (40h x128KB ROM banks @20000h)\n map_set(snesmmap+0x40,ROM+0x8000,0x30,0x20000);\n map_set(snesmmap+0x70,ROM+0x8000,0x10,0x20000);\n\n // set banks 80-BF (40h x 64KB ROM banks @10000h)\n map_set(snesmmap+0x80,ROM,0x40,0x10000);\n\n // set banks C0-FF (40h x128KB ROM banks @20000h)\n map_set(snesmmap+0xC0,ROM+0x8000,0x30,0x20000);\n map_set(snesmmap+0xF0,ROM+0x8000,0x10,0x20000);\n\n // set addresses 0000-7FFF\n // set banks 00-3F (40h x WRAM)\n map_set(snesmap2,wramdata,0x40,0);\n\n // set banks 40-7F (40h x128KB ROM banks @20000h)\n map_set(snesmap2+0x40,ROM+0x8000,0x30,0x20000);\n map_set(snesmap2+0x70,ROM+0x8000,0x10,0x20000);\n\n // set banks 80-BF (40h x WRAM)\n map_set(snesmap2+0x80,wramdata,0x40,0);\n\n // set banks C0-FF (40h x128KB ROM banks @20000h)\n map_set(snesmap2+0xC0,ROM+0x8000,0x30,0x20000);\n map_set(snesmap2+0xF0,ROM+0x8000,0x10,0x20000);\n\n // set banks 70-73/78/79 (SFXRAM & SRAM)\n map_set(snesmap2+0x70,sfxramdata,4,0x10000);\n snesmap2[0x78] = snesmap2[0x79] = sram;\n\n // set banks 7E/7F (WRAM)\n snesmmap[0x7E] = snesmap2[0x7E] = wramdata;\n snesmmap[0x7F] = snesmap2[0x7F] = ram7f;\n\n preparesfx();\n}\n\nvoid map_sa1()\n{\n uint8_t *ROM = romdata;\n uint8_t test[] = { 0xA9, 0x10, 0xCF, 0xAD };\n\n if(!memcmp(ROM+0xB95, test, 4)) { ROM[0xB96] = 0; }\n\n // set addresses 8000-FFFF\n // set banks 00-3F (40h x 32KB ROM banks @ 8000h)\n map_set(snesmmap,ROM-0x8000,0x40,0x8000);\n\n // set banks 40-7F (40h x 64KB ROM banks @10000h)\n map_set(snesmmap+0x40,ROM,0x40,0x10000);\n\n // set banks 80-BF (40h x 32KB ROM banks @ 8000h)\n map_set(snesmmap+0x80,ROM+0x1F8000,0x40,0x8000);\n\n // set banks C0-FF (40h x 64KB ROM banks @10000h)\n map_set(snesmmap+0xC0,ROM,0x40,0x10000);\n\n // set addresses 0000-7FFF\n // set banks 00-3F (40h x WRAM)\n map_set(snesmap2,wramdata,0x40,0);\n\n // set banks 40-7F (40h x 64KB ROM banks @10000h)\n map_set(snesmap2+0x40,ROM,0x40,0x10000);\n\n // set banks 80-BF (40h x WRAM)\n map_set(snesmap2+0x80,wramdata,0x40,0);\n\n // set banks C0-FF (40h x 64KB ROM banks @10000h)\n map_set(snesmap2+0xC0,ROM,0x40,0x10000);\n\n // set banks 7E/7F (WRAM)\n snesmmap[0x7E] = snesmap2[0x7E] = wramdata;\n snesmmap[0x7F] = snesmap2[0x7F] = ram7f;\n}\n\nvoid map_sdd1()\n{\n uint8_t *ROM = romdata;\n\n // set addresses 8000-FFFF\n // set banks 00-3F (40h x 32KB ROM banks @ 8000h)\n map_set(snesmmap,ROM-0x8000,0x40,0x8000);\n\n // set banks 40-7F (40h x 64KB ROM banks @10000h)\n map_set(snesmmap+0x40,ROM,0x40,0x10000);\n\n // set banks 80-BF (40h x 32KB ROM banks @ 8000h)\n map_set(snesmmap+0x80,ROM-0x8000,0x40,0x8000);\n\n // set banks C0-FF (40h x 64KB ROM banks @10000h)\n map_set(snesmmap+0xC0,ROM,0x40,0x10000);\n\n // set addresses 0000-7FFF\n // set banks 00-3F (40h x WRAM)\n map_set(snesmap2,wramdata,0x40,0);\n\n // set banks 40-7F (40h x 64KB ROM banks 
@10000h)\n map_set(snesmap2+0x40,ROM,0x40,0x10000);\n\n // set banks 80-BF (40h x WRAM)\n map_set(snesmap2+0x80,wramdata,0x40,0);\n\n // set banks C0-FF (40h x 64KB ROM banks @10000h)\n map_set(snesmap2+0xC0,ROM,0x40,0x10000);\n\n // set banks 7E/7F (WRAM)\n snesmmap[0x7E] = snesmap2[0x7E] = wramdata;\n snesmmap[0x7F] = snesmap2[0x7F] = ram7f;\n}\n\nvoid map_bsx()\n{\n uint8_t *ROM = romdata;\n uint_fast8_t x;\n\n // set addresses 8000-FFFF\n // set banks 00-7F (80h x 32KB ROM banks @ 8000h)\n map_set(snesmmap,ROM-0x8000,0x80,0x8000);\n\n // set banks 80-BF (40h x 32KB ROM banks @ 8000h)\n map_set(snesmmap+0x80,ROM-0x8000,0x40,0x8000);\n\n // set banks C0-FF (40h x 32KB ROM banks @ 8000h)\n map_set(snesmmap+0xC0,ROM+0x8000,0x40,0x8000);\n\n // set addresses 0000-7FFF\n // set banks 00-3F (40h x WRAM)\n map_set(snesmap2,wramdata,0x40,0);\n\n // set banks 40-7F (40h x 32KB ROM banks @ 8000h)\n map_set(snesmap2+0x40,ROM+0x200000,0x40,0x8000);\n\n // set banks 80-BF (40h x WRAM)\n map_set(snesmap2+0x80,wramdata,0x40,0);\n\n // set banks C0-FF (40h x 32KB ROM banks @ 8000h)\n map_set(snesmap2+0xC0,ROM+0x8000,0x40,0x8000);\n\n // set banks 70-77 (07h x SRAM)\n for (x = 0x70; x <= 0x77; x++) { snesmap2[x] = sram; }\n\n // set banks 7E/7F (WRAM)\n snesmmap[0x7E] = snesmap2[0x7E] = wramdata;\n snesmmap[0x7F] = snesmap2[0x7F] = ram7f;\n}\n\nvoid initsnes()\n{\n ForceNewGfxOff = 0;\n\n if (!BSEnable)\n {\n MultiTap = pl12s34 ? 0 : (pl3contrl || pl4contrl || pl5contrl);\n\n if (SFXEnable) { map_sfx(); }\n else if (SA1Enable) { map_sa1(); }\n else if (SDD1Enable) { map_sdd1(); }\n else if (SPC7110Enable) { map_hirom(); }\n else if (curromsize == 13) { map_ehirom(); }\n else if (romtype == 2) { map_hirom(); }\n else { map_lorom(); }\n }\n else\n {\n map_bsx();\n }\n}\n\nvoid DosExit(), OpenSramFile(), CheatCodeLoad(), LoadSecondState(), LoadGameSpecificInput();\nextern uint8_t GUIOn, GUIOn2;\n\nbool loadfileGUI()\n{\n bool result = true;\n\n spcon = !SPCDisable;\n MessageOn = yesoutofmemory = IPSPatched = 0;\n\n loadROM();\n\n if (curromspace)\n {\n SramExists = 0;\n OpenSramFile();\n OpenCombFile();\n LoadGameSpecificInput();\n\n if (!(GUIOn || GUIOn2))\n {\n puts(\"File opened successfully !\");\n }\n }\n else\n {\n if (GUIOn || GUIOn2) { result = false; }\n else\n {\n puts(\"Error opening file!\\n\");\n asm_call(DosExit);\n }\n }\n\n return (result);\n}\n\nextern uint32_t CheatOn, NumCheats;\nextern uint8_t CheatWinMode, CheatSearchStatus;\nvoid GUIQuickLoadUpdate();\n\nvoid powercycle(bool sramload, bool romload)\n{\n zspc_reset();\n clearmem2();\n nmiprevaddrl = 0;\n nmiprevaddrh = 0;\n nmirept = 0;\n nmiprevline = 224;\n nmistatus = 0;\n NextLineCache = 0;\n curexecstate = 1;\n\n if (sramload) { OpenSramFile(); }\n if (romload) { romloadskip = 1; }\n\n if (!romload || (loadfileGUI()))\n {\n if (romload)\n { CheatOn = NumCheats = CheatWinMode = CheatSearchStatus = 0; }\n\n SetupROM();\n\n if (romload)\n {\n if (DisplayInfo) { showinfogui(); }\n initsnes();\n }\n\n sramsavedis = 0;\n memcpy(&sndrot, regsbackup, 3019);\n\n if (yesoutofmemory) { asm_call(outofmemfix); }\n asm_call(GUIDoReset);\n\n if (romload)\n {\n GUIQuickLoadUpdate();\n\n if (AutoLoadCht) { CheatCodeLoad(); }\n if (AutoState) { LoadSecondState(); }\n }\n }\n}\n\nextern uint8_t osm2dis, ReturnFromSPCStall, SPCStallSetting, prevoamptr;\nextern uint8_t NMIEnab, INTEnab;\nextern uint8_t doirqnext, vidbright, forceblnk, timeron, spcP, JoyAPos, JoyBPos;\nextern uint8_t coladdr, coladdg, coladdb;\nextern uint8_t SDD1BankA,SDD1BankB, 
SDD1BankC, SDD1BankD;\nextern uint8_t intrset, curcyc, cycpl, GUIReset;\nextern uint32_t numspcvblleft, SPC700read, SPC700write, spc700idle;\nextern uint32_t xa, xdb, xpb, xs, xd, xx, xy, scrndis;\nextern uint16_t VIRQLoc, resolutn, xpc;\n//extern uint8_t spcextraram[64], SPCROM[64];\nextern uint32_t tableD[256];\nuint8_t SPCSkipXtraROM, disableeffects = 0;\n//This is saved in states\nuint8_t cycpl = 0; // cycles per scanline\nuint8_t cycphb = 0; // cycles per hblank\nuint8_t intrset = 0; // interrupt set\nuint16_t curypos = 0; // current y position\nuint16_t stackand = 0x01FF; // value to and stack to keep it from going to the wrong area\nuint16_t stackor = 0x0100; // value to or stack to keep it from going to the wrong area\n\n// 65816 registers\nuint8_t xp = 0;\nuint8_t xe = 0;\nuint8_t xirqb = 0; // which bank the irqs start at\nuint32_t Curtableaddr = 0; // Current table address\n\nvoid SA1Reset();\nvoid InitC4();\nvoid RTCinit();\nvoid SPC7110init();\nvoid SPC7110_deinit_decompression_state();\n\nvoid init65816()\n{\n uint_fast8_t i;\n\n if(SA1Enable)\n {\n SA1Reset();\n SetAddressingModesSA1();\n }\n\n if(C4Enable)\n {\n InitC4();\n }\n\n if(RTCEnable)\n RTCinit();\n\n if (SPC7110Enable)\n {\n SPC7110init();\n map_mem(0x50, &SPC7110bank, 1);\n map_mem(0x00, &SPC7110SRAMBank, 1);\n map_mem(0x30, &SPC7110SRAMBank, 1);\n //Below should not be needed, since 50 is mapped above\n //snesmmap[0x50] = SPC7110_buffer;\n //snesmap2[0x50] = SPC7110_buffer;\n //memset(SPC7110_buffer, 0, 0x10000);\n }\n else\n {\n SPC7110_deinit_decompression_state();\n }\n\n cycpb268 = 140;\n cycpb358 = 156;\n cycpbl2 = 140;\n cycpblt2 = 156;\n cycpbl = 140;\n cycpblt = 156;\n\n SPCSkipXtraROM = 0;\n if(ReturnFromSPCStall)\n {\n cycpb268 = 69;\n cycpb358 = 81;\n cycpbl2 = 69;\n cycpblt2 = 69;\n cycpbl = 69;\n cycpblt = 69;\n SPCSkipXtraROM = 1;\n if(SPCStallSetting == 2)\n {\n cycpb268 = 240;\n cycpb358 = 240;\n cycpbl = 240;\n cycpblt = 240;\n cycpbl2 = 240;\n cycpblt2 = 240;\n SPCSkipXtraROM = 0;\n }\n }\n else\n {\n SPCStallSetting = 0;\n }\n\n numspcvblleft = 500;\n SPC700write = 0;\n SPC700read = 0;\n spc700idle = 0;\n\n for(i = 0;i<0x40;i++)\n {\n //spcextraram[i] = 0xFF;\n //SPCRAM[0xFFC0+i] = SPCROM[i];\n }\n\n // Clear SPC Memory\n //clearSPCRAM();\n clearvidsound();\n\n prevoamptr = 0xFF;\n disableeffects = osm2dis = 0;\n opexec268 = opexec268b;\n opexec358 = opexec358b;\n opexec268cph = opexec268cphb;\n opexec358cph = opexec358cphb;\n\n if (!(romdata[infoloc+BankOffset] & 0xF0)) // if not fastrom\n {\n opexec358 = opexec268;\n opexec358cph = opexec268cph;\n cycpb358 = cycpb268;\n }\n\n // Check Headers\n headerhack();\n\n\n // What the hell is this?\n //SPCRAM[0xF4] = 0;\n //SPCRAM[0xF5] = 0;\n //SPCRAM[0xF6] = 0;\n //SPCRAM[0xF7] = 0;\n\n //reg1read = 0;\n //reg2read = 0;\n //reg3read = 0;\n //reg4read = 0;\n cycpbl = 0;\n //spcnumread = 0;\n NMIEnab = 1;\n VIRQLoc = 0;\n doirqnext = 0;\n resolutn = 224;\n vidbright = 0;\n forceblnk = 0;\n //spcP = 0;\n //timeron = 0;\n JoyAPos = 0;\n JoyBPos = 0;\n coladdr = 0;\n coladdg = 0;\n coladdb = 0;\n INTEnab = 0;\n xa = 0;\n xdb = 0;\n xpb = 0;\n xs = 0x01FF;\n xd = 0;\n xx = 0;\n xy = 0;\n SDD1BankA = 0;\n SDD1BankB = 0x01;\n SDD1BankC = 0x02;\n SDD1BankD = 0x03;\n xirqb = 0;\n xp = 52; // NVMXDIZC\n xe = 1; // E\n xpc = resetv;\n\n intrset = 0;\n\n if (xpc < 0x8000)\n {\n xpc += 0x8000;\n //xpb = 0x40;\n }\n\n // 2.68 Mhz / 3.58 Mhz = 228\n curcyc = cycpl = opexec268;\n cycphb = opexec268cph; // 2.68 Mhz / 3.58 Mhz = 56\n cycpbl = 110; // 3.58Mhz = 175\n 
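/*
 * Editor's illustrative sketch, not part of the archived source: the
 * reset state programmed in init65816() below matches the 65816 after
 * /RES -- emulation mode (xe = 1), 8-bit accumulator and index
 * registers (M = X = 1), interrupts masked (I = 1), decimal mode
 * clear, the stack forced into page 1 ($01FF with stackand/stackor),
 * and PC fetched from the $FFFC reset vector. xp = 52 is exactly that
 * P value:
 */
#include <stdint.h>

enum {                  /* P register flag bits, NVMXDIZC order */
  FLAG_C = 0x01, FLAG_Z = 0x02, FLAG_I = 0x04, FLAG_D = 0x08,
  FLAG_X = 0x10, FLAG_M = 0x20, FLAG_V = 0x40, FLAG_N = 0x80
};

/* 0x34 == 52 decimal: M, X and I set, everything else clear */
static const uint8_t P_AT_RESET = FLAG_M | FLAG_X | FLAG_I;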
cycpblt = 110;\n curypos = 0;\n Curtableaddr = *tableD;\n scrndis = 0;\n stackand = 0x01FF;\n stackor = 0x0100;\n\n nmiprevaddrl = 0;\n nmiprevaddrh = 0;\n nmirept = 0;\n nmiprevline = 224;\n nmistatus = 0;\n\n if(GUIReset)\n {\n GUIReset = 0;\n }\n\n else\n {\n memset(wramdataa,0x55,0x10000);\n memset(ram7fa, 0x55,0x10000);\n }\n\n if(BSEnable)\n {\n memset(wramdataa,0xFF,0x10000);\n memset(ram7fa,0xFF,0x10000);\n if(romtype == 1)\n memset(ram7fa+65528, 0x01, 8);\n }\n}\n\n\n#ifndef QT_DEBUGGER\n#define debug_exit(n) exit(n)\n#endif\n\nextern unsigned char debugger;\nstatic bool zexit_called = false;\n\nvoid zexit()\n{\n if (!zexit_called)\n {\n zexit_called = true;\n if (debugger)\n {\n debug_exit(0);\n }\n else\n {\n exit(0);\n }\n }\n}\n\nvoid zexit_error()\n{\n if (!zexit_called)\n {\n zexit_called = true;\n if (debugger)\n {\n debug_exit(1);\n }\n else\n {\n exit(1);\n }\n }\n}\n" }, { "alpha_fraction": 0.7210526466369629, "alphanum_fraction": 0.7360902428627014, "avg_line_length": 20.786884307861328, "blob_id": "62d374156a63d800fd69cd4f1f1c4db9351cd235", "content_id": "876464a4866d4e2453122801d6e4ac471a69efa7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1330, "license_type": "no_license", "max_line_length": 72, "num_lines": 61, "path": "/src/debugger/ui.cpp", "repo_name": "TeoTwawki/zsnes-archive", "src_encoding": "UTF-8", "text": "/*\nCopyright (C) 1997-2008 ZSNES Team ( zsKnight, _Demo_, pagefault, Nach )\n\nhttp://www.zsnes.com\nhttp://sourceforge.net/projects/zsnes\nhttps://zsnes.bountysource.com\n\nThis program is free software; you can redistribute it and/or\nmodify it under the terms of the GNU General Public License\nversion 2 as published by the Free Software Foundation.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with this program; if not, write to the Free Software\nFoundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.\n*/\n\n#include <QMessageBox>\n\n#include \"ui.h\"\n\nQtDebugger::QtDebugger(QWidget *parent) : QMainWindow(parent)\n{\n ui.setupUi(this);\n}\n\nQtDebugger::~QtDebugger()\n{\n\n}\n\nQtDebugger *QtDebugger::singleton = 0;\n\nvoid QtDebugger::showQtDebugger(QWidget *parent)\n{\n if (!singleton)\n {\n singleton = new QtDebugger(parent);\n }\n singleton->show();\n}\n\nvoid QtDebugger::destroyQtDebugger()\n{\n if (singleton)\n {\n delete singleton;\n singleton = 0;\n }\n}\n\nextern \"C\" { extern unsigned char EMUPause; }\n\nvoid QtDebugger::on_pauseButton_clicked()\n{\n EMUPause ^= 1;\n}\n\n" }, { "alpha_fraction": 0.6200722455978394, "alphanum_fraction": 0.6407687067985535, "avg_line_length": 32.81666564941406, "blob_id": "738b39c996b78fa113d7128dc233303313db459f", "content_id": "9762858eb8f45730762a29d935c3d0d97030b8fb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 6088, "license_type": "no_license", "max_line_length": 93, "num_lines": 180, "path": "/src/Makefile.in", "repo_name": "TeoTwawki/zsnes-archive", "src_encoding": "UTF-8", "text": "#Copyright (C) 1997-2008 ZSNES Team ( zsKnight, _Demo_, pagefault, Nach )\n#\n#http://www.zsnes.com\n#http://sourceforge.net/projects/zsnes\n#https://zsnes.bountysource.com\n#\n#This program is free software; you can redistribute it and/or\n#modify it under the terms of the GNU General Public License\n#version 2 as published by the Free Software Foundation.\n#\n#This program is distributed in the hope that it will be useful,\n#but WITHOUT ANY WARRANTY; without even the implied warranty of\n#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n#GNU General Public License for more details.\n#\n#You should have received a copy of the GNU General Public License\n#along with this program; if not, write to the Free Software\n#Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.\n\nCHIP_D=chips\nCPU_D=cpu\nDEBUG_D=debugger\nEFF_D=effects\nGUI_D=gui\nJMA_D=jma\nMMLIB_D=mmlib\nNET_D=net\nPORT_D=linux\nSPC_D=cpu/zspc\nTOOL_D=tools\nVID_D=video\nZIP_D=zip\n\nPSR=parsegen\nPSR_H=cfg.h md.h input.h\n\nCHIP_AO=$(CHIP_D)/7110proc.o $(CHIP_D)/c4proc.o $(CHIP_D)/dsp1proc.o\\\n\t$(CHIP_D)/dsp2proc.o $(CHIP_D)/dsp3proc.o $(CHIP_D)/dsp4proc.o\\\n\t$(CHIP_D)/fxemu2.o $(CHIP_D)/fxemu2b.o $(CHIP_D)/fxemu2c.o\\\n\t$(CHIP_D)/fxtable.o $(CHIP_D)/obc1proc.o $(CHIP_D)/sa1proc.o\\\n\t$(CHIP_D)/sa1regs.o $(CHIP_D)/sfxproc.o $(CHIP_D)/st10proc.o\\\n\t$(CHIP_D)/st11proc.o\nCHIP_CO=$(CHIP_D)/7110emu.o $(CHIP_D)/c4emu.o $(CHIP_D)/dsp1emu.o\\\n\t$(CHIP_D)/dsp3emu.o $(CHIP_D)/dsp4emu.o $(CHIP_D)/obc1emu.o\\\n\t$(CHIP_D)/sa1emu.o $(CHIP_D)/sdd1emu.o $(CHIP_D)/seta10.o\\\n\t$(CHIP_D)/seta11.o\n\nCPU_AO=$(CPU_D)/dma.o $(CPU_D)/execute.o $(CPU_D)/irq.o $(CPU_D)/memory.o\\\n\t$(CPU_D)/stable.o $(CPU_D)/table.o $(CPU_D)/tablec.o\nCPU_CO=$(CPU_D)/executec.o $(CPU_D)/memtable.o $(SPC_D)/zdsp.o\\\n\t$(SPC_D)/zspc.o\n\nDEBUG_CO=@DEBUG_CF@\nDEBUG_QO=@DEBUG_QF@\n\nEFF_CO=$(EFF_D)/burn.o $(EFF_D)/smoke.o $(EFF_D)/water.o\n\nGUI_AO=$(GUI_D)/guiold.o $(GUI_D)/menu.o\nGUI_CO=$(GUI_D)/gui.o $(GUI_D)/guifuncs.o $(GUI_D)/moc_gui.o\nGUI_QO=$(GUI_D)/moc_gui.cpp $(GUI_D)/ui_zsnes.h\n\nJMA_CO=@JMA_F@\n\nMAIN_AO=endmem.o init.o vcache.o ztime.o\nMAIN_CO=cfg.o initc.o input.o md.o patch.o ui.o version.o zdir.o\\\n\tzloader.o zmovie.o zpath.o zstate.o ztimec.o\n\nMMLIB_CO=@MMLIB_F@\n\nNET_CO=\n#$(NET_D)/ztcp.o\n\nPORT_AO=$(PORT_D)/sdlintrf.o\nPORT_CO=$(PORT_D)/audio.o $(PORT_D)/battery.o @GL_DRAW@ $(PORT_D)/lib.o\\\n\t$(PORT_D)/safelib.o $(PORT_D)/sdllink.o $(PORT_D)/sw_draw.o\\\n\t$(PORT_D)/x11.o\n\nTOOL_CO=$(TOOL_D)/fileutil.o $(TOOL_D)/strutil.o\n\nVID_AO=$(VID_D)/copyvwin.o $(VID_D)/makev16b.o $(VID_D)/makev16t.o\\\n\t$(VID_D)/makevid.o $(VID_D)/mode716.o $(VID_D)/mode716b.o\\\n\t$(VID_D)/mode716d.o $(VID_D)/mode716e.o $(VID_D)/mode716t.o\\\n\t$(VID_D)/mv16tms.o $(VID_D)/m716text.o $(VID_D)/newg162.o\\\n\t$(VID_D)/newgfx.o $(VID_D)/newgfx16.o $(VID_D)/procvid.o\\\n\t$(VID_D)/sw_draw.o $(VID_D)/2xsaiw.o $(VID_D)/hq2x16.o $(VID_D)/hq2x32.o\\\n\t$(VID_D)/hq3x16.o $(VID_D)/hq3x32.o $(VID_D)/hq4x16.o $(VID_D)/hq4x32.o\nVID_CO=$(VID_D)/procvidc.o $(VID_D)/ntsc.o\n\nZIP_CO=$(ZIP_D)/unzip.o $(ZIP_D)/zpng.o\n\nZ_AOBJS=$(MAIN_AO) $(CHIP_AO) $(CPU_AO) $(GUI_AO) $(PORT_AO) $(VID_AO)\nZ_COBJS=$(MAIN_CO) $(CHIP_CO) $(CPU_CO) $(DEBUG_CO) $(EFF_CO) $(GUI_CO)\\\n\t$(JMA_CO) $(MMLIB_CO) $(NET_CO) $(PORT_CO) $(VID_CO) $(ZIP_CO)\nZ_QOBJS=$(DEBUG_QO) $(GUI_QO)\nZ_OBJS=$(Z_AOBJS) $(Z_COBJS)\n\n.PHONY: default main tools all install clean cclean tclean distclean\n.SUFFIXES: .asm .c .cpp .psr .ui .h\n\n%.o: %.asm\n\t@NASMPATH@ @NFLAGS@ -o $@ $<\n%.o: %.c\n\t@CC@ @CFLAGS@ -o $@ -c $<\n%.o: %.cpp\n\t@CXX@ @CXXFLAGS@ -o $@ -c $<\n%.o %.h: %.psr $(PSR)\n\t./$(PSR) @PSRFLAGS@ -gcc @CC@ -compile -flags \"@CFLAGS@ -O1\" -cheader $*.h -fname $* $*.o $<\nui_%.h: %.ui\n\tuic -o $@ $<\nmoc_%.cpp: %.h\n\tmoc -o $@ $<\n\ndefault: main\nall: main tools\nmain: makefile.dep $(Z_QOBJS) $(Z_OBJS)\n\t@ZC@ -o @ZSNESEXE@ $(Z_OBJS) @ZCFLAGS@ @LDFLAGS@\n\trm -f version.o\n\n$(PSR): parsegen.cpp\n\t@CXX@ @CXXFLAGS@ -o $@ $< @LDFLAGS@\n\nTOOLSEXE=$(TOOL_D)/archopt $(TOOL_D)/cutrtype 
$(TOOL_D)/extraext\\\n\t$(TOOL_D)/macroll $(TOOL_D)/minwhite $(TOOL_D)/nreplace\\\n\t$(TOOL_D)/sec-test $(TOOL_D)/srccount $(TOOL_D)/varrep\ntools: $(TOOLSEXE) $(TOOL_D)/depbuild\n$(TOOL_D)/archopt: $(TOOL_D)/archopt.c\n\t@CC@ @CFLAGS@ -m32 -o $@ [email protected]\n$(TOOL_D)/cutrtype: $(TOOL_D)/cutrtype.cpp $(TOOL_CO)\n\t@CXX@ @CXXFLAGS@ -o $@ [email protected] $(TOOL_CO)\n$(TOOL_D)/depbuild: $(TOOL_D)/depbuild.cpp $(TOOL_CO)\n\t@CXX@ @CXXFLAGS@ -o $@ [email protected] $(TOOL_CO)\n$(TOOL_D)/extraext: $(TOOL_D)/extraext.cpp $(TOOL_CO)\n\t@CXX@ @CXXFLAGS@ -o $@ [email protected] $(TOOL_CO)\n$(TOOL_D)/macroll: $(TOOL_D)/macroll.cpp $(TOOL_CO)\n\t@CXX@ @CXXFLAGS@ -o $@ [email protected] $(TOOL_CO)\n$(TOOL_D)/minwhite: $(TOOL_D)/minwhite.cpp $(TOOL_CO)\n\t@CXX@ @CXXFLAGS@ -o $@ [email protected] $(TOOL_D)/fileutil.o\n$(TOOL_D)/nreplace: $(TOOL_D)/nreplace.cpp $(TOOL_CO)\n\t@CXX@ @CXXFLAGS@ -o $@ [email protected] $(TOOL_D)/fileutil.o\n$(TOOL_D)/sec-test: $(TOOL_D)/sec-test.cpp $(TOOL_CO)\n\t@CXX@ @CXXFLAGS@ -o $@ [email protected] $(TOOL_CO)\n$(TOOL_D)/srccount: $(TOOL_D)/srccount.cpp $(TOOL_CO)\n\t@CXX@ @CXXFLAGS@ -o $@ [email protected] $(TOOL_D)/fileutil.o\n$(TOOL_D)/varrep: $(TOOL_D)/varrep.cpp $(TOOL_CO)\n\t@CXX@ @CXXFLAGS@ -o $@ [email protected] $(TOOL_CO)\n\ninclude makefile.dep\n\nconfig.status: config.guess config.sub configure.in\n\t./autogen.sh --recheck\nMakefile: config.status Makefile.in\n\t./config.status\nmakefile.dep: $(TOOL_D)/depbuild Makefile\n\t$(TOOL_D)/depbuild @CC@ \"@CFLAGS@\" @NASMPATH@ \"@NFLAGS@\" $(Z_OBJS) > $@\n\n# set ${prefix} so mandir works.\nprefix=@prefix@\nexec_prefix=@exec_prefix@\ndatarootdir=@datarootdir@\n\nplayspc: playspc.c $(SPC_D)/zdsp.o $(SPC_D)/zspc.o\n\t@ZC@ @CFLAGS@ -o $@ playspc.c $(SPC_D)/zdsp.o $(SPC_D)/zspc.o -lao\n\ninstall:\n\t@INSTALL@ -d -m 0755 $(DESTDIR)/@bindir@\n\t@INSTALL@ -m 0755 @ZSNESEXE@ $(DESTDIR)/@bindir@\n\t@INSTALL@ -d -m 0755 $(DESTDIR)/@mandir@/man1\n\t@INSTALL@ -m 0644 linux/zsnes.1 $(DESTDIR)/@mandir@/man1\nuninstall:\n\trm -f @bindir@/$(notdir @ZSNESEXE@) @mandir@/man1/zsnes.1\n\nclean:\n\trm -f makefile.dep $(Z_OBJS) $(Z_QOBJS) $(PSR) $(PSR_H) @ZSNESEXE@\ncclean:\n\trm -f $(Z_COBJS) $(Z_QOBJS) $(PSR) $(PSR_H) @ZSNESEXE@\ntclean:\n\trm -f $(TOOL_CO) $(TOOLSEXE)\ndistclean: clean tclean\n\trm -f Makefile aclocal.m4 configure config.log \\\n\tconfig.status config.h $(TOOL_D)/depbuild\n\n" }, { "alpha_fraction": 0.3583754003047943, "alphanum_fraction": 0.6019706726074219, "avg_line_length": 36.484683990478516, "blob_id": "2e820cf86b352c17888a6469a1c06ef6a42cf3a9", "content_id": "4b8f53a18cba8dc57768e2361546931343fa119d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 20805, "license_type": "no_license", "max_line_length": 140, "num_lines": 555, "path": "/src/chips/seta10.c", "repo_name": "TeoTwawki/zsnes-archive", "src_encoding": "UTF-8", "text": "/*\nCopyright (C) 1997-2008 ZSNES Team ( zsKnight, _Demo_, pagefault, Nach )\n\nhttp://www.zsnes.com\nhttp://sourceforge.net/projects/zsnes\nhttps://zsnes.bountysource.com\n\nThis program is free software; you can redistribute it and/or\nmodify it under the terms of the GNU General Public License\nversion 2 as published by the Free Software Foundation.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with this program; if not, write to the Free Software\nFoundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.\n*/\n\n//http://users.tpg.com.au/trauma/dsp/st010.html\n\n#ifdef __UNIXSDL__\n#include \"gblhdr.h\"\n#else\n#include <math.h> //sqrt()\n#include <stdlib.h> //abs()\n#include <stdint.h>\n#include <stdbool.h>\n#endif\n\n#define SRAM setaramdata\n\nextern unsigned char *setaramdata;\nvoid ST010DoCommand();\n\n// Mode 7 scaling constants for all raster lines\nconst int16_t ST010_M7Scale[176] = {\n\t0x0380, 0x0325, 0x02da, 0x029c, 0x0268, 0x023b, 0x0215, 0x01f3,\n\t0x01d5, 0x01bb, 0x01a3, 0x018e, 0x017b, 0x016a, 0x015a, 0x014b,\n\t0x013e, 0x0132, 0x0126, 0x011c, 0x0112, 0x0109, 0x0100, 0x00f8,\n\t0x00f0, 0x00e9, 0x00e3, 0x00dc, 0x00d6, 0x00d1, 0x00cb, 0x00c6,\n\t0x00c1, 0x00bd, 0x00b8, 0x00b4, 0x00b0, 0x00ac, 0x00a8, 0x00a5,\n\t0x00a2, 0x009e, 0x009b, 0x0098, 0x0095, 0x0093, 0x0090, 0x008d,\n\t0x008b, 0x0088, 0x0086, 0x0084, 0x0082, 0x0080, 0x007e, 0x007c,\n\t0x007a, 0x0078, 0x0076, 0x0074, 0x0073, 0x0071, 0x006f, 0x006e,\n\t0x006c, 0x006b, 0x0069, 0x0068, 0x0067, 0x0065, 0x0064, 0x0063,\n\t0x0062, 0x0060, 0x005f, 0x005e, 0x005d, 0x005c, 0x005b, 0x005a,\n\t0x0059, 0x0058, 0x0057, 0x0056, 0x0055, 0x0054, 0x0053, 0x0052,\n\t0x0051, 0x0051, 0x0050, 0x004f, 0x004e, 0x004d, 0x004d, 0x004c,\n\t0x004b, 0x004b, 0x004a, 0x0049, 0x0048, 0x0048, 0x0047, 0x0047,\n\t0x0046, 0x0045, 0x0045, 0x0044, 0x0044, 0x0043, 0x0042, 0x0042,\n\t0x0041, 0x0041, 0x0040, 0x0040, 0x003f, 0x003f, 0x003e, 0x003e,\n\t0x003d, 0x003d, 0x003c, 0x003c, 0x003b, 0x003b, 0x003a, 0x003a,\n\t0x003a, 0x0039, 0x0039, 0x0038, 0x0038, 0x0038, 0x0037, 0x0037,\n\t0x0036, 0x0036, 0x0036, 0x0035, 0x0035, 0x0035, 0x0034, 0x0034,\n\t0x0034, 0x0033, 0x0033, 0x0033, 0x0032, 0x0032, 0x0032, 0x0031,\n\t0x0031, 0x0031, 0x0030, 0x0030, 0x0030, 0x0030, 0x002f, 0x002f,\n\t0x002f, 0x002e, 0x002e, 0x002e, 0x002e, 0x002d, 0x002d, 0x002d,\n\t0x002d, 0x002c, 0x002c, 0x002c, 0x002c, 0x002b, 0x002b, 0x002b\n};\n\nconst int16_t ST010_SinTable[256] = {\n\t 0x0000, 0x0324, 0x0648, 0x096a, 0x0c8c, 0x0fab, 0x12c8, 0x15e2,\n\t 0x18f9, 0x1c0b, 0x1f1a, 0x2223, 0x2528, 0x2826, 0x2b1f, 0x2e11,\n\t 0x30fb, 0x33df, 0x36ba, 0x398c, 0x3c56, 0x3f17, 0x41ce, 0x447a,\n\t 0x471c, 0x49b4, 0x4c3f, 0x4ebf, 0x5133, 0x539b, 0x55f5, 0x5842,\n\t 0x5a82, 0x5cb3, 0x5ed7, 0x60eb, 0x62f1, 0x64e8, 0x66cf, 0x68a6,\n\t 0x6a6d, 0x6c23, 0x6dc9, 0x6f5e, 0x70e2, 0x7254, 0x73b5, 0x7504,\n\t 0x7641, 0x776b, 0x7884, 0x7989, 0x7a7c, 0x7b5c, 0x7c29, 0x7ce3,\n\t 0x7d89, 0x7e1d, 0x7e9c, 0x7f09, 0x7f61, 0x7fa6, 0x7fd8, 0x7ff5,\n\t 0x7fff, 0x7ff5, 0x7fd8, 0x7fa6, 0x7f61, 0x7f09, 0x7e9c, 0x7e1d,\n\t 0x7d89, 0x7ce3, 0x7c29, 0x7b5c, 0x7a7c, 0x7989, 0x7884, 0x776b,\n\t 0x7641, 0x7504, 0x73b5, 0x7254, 0x70e2, 0x6f5e, 0x6dc9, 0x6c23,\n\t 0x6a6d, 0x68a6, 0x66cf, 0x64e8, 0x62f1, 0x60eb, 0x5ed7, 0x5cb3,\n\t 0x5a82, 0x5842, 0x55f5, 0x539b, 0x5133, 0x4ebf, 0x4c3f, 0x49b4,\n\t 0x471c, 0x447a, 0x41ce, 0x3f17, 0x3c56, 0x398c, 0x36ba, 0x33df,\n\t 0x30fb, 0x2e11, 0x2b1f, 0x2826, 0x2528, 0x2223, 0x1f1a, 0x1c0b,\n\t 0x18f8, 0x15e2, 0x12c8, 0x0fab, 0x0c8c, 0x096a, 0x0648, 0x0324,\n\t 0x0000, -0x0324, -0x0648, -0x096b, -0x0c8c, -0x0fab, -0x12c8, -0x15e2,\n\t-0x18f9, -0x1c0b, -0x1f1a, -0x2223, -0x2528, -0x2826, -0x2b1f, -0x2e11,\n\t-0x30fb, -0x33df, -0x36ba, -0x398d, -0x3c56, -0x3f17, -0x41ce, -0x447a,\n\t-0x471c, -0x49b4, -0x4c3f, -0x4ebf, -0x5133, -0x539b, -0x55f5, 
-0x5842,\n\t-0x5a82, -0x5cb3, -0x5ed7, -0x60ec, -0x62f1, -0x64e8, -0x66cf, -0x68a6,\n\t-0x6a6d, -0x6c23, -0x6dc9, -0x6f5e, -0x70e2, -0x7254, -0x73b5, -0x7504,\n\t-0x7641, -0x776b, -0x7884, -0x7989, -0x7a7c, -0x7b5c, -0x7c29, -0x7ce3,\n\t-0x7d89, -0x7e1d, -0x7e9c, -0x7f09, -0x7f61, -0x7fa6, -0x7fd8, -0x7ff5,\n\t-0x7fff, -0x7ff5, -0x7fd8, -0x7fa6, -0x7f61, -0x7f09, -0x7e9c, -0x7e1d,\n\t-0x7d89, -0x7ce3, -0x7c29, -0x7b5c, -0x7a7c, -0x7989, -0x7883, -0x776b,\n\t-0x7641, -0x7504, -0x73b5, -0x7254, -0x70e2, -0x6f5e, -0x6dc9, -0x6c23,\n\t-0x6a6d, -0x68a6, -0x66cf, -0x64e8, -0x62f1, -0x60eb, -0x5ed7, -0x5cb3,\n\t-0x5a82, -0x5842, -0x55f5, -0x539a, -0x5133, -0x4ebf, -0x4c3f, -0x49b3,\n\t-0x471c, -0x447a, -0x41cd, -0x3f17, -0x3c56, -0x398c, -0x36b9, -0x33de,\n\t-0x30fb, -0x2e10, -0x2b1f, -0x2826, -0x2527, -0x2223, -0x1f19, -0x1c0b,\n\t-0x18f8, -0x15e2, -0x12c8, -0x0fab, -0x0c8b, -0x096a, -0x0647, -0x0324};\n\nconst unsigned char ST010_ArcTan[32][32] = {\n\t{ 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80,\n\t 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80},\n\t{ 0x80, 0xa0, 0xad, 0xb3, 0xb6, 0xb8, 0xb9, 0xba, 0xbb, 0xbb, 0xbc, 0xbc, 0xbd, 0xbd, 0xbd, 0xbd,\n\t 0xbd, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xbf, 0xbf, 0xbf, 0xbf},\n\t{ 0x80, 0x93, 0xa0, 0xa8, 0xad, 0xb0, 0xb3, 0xb5, 0xb6, 0xb7, 0xb8, 0xb9, 0xb9, 0xba, 0xba, 0xbb,\n\t 0xbb, 0xbb, 0xbb, 0xbc, 0xbc, 0xbc, 0xbc, 0xbc, 0xbd, 0xbd, 0xbd, 0xbd, 0xbd, 0xbd, 0xbd, 0xbd},\n\t{ 0x80, 0x8d, 0x98, 0xa0, 0xa6, 0xaa, 0xad, 0xb0, 0xb1, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, 0xb7, 0xb8,\n\t 0xb8, 0xb9, 0xb9, 0xba, 0xba, 0xba, 0xba, 0xbb, 0xbb, 0xbb, 0xbb, 0xbb, 0xbc, 0xbc, 0xbc, 0xbc},\n\t{ 0x80, 0x8a, 0x93, 0x9a, 0xa0, 0xa5, 0xa8, 0xab, 0xad, 0xaf, 0xb0, 0xb2, 0xb3, 0xb4, 0xb5, 0xb5,\n\t 0xb6, 0xb7, 0xb7, 0xb8, 0xb8, 0xb8, 0xb9, 0xb9, 0xb9, 0xba, 0xba, 0xba, 0xba, 0xba, 0xbb, 0xbb},\n\t{ 0x80, 0x88, 0x90, 0x96, 0x9b, 0xa0, 0xa4, 0xa7, 0xa9, 0xab, 0xad, 0xaf, 0xb0, 0xb1, 0xb2, 0xb3,\n\t 0xb4, 0xb4, 0xb5, 0xb6, 0xb6, 0xb6, 0xb7, 0xb7, 0xb8, 0xb8, 0xb8, 0xb9, 0xb9, 0xb9, 0xb9, 0xb9},\n\t{ 0x80, 0x87, 0x8d, 0x93, 0x98, 0x9c, 0xa0, 0xa3, 0xa6, 0xa8, 0xaa, 0xac, 0xad, 0xae, 0xb0, 0xb0,\n\t 0xb1, 0xb2, 0xb3, 0xb4, 0xb4, 0xb5, 0xb5, 0xb6, 0xb6, 0xb6, 0xb7, 0xb7, 0xb7, 0xb8, 0xb8, 0xb8},\n\t{ 0x80, 0x86, 0x8b, 0x90, 0x95, 0x99, 0x9d, 0xa0, 0xa3, 0xa5, 0xa7, 0xa9, 0xaa, 0xac, 0xad, 0xae,\n\t 0xaf, 0xb0, 0xb1, 0xb2, 0xb2, 0xb3, 0xb3, 0xb4, 0xb4, 0xb5, 0xb5, 0xb6, 0xb6, 0xb6, 0xb7, 0xb7},\n\t{ 0x80, 0x85, 0x8a, 0x8f, 0x93, 0x97, 0x9a, 0x9d, 0xa0, 0xa2, 0xa5, 0xa6, 0xa8, 0xaa, 0xab, 0xac,\n\t 0xad, 0xae, 0xaf, 0xb0, 0xb0, 0xb1, 0xb2, 0xb2, 0xb3, 0xb3, 0xb4, 0xb4, 0xb5, 0xb5, 0xb5, 0xb5},\n\t{ 0x80, 0x85, 0x89, 0x8d, 0x91, 0x95, 0x98, 0x9b, 0x9e, 0xa0, 0xa0, 0xa4, 0xa6, 0xa7, 0xa9, 0xaa,\n\t 0xab, 0xac, 0xad, 0xae, 0xaf, 0xb0, 0xb0, 0xb1, 0xb1, 0xb2, 0xb2, 0xb3, 0xb3, 0xb4, 0xb4, 0xb4},\n\t{ 0x80, 0x84, 0x88, 0x8c, 0x90, 0x93, 0x96, 0x99, 0x9b, 0x9e, 0xa0, 0xa2, 0xa4, 0xa5, 0xa7, 0xa8,\n\t 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, 0xaf, 0xb0, 0xb0, 0xb1, 0xb2, 0xb2, 0xb2, 0xb3, 0xb3},\n\t{ 0x80, 0x84, 0x87, 0x8b, 0x8e, 0x91, 0x94, 0x97, 0x9a, 0x9c, 0x9e, 0xa0, 0xa2, 0xa3, 0xa5, 0xa6,\n\t 0xa7, 0xa9, 0xaa, 0xab, 0xac, 0xac, 0xad, 0xae, 0xae, 0xaf, 0xb0, 0xb0, 0xb1, 0xb1, 0xb2, 0xb2},\n\t{ 0x80, 0x83, 0x87, 0x8a, 0x8d, 0x90, 0x93, 0x96, 0x98, 0x9a, 0x9c, 0x9e, 0xa0, 0xa2, 0xa3, 0xa5,\n\t 0xa6, 0xa7, 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xac, 0xad, 0xae, 0xae, 
0xaf, 0xb0, 0xb0, 0xb0, 0xb1},\n\t{ 0x80, 0x83, 0x86, 0x89, 0x8c, 0x8f, 0x92, 0x94, 0x96, 0x99, 0x9b, 0x9d, 0x9e, 0xa0, 0xa2, 0xa3,\n\t 0xa4, 0xa5, 0xa7, 0xa8, 0xa9, 0xa9, 0xaa, 0xab, 0xac, 0xac, 0xad, 0xae, 0xae, 0xaf, 0xaf, 0xb0},\n\t{ 0x80, 0x83, 0x86, 0x89, 0x8b, 0x8e, 0x90, 0x93, 0x95, 0x97, 0x99, 0x9b, 0x9d, 0x9e, 0xa0, 0xa1,\n\t 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, 0xa8, 0xa9, 0xaa, 0xaa, 0xab, 0xac, 0xad, 0xad, 0xae, 0xae, 0xaf},\n\t{ 0x80, 0x83, 0x85, 0x88, 0x8b, 0x8d, 0x90, 0x92, 0x94, 0x96, 0x98, 0x9a, 0x9b, 0x9d, 0x9f, 0xa0,\n\t 0xa1, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, 0xa8, 0xa8, 0xa9, 0xaa, 0xab, 0xab, 0xac, 0xad, 0xad, 0xae},\n\t{ 0x80, 0x83, 0x85, 0x88, 0x8a, 0x8c, 0x8f, 0x91, 0x93, 0x95, 0x97, 0x99, 0x9a, 0x9c, 0x9d, 0x9f,\n\t 0xa0, 0xa1, 0xa2, 0xa3, 0xa5, 0xa5, 0xa6, 0xa7, 0xa8, 0xa9, 0xaa, 0xaa, 0xab, 0xab, 0xac, 0xad},\n\t{ 0x80, 0x82, 0x85, 0x87, 0x89, 0x8c, 0x8e, 0x90, 0x92, 0x94, 0x96, 0x97, 0x99, 0x9b, 0x9c, 0x9d,\n\t 0x9f, 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, 0xa8, 0xa8, 0xa9, 0xaa, 0xaa, 0xab, 0xac},\n\t{ 0x80, 0x82, 0x85, 0x87, 0x89, 0x8b, 0x8d, 0x8f, 0x91, 0x93, 0x95, 0x96, 0x98, 0x99, 0x9b, 0x9c,\n\t 0x9e, 0x9f, 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, 0xa7, 0xa8, 0xa9, 0xa9, 0xaa, 0xab},\n\t{ 0x80, 0x82, 0x84, 0x86, 0x88, 0x8a, 0x8c, 0x8e, 0x90, 0x92, 0x94, 0x95, 0x97, 0x98, 0x9a, 0x9b,\n\t 0x9d, 0x9e, 0x9f, 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa6, 0xa7, 0xa8, 0xa8, 0xa9, 0xaa},\n\t{ 0x80, 0x82, 0x84, 0x86, 0x88, 0x8a, 0x8c, 0x8e, 0x90, 0x91, 0x93, 0x94, 0x96, 0x97, 0x99, 0x9a,\n\t 0x9b, 0x9d, 0x9e, 0x9f, 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa5, 0xa6, 0xa7, 0xa7, 0xa8, 0xa9},\n\t{ 0x80, 0x82, 0x84, 0x86, 0x88, 0x8a, 0x8b, 0x8d, 0x8f, 0x90, 0x92, 0x94, 0x95, 0x97, 0x98, 0x99,\n\t 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa4, 0xa5, 0xa6, 0xa6, 0xa7, 0xa8},\n\t{ 0x80, 0x82, 0x84, 0x86, 0x87, 0x89, 0x8b, 0x8d, 0x8e, 0x90, 0x91, 0x93, 0x94, 0x96, 0x97, 0x98,\n\t 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, 0xa0, 0xa1, 0xa2, 0xa3, 0xa3, 0xa4, 0xa5, 0xa6, 0xa6, 0xa7},\n\t{ 0x80, 0x82, 0x84, 0x85, 0x87, 0x89, 0x8a, 0x8c, 0x8e, 0x8f, 0x91, 0x92, 0x94, 0x95, 0x96, 0x98,\n\t 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, 0xa0, 0xa1, 0xa2, 0xa2, 0xa3, 0xa4, 0xa5, 0xa5, 0xa6},\n\t{ 0x80, 0x82, 0x83, 0x85, 0x87, 0x88, 0x8a, 0x8c, 0x8d, 0x8f, 0x90, 0x92, 0x93, 0x94, 0x96, 0x97,\n\t 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, 0xa0, 0xa1, 0xa2, 0xa2, 0xa3, 0xa4, 0xa5, 0xa5},\n\t{ 0x80, 0x82, 0x83, 0x85, 0x86, 0x88, 0x8a, 0x8b, 0x8d, 0x8e, 0x90, 0x91, 0x92, 0x94, 0x95, 0x96,\n\t 0x97, 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, 0xa0, 0xa1, 0xa2, 0xa2, 0xa3, 0xa4, 0xa4},\n\t{ 0x80, 0x82, 0x83, 0x85, 0x86, 0x88, 0x89, 0x8b, 0x8c, 0x8e, 0x8f, 0x90, 0x92, 0x93, 0x94, 0x95,\n\t 0x96, 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9e, 0x9f, 0xa0, 0xa1, 0xa2, 0xa2, 0xa3, 0xa4},\n\t{ 0x80, 0x82, 0x83, 0x85, 0x86, 0x87, 0x89, 0x8a, 0x8c, 0x8d, 0x8e, 0x90, 0x91, 0x92, 0x93, 0x95,\n\t 0x96, 0x97, 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9e, 0x9f, 0xa0, 0xa1, 0xa1, 0xa2, 0xa3},\n\t{ 0x80, 0x81, 0x83, 0x84, 0x86, 0x87, 0x89, 0x8a, 0x8b, 0x8d, 0x8e, 0x8f, 0x90, 0x92, 0x93, 0x94,\n\t 0x95, 0x96, 0x97, 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9e, 0x9f, 0xa0, 0xa1, 0xa1, 0xa2},\n\t{ 0x80, 0x81, 0x83, 0x84, 0x86, 0x87, 0x88, 0x8a, 0x8b, 0x8c, 0x8e, 0x8f, 0x90, 0x91, 0x92, 0x93,\n\t 0x95, 0x96, 0x97, 0x98, 0x99, 0x9a, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, 0x9f, 0xa0, 0xa1, 0xa1},\n\t{ 0x80, 0x81, 0x83, 0x84, 0x85, 0x87, 0x88, 0x89, 0x8b, 0x8c, 0x8d, 0x8e, 0x90, 0x91, 
0x92, 0x93,\n\t 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9a, 0x9b, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, 0x9f, 0xa0, 0xa1},\n\t{ 0x80, 0x81, 0x83, 0x84, 0x85, 0x87, 0x88, 0x89, 0x8a, 0x8c, 0x8d, 0x8e, 0x8f, 0x90, 0x91, 0x92,\n\t 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9c, 0x9d, 0x9e, 0x9f, 0x9f, 0xa0}};\n\nshort ST010_Sin(short Theta)\n{\n return ST010_SinTable[(Theta >> 8) & 0xff];\n}\n\nshort ST010_Cos(short Theta)\n{\n return ST010_SinTable[((Theta + 0x4000) >> 8) & 0xff];\n}\n\nvoid ST010_OP01(short x0, short y0, short *x1, short *y1, short *Quadrant, short *Theta)\n{\n if ((x0 < 0) && (y0 < 0))\n {\n *x1 = -x0;\n *y1 = -y0;\n *Quadrant = -0x8000;\n }\n else if (x0 < 0)\n {\n *x1 = y0;\n *y1 = -x0;\n *Quadrant = -0x4000;\n }\n else if (y0 < 0)\n {\n *x1 = -y0;\n *y1 = x0;\n *Quadrant = 0x4000;\n }\n else\n {\n *x1 = x0;\n *y1 = y0;\n *Quadrant = 0x0000;\n }\n\n while ((*x1 > 0x1f) || (*y1 > 0x1f))\n {\n if (*x1 > 1) *x1 >>= 1;\n if (*y1 > 1) *y1 >>= 1;\n }\n\n if (*y1 == 0) *Quadrant += 0x4000;\n\n *Theta = (ST010_ArcTan[*y1][*x1] << 8) ^ *Quadrant;\n}\n\nvoid ST010_Scale(short Multiplier, short X0, short Y0, int *X1, int *Y1)\n{\n *X1 = X0 * Multiplier << 1;\n *Y1 = Y0 * Multiplier << 1;\n}\n\nvoid ST010_Multiply(short Multiplicand, short Multiplier, int *Product)\n{\n *Product = Multiplicand * Multiplier << 1;\n}\n\nvoid ST010_Rotate(short Theta, short X0, short Y0, short *X1, short *Y1)\n{\n *X1 = (Y0 * ST010_Sin(Theta) >> 15) + (X0 * ST010_Cos(Theta) >> 15);\n *Y1 = (Y0 * ST010_Cos(Theta) >> 15) - (X0 * ST010_Sin(Theta) >> 15);\n}\n\nvoid ST010_SortDrivers(uint16_t Positions, uint16_t Places[32], uint16_t Drivers[32])\n{\n bool Sorted;\n uint16_t Temp;\n\n if (Positions > 1)\n {\n do\n {\n int i;\n Sorted = true;\n for (i = 0; i < Positions - 1; i++)\n {\n if (Places[i] < Places[i + 1])\n {\n Temp = Places[i + 1];\n Places[i + 1] = Places[i];\n Places[i] = Temp;\n\n Temp = Drivers[i + 1];\n Drivers[i + 1] = Drivers[i];\n Drivers[i] = Temp;\n\n Sorted = false;\n }\n }\n Positions--;\n } while (!Sorted);\n }\n}\n\n#define ST010_WORD(offset) (*((short *)(SRAM+offset)))\n//#define ST010_WORD(offset) (SRAM[offset + 1] << 8) | SRAM[offset]\n\nvoid ST010DoCommand()\n{\n switch(SRAM[0x20])\n {\n /*\n Calculate track data based on direction coords\n\n Input\n 0x0000-0x0001 : DX (signed)\n 0x0002-0x0003 : DY (signed)\n Output\n 0x0010-0x0011 : Angle (signed)\n */\n\n case 0x01:\n {\n SRAM[0x0006] = SRAM[0x0002];\n SRAM[0x0007] = SRAM[0x0003];\n ST010_OP01(*(short*)&SRAM[0x0000], *(short*)&SRAM[0x0002], (short *)SRAM, (short *)&SRAM[2], (short *)&SRAM[4], (short *)&SRAM[0x10]);\n }\n break;\n\n //Sorts a bunch of values by weight\n\n case 0x02:\n {\n ST010_SortDrivers(*(short*)&SRAM[0x0024], (uint16_t*)&SRAM[0x0040], (uint16_t*)&SRAM[0x0080]);\n }\n break;\n\n /*\n Two Dimensional Coordinate Scale\n\n Input\n 0x0000-0x0001 : X0 (signed)\n 0x0002-0x0003 : Y0 (signed)\n 0x0004-0x0005 : Multiplier (signed)\n Output\n 0x0010-0x0013 : X1 (signed)\n 0x0014-0x0017 : Y1 (signed)\n */\n\n case 0x03:\n {\n ST010_Scale(*(short*)&SRAM[0x0004], *(short*)&SRAM[0x0000], *(short*)&SRAM[0x0002], (int *)&SRAM[0x10], (int *)&SRAM[0x14]);\n }\n break;\n\n //Calculate the vector length of (x,y)\n\n case 0x04:\n {\n int16_t square, x,y;\n x=*((int16_t*)SRAM);\n y=*((int16_t*)&SRAM[2]);\n square=(int16_t)sqrt((double)(y*y+x*x));\n *((int16_t*)&SRAM[0x10])=square;\n break;\n }\n\n //Calculate AI orientation based on specific guidelines\n case 0x05:\n {\n int dx,dy;\n int16_t a1,b1,c1;\n uint16_t o1;\n\n 
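//Scratch outputs for ST010_OP01 below: a1/b1 = reduced vector, c1 = quadrant, o1 = target angle\n 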
bool wrap=false;\n\n //Target (x,y) coordinates\n int16_t ypos_max = ST010_WORD(0x00C0);\n int16_t xpos_max = ST010_WORD(0x00C2);\n\n //Current coordinates and direction\n int32_t ypos = SRAM[0xC4]|(SRAM[0xC5]<<8)|(SRAM[0xC6]<<16)|(SRAM[0xC7]<<24);\n int32_t xpos = SRAM[0xC8]|(SRAM[0xC9]<<8)|(SRAM[0xCA]<<16)|(SRAM[0xCB]<<24);\n uint16_t rot = SRAM[0xCC]|(SRAM[0xCD]<<8);\n\n //Physics\n uint16_t speed = ST010_WORD(0x00D4);\n uint16_t accel = ST010_WORD(0x00D6);\n uint16_t speed_max = ST010_WORD(0x00D8);\n\n //Special condition acknowledgment\n int16_t system = ST010_WORD(0x00DA);\n int16_t flags = ST010_WORD(0x00DC);\n\n //New target coordinates\n int16_t ypos_new = ST010_WORD(0x00DE);\n int16_t xpos_new = ST010_WORD(0x00E0);\n\n //Backup speed\n uint16_t old_speed = speed;\n\n //Mask upper bit\n xpos_new &= 0x7FFF;\n\n //Get the current distance\n dx = xpos_max-(xpos>>16);\n dy = ypos_max-(ypos>>16);\n\n //Quirk: clear and move in9\n SRAM[0xD2]=0xFF;\n SRAM[0xD3]=0xFF;\n SRAM[0xDA]=0;\n SRAM[0xDB]=0;\n\n //Grab the target angle\n ST010_OP01(dy,dx,&a1,&b1,&c1,(int16_t *)&o1);\n\n //Check for wrapping\n if (abs(o1-rot)>0x8000)\n {\n o1+=0x8000;\n rot+=0x8000;\n wrap=true;\n }\n\n //Special case\n if (abs(o1-rot)==0x8000)\n {\n speed = 0x100;\n }\n\n //Slow down for sharp curves\n else if (abs(o1-rot)>=0x1000)\n {\n uint32_t slow = abs(o1-rot);\n slow >>= 4; //Scaling\n speed -= slow;\n }\n //Otherwise accelerate\n else\n {\n speed += accel;\n if (speed > speed_max)\n {\n //Clip speed\n speed = speed_max;\n }\n }\n\n //Prevent negative/positive overflow\n if( abs(old_speed-speed)>0x8000)\n {\n if (old_speed<speed) { speed=0; }\n else { speed=0xff00; }\n }\n\n //Adjust direction by so many degrees\n //Be careful of negative adjustments\n if ((o1>rot && (o1-rot)>0x80) || (o1<rot && (rot-o1)>=0x80))\n {\n if (o1<rot) { rot-=0x280; }\n else if (o1>rot) { rot+=0x280; }\n }\n\n //Turn off wrapping\n if (wrap) { rot-=0x8000; }\n\n //Now check the distances (store for later)\n dx = (xpos_max<<16)-xpos;\n dy = (ypos_max<<16)-ypos;\n dx>>=16;\n dy>>=16;\n\n //If we're in so many units of the target, signal it\n if ((system && (dy<=6 && dy>=-8) && (dx<=126 && dx>=-128)) ||\n (!system && (dx<=6 && dx>=-8) && (dy<=126 && dy>=-128)))\n {\n //Announce our new destination and flag it\n xpos_max = xpos_new&0x7FFF;\n ypos_max = ypos_new;\n flags |= 0x08;\n }\n\n //Update position\n xpos -= (ST010_Cos(rot) * 0x400 >> 15) * (speed >> 8) << 1;\n ypos -= (ST010_Sin(rot) * 0x400 >> 15) * (speed >> 8) << 1;\n\n //Quirk: mask upper byte\n xpos &= 0x1FFFFFFF;\n ypos &= 0x1FFFFFFF;\n\n SRAM[0x00C0]=(uint8_t)(ypos_max);\n SRAM[0x00C1]=(uint8_t)(ypos_max >> 8);\n SRAM[0x00C2]=(uint8_t)(xpos_max);\n SRAM[0x00C3]=(uint8_t)(xpos_max >> 8);\n SRAM[0x00C4]=(uint8_t)(ypos);\n SRAM[0x00C5]=(uint8_t)(ypos >> 8);\n SRAM[0x00C6]=(uint8_t)(ypos >> 16);\n SRAM[0x00C7]=(uint8_t)(ypos >> 24);\n SRAM[0x00C8]=(uint8_t)(xpos);\n SRAM[0x00C9]=(uint8_t)(xpos >> 8);\n SRAM[0x00CA]=(uint8_t)(xpos >> 16);\n SRAM[0x00CB]=(uint8_t)(xpos >> 24);\n SRAM[0x00CC]=(uint8_t)(rot);\n SRAM[0x00CD]=(uint8_t)(rot >> 8);\n SRAM[0x00D4]=(uint8_t)(speed);\n SRAM[0x00D5]=(uint8_t)(speed >> 8);\n SRAM[0x00DC]=(uint8_t)(flags);\n SRAM[0x00DD]=(uint8_t)(flags >> 8);\n }\n break;\n\n /*\n 16-bit Multiplication\n\n Input\n 0x0000-0x0001 : Multiplicand (signed)\n 0x0002-0x0003 : Multiplier (signed)\n Output\n 0x0010-0x0013 : Product (signed)\n */\n\n case 0x06:\n {\n ST010_Multiply(*(short*)&SRAM[0x0000], *(short*)&SRAM[0x0002], (int *)&SRAM[0x10]);\n }\n 
break;\n\n /*\n Mode 7 Raster Data Calculation\n\n Input\n 0x0000-0x0001 : Angle (signed)\n Output\n 0x00f0-0x024f : Mode 7 Matrix A\n 0x0250-0x03af : Mode 7 Matrix B\n 0x03b0-0x050f : Mode 7 Matrix C\n 0x0510-0x066f : Mode 7 Matrix D\n */\n\n case 0x07:\n {\n int16_t data;\n int32_t offset = 0;\n int16_t Theta = ST010_WORD(0x0000);\n\n int32_t line;\n for (line = 0; line < 176; line++)\n {\n //Calculate Mode 7 Matrix A/D data\n data = ST010_M7Scale[line] * ST010_Cos(Theta) >> 15;\n SRAM[0x00f0 + offset]=(uint8_t)(data);\n SRAM[0x00f1 + offset]=(uint8_t)(data >> 8);\n SRAM[0x0510 + offset]=(uint8_t)(data);\n SRAM[0x0511 + offset]=(uint8_t)(data >> 8);\n\n //Calculate Mode 7 Matrix B/C data\n data = ST010_M7Scale[line] * ST010_Sin(Theta) >> 15;\n SRAM[0x0250 + offset]=(uint8_t)(data);\n SRAM[0x0251 + offset]=(uint8_t)(data >> 8);\n\n if (data) { data = ~data; }\n\n SRAM[0x03b0 + offset]=(uint8_t)(data);\n SRAM[0x03b1 + offset]=(uint8_t)(data >> 8);\n\n offset += 2;\n }\n\n //Shift Angle for use with Lookup table\n SRAM[0x00] = SRAM[0x01];\n SRAM[0x01] = 0x00;\n }\n break;\n\n /*\n Two dimensional Coordinate Rotation\n\n Input\n 0x0000-0x0001 : X0 (signed)\n 0x0002-0x0003 : Y0 (signed)\n 0x0004-0x0005 : Angle (signed)\n Output\n 0x0010-0x0011 : X1 (signed)\n 0x0012-0x0013 : Y1 (signed)\n */\n\n case 0x08:\n {\n ST010_Rotate(*(short*)&SRAM[0x0004], *(short*)&SRAM[0x0000], *(short*)&SRAM[0x0002], (short *)&SRAM[0x10], (short *)&SRAM[0x12]);\n }\n break;\n\n default: break;\n }\n\n //Lower signal: op processed\n SRAM[0x20]=0;\n SRAM[0x21]=0;\n}\n\n" }, { "alpha_fraction": 0.7446351647377014, "alphanum_fraction": 0.7575107216835022, "avg_line_length": 21.190475463867188, "blob_id": "4ada0c1d364d3b217b6c25a6f192dfcdb128c3ea", "content_id": "46dd73a84cebd38b63c9785e53d5ebdd2fe16fe1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 466, "license_type": "no_license", "max_line_length": 82, "num_lines": 21, "path": "/src/cpu/zspc/b_config.h", "repo_name": "TeoTwawki/zsnes-archive", "src_encoding": "UTF-8", "text": "// User configuration file\n\n// zspc 0.9.0\n#ifndef BLARGG_CONFIG_H\n#define BLARGG_CONFIG_H\n\n// Uncomment to disable debugging checks\n//#define NDEBUG 1\n\n// Uncomment to enable platform-specific (and possibly non-portable) optimizations\n#define BLARGG_NONPORTABLE 1\n\n// Uncomment if you get errors in the bool section of blargg_common.h\n//#define BLARGG_COMPILER_HAS_BOOL 1\n\n// Use standard config.h if present\n#ifdef HAVE_CONFIG_H\n\t#include \"config.h\"\n#endif\n\n#endif\n" }, { "alpha_fraction": 0.7081126570701599, "alphanum_fraction": 0.7173125147819519, "avg_line_length": 29.658119201660156, "blob_id": "6af4cf060d8cacfc96a62e1ab5210de95594514c", "content_id": "2a67e1305e716a29245a0905aed004ef0c57134c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 3587, "license_type": "no_license", "max_line_length": 94, "num_lines": 117, "path": "/src/zpath.h", "repo_name": "TeoTwawki/zsnes-archive", "src_encoding": "UTF-8", "text": "/*\nCopyright (C) 1997-2008 ZSNES Team ( zsKnight, _Demo_, pagefault, Nach )\n\nhttp://www.zsnes.com\nhttp://sourceforge.net/projects/zsnes\nhttps://zsnes.bountysource.com\n\nThis program is free software; you can redistribute it and/or\nmodify it under the terms of the GNU General Public License\nversion 2 as published by the Free Software Foundation.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without 
even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with this program; if not, write to the Free Software\nFoundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.\n*/\n\n#ifndef ZPATH_H\n#define ZPATH_H\n\n#include <zlib.h>\n#include <stdio.h>\n#include <sys/stat.h>\n\n#include \"zip/zunzip.h\"\n\n#ifndef NO_JMA\n#include \"jma/zsnesjma.h\"\n#endif\n\n#ifdef _MSC_VER\n#define F_OK 0\n#define X_OK F_OK //Drop down to F_OK because MSVC is stupid\n#define W_OK 2\n#define R_OK 4\ntypedef unsigned short mode_t;\n#endif\n\n#if !defined(__cplusplus) && !defined(bool)\n//C++ style code in C\n#include <stdbool.h>\n#endif\n\n#ifdef __UNIXSDL__\n#define DIR_SLASH \"/\"\n#define DIR_SLASH_C '/'\n#define DIR_SLASH_C_OTHER '\\\\'\n#define ROOT_LEN 1 //\"/\"\n#define DIR_R_ACCESS (R_OK|X_OK)\n#define IS_ABSOLUTE(path) ((*(path) == '/') || (*(path) == '~'))\n#else\n#define DIR_SLASH \"\\\\\"\n#define DIR_SLASH_C '\\\\'\n#define DIR_SLASH_C_OTHER '/'\n#define ROOT_LEN 3 //\"A:\\\"\n#define DIR_R_ACCESS (F_OK)\n#define IS_ABSOLUTE(path) ((*(path) == '\\\\') || (*(path) && ((path)[1] == ':')))\n#endif\n\n#define PATH_SIZE 4096\n#define NAME_SIZE 512\n#define realpath_native realpath\n\n#ifndef S_ISDIR\n#define S_ISDIR(m) (((m) & S_IFMT) == S_IFDIR)\n#endif\n\nextern char ZCfgFile[];\nextern char *ZStartPath, *ZCfgPath, *ZSramPath, *ZRomPath;\nextern char *ZSnapPath, *ZSpcPath, *ZIpsPath, *ZMoviePath;\nextern char *ZChtPath, *ZComboPath, *ZInpPath, *ZSStatePath;\nextern char *ZCartName, *ZSaveName, *ZStateName, *ZSaveST2Name;\n\nbool init_paths(char *launch_command);\nvoid init_save_paths();\nbool init_rom_path(char *path);\n\nchar *strdupcat(const char *str1, const char *str2);\n\nint access_dir(const char *path, const char *file, int mode);\nint stat_dir(const char *path, const char *file, struct stat *buf);\nFILE *fopen_dir(const char *path, const char *file, const char *mode);\ngzFile gzopen_dir(const char *path, const char *file, const char *mode);\nunzFile unzopen_dir(const char *path, const char *file);\n#ifndef NO_JMA\nvoid load_jma_file_dir(const char *path, const char *file);\n#endif\nint remove_dir(const char *path, const char *file);\nint mkdir_dir(const char *path, const char *dir);\nchar *realpath_dir(const char *path, const char *file, char *buf);\nFILE *fdreopen_dir(const char *path, const char *file, const char *mode, int fd);\nint system_dir(const char *path, const char *command);\nFILE *popen_dir(const char *path, char *command, const char *type);\n\nvoid natify_slashes(char *str);\nchar *strcutslash(char *str);\nchar *strcatslash(char *str);\nvoid setextension(char *base, const char *ext);\nbool isextension(const char *fname, const char *ext);\nvoid strdirname(char *str);\nvoid strbasename(char *str);\nbool mkpath(const char *path, mode_t mode);\n\n#ifdef __UNIXSDL__\nchar *realpath_link(const char *path, char *resolved_path);\nchar *realpath_tilde(const char *path, char *resolved_path);\n#else\n#define realpath_link realpath_native\n#endif\n\nvoid psr_cfg_run(unsigned char (*psr_func)(const char *), const char *dir, const char *fname);\n\n#endif\n" }, { "alpha_fraction": 0.6819620132446289, "alphanum_fraction": 0.7009493708610535, "avg_line_length": 23.30769157409668, "blob_id": "e5c76a2ad441cb280a8e836f11dea0096f55e178", "content_id": "f8e9b1ef895eb2acde722eaf443b51a15f955d9e", "detected_licenses": [], "is_generated": false, 
"is_vendor": false, "language": "C", "length_bytes": 632, "license_type": "no_license", "max_line_length": 79, "num_lines": 26, "path": "/src/cpu/zspc/disasm.h", "repo_name": "TeoTwawki/zsnes-archive", "src_encoding": "UTF-8", "text": "// SNES SPC-700 disassembler\n\n#ifndef SPC_DISASM_H\n#define SPC_DISASM_H\n\n#ifdef __cplusplus\n\textern \"C\" {\n#endif\n\n// Length of instruction (1, 2, or 3 bytes)\nint spc_disasm_len( int opcode );\n\n// Disassemble instruction into output string and return length of instruction.\n// opcode = mem [addr], data = mem [addr + 1], data2 = mem [addr + 2]\nenum { spc_disasm_max = 32 }; // maximum length of output string\nint spc_disasm( unsigned addr, int opcode, int data, int data2, char* out );\n\n// Returns template form of opcode without any values filled in\nconst char* spc_disasm_form( int opcode );\n\n\n#ifdef __cplusplus\n\t}\n#endif\n\n#endif\n" }, { "alpha_fraction": 0.7231040596961975, "alphanum_fraction": 0.7278071641921997, "avg_line_length": 26.435483932495117, "blob_id": "dbc7a25122a7d212be0a4f7a9791b5f6bbc313a0", "content_id": "0a7174f0a8b411dc3fb490ebf1a46a97d9b2fb65", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1701, "license_type": "no_license", "max_line_length": 114, "num_lines": 62, "path": "/src/cpu/zspc/resamp.h", "repo_name": "TeoTwawki/zsnes-archive", "src_encoding": "UTF-8", "text": "/* Simple sample rate increase using linear interpolation */\n\n#ifndef RESAMPLER_H\n#define RESAMPLER_H\n\n#include <assert.h>\n\n#ifdef __cplusplus\n\textern \"C\" {\n#endif\n\n/* All input and output is in stereo. Individual samples are counted, not pairs,\nso all counts should be a multiple of 2. */\n\n/* Changes input/output ratio. Does *not* clear buffer, allowing changes to ratio\nwithout disturbing sound (in case you want to make slight adjustments in real-time). 
*/\nvoid resampler_set_rate( int in_rate, int out_rate );\n\n/* Clears input buffer */\nvoid resampler_clear( void );\n\n/* Number of samples that can be written to input buffer */\nstatic int resampler_max_write( void );\n\n/* Pointer to where new input samples should be written */\nstatic short* resampler_buffer( void );\n\n/* Tells resampler that 'count' samples have been written to input buffer */\nstatic void resampler_write( int count );\n\n/* Resamples and generates at most 'count' output samples and returns number of\nsamples actually written to '*out' */\nint resampler_read( short* out, int count );\n\n\n#ifndef RESAMPLER_BUF_SIZE\n\t#define RESAMPLER_BUF_SIZE 4096\n#endif\n\n/* Private */\nextern short resampler_buf [RESAMPLER_BUF_SIZE + 8];\nextern short* resampler_write_pos;\n\nstatic inline int resampler_max_write( void ) { return resampler_buf + RESAMPLER_BUF_SIZE - resampler_write_pos; }\n\nstatic inline short* resampler_buffer( void ) { return resampler_write_pos; }\n\nstatic inline void resampler_write( int count )\n{\n\tassert( count % 2 == 0 ); /* must be even */\n\n\tresampler_write_pos += count;\n\n\t/* fails if you overfill buffer */\n\tassert( resampler_write_pos <= &resampler_buf [RESAMPLER_BUF_SIZE] );\n}\n\n#ifdef __cplusplus\n\t}\n#endif\n\n#endif\n" }, { "alpha_fraction": 0.6932515501976013, "alphanum_fraction": 0.7303169965744019, "avg_line_length": 28.636363983154297, "blob_id": "78d3ff7867bcd6db5f54ea51ad8a767383807e39", "content_id": "420858603618302fb6d9e877c10f8359c0939e40", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 3912, "license_type": "no_license", "max_line_length": 105, "num_lines": 132, "path": "/src/jma/iiostrm.cpp", "repo_name": "TeoTwawki/zsnes-archive", "src_encoding": "UTF-8", "text": "/*\nCopyright (C) 2005-2008 NSRT Team ( http://nsrt.edgeemu.com )\nCopyright (C) 2002 Andrea Mazzoleni ( http://advancemame.sf.net )\nCopyright (C) 2001-4 Igor Pavlov ( http://www.7-zip.org )\n\nThis library is free software; you can redistribute it and/or\nmodify it under the terms of the GNU Lesser General Public\nLicense version 2.1 as published by the Free Software Foundation.\n\nThis library is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU\nLesser General Public License for more details.\n\nYou should have received a copy of the GNU Lesser General Public\nLicense along with this library; if not, write to the Free Software\nFoundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA\n*/\n\n#include \"portable.h\"\n#include \"iiostrm.h\"\n#include \"crc32.h\"\n\nHRESULT ISequentialInStream_Array::Read(void *aData, UINT32 aSize, UINT32 *aProcessedSize)\n{\n if (aSize > size)\n {\n aSize = size;\n }\n\n *aProcessedSize = aSize;\n memcpy(aData, data, aSize);\n size -= aSize;\n data += aSize;\n return(S_OK);\n}\n\nHRESULT ISequentialOutStream_Array::Write(const void *aData, UINT32 aSize, UINT32 *aProcessedSize)\n{\n if (aSize > size)\n {\n overflow = true;\n aSize = size;\n }\n\n *aProcessedSize = aSize;\n memcpy(data, aData, aSize);\n size -= aSize;\n data += aSize;\n total += aSize;\n return(S_OK);\n}\n\nHRESULT ISequentialInStream_String::Read(void *aData, UINT32 aSize, UINT32 *aProcessedSize)\n{\n if (aSize > data.size())\n {\n aSize = data.size();\n }\n\n *aProcessedSize = aSize;\n memcpy(aData, data.c_str(), aSize);\n data.erase(0, aSize);\n return(S_OK);\n}\n\nHRESULT ISequentialOutStream_String::Write(const void *aData, UINT32 aSize, UINT32 *aProcessedSize)\n{\n *aProcessedSize = aSize;\n data.append((const char *)aData, aSize);\n total += aSize;\n return(S_OK);\n}\n\nHRESULT ISequentialInStream_Istream::Read(void *aData, UINT32 aSize, UINT32 *aProcessedSize)\n{\n data.read((char *)aData, aSize);\n *aProcessedSize = data.gcount();\n return(S_OK);\n}\n\nHRESULT ISequentialOutStream_Ostream::Write(const void *aData, UINT32 aSize, UINT32 *aProcessedSize)\n{\n *aProcessedSize = aSize;\n data.write((char *)aData, aSize);\n total += aSize;\n return(S_OK);\n}\n\n\n\nHRESULT ISequentialInStreamCRC32_Array::Read(void *aData, UINT32 aSize, UINT32 *aProcessedSize)\n{\n ISequentialInStream_Array::Read(aData, aSize, aProcessedSize);\n crc32 = CRC32lib::CRC32((const unsigned char *)aData, *aProcessedSize, ~crc32);\n return(S_OK);\n}\n\nHRESULT ISequentialOutStreamCRC32_Array::Write(const void *aData, UINT32 aSize, UINT32 *aProcessedSize)\n{\n ISequentialOutStream_Array::Write(aData, aSize, aProcessedSize);\n crc32 = CRC32lib::CRC32((const unsigned char *)aData, *aProcessedSize, ~crc32);\n return(S_OK);\n}\n\nHRESULT ISequentialInStreamCRC32_String::Read(void *aData, UINT32 aSize, UINT32 *aProcessedSize)\n{\n ISequentialInStream_String::Read(aData, aSize, aProcessedSize);\n crc32 = CRC32lib::CRC32((const unsigned char *)aData, *aProcessedSize, ~crc32);\n return(S_OK);\n}\n\nHRESULT ISequentialOutStreamCRC32_String::Write(const void *aData, UINT32 aSize, UINT32 *aProcessedSize)\n{\n ISequentialOutStream_String::Write(aData, aSize, aProcessedSize);\n crc32 = CRC32lib::CRC32((const unsigned char *)aData, *aProcessedSize, ~crc32);\n return(S_OK);\n}\n\nHRESULT ISequentialInStreamCRC32_Istream::Read(void *aData, UINT32 aSize, UINT32 *aProcessedSize)\n{\n ISequentialInStream_Istream::Read(aData, aSize, aProcessedSize);\n crc32 = CRC32lib::CRC32((const unsigned char *)aData, *aProcessedSize, ~crc32);\n return(S_OK);\n}\n\nHRESULT ISequentialOutStreamCRC32_Ostream::Write(const void *aData, UINT32 aSize, UINT32 *aProcessedSize)\n{\n ISequentialOutStream_Ostream::Write(aData, aSize, aProcessedSize);\n crc32 = CRC32lib::CRC32((const unsigned char *)aData, *aProcessedSize, ~crc32);\n return(S_OK);\n}\n" }, { "alpha_fraction": 0.5818461179733276, "alphanum_fraction": 0.6255120038986206, "avg_line_length": 28.98245620727539, 
"blob_id": "489c408a5837bd450cb9770532160c8326e090f3", "content_id": "aa4a87297a2cc91e0eae8dea99887b49e4a053f4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 27344, "license_type": "no_license", "max_line_length": 179, "num_lines": 912, "path": "/src/chips/7110emu.c", "repo_name": "TeoTwawki/zsnes-archive", "src_encoding": "UTF-8", "text": "/*\nCopyright (C) 1997-2008 ZSNES Team ( zsKnight, _Demo_, pagefault, Nach )\n\nhttp://www.zsnes.com\nhttp://sourceforge.net/projects/zsnes\n\nThis program is free software; you can redistribute it and/or\nmodify it under the terms of the GNU General Public License\nversion 2 as published by the Free Software Foundation.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with this program; if not, write to the Free Software\nFoundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.\n*/\n\n#include <stdio.h>\n#include <stdlib.h>\n#include <stdint.h>\n#include <stdbool.h>\n#include <string.h>\n#include \"../zpath.h\"\n#include \"../cfg.h\"\n\n#ifndef __GNUC__\n#define INLINE static\n#else\n#define INLINE static inline\n#endif\n\n/*\nDecompression Code by anomie, Nach, and _Demo_\nBased on a reference implementation by neviksti\n\n\nYou may use the code here under the GPL version 2 license.\nIf you would like to use the decomression code (and only the\ndecompression code) within a program incompatible with the GPLv2,\nyou may do so under the following 6 conditions:\n\n1) The program you are using it in is a Super Nintendo emulator.\n2) Your emulator's source code is publicly available.\n3) In your emulator's credits and documentation's credits you\n thank anomie, Nach, and _Demo_.\n4) In your changelogs provided with your emulator or on your\n website; thanks anomie, Nach, and _Demo_ for the SPC7110\n decompression code.\n5) Any improvements you make to the decompression code are\n sent to the developers of ZSNES.\n6) The GPLv2 header from the top of this file and this notice\n is replicated at the top of the copied decompression code.\n\n\nIf you would like to use any other code here, such as the graphics\ncaching code, or register emulation under a GPLv2 incompatible\nlicense, or you would like to use the decompression code without\ncomplying with the 6 conditions listed above, you must contact us\nand request permission.\n*/\n\nstatic uint8_t EvolutionTable[53][4] =\n{\n //prob, nextlps, nextmps, toggle invert\n {0x5a, 1, 1,1}, //0 l,m\n {0x25, 6, 2,0}, //1 l,m\n {0x11, 8, 3,0}, //2 l,m\n {0x08, 10, 4,0}, //3 ,m\n {0x03, 12, 5,0}, //4 ,m\n {0x01, 15, 5,0}, //5 ,m\n\n {0x5a, 7, 7,1}, //6 l,\n {0x3f, 19, 8,0}, //7 l,m\n {0x2c, 21, 9,0}, //8 l,m\n {0x20, 22, 10,0}, //9 ,m\n {0x17, 23, 11,0}, //10 ,m\n {0x11, 25, 12,0}, //11 ,m\n {0x0c, 26, 13,0}, //12 ,m\n {0x09, 28, 14,0}, //13 ,m\n {0x07, 29, 15,0}, //14 ,m\n {0x05, 31, 16,0}, //15 ,m\n {0x04, 32, 17,0}, //16 ,m\n {0x03, 34, 18,0}, //17 ,m\n {0x02, 35, 5,0}, //18 ,m\n\n {0x5a, 20, 20,1}, //19 l,m\n {0x48, 39, 21,0}, //20 l,m\n {0x3a, 40, 22,0}, //21 l,m\n {0x2e, 42, 23,0}, //22 l,m\n {0x26, 44, 24,0}, //23 l,m\n {0x1f, 45, 25,0}, //24 l,m\n {0x19, 46, 26,0}, //25 l,m\n {0x15, 25, 27,0}, //26 l,m\n {0x11, 26, 28,0}, //27 l,m\n {0x0e, 26, 29,0}, //28 l,m\n {0x0b, 27, 30,0}, //29 ,m\n {0x09, 
28, 31,0}, //30 ,m\n  {0x08, 29, 32,0}, //31 l,m\n  {0x07, 30, 33,0}, //32 l,m\n  {0x05, 31, 34,0}, //33 l,m <--- changed lps\n  {0x04, 33, 35,0}, //34 ,m ... this is NOT skipped\n  {0x04, 33, 36,0}, //35 ,m\n  {0x03, 34, 37,0}, //36 ,m\n  {0x02, 35, 38,0}, //37 ,m ... this is NOT skipped\n  {0x02, 36, 5,0}, //38 ,m\n\n  {0x58, 39, 40,1}, //39 l,m\n  {0x4d, 47, 41,0}, //40 l,m\n  {0x43, 48, 42,0}, //41 ,m\n  {0x3b, 49, 43,0}, //42 ,m\n  {0x34, 50, 44,0}, //43 l,m\n  {0x2e, 51, 45,0}, //44 l,m\n  {0x29, 44, 46,0}, //45 l,m\n  {0x25, 45, 24,0}, //46 ,m\n\n  {0x56, 47, 48,1}, //47 l,m\n  {0x4f, 47, 49,0}, //48 l,m\n  {0x47, 48, 50,0}, //49 l,m\n  {0x41, 49, 51,0}, //50 l,m\n  {0x3c, 50, 52,0}, //51 l,m\n  {0x37, 51, 43,0} //52 ,m\n};\n\n#define PROB(x) EvolutionTable[Contexts[x].index][0]\n#define NEXT_LPS(x) EvolutionTable[Contexts[x].index][1]\n#define NEXT_MPS(x) EvolutionTable[Contexts[x].index][2]\n#define TOGGLE_INVERT(x) EvolutionTable[Contexts[x].index][3]\n\nstatic struct\n{\n  uint8_t index;\n  uint8_t invert;\n} Contexts[32];\nstatic uint8_t top,val;\nstatic uint32_t in;\nstatic int mode,inverts,in_count;\nstatic uint8_t *datain;\nstatic uint8_t buffer[32];\nstatic int buf_idx;\nstatic uint32_t pixelorder[16];\nstatic uint32_t pixel_left, pixel_above, pixel_above_left, pixel_context;\n\n//Note that the following function doesn't necessarily work right when x is 0\n//So don't use it outside of SPC7110 code without a check that x isn't 0,\n//or you are happy with input = 0, output = 0.\nINLINE uint8_t highest_bit_position(uint8_t x)\n{\n  #if defined(__GNUC__) && defined(__i386__)\n  uint16_t x2 = x;\n  __asm__ __volatile__(\"bsrw %0,%0\" : \"=r\" (x2) : \"0\" (x2));\n  return(x2);\n  #else\n  if (x>>4)\n  {\n    x = ((0xFFA4>>((x>>4)&0xE))&3)+4;\n  }\n  else\n  {\n    x = (0xFFA4>>(x&0xE))&3;\n  }\n  return(x);\n  #endif\n}\n\nINLINE void update_context(uint8_t con)\n{\n  uint8_t prob;\n  int flag_lps,shift;\n\n  //get PROB\n  prob = PROB(con);\n\n  //get symbol\n  top = top-prob;\n  if(val <= top)\n  {\n    //mps\n    flag_lps=0;\n  }\n  else\n  {\n    //lps\n    val = val - top - 1;\n    top = prob - 1;\n    flag_lps=1;\n  }\n\n  //renormalize\n  shift = 0;\n  if (top < 0x7F)\n  {\n    if (in_count < 8)\n    {\n      in = (in << 8) | *datain++;\n      in_count += 8;\n    }\n    shift = 7-highest_bit_position(top+1); //1+(top<63)+(top<31)+(top<15)+(top<7)+(top<3)+!top;\n    top = ((top+1)<<shift)-1;\n    val = (val<<shift)+((in>>(in_count-shift))&((1<<shift)-1));\n    in_count -= shift;\n  }\n\n  //update processing info\n  //update context state\n  if (flag_lps)\n  {\n    inverts = (inverts<<1)+(1-Contexts[con].invert);\n    if (TOGGLE_INVERT(con)) { Contexts[con].invert^=1; }\n    Contexts[con].index = NEXT_LPS(con);\n  }\n  else\n  {\n    inverts = (inverts<<1)+Contexts[con].invert;\n    if (shift) { Contexts[con].index = NEXT_MPS(con); }\n  }\n}\n\n\n\n/*\nFor future calls, the value of pixel_left must be shifted into the first position,\nwith the rest of the array moved after the first position.\nHowever, a pixel must be returned. The pixel returned is chosen by sorting\npixel_left, pixel_above, and pixel_above_left into the first three positions in a\ncopied array, with the rest of the array moved after the positions containing\npixel_left, pixel_above, and pixel_above_left. 
Then index into this copied array.\nHowever, this copied array is never needed again.\nA stable copy and move/sort of 3 values could be done optimally in 4 loops.\nBut since the array is then thrown away, it would be better to find the appropriate\nvalues without needing to copy and move/sort.\n\nThese defines do a copy and move/sort:\n\n#define PIXEL_SHIFT(array, value) \\\n  temp = array[0]; \\\n  for(m = 0; temp != value; ++m) \\\n  { \\\n    temp2 = temp; \\\n    temp = array[m+1]; \\\n    array[m+1] = temp2; \\\n  } \\\n  array[0] = temp\n\n#define PIXEL_SHIFT_ALL(ct) \\\n  PIXEL_SHIFT(pixelorder, pixel_left); \\\n  memcpy(realorder, pixelorder, ct*sizeof(uint32_t)); \\\n  PIXEL_SHIFT(realorder, pixel_above_left); \\\n  PIXEL_SHIFT(realorder, pixel_above); \\\n  PIXEL_SHIFT(realorder, pixel_left)\n\nThe function below moves pixel_left where needed, but instead of copying and sorting\nto find the pixel to return, it uses the following algorithm:\nCheck for equality between pixel_left, pixel_above, and pixel_above_left, and\ndetermine if any of the first 3 positions of the array are desired. In those cases,\nthe value can be returned immediately. In other cases, only a single pass is required\nto go through the array to account for pixel_above and pixel_above_left (pixel_left\nis always at the beginning), and then directly return the value.\n\nThis method avoids needing a whole array, a copy, and extra sorting loops, replacing\nthem with a method requiring at most a single loop through the array.\n*/\n\nINLINE uint32_t pixel_shift(int index)\n{\n  uint32_t *p;\n\n  if (*pixelorder != pixel_left)\n  {\n    uint32_t previous = pixel_left;\n    for (p = pixelorder; *p != pixel_left; ++p)\n    {\n      uint32_t hold = *p;\n      *p = previous;\n      previous = hold;\n    }\n    *p = previous;\n  }\n\n  if (index)\n  {\n    switch (pixel_context)\n    {\n      case 0: //((pixel_left == pixel_above) && (pixel_above == pixel_above_left))\n        return(pixelorder[index]);\n\n      case 1: //(pixel_left == pixel_above)\n        if (index == 1) { return(pixel_above_left); }\n        for (p = pixelorder+1; p < pixelorder+index; ++p)\n        {\n          if (*p == pixel_above_left) { return(pixelorder[index]); }\n        }\n        return(pixelorder[index-1]);\n\n      case 2: case 3: //((pixel_left == pixel_above_left) || (pixel_above == pixel_above_left))\n        if (index == 1) { return(pixel_above); }\n        for (p = pixelorder+1; p < pixelorder+index; ++p)\n        {\n          if (*p == pixel_above) { return(pixelorder[index]); }\n        }\n        return(pixelorder[index-1]);\n\n      case 4: //pixel_left != pixel_above != pixel_above_left != pixel_left\n        if (index == 1) { return(pixel_above); }\n        if (index == 2) { return(pixel_above_left); }\n        for (p = pixelorder+1; p < pixelorder+index; ++p)\n        {\n          if (*p == pixel_above)\n          {\n            for (p = p+1; p < pixelorder+index; ++p)\n            {\n              if (*p == pixel_above_left) { return(pixelorder[index]); }\n            }\n            return((pixelorder[index-1] == pixel_above) ? pixelorder[index-2] : pixelorder[index-1]);\n          }\n          if (*p == pixel_above_left)\n          {\n            for (p = p+1; p < pixelorder+index; ++p)\n            {\n              if (*p == pixel_above) { return(pixelorder[index]); }\n            }\n            return((pixelorder[index-1] == pixel_above_left) ? 
pixelorder[index-2] : pixelorder[index-1]);\n }\n }\n return(pixelorder[index-2]);\n }\n }\n return(pixel_left); //pixel_left is always index 0\n}\n\nstatic void InitDecompression(int inmode, uint8_t *data)\n{\n int i;\n\n mode=inmode;\n datain=data;\n top=0xFF;\n val=*datain++;\n in=*datain++;\n inverts=0;\n in_count=8;\n memset(Contexts, 0, sizeof(Contexts));\n memset(buffer, 0, sizeof(buffer));\n buf_idx=32;\n for (i=0; i<16; i++) { pixelorder[i]=((i&8)<<21)|((i&4)<<14)|((i&2)<<7)|(i&1); }\n pixel_left=pixel_above=pixel_above_left=0;\n}\n\n#define CONTEXT() (pixel_left == pixel_above ? pixel_above != pixel_above_left : pixel_above == pixel_above_left ? 2 : 3 + (pixel_left != pixel_above_left))\n//#define CONTEXT() ((pixel_left==pixel_above && pixel_above==pixel_above_left)?0:(pixel_left==pixel_above)?1:(pixel_above==pixel_above_left)?2:(pixel_left==pixel_above_left)?3:4)\n\n\nstatic uint8_t DecompressByte(void)\n{\n int i, bit;\n uint8_t con;\n uint32_t pixel;\n uint32_t out, out2;\n\n if (buf_idx>=32)\n {\n switch (mode)\n {\n case 0:\n out=(buffer[30]<<8)+buffer[31];\n for (i=0; i<32; i++)\n {\n update_context(0);\n update_context(1+(inverts&1));\n update_context(3+(inverts&3));\n update_context(7+(inverts&7));\n update_context(15);\n update_context(16+(inverts&1));\n update_context(18+(inverts&3));\n update_context(22+(inverts&7));\n out = (out<<8) + (((out>>8)^inverts)&0xff);\n buffer[i] = (uint8_t)out;\n }\n break;\n\n case 1:\n out=(buffer[30]<<8)+buffer[31];\n for (i=0; i<32; i+=2)\n {\n for (bit=7; bit>=0; bit--)\n {\n //get first symbol context\n con=pixel_context=CONTEXT();\n update_context(con);\n\n //get context of second symbol\n con = 5 + con*2 + (inverts&1);\n update_context(con);\n\n // Update pixel map\n pixel = pixel_shift(inverts&3);\n\n // Update reference pixels\n pixel_left = ((out >> 0) & 0x0101);\n pixel_above = ((out >> 6) & 0x0101);\n pixel_above_left = ((out >> 7) & 0x0101);\n\n //get new pixel\n out = ((out<<1)&0xfefe) + pixel;\n }\n buffer[i]=(uint8_t)(out >> 8);\n buffer[i+1]=(uint8_t)(out >> 0);\n }\n break;\n\n case 2:\n out=(buffer[14]<<24)+(buffer[15]<<16)+(buffer[30]<<8)+buffer[31];\n for (i=0; i<16; i+=2)\n {\n out2=0;\n for (bit=7; bit>=0; bit--)\n {\n //// First bit\n update_context(0);\n\n //// Second bit\n con = 1 + (inverts&1);\n update_context(con);\n\n pixel_context = CONTEXT();\n\n //// Third bit\n if (con == 1)\n {\n con = 5 + 5*(inverts&1) + pixel_context;\n }\n else\n {\n con = 3 + (inverts&1);\n }\n update_context(con);\n\n //// Fourth bit\n if (con<10)\n {\n con = 9 + con*2 + (inverts&1);\n }\n else\n {\n con = 29 + (inverts&1);\n }\n update_context(con);\n\n pixel = pixel_shift(inverts&0x0f);\n\n // Update reference pixels\n pixel_left = pixel;\n pixel_above = ((out >> (bit-1)) & 0x01010101);\n pixel_above_left = ((out >> bit) & 0x01010101);\n\n //get new pixel\n out2 += pixel<<bit;\n }\n // Miscalculated 'pixel_above' at the end of the loop above, so fix it now\n pixel_above = ((out2 >> 7) & 0x01010101);\n out=out2;\n\n buffer[i+ 0] = (uint8_t)(out >> 24);\n buffer[i+ 1] = (uint8_t)(out >> 16);\n buffer[i+16] = (uint8_t)(out >> 8);\n buffer[i+17] = (uint8_t)(out >> 0);\n }\n break;\n }\n buf_idx=0;\n }\n return(buffer[buf_idx++]);\n}\n\nstatic void DecompressSkipBytesBuffer(uint8_t *buffer, uint16_t amount)\n{\n while (amount--)\n {\n *buffer++ = DecompressByte();\n }\n}\n\nstatic void DecompressSkipBytes(uint16_t amount)\n{\n while (amount--)\n {\n DecompressByte();\n }\n}\n\n//Communication Code\nextern uint32_t CRC32;\nextern uint8_t 
SPCCompressionRegs[];\nextern uint8_t *romdata;\n\n#define READ_WORD16_LE(pos) (*(uint16_t *)(pos))\n#define WRITE_WORD16_LE(pos, val) (*(uint16_t *)(pos) = (val))\n\n#define READ_WORD24_LE(pos) ((uint32_t)(*(uint16_t *)(pos)) + (((uint32_t)((pos)[2])) << 16))\n#define READ_WORD24_BE(pos) ((((uint32_t)((pos)[0])) << 16) + (((uint32_t)((pos)[1])) << 8) + (pos)[2])\n\n#define NUM_ELEMENTS(x) (sizeof((x))/sizeof((x)[0]))\n\n\n\n//Caching Code\n\n#define TABLE_AMOUNT 256\n#define LOOKUP_AMOUNT 64\n\nstruct decompression_table\n{\n uint8_t *data;\n uint16_t length;\n};\n\nstruct address_lookup\n{\n uint32_t address;\n struct decompression_table *table;\n};\n\nstatic struct\n{\n uint32_t rom_crc32;\n\n uint32_t last_address;\n uint8_t last_entry;\n\n uint8_t *compression_begin;\n uint8_t compression_mode;\n\n uint16_t decompression_used_length;\n\n uint8_t *graphics_buffer;\n uint32_t graphics_buffer_used;\n\n struct decompression_table *tables;\n uint16_t table_used;\n struct decompression_table *table_current;\n\n struct address_lookup *lookup;\n uint8_t lookup_used;\n} decompression_state;\n\n\nstatic void save_decompression_state()\n{\n if (decompression_state.graphics_buffer)\n {\n char fname[13];\n FILE *fp_idx;\n\n sprintf(fname, \"%08X.idx\", decompression_state.rom_crc32);\n if ((fp_idx = fopen_dir(ZSramPath, fname, \"wb\")))\n {\n gzFile fp_gfx;\n\n sprintf(fname, \"%08X.gfx\", decompression_state.rom_crc32);\n if ((fp_gfx = gzopen_dir(ZSramPath, fname, \"wb9\")))\n {\n struct address_lookup *lookup_ptr = decompression_state.lookup,\n *lookup_end = decompression_state.lookup+decompression_state.lookup_used;\n for (; lookup_ptr < lookup_end; ++lookup_ptr)\n {\n unsigned int entry_index;\n for (entry_index = 0; entry_index < 256; ++entry_index)\n {\n if (lookup_ptr->table[entry_index].length) //We only write graphics that have completed decompressing\n {\n fwrite(&lookup_ptr->address, 3, 1, fp_idx);\n fwrite(&entry_index, 1, 1, fp_idx);\n fwrite(&lookup_ptr->table[entry_index].length, 2, 1, fp_idx);\n\n gzwrite(fp_gfx, lookup_ptr->table[entry_index].data, lookup_ptr->table[entry_index].length);\n }\n }\n }\n gzclose(fp_gfx);\n }\n fclose(fp_idx);\n }\n }\n}\n\nstatic void load_decompression_state()\n{\n if (decompression_state.graphics_buffer)\n {\n char fname[13];\n FILE *fp_idx;\n\n sprintf(fname, \"%08X.idx\", decompression_state.rom_crc32);\n if ((fp_idx = fopen_dir(ZSramPath, fname, \"rb\")))\n {\n gzFile fp_gfx;\n\n sprintf(fname, \"%08X.gfx\", decompression_state.rom_crc32);\n if ((fp_gfx = gzopen_dir(ZSramPath, fname, \"rb\")))\n {\n struct address_lookup *lookup_ptr = decompression_state.lookup-1;\n\n uint32_t address = 0, last_address = 0;\n uint16_t length;\n uint8_t entry;\n\n for (;;)\n {\n fread(&address, 3, 1, fp_idx);\n fread(&entry, 1, 1, fp_idx);\n fread(&length, 2, 1, fp_idx);\n\n if (feof(fp_idx)) { break; }\n\n if (last_address != address)\n {\n ++decompression_state.lookup_used;\n (++lookup_ptr)->address = last_address = address;\n lookup_ptr->table = decompression_state.tables+decompression_state.table_used;\n decompression_state.table_used += TABLE_AMOUNT;\n }\n lookup_ptr->table[entry].data = decompression_state.graphics_buffer+decompression_state.graphics_buffer_used;\n lookup_ptr->table[entry].length = length;\n decompression_state.graphics_buffer_used += length;\n\n gzread(fp_gfx, lookup_ptr->table[entry].data, length);\n }\n gzclose(fp_gfx);\n }\n fclose(fp_idx);\n }\n }\n}\n\n\nstatic bool SPC7110_init_decompression_state()\n{\n if (SPC7110Cache)\n {\n 
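//Byte sizes for the address lookup array and the per-address decompression tables\n    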
size_t lookup_bytes = LOOKUP_AMOUNT*sizeof(struct address_lookup);\n size_t table_bytes = TABLE_AMOUNT*LOOKUP_AMOUNT*sizeof(struct decompression_table);\n\n if (!decompression_state.graphics_buffer)\n {\n memset(&decompression_state, 0, sizeof(decompression_state));\n\n decompression_state.graphics_buffer = malloc(0x1000000); //16MB\n if (decompression_state.graphics_buffer)\n {\n decompression_state.lookup = malloc(lookup_bytes);\n if (decompression_state.lookup)\n {\n decompression_state.tables = malloc(table_bytes);\n if (decompression_state.tables)\n {\n memset(decompression_state.tables, 0, table_bytes);\n decompression_state.rom_crc32 = CRC32;\n load_decompression_state();\n }\n else\n {\n free(decompression_state.lookup);\n free(decompression_state.graphics_buffer);\n }\n }\n else\n {\n free(decompression_state.graphics_buffer);\n }\n }\n }\n else //Loading a second SPC7110 game right after another\n {\n uint8_t *graphics_buffer = decompression_state.graphics_buffer;\n struct decompression_table *tables = decompression_state.tables;\n struct address_lookup *lookup = decompression_state.lookup;\n\n save_decompression_state();\n\n memset(&decompression_state, 0, sizeof(decompression_state));\n\n decompression_state.graphics_buffer = graphics_buffer;\n decompression_state.tables = tables;\n decompression_state.lookup = lookup;\n\n memset(decompression_state.tables, 0, table_bytes);\n decompression_state.rom_crc32 = CRC32;\n load_decompression_state();\n }\n }\n\n return(decompression_state.graphics_buffer);\n}\n\nvoid SPC7110_deinit_decompression_state()\n{\n if (decompression_state.graphics_buffer)\n {\n save_decompression_state();\n\n free(decompression_state.graphics_buffer);\n free(decompression_state.tables);\n free(decompression_state.lookup);\n\n memset(&decompression_state, 0, sizeof(decompression_state));\n }\n}\n\n\nstatic void get_lookup(uint32_t address)\n{\n size_t low = 0,\n high = decompression_state.lookup_used-1,\n mid;\n\n decompression_state.table_current = 0;\n\n while (low <= high)\n {\n mid = low + ((high-low)>>1);\n if (decompression_state.lookup[mid].address < address)\n {\n low = mid+1;\n }\n else if (decompression_state.lookup[mid].address > address)\n {\n high = mid-1;\n }\n else\n {\n decompression_state.table_current = decompression_state.lookup[mid].table;\n break;\n }\n }\n\n if (!decompression_state.table_current)\n {\n memmove(decompression_state.lookup+(low+1), decompression_state.lookup+low, (decompression_state.lookup_used-low)*sizeof(struct address_lookup));\n ++decompression_state.lookup_used;\n decompression_state.lookup[low].address = address;\n decompression_state.table_current = decompression_state.lookup[low].table = decompression_state.tables+decompression_state.table_used;\n decompression_state.table_used += TABLE_AMOUNT;\n }\n}\n\n\nstatic void init_buffered_decompression(uint32_t address, uint8_t entry, uint16_t skip_amount)\n{\n if (decompression_state.graphics_buffer)\n {\n //First handle previous decompression cache\n if (decompression_state.last_address && //Check that there was indeed a last decompression\n !decompression_state.table_current->length) //And it exceeded the known length\n {\n decompression_state.table_current->length = decompression_state.decompression_used_length;\n decompression_state.graphics_buffer_used += decompression_state.decompression_used_length;\n }\n\n if ((decompression_state.last_address != address) || (decompression_state.last_entry != entry))\n {\n uint8_t *spc7110_table = romdata + 0x100000 + address + 
(((uint16_t)entry) << 2); //<<2 because each entry is 4 bytes\n decompression_state.last_address = address;\n decompression_state.last_entry = entry;\n decompression_state.compression_mode = *spc7110_table++;\n decompression_state.compression_begin = romdata + 0x100000 + READ_WORD24_BE(spc7110_table);\n decompression_state.decompression_used_length = skip_amount<<decompression_state.compression_mode;\n\n get_lookup(address);\n decompression_state.table_current += entry;\n\n if (!decompression_state.table_current->length)\n {\n decompression_state.table_current->data = decompression_state.graphics_buffer+decompression_state.graphics_buffer_used;\n InitDecompression(decompression_state.compression_mode, decompression_state.compression_begin);\n DecompressSkipBytesBuffer(decompression_state.table_current->data, decompression_state.decompression_used_length);\n }\n }\n else\n {\n decompression_state.decompression_used_length = skip_amount<<decompression_state.compression_mode;\n }\n }\n}\n\nstatic uint8_t read_buffered_decompress(uint8_t byte)\n{\n if (decompression_state.table_current)\n {\n if (decompression_state.table_current->length && //There is a known length\n decompression_state.table_current->length <= decompression_state.decompression_used_length) //And it's about to exceed it\n {\n decompression_state.table_current->data = decompression_state.graphics_buffer+decompression_state.graphics_buffer_used;\n decompression_state.table_current->length = 0;\n\n InitDecompression(decompression_state.compression_mode, decompression_state.compression_begin);\n DecompressSkipBytesBuffer(decompression_state.table_current->data, decompression_state.decompression_used_length+1);\n\n //puts(\"Exceeded previous known length\");\n }\n else if (!decompression_state.table_current->length)\n {\n decompression_state.table_current->data[decompression_state.decompression_used_length] = DecompressByte();\n }\n\n byte = decompression_state.table_current->data[decompression_state.decompression_used_length++];\n }\n return(byte);\n}\n\nstatic void init_non_buffered_decompression(uint32_t address, uint8_t entry, uint16_t skip_amount)\n{\n uint8_t *spc7110_table = romdata + 0x100000 + address + (((uint16_t)entry) << 2); //<<2 because each entry is 4 bytes\n decompression_state.last_address = address;\n decompression_state.last_entry = entry;\n decompression_state.compression_mode = *spc7110_table++;\n decompression_state.compression_begin = romdata + 0x100000 + READ_WORD24_BE(spc7110_table);\n decompression_state.decompression_used_length = skip_amount<<decompression_state.compression_mode;\n\n InitDecompression(decompression_state.compression_mode, decompression_state.compression_begin);\n DecompressSkipBytes(decompression_state.decompression_used_length);\n}\n\nstatic uint8_t read_non_buffered_decompress(uint8_t byte)\n{\n if (decompression_state.last_address)\n {\n ++decompression_state.decompression_used_length;\n byte = DecompressByte();\n }\n return(byte);\n}\n\nvoid copy_spc7110_state_data(uint8_t **buffer, void (*copy_func)(unsigned char **, void *, size_t), bool load)\n{\n copy_func(buffer, &decompression_state.last_address, 3);\n copy_func(buffer, &decompression_state.last_entry, sizeof(uint8_t));\n copy_func(buffer, &decompression_state.decompression_used_length, sizeof(uint16_t));\n\n if (load && decompression_state.last_address)\n {\n uint32_t last_address = decompression_state.last_address;\n uint8_t last_entry = decompression_state.last_entry;\n uint16_t decompression_used_length = 
decompression_state.decompression_used_length;\n\n decompression_state.last_address = 0;\n decompression_state.last_entry = 0;\n decompression_state.decompression_used_length = 0;\n\n if (decompression_state.graphics_buffer)\n {\n init_buffered_decompression(last_address, last_entry, 0);\n }\n else\n {\n init_non_buffered_decompression(last_address, last_entry, 0);\n DecompressSkipBytes(decompression_used_length);\n }\n decompression_state.decompression_used_length = decompression_used_length;\n }\n}\n\n//Processing Code, a work in progress\n/*\nSPCCompressionRegs[x]\n0 - Decompressed byte\n1 - Compression table low\n2 - Compression table high\n3 - Compression table bank\n4 - Compression table index\n5 - Decompression buffer index low\n6 - Decompression buffer index high\n7 - DMA Channel\n8 - ?\n9 - Compression length low\nA - Compression length high\nB - Decompression control register\nC - Decompression status\n*/\n\nvoid (*init_decompression)(uint32_t address, uint8_t entry, uint16_t skip_amount);\nuint8_t (*read_decompress)(uint8_t byte);\n\nvoid SPC7110initC()\n{\n memset(SPCCompressionRegs, 0, 0x0C);\n if (SPC7110_init_decompression_state())\n {\n init_decompression = init_buffered_decompression;\n read_decompress = read_buffered_decompress;\n }\n else\n {\n init_decompression = init_non_buffered_decompression;\n read_decompress = read_non_buffered_decompress;\n }\n}\n\n//DECOMPRESSED DATA CONTINUOUS READ PORT\n//Returns a decompressed value from bank $50 and decrements 16 bit counter value at $4809/A by 1\nvoid SPC7110_4800()\n{\n WRITE_WORD16_LE(SPCCompressionRegs+9, READ_WORD16_LE(SPCCompressionRegs+9)-1);\n SPCCompressionRegs[0] = read_decompress(SPCCompressionRegs[0]);\n}\n\nvoid SPC7110_4806w()\n{\n init_decompression(READ_WORD24_LE(SPCCompressionRegs+1), SPCCompressionRegs[4], READ_WORD16_LE(SPCCompressionRegs+5));\n SPCCompressionRegs[0xC] = 0x80;\n}\n" }, { "alpha_fraction": 0.6176534295082092, "alphanum_fraction": 0.7212086319923401, "avg_line_length": 36.887977600097656, "blob_id": "8073fd02a17881813b9fed6db6c95ab24003aa33", "content_id": "5834708cc6b1c7c2038cc8796f36bce7926c8df9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 13867, "license_type": "no_license", "max_line_length": 108, "num_lines": 366, "path": "/src/cpu/memtable.c", "repo_name": "TeoTwawki/zsnes-archive", "src_encoding": "UTF-8", "text": "/*\nCopyright (C) 1997-2008 ZSNES Team ( zsKnight, _Demo_, pagefault, Nach )\n\nhttp://www.zsnes.com\nhttp://sourceforge.net/projects/zsnes\nhttps://zsnes.bountysource.com\n\nThis program is free software; you can redistribute it and/or\nmodify it under the terms of the GNU General Public License\nversion 2 as published by the Free Software Foundation.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with this program; if not, write to the Free Software\nFoundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.\n*/\n\n\n\n#ifdef __UNIXSDL__\n#include \"../gblhdr.h\"\n#define DIR_SLASH \"/\"\n#else\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n#include <sys/stat.h>\n#define DIR_SLASH \"\\\\\"\n#endif\n#include <stdint.h>\n#include \"memtable.h\"\n#include \"../gblvars.h\"\n\nextern unsigned int Curtableaddr, tableA[256];\n\nvoid PrepareOffset()\n{\n Curtableaddr -= (unsigned int)tableA;\n}\n\nvoid ResetOffset()\n{\n Curtableaddr += (unsigned int)tableA;\n}\n\nextern unsigned int snesmmap[256], snesmap2[256];\n\nvoid BankSwitchSDD1C (unsigned char bankval, unsigned int offset)\n{\n unsigned int curbankval = bankval, i;\n\n curbankval &= 7;\n curbankval <<= 20;\n curbankval += (uintptr_t)romdata;\n\n for (i=0; i<16 ; i++)\n {\n snesmap2[offset+i] = curbankval;\n snesmmap[offset+i] = curbankval;\n curbankval += 0x10000;\n }\n}\n\nextern unsigned char SDD1BankA, SDD1BankB, SDD1BankC, SDD1BankD;\n\nvoid UpdateBanksSDD1()\n{\n if (SDD1BankA)\n {\n BankSwitchSDD1C(SDD1BankA, 0x0C0);\n BankSwitchSDD1C(SDD1BankB, 0x0D0);\n BankSwitchSDD1C(SDD1BankC, 0x0E0);\n BankSwitchSDD1C(SDD1BankD, 0x0F0);\n }\n}\n\nextern void (*Bank0datr8[256])(), (*Bank0datr16[256])(), (*Bank0datw8[256])(), (*Bank0datw16[256])();\nextern void *DPageR8, *DPageR16, *DPageW8, *DPageW16;\n\nextern unsigned int xdb, xpb, xs, xx, xy, xd;\nextern unsigned short oamaddrt, xat, xst, xdt, xxt, xyt;\nextern unsigned char xdbt, xpbt;\n\nvoid UpdateDPageC()\n{\n DPageR8 = Bank0datr8[(xd >> 8) & 0xFF];\n DPageR16 = Bank0datr16[(xd >> 8) & 0xFF];\n DPageW8 = Bank0datw8[(xd >> 8) & 0xFF];\n DPageW16 = Bank0datw16[(xd >> 8) & 0xFF];\n}\n\nextern unsigned int SA1xd;\nextern void *SA1DPageR8, *SA1DPageR16, *SA1DPageW8, *SA1DPageW16;\n\nvoid SA1UpdateDPageC()\n{\n SA1DPageR8 = Bank0datr8[(SA1xd >> 8) & 0xFF];\n SA1DPageR16 = Bank0datr16[(SA1xd >> 8) & 0xFF];\n SA1DPageW8 = Bank0datw8[(SA1xd >> 8) & 0xFF];\n SA1DPageW16 = Bank0datw16[(SA1xd >> 8) & 0xFF];\n}\n\nvoid unpackfunct()\n{\n oamaddrt = (oamaddr & 0xFFFF);\n xat = (xa & 0xFFFF);\n xdbt = (xdb & 0xFF);\n xpbt = (xpb & 0xFF);\n xst = (xs & 0xFFFF);\n xdt = (xd & 0xFFFF);\n xxt = (xx & 0xFFFF);\n xyt = (xy & 0xFFFF);\n}\n\n#define bit_test(byte, checkbit) (byte & (1 << checkbit)) ? 
1 : 0\n\nextern unsigned int GlobalVL, GlobalVR, EchoVL, EchoVR, EchoRate[16], MaxEcho;\nextern unsigned int EchoFB, NoiseSpeeds[32], dspPAdj, NoiseInc, bg1ptrx;\nextern unsigned int bg1ptry, bg2ptrx, bg2ptry, bg3ptrx, bg3ptry, bg4ptrx;\nextern unsigned int bg4ptry;\nextern int FIRTAPVal0, FIRTAPVal1, FIRTAPVal2, FIRTAPVal3, FIRTAPVal4;\nextern int FIRTAPVal5, FIRTAPVal6, FIRTAPVal7;\nextern unsigned short VolumeConvTable[32768], bg1ptr, bg1ptrb, bg1ptrc;\nextern unsigned short bg2ptr, bg2ptrb, bg2ptrc, bg3ptr, bg3ptrb, bg3ptrc;\nextern unsigned short bg4ptr, bg4ptrb, bg4ptrc;\nextern unsigned char VolumeTableb[256], MusicVol, Voice0Status;\nextern unsigned char Voice1Status, Voice2Status, Voice3Status, Voice4Status;\nextern unsigned char Voice5Status, Voice6Status, Voice7Status, Voice0Noise;\nextern unsigned char Voice1Noise, Voice2Noise, Voice3Noise, Voice4Noise;\nextern unsigned char Voice5Noise, Voice6Noise, Voice7Noise, bgtilesz;\nextern unsigned char BG116x16t, BG216x16t, BG316x16t, BG416x16t, vramincby8on;\nextern unsigned char vramincr;\n\nextern void (**regptw)();\nvoid reg2118();\nvoid reg2118inc();\nvoid reg2118inc8();\nvoid reg2118inc8inc();\nvoid reg2119();\nvoid reg2119inc();\nvoid reg2119inc8();\nvoid reg2119inc8inc();\n\nvoid repackfunct()\n{\n/* unsigned char block;\n\n // Global/Echo Volumes\n GlobalVL = (VolumeConvTable[(MusicVol << 8) + VolumeTableb[DSPMem[0x0C]]] & 0xFF);\n GlobalVR = (VolumeConvTable[(MusicVol << 8) + VolumeTableb[DSPMem[0x1C]]] & 0xFF);\n EchoVL = (VolumeConvTable[(MusicVol << 8) + VolumeTableb[DSPMem[0x2C]]] & 0xFF);\n EchoVR = (VolumeConvTable[(MusicVol << 8) + VolumeTableb[DSPMem[0x3C]]] & 0xFF);\n\n // Echo Values\n MaxEcho = EchoRate[(DSPMem[0x7D] & 0xF)];\n EchoFB = VolumeTableb[DSPMem[0x0D]];\n\n // FIR Filter Values\n FIRTAPVal0 = (char)DSPMem[0x0F];\n FIRTAPVal1 = (char)DSPMem[0x1F];\n FIRTAPVal2 = (char)DSPMem[0x2F];\n FIRTAPVal3 = (char)DSPMem[0x3F];\n FIRTAPVal4 = (char)DSPMem[0x4F];\n FIRTAPVal5 = (char)DSPMem[0x5F];\n FIRTAPVal6 = (char)DSPMem[0x6F];\n FIRTAPVal7 = (char)DSPMem[0x7F];\n\n // Noise\n block = DSPMem[0x6C];\n DSPMem[0x6C] &= 0x7F;\n\n if (block & 0xC0)\n {\n Voice0Status = Voice1Status = Voice2Status = Voice3Status = 0;\n Voice4Status = Voice5Status = Voice6Status = Voice7Status = 0;\n }\n\n NoiseInc = (((NoiseSpeeds[(block & 0x1F)] * dspPAdj) >> 17) & 0xFFFFFFFF);\n\n Voice0Noise = bit_test(DSPMem[0x3D], 0);\n Voice1Noise = bit_test(DSPMem[0x3D], 1);\n Voice2Noise = bit_test(DSPMem[0x3D], 2);\n Voice3Noise = bit_test(DSPMem[0x3D], 3);\n Voice4Noise = bit_test(DSPMem[0x3D], 4);\n Voice5Noise = bit_test(DSPMem[0x3D], 5);\n Voice6Noise = bit_test(DSPMem[0x3D], 6);\n Voice7Noise = bit_test(DSPMem[0x3D], 7);*/\n\n bg1ptrx = bg1ptrb - bg1ptr;\n bg1ptry = bg1ptrc - bg1ptr;\n bg2ptrx = bg2ptrb - bg2ptr;\n bg2ptry = bg2ptrc - bg2ptr;\n bg3ptrx = bg3ptrb - bg3ptr;\n bg3ptry = bg3ptrc - bg3ptr;\n bg4ptrx = bg4ptrb - bg4ptr;\n bg4ptry = bg4ptrc - bg4ptr;\n\n // 16x16 tiles\n BG116x16t = bit_test(bgtilesz, 0);\n BG216x16t = bit_test(bgtilesz, 1);\n BG316x16t = bit_test(bgtilesz, 2);\n BG416x16t = bit_test(bgtilesz, 3);\n\n oamaddr = oamaddrt;\n xa = xat;\n xdb = xdbt;\n xpb = xpbt;\n xs = xst;\n xd = xdt;\n xx = xxt;\n xy = xyt;\n\n if (vramincby8on == 1)\n {\n if (vramincr == 1)\n {\n regptw[0x2118] = reg2118inc8inc;\n regptw[0x2119] = reg2119inc8;\n }\n else\n {\n regptw[0x2118] = reg2118inc8;\n regptw[0x2119] = reg2119inc8inc;\n }\n }\n else\n {\n if (vramincr == 1)\n {\n regptw[0x2118] = reg2118inc;\n regptw[0x2119] = reg2119;\n 
}\n else\n {\n regptw[0x2118] = reg2118;\n regptw[0x2119] = reg2119inc;\n }\n }\n}\n\nvoid regaccessbankr8(), regaccessbankw8(), regaccessbankr16(), regaccessbankw16();\nvoid memaccessbankr8(), memaccessbankw8(), memaccessbankr16(), memaccessbankw16();\nvoid wramaccessbankr8(), wramaccessbankw8(), wramaccessbankr16(), wramaccessbankw16();\nvoid sramaccessbankr8(), sramaccessbankw8(), sramaccessbankr16(), sramaccessbankw16();\nvoid eramaccessbankr8(), eramaccessbankw8(), eramaccessbankr16(), eramaccessbankw16();\n\nvoid regaccessbankr8SA1(), regaccessbankw8SA1(), regaccessbankr16SA1(), regaccessbankw16SA1();\nvoid SA1RAMaccessbankr8(), SA1RAMaccessbankw8(), SA1RAMaccessbankr16(), SA1RAMaccessbankw16();\nvoid SA1RAMaccessbankr8b(), SA1RAMaccessbankw8b(), SA1RAMaccessbankr16b(), SA1RAMaccessbankw16b();\n\nvoid sramaccessbankr8s(), sramaccessbankw8s(), sramaccessbankr16s(), sramaccessbankw16s();\nvoid DSP1Read8b3F(), DSP1Write8b3F(), DSP1Read16b3F(), DSP1Write16b3F();\nvoid DSP2Read8b(), DSP2Write8b(), DSP2Read16b(), DSP2Write16b();\nvoid DSP3Read8b(), DSP3Write8b(), DSP3Read16b(), DSP3Write16b();\nvoid DSP4Read8b(), DSP4Write8b(), DSP4Read16b(), DSP4Write16b();\nvoid setaaccessbankr8(), setaaccessbankw8(), setaaccessbankr16(), setaaccessbankw16();\nvoid setaaccessbankr8a(), setaaccessbankw8a(), setaaccessbankr16a(), setaaccessbankw16a();\nvoid Seta11Read8_60(), Seta11Write8_60(), Seta11Read16_60(), Seta11Write16_60();\nvoid Seta11Read8_68(), Seta11Write8_68(), Seta11Read16_68(), Seta11Write16_68();\nvoid sfxaccessbankr8(), sfxaccessbankw8(), sfxaccessbankr16(), sfxaccessbankw16();\nvoid sfxaccessbankr8b(), sfxaccessbankw8b(), sfxaccessbankr16b(), sfxaccessbankw16b();\nvoid sfxaccessbankr8c(), sfxaccessbankw8c(), sfxaccessbankr16c(), sfxaccessbankw16c();\nvoid sfxaccessbankr8d(), sfxaccessbankw8d(), sfxaccessbankr16d(), sfxaccessbankw16d();\nvoid OBC1Read8b(), OBC1Write8b(), OBC1Read16b(), OBC1Write16b();\nvoid C4Read8b(), C4Write8b(), C4Read16b(), C4Write16b();\nvoid memaccessspc7110r8(), memaccessspc7110r16(), memaccessspc7110w8(), memaccessspc7110w16();\nvoid SPC7110ReadSRAM8b(), SPC7110ReadSRAM16b(), SPC7110WriteSRAM8b(), SPC7110WriteSRAM16b();\nvoid stsramr8(), stsramr16(), stsramw8(), stsramw16();\nvoid stsramr8b(), stsramr16b(), stsramw8b(), stsramw16b();\n\nmrwp regbank = { regaccessbankr8, regaccessbankw8, regaccessbankr16, regaccessbankw16 };\nmrwp membank = { memaccessbankr8, memaccessbankw8, memaccessbankr16, memaccessbankw16 };\nmrwp wrambank = { wramaccessbankr8, wramaccessbankw8, wramaccessbankr16, wramaccessbankw16 };\nmrwp srambank = { sramaccessbankr8, sramaccessbankw8, sramaccessbankr16, sramaccessbankw16 };\nmrwp erambank = { eramaccessbankr8, eramaccessbankw8, eramaccessbankr16, eramaccessbankw16 };\n\nmrwp sa1regbank = { regaccessbankr8SA1, regaccessbankw8SA1, regaccessbankr16SA1, regaccessbankw16SA1 };\nmrwp sa1rambank = { SA1RAMaccessbankr8, SA1RAMaccessbankw8, SA1RAMaccessbankr16, SA1RAMaccessbankw16 };\nmrwp sa1rambankb = { SA1RAMaccessbankr8b, SA1RAMaccessbankw8b, SA1RAMaccessbankr16b, SA1RAMaccessbankw16b };\n\nmrwp sramsbank = { sramaccessbankr8s, sramaccessbankw8s, sramaccessbankr16s, sramaccessbankw16s };\nmrwp dsp1bank = { DSP1Read8b3F, DSP1Write8b3F, DSP1Read16b3F, DSP1Write16b3F };\nmrwp dsp2bank = { DSP2Read8b, DSP2Write8b, DSP2Read16b, DSP2Write16b };\nmrwp dsp3bank = { DSP3Read8b, DSP3Write8b, DSP3Read16b, DSP3Write16b };\nmrwp dsp4bank = { DSP4Read8b, DSP4Write8b, DSP4Read16b, DSP4Write16b };\nmrwp setabank = { setaaccessbankr8, setaaccessbankw8, 
setaaccessbankr16, setaaccessbankw16 };\nmrwp setabanka = { setaaccessbankr8a, setaaccessbankw8a, setaaccessbankr16a, setaaccessbankw16a };\nmrwp seta11bank = { Seta11Read8_68, Seta11Write8_68, Seta11Read16_68, Seta11Write16_68 };\nmrwp seta11banka = { Seta11Read8_60, Seta11Write8_60, Seta11Read16_60, Seta11Write16_60 };\nmrwp sfxbank = { sfxaccessbankr8, sfxaccessbankw8, sfxaccessbankr16, sfxaccessbankw16 };\nmrwp sfxbankb = { sfxaccessbankr8b, sfxaccessbankw8b, sfxaccessbankr16b, sfxaccessbankw16b };\nmrwp sfxbankc = { sfxaccessbankr8c, sfxaccessbankw8c, sfxaccessbankr16c, sfxaccessbankw16c };\nmrwp sfxbankd = { sfxaccessbankr8d, sfxaccessbankw8d, sfxaccessbankr16d, sfxaccessbankw16d };\nmrwp obc1bank = { OBC1Read8b, OBC1Write8b, OBC1Read16b, OBC1Write16b };\nmrwp c4bank = { C4Read8b, C4Write8b, C4Read16b, C4Write16b };\nmrwp SPC7110bank = { memaccessspc7110r8, memaccessspc7110w8, memaccessspc7110r16, memaccessspc7110w16 };\nmrwp SPC7110SRAMBank = { SPC7110ReadSRAM8b, SPC7110WriteSRAM8b, SPC7110ReadSRAM16b, SPC7110WriteSRAM16b };\nmrwp stbanka = { stsramr8, stsramw8, stsramr16, stsramw16 };\nmrwp stbankb = { stsramr8b, stsramw8b, stsramr16b, stsramw16b };\n\n\nvoid SetAddressingModes()\n{ // Banks\n map_mem(0x00, &regbank, 0x40); // 00 - 3F\n map_mem(0x40, &membank, 0x3E); // 40 - 7D\n map_mem(0x7E, &wrambank, 0x01); // 7E\n map_mem(0x7F, &erambank, 0x01); // 7F\n map_mem(0x80, &regbank, 0x40); // 80 - BF\n map_mem(0xC0, &membank, 0x40); // C0 - FF\n}\n\nvoid SetAddressingModesSA1()\n{\n map_mem(0x00, &sa1regbank, 0x40); // 00 - 3F\n map_mem(0x40, &sa1rambank, 0x20); // 40 - 5F\n map_mem(0x60, &sa1rambankb, 0x10); // 60 - 6F\n map_mem(0x70, &srambank, 0x08); // 70 - 77\n map_mem(0x78, &membank, 0x06); // 78 - 7D\n map_mem(0x7E, &wrambank, 0x01); // 7E\n map_mem(0x7F, &erambank, 0x01); // 7F\n map_mem(0x80, &sa1regbank, 0x40); // 80 - BF\n map_mem(0xC0, &membank, 0x40); // C0 - FF\n}\n\nvoid membank0r8reg(), membank0w8reg(), membank0r16reg(), membank0w16reg();\nvoid membank0r8ram(), membank0w8ram(), membank0r16ram(), membank0w16ram();\nvoid membank0r8rom(), membank0w8rom(), membank0r16rom(), membank0w16rom();\nvoid membank0r8romram(), membank0w8romram(), membank0r16romram(), membank0w16romram();\nvoid membank0r8inv(), membank0w8inv(), membank0r16inv(), membank0w16inv();\nvoid membank0r8chip(), membank0w8chip(), membank0r16chip(), membank0w16chip();\nvoid membank0r8ramSA1(), membank0w8ramSA1(), membank0r16ramSA1(), membank0w16ramSA1();\n\nmrwp regbank0 = { membank0r8reg, membank0w8reg, membank0r16reg, membank0w16reg };\nmrwp rambank0 = { membank0r8ram, membank0w8ram, membank0r16ram, membank0w16ram };\nmrwp rombank0 = { membank0r8rom, membank0w8rom, membank0r16rom, membank0w16rom };\nmrwp romrambank0 = { membank0r8romram, membank0w8romram, membank0r16romram, membank0w16romram };\nmrwp invbank0 = { membank0r8inv, membank0w8inv, membank0r16inv, membank0w16inv };\nmrwp chipbank0 = { membank0r8chip, membank0w8chip, membank0r16chip, membank0w16chip };\nmrwp sa1rambank0 = { membank0r8ramSA1, membank0w8ramSA1, membank0r16ramSA1, membank0w16ramSA1 };\n\nstatic void map_bank0(size_t dest, mrwp *src, size_t num)\n{\n rep_stosd(Bank0datr8+dest, src->memr8, num);\n rep_stosd(Bank0datw8+dest, src->memw8, num);\n rep_stosd(Bank0datr16+dest, src->memr16, num);\n rep_stosd(Bank0datw16+dest, src->memw16, num);\n}\n\nvoid GenerateBank0Table()\n{\n map_bank0(0x00, &rambank0, 0x20); // 00 - 1F\n map_bank0(0x20, &regbank0, 0x28); // 20 - 47\n map_bank0(0x48, &invbank0, 0x17); // 48 - 5E\n map_bank0(0x5F, 
&chipbank0, 0x1F); // 5F - 7D\n map_bank0(0x7E, &rombank0, 0x81); // 7E - FE\n map_bank0(0xFF, &romrambank0, 0x01); // FF\n}\n\nvoid GenerateBank0TableSA1()\n{\n map_bank0(0x00, &sa1rambank0, 0x20); // 00 - 1F\n}\n" }, { "alpha_fraction": 0.66008460521698, "alphanum_fraction": 0.6748942136764526, "avg_line_length": 23.877193450927734, "blob_id": "266f5d74a35a3abb43eccdc12b137f63651cf9cb", "content_id": "c14f51d83ef1bca46750c1c10b53f2a3f3705199", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1418, "license_type": "no_license", "max_line_length": 123, "num_lines": 57, "path": "/src/linux/lib.c", "repo_name": "TeoTwawki/zsnes-archive", "src_encoding": "UTF-8", "text": "/*\nCopyright (C) 1997-2008 ZSNES Team ( zsKnight, _Demo_, pagefault, Nach )\n\nhttp://www.zsnes.com\nhttp://sourceforge.net/projects/zsnes\nhttps://zsnes.bountysource.com\n\nThis program is free software; you can redistribute it and/or\nmodify it under the terms of the GNU General Public License\nversion 2 as published by the Free Software Foundation.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with this program; if not, write to the Free Software\nFoundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.\n*/\n\n#include <unistd.h>\n#include <fcntl.h>\n#include <sys/stat.h>\n#include <errno.h>\n\n#include \"lib.h\"\n\n#ifndef HAVE_AT_FUNCTIONS\n\nint fstatat(int dirfd, const char *pathname, struct stat *buf, int flags)\n{\n int success = -1;\n\n if ((!flags || (flags == AT_SYMLINK_NOFOLLOW)))\n {\n int cwdfd = -1;\n if ((dirfd == AT_FDCWD) || (pathname && (*pathname == '/')) || (((cwdfd=open(\".\", O_RDONLY)) != -1) && !fchdir(dirfd)))\n {\n success = (!flags) ? stat(pathname, buf) : lstat(pathname, buf);\n }\n\n if (cwdfd != -1)\n {\n fchdir(cwdfd);\n close(cwdfd);\n }\n }\n else\n {\n errno = EINVAL;\n }\n\n return(success);\n}\n\n#endif\n" }, { "alpha_fraction": 0.6538975238800049, "alphanum_fraction": 0.6692650318145752, "avg_line_length": 30.619718551635742, "blob_id": "ad8c43516d5dac46cd13756bbfd285cb021d53ad", "content_id": "dd00fd80d777d071c5428d91cce6593fdd4e982d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 4490, "license_type": "no_license", "max_line_length": 94, "num_lines": 142, "path": "/src/cpu/zspc/resamp.c", "repo_name": "TeoTwawki/zsnes-archive", "src_encoding": "UTF-8", "text": "/* http://www.slack.net/~ant/ */\n\n#include \"resamp.h\"\n\n#include <stdint.h>\n#include <string.h>\n#include <limits.h>\n\n/* Copyright (C) 2004-2007 Shay Green. This module is free software; you\ncan redistribute it and/or modify it under the terms of the GNU Lesser\nGeneral Public License as published by the Free Software Foundation; either\nversion 2.1 of the License, or (at your option) any later version. This\nmodule is distributed in the hope that it will be useful, but WITHOUT ANY\nWARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS\nFOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more\ndetails. 
You should have received a copy of the GNU Lesser General Public\nLicense along with this module; if not, write to the Free Software Foundation,\nInc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */\n\n#if INT_MAX < 0x7FFFFFFF\n\t#error \"Requires that int type have at least 32 bits\"\n#endif\n\ntypedef short sample_t;\n\n/* the extra is for extra_samples (see below), a hard-coded constant so it\nwouldn't have to be defined in the header file */\nsample_t resampler_buf [RESAMPLER_BUF_SIZE + 8];\nsample_t* resampler_write_pos;\n\n/* Code deals with pairs of samples (stereo). Most references to sample really\nmean a pair of samples. */\nenum { stereo = 2 };\n\n/* How many extra samples to keep around. Linear interpolation needs one extra. */\nenum { extra_samples = 1 * stereo };\n\n/* Number of bits used for linear interpolation. Can't be greater than 15 or\ntemporary products overflow a 32-bit integer. */\nenum { interp_bits = 15 };\n\n/* Number of fractional bits in s_pos. By making it interp_bits + 32, extraction\nof the high interp_bits will be more efficient on 32-bit machines. */\nenum { fixed_bits = interp_bits + 32 };\ntypedef uint64_t fixed_t;\nstatic fixed_t const unit = (fixed_t) 1 << fixed_bits;\n\nstatic fixed_t s_step; /* number of input samples for every output sample */\nstatic fixed_t s_pos; /* fraction of input sample already used */\nstatic int s_prev [stereo];/* previous samples for two-point low-pass filter */\n\nvoid resampler_set_rate( int in, int out )\n{\n\tfixed_t numer = in * unit;\n\ts_step = (numer + out / 2) / out; /* round to nearest */\n\n\tif ( !resampler_write_pos )\n\t\tresampler_clear();\n}\n\nvoid resampler_clear( void )\n{\n\t/* For most input/output ratios, the step fraction will not be exact, so\n\terror will periodically occur. If s_step is exact or was rounded up, error\n\twill be one less output sample. If rounded down, error will be one extra\n\toutput sample. Since error can be in either direction, start with position\n\tof half the step. */\n\ts_pos = s_step / 2;\n\n\ts_prev [0] = 0;\n\ts_prev [1] = 0;\n\tresampler_write_pos = resampler_buf + extra_samples;\n\tmemset( resampler_buf, 0, extra_samples * sizeof resampler_buf [0] );\n}\n\nint resampler_read( sample_t* out_begin, int count )\n{\n\tsample_t* out = out_begin;\n\tsample_t* const out_end = out + count;\n\n\tsample_t const* in = resampler_buf;\n\tsample_t* const in_end = resampler_write_pos - extra_samples;\n\n\tif ( in < in_end )\n\t{\n\t\tfixed_t const step = s_step;\n\t\tif ( step != unit )\n\t\t{\n\t\t\t/* local copies help optimizer */\n\t\t\tfixed_t pos = s_pos;\n\t\t\tint prev0 = s_prev [0];\n\t\t\tint prev1 = s_prev [1];\n\n\t\t\t/* Step fractionally through input samples to generate output samples */\n\t\t\tdo\n\t\t\t{\n\t\t\t\t/* Linear interpolation between current and next input sample, based on\n\t\t\t\tfractional portion of position. 
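\n\t\t\t\tConcretely: factor is the top interp_bits (15) bits of the 47-bit\n\t\t\t\tfractional position, so for each channel the INTERP macro computes\n\t\t\t\t  out = (in[now]*(32768 - factor) + in[next]*factor) >> 15,\n\t\t\t\twhere in[next] is the same channel one stereo pair (two shorts)\n\t\t\t\tlater -- hence the in[2 + i] below.\n\t\t\t\t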
*/\n\t\t\t\tint const factor = pos >> (fixed_bits - interp_bits);\n\n\t\t\t\t#define INTERP( i, out ) \\\n\t\t\t\t\tout = (in [0 + i] * ((1 << interp_bits) - factor) + in [2 + i] * factor) >> interp_bits;\\\n\n\t\t\t\t/* interpolate left and right */\n\t\t\t\tINTERP( 0, out [0] )\n\t\t\t\tINTERP( 1, out [1] )\n\t\t\t\tout += stereo;\n\n\t\t\t\t/* increment fractional position and separate whole part */\n\t\t\t\tpos += step;\n\t\t\t\tin += (int) (pos >> fixed_bits) * stereo;\n\t\t\t\tpos &= unit - 1;\n\t\t\t}\n\t\t\twhile ( in < in_end && out < out_end );\n\n\t\t\ts_prev [1] = prev1;\n\t\t\ts_prev [0] = prev0;\n\t\t\ts_pos = pos;\n\t\t}\n\t\telse\n\t\t{\n\t\t\t/* no resampling */\n\t\t\tint n = in_end - in;\n\t\t\tif ( n > count )\n\t\t\t\tn = count;\n\t\t\tmemcpy( out, in, n * sizeof *out );\n\t\t\tin += n;\n\t\t\tout += n;\n\t\t}\n\t}\n\n\t/* move unused samples to beginning of input buffer */\n\t{\n\t\tint result = out - out_begin;\n\t\tint remain = resampler_write_pos - in;\n\t\tif ( remain < 0 )\n\t\t\tremain = 0; /* occurs when reducing sample rate */\n\t\tresampler_write_pos = &resampler_buf [remain];\n\t\tmemmove( resampler_buf, in, remain * sizeof *in );\n\t\treturn result;\n\t}\n}\n" }, { "alpha_fraction": 0.5719441175460815, "alphanum_fraction": 0.6173945069313049, "avg_line_length": 22.539785385131836, "blob_id": "68367a302cc6cf4936e038bcac972b62f27dce59", "content_id": "6e37e6f891b083e3c089ac39e029eb365cdf351c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 21892, "license_type": "no_license", "max_line_length": 113, "num_lines": 930, "path": "/src/ui.c", "repo_name": "TeoTwawki/zsnes-archive", "src_encoding": "UTF-8", "text": "/*\nCopyright (C) 1997-2008 ZSNES Team ( zsKnight, _Demo_, pagefault, Nach )\n\nhttp://www.zsnes.com\nhttp://sourceforge.net/projects/zsnes\nhttps://zsnes.bountysource.com\n\nThis program is free software; you can redistribute it and/or\nmodify it under the terms of the GNU General Public License\nversion 2 as published by the Free Software Foundation.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with this program; if not, write to the Free Software\nFoundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.\n*/\n\n\n#ifdef __UNIXSDL__\n#include \"gblhdr.h\"\n#include \"zdir.h\"\n#else\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n#include <time.h>\n#include <stdbool.h>\n#endif\n\n#include \"asm_call.h\"\n#include \"cfg.h\"\n#include \"input.h\"\n#include \"mmlib/mm.h\"\n#include \"zpath.h\"\n\n#define BIT(x) (1 << (x))\n\nextern unsigned int xa, MessageOn, maxromspace;\nextern unsigned char FPSOn, spcon, device1, device2;\nextern char *Msgptr, CSStatus[], CSStatus2[], CSStatus3[], CSStatus4[];\n\nunsigned short selc0040, selcA000, selcB800;\n\nunsigned char *vidbuffer; // video buffer (1024x239 = 244736)\nunsigned char *ngwinptr;\nunsigned char *vidbufferofsa; // offset 1\nunsigned char *vidbufferofsb; // offset 2\nunsigned char *headdata;\nunsigned char *romdata; // rom data (4MB = 4194304)\nunsigned char *sfxramdata; // SuperFX Ram Data\nunsigned char *setaramdata; // Seta ST010/ST011 SRam Data\nunsigned char *wramdata; // stack (64K = 65536)\nunsigned char *ram7f; // ram @ 7f = 65536\nunsigned char *vram; // vram = 65536\nunsigned char *sram; // sram = 65536*2 = 131072\nunsigned char regptra[49152];\nunsigned char regptwa[49152];\nunsigned char *regptr = regptra;\nunsigned char *regptw = regptwa;\nunsigned char *vcache2b; // 2-bit video cache\nunsigned char *vcache4b; // 4-bit video cache\nunsigned char *vcache8b; // 8-bit video cache\nunsigned char romispal; // 0 = NTSC, 1 = PAL\nunsigned char newgfx16b;\n\nunsigned char previdmode; // previous video mode\nunsigned char cbitmode; // bit mode, 0=8bit, 1=16bit\n\nunsigned char opexec268 = 162; // # of opcodes/scanline in 2.68Mhz mode\nunsigned char opexec358 = 185; // # of opcodes/scanline in 3.58Mhz mode (228/180)\nunsigned char opexec268cph = 42; // # of opcodes/hblank in 2.68Mhz mode\nunsigned char opexec358cph = 45; // # of opcodes/hblank in 3.58Mhz mode (56/50)\nunsigned char opexec268b = 162; // # of opcodes/scanline in 2.68Mhz mode\nunsigned char opexec358b = 185; // # of opcodes/scanline in 3.58Mhz mode (228/180)\nunsigned char opexec268cphb = 42; // # of opcodes/hblank in 2.68Mhz mode\nunsigned char opexec358cphb = 45; // # of opcodes/hblank in 3.58Mhz mode (56/50)\nunsigned char debugdisble = 1; // debugger disable. 0 = no, 1 = yes\nunsigned char gammalevel16b = 0; // gamma level (16-bit engine)\nunsigned char AddSub256 = 0; // screen add/sub in 256 colors\nunsigned char dmadeddis = 0; // DMA deduction\nunsigned char OldStyle = 1; // Old style joystick on\nunsigned char SecondPort = 0; // Secondary Joystick Port Enabled (209h) (DOS port only)\n\nunsigned char Doublevbuf = 1; // Double video buffer\nunsigned char V8Mode = 0; // Vegetable mode! 
=) (Greyscale mode)\nunsigned char fastmemptr = 0;\nunsigned char ForcePal = 0; // 1 = NTSC, 2 = PAL\nunsigned char finterleave = 0;\nunsigned char DSPDisable = 0; // Disable DSP emulation\nunsigned char MusicVol = 0;\nunsigned char MMXextSupport = 0;\n\nvoid init(), WaitForKey(), MMXCheck(), DosExit();\nvoid SystemInit(), StartUp(), MultiMouseInit();\n\nvoid zexit(), zexit_error();\n\nvoid *alloc_ptr;\nunsigned int alloc_size;\n\nvoid alloc_help()\n{\n alloc_ptr=malloc(alloc_size);\n}\n\nextern bool input1gp;\nextern bool input1mouse;\nextern bool input2gp;\nextern bool input2mouse;\nextern bool input2scope;\nextern bool input2just;\n\nvoid cycleinputdevice1()\n{\n for (;;)\n {\n device1++;\n if (device1 >= 2)\n {\n device1 = 0;\n }\n if (device1 == 0)\n {\n if (input1gp) { return; }\n device1++;\n }\n if (device1 == 1)\n {\n if (input1mouse) { return; }\n }\n }\n}\n\nstatic bool device2_wrap = false;\n\nvoid cycleinputdevice2()\n{\n for (;;)\n {\n device2++;\n if (device2 >= 5)\n {\n device2_wrap = true;\n device2 = 0;\n }\n if (device2 == 0)\n {\n if (input2gp) { return; }\n device2++;\n }\n if (device2 == 1)\n {\n if (input2mouse) { return; }\n device2++;\n }\n if (device2 == 2)\n {\n if (input2scope) { return; }\n device2++;\n }\n if (device2 == 3)\n {\n if (input2just) { return; }\n device2++;\n }\n if (device2 == 4)\n {\n if (input2just) { return; }\n }\n }\n}\n\nstatic void outofmemory()\n{\n puts(\"You don't have enough memory to run this program!\");\n asm_call(DosExit);\n}\n\nextern unsigned char wramdataa[65536], ram7fa[65536];\n\nunsigned char *BitConv32Ptr = 0;\nunsigned char *RGBtoYUVPtr = 0;\nunsigned char *spcBuffera = 0;\nunsigned char *spritetablea = 0;\nunsigned char *vbufaptr = 0;\nunsigned char *vbufeptr = 0;\nunsigned char *ngwinptrb = 0;\nunsigned char *vbufdptr = 0;\nunsigned char *romaptr = 0;\nunsigned char *vcache2bs = 0; // 2-bit video secondary cache\nunsigned char *vcache4bs = 0; // 4-bit video secondary cache\nunsigned char *vcache8bs = 0; // 8-bit video secondary cache\n\nunsigned char vrama[65536];\n\nunsigned char mode7tab[65536];\n\nunsigned short fulladdtab[65536];\nunsigned short VolumeConvTable[32768];\nunsigned int dspWptr[256];\nunsigned int dspRptr[256];\n\n#define deallocmemhelp(p) if (p) { free(p); }\n\nvoid deallocmem()\n{\n deallocmemhelp(BitConv32Ptr);\n deallocmemhelp(RGBtoYUVPtr);\n deallocmemhelp(spcBuffera);\n deallocmemhelp(spritetablea);\n deallocmemhelp(vbufaptr);\n deallocmemhelp(vbufeptr);\n deallocmemhelp(ngwinptrb);\n deallocmemhelp(vbufdptr);\n deallocmemhelp(romaptr);\n deallocmemhelp(vcache2bs);\n deallocmemhelp(vcache4bs);\n deallocmemhelp(vcache8bs);\n deallocmemhelp(vcache2b);\n deallocmemhelp(vcache4b);\n deallocmemhelp(vcache8b);\n deallocmemhelp(sram);\n}\n\n#define AllocmemFail(ptr, size) if (!(ptr = malloc(size))) { outofmemory(); }\n\nstatic void allocmem()\n{\n AllocmemFail(BitConv32Ptr, 4096+65536*16);\n AllocmemFail(RGBtoYUVPtr,65536*4+4096);\n AllocmemFail(spcBuffera,65536*4+4096);\n AllocmemFail(spritetablea,256*512+4096);\n AllocmemFail(vbufaptr,512*296*4+4096+512*296);\n AllocmemFail(vbufeptr,288*2*256+4096);\n AllocmemFail(ngwinptrb,256*224+4096);\n AllocmemFail(vbufdptr,1024*296);\n AllocmemFail(vcache2bs,65536*4*4+4096);\n AllocmemFail(vcache4bs,65536*4*2+4096);\n AllocmemFail(vcache8bs,65536*4+4096);\n AllocmemFail(sram,65536*2);\n AllocmemFail(vcache2b,262144+256);\n AllocmemFail(vcache4b,131072+256);\n AllocmemFail(vcache8b,65536+256);\n\n newgfx16b = 1;\n if ((romaptr = malloc(0x600000+32768*2+4096)))\n {\n 
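// Fallback allocation: try a 6MB ROM buffer first, then 4MB, then 2MB,\n  // so machines short on memory can still load smaller cartridges;\n  // maxromspace records whichever size succeeded. The extra\n  // 32768*2+4096 bytes presumably leave headroom for the stub bytes\n  // written past the end of the ROM below, plus alignment slack.\n  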
maxromspace = 0x600000;\n }\n else\n {\n if ((romaptr = malloc(0x400000+32768*2+4096)))\n {\n maxromspace = 0x400000;\n }\n else\n {\n if ((romaptr = malloc(0x200000+32768*2+4096)))\n {\n maxromspace = 0x200000;\n }\n else\n {\n outofmemory();\n }\n }\n }\n\n // Set up memory values\n vidbuffer = vbufaptr;\n vidbufferofsa = vbufaptr;\n ngwinptr = ngwinptrb;\n vidbufferofsb = vbufeptr;\n\n headdata = romaptr;\n romdata = romaptr;\n sfxramdata = romaptr+0x400000;\n setaramdata = romaptr+0x400000;\n\n // Puts this ASM after the end of the ROM:\n // CLI\n // here: BRA here\n // But why?\n romdata[maxromspace+0] = 0x58;\n romdata[maxromspace+1] = 0x80;\n romdata[maxromspace+2] = 0xFE;\n\n wramdata = wramdataa;\n ram7f = ram7fa;\n vram = vrama;\n\n regptr -= 0x8000;\n regptw -= 0x8000;\n}\n\nunsigned char txtfailedalignd[] = \"Data Alignment Failure : \";\nunsigned char txtfailedalignc[] = \"Code Alignment Failure : \";\n\nvoid zstart()\n{\n unsigned int ptr;\n\n asm_call(MMXCheck);\n asm_call(StartUp);\n\n // Print welcome message part 2.\n puts(\"Use ZSNES -? for command line definitions.\\n\");\n\n#ifdef __UNIXSDL__\n MultiMouseInit();\n#endif\n\n asm_call(SystemInit);\n\n if (guioff && !*ZCartName)\n {\n puts(\"Will not start without a GUI unless a filename is supplied.\");\n zexit();\n }\n else\n {\n extern bool romloadskip;\n romloadskip = true;\n }\n\n allocmem();\n\n if (!(spcon = !SPCDisable)) { soundon = 0; }\n DSPDisable = !soundon;\n\n if (!frameskip)\n {\n FPSOn = FPSAtStart;\n }\n\n gammalevel16b = gammalevel >> 1;\n\n ptr = (unsigned int)&init;\n if ((ptr & 3))\n {\n printf(\"%s%d\", txtfailedalignc, (ptr & 0x1F));\n asm_call(WaitForKey);\n }\n\n ptr = (unsigned int)&xa;\n if ((ptr & 3))\n {\n printf(\"%s%d\", txtfailedalignd, (ptr & 0x1F));\n asm_call(WaitForKey);\n }\n\n asm_call(init);\n}\n\nstatic char *seconds_to_asc(unsigned int seconds)\n{\n static char buffer[50];\n char *p = buffer;\n unsigned int hours, minutes;\n\n minutes = seconds/60;\n seconds -= minutes*60;\n hours = minutes/60;\n minutes -= hours*60;\n *buffer = 0;\n\n if (hours)\n {\n sprintf(p, \"%u hours \", hours);\n p += strlen(p);\n }\n if (minutes)\n {\n sprintf(p, \"%u min \", minutes);\n p += strlen(p);\n }\n if (seconds)\n {\n sprintf(p, \"%u sec\", seconds);\n p += strlen(p);\n }\n if (!*buffer)\n {\n strcpy(buffer, \"0 sec\");\n }\n return(buffer);\n}\n\nvoid DisplayBatteryStatus()\n{\n int CheckBattery();\n int CheckBatteryTime();\n int CheckBatteryPercent();\n\n *CSStatus2 = 0;\n *CSStatus3 = 0;\n *CSStatus4 = 0;\n\n switch (CheckBattery())\n {\n case -1: //No battery\n strcpy(CSStatus, \"No battery present\");\n break;\n\n case 0: //Plugged in\n {\n int percent = CheckBatteryPercent();\n\n strcpy(CSStatus, \"PC is plugged in\");\n if (percent > 0)\n {\n sprintf(CSStatus2, \"%d%% charged\", percent);\n }\n }\n break;\n\n case 1: //Not plugged in\n {\n int percent = CheckBatteryPercent();\n int battery_time = CheckBatteryTime();\n\n strcpy(CSStatus, \"PC is running off of battery\");\n if (battery_time > 0)\n {\n sprintf(CSStatus2, \"Time remaining: %s\", seconds_to_asc(battery_time));\n }\n if (percent > 0)\n {\n sprintf(CSStatus3, \"%d%% remaining\", percent);\n }\n }\n break;\n }\n\n Msgptr = CSStatus;\n MessageOn = 100;\n}\n\n// Make use of multiple mice.\n\nint MouseCount = 0;\n\nunsigned short MouseMoveX[2];\nunsigned short MouseMoveY[2];\nunsigned short MouseButtons[2];\n\nstatic bool MouseWaiting[2];\n\nvoid MultiMouseShutdown()\n{\n MouseCount = 0;\n ManyMouse_Quit();\n}\n\nvoid 
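\n// On Linux this first walks /dev/input and warns about any event node the\n// process cannot read (ManyMouse presumably polls those nodes directly),\n// then asks ManyMouse to count the mice. Dual-mouse support stays enabled\n// only when at least two are found; otherwise ManyMouse is shut down again\n// and an on-screen message is queued.\n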
MultiMouseInit()\n{\n#ifdef __linux__\n DIR *input_dir;\n\n puts(\"Starting Mouse detection.\");\n input_dir = opendir(\"/dev/input\");\n if (input_dir)\n {\n struct dirent_info *entry;\n while ((entry = readdir_info(input_dir)))\n {\n if (!strncasecmp(entry->name, \"event\", strlen(\"event\")))\n {\n if (dirent_access(entry, R_OK))\n {\n printf(\"Unable to poll /dev/input/%s. Make sure you have read permissions to it.\\n\", entry->name);\n }\n }\n }\n closedir(input_dir);\n }\n else\n {\n puts(\"/dev/input does not exist or is inaccessable\");\n }\n#endif\n MouseCount = ManyMouse_Init();\n printf(\"ManyMouse: %d mice detected.\\n\", MouseCount);\n\n if (MouseCount > 1)\n {\n MouseMoveX[0] = MouseMoveX[1] = 0;\n MouseMoveY[0] = MouseMoveY[1] = 0;\n MouseButtons[0] = MouseButtons[1] = 0;\n MouseWaiting[0] = MouseWaiting[1] = false;\n atexit(MultiMouseShutdown);\n\n printf(\"Using ManyMouse for:\\nMouse 0: %s\\nMouse 1: %s\\n\", ManyMouse_DeviceName(0), ManyMouse_DeviceName(1));\n }\n else\n {\n strcpy(CSStatus, \"Dual mice not detected\");\n strcpy(CSStatus2, \"\");\n strcpy(CSStatus3, \"\");\n strcpy(CSStatus4, \"\");\n Msgptr = CSStatus;\n MessageOn = 100;\n\n MultiMouseShutdown();\n }\n}\n\n#define MOUSE_BUTTON_HANDLE(mouse, bit, value) \\\n if (value) { mouse |= BIT(bit); } \\\n else { mouse &= ~BIT(bit); }\n\nunsigned char mouse;\nvoid MultiMouseProcess()\n{\n ManyMouseEvent event;\n if (MouseWaiting[mouse])\n {\n MouseWaiting[mouse] = false;\n }\n else\n {\n MouseMoveX[mouse] = 0;\n MouseMoveY[mouse] = 0;\n\n while (ManyMouse_PollEvent(&event))\n {\n if (event.device != 0 && event.device != 1)\n {\n continue;\n }\n\n //printf(\"Device: %d; Type: %d; Item: %d; Value: %d\\n\", event.device, event.type, event.item, event.value);\n\n if ((event.device == (mouse^1)) && !MouseWaiting[event.device])\n {\n MouseMoveX[event.device] = 0;\n MouseMoveY[event.device] = 0;\n MouseWaiting[event.device] = true;\n }\n\n if (event.type == MANYMOUSE_EVENT_RELMOTION)\n {\n if (event.item == 0) { MouseMoveX[event.device] = event.value; }\n else { MouseMoveY[event.device] = event.value; }\n }\n else if (event.type == MANYMOUSE_EVENT_BUTTON)\n {\n if (event.item == 0) { MOUSE_BUTTON_HANDLE(MouseButtons[event.device], 0, event.value); }\n else if (event.item == 1) { MOUSE_BUTTON_HANDLE(MouseButtons[event.device], 1, event.value); }\n }\n }\n }\n}\n\nchar panickeyp[] = \"ALL SWITCHES NORMAL\\0\";\nchar mztrtr0[] = \"LOAD MZT MODE - OFF\\0\";\nchar mztrtr1[] = \"LOAD MZT MODE - RECORD\\0\";\nchar mztrtr2[] = \"LOAD MZT MODE - REPLAY\\0\";\nchar snesdevicemsg[] = \"P1: P2: \\0\";\nchar windissw[] = \"WINDOWING DISABLED\\0\";\nchar winenasw[] = \"WINDOWING ENABLED\\0\";\nchar ofsdissw[] = \"OFFSET MODE DISABLED\\0\";\nchar ofsenasw[] = \"OFFSET MODE ENABLED\\0\";\nchar ngena[] = \"NEW GFX ENGINE ENABLED\\0\";\nchar ngdis[] = \"NEW GFX ENGINE DISABLED\\0\";\nchar vollv[] = \"VOLUME LEVEL : \\0\";\nchar frlev[] = \"FRAME SKIP SET TO \\0\";\nchar frlv0[] = \"AUTO FRAMERATE ENABLED\\0\";\nchar pluse1234en[] = \"USE PLAYER 1/2 with 3/4 ON\\0\";\nchar pluse1234dis[] = \"USE PLAYER 1/2 with 3/4 OFF\\0\";\nchar sndchena[] = \"SOUND CH ENABLED\\0\";\nchar sndchdis[] = \"SOUND CH DISABLED\\0\";\nchar sprlayena[] = \"SPRITE LAYER ENABLED\\0\";\nchar sprlaydis[] = \"SPRITE LAYER DISABLED\\0\";\nchar bglayermsg[] = \"BG LAYER DISABLED\\0\";\nchar gammamsg[] = \"GAMMA LEVEL: \\0\";\n\nextern unsigned int MsgCount, MessageOn;\nextern unsigned short t1cc;\nextern unsigned char pressed[], scrndis, disableeffects, osm2dis, 
current_zst;\nextern unsigned char mousexloc, mouseyloc, snesinputdefault1, snesinputdefault2;\n/*\nextern unsigned char Voice0Disable, Voice1Disable, Voice2Disable, Voice3Disable;\nextern unsigned char Voice4Disable, Voice5Disable, Voice6Disable, Voice7Disable;\nextern unsigned char Voice0Status, Voice1Status, Voice2Status, Voice3Status;\nextern unsigned char Voice4Status, Voice5Status, Voice6Status, Voice7Status;\n*/\nvoid Get_MousePositionDisplacement();\nvoid set_state_message(char *, char *);\n\nvoid adjbglayermsg(char num, char toggleon)\n{\n if(toggleon)\n memcpy(&bglayermsg[10], \"ENABLED \",8);\n else\n memcpy(&bglayermsg[10], \"DISABLED\",8);\n\n memcpy(&bglayermsg[2], &num, 1);\n Msgptr = bglayermsg;\n MessageOn = MsgCount;\n}\n\nvoid adjgammamsg()\n{\n gammalevel16b = gammalevel >> 1;\n if(gammalevel < 10)\n gammamsg[13] = ' ';\n else\n gammamsg[13] = '1';\n gammamsg[14] = gammalevel%10+48;\n Msgptr = gammamsg;\n MessageOn = MsgCount;\n}\n\n/*\nvoid adjsoundchmsg(char *soundch, char *soundstatus, char num)\n{\n *soundch ^= 0x01;\n *soundstatus = 0;\n sndchena[9] = num;\n sndchdis[9] = num;\n if(*soundch == 0x01)\n Msgptr = sndchena;\n else\n Msgptr = sndchdis;\n MessageOn = MsgCount;\n}\n*/\n\nstatic void cycleinputdevicemsg()\n{\n if(!device1)\n {\n memcpy(&snesdevicemsg[4],\"GAMEPAD\",7);\n }\n else\n {\n memcpy(&snesdevicemsg[4],\"MOUSE \",7);\n }\n\n switch(device2)\n {\n case 1: memcpy(&snesdevicemsg[17], \"MOUSE \",14);\n break;\n case 2: memcpy(&snesdevicemsg[17], \"SUPER SCOPE \",14);\n break;\n case 3: memcpy(&snesdevicemsg[17], \"1 JUSTIFIER \",14);\n break;\n case 4: memcpy(&snesdevicemsg[17], \"2 JUSTIFIERS\",14);\n break;\n default: memcpy(&snesdevicemsg[17], \"GAMEPAD \",14);\n }\n}\n\nstatic void cycleinputs(bool input1, bool input2)\n{\n if (input2)\n {\n cycleinputdevice2();\n if (input1 && device2_wrap) { cycleinputdevice1(); }\n if(device2 == 2)\n {\n mousexloc = 128;\n mouseyloc = 112;\n }\n device2_wrap = false;\n }\n else if (input1)\n {\n cycleinputdevice1();\n }\n\n cycleinputdevicemsg();\n Msgptr = snesdevicemsg;\n MessageOn = MsgCount;\n asm_call(Get_MousePositionDisplacement);\n}\n\n#define PRESSED(key) ((pressed[(key)] == 1) && (pressed[(key)]=2))\n#define SCREEN_FLIP(num) adjbglayermsg((num)+'1', !((scrndis ^= BIT(num)) & BIT(num)))\n#define STATE_SELECT(num) current_zst = (current_zst/10)*10+num; set_state_message(\"STATE SLOT \", \" SELECTED.\");\n#define KEY_HANDLE(key_base, action, num) if (PRESSED(key_base ## num)) { action(num); }\n\nvoid QuickKeyCheck()\n{\n // disable all necessary backgrounds\n\n KEY_HANDLE(KeyBGDisble, SCREEN_FLIP, 0)\n KEY_HANDLE(KeyBGDisble, SCREEN_FLIP, 1)\n KEY_HANDLE(KeyBGDisble, SCREEN_FLIP, 2)\n KEY_HANDLE(KeyBGDisble, SCREEN_FLIP, 3)\n if (PRESSED(KeySprDisble))\n {\n scrndis ^= 0x10;\n if(scrndis & 0x10)\n Msgptr = sprlaydis;\n else\n Msgptr = sprlayena;\n MessageOn = MsgCount;\n }\n\n if (PRESSED(KeyEmuSpeedDown))\n {\n if(EmuSpeed)\n EmuSpeed--;\n }\n\n if (PRESSED(KeyEmuSpeedUp))\n {\n if(EmuSpeed < 58)\n EmuSpeed++;\n }\n\n if (PRESSED(KeyResetSpeed))\n {\n EmuSpeed = 29;\n }\n\n if (PRESSED(KeyResetAll))\n {\n/* Voice0Disable = 1;\n Voice1Disable = 1;\n Voice2Disable = 1;\n Voice3Disable = 1;\n Voice4Disable = 1;\n Voice5Disable = 1;\n Voice6Disable = 1;\n Voice7Disable = 1; */\n scrndis = 0;\n disableeffects = 0;\n osm2dis = 0;\n EmuSpeed = 29;\n device1 = snesinputdefault1;\n device2 = snesinputdefault2;\n Msgptr = panickeyp;\n MessageOn = MsgCount;\n }\n\n if (PRESSED(KeyRTRCycle))\n {\n MZTForceRTR++;\n 
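// The RTR hotkey cycles MZTForceRTR 0 -> 1 -> 2 -> 0 (off / record /\n    // replay); the switch below both picks the on-screen message and wraps\n    // the counter back to 0 in its default case.\n    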
switch(MZTForceRTR)\n {\n case 1: Msgptr = mztrtr1;\n break;\n case 2: Msgptr = mztrtr2;\n break;\n default: Msgptr = mztrtr0;\n MZTForceRTR = 0;\n }\n\n MessageOn = MsgCount;\n }\n\n if (PRESSED(KeyExtraEnab1))\n {\n cycleinputs(true, false);\n }\n\n if (PRESSED(KeyExtraEnab2))\n {\n cycleinputs(false, true);\n }\n\n if (PRESSED(KeyExtraRotate))\n {\n cycleinputs(true, true);\n }\n\n if (PRESSED(KeyWinDisble))\n {\n disableeffects ^= 1;\n if(disableeffects)\n Msgptr = windissw;\n else\n Msgptr = winenasw;\n MessageOn = MsgCount;\n }\n\n if (PRESSED(KeyOffsetMSw))\n {\n osm2dis ^= 1;\n if(osm2dis)\n Msgptr = ofsdissw;\n else\n Msgptr = ofsenasw;\n MessageOn = MsgCount;\n }\n\n if (PRESSED(KeyFRateUp))\n {\n if(frameskip < 10)\n {\n FPSOn = 0;\n frameskip++;\n frlev[18] = frameskip+47;\n Msgptr = frlev;\n MessageOn = MsgCount;\n }\n }\n\n if (PRESSED(KeyFRateDown))\n {\n if(frameskip)\n {\n frameskip--;\n if(frameskip)\n {\n frlev[18] = frameskip+47;\n Msgptr = frlev;\n }\n else\n {\n Msgptr = frlv0;\n t1cc = 0;\n }\n\n MessageOn = MsgCount;\n }\n }\n\n if (PRESSED(KeyDisplayBatt))\n {\n DisplayBatteryStatus();\n }\n\n if (PRESSED(KeyIncreaseGamma))\n {\n if(gammalevel < 15)\n {\n gammalevel++;\n adjgammamsg();\n }\n }\n\n if (PRESSED(KeyDecreaseGamma))\n {\n if(gammalevel)\n {\n gammalevel--;\n adjgammamsg();\n }\n }\n\n if (PRESSED(KeyDisplayFPS))\n {\n if(!frameskip)\n FPSOn ^= 1;\n }\n\n // do state selects\n KEY_HANDLE(KeyStateSlc, STATE_SELECT, 0)\n KEY_HANDLE(KeyStateSlc, STATE_SELECT, 1)\n KEY_HANDLE(KeyStateSlc, STATE_SELECT, 2)\n KEY_HANDLE(KeyStateSlc, STATE_SELECT, 3)\n KEY_HANDLE(KeyStateSlc, STATE_SELECT, 4)\n KEY_HANDLE(KeyStateSlc, STATE_SELECT, 5)\n KEY_HANDLE(KeyStateSlc, STATE_SELECT, 6)\n KEY_HANDLE(KeyStateSlc, STATE_SELECT, 7)\n KEY_HANDLE(KeyStateSlc, STATE_SELECT, 8)\n KEY_HANDLE(KeyStateSlc, STATE_SELECT, 9)\n\n if (PRESSED(KeyIncStateSlot))\n {\n current_zst = (current_zst+1)%100;\n set_state_message(\"STATE SLOT \", \" SELECTED.\");\n }\n\n if (PRESSED(KeyDecStateSlot))\n {\n current_zst = (current_zst+99)%100;\n set_state_message(\"STATE SLOT \", \" SELECTED.\");\n }\n\n if (PRESSED(KeyUsePlayer1234))\n {\n pl12s34 ^= 1;\n if(pl12s34)\n Msgptr = pluse1234en;\n else\n Msgptr = pluse1234dis;\n MessageOn = MsgCount;\n }\n\n/*\n if (PRESSED(KeyDisableSC0))\n {\n adjsoundchmsg(&Voice0Disable, &Voice0Status, '1');\n }\n\n if (PRESSED(KeyDisableSC1))\n {\n adjsoundchmsg(&Voice1Disable, &Voice1Status, '2');\n }\n\n if (PRESSED(KeyDisableSC2))\n {\n adjsoundchmsg(&Voice2Disable, &Voice2Status, '3');\n }\n\n if (PRESSED(KeyDisableSC3))\n {\n adjsoundchmsg(&Voice3Disable, &Voice3Status, '4');\n }\n\n if (PRESSED(KeyDisableSC4))\n {\n adjsoundchmsg(&Voice4Disable, &Voice4Status, '5');\n }\n\n if (PRESSED(KeyDisableSC5))\n {\n adjsoundchmsg(&Voice5Disable, &Voice5Status, '6');\n }\n\n if (PRESSED(KeyDisableSC6))\n {\n adjsoundchmsg(&Voice6Disable, &Voice6Status, '7');\n }\n\n if (PRESSED(KeyDisableSC7))\n {\n adjsoundchmsg(&Voice7Disable, &Voice7Status, '8');\n }\n*/\n}\n" }, { "alpha_fraction": 0.6994167566299438, "alphanum_fraction": 0.7162404656410217, "avg_line_length": 29.32653045654297, "blob_id": "ee2c3d76052c421f5981ec888d2830292cde7c49", "content_id": "2e5adea9e8c76d47d1c64e00396cda603dce93ad", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 4458, "license_type": "no_license", "max_line_length": 85, "num_lines": 147, "path": "/src/cpu/zspc/zspc.h", "repo_name": "TeoTwawki/zsnes-archive", "src_encoding": "UTF-8", 
"text": "// SPC emulator interface customized for zsnes\n\n// zspc 0.9.0\n#ifndef ZSPC_H\n#define ZSPC_H\n\n#include <stddef.h>\n#include \"stdint.h\"\n\n#ifdef __cplusplus\n\textern \"C\" {\n#endif\n\n\n// Error type returned from functions which can fail. NULL on success,\n// otherwise pointer to error string.\ntypedef const char* zspc_err_t;\n\n// 16-bit signed sample\ntypedef short zspc_sample_t;\n\n// Sample count. Always a multiple of 2, since output is in stereo pairs.\ntypedef int zspc_sample_count_t;\n\n// 1024000 SPC clocks per second, sample pair every 32 clocks, 32000 pairs per second\nenum { zspc_clock_rate = 1024000 };\nenum { zspc_clocks_per_sample = 32 };\nenum { zspc_sample_rate = 32000 };\n\n\n//// Setup\n\n// Initializes SPC emulator\nzspc_err_t zspc_init( void );\n\n// Resets SPC to power-on state. This resets your output buffer, so you must\n// call zspc_set_output() after this.\nvoid zspc_reset( void );\n\n// Emulates pressing reset switch on SNES. This resets your output buffer, so\n// you must call zspc_set_output() after this.\nvoid zspc_soft_reset( void );\n\n\n//// Sample output\n\n// Sets output sample rate. Defaults to 32000 sample pairs per second.\nzspc_err_t zspc_set_rate( int rate );\n\n// Sets destination for output samples. If out is NULL, output samples are\n// thrown away.\nvoid zspc_set_output( zspc_sample_t* out, zspc_sample_count_t out_size );\n\n// Number of samples written to output buffer since last call to zspc_set_output().\nzspc_sample_count_t zspc_sample_count( void );\n\n\n//// Sound adjustment\n\n// Mutes voices corresponding to non-zero bits in mask. Reduces emulation accuracy.\nenum { zspc_voice_count = 8 };\nvoid zspc_mute_voices( int mask );\n\n// Enables/disables filter (low-pass and bass adjuster)\nvoid zspc_enable_filter( int enable );\n\n// Sets overall volume, where zspc_gain_unit/2 is half, zspc_gain_unit*2 is double.\nenum { zspc_gain_unit = 64 };\nvoid zspc_set_gain( int gain );\n\n// Sets bass level of filter\nenum { zspc_bass_norm = 8 }; // level that SNES outputs on RCA\nenum { zspc_bass_min = 0 }; // mostly removed\nenum { zspc_bass_max = 31 }; // level you'd get from digital output\nvoid zspc_set_bass( int mode );\n\n\n//// SPC music playback\n\n// Loads SPC data into emulator\nzspc_err_t zspc_load_spc( void const* zspc_in, long size );\n\n// Clears echo region. Useful after loading an SPC as many have garbage in echo.\nvoid zspc_clear_echo( void );\n\n// Plays for count samples and writes samples to out. Discards samples if out\n// is NULL.\nzspc_err_t zspc_play( zspc_sample_count_t count, zspc_sample_t* out );\n\n\n//// State save/load\n\n// Saves/loads exact emulator state\nenum { zspc_state_size = 67 * 1024L }; // maximum space needed when saving\ntypedef void (*zspc_copy_func_t)( unsigned char** io, void* state, size_t );\nvoid zspc_copy_state( unsigned char** io, zspc_copy_func_t );\n\n// Writes minimal SPC file header to spc_out\nvoid zspc_init_header( void* spc_out );\n\n// Saves emulator state as SPC file data. Writes zspc_file_size bytes to zspc_out.\n// Does not set up SPC header; use zspc_init_header() for that.\nenum { zspc_file_size = 0x10200 }; // spc_out must have this many bytes allocated\nvoid zspc_save_spc( void* spc_out );\n\n// Returns non-zero if new key-on events occurred since last check. Useful for\n// trimming silence while saving an SPC.\nint zspc_check_kon( void );\n\n\n//// Debugging\n\n// Runs CPU to current zspc_time. 
If any new instructions were executed, returns\n// pointer to string with disassembly of *first* instruction executed and registers\n// *before* instruction executed, otherwise returns NULL.\nconst char* zspc_log_cpu( void );\n\n// Disassembles one instruction at addr and returns length of instruction\n// (1, 2, or 3 bytes). Writes at most zspc_disasm_max chars to out.\nenum { zspc_disasm_max = 48 };\nint zspc_disasm( int addr, char* out );\n\n\n//// Functions called from asm\n\n// Current SPC clock count. You should increment this directly.\nextern uint32_t zspc_time;\n\n// These globals are used to pass values to/from functions below.\n// You can access these as 8-bit bytes, since the upper 24 bits are always zero.\nextern uint32_t zspc_port;\nextern uint32_t zspc_data;\n\n// Flushes to buffer all samples that should be generated by now (current zspc_time)\nvoid zspc_flush_samples( void );\n\n// Reads/writes port\nenum { zspc_port_count = 4 };\nvoid/* zspc_data */ zspc_read_port (/* zspc_port */ void);\nvoid/* void */ zspc_write_port(/* zspc_port, zspc_data */ void);\n\n\n#ifdef __cplusplus\n\t}\n#endif\n\n#endif\n" }, { "alpha_fraction": 0.598061740398407, "alphanum_fraction": 0.639026939868927, "avg_line_length": 26.091760635375977, "blob_id": "1c96d307a38946f591da5cf6c2b7857b14048199", "content_id": "5631e2950ab1fada3400dbb144a7f3f000ff32f5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 40449, "license_type": "no_license", "max_line_length": 126, "num_lines": 1493, "path": "/src/zstate.c", "repo_name": "TeoTwawki/zsnes-archive", "src_encoding": "UTF-8", "text": "/*\nCopyright (C) 1997-2008 ZSNES Team ( zsKnight, _Demo_, pagefault, Nach )\n\nhttp://www.zsnes.com\nhttp://sourceforge.net/projects/zsnes\nhttps://zsnes.bountysource.com\n\nThis program is free software; you can redistribute it and/or\nmodify it under the terms of the GNU General Public License\nversion 2 as published by the Free Software Foundation.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with this program; if not, write to the Free Software\nFoundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.\n*/\n\n\n\n#ifdef __UNIXSDL__\n#include \"gblhdr.h\"\n#define DIR_SLASH \"/\"\n#else\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n#include <ctype.h>\n#include <zlib.h>\n#include <time.h>\n#ifdef __WIN32__\n#include <io.h>\n#else\n#include <unistd.h>\n#endif\n#define DIR_SLASH \"\\\\\"\n#endif\n#include <stdarg.h>\n#include \"gblvars.h\"\n#include \"asm_call.h\"\n#include \"zpath.h\"\n#include \"cfg.h\"\n#include \"zmovie.h\"\n#include \"chips/dsp4emu.h\"\n#include \"input.h\"\n\n#define NUMCONV_FR3\n#define NUMCONV_FW3\n#include \"numconv.h\"\n\nvoid SA1UpdateDPageC(), unpackfunct(), repackfunct();\nvoid PrepareOffset(), ResetOffset(), initpitch(), UpdateBanksSDD1();\nvoid procexecloop(), outofmemory();\n\nvoid copy_spc7110_state_data(uint8_t **, void (*)(unsigned char **, void *, size_t), bool);\n\nextern uint8_t cacheud, ccud, intrset, cycpl, cycphb, xdbt, xpbt, xp;\nextern uint8_t xe, xirqb, debugger, curnmi;\nextern uint16_t curypos, stackand, stackor, xat, xst, xdt, xxt, xyt, xpc;\nextern uint32_t Curtableaddr, cycpblt;\n\nstatic void copy_snes_data(uint8_t **buffer, void (*copy_func)(uint8_t **, void *, size_t))\n{\n //65816 status, etc.\n copy_func(buffer, &curcyc, 1);\n copy_func(buffer, &curypos, 2);\n copy_func(buffer, &cacheud, 1);\n copy_func(buffer, &ccud, 1);\n copy_func(buffer, &intrset, 1);\n copy_func(buffer, &cycpl, 1);\n copy_func(buffer, &cycphb, 1);\n //copy_func(buffer, &spcon, 1);\n copy_func(buffer, &stackand, 2);\n copy_func(buffer, &stackor, 2);\n copy_func(buffer, &xat, 2);\n copy_func(buffer, &xdbt, 1);\n copy_func(buffer, &xpbt, 1);\n copy_func(buffer, &xst, 2);\n copy_func(buffer, &xdt, 2);\n copy_func(buffer, &xxt, 2);\n copy_func(buffer, &xyt, 2);\n copy_func(buffer, &xp, 1);\n copy_func(buffer, &xe, 1);\n copy_func(buffer, &xpc, 2);\n copy_func(buffer, &xirqb, 1);\n copy_func(buffer, &debugger, 1);\n copy_func(buffer, &Curtableaddr, 4);\n copy_func(buffer, &curnmi, 1);\n //SPC Timers\n //copy_func(buffer, &cycpbl, 4);\n //copy_func(buffer, &cycpblt, 4);\n //SNES PPU Register status\n //copy_func(buffer, &sndrot, 3019);\n}\n\nstatic void copy_spc_data(uint8_t **buffer, void (*copy_func)(uint8_t **, void *, size_t))\n{\n //SPC stuff, DSP stuff\n //copy_func(buffer, SPCRAM, PHspcsave);\n// copy_func(buffer, &BRRBuffer, PHdspsave);\n// copy_func(buffer, &DSPMem, sizeof(DSPMem));\n}\n\nstatic void copy_extra_data(uint8_t **buffer, void (*copy_func)(uint8_t **, void *, size_t))\n{\n copy_func(buffer, &soundcycleft, 4);\n copy_func(buffer, &curexecstate, 4);\n copy_func(buffer, &nmiprevaddrl, 4);\n copy_func(buffer, &nmiprevaddrh, 4);\n copy_func(buffer, &nmirept, 4);\n copy_func(buffer, &nmiprevline, 4);\n copy_func(buffer, &nmistatus, 4);\n copy_func(buffer, &joycontren, 4);\n copy_func(buffer, &NextLineCache, 1);\n //copy_func(buffer, &spc700read, 10*4);\n //copy_func(buffer, &timer2upd, 4);\n copy_func(buffer, &xa, 14*4);\n //copy_func(buffer, &spcnumread, 1);\n copy_func(buffer, &opcd, 6*4);\n copy_func(buffer, &HIRQCycNext, 4);\n copy_func(buffer, &HIRQNextExe, 1);\n copy_func(buffer, &oamaddr, 14*4);\n copy_func(buffer, &prevoamptr, 1);\n}\n\nstatic size_t load_save_size;\n\nenum copy_state_method { csm_save_zst_new,\n csm_load_zst_new,\n csm_load_zst_old,\n csm_save_rewind,\n csm_load_rewind 
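\n                         /* One walk, five behaviors: the method only gates\n                            which blocks copy_state_data() touches --\n                            old-format ZSTs skip the newer chip and extra\n                            data, loading a new-format ZST copies SRAM only\n                            if the SRAMState option is set, and rewind\n                            snapshots append the temp registers. */\n                       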
};\n\nstatic void copy_state_data(uint8_t *buffer, void (*copy_func)(uint8_t **, void *, size_t), enum copy_state_method method)\n{\n copy_snes_data(&buffer, copy_func);\n\n //WRAM (128k), VRAM (64k)\n copy_func(&buffer, wramdata, 8192*16);\n copy_func(&buffer, vram, 4096*16);\n\n if (spcon)\n {\n //copy_spc_data(&buffer, copy_func);\n /*\n if (buffer) //Rewind stuff\n {\n copy_func(&buffer, &echoon0, PHdspsave2);\n }\n */\n }\n\n if (C4Enable)\n {\n copy_func(&buffer, C4Ram, 2048*4);\n }\n\n if (SFXEnable)\n {\n copy_func(&buffer, sfxramdata, 8192*16);\n copy_func(&buffer, &SfxR0, PHnum2writesfxreg);\n }\n\n if (SA1Enable)\n {\n copy_func(&buffer, &SA1Mode, PHnum2writesa1reg);\n copy_func(&buffer, SA1RAMArea, 8192*16);\n if (method != csm_load_zst_old)\n {\n copy_func(&buffer, &SA1Status, 3);\n copy_func(&buffer, &SA1xpc, 1*4);\n copy_func(&buffer, &sa1dmaptr, 2*4);\n }\n }\n\n if (DSP1Enable && (method != csm_load_zst_old))\n {\n copy_func(&buffer, &DSP1COp, 70+128);\n copy_func(&buffer, &Op00Multiplicand, 3*4+128);\n copy_func(&buffer, &Op10Coefficient, 4*4+128);\n copy_func(&buffer, &Op04Angle, 4*4+128);\n copy_func(&buffer, &Op08X, 5*4+128);\n copy_func(&buffer, &Op18X, 5*4+128);\n copy_func(&buffer, &Op28X, 4*4+128);\n copy_func(&buffer, &Op0CA, 5*4+128);\n copy_func(&buffer, &Op02FX, 11*4+3*4+28*8+128);\n copy_func(&buffer, &Op0AVS, 5*4+14*8+128);\n copy_func(&buffer, &Op06X, 6*4+10*8+4+128);\n copy_func(&buffer, &Op01m, 4*4+128);\n copy_func(&buffer, &Op0DX, 6*4+128);\n copy_func(&buffer, &Op03F, 6*4+128);\n copy_func(&buffer, &Op14Zr, 9*4+128);\n copy_func(&buffer, &Op0EH, 4*4+128);\n }\n\n if (SETAEnable)\n {\n copy_func(&buffer, setaramdata, 256*16);\n\n //Todo: copy the SetaCmdEnable? For completeness we should do it\n //but currently we ignore it anyway.\n }\n\n if (SPC7110Enable)\n {\n copy_func(&buffer, &SPCMultA, PHnum2writespc7110reg);\n copy_spc7110_state_data(&buffer, copy_func, (method == csm_load_zst_new) || (method == csm_load_rewind));\n }\n\n if (DSP4Enable)\n {\n copy_func(&buffer, &DSP4.waiting4command, sizeof(DSP4.waiting4command));\n copy_func(&buffer, &DSP4.half_command, sizeof(DSP4.half_command));\n copy_func(&buffer, &DSP4.command, sizeof(DSP4.command));\n copy_func(&buffer, &DSP4.in_count, sizeof(DSP4.in_count));\n copy_func(&buffer, &DSP4.in_index, sizeof(DSP4.in_index));\n copy_func(&buffer, &DSP4.out_count, sizeof(DSP4.out_count));\n copy_func(&buffer, &DSP4.out_index, sizeof(DSP4.out_index));\n copy_func(&buffer, &DSP4.parameters, sizeof(DSP4.parameters));\n copy_func(&buffer, &DSP4.output, sizeof(DSP4.output));\n\n copy_func(&buffer, &DSP4_vars.DSP4_Logic, sizeof(DSP4_vars.DSP4_Logic));\n copy_func(&buffer, &DSP4_vars.lcv, sizeof(DSP4_vars.lcv));\n copy_func(&buffer, &DSP4_vars.distance, sizeof(DSP4_vars.distance));\n copy_func(&buffer, &DSP4_vars.raster, sizeof(DSP4_vars.raster));\n copy_func(&buffer, &DSP4_vars.segments, sizeof(DSP4_vars.segments));\n copy_func(&buffer, &DSP4_vars.world_x, sizeof(DSP4_vars.world_x));\n copy_func(&buffer, &DSP4_vars.world_y, sizeof(DSP4_vars.world_y));\n copy_func(&buffer, &DSP4_vars.world_dx, sizeof(DSP4_vars.world_dx));\n copy_func(&buffer, &DSP4_vars.world_dy, sizeof(DSP4_vars.world_dy));\n copy_func(&buffer, &DSP4_vars.world_ddx, sizeof(DSP4_vars.world_ddx));\n copy_func(&buffer, &DSP4_vars.world_ddy, sizeof(DSP4_vars.world_ddy));\n copy_func(&buffer, &DSP4_vars.world_xenv, sizeof(DSP4_vars.world_xenv));\n copy_func(&buffer, &DSP4_vars.world_yofs, sizeof(DSP4_vars.world_yofs));\n copy_func(&buffer, 
&DSP4_vars.view_x1, sizeof(DSP4_vars.view_x1));\n copy_func(&buffer, &DSP4_vars.view_y1, sizeof(DSP4_vars.view_y1));\n copy_func(&buffer, &DSP4_vars.view_x2, sizeof(DSP4_vars.view_x2));\n copy_func(&buffer, &DSP4_vars.view_y2, sizeof(DSP4_vars.view_y2));\n copy_func(&buffer, &DSP4_vars.view_dx, sizeof(DSP4_vars.view_dx));\n copy_func(&buffer, &DSP4_vars.view_dy, sizeof(DSP4_vars.view_dy));\n copy_func(&buffer, &DSP4_vars.view_xofs1, sizeof(DSP4_vars.view_xofs1));\n copy_func(&buffer, &DSP4_vars.view_yofs1, sizeof(DSP4_vars.view_yofs1));\n copy_func(&buffer, &DSP4_vars.view_xofs2, sizeof(DSP4_vars.view_xofs2));\n copy_func(&buffer, &DSP4_vars.view_yofs2, sizeof(DSP4_vars.view_yofs2));\n copy_func(&buffer, &DSP4_vars.view_yofsenv, sizeof(DSP4_vars.view_yofsenv));\n copy_func(&buffer, &DSP4_vars.view_turnoff_x, sizeof(DSP4_vars.view_turnoff_x));\n copy_func(&buffer, &DSP4_vars.view_turnoff_dx, sizeof(DSP4_vars.view_turnoff_dx));\n copy_func(&buffer, &DSP4_vars.viewport_cx, sizeof(DSP4_vars.viewport_cx));\n copy_func(&buffer, &DSP4_vars.viewport_cy, sizeof(DSP4_vars.viewport_cy));\n copy_func(&buffer, &DSP4_vars.viewport_left, sizeof(DSP4_vars.viewport_left));\n copy_func(&buffer, &DSP4_vars.viewport_right, sizeof(DSP4_vars.viewport_right));\n copy_func(&buffer, &DSP4_vars.viewport_top, sizeof(DSP4_vars.viewport_top));\n copy_func(&buffer, &DSP4_vars.viewport_bottom, sizeof(DSP4_vars.viewport_bottom));\n copy_func(&buffer, &DSP4_vars.sprite_x, sizeof(DSP4_vars.sprite_x));\n copy_func(&buffer, &DSP4_vars.sprite_y, sizeof(DSP4_vars.sprite_y));\n copy_func(&buffer, &DSP4_vars.sprite_attr, sizeof(DSP4_vars.sprite_attr));\n copy_func(&buffer, &DSP4_vars.sprite_size, sizeof(DSP4_vars.sprite_size));\n copy_func(&buffer, &DSP4_vars.sprite_clipy, sizeof(DSP4_vars.sprite_clipy));\n copy_func(&buffer, &DSP4_vars.sprite_count, sizeof(DSP4_vars.sprite_count));\n copy_func(&buffer, &DSP4_vars.poly_clipLf, sizeof(DSP4_vars.poly_clipLf));\n copy_func(&buffer, &DSP4_vars.poly_clipRt, sizeof(DSP4_vars.poly_clipRt));\n copy_func(&buffer, &DSP4_vars.poly_ptr, sizeof(DSP4_vars.poly_ptr));\n copy_func(&buffer, &DSP4_vars.poly_raster, sizeof(DSP4_vars.poly_raster));\n copy_func(&buffer, &DSP4_vars.poly_top, sizeof(DSP4_vars.poly_top));\n copy_func(&buffer, &DSP4_vars.poly_bottom, sizeof(DSP4_vars.poly_bottom));\n copy_func(&buffer, &DSP4_vars.poly_cx, sizeof(DSP4_vars.poly_cx));\n copy_func(&buffer, &DSP4_vars.poly_start, sizeof(DSP4_vars.poly_start));\n copy_func(&buffer, &DSP4_vars.poly_plane, sizeof(DSP4_vars.poly_plane));\n copy_func(&buffer, &DSP4_vars.OAM_attr, sizeof(DSP4_vars.OAM_attr));\n copy_func(&buffer, &DSP4_vars.OAM_index, sizeof(DSP4_vars.OAM_index));\n copy_func(&buffer, &DSP4_vars.OAM_bits, sizeof(DSP4_vars.OAM_bits));\n copy_func(&buffer, &DSP4_vars.OAM_RowMax, sizeof(DSP4_vars.OAM_RowMax));\n copy_func(&buffer, &DSP4_vars.OAM_Row, sizeof(DSP4_vars.OAM_Row));\n }\n\n if (method != csm_load_zst_old)\n {\n copy_extra_data(&buffer, copy_func);\n\n //We don't load SRAM from new states if box isn't checked\n if ((method != csm_load_zst_new) || SRAMState)\n {\n copy_func(&buffer, sram, ramsize);\n }\n\n if ((method == csm_save_rewind) || (method == csm_load_rewind))\n {\n copy_func(&buffer, &tempesi, 4);\n copy_func(&buffer, &tempedi, 4);\n copy_func(&buffer, &tempedx, 4);\n copy_func(&buffer, &tempebp, 4);\n }\n }\n}\n\nstatic void memcpyinc(uint8_t **dest, void *src, size_t len)\n{\n memcpy(*dest, src, len);\n *dest += len;\n}\n\nstatic void memcpyrinc(uint8_t **src, void *dest, size_t len)\n{\n 
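// Mirror of memcpyinc above: copies out of the advancing state buffer\n  // back into the emulator variables. memcpyinc/memcpyrinc, the\n  // state_size_tally counter and the fwrite/fread wrappers further down\n  // are interchangeable callbacks, so a single copy_state_data() walk\n  // handles sizing, saving and loading alike.\n  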
memcpy(dest, *src, len);\n *src += len;\n}\n\nextern uint32_t RewindTimer, DblRewTimer;\nextern uint8_t EMUPause;\n\nuint8_t *StateBackup = 0;\nuint8_t AllocatedRewindStates, LatestRewindPos, EarliestRewindPos;\nbool RewindPosPassed;\n\nsize_t rewind_state_size, cur_zst_size, old_zst_size;\n\nextern uint8_t romispal;\nvoid zmv_rewind_save(size_t, bool);\nvoid zmv_rewind_load(size_t, bool);\n\nvoid ClearCacheCheck()\n{\n memset(vidmemch2, 1, sizeof(vidmemch2));\n memset(vidmemch4, 1, sizeof(vidmemch4));\n memset(vidmemch8, 1, sizeof(vidmemch8));\n}\n\n//Code to handle special frames for pausing, and desync checking\nuint8_t *SpecialPauseBackup = 0, PauseFrameMode = 0;\n/*\nPause frame modes\n\n0 - no pause frame stored\n1 - pause frame ready to be stored\n2 - pause frame stored\n3 - pause frame ready for reload\n*/\n\nvoid BackupPauseFrame()\n{\n if (SpecialPauseBackup)\n {\n copy_state_data(SpecialPauseBackup, memcpyinc, csm_save_rewind);\n PauseFrameMode = 2;\n }\n}\n\nvoid RestorePauseFrame()\n{\n if (SpecialPauseBackup)\n {\n copy_state_data(SpecialPauseBackup, memcpyrinc, csm_load_rewind);\n //ClearCacheCheck();\n PauseFrameMode = 0;\n }\n}\n\nvoid DeallocPauseFrame()\n{\n if (SpecialPauseBackup) { free(SpecialPauseBackup); }\n}\n\n#define ActualRewindFrames (uint32_t)(RewindFrames * (romispal ? 10 : 12))\n\nvoid BackupCVFrame()\n{\n uint8_t *RewindBufferPos = StateBackup + LatestRewindPos*rewind_state_size;\n\n if (MovieProcessing == MOVIE_PLAYBACK) { zmv_rewind_save(LatestRewindPos, true); }\n else if (MovieProcessing == MOVIE_RECORD) { zmv_rewind_save(LatestRewindPos, false); }\n copy_state_data(RewindBufferPos, memcpyinc, csm_save_rewind);\n\n if (RewindPosPassed)\n {\n EarliestRewindPos = (EarliestRewindPos+1)%AllocatedRewindStates;\n RewindPosPassed = false;\n }\n// printf(\"Backing up in #%u, earliest: #%u, allocated: %u\\n\", LatestRewindPos, EarliestRewindPos, AllocatedRewindStates);\n\n LatestRewindPos = (LatestRewindPos+1)%AllocatedRewindStates;\n\n if (LatestRewindPos == EarliestRewindPos) { RewindPosPassed = true; }\n\n RewindTimer = ActualRewindFrames;\n DblRewTimer += (DblRewTimer) ? 0 : ActualRewindFrames;\n// printf(\"New backup slot: #%u, timer %u, check %u\\n\", LatestRewindPos, RewindTimer, DblRewTimer);\n}\n\nvoid RestoreCVFrame()\n{\n uint8_t *RewindBufferPos;\n\n if (LatestRewindPos != ((EarliestRewindPos+1)%AllocatedRewindStates))\n {\n if (DblRewTimer > ActualRewindFrames)\n {\n if (LatestRewindPos == 1 || AllocatedRewindStates == 1)\n { LatestRewindPos = AllocatedRewindStates-1; }\n else { LatestRewindPos = (LatestRewindPos) ? LatestRewindPos-2 : AllocatedRewindStates-2; }\n }\n else\n {\n LatestRewindPos = (LatestRewindPos) ? 
LatestRewindPos-1 : AllocatedRewindStates-1;\n }\n }\n else { LatestRewindPos = EarliestRewindPos; }\n\n RewindBufferPos = StateBackup + LatestRewindPos*rewind_state_size;\n //printf(\"Restoring from #%u, earliest: #%u\\n\", LatestRewindPos, EarliestRewindPos);\n\n if (MovieProcessing == MOVIE_RECORD)\n {\n zmv_rewind_load(LatestRewindPos, false);\n }\n else\n {\n if (MovieProcessing == MOVIE_PLAYBACK)\n {\n zmv_rewind_load(LatestRewindPos, true);\n }\n\n if (PauseRewind || EMUPause)\n {\n PauseFrameMode = EMUPause = true;\n }\n }\n\n copy_state_data(RewindBufferPos, memcpyrinc, csm_load_rewind);\n ClearCacheCheck();\n\n LatestRewindPos = (LatestRewindPos+1)%AllocatedRewindStates;\n RewindTimer = ActualRewindFrames;\n DblRewTimer = 2*ActualRewindFrames;\n}\n\nvoid SetupRewindBuffer()\n{\n //For special rewind case to help out pauses\n DeallocPauseFrame();\n SpecialPauseBackup = malloc(rewind_state_size);\n\n //For standard rewinds\n if (StateBackup) { free(StateBackup); }\n for (; RewindStates; RewindStates--)\n {\n StateBackup = 0;\n StateBackup = (uint8_t *)malloc(rewind_state_size*RewindStates);\n if (StateBackup) { break; }\n }\n AllocatedRewindStates = RewindStates;\n}\n\nvoid DeallocRewindBuffer()\n{\n if (StateBackup) { free(StateBackup); }\n}\n\nstatic size_t state_size;\n\nstatic void state_size_tally(uint8_t **dest, void *src, size_t len)\n{\n state_size += len;\n}\n\nvoid InitRewindVars()\n{\n uint8_t almost_useless_array[1]; //An array is needed for copy_state_data to give the correct size\n state_size = 0;\n copy_state_data(almost_useless_array, state_size_tally, csm_save_rewind);\n rewind_state_size = state_size;\n\n SetupRewindBuffer();\n LatestRewindPos = 0;\n EarliestRewindPos = 0;\n RewindPosPassed = false;\n RewindTimer = 1;\n DblRewTimer = 1;\n}\n\nvoid InitRewindVarsForMovie()\n{\n LatestRewindPos = 0;\n EarliestRewindPos = 0;\n RewindPosPassed = false;\n RewindTimer = 1;\n DblRewTimer = 1;\n}\n\n//This is used to preserve system load state between game loads\nstatic uint8_t *BackupSystemBuffer = 0;\n\nvoid BackupSystemVars()\n{\n uint8_t *buffer;\n\n if (!BackupSystemBuffer)\n {\n state_size = 0;\n copy_snes_data(&buffer, state_size_tally);\n copy_spc_data(&buffer, state_size_tally);\n copy_extra_data(&buffer, state_size_tally);\n BackupSystemBuffer = (uint8_t *)malloc(state_size);\n }\n\n if (BackupSystemBuffer)\n {\n buffer = BackupSystemBuffer;\n copy_snes_data(&buffer, memcpyinc);\n copy_spc_data(&buffer, memcpyinc);\n copy_extra_data(&buffer, memcpyinc);\n }\n}\n\nvoid RestoreSystemVars()\n{\n if (BackupSystemBuffer)\n {\n uint8_t *buffer = BackupSystemBuffer;\n InitRewindVars();\n copy_snes_data(&buffer, memcpyrinc);\n copy_spc_data(&buffer, memcpyrinc);\n copy_extra_data(&buffer, memcpyrinc);\n }\n}\n\nvoid DeallocSystemVars()\n{\n if (BackupSystemBuffer) { free(BackupSystemBuffer); }\n}\n\nextern uintptr_t spcBuffera;\n/*\nextern uintptr_t Voice0BufPtr, Voice1BufPtr, Voice2BufPtr, Voice3BufPtr;\nextern uintptr_t Voice4BufPtr, Voice5BufPtr, Voice6BufPtr, Voice7BufPtr;\n*/\n//extern uintptr_t spcPCRam, spcRamDP;\n\nvoid PrepareSaveState()\n{\n //spcPCRam -= (uintptr_t)SPCRAM;\n //spcRamDP -= (uintptr_t)SPCRAM;\n\n/* Voice0BufPtr -= spcBuffera;\n Voice1BufPtr -= spcBuffera;\n Voice2BufPtr -= spcBuffera;\n Voice3BufPtr -= spcBuffera;\n Voice4BufPtr -= spcBuffera;\n Voice5BufPtr -= spcBuffera;\n Voice6BufPtr -= spcBuffera;\n Voice7BufPtr -= spcBuffera;*/\n}\n\nextern uintptr_t SA1Stat;\nextern uint8_t IRAM[2049], *SA1Ptr, *SA1RegPCS, *CurBWPtr, *SA1BWPtr, 
*SNSBWPtr;\n\nvoid SaveSA1()\n{\n SA1Stat &= 0xFFFFFF00;\n SA1Ptr -= (uintptr_t)SA1RegPCS;\n\n if (SA1RegPCS == IRAM)\n {\n SA1Stat = (SA1Stat & 0xFFFFFF00) + 1;\n }\n\n if (SA1RegPCS == IRAM-0x3000)\n {\n SA1Stat = (SA1Stat & 0xFFFFFF00) + 2;\n }\n\n SA1RegPCS -= (uintptr_t)romdata;\n CurBWPtr -= (uintptr_t)romdata;\n SA1BWPtr -= (uintptr_t)romdata;\n SNSBWPtr -= (uintptr_t)romdata;\n}\n\nvoid RestoreSA1()\n{\n SA1RegPCS += (uintptr_t)romdata;\n CurBWPtr += (uintptr_t)romdata;\n SA1BWPtr += (uintptr_t)romdata;\n SNSBWPtr += (uintptr_t)romdata;\n\n if ((SA1Stat & 0xFF) == 1)\n {\n SA1RegPCS = IRAM;\n }\n\n if ((SA1Stat & 0xFF) == 2)\n {\n SA1RegPCS = IRAM-0x3000;\n }\n\n SA1Ptr += (uintptr_t)SA1RegPCS;\n SA1RAMArea = romdata + 4096*1024;\n}\n\n#define ResState(Voice_BufPtr) \\\n Voice_BufPtr += spcBuffera; \\\n if (Voice_BufPtr >= spcBuffera + 65536*4) \\\n { \\\n Voice_BufPtr = spcBuffera; \\\n }\n\nvoid ResetState()\n{\n //spcPCRam += (uintptr_t)SPCRAM;\n //spcRamDP += (uintptr_t)SPCRAM;\n\n/* ResState(Voice0BufPtr);\n ResState(Voice1BufPtr);\n ResState(Voice2BufPtr);\n ResState(Voice3BufPtr);\n ResState(Voice4BufPtr);\n ResState(Voice5BufPtr);\n ResState(Voice6BufPtr);\n ResState(Voice7BufPtr);*/\n}\n\nextern uint32_t SfxRomBuffer, SfxCROM;\nextern uint32_t SfxLastRamAdr, SfxRAMMem, MsgCount, MessageOn;\nextern uint8_t AutoIncSaveSlot, cbitmode, NoPictureSave;\nextern char *Msgptr;\nextern uint16_t PrevPicture[64*56];\n\nstatic FILE *fhandle;\nvoid CapturePicture();\n\nstatic void write_save_state_data(uint8_t **dest, void *data, size_t len)\n{\n fwrite(data, 1, len, fhandle);\n}\n\nstatic const char zst_header_old[] = \"ZSNES Save State File V0.6\\x1a\\x3c\";\nstatic const char zst_header_cur[] = \"ZSNES Save State File V143\\x1a\\x8f\";\n\nvoid calculate_state_sizes()\n{\n state_size = 0;\n copy_state_data(0, state_size_tally, csm_save_zst_new);\n cur_zst_size = state_size + sizeof(zst_header_cur)-1;\n\n state_size = 0;\n copy_state_data(0, state_size_tally, csm_load_zst_old);\n old_zst_size = state_size + sizeof(zst_header_old)-1;\n}\n\nuint32_t current_zst = 0;\nuint32_t newest_zst = 0;\ntime_t newestfiledate;\n\nchar *zst_name()\n{\n static char buffer[7];\n if ((MovieProcessing == MOVIE_PLAYBACK) || (MovieProcessing == MOVIE_RECORD))\n {\n sprintf(buffer, \"%.2d.zst\", (unsigned int)current_zst);\n return(buffer);\n }\n strcpy(buffer, \"zst\");\n if (current_zst)\n {\n buffer[2] = (current_zst%10)+'0';\n if (current_zst > 9)\n {\n buffer[1] = (current_zst/10)+'0';\n }\n }\n setextension(ZStateName, buffer);\n return(ZStateName);\n}\n\nvoid zst_determine_newest()\n{\n struct stat filestat;\n char *zst_path;\n\n if (MovieInProgress())\n {\n mzt_chdir_up();\n zst_path = ZMoviePath;\n }\n else\n {\n zst_path = ZSStatePath;\n }\n\n if (!stat_dir(zst_path, zst_name(), &filestat) && filestat.st_mtime > newestfiledate)\n {\n newestfiledate = filestat.st_mtime;\n newest_zst = current_zst;\n }\n if (MovieInProgress()) { mzt_chdir_down(); }\n}\n\nvoid zst_init()\n{\n newestfiledate = 0;\n\n if (LatestSave)\n {\n for (current_zst = 0; current_zst < 100; current_zst++)\n {\n zst_determine_newest();\n }\n current_zst = newest_zst;\n zst_name();\n }\n}\n\nint zst_exists()\n{\n int ret;\n char *zst_path;\n\n if (MovieInProgress())\n {\n mzt_chdir_up();\n zst_path = ZMoviePath;\n }\n else\n {\n zst_path = ZSStatePath;\n }\n\n ret = access_dir(zst_path, zst_name(), F_OK) ? 
0 : 1;\n if (MovieInProgress()){ mzt_chdir_down(); }\n\n return(ret);\n}\n\n\nstatic bool zst_save_compressed(FILE *fp)\n{\n size_t data_size = cur_zst_size - (sizeof(zst_header_cur)-1);\n uint8_t *buffer = 0;\n\n bool worked = false;\n\n if ((buffer = (uint8_t *)malloc(data_size)))\n {\n unsigned long compressed_size = compressBound(data_size);\n uint8_t *compressed_buffer = 0;\n\n if ((compressed_buffer = (uint8_t *)malloc(compressed_size)))\n {\n copy_state_data(buffer, memcpyinc, csm_save_zst_new);\n if (compress2(compressed_buffer, &compressed_size, buffer, data_size, Z_BEST_COMPRESSION) == Z_OK)\n {\n fwrite3(compressed_size, fp);\n fwrite(compressed_buffer, 1, compressed_size, fp);\n worked = true;\n }\n free(compressed_buffer);\n }\n free(buffer);\n }\n\n if (!worked) //Compression failed for whatever reason\n {\n fwrite3(cur_zst_size | 0x00800000, fp); //Uncompressed ZST will never break 8MB\n }\n\n return(worked);\n}\n\nvoid zst_save(FILE *fp, bool Thumbnail, bool Compress)\n{\n PrepareOffset();\n PrepareSaveState();\n unpackfunct();\n\n if (SFXEnable)\n {\n SfxRomBuffer -= SfxCROM;\n SfxLastRamAdr -= SfxRAMMem;\n }\n\n if (SA1Enable)\n {\n SaveSA1(); //Convert SA-1 stuff to standard, non displacement format\n }\n\n if (!Compress || !zst_save_compressed(fp)) //If we don't want compressed or compression failed\n {\n fwrite(zst_header_cur, 1, sizeof(zst_header_cur)-1, fp); //-1 for null\n\n fhandle = fp; //Set global file handle\n copy_state_data(0, write_save_state_data, csm_save_zst_new);\n\n if (Thumbnail)\n {\n CapturePicture();\n fwrite(PrevPicture, 1, 64*56*sizeof(uint16_t), fp);\n }\n }\n\n if (SFXEnable)\n {\n SfxRomBuffer += SfxCROM;\n SfxLastRamAdr += SfxRAMMem;\n }\n\n if (SA1Enable)\n {\n RestoreSA1(); //Convert back SA-1 stuff\n }\n\n ResetOffset();\n ResetState();\n}\n\n/*\nMerges all the passed strings into buffer. Make sure to pass an extra parameter as 0 after all the strings.\nCopies at most buffer_len characters. Result is always null terminated.\nReturns how many bytes are needed to store all strings.\nThus if return is <= buffer_len, everything was copied.\n*/\nstatic size_t string_merge(char *buffer, size_t buffer_len, ...)\n{\n char *s;\n size_t copied = 0, needed = 0;\n\n va_list ap;\n va_start(ap, buffer_len);\n\n if (buffer && buffer_len) { *buffer = 0; }\n\n while ((s = va_arg(ap, char *)))\n {\n needed += strlen(s);\n if (buffer && (copied+1 < buffer_len))\n {\n strncpy(buffer+copied, s, buffer_len-copied);\n buffer[buffer_len-1] = 0;\n copied += strlen(buffer+copied);\n }\n }\n\n va_end(ap);\n return(needed+1);\n}\n\nstatic char txtmsg[30];\n\nvoid set_state_message(char *prefix, char *suffix)\n{\n char num[3];\n sprintf(num, \"%d\", (unsigned int)current_zst);\n string_merge(txtmsg, sizeof(txtmsg), prefix, isextension(ZStateName, \"zss\") ? \"AUTO\" : num, suffix, 0);\n\n Msgptr = txtmsg;\n MessageOn = MsgCount;\n}\n\nvoid statesaver()\n{\n if (MovieProcessing == MOVIE_RECORD)\n {\n //'Auto increment savestate slot' code\n current_zst += AutoIncSaveSlot;\n current_zst %= 100;\n\n if (mzt_save(current_zst, (cbitmode && !NoPictureSave) ? true : false, false))\n {\n set_state_message(\"RR STATE \", \" SAVED.\");\n }\n else\n {\n current_zst += 100-AutoIncSaveSlot;\n current_zst %= 100;\n }\n return;\n }\n\n if ((MovieProcessing == MOVIE_PLAYBACK) || (MovieProcessing == MOVIE_DUMPING_NEW))\n {\n //'Auto increment savestate slot' code\n current_zst += AutoIncSaveSlot;\n current_zst %= 100;\n\n if (mzt_save(current_zst, (cbitmode && !NoPictureSave) ? 
true : false, true))\n {\n set_state_message(\"RR STATE \", \" SAVED.\");\n }\n else\n {\n current_zst += 100-AutoIncSaveSlot;\n current_zst %= 100;\n }\n return;\n }\n\n //'Auto increment savestate slot' code\n if(!isextension(ZStateName, \"zss\"))\n {\n current_zst += (char) AutoIncSaveSlot;\n current_zst %= 100;\n zst_name();\n }\n\n if ((fhandle = fopen_dir(ZSStatePath, ZStateName, \"wb\")))\n {\n zst_save(fhandle, (bool)(cbitmode && !NoPictureSave), false);\n fclose(fhandle);\n\n //Display message onscreen, 'STATE XX SAVED.'\n set_state_message(\"STATE \", \" SAVED.\");\n }\n else\n {\n //Display message onscreen, 'UNABLE TO SAVE.'\n Msgptr = \"UNABLE TO SAVE.\";\n MessageOn = MsgCount;\n\n if(!isextension(ZStateName, \"zss\"))\n {\n current_zst += 100-(char) AutoIncSaveSlot;\n current_zst %= 100;\n zst_name();\n }\n }\n}\n\nextern uint32_t Totalbyteloaded, SfxMemTable[256], SfxCPB;\nextern uint32_t SfxPBR, SfxROMBR, SfxRAMBR, SCBRrel, SfxSCBR;\nextern uint8_t pressed[256+128+64], multchange, ioportval, SDD1Enable;\nextern uint8_t nexthdma;\n\nstatic void read_save_state_data(uint8_t **dest, void *data, size_t len)\n{\n load_save_size += fread(data, 1, len, fhandle);\n}\n\nstatic bool zst_load_compressed(FILE *fp, size_t compressed_size)\n{\n unsigned long data_size = cur_zst_size - (sizeof(zst_header_cur)-1);\n uint8_t *buffer = 0;\n bool worked = false;\n\n if ((buffer = (uint8_t *)malloc(data_size)))\n {\n uint8_t *compressed_buffer = 0;\n\n if ((compressed_buffer = (uint8_t *)malloc(compressed_size)))\n {\n fread(compressed_buffer, 1, compressed_size, fp);\n if (uncompress(buffer, &data_size, compressed_buffer, compressed_size) == Z_OK)\n {\n copy_state_data(buffer, memcpyrinc, csm_load_zst_new);\n worked = true;\n }\n free(compressed_buffer);\n }\n free(buffer);\n }\n return(worked);\n}\n\nbool zst_load(FILE *fp, size_t Compressed)\n{\n size_t zst_version = 0;\n\n if (Compressed)\n {\n if (!zst_load_compressed(fp, Compressed))\n {\n return(false);\n }\n }\n else\n {\n char zst_header_check[sizeof(zst_header_cur)-1];\n\n Totalbyteloaded += fread(zst_header_check, 1, sizeof(zst_header_check), fp);\n\n if (!memcmp(zst_header_check, zst_header_cur, sizeof(zst_header_check)-2))\n {\n zst_version = 143; //v1.43+\n }\n\n if (!memcmp(zst_header_check, zst_header_old, sizeof(zst_header_check)-2))\n {\n zst_version = 60; //v0.60 - v1.42\n }\n\n if (!zst_version) { return(false); } //Pre v0.60 saves are no longer loaded\n\n load_save_size = 0;\n fhandle = fp; //Set global file handle\n copy_state_data(0, read_save_state_data, (zst_version == 143) ? 
csm_load_zst_new: csm_load_zst_old );\n Totalbyteloaded += load_save_size;\n }\n\n if (SFXEnable)\n {\n SfxCPB = SfxMemTable[(SfxPBR & 0xFF)];\n SfxCROM = SfxMemTable[(SfxROMBR & 0xFF)];\n SfxRAMMem = (uintptr_t)sfxramdata + ((SfxRAMBR & 0xFF) << 16);\n SfxRomBuffer += SfxCROM;\n SfxLastRamAdr += SfxRAMMem;\n SCBRrel = (SfxSCBR << 10) + (uintptr_t)sfxramdata;\n }\n\n if (SA1Enable)\n {\n RestoreSA1(); //Convert back SA-1 stuff\n SA1UpdateDPageC();\n }\n\n if (SDD1Enable)\n {\n UpdateBanksSDD1();\n }\n\n //Clear cache check if state loaded\n ClearCacheCheck();\n\n if (zst_version < 143) //Set new vars which old states did not have\n {\n prevoamptr = 0xFF;\n ioportval = 0xFF;\n //spcnumread = 0;\n }\n\n if (MovieProcessing != MOVIE_RECORD)\n {\n nexthdma = 0;\n }\n\n repackfunct();\n initpitch();\n ResetOffset();\n ResetState();\n procexecloop();\n\n return(true);\n}\n\n//Wrapper for above\nbool zst_compressed_loader(FILE *fp)\n{\n size_t data_size = fread3(fp);\n return((data_size & 0x00800000) ? zst_load(fp, 0) : zst_load(fp, data_size));\n}\n\n#define PH65816regsize 36\nvoid zst_sram_load(FILE *fp)\n{\n fseek(fp, sizeof(zst_header_cur)-1 + PH65816regsize + 199635, SEEK_CUR);\n //if (spcon) { fseek(fp, PHspcsave /*+ PHdspsave + sizeof(DSPMem)*/, SEEK_CUR); }\n if (C4Enable) { fseek(fp, 8192, SEEK_CUR); }\n if (SFXEnable) { fseek(fp, PHnum2writesfxreg + 131072, SEEK_CUR); }\n if (SA1Enable)\n {\n fseek(fp, PHnum2writesa1reg, SEEK_CUR);\n fread(SA1RAMArea, 1, 131072, fp); // SA-1 sram\n fseek(fp, 15, SEEK_CUR);\n }\n if (DSP1Enable) { fseek(fp, 2874, SEEK_CUR); }\n if (SETAEnable) { fread(setaramdata, 1, 4096, fp); } // SETA sram\n if (SPC7110Enable) { fseek(fp, PHnum2writespc7110reg + 6, SEEK_CUR); }\n if (DSP4Enable) {fseek(fp, 1294, SEEK_CUR); }\n fseek(fp, 220, SEEK_CUR);\n if (ramsize) { fread(sram, 1, ramsize, fp); } // normal sram\n}\n\nvoid zst_sram_load_compressed(FILE *fp)\n{\n size_t compressed_size = fread3(fp);\n\n if (compressed_size & 0x00800000)\n {\n zst_sram_load(fp);\n }\n else\n {\n unsigned long data_size = cur_zst_size - (sizeof(zst_header_cur)-1);\n uint8_t *buffer = 0;\n\n if ((buffer = (uint8_t *)malloc(data_size)))\n {\n uint8_t *compressed_buffer = 0;\n if ((compressed_buffer = (uint8_t *)malloc(compressed_size)))\n {\n fread(compressed_buffer, 1, compressed_size, fp);\n if (uncompress(buffer, &data_size, compressed_buffer, compressed_size) == Z_OK)\n {\n uint8_t *data = buffer + PH65816regsize + 199635;\n //if (spcon) { data += PHspcsave /*+ PHdspsave + sizeof(DSPMem)*/; }\n if (C4Enable) { data += 8192; }\n if (SFXEnable) { data += PHnum2writesfxreg + 131072; }\n if (SA1Enable)\n {\n data += PHnum2writesa1reg;\n memcpyrinc(&data, SA1RAMArea, 131072); // SA-1 sram\n data += 15;\n }\n if (DSP1Enable) { data += 2874; }\n if (SETAEnable) { memcpyrinc(&data, setaramdata, 4096); } // SETA sram\n if (SPC7110Enable) { data += PHnum2writespc7110reg + 6; }\n if (DSP4Enable) { data += 1294; }\n data += 220;\n if (ramsize) { memcpyrinc(&data, sram, ramsize); } // normal sram\n }\n free(compressed_buffer);\n }\n free(buffer);\n }\n }\n}\n\n\nextern uint8_t Voice0Disable, Voice1Disable, Voice2Disable, Voice3Disable;\nextern uint8_t Voice4Disable, Voice5Disable, Voice6Disable, Voice7Disable;\n\nvoid stateloader(char *statename, bool keycheck, bool xfercheck)\n{\n extern uint8_t PauseLoad;\n\n if (keycheck)\n {\n pressed[1] = 0;\n pressed[KeyLoadState] = 2;\n multchange = 1;\n MessageOn = MsgCount;\n }\n\n if (MZTForceRTR == RTR_REPLAY_TO_RECORD && (MovieProcessing == 
MOVIE_PLAYBACK))\n {\n MovieRecord();\n }\n else if (MZTForceRTR == RTR_RECORD_TO_REPLAY && (MovieProcessing == MOVIE_RECORD))\n {\n MovieStop();\n MoviePlay();\n }\n\n switch (MovieProcessing)\n {\n case MOVIE_PLAYBACK:\n if (mzt_load(current_zst, true))\n {\n Msgptr = \"CHAPTER LOADED.\";\n MessageOn = MsgCount;\n }\n else\n {\n set_state_message(\"UNABLE TO LOAD STATE \", \".\");\n }\n return;\n case MOVIE_RECORD:\n if (mzt_load(current_zst, false))\n {\n set_state_message(\"RR STATE \", \" LOADED.\");\n\n if (PauseLoad || EMUPause)\n {\n PauseFrameMode = EMUPause = true;\n }\n }\n else\n {\n set_state_message(\"UNABLE TO LOAD STATE \", \".\");\n }\n return;\n case MOVIE_OLD_PLAY:\n {\n extern char CMovieExt;\n size_t fname_len = strlen(statename);\n setextension(statename, \"zmv\");\n if (isdigit(CMovieExt)) { statename[fname_len-1] = CMovieExt; }\n }\n case MOVIE_ENDING_DUMPING: case MOVIE_DUMPING_NEW: case MOVIE_DUMPING_OLD:\n return;\n break;\n }\n\n if(!isextension(ZStateName, \"zss\"))\n {\n zst_name();\n }\n\n //Actual state loading code\n if ((fhandle = fopen_dir(ZSStatePath, statename, \"rb\")))\n {\n if (xfercheck) { Totalbyteloaded = 0; }\n\n if (zst_load(fhandle, 0))\n {\n set_state_message(\"STATE \", \" LOADED.\"); // 'STATE XX LOADED.'\n\n if (PauseLoad || EMUPause)\n {\n PauseFrameMode = EMUPause = true;\n }\n }\n else\n {\n set_state_message(\"STATE \", \" TOO OLD.\"); // 'STATE X TOO OLD.' - I don't think this is always accurate -Nach\n }\n fclose(fhandle);\n }\n else\n {\n set_state_message(\"UNABLE TO LOAD STATE \", \".\"); // 'UNABLE TO LOAD STATE XX.'\n }\n\n Voice0Disable = Voice1Disable = Voice2Disable = Voice3Disable = 1;\n Voice4Disable = Voice5Disable = Voice6Disable = Voice7Disable = 1;\n}\n\nvoid debugloadstate()\n{\n stateloader(ZStateName, 0, 0);\n}\n\nvoid loadstate()\n{\n stateloader(ZStateName, 1, 0);\n}\n\nvoid loadstate2()\n{\n stateloader(ZStateName, 0, 1);\n}\n\nvoid LoadSecondState()\n{\n setextension(ZStateName, \"zss\");\n loadstate2();\n zst_name();\n}\n\nvoid SaveSecondState()\n{\n setextension(ZStateName, \"zss\");\n statesaver();\n zst_name();\n}\n\nextern uint8_t CHIPBATT, sramsavedis, *sram2, nosaveSRAM;\nvoid SaveCombFile();\n\n// Sram saving\nvoid SaveSramData()\n{\n extern uint32_t sramb4save;\n if (*ZSaveName && (!SRAMSave5Sec || sramb4save))\n {\n FILE *fp = 0;\n uint8_t special = 0;\n uint8_t *data_to_save;\n\n setextension(ZSaveName, \"srm\");\n\n if (ramsize && !sramsavedis)\n {\n if (SFXEnable)\n {\n data_to_save=(uint8_t *)sfxramdata;\n special = 1;\n }\n else if (SA1Enable)\n {\n data_to_save = (uint8_t *)SA1RAMArea;\n special=1;\n }\n else if (SETAEnable)\n {\n data_to_save = (uint8_t *)setaramdata;\n special=1;\n }\n else { data_to_save = (uint8_t *)sram; }\n\n if (!special || CHIPBATT)\n {\n if (!nosaveSRAM && (fp = fopen_dir(ZSramPath, ZSaveName,\"wb\")))\n {\n fwrite(data_to_save, 1, ramsize, fp);\n fclose(fp);\n }\n if (!nosaveSRAM && *ZSaveST2Name && (fp = fopen_dir(ZSramPath, ZSaveST2Name, \"wb\")))\n {\n fwrite(sram2, 1, ramsize, fp);\n fclose(fp);\n }\n }\n }\n sramb4save = 0;\n }\n SaveCombFile();\n}\n\nextern bool SramExists;\nvoid OpenSramFile()\n{\n FILE *fp;\n\n setextension(ZSaveName, \"srm\");\n if ((fp = fopen_dir(ZSramPath, ZSaveName, \"rb\")))\n {\n fread(sram, 1, ramsize, fp);\n fclose(fp);\n\n SramExists = true;\n\n if (*ZSaveST2Name && (fp = fopen_dir(ZSramPath, ZSaveST2Name, \"rb\")))\n {\n fread(sram2, 1, ramsize, fp);\n fclose(fp);\n }\n }\n else\n {\n SramExists = false;\n }\n}\n\n/*\nSPC File Format - 
Invented by _Demo_ & zsKnight\nCleaned up by Nach\n\n00000h-00020h - File Header : SNES-SPC700 Sound File Data v0.00 (33 bytes)\n00021h-00023h - 0x1a,0x1a,0x1a (3 bytes)\n00024h - 10 (1 byte)\n00025h - PC Register value (1 Word)\n00027h - A Register Value (1 byte)\n00028h - X Register Value (1 byte)\n00029h - Y Register Value (1 byte)\n0002Ah - Status Flags Value (1 byte)\n0002Bh - Stack Register Value (1 byte)\n0002Ch-0002Dh - Reserved (2 bytes)\n0002Eh-0004Dh - SubTitle/Song Name (32 bytes)\n0004Eh-0006Dh - Title of Game (32 bytes)\n0006Eh-0007Dh - Name of Dumper (16 bytes)\n0007Eh-0009Dh - Comments (32 bytes)\n0009Eh-000A1h - Date the SPC was Dumped (4 bytes)\n000A2h-000A8h - Reserved (7 bytes)\n000A9h-000ACh - Length of SPC in seconds (4 bytes)\n000ADh-000AFh - Fade out length in milliseconds (3 bytes)\n000B0h-000CFh - Author of Song (32 bytes)\n000D0h - Default Channel Disables (0 = enable, 1 = disable) (1 byte)\n000D1h - Emulator used to dump .spc file (1 byte)\n (0 = UNKNOWN, 1 = ZSNES, 2 = SNES9X)\n (Note : Contact the authors if you're an snes emu author with\n an .spc capture in order to assign you a number)\n000D2h-000FFh - Reserved (46 bytes)\n00100h-100FFh - SPCRam (64 KB)\n10100h-101FFh - DSPRam (256 bytes)\n*/\n\n//extern uint8_t spcextraram[64];\n//extern uint8_t spcP, spcA, spcX, spcY, spcS, spcNZ;\nextern uint32_t infoloc;\n\nchar spcsaved[16];\nvoid savespcdata()\n{\n size_t fname_len;\n unsigned int i = 0;\n\n setextension(ZSaveName, \"spc\");\n fname_len = strlen(ZSaveName);\n\n while (i < 100)\n {\n if (i)\n {\n sprintf(ZSaveName-1+fname_len - ((i < 10) ? 0 : 1), \"%d\", i);\n }\n if (access_dir(ZSpcPath, ZSaveName, F_OK))\n {\n break;\n }\n i++;\n }\n if (i < 100)\n {\n FILE *fp = fopen_dir(ZSpcPath, ZSaveName, \"wb\");\n if (fp)\n {\n uint8_t ssdatst[256];\n time_t t = time(0);\n struct tm *lt = localtime(&t);\n\n //Assemble N/Z flags into P\n/*\n spcP &= 0xFD;\n if (!spcNZ)\n {\n spcP |= 2;\n }\n spcP &= 0x7F;\n if (spcNZ & 0x80)\n {\n spcP |= 0x80;\n }\n*/\n\n strcpy((char *)ssdatst, \"SNES-SPC700 Sound File Data v0.30\"); //00000h - File Header : SNES-SPC700 Sound File Data v0.00\n ssdatst[0x21] = ssdatst[0x22] = ssdatst[0x23] = 0x1a; //00021h - 0x1a,0x1a,0x1a\n ssdatst[0x24] = 10; //00024h - 10\n //*((uint16_t *)(ssdatst+0x25)) = spcPCRam-(uint32_t)SPCRAM; //00025h - PC Register value (1 Word)\n //ssdatst[0x27] = spcA; //00027h - A Register Value (1 byte)\n //ssdatst[0x28] = spcX; //00028h - X Register Value (1 byte)\n //ssdatst[0x29] = spcY; //00029h - Y Register Value (1 byte)\n //ssdatst[0x2A] = spcP; //0002Ah - Status Flags Value (1 byte)\n //ssdatst[0x2B] = spcS; //0002Bh - Stack Register Value (1 byte)\n\n ssdatst[0x2C] = 0; //0002Ch - Reserved\n ssdatst[0x2D] = 0; //0002Dh - Reserved\n\n PrepareSaveState();\n\n memset(ssdatst+0x2E, 0, 32); //0002Eh-0004Dh - SubTitle/Song Name\n memset(ssdatst+0x4E, 0, 32); //0004Eh-0006Dh - Title of Game\n memcpy(ssdatst+0x4E, ((uint8_t *)romdata)+infoloc, 21);\n memset(ssdatst+0x6E, 0, 16); //0006Eh-0007Dh - Name of Dumper\n memset(ssdatst+0x7E, 0, 32); //0007Eh-0009Dh - Comments\n\n //0009Eh-000A1h - Date the SPC was Dumped\n ssdatst[0x9E] = lt->tm_mday;\n ssdatst[0x9F] = lt->tm_mon+1;\n ssdatst[0xA0] = (lt->tm_year+1900) & 0xFF;\n ssdatst[0xA1] = ((lt->tm_year+1900) >> 8) & 0xFF;\n\n memset(ssdatst+0xA2, 0, 7); //000A2h-000A8h - Reserved\n memset(ssdatst+0xA9, 0, 4); //000A9h-000ACh - Length of SPC in seconds\n memset(ssdatst+0xAD, 0, 3); //000ADh-000AFh - Fade out time in milliseconds\n memset(ssdatst+0xB0, 0, 32); 
//000B0h-000CFh - Author of Song\n\n //Set Channel Disables\n ssdatst[0xD0] = 0; //000D0h - Default Channel Disables (0 = enable, 1 = disable)\n if (Voice0Disable) { ssdatst[0xD0] |= BIT(0); }\n if (Voice1Disable) { ssdatst[0xD0] |= BIT(1); }\n if (Voice2Disable) { ssdatst[0xD0] |= BIT(2); }\n if (Voice3Disable) { ssdatst[0xD0] |= BIT(3); }\n if (Voice4Disable) { ssdatst[0xD0] |= BIT(4); }\n if (Voice5Disable) { ssdatst[0xD0] |= BIT(5); }\n if (Voice6Disable) { ssdatst[0xD0] |= BIT(6); }\n if (Voice7Disable) { ssdatst[0xD0] |= BIT(7); }\n\n ssdatst[0xD1] = 1; //000D1h - Emulator used to dump .spc file\n memset(ssdatst+0xD2, 0, 46); //000D2h-000FFh - Reserved\n\n fwrite(ssdatst, 1, sizeof(ssdatst), fp);\n //fwrite(SPCRAM, 1, 65536, fp); //00100h-100FFh - SPCRam\n\n for (i = 0; i < 128; i++) //10100h-1017Fh - DSPRam\n {\n //fputc(dsp_read(i), fp);\n }\n\n memset(ssdatst, 0, 64);\n fwrite(ssdatst, 64, 1, fp); //10180h-101BFh - Reserved\n //fwrite(spcextraram, 1, 64, fp); //101C0h-101FFh - IPL ROM image?\n fclose(fp);\n\n ResetState();\n\n sprintf(spcsaved, \"%s FILE SAVED.\", ZSaveName+fname_len-3);\n }\n }\n}\n\nvoid SaveGameSpecificInput()\n{\n if (!*ZSaveName)\n {\n psr_cfg_run(write_input_vars, ZCfgPath, \"zinput.cfg\");\n }\n\n if (GameSpecificInput && *ZSaveName)\n {\n setextension(ZSaveName, \"inp\");\n psr_cfg_run(write_input_vars, ZInpPath, ZSaveName);\n }\n}\n\nvoid LoadGameSpecificInput()\n{\n if (GameSpecificInput && *ZSaveName)\n {\n psr_cfg_run(read_input_vars, ZCfgPath, \"zinput.cfg\");\n\n setextension(ZSaveName, \"inp\");\n psr_cfg_run(read_input_vars, ZInpPath, ZSaveName);\n }\n}\n\n" }, { "alpha_fraction": 0.5993822813034058, "alphanum_fraction": 0.6066622734069824, "avg_line_length": 21.11219596862793, "blob_id": "0dd48149e7a9f5c980b8356f93ea01012c6ce7d4", "content_id": "9054391a07443737ba7c58358fc9d0c7c09f58f9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 4533, "license_type": "no_license", "max_line_length": 144, "num_lines": 205, "path": "/src/zdir.c", "repo_name": "TeoTwawki/zsnes-archive", "src_encoding": "UTF-8", "text": "/*\nCopyright (C) 1997-2008 ZSNES Team ( zsKnight, _Demo_, pagefault, Nach )\n\nhttp://www.zsnes.com\nhttp://sourceforge.net/projects/zsnes\nhttps://zsnes.bountysource.com\n\nThis program is free software; you can redistribute it and/or\nmodify it under the terms of the GNU General Public License\nversion 2 as published by the Free Software Foundation.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with this program; if not, write to the Free Software\nFoundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.\n*/\n\n#define _ATFILE_SOURCE\n#include <sys/stat.h>\n#include <fcntl.h>\n#include <stdlib.h>\n#include <string.h>\n#include <errno.h>\n#include \"zpath.h\"\n#include \"zdir.h\"\n\n#ifndef __UNIXSDL__\n\n#define FIND_GOOD(handle) ((handle) != -1)\n#define FIND_FAIL(handle) ((handle) == -1)\n#define ff_name name\n#define ff_fsize size\n#define ff_attrib attrib\n#define WILD_ALL \"*\"\n\n//Note, these are faster than the built in DJGPP/MinGW ones\nz_DIR *z_opendir(const char *path)\n{\n z_DIR *dir = 0;\n if (path && *path)\n {\n char search[PATH_SIZE];\n strcpy(search, path);\n strcatslash(search);\n strcat(search, WILD_ALL);\n\n dir = malloc(sizeof(z_DIR));\n if (dir)\n {\n dir->find_first_handle = _findfirst(search, &dir->fileinfo);\n if (FIND_FAIL(dir->find_first_handle))\n {\n //ENOENT set by findfirst already\n free(dir);\n dir = 0;\n }\n }\n else\n {\n errno = ENOMEM;\n }\n }\n else\n {\n errno = EINVAL;\n }\n\n return(dir);\n}\n\nstruct z_dirent *z_readdir(z_DIR *dir)\n{\n struct z_dirent *entry = 0;\n if (FIND_GOOD(dir->find_first_handle))\n {\n entry = &dir->entry;\n strcpy(entry->d_name, dir->fileinfo.ff_name);\n if (FIND_FAIL(_findnext(dir->find_first_handle, &dir->fileinfo)))\n {\n _findclose(dir->find_first_handle);\n dir->find_first_handle = -1;\n }\n }\n else\n {\n errno = EBADF;\n }\n\n return(entry);\n}\n\nint z_closedir(z_DIR *dir)\n{\n int result = 0;\n\n if (dir)\n {\n if (FIND_GOOD(dir->find_first_handle))\n {\n _findclose(dir->find_first_handle);\n }\n free(dir);\n }\n else\n {\n result = -1;\n errno = EBADF;\n }\n\n return(result);\n}\n\nstruct dirent_info *readdir_info(z_DIR *dir)\n{\n static struct dirent_info info;\n struct dirent_info *infop = 0;\n\n if (FIND_GOOD(dir->find_first_handle))\n {\n strcpy(dir->entry.d_name, dir->fileinfo.ff_name);\n\n info.name = dir->entry.d_name;\n info.size = dir->fileinfo.ff_fsize;\n info.mode = S_IREAD;\n if (!(dir->fileinfo.ff_attrib & _A_RDONLY)) { info.mode |= S_IWRITE; }\n if (dir->fileinfo.ff_attrib & _A_SUBDIR) { info.mode |= S_IFDIR; }\n else { info.mode |= S_IFREG; }\n infop = &info;\n\n if (FIND_FAIL(_findnext(dir->find_first_handle, &dir->fileinfo)))\n {\n _findclose(dir->find_first_handle);\n dir->find_first_handle = -1;\n }\n }\n else\n {\n errno = EBADF;\n }\n\n return(infop);\n}\n\n#else\n#include \"linux/lib.h\"\n\nstruct dirent_info *readdir_info(z_DIR *dir)\n{\n static struct dirent_info info;\n struct dirent_info *infop = 0;\n\n struct dirent *entry = readdir(dir);\n if (entry)\n {\n struct stat stat_buffer;\n if (!fstatat(dirfd(dir), entry->d_name, &stat_buffer, 0))\n {\n info.name = entry->d_name;\n info.size = stat_buffer.st_size;\n info.mode = stat_buffer.st_mode;\n info.uid = stat_buffer.st_uid;\n info.gid = stat_buffer.st_gid;\n infop = &info;\n }\n else\n {\n infop = readdir_info(dir);\n }\n }\n return(infop);\n}\n\nint dirent_access(struct dirent_info *entry, int mode)\n{\n int accessable = 0; //This is accessable, non access is -1\n\n if (!entry)\n {\n accessable = -1;\n errno = EACCES;\n }\n else if (mode)\n {\n uid_t uid = geteuid();\n gid_t gid = getegid();\n\n if (!(\n (!(mode&R_OK)||((entry->mode&S_IROTH)||((gid == entry->gid)&&(entry->mode&S_IRGRP))||((uid == entry->uid)&&(entry->mode&S_IRUSR)))) &&\n (!(mode&W_OK)||((entry->mode&S_IWOTH)||((gid == 
entry->gid)&&(entry->mode&S_IWGRP))||((uid == entry->uid)&&(entry->mode&S_IWUSR)))) &&\n (!(mode&X_OK)||((entry->mode&S_IXOTH)||((gid == entry->gid)&&(entry->mode&S_IXGRP))||((uid == entry->uid)&&(entry->mode&S_IXUSR))))\n ))\n {\n accessable = -1;\n errno = EACCES;\n }\n }\n\n return(accessable);\n}\n\n#endif\n" }, { "alpha_fraction": 0.7346774339675903, "alphanum_fraction": 0.7443548440933228, "avg_line_length": 25.382978439331055, "blob_id": "1d638c584a11632db6408e7f179548f266bcc7d2", "content_id": "71b79c80c5c1e8b62cb17aeba71df3693c22b7dc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 2480, "license_type": "no_license", "max_line_length": 93, "num_lines": 94, "path": "/src/win/winlink.h", "repo_name": "TeoTwawki/zsnes-archive", "src_encoding": "UTF-8", "text": "/*\nCopyright (C) 1997-2008 ZSNES Team ( zsKnight, _Demo_, pagefault, Nach )\n\nhttp://www.zsnes.com\nhttp://sourceforge.net/projects/zsnes\nhttps://zsnes.bountysource.com\n\nThis program is free software; you can redistribute it and/or\nmodify it under the terms of the GNU General Public License\nversion 2 as published by the Free Software Foundation.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with this program; if not, write to the Free Software\nFoundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.\n*/\n\n#ifndef WINLINK_H\n#define WINLINK_H\n\ntypedef HRESULT (WINAPI* lpDirectDrawCreateEx)(GUID FAR *lpGuid, LPVOID *lplpDD, REFIID iid,\n IUnknown FAR *pUnkOuter);\n\n#ifdef __cplusplus\nextern \"C\" {\n#endif\n extern BYTE changeRes;\n extern DWORD converta;\n extern unsigned int BitConv32Ptr;\n extern unsigned int RGBtoYUVPtr;\n extern unsigned short resolutn;\n extern BYTE PrevRes;\n extern BYTE GUIWFVID[];\n extern BYTE GUIDSIZE[];\n extern BYTE GUISMODE[];\n extern BYTE GUIDSMODE[];\n extern BYTE GUIHQ2X[];\n extern BYTE GUIHQ3X[];\n extern BYTE GUIHQ4X[];\n extern BYTE GUINTVID[];\n extern BYTE hqFilterlevel;\n extern WORD totlines;\n extern DWORD CurMode;\n extern DWORD WindowWidth;\n extern DWORD WindowHeight;\n extern DWORD SurfaceX;\n extern DWORD SurfaceY;\n extern BYTE BitDepth;\n extern DWORD GBitMask;\n extern WORD Refresh;\n extern DWORD FirstVid;\n extern DWORD FirstFull;\n extern DWORD DMode;\n extern DWORD SMode;\n extern DWORD DSMode;\n extern DWORD NTSCMode;\n extern DWORD prevHQMode;\n extern DWORD prevNTSCMode;\n extern DWORD prevScanlines;\n extern HWND hMainWindow;\n extern BYTE curblank;\n extern WORD totlines;\n extern DWORD FullScreen;\n extern RECT rcWindow;\n extern RECT BlitArea;\n extern BYTE AltSurface;\n extern lpDirectDrawCreateEx pDirectDrawCreateEx;\n extern BYTE *SurfBuf;\n extern int X;\n extern DWORD newmode;\n extern WINDOWPLACEMENT wndpl;\n extern RECT rc1;\n\n void Clear2xSaIBuffer();\n void drawscreenwin();\n void clear_display();\n char CheckTVRatioReq();\n void KeepTVRatio();\n\n void CheckAlwaysOnTop();\n\n#ifdef __cplusplus\n}\n\nBOOL ReInitSound();\nvoid ReleaseDirectDraw();\nvoid DrawScreen();\n#endif\n\n#endif\n" }, { "alpha_fraction": 0.5174000859260559, "alphanum_fraction": 0.5726200938224792, "avg_line_length": 30.60909080505371, "blob_id": "c1ce36d96cbca7a0c9a6011e60a6371981a12922", "content_id": "d25a9cee59368177205a16dc31fe634013c7f862", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 3477, "license_type": "no_license", "max_line_length": 77, "num_lines": 110, "path": "/src/chips/sa1emu.c", "repo_name": "TeoTwawki/zsnes-archive", "src_encoding": "UTF-8", "text": "/*\nCopyright (C) 1997-2007 ZSNES Team ( zsKnight, _Demo_, pagefault, Nach )\n\nhttp://www.zsnes.com\nhttp://sourceforge.net/projects/zsnes\nhttps://zsnes.bountysource.com\n\nThis program is free software; you can redistribute it and/or\nmodify it under the terms of the GNU General Public License\nversion 2 as published by the Free Software Foundation.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with this program; if not, write to the Free Software\nFoundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.\n*/\n\n#include <stdint.h>\n\nextern unsigned char SA1_BRF[16];\nextern unsigned int SA1_CC2_line; //should be cleared to zero on reset\n\nextern unsigned int SA1DMAChar, SA1DMADest, SA1DMASource;\nextern uint8_t IRAM[2048], *SA1RAMArea;\n\n#define DMACB (SA1DMAChar&3)\n#define DDA SA1DMADest\n#define SA1_IRAM IRAM\n\nvoid SA1_DMA_CC2() {\n //select register file index (0-7 or 8-15)\n const unsigned char *brf = &SA1_BRF[(SA1_CC2_line & 1) << 3];\n unsigned bpp = 2 << (2 - DMACB);\n unsigned addr = DDA & 0x07ff;\n unsigned byte;\n addr &= ~((1 << (7 - DMACB)) - 1);\n addr += (SA1_CC2_line & 8) * bpp;\n addr += (SA1_CC2_line & 7) * 2;\n\n for(byte = 0; byte < bpp; byte++) {\n uint8_t output = 0;\n unsigned bit;\n for(bit = 0; bit < 8; bit++) {\n output |= ((brf[bit] >> byte) & 1) << (7 - bit);\n }\n SA1_IRAM[(addr + ((byte & 6) << 3) + (byte & 1)) & 0x07ff] = output;\n }\n\n SA1_CC2_line = (SA1_CC2_line + 1) & 15;\n}\n\n#define SA1_BWRAM SA1RAMArea\n#define DSA SA1DMASource\n#define BWRAM_SIZE 0x40000\n#define DMASIZE ((SA1DMAChar>>2)&7)\n\nunsigned char SA1_DMA_VALUE;\nunsigned int SA1_DMA_ADDR;\n\nvoid SA1_DMA_CC1()\n{\n //16 bytes/char (2bpp); 32 bytes/char (4bpp); 64 bytes/char (8bpp)\n unsigned charmask = (1 << (6 - DMACB)) - 1;\n\n if((SA1_DMA_ADDR & charmask) == 0) {\n //buffer next character to I-RAM\n unsigned bpp = 2 << (2 - DMACB);\n unsigned bpl = (8 << DMASIZE) >> DMACB;\n unsigned bwmask = BWRAM_SIZE - 1;\n unsigned tile = ((SA1_DMA_ADDR - DSA) & bwmask) >> (6 - DMACB);\n unsigned ty = (tile >> DMASIZE);\n unsigned tx = tile & ((1 << DMASIZE) - 1);\n unsigned bwaddr = DSA + ty * 8 * bpl + tx * bpp;\n unsigned y;\n\n for(y = 0; y < 8; y++) {\n uint64_t data = 0;\n unsigned byte, x;\n for(byte = 0; byte < bpp; byte++) {\n data |= (uint64_t)SA1_BWRAM[(bwaddr + byte) & bwmask] << (byte << 3);\n }\n bwaddr += bpl;\n\n uint8_t out[] = { 0, 0, 0, 0, 0, 0, 0, 0 };\n for(x = 0; x < 8; x++) {\n out[0] |= (data & 1) << (7 - x); data >>= 1;\n out[1] |= (data & 1) << (7 - x); data >>= 1;\n if(DMACB == 2) continue;\n out[2] |= (data & 1) << (7 - x); data >>= 1;\n out[3] |= (data & 1) << (7 - x); data >>= 1;\n if(DMACB == 1) continue;\n out[4] |= (data & 1) << (7 - x); data >>= 1;\n out[5] |= (data & 1) << (7 - x); data >>= 1;\n out[6] |= (data & 1) << (7 - x); data >>= 1;\n out[7] |= (data & 1) << (7 - x); data >>= 1;\n }\n\n for(byte = 0; byte < bpp; byte++) {\n unsigned p = DDA + (y << 1) + ((byte & 6) << 3) + (byte & 1);\n SA1_IRAM[p & 0x07ff] 
= out[byte];\n }\n }\n }\n\n SA1_DMA_VALUE = SA1_IRAM[(DDA + (SA1_DMA_ADDR & charmask)) & 0x07ff];\n}\n" }, { "alpha_fraction": 0.5287202596664429, "alphanum_fraction": 0.5945713520050049, "avg_line_length": 24.457420349121094, "blob_id": "da99bfab8f0222bb9135f05eee70960b417e3a10", "content_id": "ab18bc3cad5187027839e7e92a900f3eeb40644e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 10463, "license_type": "no_license", "max_line_length": 89, "num_lines": 411, "path": "/src/win/gl_draw.c", "repo_name": "TeoTwawki/zsnes-archive", "src_encoding": "UTF-8", "text": "/*\nCopyright (C) 1997-2008 ZSNES Team ( zsKnight, _Demo_, pagefault, Nach )\n\nhttp://www.zsnes.com\nhttp://sourceforge.net/projects/zsnes\nhttps://zsnes.bountysource.com\n\nThis program is free software; you can redistribute it and/or\nmodify it under the terms of the GNU General Public License\nversion 2 as published by the Free Software Foundation.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with this program; if not, write to the Free Software\nFoundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.\n*/\n\n#include <windows.h>\n#include <stdint.h>\n#include <GL/gl.h>\n#include <GL/glext.h>\n#include \"gl_draw.h\"\n#include \"../cfg.h\"\n#include \"winlink.h\"\n\n\n// OPENGL VARIABLES\nstatic unsigned short *glvidbuffer = 0;\nstatic GLuint gltextures[4];\nstatic uint32_t gltexture256, gltexture512;\nstatic uint32_t glfilters = GL_NEAREST;\nstatic uint32_t glscanready = 0;\n\nint gl_start(int width, int height, int req_depth, int FullScreen)\n{\n int i;\n\n PIXELFORMATDESCRIPTOR pfd;\n int iFormat;\n\n /* get the device context (DC) */\n hDC = GetDC(hMainWindow);\n\n /* set the pixel format for the DC */\n ZeroMemory(&pfd, sizeof(pfd));\n pfd.nSize = sizeof(pfd);\n pfd.nVersion = 1;\n pfd.dwFlags = PFD_DRAW_TO_WINDOW | PFD_SUPPORT_OPENGL | PFD_DOUBLEBUFFER;\n pfd.iPixelType = PFD_TYPE_RGBA;\n pfd.cColorBits = 16;\n pfd.cDepthBits = 32;\n pfd.iLayerType = PFD_MAIN_PLANE;\n iFormat = ChoosePixelFormat(hDC, &pfd);\n SetPixelFormat(hDC, iFormat, &pfd);\n\n /* create and enable the render context (RC) */\n hRC = wglCreateContext(hDC);\n wglMakeCurrent(hDC, hRC);\n\n SurfaceX = width; SurfaceY = height;\n glvidbuffer = (unsigned short*)malloc(512 * 512 * sizeof(short));\n gl_clearwin();\n if (BilinearFilter)\n {\n glfilters = GL_LINEAR;\n if (GUIOn2 && !FilteredGUI)\n {\n glfilters = GL_NEAREST;\n }\n }\n else\n {\n glfilters = GL_NEAREST;\n }\n\n\n // Grab mouse in fullscreen mode\n //todo\n\n\n\n /* Setup some GL stuff */\n\n glEnable(GL_TEXTURE_1D);\n glEnable(GL_TEXTURE_2D);\n\n glViewport(0, 0, SurfaceX, SurfaceY);\n\n /*\n * gltextures[0]: 2D texture, 256x224\n * gltextures[1]: 2D texture, 512x224\n * gltextures[3]: 1D texture, 256 lines of alternating alpha\n */\n glGenTextures(4, gltextures);\n for (i = 0; i < 3; i++)\n {\n glBindTexture(GL_TEXTURE_2D, gltextures[i]);\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, glfilters);\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, glfilters);\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP);\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP);\n }\n\n if (sl_intensity)\n {\n gl_scanlines();\n }\n\n return TRUE;\n}\n\nvoid 
gl_end()\n{\n wglMakeCurrent(NULL, NULL);\n wglDeleteContext(hRC);\n ReleaseDC(hMainWindow, hDC);\n\n glDeleteTextures(4, gltextures);\n free(glvidbuffer);\n}\n\nextern uint32_t AddEndBytes;\nextern uint32_t NumBytesPerLine;\nextern uint8_t *WinVidMemStart;\nextern uint8_t NGNoTransp;\nvoid copy640x480x16bwin();\nextern uint8_t SpecialLine[224]; /* 0 if lo-res, > 0 if hi-res */\n\nvoid gl_clearwin()\n{\n glClear(GL_COLOR_BUFFER_BIT);\n if (En2xSaI)\n {\n memset(glvidbuffer, 0, 512 * 448 * 2);\n }\n}\n\n/* gl_drawspan:\n * Puts a quad on the screen for hires/lores portions, starting at line start,\n * and ending at line end..\n * Builds the 256x256/512x256 textures if gltexture256 or gltexture512 == 0\n */\nstatic void gl_drawspan(int hires, int start, int end)\n{\n int i, j;\n\n switch (hires)\n {\n case 0:\n break;\n case 3:\n case 7:\n hires = 2;\n break;\n default:\n hires = 1;\n break;\n }\n\n if (hires)\n {\n if (hires != gltexture512)\n {\n unsigned short *vbuf1 = &((unsigned short *)vidbuffer)[16];\n unsigned short *vbuf2 = &((unsigned short *)vidbuffer)[75036 * 2 + 16];\n unsigned short *vbuf = &glvidbuffer[0];\n\n if (hires > 1) // mode 7\n {\n for (j = 224; j--;)\n {\n for (i = 256; i--;)\n {\n *vbuf++ = *vbuf1++;\n }\n for (i = 256; i--;)\n {\n *vbuf++ = *vbuf2++;\n }\n vbuf1 += 32;\n vbuf2 += 32;\n }\n glBindTexture(GL_TEXTURE_2D, gltextures[1]);\n glTexImage2D(GL_TEXTURE_2D, 0, 3, 256, 512, 0, GL_RGB, GL_UNSIGNED_SHORT_5_6_5,\n glvidbuffer);\n\n gltexture512 = 2;\n }\n else\n {\n for (j = 224; j--;)\n {\n for (i = 256; i--;)\n {\n *vbuf++ = *vbuf1++;\n *vbuf++ = *vbuf2++;\n }\n vbuf1 += 32;\n vbuf2 += 32; // skip the two 16-pixel-wide columns\n }\n\n glBindTexture(GL_TEXTURE_2D, gltextures[1]);\n glTexImage2D(GL_TEXTURE_2D, 0, 3, 512, 256, 0, GL_RGB, GL_UNSIGNED_SHORT_5_6_5,\n glvidbuffer);\n\n gltexture512 = 1;\n }\n }\n\n glBindTexture(GL_TEXTURE_2D, gltextures[1]);\n glBegin(GL_QUADS);\n glTexCoord2f(0.0f, (224.0 / 256.0) * (start / 224.0));\n glVertex2f(-1.0f, (112 - start) / 112.0);\n glTexCoord2f(1.0f, (224.0 / 256.0) * (start / 224.0));\n glVertex2f(1.0f, (112 - start) / 112.0);\n glTexCoord2f(1.0f, (224.0 / 256.0) * (end / 224.0));\n glVertex2f(1.0f, (112 - end) / 112.0);\n glTexCoord2f(0.0f, (224.0 / 256.0) * (end / 224.0));\n glVertex2f(-1.0f, (112 - end) / 112.0);\n glEnd();\n }\n else\n {\n glBindTexture(GL_TEXTURE_2D, gltextures[0]);\n if (!gltexture256)\n {\n glPixelStorei(GL_UNPACK_SKIP_PIXELS, 16);\n glPixelStorei(GL_UNPACK_ROW_LENGTH, 288);\n\n glTexImage2D(GL_TEXTURE_2D, 0, 3, 256, 256, 0, GL_RGB, GL_UNSIGNED_SHORT_5_6_5,\n ((unsigned short*)vidbuffer) + 288);\n\n glPixelStorei(GL_UNPACK_SKIP_PIXELS, 0);\n glPixelStorei(GL_UNPACK_ROW_LENGTH, 0);\n\n gltexture256 = 1;\n }\n\n glBegin(GL_QUADS);\n glTexCoord2f(0.0f, (224.0 / 256.0) * (start / 224.0));\n glVertex2f(-1.0f, (112 - start) / 112.0);\n glTexCoord2f(1.0f, (224.0 / 256.0) * (start / 224.0));\n glVertex2f(1.0f, (112 - start) / 112.0);\n glTexCoord2f(1.0f, (224.0 / 256.0) * (end / 224.0));\n glVertex2f(1.0f, (112 - end) / 112.0);\n glTexCoord2f(0.0f, (224.0 / 256.0) * (end / 224.0));\n glVertex2f(-1.0f, (112 - end) / 112.0);\n glEnd();\n }\n}\n\nvoid gl_drawwin()\n{\n int i;\n\n NGNoTransp = 0; // Set this value to 1 within the appropriate\n // video mode if you want to add a custom\n // transparency routine or hardware\n // transparency. 
This only works if\n // the value of newengen is equal to 1.\n // (see ProcessTransparencies in newgfx16.asm\n // for ZSNES' current transparency code)\n UpdateVFrame();\n if (curblank != 0)\n {\n return;\n }\n\n if (SurfaceX >= 512 && (hqFilter || En2xSaI) && 0)\n {\n AddEndBytes = 0;\n NumBytesPerLine = 1024;\n WinVidMemStart = (void*)glvidbuffer;\n\n if (hqFilter)\n {\n hq2x_16b();\n }\n else\n {\n copy640x480x16bwin();\n }\n\n /* Display 1 512x448 quad for the 512x448 buffer */\n glBindTexture(GL_TEXTURE_2D, gltextures[1]);\n glTexEnvi(GL_TEXTURE_2D, GL_TEXTURE_ENV_MODE, GL_DECAL);\n glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, 512, 512, 0, GL_RGB, GL_UNSIGNED_SHORT_5_6_5,\n glvidbuffer);\n\n glDisable(GL_DEPTH_TEST);\n glDisable(GL_LIGHTING);\n glDisable(GL_BLEND);\n\n glBegin(GL_QUADS);\n glTexCoord2f(0.0f, 0.0f);\n glVertex3f(-1.0f, 1.0f, -1.0f);\n glTexCoord2f(1.0f, 0.0f);\n glVertex3f(1.0f, 1.0f, -1.0f);\n glTexCoord2f(1.0f, 448.0f / 512.0f);\n glVertex3f(1.0f, -1.0f, -1.0f);\n glTexCoord2f(0.0f, 448.0f / 512.0f);\n glVertex3f(-1.0f, -1.0f, -1.0f);\n glEnd();\n }\n else\n {\n /*\n * This code splits the hires/lores portions up, and draws\n * them with gl_drawspan\n */\n int lasthires, lasthires_line = 0;\n\n gltexture256 = gltexture512 = 0;\n\n lasthires = SpecialLine[1];\n for (i = 0; i < 224; i++)\n {\n if (SpecialLine[i + 1])\n {\n if (lasthires)\n {\n continue;\n }\n gl_drawspan(lasthires, lasthires_line, i);\n\n lasthires = SpecialLine[i + 1];\n lasthires_line = i;\n }\n else\n {\n if (!lasthires)\n {\n continue;\n }\n gl_drawspan(lasthires, lasthires_line, i);\n\n lasthires = SpecialLine[i + 1];\n lasthires_line = i;\n }\n }\n\n if (i - lasthires_line > 1)\n {\n gl_drawspan(lasthires, lasthires_line, i);\n }\n\n /*\n * This is here rather than right outside this if because the\n * GUI doesn't allow scanlines to be selected while filters are\n * on.. There is no technical reason they can't be on while\n * filters are on, however. 
Feel free to change the GUI, and\n * move this outside the if (En2xSaI) {}, if you do.\n */\n if (sl_intensity)\n {\n glDisable(GL_TEXTURE_2D);\n glEnable(GL_BLEND);\n\n if (sl_intensity != glscanready)\n {\n gl_scanlines();\n }\n\n glBlendFunc(GL_DST_COLOR, GL_ZERO);\n glBindTexture(GL_TEXTURE_1D, gltextures[3]);\n glBegin(GL_QUADS);\n for (i = 0; i < SurfaceY; i += 256)\n {\n glTexCoord1f(0.0f);\n glVertex3f(-1.0f, (SurfaceY - i * 2.0) / SurfaceY, -1.0f);\n glTexCoord1f(0.0f);\n glVertex3f(1.0f, (SurfaceY - i * 2.0) / SurfaceY, -1.0f);\n glTexCoord1f(1.0f);\n glVertex3f(1.0f, (SurfaceY - (i + 256) * 2.0) / SurfaceY, -1.0f);\n glTexCoord1f(1.0f);\n glVertex3f(-1.0f, (SurfaceY - (i + 256) * 2.0) / SurfaceY, -1.0f);\n }\n glEnd();\n\n glDisable(GL_BLEND);\n glEnable(GL_TEXTURE_2D);\n }\n }\n SwapBuffers(hDC);\n}\n\nvoid gl_scanlines()\n{\n GLubyte scanbuffer[256][4];\n int i, j = (100 - sl_intensity) * 256 / 100;\n\n for (i = 0; i < 256; i += 2)\n {\n scanbuffer[i][0] = scanbuffer[i][1] = scanbuffer[i][2] = j;\n scanbuffer[i][3] = 0xFF;\n\n scanbuffer[i + 1][0] = scanbuffer[i + 1][1] = scanbuffer[i + 1][2] = 0xFF;\n scanbuffer[i + 1][3] = 0xFF;\n }\n\n glBindTexture(GL_TEXTURE_1D, gltextures[3]);\n glTexParameteri(GL_TEXTURE_1D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);\n glTexParameteri(GL_TEXTURE_1D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);\n glTexImage1D(GL_TEXTURE_1D, 0, GL_RGBA, 256, 0, GL_RGBA, GL_UNSIGNED_BYTE, scanbuffer);\n\n glscanready = sl_intensity;\n}\n" }, { "alpha_fraction": 0.7209302186965942, "alphanum_fraction": 0.7358803749084473, "avg_line_length": 26.363636016845703, "blob_id": "e7baf16ad9894d4236f432ae7c91176a9ee854cd", "content_id": "6f11c7b29d8af963e3d808ce9eef7c1b3b331f4d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1204, "license_type": "no_license", "max_line_length": 72, "num_lines": 44, "path": "/src/win/lib.c", "repo_name": "TeoTwawki/zsnes-archive", "src_encoding": "UTF-8", "text": "/*\nCopyright (C) 1997-2008 ZSNES Team ( zsKnight, _Demo_, pagefault, Nach )\n\nhttp://www.zsnes.com\nhttp://sourceforge.net/projects/zsnes\nhttps://zsnes.bountysource.com\n\nThis program is free software; you can redistribute it and/or\nmodify it under the terms of the GNU General Public License\nversion 2 as published by the Free Software Foundation.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with this program; if not, write to the Free Software\nFoundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.\n*/\n\n#include \"lib.h\"\n#include <stdlib.h>\n#include <string.h>\n#include <errno.h>\n#include \"../zpath.h\"\n\n#define fullpath _fullpath\n\n\n//This file contains library functions that can be found on other OSs\n\nchar *realpath(const char *path, char *resolved_path)\n{\n char *ret = 0;\n\n if (!path || !resolved_path) { errno = EINVAL; }\n else if (!access(path, F_OK))\n {\n ret = fullpath(resolved_path, path, PATH_SIZE);\n }\n\n return(ret);\n}\n" }, { "alpha_fraction": 0.6209016442298889, "alphanum_fraction": 0.6789617538452148, "avg_line_length": 28.280000686645508, "blob_id": "adca5ec5c850076e868f1def143c652defdc7e72", "content_id": "b3e6d8e86357a69bc7b53c40de96fd981713de8e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1464, "license_type": "no_license", "max_line_length": 121, "num_lines": 50, "path": "/src/tools/cver.c", "repo_name": "TeoTwawki/zsnes-archive", "src_encoding": "UTF-8", "text": "/*\nCopyright (C) 2007-2008 Nach ( http://www.zsnes.com )\n\nThis program is free software; you can redistribute it and/or\nmodify it under the terms of the GNU General Public License\nversion 2 as published by the Free Software Foundation.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with this program; if not, write to the Free Software\nFoundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.\n*/\n\n#include <stdio.h>\n\n#if defined(__i386__) || defined(_M_IX86)\nstatic int x86_32 = 1;\n#else\nstatic int x86_32 = 0;\n#endif\n\n#if defined(__x86_64__) || defined(_M_X64)\nstatic int x86_64 = 1;\n#else\nstatic int x86_64 = 0;\n#endif\n\n#ifdef __STDC_VERSION__\nstatic int stdcv = __STDC_VERSION__;\n#else\nstatic int stdcv = 198900;\n#endif\n\nint main()\n{\n #ifdef __GNUC__\n printf(\"Compiler: GCC\\nMajor: %u\\nMinor: %u\\nMicro: %u\\nVersion: %s\\n\",\n __GNUC__, __GNUC_MINOR__, __GNUC_PATCHLEVEL__, __VERSION__);\n #elif defined(_MSC_VER)\n printf(\"Compiler: MSVC\\nMajor: %u\\nMinor: %u\\nVersion: %u\\n\",\n _MSC_VER/100, _MSC_VER%100, _MSC_VER);\n #endif\n printf(\"C99: %s\\nx86-32: %s\\nx86-64: %s\\n\", (stdcv >= 199901L) ? \"Yes\":\"No\", x86_32 ? \"Yes\":\"No\", x86_64 ? 
\"Yes\":\"No\");\n fflush(stdout);\n return(0);\n}\n" }, { "alpha_fraction": 0.7241379022598267, "alphanum_fraction": 0.7241379022598267, "avg_line_length": 18.33333396911621, "blob_id": "41ccbe378ec8e5d5c6e4832426f8e9a3eed3961c", "content_id": "8b96e2cb8dc9e5212553953aa7abb5d3e972d1d1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 116, "license_type": "no_license", "max_line_length": 71, "num_lines": 6, "path": "/src/gui/gui.cpp", "repo_name": "TeoTwawki/zsnes-archive", "src_encoding": "UTF-8", "text": "#include \"gui.h\"\n\nZSNESMainForm::ZSNESMainForm(QMainWindow *parent) : QMainWindow(parent)\n{\n ui.setupUi(this);\n}\n" }, { "alpha_fraction": 0.7337786555290222, "alphanum_fraction": 0.75, "avg_line_length": 22.288888931274414, "blob_id": "367112ae9c5f40c9b3e2d2d30fbc659f66d75269", "content_id": "374fc4db4327dc0d8fdd79096038bf6682658311", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1048, "license_type": "no_license", "max_line_length": 72, "num_lines": 45, "path": "/src/debugger/zthread.h", "repo_name": "TeoTwawki/zsnes-archive", "src_encoding": "UTF-8", "text": "/*\nCopyright (C) 1997-2008 ZSNES Team ( zsKnight, _Demo_, pagefault, Nach )\n\nhttp://www.zsnes.com\nhttp://sourceforge.net/projects/zsnes\nhttps://zsnes.bountysource.com\n\nThis program is free software; you can redistribute it and/or\nmodify it under the terms of the GNU General Public License\nversion 2 as published by the Free Software Foundation.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with this program; if not, write to the Free Software\nFoundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.\n*/\n\n#ifndef ZTHREAD_H\n#define ZTHREAD_H\n\n#include <QThread>\n#include <setjmp.h>\n\nclass ZSNESThread : public QThread\n{\n Q_OBJECT\n\n private:\n bool running;\n jmp_buf jump;\n\n public:\n ZSNESThread();\n void run();\n void done();\n\n public slots:\n void prepare_close();\n};\n\n#endif\n" }, { "alpha_fraction": 0.7412333488464355, "alphanum_fraction": 0.7750906944274902, "avg_line_length": 30.80769157409668, "blob_id": "2a9efad5c462e038347a241eaa5b74745197444d", "content_id": "b43c182f28f5b54e4967e46c44c345ba6c909ea6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 827, "license_type": "no_license", "max_line_length": 94, "num_lines": 26, "path": "/src/jma/crc32.h", "repo_name": "TeoTwawki/zsnes-archive", "src_encoding": "UTF-8", "text": "/*\nCopyright (C) 2004-2008 NSRT Team ( http://nsrt.edgeemu.com )\n\nThis program is free software; you can redistribute it and/or\nmodify it under the terms of the GNU General Public License\nversion 2 as published by the Free Software Foundation.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with this program; if not, write to the Free Software\nFoundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.\n*/\n\n#ifndef CRC32_H\n#define CRC32_H\n\nnamespace CRC32lib\n{\n unsigned int CRC32(const unsigned char *, size_t, register unsigned int crc32 = 0xFFFFFFFF);\n}\n\n#endif\n" }, { "alpha_fraction": 0.6356368660926819, "alphanum_fraction": 0.6560975313186646, "avg_line_length": 23.35313606262207, "blob_id": "641cbb916aff5bf4081f7cb827c8a9cb2eaeced9", "content_id": "3bd3207930ade2e531758913b990d4ebac4e901e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 7380, "license_type": "no_license", "max_line_length": 147, "num_lines": 303, "path": "/src/linux/audio.c", "repo_name": "TeoTwawki/zsnes-archive", "src_encoding": "UTF-8", "text": "/*\nCopyright (C) 1997-2008 ZSNES Team ( zsKnight, _Demo_, pagefault, Nach )\n\nhttp://www.zsnes.com\nhttp://sourceforge.net/projects/zsnes\nhttps://zsnes.bountysource.com\n\nThis program is free software; you can redistribute it and/or\nmodify it under the terms of the GNU General Public License\nversion 2 as published by the Free Software Foundation.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with this program; if not, write to the Free Software\nFoundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.\n*/\n\n#include \"../gblhdr.h\"\n#include <stdbool.h>\n\n#ifdef __LIBAO__\n#include <ao/ao.h>\n#include <pthread.h>\n#include <signal.h>\n#endif\n\n#include \"../asm_call.h\"\n#include \"../cfg.h\"\n#include \"../cpu/zspc/zspc.h\"\n\n#ifdef __LIBAO__\nstatic pthread_t audio_thread;\nstatic pthread_mutex_t audio_mutex = PTHREAD_MUTEX_INITIALIZER;\nstatic pthread_cond_t audio_wait = PTHREAD_COND_INITIALIZER;\nstatic ao_device *audio_device = 0;\nstatic volatile size_t samples_waiting = false;\n#endif\n\nunsigned char *sdl_audio_buffer = 0;\nint sdl_audio_buffer_len = 0, sdl_audio_buffer_fill = 0;\nint sdl_audio_buffer_head = 0, sdl_audio_buffer_tail = 0;\nunsigned char sound_sdl = false;\n\nint SoundEnabled = 1;\nunsigned char PrevStereoSound;\nunsigned int PrevSoundQuality;\n\nstatic const int freqtab[7] = { 8000, 11025, 22050, 44100, 16000, 32000, 48000 };\n#define RATE freqtab[SoundQuality = ((SoundQuality > 6) ? 
1 : SoundQuality)]\n\n\n#ifdef __LIBAO__\nstatic short ao_buffer1[1070];\nstatic short ao_buffer2[2048];\nstatic unsigned int ao_samples = 0;\n\nvoid SoundWrite_ao()\n{\n if (!pthread_mutex_trylock(&audio_mutex))\n {\n zspc_flush_samples();\n ao_samples = zspc_sample_count();\n //printf(\"Samples Ready: %u\\n\", ao_samples);\n memcpy(ao_buffer2, ao_buffer1, ao_samples*sizeof(short));\n zspc_set_output(ao_buffer1, sizeof(ao_buffer1)/sizeof(short));\n\n pthread_cond_broadcast(&audio_wait); //Send signal\n pthread_mutex_unlock(&audio_mutex);\n }\n else\n {\n pthread_cond_broadcast(&audio_wait); //Send signal\n }\n}\n\nstatic void *SoundThread_ao(void *useless)\n{\n for (;;)\n {\n unsigned int samples;\n pthread_mutex_lock(&audio_mutex);\n\n //The while() is there to prevent error codes from breaking havoc\n while (!ao_samples)\n {\n pthread_cond_wait(&audio_wait, &audio_mutex); //Wait for signal\n }\n\n samples = ao_samples;\n ao_samples = 0;\n pthread_mutex_unlock(&audio_mutex);\n\n ao_play(audio_device, (char*)ao_buffer2, samples*2);\n }\n return(0);\n}\n\nstatic int SoundInit_ao()\n{\n int driver_id = ao_driver_id(libAoDriver);\n if (driver_id < 0) { driver_id = ao_default_driver_id(); }\n\n ao_sample_format driver_format;\n driver_format.bits = 16;\n driver_format.channels = StereoSound+1;\n driver_format.rate = freqtab[SoundQuality = ((SoundQuality > 6) ? 1 : SoundQuality)];\n driver_format.byte_format = AO_FMT_LITTLE;\n\n if (audio_device)\n {\n ao_close(audio_device);\n }\n else\n {\n if (pthread_create(&audio_thread, 0, SoundThread_ao, 0))\n {\n puts(\"pthread_create() failed.\");\n }\n }\n\n //ao_option driver_options = { \"buf_size\", \"32768\", 0 };\n\n audio_device = ao_open_live(driver_id, &driver_format, 0);\n if (audio_device)\n {\n ao_info *di = ao_driver_info(driver_id);\n printf(\"\\nAudio Opened.\\nDriver: %s\\nChannels: %u\\nRate: %u\\n\\n\", di->name, driver_format.channels, driver_format.rate);\n\n memset(ao_buffer1, 0, sizeof(ao_buffer1));\n memset(ao_buffer2, 0, sizeof(ao_buffer2));\n zspc_set_output(ao_buffer1, sizeof(ao_buffer1)/sizeof(short));\n }\n else\n {\n SoundEnabled = 0;\n puts(\"Audio Open Failed\");\n return(false);\n }\n return(true);\n}\n\n#endif\n\nvoid SoundWrite_sdl()\n{\n/*\n SDL_LockAudio();\n if (dsp_sample_count) //Lets have less memset()s\n {\n sdl_audio_buffer_tail += dsp_samples_pull((short *)(sdl_audio_buffer+sdl_audio_buffer_tail), (sdl_audio_buffer_len-sdl_audio_buffer_tail)/2)*2;\n }\n SDL_UnlockAudio();\n*/\n/*\n extern unsigned int T36HZEnabled;\n\n SDL_LockAudio();\n while (dsp_sample_count && (sdl_audio_buffer_fill < sdl_audio_buffer_len))\n {\n short *dest = (short *)(sdl_audio_buffer+sdl_audio_buffer_tail);\n size_t pull = 512;\n if (T36HZEnabled)\n {\n memset(dest, 0, pull);\n }\n else\n {\n pull = dsp_samples_pull(dest, 256)*2;\n }\n\n sdl_audio_buffer_fill += pull;\n sdl_audio_buffer_tail += pull;\n if (sdl_audio_buffer_tail >= sdl_audio_buffer_len) { sdl_audio_buffer_tail = 0; }\n }\n SDL_UnlockAudio();\n*/\n}\n\nstatic void SoundUpdate_sdl(void *userdata, unsigned char *stream, int len)\n{\n size_t extra = 0;\n if (len > sdl_audio_buffer_tail)\n {\n extra = len-sdl_audio_buffer_tail;\n len = sdl_audio_buffer_tail;\n }\n if (len)\n {\n memcpy(stream, sdl_audio_buffer, len);\n sdl_audio_buffer_tail -= len;\n memmove(sdl_audio_buffer, sdl_audio_buffer+len, sdl_audio_buffer_tail);\n }\n if (extra)\n {\n memset(stream+len, 0, extra);\n }\n/*\n int left = sdl_audio_buffer_len - sdl_audio_buffer_head;\n\n if (left > 0)\n {\n if (left <= 
len)\n {\n memcpy(stream, &sdl_audio_buffer[sdl_audio_buffer_head], left);\n stream += left;\n len -= left;\n sdl_audio_buffer_head = 0;\n sdl_audio_buffer_fill -= left;\n }\n\n if (len)\n {\n memcpy(stream, &sdl_audio_buffer[sdl_audio_buffer_head], len);\n sdl_audio_buffer_head += len;\n sdl_audio_buffer_fill -= len;\n }\n }\n*/\n}\n\nstatic int SoundInit_sdl()\n{\n const int samptab[7] = { 1, 1, 2, 4, 2, 4, 4 };\n SDL_AudioSpec audiospec;\n SDL_AudioSpec wanted;\n\n SDL_CloseAudio();\n\n if (sdl_audio_buffer)\n {\n free(sdl_audio_buffer);\n sdl_audio_buffer = 0;\n }\n sdl_audio_buffer_len = 0;\n\n wanted.freq = RATE;\n wanted.channels = StereoSound+1;\n wanted.samples = samptab[SoundQuality] * 128 * wanted.channels;\n wanted.format = AUDIO_S16LSB;\n wanted.userdata = 0;\n wanted.callback = SoundUpdate_sdl;\n\n if (SDL_OpenAudio(&wanted, &audiospec) < 0)\n {\n SoundEnabled = 0;\n return(false);\n }\n SDL_PauseAudio(0);\n\n sdl_audio_buffer_len = audiospec.size*2;\n sdl_audio_buffer_len = (sdl_audio_buffer_len + 255) & ~255; // Align to SPCSize\n if (!(sdl_audio_buffer = malloc(sdl_audio_buffer_len)))\n {\n SDL_CloseAudio();\n puts(\"Audio Open Failed\");\n SoundEnabled = 0;\n return(false);\n }\n\n sound_sdl = true;\n printf(\"\\nAudio Opened.\\nDriver: Simple DirectMedia Layer output\\nChannels: %u\\nRate: %u\\n\\n\", wanted.channels, wanted.freq);\n return(true);\n}\n\n\nint InitSound()\n{\n sound_sdl = false;\n if (!SoundEnabled)\n {\n return(false);\n }\n\n PrevSoundQuality = SoundQuality;\n PrevStereoSound = StereoSound;\n\n #ifdef __LIBAO__\n if (strcmp(libAoDriver, \"sdl\") && !(!strcmp(libAoDriver, \"auto\") && !strcmp(ao_driver_info(ao_default_driver_id())->name, \"null\")))\n {\n return(SoundInit_ao());\n }\n #endif\n return(SoundInit_sdl());\n}\n\nvoid DeinitSound()\n{\n #ifdef __LIBAO__\n if (audio_device)\n {\n pthread_kill(audio_thread, SIGTERM);\n pthread_mutex_destroy(&audio_mutex);\n pthread_cond_destroy(&audio_wait);\n ao_close(audio_device);\n }\n #endif\n SDL_CloseAudio();\n if (sdl_audio_buffer) { free(sdl_audio_buffer); }\n}\n\n" }, { "alpha_fraction": 0.5710613131523132, "alphanum_fraction": 0.6496511101722717, "avg_line_length": 19.628787994384766, "blob_id": "35507350adcdfd93150edf1b9ede70bccc9064a3", "content_id": "08790c76ff85e8bea088605775781998c603563c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 2723, "license_type": "no_license", "max_line_length": 72, "num_lines": 132, "path": "/src/chips/obc1emu.c", "repo_name": "TeoTwawki/zsnes-archive", "src_encoding": "UTF-8", "text": "/*\nCopyright (C) 1997-2008 ZSNES Team ( zsKnight, _Demo_, pagefault, Nach )\n\nhttp://www.zsnes.com\nhttp://sourceforge.net/projects/zsnes\nhttps://zsnes.bountysource.com\n\nThis program is free software; you can redistribute it and/or\nmodify it under the terms of the GNU General Public License\nversion 2 as published by the Free Software Foundation.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with this program; if not, write to the Free Software\nFoundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.\n*/\n\n#include <stdint.h>\n#include <stdbool.h>\n\nstatic uint8_t *OBC1_RAM = 0;\n\nint OBC1_Address;\nint OBC1_BasePtr;\nint OBC1_Shift;\n\nuint16_t obc1_address;\nuint8_t obc1_byte;\n\nvoid GetOBC1 ()\n{\n\tswitch(obc1_address) {\n\t\tcase 0x7ff0:\n\t\t\tobc1_byte = OBC1_RAM[OBC1_BasePtr + (OBC1_Address << 2)];\n\t\t\tbreak;\n\n\t\tcase 0x7ff1:\n\t\t\tobc1_byte = OBC1_RAM[OBC1_BasePtr + (OBC1_Address << 2) + 1];\n\t\t\tbreak;\n\n\t\tcase 0x7ff2:\n\t\t\tobc1_byte = OBC1_RAM[OBC1_BasePtr + (OBC1_Address << 2) + 2];\n\t\t\tbreak;\n\n\t\tcase 0x7ff3:\n\t\t\tobc1_byte = OBC1_RAM[OBC1_BasePtr + (OBC1_Address << 2) + 3];\n\t\t\tbreak;\n\n\t\tcase 0x7ff4:\n\t\t\tobc1_byte = OBC1_RAM[OBC1_BasePtr + (OBC1_Address >> 2) + 0x200];\n\t\t\tbreak;\n\n\t\tdefault:\n\t\t\tobc1_byte = OBC1_RAM[obc1_address & 0x1fff];\n\t}\n}\n\n\nvoid SetOBC1 ()\n{\n\tswitch(obc1_address) {\n\t\tcase 0x7ff0:\n\t\t{\n\t\t\tOBC1_RAM[OBC1_BasePtr + (OBC1_Address << 2)] = obc1_byte;\n\t\t\tbreak;\n\t\t}\n\n\t\tcase 0x7ff1:\n\t\t{\n\t\t\tOBC1_RAM[OBC1_BasePtr + (OBC1_Address << 2) + 1] = obc1_byte;\n\t\t\tbreak;\n\t\t}\n\n\t\tcase 0x7ff2:\n\t\t{\n\t\t\tOBC1_RAM[OBC1_BasePtr + (OBC1_Address << 2) + 2] = obc1_byte;\n\t\t\tbreak;\n\t\t}\n\n\t\tcase 0x7ff3:\n\t\t{\n\t\t\tOBC1_RAM[OBC1_BasePtr + (OBC1_Address << 2) + 3] = obc1_byte;\n\t\t\tbreak;\n\t\t}\n\n\t\tcase 0x7ff4:\n\t\t{\n\t\t\tunsigned char Temp;\n\n\t\t\tTemp = OBC1_RAM[OBC1_BasePtr + (OBC1_Address >> 2) + 0x200];\n\t\t\tTemp = (Temp & ~(3 << OBC1_Shift)) | ((obc1_byte & 3) << OBC1_Shift);\n\t\t\tOBC1_RAM[OBC1_BasePtr + (OBC1_Address >> 2) + 0x200] = Temp;\n\t\t\tbreak;\n\t\t}\n\n\t\tcase 0x7ff5:\n\t\t{\n\t\t\tif (obc1_byte & 1)\n\t\t\t\tOBC1_BasePtr = 0x1800;\n\t\t\telse\n\t\t\t\tOBC1_BasePtr = 0x1c00;\n\n\t\t\tbreak;\n\t\t}\n\n\t\tcase 0x7ff6:\n\t\t{\n\t\t\tOBC1_Address = obc1_byte & 0x7f;\n\t\t\tOBC1_Shift = (obc1_byte & 3) << 1;\n\t\t\tbreak;\n\t\t}\n\t}\n\n\tOBC1_RAM[obc1_address & 0x1fff] = obc1_byte;\n}\n\nextern unsigned char *romdata;\nvoid InitOBC1()\n{\n\tOBC1_RAM = romdata+0x400000;\n\tif (OBC1_RAM[0x1ff5] & 1)\n\t\tOBC1_BasePtr = 0x1800;\n\telse\n\t\tOBC1_BasePtr = 0x1c00;\n\n\tOBC1_Address = OBC1_RAM[0x1ff6] & 0x7f;\n\tOBC1_Shift = (OBC1_RAM[0x1ff6] & 3) << 1;\n}\n" }, { "alpha_fraction": 0.7061224579811096, "alphanum_fraction": 0.716035008430481, "avg_line_length": 27.11475372314453, "blob_id": "c0bef9e55fcfd4bb4a202dae67fb1f519507e1d2", "content_id": "f9223d6a30418ce84d714cb35dd7fa3664a5544f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1715, "license_type": "no_license", "max_line_length": 80, "num_lines": 61, "path": "/src/tools/fileutil.h", "repo_name": "TeoTwawki/zsnes-archive", "src_encoding": "UTF-8", "text": "/*\nCopyright (C) 2005-2008 Nach, grinvader ( http://www.zsnes.com )\n\nThis program is free software; you can redistribute it and/or\nmodify it under the terms of the GNU General Public License\nversion 2 as published by the Free Software Foundation.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with this program; if not, write to the Free Software\nFoundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.\n*/\n\n/*\nThis is part of a toolkit used to assist in ZSNES development\n*/\n\n#ifndef FILEUTIL_H\n#define FILEUTIL_H\n\n#include <string.h>\n#include <sys/stat.h>\n\nvoid parse_dir(const char *, void (*func)(const char *, struct stat&));\nbool parse_path(const char *, void (*func)(const char *, struct stat&));\n\ninline bool extension_match(const char *filename, const char *ext)\n{\n size_t filen_len = strlen(filename);\n size_t ext_len = strlen(ext);\n return((filen_len > ext_len) && !strcasecmp(filename+filen_len-ext_len, ext));\n}\n\ninline bool is_c_file(const char *filename)\n{\n return(extension_match(filename, \".c\") ||\n extension_match(filename, \".h\"));\n}\n\ninline bool is_cpp_file(const char *filename)\n{\n return(extension_match(filename, \".cpp\"));\n}\n\ninline bool is_psr_file(const char *filename)\n{\n return(extension_match(filename, \".psr\"));\n}\n\ninline bool is_asm_file(const char *filename)\n{\n return(extension_match(filename, \".asm\") ||\n extension_match(filename, \".inc\") ||\n extension_match(filename, \".mac\"));\n}\n\n#endif\n" }, { "alpha_fraction": 0.6235578060150146, "alphanum_fraction": 0.643949568271637, "avg_line_length": 20.176136016845703, "blob_id": "79e3513dbf5592c1707f69720d26a66f565a1380", "content_id": "199383ac3b29312addef67407df25e0dc6ed36e8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 3727, "license_type": "no_license", "max_line_length": 106, "num_lines": 176, "path": "/src/linux/x11.c", "repo_name": "TeoTwawki/zsnes-archive", "src_encoding": "UTF-8", "text": "/*\nCopyright (C) 1997-2008 ZSNES Team ( zsKnight, _Demo_, pagefault, Nach )\n\nhttp://www.zsnes.com\nhttp://sourceforge.net/projects/zsnes\nhttps://zsnes.bountysource.com\n\nThis program is free software; you can redistribute it and/or\nmodify it under the terms of the GNU General Public License\nversion 2 as published by the Free Software Foundation.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
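A sketch of how a caller might use the predicates above; the main() is illustrative only. Note that extension_match requires the name to be strictly longer than the extension, and strcasecmp makes every check case-insensitive:

#include <cstdio>
#include "fileutil.h"

int main()
{
  std::printf("%d\n", extension_match("jma.cpp", ".cpp")); // 1
  std::printf("%d\n", extension_match(".cpp", ".cpp"));    // 0: name must be longer
  std::printf("%d\n", is_asm_file("CPUEXEC.MAC"));         // 1: match is case-insensitive
  return 0;
}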
See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with this program; if not, write to the Free Software\nFoundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.\n*/\n\n#include \"../gblhdr.h\"\n#include \"x11.h\"\n\n#include <SDL_syswm.h>\n\n\n#ifdef SDL_VIDEO_DRIVER_X11\n#include <dlfcn.h>\n#include <unistd.h>\n#include <sys/wait.h>\n#include \"safelib.h\"\n\n\nstatic Display *SDL_Display = 0;\nstatic Window SDL_Window = 0; //unsigned long\n\nstatic void *libXtst = 0;\nint (*XTestFakeKeyEvent)(Display *, unsigned int, Bool, unsigned long) = 0;\n\nstatic bool XScreenSaverSwitchedOff = false;\n\nvoid *dlopen_family(const char *lib, int flag)\n{\n void *p = dlopen(lib, flag);\n if (!p)\n {\n char buffer[256];\n unsigned int i;\n for (i = 0; i < 10; i++)\n {\n snprintf(buffer, sizeof(buffer), \"%s.%u\", lib, i);\n if ((p = dlopen(buffer, flag)))\n {\n break;\n }\n }\n }\n return(p);\n}\n\nvoid X11_Init()\n{\n SDL_SysWMinfo info;\n SDL_VERSION(&info.version);\n\n if (!SDL_Display && (SDL_GetWMInfo(&info) > 0) && (info.subsystem == SDL_SYSWM_X11))\n {\n SDL_Display = info.info.x11.display;\n SDL_Window = info.info.x11.window;\n\n libXtst = dlopen_family(\"libXtst.so\", RTLD_LAZY);\n if (libXtst)\n {\n XTestFakeKeyEvent = dlsym(libXtst, \"XTestFakeKeyEvent\");\n }\n else\n {\n puts(dlerror());\n }\n\n atexit(X11_Deinit);\n }\n}\n\nvoid X11_Deinit()\n{\n XScreenSaverOn();\n if (libXtst)\n {\n XTestFakeKeyEvent = 0;\n\n dlclose(libXtst);\n libXtst = 0;\n }\n SDL_Display = 0;\n}\n\n\nstatic bool xdg_screensaver(char *command)\n{\n bool success = false;\n if (SDL_Window)\n {\n pid_t pid = safe_fork(0, 0);\n if (pid != -1) //Successful Fork\n {\n if (pid) //Parent\n {\n int status;\n waitpid(pid, &status, 0);\n success = WIFEXITED(status) && !WEXITSTATUS(status);\n }\n else //Child\n {\n char numbuffer[21];\n char *const arglist[] = { \"xdg-screensaver\", command, numbuffer, 0 };\n\n snprintf(numbuffer, sizeof(numbuffer), \"%lu\", (unsigned long)SDL_Window); //Cast just in case\n execvp(arglist[0], arglist);\n _exit(-1);\n }\n }\n }\n return(success);\n}\n\nbool XScreenSaverOff()\n{\n if (!XScreenSaverSwitchedOff)\n {\n XScreenSaverSwitchedOff = xdg_screensaver(\"suspend\");\n }\n return(XScreenSaverSwitchedOff);\n}\n\nbool XScreenSaverOn()\n{\n if (XScreenSaverSwitchedOff)\n {\n XScreenSaverSwitchedOff = !xdg_screensaver(\"resume\");\n }\n return(!XScreenSaverSwitchedOff);\n}\n\n\nvoid CircumventXScreenSaver()\n{\n if (XTestFakeKeyEvent)\n {\n static time_t last_time = 0;\n time_t current_time = time(0);\n if ((current_time - 50) > last_time) //Screensaver can be as low as every 60 seconds, preempt it at 50\n {\n XTestFakeKeyEvent(SDL_Display, 255, True, 0);\n XSync(SDL_Display, False);\n XTestFakeKeyEvent(SDL_Display, 255, False, 0);\n XSync(SDL_Display, False);\n\n last_time = current_time;\n }\n }\n}\n\n\n#else\n\nvoid X11_Init() {}\nvoid X11_Deinit() {}\n\nbool XScreenSaverOff() { return(false); }\nbool XScreenSaverOn() { return(false); }\n\nvoid CircumventXScreenSaver() {}\n\n#endif\n" }, { "alpha_fraction": 0.6156163811683655, "alphanum_fraction": 0.6229326725006104, "avg_line_length": 28.6770076751709, "blob_id": "90c00af07dfce2ad9dc8fddf6569229ae6913d38", "content_id": "5325f04ce89b716c79077efe742b1496c7c1184a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 16265, "license_type": "no_license", "max_line_length": 127, "num_lines": 548, "path": 
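xdg_screensaver above wraps the classic run-and-wait pattern: fork, execvp in the child, waitpid in the parent, success only on a normal zero exit. Stripped of the safe_fork wrapper from safelib, the same pattern looks roughly like this (ex_run_and_wait is an illustrative name):

#include <stdbool.h>
#include <unistd.h>
#include <sys/wait.h>

static bool ex_run_and_wait(char *const argv[])
{
  int status;
  pid_t pid = fork();

  if (pid == -1) { return(false); } /* fork failed */
  if (!pid)                         /* child: become the command */
  {
    execvp(argv[0], argv);
    _exit(-1);                      /* only reached if execvp failed */
  }
  waitpid(pid, &status, 0);         /* parent: reap and check exit code */
  return(WIFEXITED(status) && !WEXITSTATUS(status));
}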
"/src/jma/jma.cpp", "repo_name": "TeoTwawki/zsnes-archive", "src_encoding": "UTF-8", "text": "/*\nCopyright (C) 2005-2008 NSRT Team ( http://nsrt.edgeemu.com )\n\nThis program is free software; you can redistribute it and/or\nmodify it under the terms of the GNU General Public License\nversion 2 as published by the Free Software Foundation.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with this program; if not, write to the Free Software\nFoundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.\n*/\n\n#include <sstream>\n#include \"jma.h\"\nusing namespace std;\n\n#include \"portable.h\"\n#include \"7z.h\"\n#include \"crc32.h\"\n\nnamespace JMA\n{\n const char jma_magic[] = { 'J', 'M', 'A', 0, 'N' };\n const unsigned int jma_header_length = 5;\n const unsigned char jma_version = 1;\n const unsigned int jma_version_length = 1;\n const unsigned int jma_total_header_length = jma_header_length + jma_version_length + UINT_SIZE;\n\n //Convert DOS/zip/JMA integer time to to time_t\n time_t uint_to_time(unsigned short date, unsigned short time)\n {\n tm formatted_time;\n\n formatted_time.tm_mday = date & 0x1F;\n formatted_time.tm_mon = ((date >> 5) & 0xF) - 1;\n formatted_time.tm_year = ((date >> 9) & 0x7f) + 80;\n formatted_time.tm_sec = (time & 0x1F) * 2;\n formatted_time.tm_min = (time >> 5) & 0x3F;\n formatted_time.tm_hour = (time >> 11) & 0x1F;\n\n return(mktime(&formatted_time));\n }\n\n\n //Retreive the file block, what else?\n void jma_open::retrieve_file_block() throw(jma_errors)\n {\n unsigned char uint_buffer[UINT_SIZE];\n unsigned char ushort_buffer[USHORT_SIZE];\n\n //File block size is the last UINT in the file\n stream.seekg(-UINT_SIZE,ios::end);\n stream.read((char *)uint_buffer, UINT_SIZE);\n size_t file_block_size = charp_to_uint(uint_buffer);\n\n //Currently at the end of the file, so that's the file size\n size_t jma_file_size = stream.tellg();\n\n //The file block can't be larger than the JMA file without it's header.\n //This if can probably be improved\n if (file_block_size >= jma_file_size-jma_total_header_length)\n {\n throw(JMA_BAD_FILE);\n }\n\n //Seek to before file block so we can read the file block\n stream.seekg(-((int)file_block_size+UINT_SIZE),ios::end);\n\n //This is needed if the file block is compressed\n stringstream decompressed_file_block;\n //Pointer to where to read file block from (file or decompressed buffer)\n istream *file_block_stream;\n\n //Setup file info buffer and byte to read with\n jma_file_info file_info;\n char byte;\n\n stream.get(byte);\n if (!byte) //If file block is compressed\n {\n //Compressed size isn't counting the byte we just read or the UINT for compressed size\n size_t compressed_size = file_block_size - (1+UINT_SIZE);\n\n //Read decompressed size / true file block size\n stream.read((char *)uint_buffer, UINT_SIZE);\n file_block_size = charp_to_uint(uint_buffer);\n\n //Setup access methods for decompression\n ISequentialInStream_Istream compressed_data(stream);\n ISequentialOutStream_Ostream decompressed_data(decompressed_file_block);\n\n //Decompress the data\n if (!decompress_lzma_7z(compressed_data, compressed_size, decompressed_data, file_block_size))\n {\n throw(JMA_DECOMPRESS_FAILED);\n }\n\n //Go to beginning, setup pointer to buffer\n 
decompressed_file_block.seekg(0, ios::beg);\n file_block_stream = &decompressed_file_block;\n }\n else\n {\n stream.putback(byte); //Putback byte, byte is part of filename, not compressed indicator\n file_block_stream = &stream;\n }\n\n\n //Minimum file name length is 2 bytes, a char and a null\n //Minimum comment length is 1 byte, a null\n //There are currently 2 UINTs and 2 USHORTs per file\n while (file_block_size >= 2+1+UINT_SIZE*2+USHORT_SIZE*2) //This does allow for a gap, but that's okay\n {\n //First stored in the file block is the file name null terminated\n file_info.name = \"\";\n\n file_block_stream->get(byte);\n while (byte)\n {\n file_info.name += byte;\n file_block_stream->get(byte);\n }\n\n //There must be a file name or the file is bad\n if (!file_info.name.length())\n {\n throw(JMA_BAD_FILE);\n }\n\n //Same trick as above for the comment\n file_info.comment = \"\";\n\n file_block_stream->get(byte);\n while (byte)\n {\n file_info.comment += byte;\n file_block_stream->get(byte);\n }\n\n //Next is a UINT representing the file's size\n file_block_stream->read((char *)uint_buffer, UINT_SIZE);\n file_info.size = charp_to_uint(uint_buffer);\n\n //Followed by CRC32\n file_block_stream->read((char *)uint_buffer, UINT_SIZE);\n file_info.crc32 = charp_to_uint(uint_buffer);\n\n //Special USHORT representation of file's date\n file_block_stream->read((char *)ushort_buffer, USHORT_SIZE);\n file_info.date = charp_to_ushort(ushort_buffer);\n\n //Special USHORT representation of file's time\n file_block_stream->read((char *)ushort_buffer, USHORT_SIZE);\n file_info.time = charp_to_ushort(ushort_buffer);\n\n file_info.buffer = 0; //Pointing to null till we decompress files\n\n files.push_back(file_info); //Put file info into our structure\n\n //Subtract size of the file info we just read\n file_block_size -= file_info.name.length()+file_info.comment.length()+2+UINT_SIZE*2+USHORT_SIZE*2;\n }\n }\n\n //Constructor for opening JMA files for reading\n jma_open::jma_open(const char *compressed_file_name) throw (jma_errors)\n {\n decompressed_buffer = 0;\n compressed_buffer = 0;\n\n stream.open(compressed_file_name, ios::in | ios::binary);\n if (!stream.is_open())\n {\n throw(JMA_NO_OPEN);\n }\n\n //Header is \"JMA\\0N\"\n unsigned char header[jma_header_length];\n stream.read((char *)header, jma_header_length);\n if (memcmp(jma_magic, header, jma_header_length))\n {\n throw(JMA_BAD_FILE);\n }\n\n //Not the cleanest code but logical\n stream.read((char *)header, 5);\n if (*header <= jma_version)\n {\n chunk_size = charp_to_uint(header+1); //Chunk size is a UINT that follows version #\n retrieve_file_block();\n }\n else\n {\n throw(JMA_UNSUPPORTED_VERSION);\n }\n }\n\n //Destructor only has to close the stream if neccesary\n jma_open::~jma_open()\n {\n if (stream.is_open())\n {\n stream.close();\n }\n }\n\n //Return a vector containing useful info about the files in the JMA\n vector<jma_public_file_info> jma_open::get_files_info()\n {\n vector<jma_public_file_info> file_info_vector;\n jma_public_file_info file_info;\n\n for (vector<jma_file_info>::iterator i = files.begin(); i != files.end(); i++)\n {\n file_info.name = i->name;\n file_info.comment = i->comment;\n file_info.size = i->size;\n file_info.datetime = uint_to_time(i->date, i->time);\n file_info.crc32 = i->crc32;\n file_info_vector.push_back(file_info);\n }\n\n return(file_info_vector);\n }\n\n //Skip forward a given number of chunks\n void jma_open::chunk_seek(unsigned int chunk_num) throw(jma_errors)\n {\n //Check the stream is open\n 
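charp_to_uint and charp_to_ushort used above come from portable.h, whose contents are not included in this snapshot; judging from their use on UINT_SIZE/USHORT_SIZE buffers they presumably decode fixed-width little-endian integers, along these lines (illustrative only, names reused for clarity):

// Presumed shape of the portable.h helpers; an assumption, not the source.
inline unsigned int ex_charp_to_uint(const unsigned char *p)
{
  return (unsigned int)p[0] | ((unsigned int)p[1] << 8) |
         ((unsigned int)p[2] << 16) | ((unsigned int)p[3] << 24);
}

inline unsigned short ex_charp_to_ushort(const unsigned char *p)
{
  return (unsigned short)(p[0] | (p[1] << 8));
}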
if (!stream.is_open())\n {\n throw(JMA_NO_OPEN);\n }\n\n //Clear possible errors so the seek will work\n stream.clear();\n\n //Move forward over header\n stream.seekg(jma_total_header_length, ios::beg);\n\n unsigned char int4_buffer[UINT_SIZE];\n\n while (chunk_num--)\n {\n //Read in size of chunk\n stream.read((char *)int4_buffer, UINT_SIZE);\n\n //Skip chunk plus it's CRC32\n stream.seekg(charp_to_uint(int4_buffer)+UINT_SIZE, ios::cur);\n }\n }\n\n //Return a vector of pointers to each file in the JMA, the buffer to hold all the files\n //must be initilized outside.\n vector<unsigned char *> jma_open::get_all_files(unsigned char *buffer) throw(jma_errors)\n {\n //If there's no stream we can't read from it, so exit\n if (!stream.is_open())\n {\n throw(JMA_NO_OPEN);\n }\n\n //Seek to the first chunk\n chunk_seek(0);\n\n //Set the buffer that decompressed data goes to\n decompressed_buffer = buffer;\n\n //If the JMA is not solid\n if (chunk_size)\n {\n unsigned char int4_buffer[UINT_SIZE];\n size_t size = get_total_size(files);\n\n //For each chunk in the file...\n for (size_t remaining_size = size; remaining_size; remaining_size -= chunk_size)\n {\n //Read the compressed size\n stream.read((char *)int4_buffer, UINT_SIZE);\n size_t compressed_size = charp_to_uint(int4_buffer);\n\n //Allocate memory of the correct size to hold the compressed data in the JMA\n //Throw error on failure as that is unrecoverable from\n try\n {\n compressed_buffer = new unsigned char[compressed_size];\n }\n catch (bad_alloc xa)\n {\n throw(JMA_NO_MEM_ALLOC);\n }\n\n //Read all the compressed data in\n stream.read((char *)compressed_buffer, compressed_size);\n\n //Read the expected CRC of compressed data from the file\n stream.read((char *)int4_buffer, UINT_SIZE);\n\n //If it doesn't match, throw error and cleanup memory\n if (CRC32lib::CRC32(compressed_buffer, compressed_size) != charp_to_uint(int4_buffer))\n {\n delete[] compressed_buffer;\n throw(JMA_BAD_FILE);\n }\n\n //Decompress the data, cleanup memory on failure\n if (!decompress_lzma_7z(compressed_buffer, compressed_size,\n decompressed_buffer+size-remaining_size,\n (remaining_size > chunk_size) ? 
chunk_size : remaining_size))\n {\n delete[] compressed_buffer;\n throw(JMA_DECOMPRESS_FAILED);\n }\n delete[] compressed_buffer;\n\n if (remaining_size <= chunk_size) //If we just decompressed the remainder\n {\n break;\n }\n }\n }\n else //Solidly compressed JMA\n {\n unsigned char int4_buffer[UINT_SIZE];\n\n //Read the size of the compressed data\n stream.read((char *)int4_buffer, UINT_SIZE);\n size_t compressed_size = charp_to_uint(int4_buffer);\n\n //Get decompressed size\n size_t size = get_total_size(files);\n\n //Setup access methods for decompression\n ISequentialInStream_Istream compressed_data(stream);\n ISequentialOutStream_Array decompressed_data(reinterpret_cast<char*>(decompressed_buffer), size);\n\n //Decompress the data\n if (!decompress_lzma_7z(compressed_data, compressed_size, decompressed_data, size))\n {\n throw(JMA_DECOMPRESS_FAILED);\n }\n\n /*\n //Allocate memory of the right size to hold the compressed data in the JMA\n try\n {\n compressed_buffer = new unsigned char[compressed_size];\n }\n catch (bad_alloc xa)\n {\n throw(JMA_NO_MEM_ALLOC);\n }\n\n //Copy the compressed data into memory\n stream.read((char *)compressed_buffer, compressed_size);\n size_t size = get_total_size(files);\n\n //Read the CRC of the compressed data\n stream.read((char *)int4_buffer, UINT_SIZE);\n\n //If it doesn't match, complain\n if (CRC32lib::CRC32(compressed_buffer, compressed_size) != charp_to_uint(int4_buffer))\n {\n delete[] compressed_buffer;\n throw(JMA_BAD_FILE);\n }\n\n //Decompress the data\n if (!decompress_lzma_7z(compressed_buffer, compressed_size, decompressed_buffer, size))\n {\n delete[] compressed_buffer;\n throw(JMA_DECOMPRESS_FAILED);\n }\n delete[] compressed_buffer;\n */\n }\n\n vector<unsigned char *> file_pointers;\n size_t size = 0;\n\n //For each file, add it's pointer to the vector, size is pointer offset in the buffer\n for (vector<jma_file_info>::iterator i = files.begin(); i != files.end(); i++)\n {\n i->buffer = decompressed_buffer+size;\n file_pointers.push_back(decompressed_buffer+size);\n size += i->size;\n }\n\n //Return the vector of pointers\n return(file_pointers);\n }\n\n //Extracts the file with a given name found in the archive to the given buffer\n void jma_open::extract_file(string& name, unsigned char *buffer) throw(jma_errors)\n {\n if (!stream.is_open())\n {\n throw(JMA_NO_OPEN);\n }\n\n size_t size_to_skip = 0;\n size_t our_file_size = 0;\n\n //Search through the vector of file information\n for (vector<jma_file_info>::iterator i = files.begin(); i != files.end(); i++)\n {\n if (i->name == name)\n {\n //Set the variable so we can tell we found it\n our_file_size = i->size;\n break;\n }\n\n //Keep a running total of size\n size_to_skip += i->size;\n }\n\n if (!our_file_size) //File with the specified name was not found in the archive\n {\n throw(JMA_FILE_NOT_FOUND);\n }\n\n //If the JMA only contains one file, we can skip a lot of overhead\n if (files.size() == 1)\n {\n get_all_files(buffer);\n return;\n }\n\n if (chunk_size) //we are using non-solid archive..\n {\n unsigned int chunks_to_skip = size_to_skip / chunk_size;\n\n //skip over requisite number of chunks\n chunk_seek(chunks_to_skip);\n\n //Allocate memory for compressed and decompressed data\n unsigned char *comp_buffer = 0, *decomp_buffer = 0;\n try\n {\n //Compressed data size is <= non compressed size\n unsigned char *combined_buffer = new unsigned char[chunk_size*2];\n comp_buffer = combined_buffer;\n decomp_buffer = combined_buffer+chunk_size;\n }\n catch (bad_alloc xa)\n {\n 
throw(JMA_NO_MEM_ALLOC);\n }\n\n size_t first_chunk_offset = size_to_skip % chunk_size;\n unsigned char int4_buffer[UINT_SIZE];\n for (size_t i = 0; i < our_file_size;)\n {\n //Get size\n stream.read((char *)int4_buffer, UINT_SIZE);\n size_t compressed_size = charp_to_uint(int4_buffer);\n\n //Read all the compressed data in\n stream.read((char *)comp_buffer, compressed_size);\n\n //Read the CRC of the compressed data\n stream.read((char *)int4_buffer, UINT_SIZE);\n\n //If it doesn't match, complain\n if (CRC32lib::CRC32(comp_buffer, compressed_size) != charp_to_uint(int4_buffer))\n {\n delete[] comp_buffer;\n throw(JMA_BAD_FILE);\n }\n\n //Decompress chunk\n if (!decompress_lzma_7z(comp_buffer, compressed_size, decomp_buffer, chunk_size))\n {\n delete[] comp_buffer;\n throw(JMA_DECOMPRESS_FAILED);\n }\n\n size_t copy_amount = our_file_size-i > chunk_size-first_chunk_offset ? chunk_size-first_chunk_offset : our_file_size-i;\n\n memcpy(buffer+i, decomp_buffer+first_chunk_offset, copy_amount);\n first_chunk_offset = 0; //Set to zero since this is only for the first iteration\n i += copy_amount;\n }\n delete[] comp_buffer;\n }\n else //Solid JMA\n {\n unsigned char *decomp_buffer = 0;\n try\n {\n decomp_buffer = new unsigned char[get_total_size(files)];\n }\n catch (bad_alloc xa)\n {\n throw(JMA_NO_MEM_ALLOC);\n }\n\n get_all_files(decomp_buffer);\n\n memcpy(buffer, decomp_buffer+size_to_skip, our_file_size);\n\n delete[] decomp_buffer;\n }\n }\n\n bool jma_open::is_solid()\n {\n return(chunk_size ? false : true);\n }\n\n const char *jma_error_text(jma_errors error)\n {\n switch (error)\n {\n case JMA_NO_CREATE:\n return(\"JMA could not be created\");\n\n case JMA_NO_MEM_ALLOC:\n return(\"Memory for JMA could be allocated\");\n\n case JMA_NO_OPEN:\n return(\"JMA could not be opened\");\n\n case JMA_BAD_FILE:\n return(\"Invalid/Corrupt JMA\");\n\n case JMA_UNSUPPORTED_VERSION:\n return(\"JMA version not supported\");\n\n case JMA_COMPRESS_FAILED:\n return(\"JMA compression failed\");\n\n case JMA_DECOMPRESS_FAILED:\n return(\"JMA decompression failed\");\n\n case JMA_FILE_NOT_FOUND:\n return(\"File not found in JMA\");\n }\n return(\"Unknown error\");\n }\n\n}\n\n\n" }, { "alpha_fraction": 0.7320902943611145, "alphanum_fraction": 0.7487733364105225, "avg_line_length": 29.878787994384766, "blob_id": "a4f4a66dcb20d4b3685b0b134638b7f49db859e6", "content_id": "34db97eae26a429f42acc8ff05c618d0a93bb0dc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1019, "license_type": "no_license", "max_line_length": 72, "num_lines": 33, "path": "/src/gui/testgui.cpp", "repo_name": "TeoTwawki/zsnes-archive", "src_encoding": "UTF-8", "text": "/*\nCopyright (C) 1997-2008 ZSNES Team ( zsKnight, _Demo_, pagefault, Nach )\n\nhttp://www.zsnes.com\nhttp://sourceforge.net/projects/zsnes\nhttps://zsnes.bountysource.com\n\nThis program is free software; you can redistribute it and/or\nmodify it under the terms of the GNU General Public License\nversion 2 as published by the Free Software Foundation.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
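A minimal sketch of how a caller might drive the jma_open class above, sizing the output buffer from the directory returned by get_files_info; the function is illustrative and assumes jma.h declares these members as they are defined here:

#include <iostream>
#include <vector>
#include "jma.h"

int ex_extract_all(const char *path)
{
  try
  {
    JMA::jma_open archive(path);

    // Size one contiguous output buffer from the reported per-file sizes.
    std::vector<JMA::jma_public_file_info> infos = archive.get_files_info();
    size_t total = 0;
    for (size_t i = 0; i < infos.size(); i++) { total += infos[i].size; }

    unsigned char *buffer = new unsigned char[total]; // caller owns the buffer
    std::vector<unsigned char *> files = archive.get_all_files(buffer);
    std::cout << files.size() << " file(s), " << total << " bytes\n";
    delete[] buffer;
  }
  catch (JMA::jma_errors err)
  {
    std::cerr << JMA::jma_error_text(err) << '\n';
    return(1);
  }
  return(0);
}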
See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with this program; if not, write to the Free Software\nFoundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.\n*/\n\n#include <QApplication>\n#include \"gui.h\"\n\nint main(int argc, char *argv[])\n{\n QApplication app(argc, argv);\n QMainWindow *widget = new QMainWindow;\n Ui::ZSNESMainForm ui;\n ui.setupUi(widget);\n widget->show();\n return app.exec();\n}\n" }, { "alpha_fraction": 0.6133497357368469, "alphanum_fraction": 0.6322157382965088, "avg_line_length": 23.200992584228516, "blob_id": "272b0413a83f732129e3fc12527cb62dc4878723", "content_id": "8ff7431ddb48a771b4ab22d6c3b208737f96b344", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 9753, "license_type": "no_license", "max_line_length": 204, "num_lines": 403, "path": "/src/playspc.c", "repo_name": "TeoTwawki/zsnes-archive", "src_encoding": "UTF-8", "text": "#define __STDC_CONSTANT_MACROS\n#include <stdio.h>\n#include <stdlib.h>\n#include <assert.h>\n#include <string.h>\n#include <stdint.h>\n#include <stdbool.h>\n#include \"cpu/zspc/zspc.h\"\n\n#define NUMCONV_BT32\n#include \"numconv.h\"\n\n#ifdef __WIN32__\n#include <dsound.h>\n\ntypedef bool adev_t;\n\nstatic LPDIRECTSOUND8 lpds = 0;\nstatic LPDIRECTSOUNDBUFFER hdspribuf = 0; //primary direct sound buffer\nstatic LPDIRECTSOUNDBUFFER hdsbuf = 0; //secondary direct sound buffer (stream buffer)\nstatic int ds_buffer_size = 0; //size in bytes of the direct sound buffer\nstatic int ds_write_offset = 0; //offset of the write cursor in the direct sound buffer\nstatic int ds_min_free_space = 0; //if the free space is below this value get_space() will return 0\n\nstatic HRESULT ds_error(HRESULT res)\n{\n#ifdef DEBUG\n switch (res)\n {\n case DS_OK: puts(\"Okay\"); break;\n case DSERR_BUFFERLOST: puts(\"Buffer Lost\"); break;\n case DSERR_INVALIDCALL: puts(\"Invalid Call\"); break;\n case DSERR_INVALIDPARAM: puts(\"Invalid Param\"); break;\n case DSERR_PRIOLEVELNEEDED: puts(\"Priority Needed\"); break;\n case DSERR_BADFORMAT: puts(\"Bad Format\"); break;\n case DSERR_OUTOFMEMORY: puts(\"Out of Memory\"); break;\n case DSERR_UNSUPPORTED: puts(\"Unsupported\"); break;\n default: puts(\"Unknown Error\"); break;\n }\n#endif\n return(res);\n}\n\nstatic void ds_initialize()\n{\n WAVEFORMATEX wfx;\n DSBUFFERDESC dsbpridesc;\n DSBUFFERDESC dsbdesc;\n\n ds_error(DirectSoundCreate8(0, &lpds, 0));\n ds_error(lpds->SetCooperativeLevel(GetDesktopWindow(), DSSCL_EXCLUSIVE));\n\n memset(&wfx, 0, sizeof(wfx));\n wfx.wFormatTag = WAVE_FORMAT_PCM;\n wfx.nChannels = 2;\n wfx.nSamplesPerSec = 32000;\n wfx.wBitsPerSample = 16;\n wfx.nBlockAlign = wfx.nChannels * (wfx.wBitsPerSample >> 3);\n wfx.nAvgBytesPerSec = wfx.nSamplesPerSec * wfx.nBlockAlign;\n\n //Fill in primary sound buffer descriptor\n memset(&dsbpridesc, 0, sizeof(DSBUFFERDESC));\n dsbpridesc.dwSize = sizeof(DSBUFFERDESC);\n dsbpridesc.dwFlags = DSBCAPS_PRIMARYBUFFER;\n\n //Fill in the secondary sound buffer (=stream buffer) descriptor\n memset(&dsbdesc, 0, sizeof(DSBUFFERDESC));\n dsbdesc.dwSize = sizeof(DSBUFFERDESC);\n dsbdesc.dwFlags = DSBCAPS_GETCURRENTPOSITION2 //Better position accuracy\n | DSBCAPS_GLOBALFOCUS //Allows background playing\n | DSBCAPS_CTRLVOLUME; //volume control enabled\n\n dsbdesc.dwBufferBytes = wfx.nChannels * wfx.nSamplesPerSec * (16>>3);\n dsbdesc.lpwfxFormat = &wfx;\n ds_buffer_size = dsbdesc.dwBufferBytes;\n\n //create primary buffer and 
set its format\n ds_error(lpds->CreateSoundBuffer(&dsbpridesc, &hdspribuf, 0));\n ds_error(hdspribuf->SetFormat((WAVEFORMATEX *)&wfx));\n\n // now create the stream buffer\n if (ds_error(lpds->CreateSoundBuffer(&dsbdesc, &hdsbuf, 0)) != DS_OK)\n {\n if (dsbdesc.dwFlags & DSBCAPS_LOCHARDWARE)\n {\n // Try without DSBCAPS_LOCHARDWARE\n dsbdesc.dwFlags &= ~DSBCAPS_LOCHARDWARE;\n ds_error(lpds->CreateSoundBuffer(&dsbdesc, &hdsbuf, 0));\n }\n }\n\n ds_write_offset = 0;\n ds_min_free_space = wfx.nBlockAlign;\n}\n\nstatic int ds_write_buffer(unsigned char *data, int len)\n{\n HRESULT res;\n LPVOID lpvPtr1;\n DWORD dwBytes1;\n LPVOID lpvPtr2;\n DWORD dwBytes2;\n\n // Lock the buffer\n res = ds_error(hdsbuf->Lock(ds_write_offset, len, &lpvPtr1, &dwBytes1, &lpvPtr2, &dwBytes2, 0));\n // If the buffer was lost, restore and retry lock.\n if (res == DSERR_BUFFERLOST)\n {\n hdsbuf->Restore();\n res = ds_error(hdsbuf->Lock(ds_write_offset, len, &lpvPtr1, &dwBytes1, &lpvPtr2, &dwBytes2, 0));\n }\n\n if (SUCCEEDED(res))\n {\n // Write to pointers without reordering.\n memcpy(lpvPtr1, data, dwBytes1);\n if (lpvPtr2) { memcpy(lpvPtr2, data+dwBytes1, dwBytes2); }\n ds_write_offset += dwBytes1+dwBytes2;\n if (ds_write_offset >= ds_buffer_size) { ds_write_offset = dwBytes2; }\n\n // Release the data back to DirectSound.\n res = ds_error(hdsbuf->Unlock(lpvPtr1, dwBytes1, lpvPtr2, dwBytes2));\n if (SUCCEEDED(res))\n {\n // Success.\n DWORD status;\n ds_error(hdsbuf->GetStatus(&status));\n if (!(status & DSBSTATUS_PLAYING))\n {\n ds_error(hdsbuf->Play(0, 0, DSBPLAY_LOOPING));\n }\n return(dwBytes1+dwBytes2);\n }\n }\n // Lock, Unlock, or Restore failed.\n return(0);\n}\n\nstatic bool ds_play(adev_t, char *samples_buffer, size_t samples_count)\n{\n unsigned char *data = (unsigned char *)samples_buffer;\n int samples_outputted, samples_remaining;\n\n samples_remaining = samples_count;\n for (;;)\n {\n DWORD play_offset;\n int space, len = samples_remaining;\n\n // make sure we have enough space to write data\n ds_error(hdsbuf->GetCurrentPosition(&play_offset, 0));\n space = ds_buffer_size-(ds_write_offset-play_offset);\n if (space > ds_buffer_size) { space -= ds_buffer_size; } // ds_write_offset < play_offset\n if (space < len) { len = space; }\n\n samples_outputted = ds_write_buffer((unsigned char *)data, len);\n\n data += samples_outputted;\n samples_remaining -= samples_outputted;\n if (samples_outputted < samples_remaining) { Sleep(500); }\n else { break; }\n }\n return(true);\n}\n\nvoid ds_shutdown()\n{\n if (hdsbuf) { hdsbuf->Release(); }\n if (hdspribuf) { hdspribuf->Release(); }\n if (lpds) { lpds->Release(); }\n}\n\nadev_t ds_init()\n{\n return(true);\n}\n\n#define ao_initialize ds_initialize\n#define ao_shutdown ds_shutdown\n#define ao_init ds_init\n#define ao_play ds_play\n\n#else\n#include <ao/ao.h>\n\ntypedef ao_device *adev_t;\n\nadev_t ao_init()\n{\n static ao_device *dev = 0;\n if (!dev)\n {\n struct ao_sample_format format = { 16, 32000, 2, AO_FMT_LITTLE };\n dev = ao_open_live(ao_default_driver_id(), &format, NULL);\n\n if (dev)\n {\n ao_info *di = ao_driver_info(dev->driver_id);\n printf(\"\\nAudio Opened.\\nDriver: %s\\nChannels: %u\\nRate: %u\\n\\n\", di->name, format.channels, format.rate);\n }\n }\n\n return(dev);\n}\n\n/*\n#include <unistd.h>\n#include <fcntl.h>\n#include <sys/soundcard.h>\n#include <sys/ioctl.h>\n\ntypedef int adev_t;\n\nstatic adev_t dev;\n\nstatic void ao_initialize()\n{\n char *devname = \"/dev/dsp\";\n if ((dev = open(devname, O_WRONLY, O_NONBLOCK)) > 0)\n {\n int cooked 
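A quick worked example of the free-space arithmetic in ds_play above, assuming ds_buffer_size = 128000: with play_offset = 30000 and ds_write_offset = 50000, space = 128000 - (50000 - 30000) = 108000 bytes are writable. Once the writer wraps (ds_write_offset = 30000, play_offset = 50000), the raw value becomes 128000 - (30000 - 50000) = 148000, which overshoots the buffer; the space > ds_buffer_size branch subtracts one buffer length and yields the correct 20000-byte gap between the write cursor and the play cursor.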
= 1;\n    int rate = 32000;\n    int channels = 2;\n    int format = AFMT_S16_LE;\n\n#if SOUND_VERSION >= 0x040000\n    if (ioctl(dev, SNDCTL_DSP_COOKEDMODE, &cooked) == -1)\n    {\n      perror(\"Cooked\");\n    }\n#endif\n\n    if (ioctl(dev, SNDCTL_DSP_CHANNELS, &channels) == -1)\n    {\n      perror (\"Channels\");\n    }\n\n    if (ioctl(dev, SNDCTL_DSP_SETFMT, &format) == -1)\n    {\n      perror (\"Format\");\n    }\n\n    if (ioctl(dev, SNDCTL_DSP_SPEED, &rate) == -1)\n    {\n      perror (\"Rate\");\n    }\n  }\n  else\n  {\n    perror(devname);\n  }\n}\n\nstatic void ao_shutdown()\n{\n  close(dev);\n}\n\nstatic adev_t ao_init()\n{\n  return((dev > 0) ? dev : 0);\n}\n\nstatic bool ao_play(adev_t device, char *samples_buffer, size_t samples_count)\n{\n  write(device, samples_buffer, samples_count);\n}\n*/\n\n#endif //End of OS specific code\n\n\nstruct header_t\n{\n  char tag[33];\n  uint8_t marker[3];\n  uint8_t marker2;\n  uint8_t pc[2];\n  uint8_t a, x, y, statflags, stack;\n  uint8_t reserved[2];\n  char song[32];\n  char game[32];\n  char dumper[16];\n  char comment[32];\n  uint8_t date[4];\n  uint8_t reserved2[7];\n  uint8_t len_secs[4];\n  uint8_t fade_msec[3];\n  char author[32];\n  uint8_t mute_mask;\n  uint8_t emulator;\n  uint8_t reserved3[46];\n};\n\nstatic bool read_spcfile(const char *fname, struct header_t *header)\n{\n  bool success = false;\n  FILE *fp = fopen(fname, \"rb\");\n  if (fp)\n  {\n    uint8_t spcdata[67000]; //We should adjust this later for the max possible size;\n    size_t size = fread(spcdata, 1, sizeof(spcdata), fp);\n    memcpy(header, spcdata, sizeof(struct header_t));\n    zspc_load_spc(spcdata, size);\n    zspc_clear_echo();\n    fclose(fp);\n    success = true;\n  }\n  else\n  {\n    printf(\"Failed to open %s.\\n\", fname);\n  }\n  return(success);\n}\n\nstatic void print_spcinfo(struct header_t *header)\n{\n  char *emulator;\n  switch (header->emulator)\n  {\n    case 1:\n      emulator = \"ZSNES\";\n      break;\n    case 2:\n      emulator = \"Snes9x\";\n      break;\n    default:\n      emulator = \"Unknown\";\n      break;\n  }\n  printf(\"Emulator: %s\\n  Dumper: %s\\n    Game: %s\\n   Title: %s\\n  Artist: %s\\n  Length: %u\\n\\n\", emulator, header->dumper, header->game, header->song, header->author, bytes_to_uint32(header->len_secs));\n}\n\nbool all_silence(int16_t *buffer, size_t len)\n{\n  while (len--)\n  {\n    if ((*buffer < -2) || (*buffer > 2)) { return(false); }\n    buffer++; //advance to the next sample; without this only sample 0 was ever tested\n  }\n  return(true);\n}\n\nvoid run_spc(adev_t dev, struct header_t *header)\n{\n  if (dev)\n  {\n    int16_t samples_buffer[2048];\n    size_t samples_count = sizeof(samples_buffer)/sizeof(int16_t);\n\n    size_t samples_played = 0, silence_count = 0;\n    uint32_t play_secs = bytes_to_uint32(header->len_secs);\n\n    if (!play_secs)\n    {\n      play_secs = ~0;\n    }\n\n    while (samples_played/(zspc_sample_rate*2) < play_secs)\n    {\n      zspc_play(samples_count, samples_buffer);\n\n      if (all_silence(samples_buffer, samples_count))\n      {\n        silence_count++;\n        if (silence_count == 40) //~2.5 seconds of silence\n        {\n          play_secs = 0;\n        }\n      }\n      else\n      {\n        silence_count = 0;\n      }\n\n      if (!ao_play(dev, (char*)samples_buffer, samples_count*2))\n      {\n        exit(-1);\n      }\n      samples_played += samples_count;\n    }\n  }\n}\n\nint main(int argc, char *argv[])\n{\n  struct header_t header;\n  assert(sizeof(struct header_t) == 0x100);\n\n  ao_initialize();\n  atexit(ao_shutdown);\n\n  while (--argc)\n  {\n    zspc_init();\n    if (read_spcfile(*(++argv), &header))\n    {\n      print_spcinfo(&header);\n      run_spc(ao_init(), &header);\n    }\n  }\n\n  return(0);\n}\n" }, { "alpha_fraction": 0.509314775466919, "alphanum_fraction": 0.5356442332267761, "avg_line_length": 26.409486770629883, "blob_id": 
"d210041e85f923031778a6ddb6ca5ebee693df6f", "content_id": "a0bcd51567e9aa2ca8f8d82765ab0e8d9c0581b1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 34676, "license_type": "no_license", "max_line_length": 117, "num_lines": 1265, "path": "/src/net/ztcp.c", "repo_name": "TeoTwawki/zsnes-archive", "src_encoding": "UTF-8", "text": "/*\nCopyright (C) 1997-2008 ZSNES Team ( zsKnight, _Demo_, pagefault, Nach )\n\nhttp://www.zsnes.com\nhttp://sourceforge.net/projects/zsnes\nhttps://zsnes.bountysource.com\n\nThis program is free software; you can redistribute it and/or\nmodify it under the terms of the GNU General Public License\nversion 2 as published by the Free Software Foundation.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with this program; if not, write to the Free Software\nFoundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.\n*/\n\n\n\n/**********************************************************\\\n* ZSNES TCP/IP MODULE FOR NETWORK PLAY *\n* *\n* Coded by the ZSNES team *\n* TCP/IP drivers coded by _Demo_, revised by Pharos *\n* UDP drivers coded by _Demo_, revised by zsKnight *\n* Gameplay netplay implementation by zsKnight *\n* UDP Packet loss/out of order algorithm/implementation *\n* by zsKnight, assistance on normal packets by Pharos *\n\\**********************************************************/\n\n// UDP Algorithm:\n//\n// UDP Header (1 byte): 1 = Normal Packet w/ reply req, 2 = Reply Packet,\n// 3 = Gameplay Packet (single byte),\n// 4 = Gameplay Packet (larger packet), 5 = Re-request gameplay\n// packet\n//\n// Normal Packets:\n// Note: The actual implementation turned out to be quite different\n// than the below descriptions.\n// First byte contains the packet counter, followed by packet contents.\n// Remote will send a Reply Packet (just contains packet counter)\n// Each packet buffer will have a timer counter which decreases after\n// every 1/60 seconds (value set at start is 60). If this reaches 0\n// that packet will be re-sent and reset the timer value back to 60.\n// If the local side receives the reply packet, it will set the timer\n// counter to -1.\n//\n// Gameplay Packets:\n// Note: Gameplay counter is separate from normal packet counter.\n// Note2: When referring to TCP/IP, it refers to the Normal Packets above.\n// Each packet in TCP/IP will contain a byte counter when UDP is\n// enabled.\n// Each UDP packet will contain a byte counter, the number of packets,\n// then each packet will contain a byte size only if there are > 1\n// packets. If the packet is just one byte long and contains a value<2,\n// it will follow by a byte containing info on how many packets its has\n// been like that for (it will not go beyond 32). 
If the packet is\n// more than one byte long, it will repeat that packet as the extra\n// packets for the next 3 packets, with the first byte of those packets\n// as the byte counter of that packet, then the second as the size.\n// Also, the send data will be stored in a 256*32 byte buffer in case\n// of packet loss.\n// When receiving, since no UDP packets will exceed 32bytes in length,\n// there will be a 256*32 byte buffer and a 256 byte flag buffer.\n// The flag clearing pointer will move at an offset of 128 from the\n// actual point of the receive buffer. When it receives data from\n// the UDP (or TCP), if the byte count of the data matches the\n// receive pointer, it will just send the data directly and increase the\n// receive pointer. Else it will fill the buffer accordingly based on\n// the send data (for a maximum of 32 bytes). Then if the bit on the\n// flag buffer is set for the current receive pointer, return the\n// appropriate buffer and increase receive pointer.\n// In case of packet loss, if no data has been received for every 500ms, the\n// local side would send a re-send package request. What this would\n// do is let the remote side build up a package containing all the\n// data from the requested send point to the current receive point.\n// A resend request will start off with 0x00,0xFF, then the counter\n// number. A resent packet will start off with 0x00,0xFE, the # of\n// packets, then the packet data (size of packet, data). A resend will\n// only be done if the requested packet is within the past 64 packets.\n// In-game chat will be moved to a separate packet in TCP/IP\n\n#ifdef __UNIXSDL__\n#include \"gblhdr.h\"\n#define closesocket(A) close(A)\n#define CopyMemory(A,B,C) memcpy(A,B,C)\n#define STUB_FUNCTION fprintf(stderr,\"STUB: %s at \" __FILE__ \", line %d, thread %d\\n\",__FUNCTION__,__LINE__,getpid())\n#define UINT unsigned int\n#define WORD unsigned short\n#define SOCKET int\n#define SOCKADDR_IN struct sockaddr_in\n#define LPSOCKADDR struct sockaddr*\n#define LPHOSTENT struct hostent*\n#define HOSTENT struct hostent\n#define LPINADDR struct in_addr*\n#define LPIN_ADDR struct in_addr*\n#define SOCKET_ERROR -1\n#define INVALID_SOCKET -1\n#define ioctlsocket ioctl\n#define FD_SET_VAR fd_set\n#else\n#include <stdio.h>\n#include <time.h>\n#include <windows.h>\n#include <winsock.h>\n#define FD_SET_VAR FD_SET\n#endif\n\nint RecvPtr;\nint RecvPtr2;\nunsigned char RecvFlags[256];\nunsigned char RecvBuffer[256*32];\nint RecvBufferSize[256];\n\nint SendPtr;\nint SendPtr2;\nunsigned char SendBuffer[256*32];\nint SendBufferSize[256];\n\nint SendRepeated;\n\nint PrevSPacket[16];\nint PrevSData[16*32];\nint PrevSSize[16];\nint PrevSPtr[16];\n\nint tcperr;\nunsigned short portval;\nint UDPEnable = 1;\nint UDPConfig = 1;\nint UDPBackTrace = 6;\nint blahblahblah = 0;\nint CounterA = -1;\nint CounterB = -1;\nint UDPMode2 = 0;\n\nint packetnum,packetnumhead;\nint packetrecvhead;\nunsigned char packetdata[2048*16];\nunsigned char packetrdata[2048*32];\nint packetconfirm[256];\nint packetreceived[256];\nint packetreceivesize[256];\nint packetsize[256];\nunsigned char cpacketdata[2048+32];\nUINT ConnectAddr;\nint packettimeleft[256];\nint packetresent[256];\nint PacketCounter=0;\nunsigned char CLatencyVal=0;\n\n\nSOCKET gamesocket; /* tcp socket for the game */\nSOCKET serversocket; /* tcp socket when the server is listening */\n\nSOCKET ugamesocket; /* udp socket sending */\nSOCKET userversocket; /* udp socket listening */\n\nSOCKADDR_IN serveraddress; /* address of the server 
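A skeletal restatement of the "normal packet" wire format the comment block above describes (header byte 1 = data plus an 8-bit counter, header byte 2 = the matching ack); the real logic, including the 15-packet window and the timer-driven resends, lives in SendData, PacketResend and PacketReceive below. All names here are illustrative:

#include <string.h>

enum { EX_NORMAL = 1, EX_REPLY = 2 };

static int ex_make_normal_packet(unsigned char *wire, unsigned char counter,
                                 const unsigned char *payload, int size)
{
  wire[0] = EX_NORMAL;    /* header byte: data packet, reply requested */
  wire[1] = counter;      /* 8-bit sequence number */
  memcpy(&wire[2], payload, size);
  return(size + 2);       /* total bytes to hand to sendto() */
}

static void ex_make_reply(unsigned char *wire, unsigned char counter)
{
  wire[0] = EX_REPLY;     /* ack carries only the counter, no payload */
  wire[1] = counter;
}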
*/\nSOCKADDR_IN ugameaddress; /* address of the server */\nSOCKADDR_IN userveraddress; /* address of the server */\n\nchar blah[256];\nchar remotehost[256];\nchar hostname[50] = \"IP N/A\";\n\n// Function Prototypes\n\nint SendData(int dsize,unsigned char *dptr);\nint GetData(int dsize,unsigned char *dptr);\nint GetLeftUDP();\n\n/**********************************************************\\\n* Initialize the zsnes tcpip module *\n* - no parameters *\n* - return 0 on success other value on error *\n* *\n* - no known side effects *\n\\**********************************************************/\n\nint InitTCP()\n{\n#ifndef __UNIXSDL__\n WORD versionneeded = MAKEWORD(2,2);\n WSADATA wsadata;\n#endif\n\n UDPEnable=0;\n\n#ifndef __UNIXSDL__\n /* Startup winsock */\n WSAStartup(versionneeded, &wsadata);\n\n /* Verify version number and exit on wrong version */\n if (wsadata.wVersion != versionneeded)\n {\n return(-1);\n }\n serversocket=INVALID_SOCKET;\n#endif\n return(0);\n}\n\n\n/**********************************************************\\\n* Deinitialize the zsnes tcpip module *\n* - no parameters *\n* *\n* - no known side effects *\n\\**********************************************************/\n\nvoid DeInitTCP()\n{\n#ifndef __UNIXSDL__\n WSACleanup();\n#endif\n}\n\n/**********************************************************\\\n* Gets UDP Status through sending data *\n* - no parameters *\n* *\n* - no known side effects *\n\\**********************************************************/\n\nvoid GetUDPStatus() {\n int retval;\n\n UDPEnable=UDPConfig;\n\n if (!UDPEnable){\n blah[0]=0;\n retval = send(gamesocket,blah,1,0);\n gethostname(blah,255);\n retval = send(gamesocket,blah,strlen(blah),0);\n }\n else {\n blah[0]=1;\n retval = send(gamesocket,blah,1,0);\n gethostname(blah,255);\n retval = send(gamesocket,blah,strlen(&blah[1])+1,0);\n }\n\n retval = recv(gamesocket,blah,256,0);\n if (blah[0]==0) UDPEnable=0;\n retval = recv(gamesocket,blah,256,0);\n}\n\n/**********************************************************\\\n* Connect to game server *\n* - parameters *\n* - pointer server name *\n* - server port *\n* - return 0 on success other value on error *\n* *\n* - no known side effects *\n\\**********************************************************/\n\nint isipval(char *name){\n int i=0;\n\n while(name[i]!=0){\n if (!((name[i]=='.') || ((name[i]>='0') && (name[i]<='9'))))\n return(0);\n i++;\n }\n return(1);\n}\n\nint ConnectServer(char *servername, unsigned int port)\n{\n char blah[255];\n int retval,i;\n LPHOSTENT host1=NULL;\n int yesip;\n\n packetnum = 0;\n packetnumhead = 0;\n packetrecvhead = 0;\n RecvPtr = 0;\n SendPtr = 0;\n RecvPtr2 = 0;\n SendPtr2 = 0;\n\n ConnectAddr = 0;\n SendRepeated = 0;\n for (i=0;i<16;i++)\n PrevSPacket[i]=0;\n\n /* get host and verify if it is valid */\n yesip = isipval(servername);\n if (!yesip){\n host1 = gethostbyname(servername);\n if (host1 == NULL)\n {\n return(-1);\n }\n }\n\n// return(-1);\n if (UDPConfig) UDPEnable = 1;\n\n if (UDPEnable)\n {\n PacketCounter=1;\n for (i=0;i<256;i++) {packettimeleft[i]=-1; packetconfirm[i]=1; packetreceived[i]=0; RecvFlags[i]=0;}\n\n userveraddress.sin_family = AF_INET;\n ugameaddress.sin_family = AF_INET;\n\n if (!yesip)\n {\n ugameaddress.sin_addr = *( (LPIN_ADDR) *host1->h_addr_list );\n }\n else\n {\n ugameaddress.sin_addr.s_addr = inet_addr(servername);\n }\n\n ConnectAddr = ugameaddress.sin_addr.s_addr;\n\n userveraddress.sin_addr.s_addr = INADDR_ANY;\n\n// port++;\n ugameaddress.sin_port = htons((unsigned short) 
port);\n userveraddress.sin_port = htons((unsigned short) port);\n// port--;\n\n userversocket = socket(AF_INET, SOCK_DGRAM,0);\n ugamesocket = socket(AF_INET, SOCK_DGRAM,0);\n\n if (ugamesocket == INVALID_SOCKET)\n {\n#ifdef __UNIXSDL__\n STUB_FUNCTION;\n#else\n tcperr=WSAGetLastError();\n sprintf(blah,\"Could not initialize UDP(2) : %d\",tcperr);\n MessageBox(NULL,blah,\"Error\",MB_SYSTEMMODAL|MB_OK);\n#endif\n return(-2);\n }\n\n if (userversocket == INVALID_SOCKET)\n {\n#ifdef __UNIXSDL__\n STUB_FUNCTION;\n#else\n tcperr=WSAGetLastError();\n sprintf(blah,\"Could not initialize UDP(2.5) : %d\",tcperr);\n MessageBox(NULL,blah,\"Error\",MB_SYSTEMMODAL|MB_OK);\n#endif\n return(-2);\n }\n\n if (bind(userversocket,(struct sockaddr*)&userveraddress,sizeof(userveraddress))==\n SOCKET_ERROR)\n {\n#ifdef __UNIXSDL__\n STUB_FUNCTION;\n#else\n tcperr=WSAGetLastError();\n sprintf(blah,\"Could not initialize UDP(16) : %d\",tcperr);\n MessageBox(NULL,blah,\"Error\",MB_SYSTEMMODAL|MB_OK);\n#endif\n return(-2);\n }\n\n\n// blah[0]=1;\n// retval = sendto(ugamesocket,blah,1,0,(struct sockaddr*)&ugameaddress,sizeof(struct sockaddr));\n// if (retval == SOCKET_ERROR) return(-1);\n\n blah[0]=1;\n SendData(1,blah);\n\n// retval = sendto(ugamesocket,blah,5,0,(struct sockaddr*)&ugameaddress,sizeof(struct sockaddr));\n// blah[0]=0;\n// i = sizeof(struct sockaddr);\n// retval = recvfrom(userversocket,blah,5,0,(struct sockaddr*)&userveraddress,&i);\n\n// MessageBox(NULL,blah,\n// \"Error\",\n// MB_SYSTEMMODAL|MB_OK);\n\n return(0);\n\n// retval = send(gamesocket,blah,1,0);\n// retval = recv(gamesocket,blah,1,0);\n }\n\n\n /* create the game socket and verify if it is valid */\n gamesocket = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);\n if (gamesocket == INVALID_SOCKET)\n {\n return(-2);\n }\n\n\n /* initialize server address */\n serveraddress.sin_family = AF_INET;\n if (!yesip)\n serveraddress.sin_addr = *( (LPIN_ADDR) *host1->h_addr_list );\n else\n serveraddress.sin_addr.s_addr = inet_addr(servername);\n\n serveraddress.sin_port = htons((unsigned short)port);\n\n\n /* try to connect to the server */\n retval = connect( gamesocket,\n (LPSOCKADDR)&serveraddress,\n sizeof(struct sockaddr));\n if (retval == SOCKET_ERROR)\n {\n#ifdef __UNIXSDL__\n STUB_FUNCTION;\n#else\n sprintf(blah,\"Could not connect to other side\");\n MessageBox(NULL,blah,\n \"Error\",\n MB_SYSTEMMODAL|MB_OK);\n#endif\n\n closesocket(gamesocket);\n return(-3);\n }\n\n// GetUDPStatus();\n\n return(0);\n}\n\nint WaitForServer(){\n int i;\n\n if (UDPEnable){\n if ((i=GetData(1,blah))){\n if ((i==1) && (blah[0]==1))\n return(1);\n }\n return(0);\n }\n return(1);\n}\n\n\n/**********************************************************\\\n* Disconnect from game server *\n* - no parameters *\n* *\n* - no known side effects *\n\\**********************************************************/\n\nvoid Disconnect()\n{\n if (UDPEnable)\n {\n closesocket(ugamesocket);\n closesocket(userversocket);\n return;\n }\n PacketCounter=0;\n closesocket(gamesocket);\n}\n\n\n/**********************************************************\\\n* Start the game server *\n* - parameters *\n - port number\n* - return 0 on success other value on error *\n* *\n* - no known side effects *\n\\**********************************************************/\n\nint StartServerCycle(unsigned short port)\n{\n int retval,i;\n\n portval = port;\n packetnum = 0;\n packetnumhead = 0;\n packetrecvhead = 0;\n ConnectAddr = 0;\n SendRepeated = 0;\n RecvPtr = 0;\n SendPtr = 0;\n RecvPtr2 = 0;\n SendPtr2 = 
0;\n\n for (i=0;i<16;i++)\n PrevSPacket[i]=0;\n\n\n if (UDPConfig) UDPEnable = 1;\n\n if (UDPEnable)\n {\n /* get host and verify if it is valid */\n PacketCounter=1;\n for (i=0;i<256;i++) {packettimeleft[i]=-1; packetconfirm[i]=1; packetreceived[i]=0; RecvFlags[i]=0;}\n\n userveraddress.sin_family = AF_INET;\n ugameaddress.sin_family = AF_INET;\n\n userveraddress.sin_addr.s_addr = INADDR_ANY;\n ugameaddress.sin_addr.s_addr = INADDR_ANY;\n\n// portval++;\n ugameaddress.sin_port = htons((unsigned short) portval);\n userveraddress.sin_port = htons((unsigned short) portval);\n// portval--;\n\n userversocket = socket(AF_INET, SOCK_DGRAM,0);\n ugamesocket = socket(AF_INET, SOCK_DGRAM,0);\n\n if (userversocket == INVALID_SOCKET)\n {\n#ifdef __UNIXSDL__\n STUB_FUNCTION;\n#else\n tcperr=WSAGetLastError();\n sprintf(blah,\"Could not initialize UDP(5) : %d\",tcperr);\n MessageBox(NULL,blah,\"Error\",MB_SYSTEMMODAL|MB_OK);\n#endif\n return(-2);\n }\n if (bind(userversocket,(struct sockaddr*)&userveraddress,sizeof(userveraddress))==\n SOCKET_ERROR)\n {\n#ifdef __UNIXSDL__\n STUB_FUNCTION;\n#else\n tcperr=WSAGetLastError();\n sprintf(blah,\"Could not initialize UDP(6) : %d\",tcperr);\n MessageBox(NULL,blah,\"Error\",MB_SYSTEMMODAL|MB_OK);\n#endif\n return(-2);\n }\n\n\n blah[0]=2;\n blah[1]='C';\n blah[2]='B';\n blah[3]='A';\n blah[4]=0;\n\n\n// retval = recvfrom(userversocket,blah,5,0,\n// (struct sockaddr *)&userveraddress,&socklen);\n\n ugameaddress.sin_addr.s_addr = userveraddress.sin_addr.s_addr;\n\n ugamesocket = socket(AF_INET, SOCK_DGRAM,0);\n\n return(0);\n\n// retval = send(gamesocket,blah,1,0);\n// retval = recv(gamesocket,blah,1,0);\n\n }\n\n /* Create the listen socket */\n serversocket = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);\n if (serversocket == INVALID_SOCKET)\n {\n#ifndef __UNIXSDL__\n tcperr=WSAGetLastError();\n#endif\n\n return(-1);\n }\n\n serveraddress.sin_family = AF_INET;\n serveraddress.sin_addr.s_addr = INADDR_ANY;\n serveraddress.sin_port = htons(port);\n\n /* bind name and socket */\n retval = bind(serversocket,\n (LPSOCKADDR)&serveraddress,\n sizeof(struct sockaddr));\n if (retval == SOCKET_ERROR)\n {\n#ifndef __UNIXSDL__\n tcperr=WSAGetLastError();\n#endif\n closesocket(serversocket);\n return(-2);\n }\n\n /* setup socket to listen */\n retval = listen(serversocket, SOMAXCONN);\n if (retval == SOCKET_ERROR)\n {\n#ifndef __UNIXSDL__\n tcperr=WSAGetLastError();\n#endif\n closesocket(serversocket);\n return(-3);\n }\n\n return 0;\n}\n\nint acceptzuser()\n{\n if (UDPEnable)\n {\n return(0);\n }\n\n /* wait for connection */\n\n gamesocket = accept(serversocket, NULL, NULL);\n if (gamesocket == INVALID_SOCKET)\n {\n#ifndef __UNIXSDL__\n tcperr=WSAGetLastError();\n#endif\n closesocket(serversocket);\n serversocket=-1;\n return(-1);\n }\n\n// GetUDPStatus();\n\n return(0);\n}\n\nint ServerCheckNewClient()\n{\n FD_SET_VAR zrf;\n struct timeval nto;\n int r;\n\n if (UDPEnable)\n {\n r=GetData(256,blah);\n if (r == -1) return(-1);\n if (r > 0){\n ugameaddress.sin_addr.s_addr=userveraddress.sin_addr.s_addr;\n ConnectAddr = ugameaddress.sin_addr.s_addr;\n blah[0]=1;\n r=SendData(1,blah);\n return(1);\n }\n return(0);\n }\n\n if(serversocket == INVALID_SOCKET)\n {\n return(-1);\n }\n nto.tv_sec=0;\n nto.tv_usec=0; /* return immediately */\n\n FD_ZERO(&zrf);\n FD_SET(serversocket,&zrf);\n r=select(serversocket+1,&zrf,0,0,&nto);\n\n if(r == -1)\n {\n#ifndef __UNIXSDL__\n tcperr=WSAGetLastError();\n#endif\n return(-2);\n }\n if(r == 0)\n {\n return(0);\n }\n 
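ServerCheckNewClient above polls with the zero-timeout select idiom. Isolated into a tiny helper, and reusing this file's own SOCKET/FD_SET_VAR portability macros, it reads (illustrative restatement):

static int ex_sock_readable(SOCKET s)
{
  FD_SET_VAR rf;
  struct timeval nto;

  nto.tv_sec = 0;
  nto.tv_usec = 0;     /* poll: return immediately */
  FD_ZERO(&rf);
  FD_SET(s, &rf);
  /* >0 means data is waiting, 0 means idle, negative means error;
     the first argument matters on POSIX and is ignored by Winsock. */
  return(select(s + 1, &rf, 0, 0, &nto));
}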
if(FD_ISSET(serversocket,&zrf))\n {\n return 1;\n }\n return(0);\n\n}\n\n\n/**********************************************************\\\n* Stop the game server *\n* - no parameters *\n* *\n* - no known side effects *\n\\**********************************************************/\n\nvoid StopServer()\n{\n if (UDPEnable)\n {\n closesocket(ugamesocket);\n closesocket(userversocket);\n return;\n }\n PacketCounter=0;\n closesocket(gamesocket);\n closesocket(serversocket);\n}\n\n\n/**********************************************************\\\n* Send data *\n* - parameters : *\n* - size of data *\n* - pointer to data *\n* - return 0 on success other value on error *\n* *\n* - side effects : *\n* - close the socket on error *\n\\**********************************************************/\n\nint PacketReceive()\n{\n int dataleft,i,i2,i3,i4,i5,i6,i7,retval;\n\n dataleft=GetLeftUDP();\n if (dataleft<=0) return(dataleft);\n i = sizeof(userveraddress);\n retval = recvfrom(userversocket,cpacketdata,2048+32,0,(struct sockaddr *)&userveraddress,&i);\n if ((ConnectAddr!=0) && (ConnectAddr != userveraddress.sin_addr.s_addr)) return(0);\n if (retval == SOCKET_ERROR)\n {\n closesocket(ugamesocket);\n return(-1);\n }\n if ((cpacketdata[0]==1) && (retval>0)) {\n i=(unsigned char)cpacketdata[1];\n blah[0]=2;\n blah[1]=cpacketdata[1];\n sendto(ugamesocket,blah,2,0,(struct sockaddr *)&ugameaddress,sizeof(ugameaddress));\n if (!packetreceived[i]){\n packetreceived[i]=1;\n packetreceivesize[i]=retval-2;\n CopyMemory(&(packetrdata[2048*(i & 0x0F)]),&(cpacketdata[2]),retval-2);\n }\n }\n if (cpacketdata[0]==2){\n packetconfirm[cpacketdata[1]]=1;\n while ((packetconfirm[packetnumhead]) && (packetnum!=packetnumhead))\n packetnumhead=(packetnumhead+1) & 0xFF;\n }\n\n if ((cpacketdata[0]==16) && (cpacketdata[1]!=SendPtr)){\n i=cpacketdata[1];\n cpacketdata[0]=17;\n cpacketdata[2]=SendPtr;\n i3=3;\n while (i!=SendPtr){\n cpacketdata[i3]=SendBufferSize[i];\n i3++;\n for (i4=0;i4<SendBufferSize[i];i4++){\n cpacketdata[i3]=SendBuffer[i4+(i << 5)];\n i3++;\n }\n i=(i+1) & 0xFF;\n }\n sendto(ugamesocket,cpacketdata,i3,0,(struct sockaddr *)&ugameaddress,sizeof(ugameaddress));\n return(0);\n }\n\n if (cpacketdata[0]==17){\n i2=cpacketdata[1];\n i3=3;\n while (i2!=cpacketdata[2]){\n i4=cpacketdata[i3];\n i3++;\n RecvFlags[i2]=1;\n RecvBufferSize[i2]=i4;\n for (i5=0;i5<i4;i5++){\n RecvBuffer[(i2 << 5)+i5]=cpacketdata[i3];\n i3++;\n }\n i2=(i2+1) & 0xFF;\n }\n }\n\n i2=RecvPtr+(RecvPtr2 << 8);\n i3=(cpacketdata[2]+(cpacketdata[3] << 8))-i2;\n if (i3<0) i3+=65536;\n\n if ((((cpacketdata[0] & 0xF7)==4) || ((cpacketdata[0] & 0xF7)==5))\n && ((i3>=0) && (i3<=127))) {\n\n\n CLatencyVal=cpacketdata[1];\n i=cpacketdata[2];\n i3=0;\n\n if ((cpacketdata[0] & 0x07)==4){\n for (i2=0;i2<cpacketdata[4];i2++){\n RecvBuffer[((i-i2) & 0xFF) << 5] = 0;\n RecvBuffer[(((i-i2) & 0xFF) << 5)+1] = CLatencyVal;\n RecvFlags[((i-i2) & 0xFF)] = 1;\n RecvBufferSize[((i-i2) & 0xFF)] = 2;\n }\n i3+=5;\n } else {\n for (i2=0;i2<cpacketdata[4];i2++){\n RecvBuffer[(i << 5) + i2] = cpacketdata[i2+5];\n }\n RecvFlags[i] = 1;\n RecvBufferSize[i] = cpacketdata[4];\n i3+=cpacketdata[4]+5;\n }\n if (cpacketdata[0] & 0x08){\n retval=cpacketdata[i3];\n i3++;\n for (i2=0;i2<retval;i2++){\n i=cpacketdata[i3];\n i5=cpacketdata[i3+1];\n i3+=2;\n RecvFlags[i] = 1;\n RecvBufferSize[i] = i5;\n if ((cpacketdata[i3]==0) && (i5==3)){\n i7 = cpacketdata[i3+2];\n for (i6=0;i6<i7;i6++){\n RecvFlags[(i-i6) & 0xFF] = 1;\n RecvBufferSize[(i-i6) & 0xFF] = 2;\n }\n for 
(i4=0;i4<i5;i4++){\n for (i6=0;i6<i7;i6++)\n RecvBuffer[(((i-i6) & 0xFF) << 5)+i4]=cpacketdata[i3];\n i3++;\n }\n }\n else\n {\n for (i4=0;i4<i5;i4++){\n RecvBuffer[(i << 5)+i4]=cpacketdata[i3];\n i3++;\n }\n }\n }\n }\n }\n\n return(0);\n}\n\n\nvoid PacketResend()\n{\n int i;\n for (i=0;i<256;i++) {\n if ((packettimeleft[i]==0) && (packetconfirm[i]==0)){\n packettimeleft[i]=180;\n if (packetresent[i]==1) packettimeleft[i]=60;\n if (packetresent[i]==2) packettimeleft[i]=90;\n if (packetsize[i]>512) packettimeleft[packetnum]=60*3;\n packetresent[i]++;\n CopyMemory(&(cpacketdata[2]),&(packetdata[2048*(i & 0x0F)]),packetsize[i]);\n cpacketdata[0]=1;\n cpacketdata[1]=(char)i;\n sendto(ugamesocket,cpacketdata,packetsize[i]+2,0,(struct sockaddr *)&ugameaddress,sizeof(ugameaddress));\n }\n }\n}\n\nextern void UpdateVFrame();\n\nint SendData(int dsize,unsigned char *dptr)\n{\n int retval;\n\n if (UDPEnable){\n/* retval = sendto(ugamesocket,dptr,dsize,0,(struct sockaddr *)&ugameaddress,sizeof(ugameaddress));\n if (retval == SOCKET_ERROR)\n {\n closesocket(gamesocket);\n return(-1);\n }\n return(0); */\n\n if (((packetnum-packetnumhead) & 0xFF) >= 15){\n// sprintf(message1,\"Packet Overflow.\");\n// MessageBox (NULL, message1, \"Init Error\" , MB_ICONERROR );\n\n // wait for receive packet, call JoyRead while waiting\n while (((packetnum-packetnumhead) & 0xFF) >= 15){\n PacketResend();\n PacketReceive();\n UpdateVFrame();\n while ((packetconfirm[packetnumhead]) && (packetnum!=packetnumhead))\n packetnumhead=(packetnumhead+1) & 0xFF;\n }\n }\n CopyMemory(&(cpacketdata[2]),dptr,dsize);\n CopyMemory(&(packetdata[2048*(packetnum & 0x0F)]),dptr,dsize);\n packetsize[packetnum]=dsize;\n packetconfirm[packetnum]=0;\n cpacketdata[0]=1;\n cpacketdata[1]=(char)packetnum;\n retval = sendto(ugamesocket,cpacketdata,dsize+2,0,(struct sockaddr *)&ugameaddress,sizeof(ugameaddress));\n packettimeleft[packetnum]=60;\n if (dsize>512) packettimeleft[packetnum]=90;\n packetresent[packetnum]=1;\n packetnum=(packetnum+1) & 0xFF;\n if (retval == SOCKET_ERROR)\n {\n closesocket(ugamesocket);\n return(-1);\n }\n return(0);\n }\n\n /* send data with the socket */\n retval = send(gamesocket,dptr,dsize,0);\n if (retval == SOCKET_ERROR)\n {\n closesocket(gamesocket);\n return(-1);\n }\n return(0);\n}\n\nextern int PacketSendSize;\nextern unsigned char PacketSendArray[2048+256];\n\nint SendDataNop()\n{\n return (SendData(PacketSendSize,PacketSendArray));\n}\n\n\n/**********************************************************\\\n* Send data UDP *\n* - parameters : *\n* - size of data *\n* - pointer to data *\n* - return 0 on success other value on error *\n* *\n* - side effects : *\n* - close the socket on error *\n\\**********************************************************/\n\nint AttachEnd(int psb){\n int i,i2,i3,ps;\n//int PrevSPacket[4];\n//int PrevSData[4*32];\n//int PrevSSize[4];\n\n ps=psb;\n i2=0;\n for (i=0;i<(UDPBackTrace-1);i++){\n if (PrevSPacket[i]) i2++;\n }\n// if (PrevSPacket[0]) i2=0;\n if (i2){\n cpacketdata[0]+=8;\n cpacketdata[ps]=(char)i2;\n ps++;\n for (i=0;i<(UDPBackTrace-1);i++){\n if (PrevSPacket[i]){\n cpacketdata[ps]=PrevSPtr[i];\n cpacketdata[ps+1]=PrevSSize[i];\n ps+=2;\n for (i3=0;i3<PrevSSize[i];i3++){\n cpacketdata[ps]=PrevSData[i*32+i3];\n ps++;\n }\n }\n }\n for (i=0;i<(UDPBackTrace-2);i++){\n PrevSPacket[i]=PrevSPacket[i+1];\n PrevSSize[i]=PrevSSize[i+1];\n PrevSPtr[i]=PrevSPtr[i+1];\n CopyMemory(&(PrevSData[i*32]),&(PrevSData[i*32+32]),32);\n }\n }\n\n return ps;\n}\n\nint SendDataUDP(int 
dsize,unsigned char *dptr)\n{\n int retval,i;\n int packetsize;\n\n\n\n// return (SendData(dsize,dptr));\n\n if (UDPEnable){\n\n/*int SendPtr;\nchar SendBuffer[256*32];\nchar SendBufferSize[256];*/\n blahblahblah++;\n\n packetsize = 0;\n\n for (i=0;i<dsize;i++)\n SendBuffer[SendPtr*32+i]=dptr[i];\n SendBufferSize[SendPtr]=dsize;\n\n if ((dsize == 2) && (dptr[0]<=1)){\n if (SendRepeated < 32) SendRepeated++;\n cpacketdata[0]=4;\n cpacketdata[1]=dptr[1];\n cpacketdata[2]=(char)SendPtr;\n cpacketdata[3]=(char)SendPtr2;\n cpacketdata[4]=(char)SendRepeated;\n packetsize=5;\n packetsize=AttachEnd(packetsize);\n PrevSPacket[UDPBackTrace-2]=0;\n SendPtr=(SendPtr+1) & 0xFF;\n if (!SendPtr) SendPtr2=(SendPtr2+1) & 0xFF;\n retval = sendto(ugamesocket,cpacketdata,packetsize,0,(struct sockaddr *)&ugameaddress,sizeof(ugameaddress));\n if (retval == SOCKET_ERROR)\n {\n closesocket(gamesocket);\n return(-1);\n }\n } else {\n if (SendRepeated){\n PrevSPacket[UDPBackTrace-2]=1;\n PrevSSize[UDPBackTrace-2]=3;\n PrevSData[(UDPBackTrace-2)*32]=0;\n PrevSData[(UDPBackTrace-2)*32+1]=dptr[1];\n PrevSData[(UDPBackTrace-2)*32+2]=SendRepeated;\n PrevSPtr[UDPBackTrace-2]=(SendPtr-1) & 0xFF;\n }\n SendRepeated=0;\n cpacketdata[0]=5;\n cpacketdata[1]=dptr[1];\n cpacketdata[2]=SendPtr;\n cpacketdata[3]=SendPtr2;\n cpacketdata[4]=dsize;\n packetsize=5;\n for (i=0;i<dsize;i++)\n cpacketdata[i+5]=dptr[i];\n packetsize+=dsize;\n packetsize=AttachEnd(packetsize);\n\n PrevSPacket[UDPBackTrace-2]=1;\n PrevSSize[UDPBackTrace-2]=dsize;\n for (i=0;i<dsize;i++)\n PrevSData[(UDPBackTrace-2)*32+i]=dptr[i];\n PrevSPtr[UDPBackTrace-2]=SendPtr;\n\n SendPtr=(SendPtr+1) & 0xFF;\n if (!SendPtr) SendPtr2=(SendPtr2+1) & 0xFF;\n retval = sendto(ugamesocket,cpacketdata,packetsize,0,(struct sockaddr *)&ugameaddress,sizeof(ugameaddress));\n if (retval == SOCKET_ERROR)\n {\n closesocket(gamesocket);\n return(-1);\n }\n }\n return(0);\n }\n\n /* send data with the socket */\n retval = sendto(gamesocket,dptr,dsize,0,(struct sockaddr *) &ugameaddress,sizeof(struct sockaddr));\n if (retval == SOCKET_ERROR)\n {\n closesocket(gamesocket);\n return(-1);\n }\n return(0);\n}\n\nint SendDataUDPNop()\n{\n return (SendDataUDP(PacketSendSize,PacketSendArray));\n}\n\n/**********************************************************\\\n* Get data left *\n* - return size left on success negative value on error *\n* *\n* - side effects : *\n* - close the socket on error *\n\\**********************************************************/\n\nint GetLeft()\n{\n int retval;\n int tempsize;\n retval = ioctlsocket(gamesocket,FIONREAD,&tempsize);\n if (retval == SOCKET_ERROR)\n {\n closesocket(gamesocket);\n return(-1);\n }\n return(tempsize);\n}\n\nint GetLeftUDP()\n{\n FD_SET_VAR zrf;\n struct timeval nto;\n int r;\n\n nto.tv_sec=0;\n nto.tv_usec=0; /* return immediately */\n\n FD_ZERO(&zrf);\n FD_SET(userversocket,&zrf);\n r=select(userversocket+1,&zrf,0,0,&nto);\n\n if (r == SOCKET_ERROR)\n {\n closesocket(userversocket);\n return(-1);\n }\n return(r);\n}\n\n/**********************************************************\\\n* Receive data *\n* - parameters : *\n* - size of data *\n* - pointer to data *\n* - return size on success negative value on error *\n* *\n* - side effects : *\n* - close the socket on error *\n\\**********************************************************/\n\nint GetData(int dsize,unsigned char *dptr)\n{\n int retval,i;\n int dataleft;\n\n retval=0;\n\n // Temporary UDP routines\n if (UDPEnable) {\n\n PacketResend();\n PacketReceive();\n\n 
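// (Editor's annotation; not in the original ZSNES source.) The block below\n    // drains the reliable-UDP receive queue in order: packetrecvhead walks a\n    // 256-entry sequence window (wrapped with & 0xFF) while payloads occupy\n    // only 16 slots of 2048 bytes each (index i & 0x0F); clearing the flag at\n    // (i+128) & 0xFF retires a sequence number half a window later, so a late\n    // duplicate of an old packet is still recognized and dropped.\n    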
i=packetrecvhead;\n if (packetreceived[i]){\n CopyMemory(dptr,&(packetrdata[2048*(i & 0x0F)]),packetreceivesize[i]);\n retval = packetreceivesize[i];\n packetreceived[(i+128) & 0xFF]=0;\n packetrecvhead=(packetrecvhead+1) & 0xFF;\n return(retval);\n }\n\n i=RecvPtr;\n if ((RecvFlags[i]) && (UDPMode2)){\n CopyMemory(dptr,&(RecvBuffer[32*i]),RecvBufferSize[i]);\n retval = RecvBufferSize[i];\n RecvFlags[(i+128) & 0xFF]=0;\n RecvPtr=(RecvPtr+1) & 0xFF;\n if (!RecvPtr) RecvPtr2=(RecvPtr2+1) & 0xFF;\n CounterA=90;\n return(retval);\n }\n\n if ((CounterA==0) & (UDPMode2)){\n // Send 16+RecvPtr\n cpacketdata[0]=16;\n cpacketdata[1]=RecvPtr;\n sendto(ugamesocket,cpacketdata,2,0,(struct sockaddr *)&ugameaddress,sizeof(ugameaddress));\n CounterA=90;\n return(0);\n }\n\n return(0);\n }\n\n dataleft=GetLeft();\n if(dataleft==0) return(0);\n\n if(dataleft<dsize)\n {\n dsize=dataleft;\n }\n /* get data with the socket */\n retval = recv(gamesocket,dptr,dsize,0);\n if (retval == SOCKET_ERROR)\n {\n closesocket(gamesocket);\n return(-1);\n }\n return(retval);\n}\n\nextern unsigned char PacketRecvArray[2048+256];\n\nint GetDataNop()\n{\n return (GetData(2048,PacketRecvArray));\n}\n\nvoid GetHostName()\n{\n HOSTENT* phe;\n\n if (!InitTCP()){\n\n strcpy(hostname,\"YOUR IP: \");\n gethostname(blah,255);\n phe = gethostbyname(blah);\n strcpy(blah, inet_ntoa(*(struct in_addr*)phe->h_addr));\n strcat(hostname,blah);\n }\n}\n\nvoid UDPWait1Sec(){\n CounterB=60;\n while (CounterB>0)\n UpdateVFrame();\n}\n\nvoid UDPClearVars(){\n int i;\n CounterA=-1;\n RecvPtr = 0;\n SendPtr = 0;\n for (i=0;i<16;i++)\n PrevSPacket[i]=0;\n for (i=0;i<256;i++)\n RecvFlags[i]=0;\n}\n\nvoid UDPEnableMode(){\n UDPMode2=1;\n}\n\nvoid UDPDisableMode(){\n UDPMode2=0;\n}\n\nvoid WinErrorA2(){\n#ifdef __UNIXSDL__\n STUB_FUNCTION;\n#else\n char message1[256];\n sprintf(message1,\"Failed waiting for checksum.\");\n MessageBox (NULL, message1, \"Init Error\" , MB_ICONERROR );\n#endif\n}\n\nvoid WinErrorB2(){\n#ifdef __UNIXSDL__\n STUB_FUNCTION;\n#else\n char message1[256];\n sprintf(message1,\"Failed waiting for confirmation.\");\n MessageBox (NULL, message1, \"Init Error\" , MB_ICONERROR );\n#endif\n}\n\nvoid WinErrorC2(){\n#ifdef __UNIXSDL__\n STUB_FUNCTION;\n#else\n char message1[256];\n sprintf(message1,\"Failed waiting for confirmation(B).\");\n MessageBox (NULL, message1, \"Init Error\" , MB_ICONERROR );\n#endif\n}\n\n\n\n" }, { "alpha_fraction": 0.6109201908111572, "alphanum_fraction": 0.6254295706748962, "avg_line_length": 24.42718505859375, "blob_id": "16d01ab09800b0f8cb5b5fbe7799e742dd9a18aa", "content_id": "05085a4079bebe390ca7501fb84538b5b85aeab5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 5238, "license_type": "no_license", "max_line_length": 97, "num_lines": 206, "path": "/src/cpu/zspc/zspc.cpp", "repo_name": "TeoTwawki/zsnes-archive", "src_encoding": "UTF-8", "text": "// zspc 0.9.0. http://www.slack.net/~ant/\n\n#include \"zspc.h\"\n\n#include \"resamp.h\"\n#include \"spc_filt.h\"\n#include \"disasm.h\"\n#include <stdio.h>\n#include <ctype.h>\n\n/* Copyright (C) 2007 Shay Green. This module is free software; you\ncan redistribute it and/or modify it under the terms of the GNU Lesser\nGeneral Public License as published by the Free Software Foundation; either\nversion 2.1 of the License, or (at your option) any later version. 
This\nmodule is distributed in the hope that it will be useful, but WITHOUT ANY\nWARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS\nFOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more\ndetails. You should have received a copy of the GNU Lesser General Public\nLicense along with this module; if not, write to the Free Software Foundation,\nInc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */\n\n// speeds emulation up a bit\n#define SPC_DISABLE_TEMPO 1\n\n// Included here rather than individually to avoid having to\n// change several makefiles when source files are added/removed\n#include \"snes_spc.cpp\"\n#include \"snes_spm.cpp\"\n#include \"snes_sps.cpp\"\n\nextern \"C\" {\n#include \"spc_bootrom.c\"\n}\n\nuint32_t zspc_time;\n\nstatic SNES_SPC spc;\nstatic zspc_sample_t* out_begin;\nstatic zspc_sample_t* out_pos;\nstatic zspc_sample_t* out_end;\nstatic SPC_Filter filter;\n\nstatic void set_buf()\n{\n\tspc.set_output( resampler_buffer(), resampler_max_write() );\n}\n\nstatic void clear_buf()\n{\n\tzspc_time = 0;\n\tresampler_clear();\n\tfilter.clear();\n\tset_buf();\n}\n\nstatic void flush_to_resampler()\n{\n\tint count = spc.sample_count();\n\tif ( count > resampler_max_write() )\n\t{\n\t\t//assert( false ); // fails if too much sound was buffered\n\t\tcount = resampler_max_write();\n\t}\n\tfilter.run( resampler_buffer(), count );\n\tresampler_write( count );\n}\n\nvoid zspc_enable_filter( int enable )\n{\n\tfilter.enable( enable );\n}\n\nvoid zspc_set_gain( int gain )\n{\n\tfilter.set_gain( gain * (SPC_Filter::gain_unit / zspc_gain_unit) );\n}\n\nvoid zspc_set_bass( int bass )\n{\n\tfilter.set_bass( bass );\n}\n\nzspc_err_t zspc_init()\n{\n zspc_err_t err = spc.init();\n if ( err ) return err;\n\n zspc_set_rate( 32000 );\n\n static unsigned char boot [SNES_SPC::rom_size];\n if ( !boot [0] )\n assemble_bootrom( boot );\n spc.init_rom( boot );\n zspc_reset();\n\n return 0;\n}\n\nzspc_err_t zspc_set_rate( int rate )\n{\n\tresampler_set_rate( 32000, rate );\n\treturn 0;\n}\n\nvoid zspc_set_output( zspc_sample_t* p, zspc_sample_count_t n )\n{\n\tout_pos = p;\n\tif ( p )\n\t{\n\t\tout_begin = p;\n\t\tout_end = p + n;\n\t}\n}\n\nzspc_sample_count_t zspc_sample_count()\n{\n\treturn out_pos ? 
out_pos - out_begin : 0;\n}\n\nzspc_err_t zspc_play( zspc_sample_count_t n, zspc_sample_t* out )\n{\n\tzspc_sample_t* const end = out + n;\n\twhile ( (out += resampler_read( out, end - out )) < end ) // read from resampler\n\t{\n\t\t// refill resampler\n\t\tset_buf();\n\t\tspc.end_frame( RESAMPLER_BUF_SIZE / 2 * (zspc_clocks_per_sample / 2) );\n\t\tflush_to_resampler();\n\t}\n\treturn 0; // TODO: check for CPU error\n}\n\nvoid zspc_reset () { spc.reset(); clear_buf(); }\nvoid zspc_soft_reset () { spc.soft_reset(); clear_buf(); }\nvoid zspc_mute_voices ( int mask ) { spc.mute_voices( mask ); }\n\nzspc_err_t zspc_load_spc( void const* p, long n )\n{\n\tzspc_err_t err = spc.load_spc( p, n );\n\tclear_buf();\n\treturn err;\n}\n\nvoid zspc_clear_echo () { spc.clear_echo(); }\n\nvoid zspc_copy_state ( unsigned char** p, zspc_copy_func_t f ) { spc.copy_state( p, f ); }\nvoid zspc_init_header ( void* spc_out ) { SNES_SPC::init_header( spc_out ); }\nvoid zspc_save_spc ( void* spc_out ) { spc.save_spc( spc_out ); }\nint zspc_check_kon () { return spc.check_kon(); }\n\nint zspc_disasm( int addr, char* out )\n{\n\tout += sprintf( out, \"%04X \", addr );\n\tbyte const* instr = &spc.ram() [addr];\n\treturn spc_disasm( addr, instr [0], instr [1], instr [2], out );\n}\n\nconst char* zspc_log_cpu()\n{\n\t// Save registers and see if PC changes after running\n\tSNES_SPC::regs_t r = spc.regs();\n\tspc.run_until( zspc_time );\n\tif ( spc.regs().pc == r.pc )\n\t\treturn 0;\n\n\t// Disassemble first instruction\n\tchar opcode [zspc_disasm_max];\n\tzspc_disasm( r.pc, opcode );\n\n\t// Decode PSW\n\tchar flags [9] = \"nvdbhizc\";\n\tfor ( int i = 0; i < 8; i++ )\n\t\tif ( r.psw << i & 0x80 )\n\t\t\tflags [i] = toupper( flags [i] );\n\n\t// Return disassembly string\n\tstatic char str [64 + spc_disasm_max];\n\tsprintf( str, \"%-24s; A=%02X X=%02X Y=%02X SP=%03X PSW=%02X %s\",\n\t\t\topcode, (int) r.a, (int) r.x, (int) r.y, (int) r.sp + 0x100, (int) r.psw, flags );\n\treturn str;\n}\n\n\n//// Functions called from asm\n\nuint32_t zspc_port;\nuint32_t zspc_data;\n\nvoid zspc_read_port () { zspc_data = spc.read_port( zspc_time, zspc_port ); }\nvoid zspc_write_port() { spc.write_port( zspc_time, zspc_port, zspc_data ); }\n\nvoid zspc_flush_samples()\n{\n\tspc.end_frame( zspc_time );\n\tzspc_time = 0;\n\tif ( out_pos )\n\t{\n\t\tflush_to_resampler();\n\t\tout_pos += resampler_read( out_pos, out_end - out_pos );\n\t\tset_buf();\n\t}\n}\n\n#include \"spc_filt.cpp\"\n#include \"disasm.c\"\n#include \"resamp.c\"\n" }, { "alpha_fraction": 0.49309033155441284, "alphanum_fraction": 0.5312781929969788, "avg_line_length": 21.662015914916992, "blob_id": "b3dc1e3e62390f18d0e46d7b02713276c70a4baa", "content_id": "661ebc81dd4ecfda2b7111e33b5c946ae8191d6e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 73086, "license_type": "no_license", "max_line_length": 360, "num_lines": 3225, "path": "/src/win/winlink.cpp", "repo_name": "TeoTwawki/zsnes-archive", "src_encoding": "UTF-8", "text": "/*\nCopyright (C) 1997-2008 ZSNES Team ( zsKnight, _Demo_, pagefault, Nach )\n\nhttp://www.zsnes.com\nhttp://sourceforge.net/projects/zsnes\nhttps://zsnes.bountysource.com\n\nThis program is free software; you can redistribute it and/or\nmodify it under the terms of the GNU General Public License\nversion 2 as published by the Free Software Foundation.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty 
of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with this program; if not, write to the Free Software\nFoundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.\n*/\n\n#define DIRECTINPUT_VERSION 0x0800\n#define DIRECTSOUND_VERSION 0x0800\n#define __STDC_CONSTANT_MACROS\n\nextern \"C\"\n{\n#include <windows.h>\n#include <stdio.h>\n}\n#include <stdint.h>\n#include <math.h>\n#include <ctype.h>\n#include <dsound.h>\n#include <dinput.h>\n#include <winuser.h>\n#include \"resource.h\"\n#include \"../cpu/zspc/zspc.h\"\n\nextern \"C\"\n{\n#include \"gl_draw.h\"\n#include \"../cfg.h\"\n#include \"../input.h\"\n#include \"../zmovie.h\"\n#include \"../asm_call.h\"\n void zexit(), zexit_error();\n}\n\n#include \"winlink.h\"\n\n#ifdef QT_DEBUGGER\n#include \"../debugger/load.h\"\n#endif\n\nDWORD Moving = 0;\nDWORD SoundBufferSize = 1024 * 18;\nDWORD FirstSound = 1;\n\nint SoundEnabled = 1;\nint UsePrimaryBuffer = 0;\n\nDWORD FirstActivate = 1;\n\nHANDLE debugWindow = 0;\n\nextern \"C\"\n{\n HWND hMainWindow;\n HINSTANCE hInst;\n HDC hDC;\n HGLRC hRC;\n DWORD FullScreen = 0;\n DWORD WindowWidth = 256;\n DWORD WindowHeight = 224;\n}\n\nLPDIRECTSOUND8 lpDirectSound = NULL;\nLPDIRECTSOUNDBUFFER8 lpSoundBuffer = NULL;\nLPDIRECTSOUNDBUFFER lpPrimaryBuffer = NULL;\nDSBUFFERDESC dsbd;\n\nLPVOID lpvPtr1;\nDWORD dwBytes1;\nLPVOID lpvPtr2;\nDWORD dwBytes2;\n\nLPDIRECTINPUT8 DInput = NULL;\nLPDIRECTINPUTDEVICE8 MouseInput = NULL;\nLPDIRECTINPUTDEVICE8 KeyboardInput = NULL;\nLPDIRECTINPUTDEVICE8 JoystickInput[5];\nDIJOYSTATE js[5];\n\nDWORD X1Disable[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };\nDWORD X2Disable[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };\nDWORD Y1Disable[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };\nDWORD Y2Disable[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };\nDWORD Z1Disable[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };\nDWORD Z2Disable[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };\nDWORD RX1Disable[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };\nDWORD RX2Disable[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };\nDWORD RY1Disable[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };\nDWORD RY2Disable[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };\nDWORD RZ1Disable[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };\nDWORD RZ2Disable[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };\nDWORD S01Disable[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };\nDWORD S02Disable[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };\nDWORD S11Disable[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };\nDWORD S12Disable[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };\nDWORD POVDisable[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };\nDWORD NumPOV[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };\nDWORD NumBTN[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };\n\nDWORD CurrentJoy = 0;\n\nBYTE BitDepth;\nBYTE BackColor = 0;\nDEVMODE mode;\nHWND DebugWindowHandle;\n\nfloat MouseMinX = 0;\nfloat MouseMaxX = 256;\nfloat MouseMinY = 0;\nfloat MouseMaxY = 223;\nfloat MouseX;\nfloat MouseY;\nfloat MouseMoveX;\nfloat MouseMoveY;\nBYTE MouseButtonPressed;\nBYTE IsActivated = 1;\nBYTE PrevRes=0;\n\nextern \"C\"\n{\n DWORD MouseButton;\n DWORD SurfaceX = 0;\n DWORD SurfaceY = 0;\n RECT BlitArea;\n BYTE AltSurface = 0;\n RECT rcWindow;\n DWORD 
GBitMask;\n}\n\nHANDLE hLock, hThread;\nDWORD dwThreadId, dwThreadParam, semaphore_run;\n\nextern \"C\"\n{\n int SemaphoreMax = 5;\n void InitSemaphore();\n void ShutdownSemaphore();\n void DisplayWIPDisclaimer();\n void InitDebugger();\n void DockDebugger();\n void Clear2xSaIBuffer();\n void clear_display();\n DWORD CurMode = ~0;\n extern WORD totlines;\n}\nstatic char dinput8_dll[] = { \"dinput8.dll\\0\" };\nstatic char dinput8_imp[] = { \"DirectInput8Create\\0\" };\n\nstatic char ddraw_dll[] = { \"ddraw.dll\\0\" };\nstatic char ddraw_imp[] = { \"DirectDrawCreateEx\\0\" };\n\nstatic char dsound_dll[] = { \"dsound.dll\\0\" };\nstatic char dsound_imp[] = { \"DirectSoundCreate8\\0\" };\n\nstatic HMODULE hM_ddraw = NULL, hM_dsound = NULL, hM_dinput8 = NULL;\n\ntypedef HRESULT (WINAPI* lpDirectInput8Create)(HINSTANCE hinst, DWORD dwVersion, REFIID riidltf,\n LPVOID *ppvOut, LPUNKNOWN punkOuter);\nstatic lpDirectInput8Create pDirectInput8Create;\n\ntypedef HRESULT (WINAPI* lpDirectDrawCreateEx)(GUID FAR *lpGuid, LPVOID *lplpDD, REFIID iid,\n IUnknown FAR *pUnkOuter);\n\nextern \"C\"\n{\n lpDirectDrawCreateEx pDirectDrawCreateEx;\n}\n\ntypedef HRESULT (WINAPI* lpDirectSoundCreate8)(LPCGUID pcGuidDevice, LPDIRECTSOUND8 *ppDS8,\n LPUNKNOWN pUnkOuter);\nstatic lpDirectSoundCreate8 pDirectSoundCreate8;\n\nextern \"C\" void FreeDirectX()\n{\n FreeLibrary(hM_dsound);\n FreeLibrary(hM_ddraw);\n FreeLibrary(hM_dinput8);\n zexit();\n}\n\nextern \"C\" void DXLoadError()\n{\n if (MessageBox(NULL,\n \"Sorry, you need to install or reinstall DirectX v8.0 or higher\\nto use ZSNESW.\\nWould you like to go to the DirectX homepage?\",\n \"Error\", MB_ICONINFORMATION | MB_YESNO) == IDYES)\n {\n ShellExecute(NULL, NULL, \"http://www.microsoft.com/directx/\", NULL, NULL, 0);\n }\n FreeDirectX();\n}\n\nextern \"C\" void ImportDirectX()\n{\n hM_dinput8 = LoadLibrary(dinput8_dll);\n\n if (hM_dinput8 == NULL)\n {\n DXLoadError();\n }\n\n pDirectInput8Create = (lpDirectInput8Create) GetProcAddress(hM_dinput8, dinput8_imp);\n\n if (pDirectInput8Create == NULL)\n {\n char err[256];\n wsprintf(err, \"Failed to import %s:%s\", dinput8_dll, dinput8_imp);\n MessageBox(NULL, err, \"Error\", MB_ICONERROR);\n DXLoadError();\n }\n\n hM_ddraw = LoadLibrary(ddraw_dll);\n\n if (hM_ddraw == NULL)\n {\n char err[256];\n wsprintf(err, \"Failed to import %s\", ddraw_dll);\n MessageBox(NULL, err, \"Error\", MB_ICONERROR);\n DXLoadError();\n }\n\n pDirectDrawCreateEx = (lpDirectDrawCreateEx) GetProcAddress(hM_ddraw, ddraw_imp);\n\n if (pDirectDrawCreateEx == NULL)\n {\n char err[256];\n wsprintf(err, \"Failed to import %s:%s\", ddraw_dll, ddraw_imp);\n MessageBox(NULL, err, \"Error\", MB_ICONERROR);\n DXLoadError();\n }\n\n hM_dsound = LoadLibrary(dsound_dll);\n\n if (hM_dsound == NULL)\n {\n char err[256];\n wsprintf(err, \"Failed to import %s\", dsound_dll);\n MessageBox(NULL, err, \"Error\", MB_ICONERROR);\n DXLoadError();\n }\n\n pDirectSoundCreate8 = (lpDirectSoundCreate8) GetProcAddress(hM_dsound, dsound_imp);\n\n if (pDirectSoundCreate8 == NULL)\n {\n char err[256];\n wsprintf(err, \"Failed to import %s:%s\", dsound_dll, dsound_imp);\n MessageBox(NULL, err, \"Error\", MB_ICONERROR);\n DXLoadError();\n }\n}\n\n// milliseconds per world update\n#define UPDATE_TICKS_GAME (1000.0/59.948743718592964824120603015060)\n#define UPDATE_TICKS_GAMEPAL (20.0)\n#define UPDATE_TICKS_GUI (1000.0/36.0)\n#define UPDATE_TICKS_UDP (1000.0/60.0)\n\ndouble start, end, freq, update_ticks_pc, start2, end2, update_ticks_pc2;\n\nvoid 
ReleaseDirectDraw();\nvoid ReleaseDirectSound();\nvoid ReleaseDirectInput();\nint InitDirectDraw();\nint ReInitSound();\n\nextern \"C\"\n{\n void MultiMouseInit();\n void MultiMouseShutdown();\n extern BYTE device1, device2;\n extern BYTE GUIOn;\n extern BYTE GUIOn2;\n DWORD InputEn = 0;\n void reInitSound()\n {\n ReInitSound();\n }\n}\n\nBOOL InputAcquire()\n{\n for (unsigned int i = 0; i < 5; i++)\n {\n if (JoystickInput[i])\n {\n JoystickInput[i]->Acquire();\n }\n }\n\n if (device1 && device2 && !GUIOn2)\n {\n MultiMouseInit();\n }\n else if (MouseInput && GUIOn2)\n {\n MouseInput->Acquire();\n }\n if (KeyboardInput)\n {\n KeyboardInput->Acquire();\n }\n InputEn = 1;\n return TRUE;\n}\n\nBOOL InputDeAcquire()\n{\n if (KeyboardInput)\n {\n KeyboardInput->Unacquire();\n }\n\n for (unsigned int i = 0; i < 5; i++)\n {\n if (JoystickInput[i])\n {\n JoystickInput[i]->Unacquire();\n }\n }\n\n if (device1 && device2 && !GUIOn2)\n {\n MultiMouseShutdown();\n }\n else if (MouseInput)\n {\n MouseInput->Unacquire();\n }\n InputEn = 0;\n return TRUE;\n}\n\nextern \"C\"\n{\n void initwinvideo();\n void DosExit();\n extern BYTE EMUPause;\n extern int CurKeyPos;\n extern int CurKeyReadPos;\n extern int KeyBuffer[16];\n extern BYTE debugger;\n}\n\nextern \"C\" void CheckPriority()\n{\n if (HighPriority == 1)\n {\n if (!SetPriorityClass(GetCurrentProcess(), ABOVE_NORMAL_PRIORITY_CLASS))\n {\n SetPriorityClass(GetCurrentProcess(), HIGH_PRIORITY_CLASS);\n }\n }\n else\n {\n SetPriorityClass(GetCurrentProcess(), NORMAL_PRIORITY_CLASS);\n }\n}\n\nextern \"C\" void CheckAlwaysOnTop()\n{\n if (AlwaysOnTop == 1)\n {\n SetWindowPos(hMainWindow, HWND_TOPMOST, 0, 0, 0, 0, SWP_NOMOVE | SWP_NOSIZE);\n }\n else\n {\n SetWindowPos(hMainWindow, HWND_NOTOPMOST, 0, 0, 0, 0, SWP_NOMOVE | SWP_NOSIZE);\n }\n}\n\nextern \"C\" void CheckScreenSaver()\n{\n if (DisableScreenSaver == 1 && IsActivated == 1)\n {\n SystemParametersInfo(SPI_SETSCREENSAVEACTIVE, FALSE, 0, SPIF_SENDCHANGE);\n SystemParametersInfo(SPI_SETLOWPOWERACTIVE, FALSE, 0, SPIF_SENDCHANGE);\n SystemParametersInfo(SPI_SETPOWEROFFACTIVE, FALSE, 0, SPIF_SENDCHANGE);\n }\n else\n {\n SystemParametersInfo(SPI_SETSCREENSAVEACTIVE, TRUE, 0, SPIF_SENDCHANGE);\n SystemParametersInfo(SPI_SETLOWPOWERACTIVE, TRUE, 0, SPIF_SENDCHANGE);\n SystemParametersInfo(SPI_SETPOWEROFFACTIVE, TRUE, 0, SPIF_SENDCHANGE);\n }\n}\n\nextern \"C\" void MinimizeWindow()\n{\n ShowWindow(hMainWindow, SW_MINIMIZE);\n IsActivated = 0;\n}\n\nBOOL InputRead()\n{\n static int PrevZ = 0;\n MouseMoveX = 0;\n MouseMoveY = 0;\n if (MouseInput && InputEn == 1)\n {\n DIMOUSESTATE dims;\n HRESULT hr;\n hr = MouseInput->GetDeviceState(sizeof(DIMOUSESTATE), &dims);\n\n if (SUCCEEDED(hr))\n {\n MouseMoveX = (float)dims.lX;\n MouseMoveY = (float)dims.lY;\n\n if (MouseWheel == 1)\n {\n long zDelta = dims.lZ - PrevZ;\n if (!dims.lZ)\n {\n zDelta = 0;\n }\n while (zDelta > 0)\n {\n zDelta -= 40;\n if (!((CurKeyPos + 1 == CurKeyReadPos) ||\n ((CurKeyPos + 1 == 16) && (CurKeyReadPos == 0))))\n {\n KeyBuffer[CurKeyPos] = 72 + 256;\n CurKeyPos++;\n if (CurKeyPos == 16)\n {\n CurKeyPos = 0;\n }\n }\n }\n while (zDelta < 0)\n {\n zDelta += 40;\n if (!((CurKeyPos + 1 == CurKeyReadPos) ||\n ((CurKeyPos + 1 == 16) && (CurKeyReadPos == 0))))\n {\n KeyBuffer[CurKeyPos] = 80 + 256;\n CurKeyPos++;\n if (CurKeyPos == 16)\n {\n CurKeyPos = 0;\n }\n }\n }\n PrevZ = dims.lZ;\n }\n\n MouseButton = (dims.rgbButtons[0] >> 7) | (dims.rgbButtons[1] >> 6) |\n (dims.rgbButtons[2] >> 5) | (dims.rgbButtons[3] >> 4);\n }\n else\n {\n return 
FALSE;\n }\n }\n return TRUE;\n}\n\nvoid ExitFunction()\n{\n // We need to clean up the debug window if it's running\n\n if (debugWindow)\n {\n FreeConsole();\n }\n\n IsActivated = 0;\n CheckScreenSaver();\n ReleaseDirectInput();\n ReleaseDirectSound();\n ReleaseDirectDraw();\n FreeLibrary(hM_dsound);\n FreeLibrary(hM_ddraw);\n FreeLibrary(hM_dinput8);\n DestroyWindow(hMainWindow);\n}\n\nextern \"C\"\n{\n bool ctrlptr = false;\n}\n\nLRESULT CALLBACK Main_Proc(HWND hWnd, UINT uMsg, WPARAM wParam, LPARAM lParam)\n{\n static bool shiftpr;\n bool accept;\n int vkeyval = 0;\n\n#ifdef QT_DEBUGGER\n if (debugger_quit) { debug_exit(0); }\n#endif\n\n switch (uMsg)\n {\n case WM_KEYDOWN:\n // sent when user presses a key\n if (!((CurKeyPos + 1 == CurKeyReadPos) || ((CurKeyPos + 1 == 16) && (CurKeyReadPos == 0))))\n {\n accept = false;\n\n if (wParam == 16)\n {\n shiftpr = true;\n }\n else if (wParam == 17)\n {\n ctrlptr = true;\n }\n if (((wParam >= 'A') && (wParam <= 'Z')) || ((wParam >= 'a') && (wParam <= 'z')) ||\n (wParam == 27) || (wParam == 32) || (wParam == 8) || (wParam == 13) || (wParam == 9))\n {\n accept = true; vkeyval = wParam;\n }\n if ((wParam >= '0') && (wParam <= '9'))\n {\n accept = true; vkeyval = wParam;\n if (shiftpr)\n {\n switch (wParam)\n {\n case '1':\n vkeyval = '!'; break;\n case '2':\n vkeyval = '@'; break;\n case '3':\n vkeyval = '#'; break;\n case '4':\n vkeyval = '$'; break;\n case '5':\n vkeyval = '%'; break;\n case '6':\n vkeyval = '^'; break;\n case '7':\n vkeyval = '&'; break;\n case '8':\n vkeyval = '*'; break;\n case '9':\n vkeyval = '('; break;\n case '0':\n vkeyval = ')'; break;\n }\n }\n }\n if ((wParam >= VK_NUMPAD0) && (wParam <= VK_NUMPAD9))\n {\n accept = true; vkeyval = wParam - VK_NUMPAD0 + '0';\n }\n if (!shiftpr)\n {\n switch (wParam)\n {\n case 189:\n vkeyval = '-'; accept = true; break;\n case 187:\n vkeyval = '='; accept = true; break;\n case 219:\n vkeyval = '['; accept = true; break;\n case 221:\n vkeyval = ']'; accept = true; break;\n case 186:\n vkeyval = ';'; accept = true; break;\n case 222:\n vkeyval = 39; accept = true; break;\n case 188:\n vkeyval = ','; accept = true; break;\n case 190:\n vkeyval = '.'; accept = true; break;\n case 191:\n vkeyval = '/'; accept = true; break;\n case 192:\n vkeyval = '`'; accept = true; break;\n case 220:\n vkeyval = 92; accept = true; break;\n }\n }\n else\n {\n switch (wParam)\n {\n case 189:\n vkeyval = '_'; accept = true; break;\n case 187:\n vkeyval = '+'; accept = true; break;\n case 219:\n vkeyval = '{'; accept = true; break;\n case 221:\n vkeyval = '}'; accept = true; break;\n case 186:\n vkeyval = ':'; accept = true; break;\n case 222:\n vkeyval = '\"'; accept = true; break;\n case 188:\n vkeyval = '<'; accept = true; break;\n case 190:\n vkeyval = '>'; accept = true; break;\n case 191:\n vkeyval = '?'; accept = true; break;\n case 192:\n vkeyval = '~'; accept = true; break;\n case 220:\n vkeyval = '|'; accept = true; break;\n }\n }\n switch (wParam)\n {\n case 33:\n vkeyval = 256 + 73; accept = true; break;\n case 38:\n vkeyval = 256 + 72; accept = true; break;\n case 36:\n vkeyval = 256 + 71; accept = true; break;\n case 39:\n vkeyval = 256 + 77; accept = true; break;\n case 12:\n vkeyval = 256 + 76; accept = true; break;\n case 37:\n vkeyval = 256 + 75; accept = true; break;\n case 34:\n vkeyval = 256 + 81; accept = true; break;\n case 40:\n vkeyval = 256 + 80; accept = true; break;\n case 35:\n vkeyval = 256 + 79; accept = true; break;\n case 107:\n vkeyval = '+'; accept = true; break;\n case 
109:\n vkeyval = '-'; accept = true; break;\n case 106:\n vkeyval = '*'; accept = true; break;\n case 111:\n vkeyval = '/'; accept = true; break;\n case 110:\n vkeyval = '.'; accept = true; break;\n }\n if (accept)\n {\n KeyBuffer[CurKeyPos] = vkeyval;\n CurKeyPos++;\n if (CurKeyPos == 16)\n {\n CurKeyPos = 0;\n }\n }\n }\n break;\n case WM_KEYUP:\n // sent when user releases a key\n if (wParam == 16)\n {\n shiftpr = false;\n }\n else if (wParam == 17)\n {\n ctrlptr = false;\n }\n break;\n case WM_MOUSEMOVE:\n if (MouseInput && GUIOn2)\n {\n MouseInput->Acquire();\n }\n break;\n case WM_MOVE:\n break;\n case WM_PAINT:\n ValidateRect(hWnd, NULL);\n break;\n case WM_ACTIVATE:\n if (LOWORD(wParam) != WA_INACTIVE)\n {\n IsActivated = 1;\n\t\tif (debugger) { if (!IsWindowVisible(DebugWindowHandle)) ShowWindow(DebugWindowHandle, SW_SHOW);}\n\n if (FirstActivate == 0)\n {\n initwinvideo();\n }\n if (PauseFocusChange && !MovieProcessing)\n {\n EMUPause = 0;\n }\n InputAcquire();\n\n\t\tif (FirstActivate == 1)\n {\n FirstActivate = 0;\n }\n if (FullScreen == 1)\n {\n Clear2xSaIBuffer();\n }\n CheckPriority();\n CheckScreenSaver();\n\t\tSetForegroundWindow(hMainWindow);\n\n }\n if (LOWORD(wParam) == WA_INACTIVE)\n {\n IsActivated = 0;\n if (PauseFocusChange)\n {\n EMUPause = 1;\n }\n InputDeAcquire();\n if (GUIOn || GUIOn2 || EMUPause)\n {\n SetPriorityClass(GetCurrentProcess(), IDLE_PRIORITY_CLASS);\n }\n CheckScreenSaver();\n }\n break;\n case WM_SETFOCUS:\n if (FullScreen == 0)\n {\n ShowWindow(hMainWindow, SW_SHOWNORMAL);\n }\n CheckPriority();\n CheckScreenSaver();\n InputAcquire();\n break;\n case WM_KILLFOCUS:\n InputDeAcquire();\n IsActivated = 0;\n if (GUIOn || GUIOn2 || EMUPause)\n {\n SetPriorityClass(GetCurrentProcess(), IDLE_PRIORITY_CLASS);\n }\n CheckScreenSaver();\n break;\n case WM_DESTROY:\n break;\n case WM_CLOSE:\n break;\n }\n return DefWindowProc(hWnd, uMsg, wParam, lParam);;\n}\n\nint RegisterWinClass()\n{\n if (AllowMultipleInst == 0)\n {\n HWND hFindWindow;\n hFindWindow = FindWindow(\"ZSNES\", NULL);\n\n if (hFindWindow != NULL)\n {\n ShowWindow(hFindWindow, SW_SHOWNORMAL);\n SetForegroundWindow(hFindWindow);\n DosExit();\n }\n }\n\n WNDCLASS wcl;\n\n wcl.style = CS_OWNDC | CS_HREDRAW | CS_VREDRAW | CS_NOCLOSE;\n wcl.cbClsExtra = 0;\n wcl.cbWndExtra = 0;\n wcl.hIcon = LoadIcon(hInst, MAKEINTRESOURCE(IDI_ICON1));\n wcl.hCursor = NULL;\n wcl.hInstance = hInst;\n wcl.lpfnWndProc = (WNDPROC)Main_Proc;\n wcl.hbrBackground = (HBRUSH)GetStockObject(BLACK_BRUSH);\n wcl.lpszMenuName = NULL;\n wcl.lpszClassName = \"ZSNES\";\n\n if (RegisterClass(&wcl) == 0)\n {\n return FALSE;\n }\n\n return TRUE;\n}\n\nBYTE PrevStereoSound;\nDWORD PrevSoundQuality;\n\nBOOL InitSound()\n{\n WAVEFORMATEX wfx;\n DSBCAPS dsbcaps;\n\n SoundEnabled = 0;\n\n if (soundon == 0)\n {\n return FALSE;\n }\n\n PrevSoundQuality = SoundQuality;\n PrevStereoSound = StereoSound;\n\n if (DS_OK == pDirectSoundCreate8(NULL, &lpDirectSound, NULL))\n {\n lpDirectSound->Initialize(NULL);\n\n if (PrimaryBuffer)\n {\n if (DS_OK != lpDirectSound->SetCooperativeLevel(hMainWindow, DSSCL_WRITEPRIMARY))\n {\n if (DS_OK != lpDirectSound->SetCooperativeLevel(hMainWindow, DSSCL_EXCLUSIVE))\n {\n return FALSE;\n }\n }\n else\n {\n UsePrimaryBuffer = 1;\n }\n }\n else\n {\n if (DS_OK != lpDirectSound->SetCooperativeLevel(hMainWindow, DSSCL_NORMAL))\n {\n if (DS_OK != lpDirectSound->SetCooperativeLevel(hMainWindow, DSSCL_EXCLUSIVE))\n {\n return FALSE;\n }\n }\n else\n {\n UsePrimaryBuffer = 0;\n }\n }\n }\n else\n {\n return FALSE;\n 
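// (Editor's annotation; not in the original ZSNES source.) DirectSound8\n  // creation failed above, so InitSound() exits with SoundEnabled still 0\n  // and the emulator keeps running without audio instead of aborting.\n  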
}\n\n wfx.wFormatTag = WAVE_FORMAT_PCM;\n\n switch (SoundQuality)\n {\n case 0:\n wfx.nSamplesPerSec = 8000;\n SoundBufferSize = 1024 * 2;\n break;\n case 1:\n wfx.nSamplesPerSec = 11025;\n SoundBufferSize = 1024 * 2;\n break;\n case 2:\n wfx.nSamplesPerSec = 22050;\n SoundBufferSize = 1024 * 4;\n break;\n case 3:\n wfx.nSamplesPerSec = 44100;\n SoundBufferSize = 1024 * 8;\n break;\n case 4:\n wfx.nSamplesPerSec = 16000;\n SoundBufferSize = 1024 * 4;\n break;\n case 5:\n wfx.nSamplesPerSec = 32000;\n SoundBufferSize = 1024 * 8;\n break;\n case 6:\n wfx.nSamplesPerSec = 48000;\n SoundBufferSize = 1024 * 8;\n break;\n default:\n wfx.nSamplesPerSec = 11025;\n SoundBufferSize = 1024 * 2;\n }\n\n if (StereoSound == 1)\n {\n wfx.nChannels = 2;\n wfx.nBlockAlign = 4;\n SoundBufferSize *= 2;\n }\n else\n {\n wfx.nChannels = 1;\n wfx.nBlockAlign = 2;\n }\n\n wfx.wBitsPerSample = 16;\n wfx.nAvgBytesPerSec = wfx.nSamplesPerSec * wfx.nBlockAlign;\n wfx.cbSize = 0;\n\n memset(&dsbd, 0, sizeof(DSBUFFERDESC));\n dsbd.dwSize = sizeof(DSBUFFERDESC);\n dsbd.dwFlags = DSBCAPS_GETCURRENTPOSITION2 | DSBCAPS_STICKYFOCUS;\n if (UsePrimaryBuffer)\n {\n dsbd.dwFlags |= DSBCAPS_PRIMARYBUFFER;\n }\n dsbd.dwBufferBytes = UsePrimaryBuffer ? 0 : SoundBufferSize;\n dsbd.lpwfxFormat = UsePrimaryBuffer ? NULL : &wfx;\n\n if (DS_OK == lpDirectSound->CreateSoundBuffer(&dsbd, &lpPrimaryBuffer, NULL))\n {\n if (!UsePrimaryBuffer)\n {\n if (DS_OK ==\n lpPrimaryBuffer->QueryInterface(IID_IDirectSoundBuffer8, (LPVOID *)&lpSoundBuffer))\n {\n if (DS_OK != lpSoundBuffer->Play(0, 0, DSBPLAY_LOOPING))\n {\n return FALSE;\n }\n }\n else\n {\n return FALSE;\n }\n }\n else\n {\n lpPrimaryBuffer->SetFormat(&wfx);\n dsbcaps.dwSize = sizeof(DSBCAPS);\n lpPrimaryBuffer->GetCaps(&dsbcaps);\n SoundBufferSize = dsbcaps.dwBufferBytes;\n\n if (DS_OK != lpPrimaryBuffer->Play(0, 0, DSBPLAY_LOOPING))\n {\n return FALSE;\n }\n }\n\n SoundEnabled = 1;\n FirstSound = 0;\n return TRUE;\n }\n else\n {\n return FALSE;\n }\n}\n\nBOOL ReInitSound()\n{\n WAVEFORMATEX wfx;\n DSBCAPS dsbcaps;\n\n if (lpSoundBuffer)\n {\n lpSoundBuffer->Stop();\n lpSoundBuffer->Release();\n lpSoundBuffer = NULL;\n }\n\n if (lpPrimaryBuffer)\n {\n lpPrimaryBuffer->Stop();\n lpPrimaryBuffer->Release();\n lpPrimaryBuffer = NULL;\n }\n\n if (soundon == 0)\n {\n SoundEnabled = 0;\n ReleaseDirectSound();\n return FALSE;\n }\n else if (SoundEnabled == 0)\n {\n return InitSound();\n }\n\n SoundEnabled = 0;\n\n PrevSoundQuality = SoundQuality;\n PrevStereoSound = StereoSound;\n\n wfx.wFormatTag = WAVE_FORMAT_PCM;\n\n switch (SoundQuality)\n {\n case 0:\n wfx.nSamplesPerSec = 8000;\n SoundBufferSize = 1024 * 2;\n break;\n case 1:\n wfx.nSamplesPerSec = 11025;\n SoundBufferSize = 1024 * 2;\n break;\n case 2:\n wfx.nSamplesPerSec = 22050;\n SoundBufferSize = 1024 * 4;\n break;\n case 3:\n wfx.nSamplesPerSec = 44100;\n SoundBufferSize = 1024 * 8;\n break;\n case 4:\n wfx.nSamplesPerSec = 16000;\n SoundBufferSize = 1024 * 4;\n break;\n case 5:\n wfx.nSamplesPerSec = 32000;\n SoundBufferSize = 1024 * 8;\n break;\n case 6:\n wfx.nSamplesPerSec = 48000;\n SoundBufferSize = 1024 * 8;\n break;\n default:\n wfx.nSamplesPerSec = 11025;\n SoundBufferSize = 1024 * 2;\n }\n\n if (StereoSound == 1)\n {\n wfx.nChannels = 2;\n wfx.nBlockAlign = 4;\n SoundBufferSize *= 2;\n }\n else\n {\n wfx.nChannels = 1;\n wfx.nBlockAlign = 2;\n }\n\n wfx.wBitsPerSample = 16;\n wfx.nAvgBytesPerSec = wfx.nSamplesPerSec * wfx.nBlockAlign;\n wfx.cbSize = 0;\n\n memset(&dsbd, 0, sizeof(DSBUFFERDESC));\n dsbd.dwSize = 
sizeof(DSBUFFERDESC);\n dsbd.dwFlags = DSBCAPS_GETCURRENTPOSITION2 | DSBCAPS_STICKYFOCUS;\n if (UsePrimaryBuffer)\n {\n dsbd.dwFlags |= DSBCAPS_PRIMARYBUFFER;\n }\n dsbd.dwBufferBytes = UsePrimaryBuffer ? 0 : SoundBufferSize;\n dsbd.lpwfxFormat = UsePrimaryBuffer ? NULL : &wfx;\n\n if (DS_OK == lpDirectSound->CreateSoundBuffer(&dsbd, &lpPrimaryBuffer, NULL))\n {\n if (!UsePrimaryBuffer)\n {\n if (DS_OK ==\n lpPrimaryBuffer->QueryInterface(IID_IDirectSoundBuffer8, (LPVOID *)&lpSoundBuffer))\n {\n if (DS_OK != lpSoundBuffer->Play(0, 0, DSBPLAY_LOOPING))\n {\n return FALSE;\n }\n }\n else\n {\n return FALSE;\n }\n }\n else\n {\n lpPrimaryBuffer->SetFormat(&wfx);\n dsbcaps.dwSize = sizeof(DSBCAPS);\n lpPrimaryBuffer->GetCaps(&dsbcaps);\n SoundBufferSize = dsbcaps.dwBufferBytes;\n\n if (DS_OK != lpPrimaryBuffer->Play(0, 0, DSBPLAY_LOOPING))\n {\n return FALSE;\n }\n }\n\n SoundEnabled = 1;\n FirstSound = 0;\n return TRUE;\n }\n else\n {\n return FALSE;\n }\n}\n\nBOOL FAR PASCAL InitJoystickInput(LPCDIDEVICEINSTANCE pdinst, LPVOID pvRef)\n{\n LPDIRECTINPUT8 pdi = (LPDIRECTINPUT8)pvRef;\n GUID DeviceGuid = pdinst->guidInstance;\n\n if (CurrentJoy > 4)\n {\n return DIENUM_CONTINUE;\n }\n\n // Create the DirectInput joystick device.\n if (pdi->CreateDevice(DeviceGuid, &JoystickInput[CurrentJoy], NULL) != DI_OK)\n {\n return DIENUM_CONTINUE;\n }\n\n if (JoystickInput[CurrentJoy]->SetDataFormat(&c_dfDIJoystick) != DI_OK)\n {\n JoystickInput[CurrentJoy]->Release();\n return DIENUM_CONTINUE;\n }\n\n if (JoystickInput[CurrentJoy]->SetCooperativeLevel(hMainWindow,\n DISCL_EXCLUSIVE | DISCL_BACKGROUND) != DI_OK)\n {\n JoystickInput[CurrentJoy]->Release();\n return DIENUM_CONTINUE;\n }\n\n DIPROPRANGE diprg;\n\n diprg.diph.dwSize = sizeof(diprg);\n diprg.diph.dwHeaderSize = sizeof(diprg.diph);\n diprg.diph.dwObj = DIJOFS_X;\n diprg.diph.dwHow = DIPH_BYOFFSET;\n diprg.lMin = joy_sensitivity * -1;\n diprg.lMax = joy_sensitivity;\n\n if (FAILED(JoystickInput[CurrentJoy]->SetProperty(DIPROP_RANGE, &diprg.diph)))\n {\n X1Disable[CurrentJoy] = 1;\n X2Disable[CurrentJoy] = 1;\n }\n\n diprg.diph.dwObj = DIJOFS_Y;\n\n if (FAILED(JoystickInput[CurrentJoy]->SetProperty(DIPROP_RANGE, &diprg.diph)))\n {\n Y1Disable[CurrentJoy] = 1;\n Y2Disable[CurrentJoy] = 1;\n }\n\n diprg.diph.dwObj = DIJOFS_Z;\n if (FAILED(JoystickInput[CurrentJoy]->SetProperty(DIPROP_RANGE, &diprg.diph)))\n {\n Z1Disable[CurrentJoy] = 1;\n Z2Disable[CurrentJoy] = 1;\n }\n\n diprg.diph.dwObj = DIJOFS_RX;\n if (FAILED(JoystickInput[CurrentJoy]->SetProperty(DIPROP_RANGE, &diprg.diph)))\n {\n RX1Disable[CurrentJoy] = 1;\n RX2Disable[CurrentJoy] = 1;\n }\n\n diprg.diph.dwObj = DIJOFS_RY;\n if (FAILED(JoystickInput[CurrentJoy]->SetProperty(DIPROP_RANGE, &diprg.diph)))\n {\n RY1Disable[CurrentJoy] = 1;\n RY2Disable[CurrentJoy] = 1;\n }\n\n diprg.diph.dwObj = DIJOFS_RZ;\n if (FAILED(JoystickInput[CurrentJoy]->SetProperty(DIPROP_RANGE, &diprg.diph)))\n {\n RZ1Disable[CurrentJoy] = 1;\n RZ2Disable[CurrentJoy] = 1;\n }\n\n diprg.diph.dwObj = DIJOFS_SLIDER(0);\n if (FAILED(JoystickInput[CurrentJoy]->SetProperty(DIPROP_RANGE, &diprg.diph)))\n {\n S01Disable[CurrentJoy] = 1;\n S02Disable[CurrentJoy] = 1;\n }\n\n diprg.diph.dwObj = DIJOFS_SLIDER(1);\n if (FAILED(JoystickInput[CurrentJoy]->SetProperty(DIPROP_RANGE, &diprg.diph)))\n {\n S11Disable[CurrentJoy] = 1;\n S12Disable[CurrentJoy] = 1;\n }\n\n DIDEVCAPS didc;\n\n didc.dwSize = sizeof(DIDEVCAPS);\n\n if (JoystickInput[CurrentJoy]->GetCapabilities(&didc) != DI_OK)\n {\n JoystickInput[CurrentJoy]->Release();\n 
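// (Editor's annotation; not in the original ZSNES source.) A controller\n    // whose capabilities cannot be read is released and skipped; returning\n    // DIENUM_CONTINUE keeps EnumDevices walking the remaining devices, so\n    // one faulty pad does not abort joystick initialization.\n    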
return DIENUM_CONTINUE;\n }\n\n if (didc.dwButtons <= 16)\n {\n NumBTN[CurrentJoy] = didc.dwButtons;\n }\n else\n {\n NumBTN[CurrentJoy] = 16;\n }\n\n if (didc.dwPOVs)\n {\n NumPOV[CurrentJoy] = didc.dwPOVs;\n }\n else\n {\n POVDisable[CurrentJoy] = 1;\n }\n\n DIPROPDWORD dipdw;\n\n dipdw.diph.dwSize = sizeof(DIPROPDWORD);\n dipdw.diph.dwHeaderSize = sizeof(dipdw.diph);\n dipdw.diph.dwHow = DIPH_BYOFFSET;\n dipdw.dwData = 2500;\n dipdw.diph.dwObj = DIJOFS_X;\n JoystickInput[CurrentJoy]->SetProperty(DIPROP_DEADZONE, &dipdw.diph);\n\n dipdw.diph.dwObj = DIJOFS_Y;\n JoystickInput[CurrentJoy]->SetProperty(DIPROP_DEADZONE, &dipdw.diph);\n\n dipdw.diph.dwObj = DIJOFS_Z;\n JoystickInput[CurrentJoy]->SetProperty(DIPROP_DEADZONE, &dipdw.diph);\n\n dipdw.diph.dwObj = DIJOFS_RX;\n JoystickInput[CurrentJoy]->SetProperty(DIPROP_DEADZONE, &dipdw.diph);\n\n dipdw.diph.dwObj = DIJOFS_RY;\n JoystickInput[CurrentJoy]->SetProperty(DIPROP_DEADZONE, &dipdw.diph);\n\n dipdw.diph.dwObj = DIJOFS_RZ;\n JoystickInput[CurrentJoy]->SetProperty(DIPROP_DEADZONE, &dipdw.diph);\n\n dipdw.diph.dwObj = DIJOFS_SLIDER(0);\n JoystickInput[CurrentJoy]->SetProperty(DIPROP_DEADZONE, &dipdw.diph);\n\n dipdw.diph.dwObj = DIJOFS_SLIDER(1);\n JoystickInput[CurrentJoy]->SetProperty(DIPROP_DEADZONE, &dipdw.diph);\n\n dipdw.diph.dwSize = sizeof(DIPROPDWORD);\n dipdw.diph.dwHeaderSize = sizeof(dipdw.diph);\n dipdw.diph.dwHow = DIPH_DEVICE;\n dipdw.dwData = DIPROPAXISMODE_ABS;\n dipdw.diph.dwObj = 0;\n\n JoystickInput[CurrentJoy]->SetProperty(DIPROP_AXISMODE, &dipdw.diph);\n\n CurrentJoy += 1;\n\n return DIENUM_CONTINUE;\n}\n\nvoid ReleaseDirectInput()\n{\n if (MouseInput)\n {\n MouseInput->Release();\n MouseInput = NULL;\n }\n\n if (KeyboardInput)\n {\n KeyboardInput->Release();\n KeyboardInput = NULL;\n }\n\n for (int i = 0; i < 5; i++)\n {\n if (JoystickInput[i])\n {\n JoystickInput[i]->Release();\n JoystickInput[i] = NULL;\n }\n }\n\n if (DInput)\n {\n DInput->Release();\n DInput = NULL;\n }\n}\n\nvoid ReleaseDirectSound()\n{\n if (lpSoundBuffer)\n {\n lpSoundBuffer->Release();\n lpSoundBuffer = NULL;\n }\n\n if (lpPrimaryBuffer)\n {\n lpPrimaryBuffer->Release();\n lpPrimaryBuffer = NULL;\n }\n\n if (lpDirectSound)\n {\n lpDirectSound->Release();\n lpDirectSound = NULL;\n }\n}\n\nvoid DInputError()\n{\n char message1[256];\n\n sprintf(message1,\n \"Error initializing DirectInput\\nYou may need to install DirectX 8.0a or higher located at www.microsoft.com/directx%c\",\n 0);\n MessageBox(NULL, message1, \"DirectInput Error\", MB_ICONERROR);\n}\n\nbool InitInput()\n{\n char message1[256];\n HRESULT hr;\n\n if (FAILED(hr = pDirectInput8Create(hInst, DIRECTINPUT_VERSION, IID_IDirectInput8A, (void**)&DInput,\n NULL)))\n {\n sprintf(message1,\n \"Error initializing DirectInput\\nYou may need to install DirectX 8.0a or higher located at www.microsoft.com/directx%c\",\n 0);\n MessageBox(NULL, message1, \"DirectInput Error\", MB_ICONERROR);\n\n switch (hr)\n {\n case DIERR_BETADIRECTINPUTVERSION:\n sprintf(message1, \"Beta %X\\n\", (unsigned int) hr);\n MessageBox(NULL, message1, \"Init\", MB_ICONERROR);\n break;\n case DIERR_INVALIDPARAM:\n sprintf(message1, \"Invalid %X\\n\", (unsigned int) hr);\n MessageBox(NULL, message1, \"Init\", MB_ICONERROR);\n break;\n case DIERR_OLDDIRECTINPUTVERSION:\n sprintf(message1, \"OLDDIRECTINPUTVERSION %X\\n\", (unsigned int) hr);\n MessageBox(NULL, message1, \"Init\", MB_ICONERROR);\n break;\n case DIERR_OUTOFMEMORY:\n sprintf(message1, \"OUTOFMEMORY %X\\n\", (unsigned int) hr);\n MessageBox(NULL, 
message1, \"Init\", MB_ICONERROR);\n break;\n default:\n sprintf(message1, \"UNKNOWN %X\\n\", (unsigned int) hr);\n MessageBox(NULL, message1, \"Init\", MB_ICONERROR);\n break;\n }\n return FALSE;\n }\n\n hr = DInput->CreateDevice(GUID_SysKeyboard, &KeyboardInput, NULL);\n if (FAILED(hr))\n {\n DInputError();return FALSE;\n }\n\n hr = KeyboardInput->SetDataFormat(&c_dfDIKeyboard);\n if (FAILED(hr))\n {\n DInputError();return FALSE;\n }\n\n hr = KeyboardInput->SetCooperativeLevel(hMainWindow, DISCL_NONEXCLUSIVE | DISCL_FOREGROUND);\n\n hr = DInput->CreateDevice(GUID_SysMouse, &MouseInput, NULL);\n if (FAILED(hr))\n {\n DInputError();return FALSE;\n }\n\n hr = MouseInput->SetDataFormat(&c_dfDIMouse);\n if (FAILED(hr))\n {\n DInputError();return FALSE;\n }\n\n hr = MouseInput->SetCooperativeLevel(hMainWindow, DISCL_EXCLUSIVE | DISCL_FOREGROUND);\n if (FAILED(hr))\n {\n DInputError();return FALSE;\n }\n\n JoystickInput[0] = NULL;JoystickInput[1] = NULL;JoystickInput[2] = NULL;JoystickInput[3] = NULL;JoystickInput[4] =\n NULL;\n\n hr = DInput->EnumDevices(DI8DEVCLASS_GAMECTRL, InitJoystickInput, DInput, DIEDFL_ATTACHEDONLY);\n\n if (FAILED(hr))\n {\n DInputError(); return FALSE;\n }\n\n InputAcquire();\n\n return TRUE;\n}\n\nvoid TestJoy()\n{\n int i;\n\n for (i = 0; i < 5; i++)\n {\n if (JoystickInput[i])\n {\n JoystickInput[i]->Poll();\n\n if (JoystickInput[i]->GetDeviceState(sizeof(DIJOYSTATE), &js[i]) == DIERR_INPUTLOST)\n {\n if (JoystickInput[i])\n {\n JoystickInput[i]->Acquire();\n }\n if (FAILED(JoystickInput[i]->GetDeviceState(sizeof(DIJOYSTATE), &js[i])))\n {\n return;\n }\n }\n\n if (!X1Disable[i] && (js[i].lX > 0))\n {\n X1Disable[i] = 1;\n }\n\n if (!X2Disable[i] && (js[i].lX < 0))\n {\n X2Disable[i] = 1;\n }\n\n if (!Y1Disable[i] && (js[i].lY > 0))\n {\n Y1Disable[i] = 1;\n }\n\n if (!Y2Disable[i] && (js[i].lY < 0))\n {\n Y2Disable[i] = 1;\n }\n\n if (!Z1Disable[i] && (js[i].lZ > 0))\n {\n Z1Disable[i] = 1;\n }\n\n if (!Z2Disable[i] && (js[i].lZ < 0))\n {\n Z2Disable[i] = 1;\n }\n\n if (!RY1Disable[i] && (js[i].lRy > 0))\n {\n RY1Disable[i] = 1;\n }\n\n if (!RY2Disable[i] && (js[i].lRy < 0))\n {\n RY2Disable[i] = 1;\n }\n\n if (!RZ1Disable[i] && (js[i].lRz > 0))\n {\n RZ1Disable[i] = 1;\n }\n\n if (!RZ2Disable[i] &&(js[i].lRz < 0))\n {\n RZ2Disable[i] = 1;\n }\n\n if (!S01Disable[i] && (js[i].rglSlider[0] > 0))\n {\n S01Disable[i] = 1;\n }\n\n if (!S02Disable[i] && (js[i].rglSlider[0] < 0))\n {\n S02Disable[i] = 1;\n }\n\n if (!S11Disable[i] && (js[i].rglSlider[1] > 0))\n {\n S11Disable[i] = 1;\n }\n\n if (!S12Disable[i] && (js[i].rglSlider[1] < 0))\n {\n S12Disable[i] = 1;\n }\n }\n }\n}\n\nextern \"C\"\n{\n //BYTE changeRes = 1;\n extern DWORD converta;\n extern unsigned int BitConv32Ptr;\n extern unsigned int RGBtoYUVPtr;\n extern unsigned short resolutn;\n extern BYTE GUIWFVID[];\n extern BYTE GUIDSIZE[];\n extern BYTE GUISMODE[];\n extern BYTE GUIDSMODE[];\n extern BYTE GUIHQ2X[];\n extern BYTE GUIHQ3X[];\n extern BYTE GUIHQ4X[];\n extern BYTE GUINTVID[];\n extern BYTE hqFilterlevel;\n BYTE changeRes = 1;\n}\n\nDWORD FirstVid = 1;\nDWORD FirstFull = 1;\nDWORD SMode = 0;\nDWORD NTSCMode = 0;\nDWORD prevHQMode = ~0;\nDWORD prevNTSCMode = 0;\nDWORD prevScanlines = ~0;\n\nDWORD LockSurface();\nvoid UnlockSurface();\nvoid clear_ddraw();\n\n//The big extern\nextern \"C\"\n{\n void WinUpdateDevices();\n char CheckOGLMode();\n BYTE *SurfBuf;\n DWORD DMode = 0;\n DWORD DSMode = 0;\n WORD Refresh = 0;\n\n short Buffer[1800 * 2];\n\n int X, Y;\n DWORD pitch;\n MSG msg;\n DWORD 
SurfBufD;\n int count, x, count2;\n HRESULT hr;\n int i;\n short *Sound;\n DWORD CurrentPos;\n DWORD WritePos;\n DWORD T60HZEnabled = 0;\n DWORD T36HZEnabled = 0;\n\n DWORD WINAPI SemaphoreThread(LPVOID lpParam)\n {\n while (semaphore_run)\n {\n if (T60HZEnabled)\n {\n ReleaseSemaphore(hLock, 1, NULL);\n Sleep(1);\n }\n else\n {\n Sleep(20);\n }\n }\n return 0;\n }\n\n void InitSemaphore()\n {\n if (!hLock)\n {\n hLock = CreateSemaphore(NULL, 1, SemaphoreMax, NULL);\n\n semaphore_run = 1;\n\n hThread = CreateThread(NULL, 0, SemaphoreThread, &dwThreadParam, 0, &dwThreadId);\n }\n }\n\n void ShutdownSemaphore()\n {\n if (hLock)\n {\n semaphore_run = 0;\n\n WaitForSingleObject(hThread, INFINITE);\n CloseHandle(hThread);\n\n CloseHandle(hLock);\n\n hLock = NULL;\n }\n }\n\n extern unsigned int pressed;\n extern unsigned char romispal;\n\n void Start60HZ()\n {\n update_ticks_pc2 = UPDATE_TICKS_UDP * freq / 1000.0;\n\n if (romispal == 1)\n {\n update_ticks_pc = UPDATE_TICKS_GAMEPAL * freq / 1000.0;\n }\n else\n {\n update_ticks_pc = UPDATE_TICKS_GAME * freq / 1000.0;\n }\n\n QueryPerformanceCounter((LARGE_INTEGER *)&start);\n QueryPerformanceCounter((LARGE_INTEGER *)&start2);\n\n T36HZEnabled = 0;\n T60HZEnabled = 1;\n\n InitSemaphore();\n\n if (device1 && device2)\n {\n MouseInput->Unacquire();\n MultiMouseInit();\n }\n\n //if (!device1 && !device2) MouseInput->Unacquire();\n }\n\n void Stop60HZ()\n {\n T60HZEnabled = 0;\n\n if (device1 && device2)\n {\n MultiMouseShutdown();\n }\n\n MouseInput->Acquire();\n\n ShutdownSemaphore();\n }\n\n void Start36HZ()\n {\n update_ticks_pc2 = UPDATE_TICKS_UDP * freq / 1000.0;\n update_ticks_pc = UPDATE_TICKS_GUI * freq / 1000.0;\n\n QueryPerformanceCounter((LARGE_INTEGER *)&start);\n QueryPerformanceCounter((LARGE_INTEGER *)&start2);\n\n T60HZEnabled = 0;\n T36HZEnabled = 1;\n }\n\n void Stop36HZ()\n {\n T36HZEnabled = 0;\n }\n\n char WinMessage[256];\n void clearwin();\n\n char WinName[] = { \"ZSNESW\\0\" };\n void NTSCFilterInit();\n void NTSCFilterDraw(int SurfaceX, int SurfaceY, int pitch, unsigned char *buffer);\n\n extern \"C\" char GUIM7VID[];\n\n void SetHiresOpt()\n {\n if (CustomResX >= 512 && CustomResY >= 448)\n {\n GUIM7VID[cvidmode] = 1;\n }\n else\n {\n GUIM7VID[cvidmode] = 0;\n }\n }\n\n void KeepTVRatio()\n {\n int ratiox = WindowWidth*3;\n int ratioy = WindowHeight*4;\n\n int marginchange;\n int marginmod;\n\n if (ratiox < ratioy)\n {\n marginchange = (WindowHeight - (ratiox / 4)) / 2;\n marginmod = (WindowHeight - (ratiox / 4)) % 2;\n rcWindow.top += marginchange;\n rcWindow.bottom -= (marginchange + marginmod);\n }\n else if (ratiox > ratioy)\n {\n marginchange = (WindowWidth - (ratioy / 3)) / 2;\n marginmod = (WindowWidth - (ratioy / 3)) % 2;\n rcWindow.left += marginchange;\n rcWindow.right -= (marginchange + marginmod);\n }\n }\n\n extern char GUIKEEP43[];\n\n char CheckTVRatioReq()\n {\n return(GUIKEEP43[cvidmode] && Keep4_3Ratio);\n }\n\n WINDOWPLACEMENT wndpl;\n RECT rc1;\n DWORD newmode = 0;\n\n void initwinvideo()\n {\n DWORD HQMode = 0;\n newmode = 0;\n\n if (FirstActivate && NTSCFilter)\n {\n NTSCFilterInit();\n }\n\n if (cvidmode == 37 || cvidmode == 38 || cvidmode == 41)\n {\n SetHiresOpt();\n }\n\n if (hqFilter != 0)\n {\n if ((GUIHQ2X[cvidmode] != 0) && (hqFilterlevel == 2))\n {\n HQMode = 2;\n }\n if ((GUIHQ3X[cvidmode] != 0) && (hqFilterlevel == 3))\n {\n HQMode = 3;\n }\n if ((GUIHQ4X[cvidmode] != 0) && (hqFilterlevel == 4))\n {\n HQMode = 4;\n }\n }\n\n if ((CurMode != cvidmode) || (prevHQMode != HQMode) || 
(prevNTSCMode != NTSCFilter) ||\n (changeRes))\n {\n CurMode = cvidmode;\n prevHQMode = HQMode;\n prevNTSCMode = NTSCFilter;\n changeRes = 0;\n newmode = 1;\n SurfaceX = 256;\n SurfaceY = 240;\n X = 0;\n Y = 0;\n FullScreen = GUIWFVID[cvidmode];\n DMode = GUIDSIZE[cvidmode];\n SMode = GUISMODE[cvidmode];\n DSMode = GUIDSMODE[cvidmode];\n NTSCMode = GUINTVID[cvidmode];\n\n switch (cvidmode)\n {\n case 0:\n WindowWidth = 256;\n WindowHeight = 224;\n break;\n case 1:\n WindowWidth = 640;\n WindowHeight = 480;\n break;\n case 2:\n case 3:\n case 43:\n WindowWidth = 512;\n WindowHeight = 448;\n break;\n case 4:\n case 5:\n case 6:\n case 7:\n case 8:\n case 44:\n case 45:\n WindowWidth = 640;\n WindowHeight = 480;\n break;\n case 9:\n case 10:\n case 46:\n WindowWidth = 768;\n WindowHeight = 672;\n break;\n case 11:\n case 12:\n case 13:\n case 14:\n case 15:\n case 47:\n case 48:\n WindowWidth = 800;\n WindowHeight = 600;\n break;\n case 16:\n case 17:\n case 18:\n case 19:\n case 20:\n case 49:\n case 50:\n WindowWidth = 1024;\n WindowHeight = 768;\n break;\n case 21:\n case 22:\n case 51:\n WindowWidth = 1024;\n WindowHeight = 896;\n break;\n case 23:\n case 24:\n case 25:\n case 26:\n case 27:\n case 52:\n case 53:\n WindowWidth = 1280;\n WindowHeight = 960;\n break;\n case 28:\n case 29:\n case 30:\n case 31:\n case 32:\n case 54:\n case 55:\n WindowWidth = 1280;\n WindowHeight = 1024;\n break;\n case 33:\n case 34:\n case 35:\n case 36:\n case 37:\n case 56:\n case 57:\n WindowWidth = 1600;\n WindowHeight = 1200;\n break;\n case 38:\n case 39:\n case 40:\n case 41:\n case 42:\n case 58:\n case 59:\n WindowWidth = CustomResX;\n WindowHeight = CustomResY;\n break;\n default:\n WindowWidth = 256;\n WindowHeight = 224;\n break;\n }\n\n if (DMode == 1)\n {\n if ((DSMode == 1) || (FullScreen == 0))\n {\n SurfaceX = 512;\n }\n else\n {\n SurfaceX = 640;\n }\n\n SurfaceY = 480;\n\n if (NTSCMode && NTSCFilter)\n {\n SurfaceX = 602;\n SurfaceY = 446;\n }\n }\n else\n {\n if ((SMode == 0) && (FullScreen == 1))\n {\n SurfaceX = 320;\n }\n else\n {\n SurfaceX = 256;\n }\n SurfaceY = 240;\n }\n\n switch (HQMode)\n {\n case 2:\n SurfaceX = 512;\n SurfaceY = 480;\n break;\n case 3:\n SurfaceX = 768;\n SurfaceY = 720;\n break;\n case 4:\n SurfaceX = 1024;\n SurfaceY = 960;\n break;\n }\n\n BlitArea.top = 0;\n BlitArea.left = 0;\n BlitArea.right = SurfaceX;\n\n if (PrevRes == 0)\n {\n PrevRes = resolutn;\n }\n }\n\n if (((PrevStereoSound != StereoSound) || (PrevSoundQuality != SoundQuality)) && FirstSound != 1)\n {\n ReInitSound();\n }\n\n if (!FirstVid)\n {\n /*\n if (X<0)X=0;\n if (X>(int)(GetSystemMetrics(SM_CXSCREEN) - WindowWidth)) X=(GetSystemMetrics(SM_CXSCREEN) - WindowWidth);\n if (Y<0)Y=0;\n if (Y>(int)(GetSystemMetrics(SM_CYSCREEN) - WindowHeight)) Y=(GetSystemMetrics(SM_CYSCREEN) - WindowHeight);\n */\n\n if (FullScreen == 1)\n {\n X = 0; Y = 0;\n }\n\n if (FullScreen == 0 && newmode == 1)\n {\n X = MainWindowX; Y = MainWindowY;\n }\n else if (FullScreen == 0)\n {\n MainWindowX = X; MainWindowY = Y;\n }\n\n MoveWindow(hMainWindow, X, Y, WindowWidth, WindowHeight, TRUE);\n\n\t wndpl.length = sizeof(wndpl);\n GetWindowPlacement(hMainWindow, &wndpl);\n SetRect(&rc1, 0, 0, WindowWidth, WindowHeight);\n\n AdjustWindowRectEx(&rc1, GetWindowLong(hMainWindow, GWL_STYLE), GetMenu(hMainWindow) != NULL,\n GetWindowLong(hMainWindow, GWL_EXSTYLE));\n\n GetClientRect(hMainWindow, &rcWindow);\n ClientToScreen(hMainWindow, (LPPOINT)&rcWindow);\n ClientToScreen(hMainWindow, (LPPOINT)&rcWindow + 1);\n\n\t if 
(debugger) DockDebugger();\n\n\t}\n else\n {\n atexit(ExitFunction);\n\n if (!QueryPerformanceFrequency((LARGE_INTEGER *)&freq))\n {\n return;\n }\n\n if (!RegisterWinClass())\n {\n zexit_error();\n }\n X = (GetSystemMetrics(SM_CXSCREEN) - WindowWidth) / 2;\n Y = (GetSystemMetrics(SM_CYSCREEN) - WindowHeight) / 2;\n\n if (FullScreen == 1)\n {\n X = 0; Y = 0;\n }\n\n if (hMainWindow)\n {\n CloseWindow(hMainWindow);\n }\n\n if (SaveMainWindowPos == 1 && MainWindowX != -1 && FullScreen == 0)\n {\n X = MainWindowX; Y = MainWindowY;\n }\n\n hMainWindow = CreateWindow(\"ZSNES\", WinName, WS_VISIBLE | WS_POPUP, X, Y,\n //WS_OVERLAPPED \"ZSNES\"\n WindowWidth, WindowHeight, NULL, NULL, hInst, NULL);\n\n if (!hMainWindow)\n {\n return;\n }\n\n // Hide the cursor\n ShowCursor(0);\n\n // Set window attributes\n ShowWindow(hMainWindow, SW_SHOWNORMAL);\n SetWindowText(hMainWindow, \"ZSNES\");\n\n // Run ZSNES Windows GUI callback functions to set initial values\n CheckPriority();\n CheckAlwaysOnTop();\n CheckScreenSaver();\n\n // Init various DirectX subsystems\n InitInput();\n InitSound();\n TestJoy();\n\n if (debugger) DockDebugger();\n }\n\n if (FirstVid == 1)\n {\n FirstVid = 0;\n if (KitchenSync)\n {\n Refresh = totlines == 263 ? 120 : 100;\n }\n else if (KitchenSyncPAL && totlines == 314)\n {\n Refresh = 100;\n }\n else if (ForceRefreshRate)\n {\n Refresh = SetRefreshRate;\n }\n InitDirectDraw();\n clearwin();\n Clear2xSaIBuffer();\n clear_display();\n }\n else if (newmode == 1 && Moving != 1)\n {\n ReleaseDirectDraw();\n InitDirectDraw();\n if (CheckTVRatioReq())\n {\n KeepTVRatio();\n }\n clearwin();\n Clear2xSaIBuffer();\n clear_display();\n }\n\n if (CheckOGLMode()) gl_start(WindowWidth, WindowHeight, 16, FullScreen);\n }\n\n extern unsigned int vidbuffer;\n extern void SoundProcess();\n extern int DSPBuffer;\n int *DSPBuffer1;\n DWORD ScreenPtr;\n DWORD ScreenPtr2;\n void GUI36hzcall();\n void Game60hzcall();\n\nstatic int ds_buffer_size = 0; //size in bytes of the direct sound buffer\nstatic int ds_write_offset = 0; //offset of the write cursor in the direct sound buffer\nstatic int ds_min_free_space = 0; //if the free space is below this value get_space() will return 0\n\n static int ds_write_buffer(unsigned char *data, int len)\n{\n HRESULT res;\n LPVOID lpvPtr1;\n DWORD dwBytes1;\n LPVOID lpvPtr2;\n DWORD dwBytes2;\n\n // Lock the buffer\n res = lpSoundBuffer->Lock(ds_write_offset, len, &lpvPtr1, &dwBytes1, &lpvPtr2, &dwBytes2, 0);\n // If the buffer was lost, restore and retry lock.\n if (res == DSERR_BUFFERLOST)\n {\n lpSoundBuffer->Restore();\n res = lpSoundBuffer->Lock(ds_write_offset, len, &lpvPtr1, &dwBytes1, &lpvPtr2, &dwBytes2, 0);\n }\n\n if (SUCCEEDED(res))\n {\n // Write to pointers without reordering.\n memcpy(lpvPtr1, data, dwBytes1);\n if (lpvPtr2) { memcpy(lpvPtr2, data+dwBytes1, dwBytes2); }\n ds_write_offset += dwBytes1+dwBytes2;\n if (ds_write_offset >= ds_buffer_size) { ds_write_offset = dwBytes2; }\n\n // Release the data back to DirectSound.\n res = lpSoundBuffer->Unlock(lpvPtr1, dwBytes1, lpvPtr2, dwBytes2);\n if (SUCCEEDED(res))\n {\n // Success.\n DWORD status;\n lpSoundBuffer->GetStatus(&status);\n if (!(status & DSBSTATUS_PLAYING))\n {\n lpSoundBuffer->Play(0, 0, DSBPLAY_LOOPING);\n }\n return(dwBytes1+dwBytes2);\n }\n }\n // Lock, Unlock, or Restore failed.\n return(0);\n}\n\nstatic bool ds_play(char *samples_buffer, size_t samples_count)\n{\n unsigned char *data = (unsigned char *)samples_buffer;\n int samples_outputted, samples_remaining;\n\n 
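// (Editor's annotation; not in the original ZSNES source.) The loop below\n  // treats the DirectSound buffer as a ring: free space is computed as\n  // ds_buffer_size - (ds_write_offset - play_offset), corrected by one full\n  // buffer length when the write cursor has wrapped behind the play cursor,\n  // and each chunk is clamped to that space before ds_write_buffer() runs.\n  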
samples_remaining = samples_count;\n for (;;)\n {\n DWORD play_offset;\n int space, len = samples_remaining;\n\n // make sure we have enough space to write data\n lpSoundBuffer->GetCurrentPosition(&play_offset, 0);\n space = ds_buffer_size-(ds_write_offset-play_offset);\n if (space > ds_buffer_size) { space -= ds_buffer_size; } // ds_write_offset < play_offset\n if (space < len) { len = space; }\n\n samples_outputted = ds_write_buffer((unsigned char *)data, len);\n\n data += samples_outputted;\n samples_remaining -= samples_outputted;\n if (samples_outputted < samples_remaining) { Sleep(500); }\n else { break; }\n }\n return(true);\n}\n\nvoid CheckTimers()\n {\n // Lame fix for GUI using 100% CPU\n if (GUIOn || GUIOn2 || EMUPause)\n {\n Sleep(1);\n }\n\n QueryPerformanceCounter((LARGE_INTEGER *)&end2);\n\n while ((end2 - start2) >= update_ticks_pc2)\n {\n start2 += update_ticks_pc2;\n }\n\n if (T60HZEnabled == 1)\n {\n QueryPerformanceCounter((LARGE_INTEGER *)&end);\n\n while ((end - start) >= update_ticks_pc)\n {\n Game60hzcall();\n start += update_ticks_pc;\n }\n }\n\n if (T36HZEnabled == 1)\n {\n QueryPerformanceCounter((LARGE_INTEGER *)&end);\n\n while ((end - start) >= update_ticks_pc)\n {\n GUI36hzcall();\n start += update_ticks_pc;\n }\n }\n }\n\n volatile int SPCSize;\n volatile int buffer_ptr;\n extern unsigned char MMXSupport;\n\n void UpdateVFrame()\n {\n int DataNeeded;\n SPCSize = 256;\n\n //if (StereoSound==1) SPCSize=256;\n\n while (PeekMessage(&msg, NULL, 0, 0, PM_REMOVE))\n {\n TranslateMessage(&msg);\n DispatchMessage(&msg);\n }\n\n WinUpdateDevices();\n CheckTimers();\n\n if (SoundEnabled == 0)\n {\n return;\n }\n\n if (!UsePrimaryBuffer)\n {\n lpSoundBuffer->GetCurrentPosition(&CurrentPos, &WritePos);\n }\n else\n {\n lpPrimaryBuffer->GetCurrentPosition(&CurrentPos, &WritePos);\n }\n }\n\n extern unsigned char curblank;\n extern DWORD AddEndBytes;\n extern DWORD NumBytesPerLine;\n extern unsigned char *WinVidMemStart;\n void copy640x480x16bwin();\n void hq2x_16b();\n void hq2x_32b();\n void hq3x_16b();\n void hq3x_32b();\n void hq4x_16b();\n void hq4x_32b();\n extern unsigned char NGNoTransp;\n void ClearWin16();\n void ClearWin32();\n\n void clearwin()\n {\n pitch = LockSurface();\n if (pitch == 0)\n {\n return;\n }\n\n SurfBufD = (DWORD)&SurfBuf[0];\n\n if (AltSurface == 0)\n {\n switch (BitDepth)\n {\n case 16:\n ClearWin16();\n break;\n case 32:\n ClearWin32();\n break;\n }\n }\n else\n {\n ClearWin16();\n }\n\n UnlockSurface();\n }\n\n void initDirectDraw()\n {\n InitDirectDraw();\n if (CheckTVRatioReq())\n {\n KeepTVRatio();\n }\n clearwin();\n Clear2xSaIBuffer();\n clear_display();\n }\n\n void clear_display()\n {\n if(!CheckOGLMode())\n {\n clear_ddraw();\n }\n }\n\n void DrawWin256x224x16();\n void DrawWin256x224x32();\n void DrawWin320x240x16();\n\n volatile uint64_t copymaskRB = UINT64_C(0x001FF800001FF800);\n volatile uint64_t copymaskG = UINT64_C(0x0000FC000000FC00);\n volatile uint64_t copymagic = UINT64_C(0x0008010000080100);\n\n void drawscreenwin()\n {\n DWORD i, j, color32;\n DWORD *SURFDW;\n\n NGNoTransp = 0; // Set this value to 1 within the appropriate\n // video mode if you want to add a custom\n // transparency routine or hardware\n // transparency. 
This only works if\n // the value of newengen is equal to 1.\n // (see ProcessTransparencies in newgfx16.asm\n // for ZSNES' current transparency code)\n\n UpdateVFrame();\n if (curblank != 0)\n {\n return;\n }\n\n if (!(pitch = LockSurface()))\n {\n return;\n }\n\n ScreenPtr = vidbuffer;\n ScreenPtr += 16 * 2 + 32 * 2 + 256 * 2;\n\n DWORD HQMode = 0;\n\n if (MMXSupport == 0)\n {\n hqFilter = 0;\n }\n\n if (hqFilter != 0)\n {\n if ((GUIHQ2X[cvidmode] != 0) && (hqFilterlevel == 2))\n {\n HQMode = 2;\n }\n if ((GUIHQ3X[cvidmode] != 0) && (hqFilterlevel == 3))\n {\n HQMode = 3;\n }\n if ((GUIHQ4X[cvidmode] != 0) && (hqFilterlevel == 4))\n {\n HQMode = 4;\n }\n }\n\n if (PrevRes != resolutn)\n {\n if ((SurfaceX == 640) || (SurfaceX == 320))\n {\n BlitArea.bottom = SurfaceY;\n }\n else if (!NTSCFilter)\n {\n BlitArea.bottom = (SurfaceY / 240) * resolutn;\n }\n\n if ((FullScreen == 0) && (SMode == 0) && (DSMode == 0) && !NTSCFilter)\n {\n WindowHeight = (WindowHeight / 224) * resolutn;\n }\n\n initwinvideo();\n PrevRes = resolutn;\n }\n\n if (prevHQMode != HQMode)\n {\n initwinvideo();\n }\n\n if (prevNTSCMode != NTSCFilter)\n {\n initwinvideo();\n }\n\n if (prevScanlines != scanlines)\n {\n initwinvideo();\n prevScanlines = scanlines;\n }\n\n if (changeRes)\n {\n initwinvideo();\n }\n\n SurfBufD = (DWORD)&SurfBuf[0];\n SURFDW = (DWORD *)&SurfBuf[0];\n\n if (!(KitchenSync || (KitchenSyncPAL && totlines == 314)) && Refresh != 0 && !ForceRefreshRate)\n {\n Refresh = 0;\n ReleaseDirectDraw();\n InitDirectDraw();\n clearwin();\n Clear2xSaIBuffer();\n clear_display();\n }\n\n if (KitchenSync && Refresh != 120 && totlines == 263)\n {\n Refresh = 120;\n ReleaseDirectDraw();\n InitDirectDraw();\n clearwin();\n Clear2xSaIBuffer();\n clear_display();\n }\n\n if ((KitchenSync || KitchenSyncPAL) && Refresh != 100 && totlines == 314)\n {\n Refresh = 100;\n ReleaseDirectDraw();\n InitDirectDraw();\n clearwin();\n Clear2xSaIBuffer();\n clear_display();\n }\n\n if (!KitchenSync && KitchenSyncPAL && totlines == 263 && Refresh != SetRefreshRate &&\n ForceRefreshRate)\n {\n Refresh = SetRefreshRate;\n ReleaseDirectDraw();\n InitDirectDraw();\n clearwin();\n Clear2xSaIBuffer();\n clear_display();\n }\n\n if (!HQMode)\n {\n if (SurfaceX == 256 && SurfaceY == 240)\n {\n switch (BitDepth)\n {\n case 16:\n {\n DrawWin256x224x16();\n break;\n }\n case 32:\n {\n DrawWin256x224x32();\n break;\n }\n case 24:\n MessageBox(NULL,\n \"Sorry. ZSNESw does not work in windowed 24 bit color modes. 
\\nClick 'OK' to switch to a full screen mode.\",\n \"DDRAW Error\", MB_ICONERROR);\n cvidmode = 3;\n initwinvideo();\n Sleep(1000);\n drawscreenwin();\n break;\n default:\n UnlockSurface();\n MessageBox(NULL, \"Mode only available in 16 and 32 bit color\", \"DDRAW Error\",\n MB_ICONERROR);\n cvidmode = 2;\n initwinvideo();\n Sleep(1000);\n drawscreenwin();\n break;\n }\n }\n\n if (SurfaceX == 320 && SurfaceY == 240)\n {\n switch (BitDepth)\n {\n case 16:\n {\n DrawWin320x240x16();\n break;\n }\n case 32:\n for (j = 0; j < 8; j++)\n {\n SURFDW = (DWORD *)&SurfBuf[j * pitch];\n color32 = 0x7F000000;\n\n for (i = 0; i < 320; i++)\n {\n SURFDW[i] = color32;\n }\n }\n\n for (j = 8; (int)j < (resolutn - 1) + 8; j++)\n {\n color32 = 0x7F000000;\n for (i = 0; i < 32; i++)\n {\n SURFDW[i] = color32;\n }\n\n for (i = 32; i < (256 + 32); i++)\n {\n color32 = (((*(WORD *)(ScreenPtr)) & 0xF800) << 8) +\n (((*(WORD *)(ScreenPtr)) & 0x07E0) << 5) +\n (((*(WORD *)(ScreenPtr)) & 0x001F) << 3) + 0xFF000000;\n //SURFDW[i]=color32;\n ScreenPtr += 2;\n }\n\n color32 = 0x7F000000;\n for (i = (256 + 32); i < 320; i++)\n {\n SURFDW[i] = color32;\n }\n\n ScreenPtr = ScreenPtr + 576 - 512;\n SURFDW = (DWORD *)&SurfBuf[(j)*pitch];\n }\n\n for (j = ((resolutn - 1) + 8); j < 240; j++)\n {\n SURFDW = (DWORD *)&SurfBuf[j * pitch];\n\n color32 = 0x7F000000;\n for (i = 0; i < 320; i++)\n {\n SURFDW[i] = color32;\n }\n }\n break;\n default:\n UnlockSurface();\n MessageBox(NULL, \"Mode only available in 16 and 32 bit color\", \"DDRAW Error\",\n MB_ICONERROR);\n cvidmode = 2;\n initwinvideo();\n Sleep(1000);\n drawscreenwin();\n break;\n }\n }\n\n if (SurfaceX == 512 && SurfaceY == 480)\n {\n switch (BitDepth)\n {\n case 16:\n case 32:\n // using 16bpp AltSurface\n AddEndBytes = pitch - 1024;\n NumBytesPerLine = pitch;\n WinVidMemStart = &SurfBuf[0];\n asm_call(copy640x480x16bwin);\n break;\n default:\n UnlockSurface();\n MessageBox(NULL, \"Mode only available in 16 and 32 bit color\", \"DDRAW Error\",\n MB_ICONERROR);\n cvidmode = 2;\n initwinvideo();\n Sleep(1000);\n drawscreenwin();\n }\n }\n\n if (SurfaceX == 640 && SurfaceY == 480)\n {\n switch (BitDepth)\n {\n case 16:\n case 32:\n // using 16bpp AltSurface\n AddEndBytes = pitch - 1024;\n NumBytesPerLine = pitch;\n WinVidMemStart = &SurfBuf[(240 - resolutn) * pitch + 64 * 2];\n asm_call(copy640x480x16bwin);\n break;\n default:\n UnlockSurface();\n MessageBox(NULL, \"Mode only available in 16 and 32 bit color\", \"DDRAW Error\",\n MB_ICONERROR);\n cvidmode = 2;\n initwinvideo();\n Sleep(1000);\n drawscreenwin();\n }\n }\n if (SurfaceX == 602 && SurfaceY == 446)\n {\n switch (BitDepth)\n {\n case 16:\n case 32:\n // using 16bpp AltSurface\n AddEndBytes = pitch - 1024;\n NumBytesPerLine = pitch;\n WinVidMemStart = &SurfBuf[0];\n NTSCFilterDraw(SurfaceX, SurfaceY, pitch, WinVidMemStart);\n break;\n default:\n UnlockSurface();\n MessageBox(NULL, \"Mode only available in 16 and 32 bit color\", \"DDRAW Error\",\n MB_ICONERROR);\n cvidmode = 2;\n initwinvideo();\n Sleep(1000);\n drawscreenwin();\n }\n }\n }\n else\n {\n // HQMode != 0\n NumBytesPerLine = pitch;\n AddEndBytes = pitch - SurfaceX * (BitDepth / 8);\n WinVidMemStart = &SurfBuf[0];\n switch (BitDepth)\n {\n case 16:\n {\n switch (HQMode)\n {\n case 2:\n hq2x_16b(); break;\n case 3:\n hq3x_16b(); break;\n case 4:\n hq4x_16b(); break;\n }\n break;\n }\n case 32:\n {\n switch (HQMode)\n {\n case 2:\n hq2x_32b(); break;\n case 3:\n hq3x_32b(); break;\n case 4:\n hq4x_32b(); break;\n }\n break;\n }\n default:\n {\n 
UnlockSurface();\n MessageBox(NULL, \"Mode only available in 16 and 32 bit color\", \"DDRAW Error\",\n MB_ICONERROR);\n cvidmode = 2;\n initwinvideo();\n Sleep(1000);\n drawscreenwin();\n }\n }\n }\n UnlockSurface();\n if (CheckOGLMode()) gl_drawwin();\n else DrawScreen();\n }\n\n void SwitchFullScreen();\n void WriteLine();\n\n void WinUpdateDevices()\n {\n int i, j;\n unsigned char *keys;\n unsigned char keys2[256];\n for (i = 0; i < 256; i++)\n {\n keys2[i] = 0;\n }\n keys = (unsigned char*)&pressed;\nWriteLine();\n if (KeyboardInput && InputEn == 1)\n {\n if (FAILED(KeyboardInput->GetDeviceState(256, keys2)))\n {\n KeyboardInput->Acquire();\n if (FAILED(KeyboardInput->GetDeviceState(256, keys2)))\n {\n return;\n }\n }\n }\n else\n {\n return;\n }\n if (keys2[0x38] != 0 && keys2[0x3E] != 0)\n {\n zexit();\n }\n if (keys2[0xB8] != 0 && keys2[0x1C] != 0 || keys2[0x38] != 0 && keys2[0x1C] != 0)\n {\n asm_call(SwitchFullScreen);\n return;\n }\n\n for (i = 0; i < 256; i++)\n {\n if (keys2[i] == 0)\n {\n keys[i] = 0;\n }\n if (keys2[i] != 0 && keys[i] == 0)\n {\n keys[i] = 1;\n }\n }\n\n keys[0] = 0;\n\n for (i = 0; i < 5; i++)\n {\n if (JoystickInput[i])\n {\n for (j = 0; j < 32; j++)\n {\n keys[0x100 + i * 32 + j] = 0;\n }\n\n JoystickInput[i]->Poll();\n\n if (JoystickInput[i]->GetDeviceState(sizeof(DIJOYSTATE), &js[i]) == DIERR_INPUTLOST)\n {\n if (JoystickInput[i])\n {\n JoystickInput[i]->Acquire();\n }\n if (FAILED(JoystickInput[i]->GetDeviceState(sizeof(DIJOYSTATE), &js[i])))\n {\n return;\n }\n }\n\n if (!X1Disable[i])\n {\n if (js[i].lX > 0)\n {\n keys[0x100 + i * 32 + 0] = 1;\n }\n }\n\n if (!X2Disable[i])\n {\n if (js[i].lX < 0)\n {\n keys[0x100 + i * 32 + 1] = 1;\n }\n }\n\n if (!Y1Disable[i])\n {\n if (js[i].lY > 0)\n {\n keys[0x100 + i * 32 + 2] = 1;\n }\n }\n\n if (!Y2Disable[i])\n {\n if (js[i].lY < 0)\n {\n keys[0x100 + i * 32 + 3] = 1;\n }\n }\n\n if (!Z1Disable[i])\n {\n if (js[i].lZ > 0)\n {\n keys[0x100 + i * 32 + 4] = 1;\n }\n }\n\n if (!Z2Disable[i])\n {\n if (js[i].lZ < 0)\n {\n keys[0x100 + i * 32 + 5] = 1;\n }\n }\n\n if (!RY1Disable[i])\n {\n if (js[i].lRy > 0)\n {\n keys[0x100 + i * 32 + 6] = 1;\n }\n }\n\n if (!RY2Disable[i])\n {\n if (js[i].lRy < 0)\n {\n keys[0x100 + i * 32 + 7] = 1;\n }\n }\n\n if (!RZ1Disable[i])\n {\n if (js[i].lRz > 0)\n {\n keys[0x100 + i * 32 + 8] = 1;\n }\n }\n\n if (!RZ2Disable[i])\n {\n if (js[i].lRz < 0)\n {\n keys[0x100 + i * 32 + 9] = 1;\n }\n }\n\n if (!S01Disable[i])\n {\n if (js[i].rglSlider[0] > 0)\n {\n keys[0x100 + i * 32 + 10] = 1;\n }\n }\n\n if (!S02Disable[i])\n {\n if (js[i].rglSlider[0] < 0)\n {\n keys[0x100 + i * 32 + 11] = 1;\n }\n }\n\n if (!S11Disable[i])\n {\n if (js[i].rglSlider[1] > 0)\n {\n keys[0x100 + i * 32 + 12] = 1;\n }\n }\n\n if (!S12Disable[i])\n {\n if (js[i].rglSlider[1] < 0)\n {\n keys[0x100 + i * 32 + 13] = 1;\n }\n }\n\n if (!POVDisable[i])\n {\n for (int p = 0; (unsigned long) p < NumPOV[i]; p++)\n {\n switch (js[i].rgdwPOV[p])\n {\n case 0:\n keys[0x100 + i * 32 + 3] = 1;\n break;\n case 4500:\n keys[0x100 + i * 32 + 0] = 1;\n keys[0x100 + i * 32 + 3] = 1;\n break;\n case 9000:\n keys[0x100 + i * 32 + 0] = 1;\n break;\n case 13500:\n keys[0x100 + i * 32 + 0] = 1;\n keys[0x100 + i * 32 + 2] = 1;\n break;\n case 18000:\n keys[0x100 + i * 32 + 2] = 1;\n break;\n case 22500:\n keys[0x100 + i * 32 + 1] = 1;\n keys[0x100 + i * 32 + 2] = 1;\n break;\n case 27000:\n keys[0x100 + i * 32 + 1] = 1;\n break;\n case 31500:\n keys[0x100 + i * 32 + 1] = 1;\n keys[0x100 + i * 32 + 3] = 1;\n break;\n }\n }\n }\n\n if 
(NumBTN[i])\n {\n for (j = 0; (unsigned long) j < NumBTN[i]; j++)\n {\n if (js[i].rgbButtons[j])\n {\n keys[0x100 + i * 32 + 16 + j] = 1;\n }\n }\n }\n }\n else\n {\n for (j = 0; j < 32; j++)\n {\n keys[0x100 + i * 32 + j] = 0;\n }\n }\n }\n }\n\n int GetMouseX()\n {\n InputRead();\n MouseX += MouseMoveX / MouseSensitivity;\n\n if (MouseX > MouseMaxX)\n {\n MouseX = MouseMaxX;\n\n if (TrapMouseCursor == 1)\n {\n if (abs((int)MouseMoveX) > (10 / MouseSensitivity) && T36HZEnabled == 1 && FullScreen == 0 &&\n MouseButtonPressed == 0)\n {\n MouseInput->Unacquire();\n SetCursorPos(X + WindowWidth + 32, (int)(Y + (MouseY * WindowHeight / 224)));\n }\n }\n else if (FullScreen == 0 && device1 == 0 && device2 == 0 && MouseButtonPressed == 0 &&\n GUIOn2 == 1)\n {\n MouseInput->Unacquire();\n SetCursorPos(X + WindowWidth + 1, (int)(Y + (MouseY * WindowHeight / 224)));\n }\n }\n\n if (MouseX < MouseMinX)\n {\n MouseX = MouseMinX;\n\n if (TrapMouseCursor == 1)\n {\n if (abs((int)MouseMoveX) > (10 / MouseSensitivity) && T36HZEnabled == 1 && FullScreen == 0 &&\n MouseButtonPressed == 0)\n {\n MouseInput->Unacquire();\n SetCursorPos(X - 32, (int)(Y + (MouseY * WindowHeight / 224)));\n }\n }\n else if (FullScreen == 0 && device1 == 0 && device2 == 0 && MouseButtonPressed == 0 &&\n GUIOn2 == 1)\n {\n MouseInput->Unacquire();\n SetCursorPos(X - 1, (int)(Y + (MouseY * WindowHeight / 224)));\n }\n }\n return((int)MouseX);\n }\n\n int GetMouseY()\n {\n MouseY += MouseMoveY / MouseSensitivity;\n\n if (MouseY > MouseMaxY)\n {\n MouseY = MouseMaxY;\n\n if (TrapMouseCursor == 1)\n {\n if (abs((int)MouseMoveY) > (10 / MouseSensitivity) && T36HZEnabled == 1 && FullScreen == 0 &&\n MouseButtonPressed == 0)\n {\n MouseInput->Unacquire();\n SetCursorPos((int)(X + (MouseX * WindowWidth / 256)), Y + WindowHeight + 32);\n }\n }\n else if (FullScreen == 0 && device1 == 0 && device2 == 0 && MouseButtonPressed == 0 &&\n GUIOn2 == 1)\n {\n MouseInput->Unacquire();\n SetCursorPos((int)(X + (MouseX * WindowWidth / 256)), Y + WindowHeight + 1);\n }\n }\n\n if (MouseY < MouseMinY)\n {\n MouseY = MouseMinY;\n\n if (TrapMouseCursor == 1)\n {\n if (abs((int)MouseMoveY) > (10 / MouseSensitivity) && T36HZEnabled == 1 && FullScreen == 0 &&\n MouseButtonPressed == 0)\n {\n MouseInput->Unacquire();\n SetCursorPos((int)(X + (MouseX * WindowWidth / 256)), Y - 32);\n }\n }\n else if (FullScreen == 0 && device1 == 0 && device2 == 0 && MouseButtonPressed == 0 &&\n GUIOn2 == 1)\n {\n MouseInput->Unacquire();\n SetCursorPos((int)(X + (MouseX * WindowWidth / 256)), Y - 1);\n }\n }\n\n return((int)MouseY);\n }\n\n int GetMouseMoveX()\n {\n return((int)MouseMoveX / MouseSensitivity);\n }\n\n int GetMouseMoveY()\n {\n return((int)MouseMoveY / MouseSensitivity);\n }\n\n int GetMouseButton()\n {\n if (MouseButton == (lhguimouse ? 2 : 1))\n {\n MouseButtonPressed = 1;\n }\n else\n {\n MouseButtonPressed = 0;\n }\n if (MouseButton & (lhguimouse ? 
1 : 2))\n {\n while (MouseButton != 0 && T36HZEnabled && FullScreen == 0)\n {\n Moving = 1;\n X += (int)MouseMoveX;\n Y += (int)MouseMoveY;\n\n InputRead();\n initwinvideo();\n }\n }\n if (Moving == 1)\n {\n Moving = 0;\n initwinvideo();\n }\n return((int)MouseButton);\n }\n\n void SetMouseMinX(int MinX)\n {\n MouseMinX = (float)MinX;\n }\n\n void SetMouseMaxX(int MaxX)\n {\n MouseMaxX = (float)MaxX;\n }\n\n void SetMouseMinY(int MinY)\n {\n MouseMinY = (float)MinY;\n }\n\n void SetMouseMaxY(int MaxY)\n {\n MouseMaxY = (float)MaxY;\n }\n\n void SetMouseX(int X)\n {\n MouseX = (float)X;\n }\n\n void SetMouseY(int Y)\n {\n MouseY = (float)Y;\n }\n\n void FrameSemaphore()\n {\n if (T60HZEnabled)\n {\n double delay;\n QueryPerformanceCounter((LARGE_INTEGER *)&end);\n\n delay = ((update_ticks_pc - (end - start)) * 1000.0 / freq) - 3.0;\n\n if (delay > 0.0)\n {\n WaitForSingleObject(hLock, (unsigned int) delay);\n }\n }\n }\n\n void ZsnesPage()\n {\n ShellExecute(NULL, NULL, \"http://www.zsnes.com/\", NULL, NULL, 0);\n MouseX = 0;\n MouseY = 0;\n }\n\n void DocsPage()\n {\n ShellExecute(NULL, NULL, \"http://zsnes-docs.sourceforge.net/\", NULL, NULL, 0);\n MouseX = 0;\n MouseY = 0;\n }\n\n char *CBBuffer;\n unsigned int CBLength;\n void PasteClipBoard()\n {\n if (OpenClipboard(0))\n {\n char *p = (char *)GetClipboardData(CF_TEXT);\n if (p)\n {\n strncpy(CBBuffer, p, CBLength);\n CBBuffer[CBLength - 1] = 0;\n\n for (p = CBBuffer; *p; p++)\n {\n if (isspace(*p))\n {\n *p = ' ';\n }\n }\n for (p--; p >= CBBuffer; p--)\n {\n if (isspace(*p))\n {\n *p = 0;\n }\n else\n {\n break;\n }\n }\n }\n CloseClipboard();\n }\n }\n\n void WriteLine()\n {\n char buf[50];\n sprintf(buf, \"%d\\n\", zspc_time);\n WriteConsole(debugWindow, buf, strlen(buf), NULL, NULL);\n }\n\n // This function creates the debug console\n void InitDebugger()\n {\n if (AllocConsole())\n {\n debugWindow = GetStdHandle(STD_OUTPUT_HANDLE);\n WriteConsole(debugWindow, \"Welcome to the ZSNES Debugger v0.01\\n\",\n sizeof(\"Welcome to the ZSNES Debugger v0.01\\n\"), NULL, NULL);\n }\n }\n\n int CheckBattery()\n {\n SYSTEM_POWER_STATUS SysPowerStat;\n GetSystemPowerStatus(&SysPowerStat);\n\n if (SysPowerStat.ACLineStatus == 0) //Definitly running off of battery\n {\n return(1); //Running off of battery\n }\n\n if ((SysPowerStat.BatteryFlag == 255) || //Unknown\n (SysPowerStat.BatteryFlag & 128)) //No battery\n {\n return(-1);\n }\n\n if ((SysPowerStat.BatteryFlag & 8) || //Charging\n (SysPowerStat.ACLineStatus == 1)) //Plugged in\n {\n return(0); //Plugged in\n }\n\n return(1); //Running off of battery\n }\n\n int CheckBatteryTime()\n {\n SYSTEM_POWER_STATUS SysPowerStat;\n GetSystemPowerStatus(&SysPowerStat);\n return SysPowerStat.BatteryLifeTime;\n }\n\n int CheckBatteryPercent()\n {\n SYSTEM_POWER_STATUS SysPowerStat;\n GetSystemPowerStatus(&SysPowerStat);\n return((SysPowerStat.BatteryLifePercent == 255) ? -1 : SysPowerStat.BatteryLifePercent);\n }\n\n extern unsigned int delayvalue;\n\n // Delay function for GUI\n void DoSleep()\n {\n // Fraction value for windows version of sleep\n delayvalue /= 100;\n\n Sleep(delayvalue);\n }\n\n void DisplayWIPDisclaimer()\n {\n unsigned int version_hash();\n unsigned int CurrentBuildNum = version_hash();\n\n if (CurrentBuildNum != PrevBuildNum)\n {\n MessageBox(NULL,\n \"This build of ZSNES is a WORK IN PROGRESS. This means that it is known to contain bugs and certain features\\nmay or may not be working correctly. 
This build is not any representation of final work and is provided AS IS\\nfor people to try bleeding edge code.\\n\\nPlease see http://zsnes.game-host.org/~pagefault/ for a list of current issues.\",\n \"Disclaimer\", MB_OK);\n PrevBuildNum = CurrentBuildNum;\n }\n }\n\n void DockDebugger()\n {\n RECT MainWindowXY;\n ZeroMemory(&MainWindowXY, sizeof(RECT));\n GetWindowRect(hMainWindow, &MainWindowXY);\n DebugWindowHandle = FindWindow(NULL ,\"ZSNES Debugger\");\n SetWindowPos(DebugWindowHandle, HWND_TOP, MainWindowXY.right, MainWindowXY.top, 0, 0, SWP_NOSIZE);\n }\n\n}\n\n" }, { "alpha_fraction": 0.7406250238418579, "alphanum_fraction": 0.75390625, "avg_line_length": 22.703702926635742, "blob_id": "2b0c9a41ad9783335f0b1e15b340e9088949d3bc", "content_id": "d5987977daa66519c77cf2ab9ce31980c3fd1c9f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1280, "license_type": "no_license", "max_line_length": 72, "num_lines": 54, "path": "/src/debugger/ui.h", "repo_name": "TeoTwawki/zsnes-archive", "src_encoding": "UTF-8", "text": "/*\nCopyright (C) 1997-2008 ZSNES Team ( zsKnight, _Demo_, pagefault, Nach )\n\nhttp://www.zsnes.com\nhttp://sourceforge.net/projects/zsnes\nhttps://zsnes.bountysource.com\n\nThis program is free software; you can redistribute it and/or\nmodify it under the terms of the GNU General Public License\nversion 2 as published by the Free Software Foundation.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with this program; if not, write to the Free Software\nFoundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.\n*/\n\n#ifndef UI_H\n#define UI_H\n\n#include <QMainWindow>\n#include <QCloseEvent>\n\n#ifdef __UNIXSDL__ //I hate this hack -Nach\n#include \"debugger/ui_debugger.h\"\n#else\n#include \"ui_debugger.h\"\n#endif\n\nclass QtDebugger : public QMainWindow\n{\n Q_OBJECT\n\n private:\n Ui::Debugger ui;\n\n static QtDebugger *singleton;\n\n QtDebugger(QWidget *parent);\n ~QtDebugger();\n\n private slots:\n void on_pauseButton_clicked();\n\n public:\n static void showQtDebugger(QWidget *parent);\n static void destroyQtDebugger();\n};\n\n#endif\n" }, { "alpha_fraction": 0.6702263951301575, "alphanum_fraction": 0.6787697672843933, "avg_line_length": 25.602272033691406, "blob_id": "921e5911743d776477413ec09804806eaa823ddd", "content_id": "4a17249ff86458096522177b4ed0d02d75ee6323", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2341, "license_type": "no_license", "max_line_length": 96, "num_lines": 88, "path": "/src/jma/jma.h", "repo_name": "TeoTwawki/zsnes-archive", "src_encoding": "UTF-8", "text": "/*\nCopyright (C) 2005-2008 NSRT Team ( http://nsrt.edgeemu.com )\n\nThis program is free software; you can redistribute it and/or\nmodify it under the terms of the GNU General Public License\nversion 2 as published by the Free Software Foundation.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with this program; if not, write to the Free Software\nFoundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.\n*/\n\n#ifndef JMA_H\n#define JMA_H\n\n#include <string>\n#include <fstream>\n#include <vector>\n#include <time.h>\n\nnamespace JMA\n{\n enum jma_errors { JMA_NO_CREATE, JMA_NO_MEM_ALLOC, JMA_NO_OPEN, JMA_BAD_FILE,\n JMA_UNSUPPORTED_VERSION, JMA_COMPRESS_FAILED, JMA_DECOMPRESS_FAILED,\n JMA_FILE_NOT_FOUND };\n\n struct jma_file_info_base\n {\n std::string name;\n std::string comment;\n size_t size;\n unsigned int crc32;\n };\n\n struct jma_public_file_info : jma_file_info_base\n {\n time_t datetime;\n };\n\n struct jma_file_info : jma_file_info_base\n {\n unsigned short date;\n unsigned short time;\n const unsigned char *buffer;\n };\n\n template<class jma_file_type>\n inline size_t get_total_size(std::vector<jma_file_type>& files)\n {\n size_t size = 0;\n for (typename std::vector<jma_file_type>::iterator i = files.begin(); i != files.end(); i++)\n {\n size += i->size; //We do have a problem if this wraps around\n }\n\n return(size);\n }\n\n class jma_open\n {\n public:\n jma_open(const char *) throw(jma_errors);\n ~jma_open();\n\n std::vector<jma_public_file_info> get_files_info();\n std::vector<unsigned char *> get_all_files(unsigned char *) throw(jma_errors);\n void extract_file(std::string& name, unsigned char *) throw(jma_errors);\n bool is_solid();\n\n private:\n std::ifstream stream;\n std::vector<jma_file_info> files;\n size_t chunk_size;\n unsigned char *decompressed_buffer;\n unsigned char *compressed_buffer;\n\n void chunk_seek(unsigned int) throw(jma_errors);\n void retrieve_file_block() throw(jma_errors);\n };\n\n const char *jma_error_text(jma_errors);\n}\n#endif\n" }, { "alpha_fraction": 0.453200101852417, "alphanum_fraction": 0.536933958530426, "avg_line_length": 28.38290023803711, "blob_id": "cd3c212399db0df2cb120d33210a645b73aa6a84", "content_id": "2ca0e23a2d3cdf73cfffe6031905b99560d64b25", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 7906, "license_type": "no_license", "max_line_length": 84, "num_lines": 269, "path": "/src/chips/sdd1emu.c", "repo_name": "TeoTwawki/zsnes-archive", "src_encoding": "UTF-8", "text": "/*\nCopyright (C) 1997-2008 ZSNES Team ( zsKnight, _Demo_, pagefault, Nach )\n\nhttp://www.zsnes.com\nhttp://sourceforge.net/projects/zsnes\nhttps://zsnes.bountysource.com\n\nThis program is free software; you can redistribute it and/or\nmodify it under the terms of the GNU General Public License\nversion 2 as published by the Free Software Foundation.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with this program; if not, write to the Free Software\nFoundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.\n*/\n\n\n/*******************************************************************************\n S-DD1 C emulator code\n (c) Copyright 2003 Brad Jorsch with research by\n Andreas Naive and John Weidman\n*******************************************************************************/\n/* S-DD1 decompressor\n *\n * Based on code and documentation by Andreas Naive, who deserves a great deal\n * of thanks and credit for figuring this out.\n *\n * Andreas says:\n * The author is greatly indebted with The Dumper, without whose help and\n * patience providing him with real S-DD1 data the research had never been\n * possible. He also wish to note that in the very beggining of his research,\n * Neviksti had done some steps in the right direction. By last, the author is\n * indirectly indebted to all the people that worked and contributed in the\n * S-DD1 issue in the past.\n */\n\n#include <string.h>\n\n#ifndef __GNUC__\n#define INLINE static\n#else\n#define INLINE static inline\n#endif\n\nstatic int valid_bits;\nstatic unsigned short in_stream;\nstatic unsigned char *in_buf;\nstatic unsigned char bit_ctr[8];\nstatic unsigned char context_states[32];\nstatic int context_MPS[32];\nstatic int bitplane_type;\nstatic int high_context_bits;\nstatic int low_context_bits;\nstatic int prev_bits[8];\n\nstatic struct {\n unsigned char code_size;\n unsigned char MPS_next;\n unsigned char LPS_next;\n} evolution_table[] = {\n /* 0 */ { 0,25,25},\n /* 1 */ { 0, 2, 1},\n /* 2 */ { 0, 3, 1},\n /* 3 */ { 0, 4, 2},\n /* 4 */ { 0, 5, 3},\n /* 5 */ { 1, 6, 4},\n /* 6 */ { 1, 7, 5},\n /* 7 */ { 1, 8, 6},\n /* 8 */ { 1, 9, 7},\n /* 9 */ { 2,10, 8},\n /* 10 */ { 2,11, 9},\n /* 11 */ { 2,12,10},\n /* 12 */ { 2,13,11},\n /* 13 */ { 3,14,12},\n /* 14 */ { 3,15,13},\n /* 15 */ { 3,16,14},\n /* 16 */ { 3,17,15},\n /* 17 */ { 4,18,16},\n /* 18 */ { 4,19,17},\n /* 19 */ { 5,20,18},\n /* 20 */ { 5,21,19},\n /* 21 */ { 6,22,20},\n /* 22 */ { 6,23,21},\n /* 23 */ { 7,24,22},\n /* 24 */ { 7,24,23},\n /* 25 */ { 0,26, 1},\n /* 26 */ { 1,27, 2},\n /* 27 */ { 2,28, 4},\n /* 28 */ { 3,29, 8},\n /* 29 */ { 4,30,12},\n /* 30 */ { 5,31,16},\n /* 31 */ { 6,32,18},\n /* 32 */ { 7,24,22}\n};\n\nstatic unsigned char run_table[128] = {\n 128, 64, 96, 32, 112, 48, 80, 16, 120, 56, 88, 24, 104, 40, 72,\n 8, 124, 60, 92, 28, 108, 44, 76, 12, 116, 52, 84, 20, 100, 36,\n 68, 4, 126, 62, 94, 30, 110, 46, 78, 14, 118, 54, 86, 22, 102,\n 38, 70, 6, 122, 58, 90, 26, 106, 42, 74, 10, 114, 50, 82, 18,\n 98, 34, 66, 2, 127, 63, 95, 31, 111, 47, 79, 15, 119, 55, 87,\n 23, 103, 39, 71, 7, 123, 59, 91, 27, 107, 43, 75, 11, 115, 51,\n 83, 19, 99, 35, 67, 3, 125, 61, 93, 29, 109, 45, 77, 13, 117,\n 53, 85, 21, 101, 37, 69, 5, 121, 57, 89, 25, 105, 41, 73, 9,\n 113, 49, 81, 17, 97, 33, 65, 1\n};\n\nINLINE unsigned char GetCodeword(int bits){\n unsigned char tmp;\n\n if(!valid_bits){\n in_stream|=*(in_buf++);\n valid_bits=8;\n }\n in_stream<<=1;\n valid_bits--;\n in_stream^=0x8000;\n if(in_stream&0x8000) return 0x80+(1<<bits);\n tmp=(in_stream>>8) | (0x7f>>bits);\n in_stream<<=bits;\n valid_bits-=bits;\n if(valid_bits<0){\n in_stream |= (*(in_buf++))<<(-valid_bits);\n valid_bits+=8;\n }\n return run_table[tmp];\n}\n\nINLINE unsigned char GolombGetBit(int code_size){\n if(!bit_ctr[code_size]) 
bit_ctr[code_size]=GetCodeword(code_size);\n bit_ctr[code_size]--;\n if(bit_ctr[code_size]==0x80){\n bit_ctr[code_size]=0;\n return 2; /* secret code for 'last zero'. ones are always last. */\n }\n return (bit_ctr[code_size]==0)?1:0;\n}\n\nINLINE unsigned char ProbGetBit(unsigned char context){\n unsigned char state=context_states[context];\n unsigned char bit=GolombGetBit(evolution_table[state].code_size);\n\n if(bit&1){\n context_states[context]=evolution_table[state].LPS_next;\n if(state<2){\n context_MPS[context]^=1;\n return context_MPS[context]; /* just inverted, so just return it */\n } else{\n return context_MPS[context]^1; /* we know bit is 1, so use a constant */\n }\n } else if(bit){\n context_states[context]=evolution_table[state].MPS_next;\n /* zero here, zero there, no difference so drop through. */\n }\n return context_MPS[context]; /* we know bit is 0, so don't bother xoring */\n}\n\nINLINE unsigned char GetBit(unsigned char cur_bitplane){\n unsigned char bit;\n\n bit=ProbGetBit(((cur_bitplane&1)<<4)\n | ((prev_bits[cur_bitplane]&high_context_bits)>>5)\n | (prev_bits[cur_bitplane]&low_context_bits));\n\n prev_bits[cur_bitplane] <<= 1;\n prev_bits[cur_bitplane] |= bit;\n return bit;\n}\n\nstatic unsigned char cur_plane;\nstatic unsigned char num_bits;\nstatic unsigned char next_byte;\n\nvoid SDD1_init(unsigned char *in){\n bitplane_type=in[0]>>6;\n\n switch(in[0]&0x30){\n case 0x00:\n high_context_bits=0x01c0;\n low_context_bits =0x0001;\n break;\n case 0x10:\n high_context_bits=0x0180;\n low_context_bits =0x0001;\n break;\n case 0x20:\n high_context_bits=0x00c0;\n low_context_bits =0x0001;\n break;\n case 0x30:\n high_context_bits=0x0180;\n low_context_bits =0x0003;\n break;\n }\n\n in_stream=(in[0]<<11) | (in[1]<<3);\n valid_bits=5;\n in_buf=in+2;\n memset(bit_ctr, 0, sizeof(bit_ctr));\n memset(context_states, 0, sizeof(context_states));\n memset(context_MPS, 0, sizeof(context_MPS));\n memset(prev_bits, 0, sizeof(prev_bits));\n\n cur_plane=0;\n num_bits=0;\n}\n\nunsigned char SDD1_get_byte(){\n unsigned char bit;\n unsigned char byte=0;\n\n switch(bitplane_type){\n case 0:\n num_bits+=16;\n if(num_bits&16){\n next_byte=0;\n for(bit=0x80; bit; bit>>=1){\n if(GetBit(0)) byte |= bit;\n if(GetBit(1)) next_byte |= bit;\n }\n return byte;\n } else {\n return next_byte;\n }\n\n case 1:\n num_bits+=16;\n if(num_bits&16){\n next_byte=0;\n for(bit=0x80; bit; bit>>=1){\n if(GetBit(cur_plane)) byte |= bit;\n if(GetBit(cur_plane+1)) next_byte |= bit;\n }\n return byte;\n } else {\n if(!num_bits) cur_plane = (cur_plane+2)&7;\n return next_byte;\n }\n\n case 2:\n num_bits+=16;\n if(num_bits&16){\n next_byte=0;\n for(bit=0x80; bit; bit>>=1){\n if(GetBit(cur_plane)) byte |= bit;\n if(GetBit(cur_plane+1)) next_byte |= bit;\n }\n return byte;\n } else {\n if(!num_bits) cur_plane ^= 2;\n return next_byte;\n }\n\n case 3:\n for(cur_plane=0, bit=1; bit; bit<<=1, cur_plane++){\n if(GetBit(cur_plane)) byte |= bit;\n }\n return byte;\n\n default:\n /* should never happen */\n return 0;\n }\n}\n\n\n" }, { "alpha_fraction": 0.7281066179275513, "alphanum_fraction": 0.7557978630065918, "avg_line_length": 26.514286041259766, "blob_id": "c18ef6195d39a7741b5cf7f44cb82800594cc39c", "content_id": "5e13a1f4baa7242e1a6ffbd9106d134c741b6701", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 5778, "license_type": "no_license", "max_line_length": 115, "num_lines": 210, "path": "/src/jma/iiostrm.h", "repo_name": "TeoTwawki/zsnes-archive", "src_encoding": 
"UTF-8", "text": "/*\nCopyright (C) 2005-2008 NSRT Team ( http://nsrt.edgeemu.com )\nCopyright (C) 2002 Andrea Mazzoleni ( http://advancemame.sf.net )\nCopyright (C) 2001-4 Igor Pavlov ( http://www.7-zip.org )\n\nThis library is free software; you can redistribute it and/or\nmodify it under the terms of the GNU Lesser General Public\nLicense version 2.1 as published by the Free Software Foundation.\n\nThis library is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU\nLesser General Public License for more details.\n\nYou should have received a copy of the GNU Lesser General Public\nLicense along with this library; if not, write to the Free Software\nFoundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA\n*/\n\n#ifndef __IINOUTSTREAMS_H\n#define __IINOUTSTREAMS_H\n\n#include <string>\n#include <fstream>\n\n#include \"portable.h\"\n\n\nclass ISequentialInStream\n{\npublic:\n virtual HRESULT Read(void *, UINT32, UINT32 *) = 0;\n\n virtual ~ISequentialInStream() {}\n};\n\n\nclass ISequentialInStream_Array : public ISequentialInStream\n{\n const char *data;\n unsigned int size;\npublic:\n ISequentialInStream_Array(const char *Adata, unsigned Asize) : data(Adata), size(Asize) { }\n\n HRESULT Read(void *aData, UINT32 aSize, UINT32 *aProcessedSize);\n\n virtual ~ISequentialInStream_Array() {}\n};\n\nclass ISequentialInStream_String : public ISequentialInStream\n{\n std::string& data;\npublic:\n ISequentialInStream_String(std::string& Adata) : data(Adata) { }\n\n HRESULT Read(void *aData, UINT32 aSize, UINT32 *aProcessedSize);\n\n virtual ~ISequentialInStream_String() {}\n};\n\nclass ISequentialInStream_Istream : public ISequentialInStream\n{\n std::istream& data;\npublic:\n ISequentialInStream_Istream(std::istream& Adata) : data(Adata) { }\n\n HRESULT Read(void *aData, UINT32 aSize, UINT32 *aProcessedSize);\n\n virtual ~ISequentialInStream_Istream() {}\n};\n\n\n\nclass ISequentialOutStream\n{\npublic:\n virtual bool overflow_get() const = 0;\n virtual unsigned int size_get() const = 0;\n\n virtual HRESULT Write(const void *, UINT32, UINT32 *) = 0;\n\n virtual ~ISequentialOutStream() {}\n};\n\n\nclass ISequentialOutStream_Array : public ISequentialOutStream\n{\n char *data;\n unsigned int size;\n bool overflow;\n unsigned int total;\npublic:\n ISequentialOutStream_Array(char *Adata, unsigned Asize) : data(Adata), size(Asize), overflow(false), total(0) { }\n\n bool overflow_get() const { return(overflow); }\n unsigned int size_get() const { return(total); }\n\n HRESULT Write(const void *aData, UINT32 aSize, UINT32 *aProcessedSize);\n\n virtual ~ISequentialOutStream_Array() {}\n};\n\nclass ISequentialOutStream_String : public ISequentialOutStream\n{\n std::string& data;\n unsigned int total;\npublic:\n ISequentialOutStream_String(std::string& Adata) : data(Adata), total(0) { }\n\n bool overflow_get() const { return(false); }\n unsigned int size_get() const { return(total); }\n\n HRESULT Write(const void *aData, UINT32 aSize, UINT32 *aProcessedSize);\n\n virtual ~ISequentialOutStream_String() {}\n};\n\n\nclass ISequentialOutStream_Ostream : public ISequentialOutStream\n{\n std::ostream& data;\n unsigned int total;\npublic:\n ISequentialOutStream_Ostream(std::ostream& Adata) : data(Adata), total(0) { }\n\n bool overflow_get() const { return(false); }\n unsigned int size_get() const { return(total); }\n\n HRESULT Write(const void *aData, UINT32 aSize, UINT32 
*aProcessedSize);\n\n virtual ~ISequentialOutStream_Ostream() {}\n};\n\n\n\nclass ISequentialStreamCRC32\n{\nprotected:\n unsigned int crc32;\npublic:\n ISequentialStreamCRC32() : crc32(0) {}\n unsigned int crc32_get() const { return(crc32); }\n\n virtual ~ISequentialStreamCRC32() {}\n};\n\n\nclass ISequentialInStreamCRC32_Array : public ISequentialInStream_Array, public ISequentialStreamCRC32\n{\npublic:\n ISequentialInStreamCRC32_Array(const char *Adata, unsigned Asize) : ISequentialInStream_Array(Adata, Asize) { }\n\n HRESULT Read(void *aData, UINT32 aSize, UINT32 *aProcessedSize);\n\n virtual ~ISequentialInStreamCRC32_Array() {}\n};\n\nclass ISequentialInStreamCRC32_String : public ISequentialInStream_String, public ISequentialStreamCRC32\n{\npublic:\n ISequentialInStreamCRC32_String(std::string& Adata) : ISequentialInStream_String(Adata) { }\n\n HRESULT Read(void *aData, UINT32 aSize, UINT32 *aProcessedSize);\n\n virtual ~ISequentialInStreamCRC32_String() {}\n};\n\nclass ISequentialInStreamCRC32_Istream : public ISequentialInStream_Istream, public ISequentialStreamCRC32\n{\npublic:\n ISequentialInStreamCRC32_Istream(std::istream& Adata) : ISequentialInStream_Istream(Adata) { }\n\n HRESULT Read(void *aData, UINT32 aSize, UINT32 *aProcessedSize);\n\n virtual ~ISequentialInStreamCRC32_Istream() {}\n};\n\n\nclass ISequentialOutStreamCRC32_Array : public ISequentialOutStream_Array, public ISequentialStreamCRC32\n{\npublic:\n ISequentialOutStreamCRC32_Array(char *Adata, unsigned Asize) : ISequentialOutStream_Array(Adata, Asize) { }\n\n HRESULT Write(const void *aData, UINT32 aSize, UINT32 *aProcessedSize);\n\n virtual ~ISequentialOutStreamCRC32_Array() {}\n};\n\nclass ISequentialOutStreamCRC32_String : public ISequentialOutStream_String, public ISequentialStreamCRC32\n{\npublic:\n ISequentialOutStreamCRC32_String(std::string& Adata) : ISequentialOutStream_String(Adata) { }\n\n HRESULT Write(const void *aData, UINT32 aSize, UINT32 *aProcessedSize);\n\n virtual ~ISequentialOutStreamCRC32_String() {}\n};\n\n\nclass ISequentialOutStreamCRC32_Ostream : public ISequentialOutStream_Ostream, public ISequentialStreamCRC32\n{\npublic:\n ISequentialOutStreamCRC32_Ostream(std::ostream& Adata) : ISequentialOutStream_Ostream(Adata) { }\n\n HRESULT Write(const void *aData, UINT32 aSize, UINT32 *aProcessedSize);\n\n virtual ~ISequentialOutStreamCRC32_Ostream() {}\n};\n\n#endif\n" }, { "alpha_fraction": 0.7001897692680359, "alphanum_fraction": 0.7248576879501343, "avg_line_length": 27.33333396911621, "blob_id": "478cc1c26fedb371fd109a8fccb9ffb469d7e9a2", "content_id": "7f4498ba4376485dadd93d250da472c5ac6f1f1d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2635, "license_type": "no_license", "max_line_length": 83, "num_lines": 93, "path": "/src/jma/lencoder.h", "repo_name": "TeoTwawki/zsnes-archive", "src_encoding": "UTF-8", "text": "/*\nCopyright (C) 2005-2008 NSRT Team ( http://nsrt.edgeemu.com )\nCopyright (C) 2002 Andrea Mazzoleni ( http://advancemame.sf.net )\nCopyright (C) 2001-4 Igor Pavlov ( http://www.7-zip.org )\n\nThis library is free software; you can redistribute it and/or\nmodify it under the terms of the GNU Lesser General Public\nLicense version 2.1 as published by the Free Software Foundation.\n\nThis library is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU\nLesser General Public License for more details.\n\nYou should have received a copy of the GNU Lesser General Public\nLicense along with this library; if not, write to the Free Software\nFoundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA\n*/\n\n#ifndef __LENCODER_H\n#define __LENCODER_H\n\n#include \"btreecd.h\"\n\nnamespace NLength {\n\nconst UINT32 kNumPosStatesBitsMax = 4;\nconst int kNumPosStatesMax = (1 << kNumPosStatesBitsMax);\n\n\nconst int kNumPosStatesBitsEncodingMax = 4;\nconst int kNumPosStatesEncodingMax = (1 << kNumPosStatesBitsEncodingMax);\n\n\nconst int kNumMoveBits = 5;\n\nconst int kNumLenBits = 3;\nconst int kNumLowSymbols = 1 << kNumLenBits;\nconst int kNumMidBits = 3;\nconst int kNumMidSymbols = 1 << kNumMidBits;\n\nconst int kNumHighBits = 8;\n\nconst int kNumSymbolsTotal = kNumLowSymbols + kNumMidSymbols + (1 << kNumHighBits);\n\nconst int kNumSpecSymbols = kNumLowSymbols + kNumMidSymbols;\n\nclass CDecoder\n{\n CMyBitDecoder<kNumMoveBits> m_Choice;\n CBitTreeDecoder<kNumMoveBits, kNumLenBits> m_LowCoder[kNumPosStatesMax];\n CMyBitDecoder<kNumMoveBits> m_Choice2;\n CBitTreeDecoder<kNumMoveBits, kNumMidBits> m_MidCoder[kNumPosStatesMax];\n CBitTreeDecoder<kNumMoveBits, kNumHighBits> m_HighCoder;\n UINT32 m_NumPosStates;\npublic:\n void Create(UINT32 aNumPosStates)\n { m_NumPosStates = aNumPosStates; }\n void Init()\n {\n m_Choice.Init();\n for (UINT32 aPosState = 0; aPosState < m_NumPosStates; aPosState++)\n {\n m_LowCoder[aPosState].Init();\n m_MidCoder[aPosState].Init();\n }\n m_Choice2.Init();\n m_HighCoder.Init();\n }\n UINT32 Decode(CMyRangeDecoder *aRangeDecoder, UINT32 aPosState)\n {\n if(m_Choice.Decode(aRangeDecoder) == 0)\n return m_LowCoder[aPosState].Decode(aRangeDecoder);\n else\n {\n UINT32 aSymbol = kNumLowSymbols;\n if(m_Choice2.Decode(aRangeDecoder) == 0)\n aSymbol += m_MidCoder[aPosState].Decode(aRangeDecoder);\n else\n {\n aSymbol += kNumMidSymbols;\n aSymbol += m_HighCoder.Decode(aRangeDecoder);\n }\n return aSymbol;\n }\n }\n\n};\n\n}\n\n\n#endif\n" }, { "alpha_fraction": 0.5479505658149719, "alphanum_fraction": 0.5704386234283447, "avg_line_length": 22.642967224121094, "blob_id": "78b2b4511da724ef847873444ec2e219ad56dd2c", "content_id": "3999f6342d29730a665864943872bae864d92fc9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 15297, "license_type": "no_license", "max_line_length": 154, "num_lines": 647, "path": "/src/win/dx_ddraw.cpp", "repo_name": "TeoTwawki/zsnes-archive", "src_encoding": "UTF-8", "text": "/*\nCopyright (C) 1997-2008 ZSNES Team ( zsKnight, _Demo_, pagefault, Nach )\n\nhttp://www.zsnes.com\nhttp://sourceforge.net/projects/zsnes\nhttps://zsnes.bountysource.com\n\nThis program is free software; you can redistribute it and/or\nmodify it under the terms of the GNU General Public License\nversion 2 as published by the Free Software Foundation.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with this program; if not, write to the Free Software\nFoundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.\n*/\n\n#define DIRECTINPUT_VERSION 0x0800\n#define DIRECTSOUND_VERSION 0x0800\n#define __STDC_CONSTANT_MACROS\n\nextern \"C\"\n{\n#include <windows.h>\n#include <stdio.h>\n#include <ddraw.h>\n#include \"../cfg.h\"\n void zexit(), zexit_error();\n}\n\n#include \"winlink.h\"\n\nstatic LPDIRECTDRAW BasiclpDD = NULL;\nstatic LPDIRECTDRAW7 lpDD = NULL;\nstatic LPDIRECTDRAWSURFACE7 DD_Primary = NULL;\nstatic LPDIRECTDRAWSURFACE7 DD_CFB = NULL;\nstatic LPDIRECTDRAWSURFACE7 DD_CFB16 = NULL;\nstatic LPDIRECTDRAWSURFACE7 DD_BackBuffer = NULL;\nstatic LPDIRECTDRAWCLIPPER lpDDClipper = NULL;\n\nDDSURFACEDESC2 ddsd;\n\nvoid DDrawError()\n{\n char message1[256];\n\n strcpy(message1,\n \"Error drawing to the screen\\nMake sure the device is not being used by another process\");\n MessageBox(NULL, message1, \"DirectDraw Error\", MB_ICONERROR);\n}\n\nvoid DrawScreen()\n{\n if (FullScreen == 1)\n {\n if (TripleBufferWin == 1 || KitchenSync == 1 || (KitchenSyncPAL == 1 && totlines == 314))\n {\n if (DD_BackBuffer->Blt(&rcWindow, DD_CFB, &BlitArea, DDBLT_WAIT, NULL) == DDERR_SURFACELOST)\n {\n DD_Primary->Restore();\n }\n\n if (DD_Primary->Flip(NULL, DDFLIP_WAIT) == DDERR_SURFACELOST)\n {\n DD_Primary->Restore();\n }\n\n if (KitchenSync == 1 || (KitchenSyncPAL == 1 && totlines == 314))\n {\n if (DD_BackBuffer->Blt(&rcWindow, DD_CFB, &BlitArea, DDBLT_WAIT, NULL) == DDERR_SURFACELOST)\n {\n DD_Primary->Restore();\n }\n\n if (DD_Primary->Flip(NULL, DDFLIP_WAIT) == DDERR_SURFACELOST)\n {\n DD_Primary->Restore();\n }\n }\n }\n else\n {\n if (vsyncon == 1 && curblank != 0x40)\n {\n if (lpDD->WaitForVerticalBlank(DDWAITVB_BLOCKBEGIN, NULL) != DD_OK)\n {\n DDrawError();\n }\n }\n DD_Primary->Blt(&rcWindow, DD_CFB, &BlitArea, DDBLT_WAIT, NULL);\n DD_Primary->Restore();\n }\n }\n else\n {\n if (vsyncon == 1)\n {\n if (lpDD->WaitForVerticalBlank(DDWAITVB_BLOCKBEGIN, NULL) != DD_OK)\n {\n DDrawError();\n }\n }\n DD_Primary->Blt(&rcWindow, AltSurface == 0 ? 
DD_CFB : DD_CFB16, &BlitArea, DDBLT_WAIT, NULL);\n }\n}\n\n\nDWORD LockSurface()\n{\n HRESULT hRes;\n\n if (AltSurface == 0)\n {\n if (DD_CFB != NULL)\n {\n memset(&ddsd, 0, sizeof(ddsd));\n ddsd.dwSize = sizeof(ddsd);\n ddsd.dwFlags = DDSD_LPSURFACE | DDSD_PITCH;\n\n hRes = DD_CFB->Lock(NULL, &ddsd, DDLOCK_WAIT, NULL);\n\n if (hRes == DD_OK)\n {\n SurfBuf = (BYTE *)ddsd.lpSurface;\n return(ddsd.lPitch);\n }\n else\n {\n if (hRes == DDERR_SURFACELOST)\n {\n DD_Primary->Restore();\n DD_CFB->Restore();\n Clear2xSaIBuffer();\n }\n return(0);\n }\n }\n else\n {\n return(0);\n }\n }\n else\n {\n if (DD_CFB16 != NULL)\n {\n memset(&ddsd, 0, sizeof(ddsd));\n ddsd.dwSize = sizeof(ddsd);\n ddsd.dwFlags = DDSD_LPSURFACE | DDSD_PITCH;\n\n hRes = DD_CFB16->Lock(NULL, &ddsd, DDLOCK_WAIT, NULL);\n\n if (hRes == DD_OK)\n {\n SurfBuf = (BYTE *)ddsd.lpSurface;\n return(ddsd.lPitch);\n }\n else\n {\n if (hRes == DDERR_SURFACELOST)\n {\n DD_Primary->Restore();\n DD_CFB16->Restore();\n Clear2xSaIBuffer();\n }\n return(0);\n }\n }\n else\n {\n return(0);\n }\n }\n}\n\nvoid UnlockSurface()\n{\n if (AltSurface == 0)\n {\n DD_CFB->Unlock((struct tagRECT *)ddsd.lpSurface);\n }\n else\n {\n DD_CFB16->Unlock((struct tagRECT *)ddsd.lpSurface);\n }\n}\n\nint InitDirectDraw()\n{\n DDSURFACEDESC2 ddsd2;\n DDPIXELFORMAT format;\n\n unsigned int color32, ScreenPtr2;\n int i, j, k, r, g, b, Y, u, v;\n\n ScreenPtr2 = BitConv32Ptr;\n for (i = 0; i < 65536; i++)\n {\n color32 = ((i & 0xF800) << 8) + ((i & 0x07E0) << 5) + ((i & 0x001F) << 3) + 0xFF000000;\n (*(unsigned int*)(ScreenPtr2)) = color32;\n ScreenPtr2 += 4;\n }\n\n for (i = 0; i < 32; i++)\n {\n for (j = 0; j < 64; j++)\n {\n for (k = 0; k < 32; k++)\n {\n r = i << 3;\n g = j << 2;\n b = k << 3;\n Y = (r + g + b) >> 2;\n u = 128 + ((r - b) >> 2);\n v = 128 + ((-r + 2 * g - b) >> 3);\n *(((unsigned int*)RGBtoYUVPtr) + (i << 11) + (j << 5) + k) = (Y << 16) + (u << 8) + v;\n }\n }\n }\n\n if (!hMainWindow)\n {\n zexit_error();\n }\n\n ReleaseDirectDraw();\n\n GetClientRect(hMainWindow, &rcWindow);\n ClientToScreen(hMainWindow, (LPPOINT)&rcWindow);\n ClientToScreen(hMainWindow, (LPPOINT)&rcWindow + 1);\n\n FullScreen = GUIWFVID[cvidmode];\n DSMode = GUIDSMODE[cvidmode];\n\n DWORD HQMode = 0;\n\n if (hqFilter != 0)\n {\n if ((GUIHQ2X[cvidmode] != 0) && (hqFilterlevel == 2))\n {\n HQMode = 2;\n }\n if ((GUIHQ3X[cvidmode] != 0) && (hqFilterlevel == 3))\n {\n HQMode = 3;\n }\n if ((GUIHQ4X[cvidmode] != 0) && (hqFilterlevel == 4))\n {\n HQMode = 4;\n }\n }\n\n BlitArea.top = 0;\n BlitArea.left = 0;\n BlitArea.right = SurfaceX;\n\n if (PrevRes == 0)\n {\n PrevRes = resolutn;\n }\n\n if (!FirstVid)\n {\n /*\n if (X<0)X=0;\n if (X>(int)(GetSystemMetrics(SM_CXSCREEN) - WindowWidth)) X=(GetSystemMetrics(SM_CXSCREEN) - WindowWidth);\n if (Y<0)Y=0;\n if (Y>(int)(GetSystemMetrics(SM_CYSCREEN) - WindowHeight)) Y=(GetSystemMetrics(SM_CYSCREEN) - WindowHeight);\n */\n\n if (FullScreen == 1)\n {\n X = 0; Y = 0;\n }\n\n if (FullScreen == 0 && newmode == 1)\n {\n X = MainWindowX; Y = MainWindowY;\n }\n else if (FullScreen == 0)\n {\n MainWindowX = X; MainWindowY = Y;\n }\n\n MoveWindow(hMainWindow, X, Y, WindowWidth, WindowHeight, TRUE);\n\n wndpl.length = sizeof(wndpl);\n GetWindowPlacement(hMainWindow, &wndpl);\n SetRect(&rc1, 0, 0, WindowWidth, WindowHeight);\n\n AdjustWindowRectEx(&rc1, GetWindowLong(hMainWindow, GWL_STYLE), GetMenu(hMainWindow) != NULL,\n GetWindowLong(hMainWindow, GWL_EXSTYLE));\n\n GetClientRect(hMainWindow, &rcWindow);\n ClientToScreen(hMainWindow, 
(LPPOINT)&rcWindow);\n ClientToScreen(hMainWindow, (LPPOINT)&rcWindow + 1);\n\n if (pDirectDrawCreateEx(NULL, (void**)&lpDD, IID_IDirectDraw7, NULL) != DD_OK)\n {\n MessageBox(NULL, \"DirectDrawCreateEx failed.\", \"DirectDraw Error\", MB_ICONERROR);\n return FALSE;\n }\n\n if (FullScreen == 1)\n {\n if (lpDD->SetCooperativeLevel(hMainWindow,\n DDSCL_FULLSCREEN | DDSCL_EXCLUSIVE | DDSCL_ALLOWREBOOT) != DD_OK)\n {\n MessageBox(NULL, \"IDirectDraw7::SetCooperativeLevel failed.\", \"DirectDraw Error\",\n MB_ICONERROR);\n return FALSE;\n }\n if (lpDD->SetDisplayMode(WindowWidth, WindowHeight, 16, Refresh, 0) != DD_OK)\n {\n if (lpDD->SetDisplayMode(WindowWidth, WindowHeight, 16, 0, 0) != DD_OK)\n {\n MessageBox(\n NULL,\n \"IDirectDraw7::SetDisplayMode failed.\\nMake sure your video card supports this mode.\",\n \"DirectDraw Error\", MB_ICONERROR);\n return FALSE;\n }\n else\n {\n KitchenSync = 0;\n KitchenSyncPAL = 0;\n Refresh = 0;\n }\n }\n }\n else\n {\n if (lpDD->SetCooperativeLevel(hMainWindow, DDSCL_NORMAL) != DD_OK)\n {\n MessageBox(NULL, \"IDirectDraw7::SetCooperativeLevel failed.\", \"DirectDraw Error\",\n MB_ICONERROR);\n return FALSE;\n }\n CheckAlwaysOnTop();\n }\n\n ZeroMemory(&ddsd2, sizeof(DDSURFACEDESC2));\n ddsd2.dwSize = sizeof(DDSURFACEDESC2);\n ddsd2.dwFlags = DDSD_CAPS;\n ddsd2.ddsCaps.dwCaps = DDSCAPS_PRIMARYSURFACE;\n\n if (FullScreen == 1)\n {\n ddsd2.dwFlags |= DDSD_BACKBUFFERCOUNT;\n ddsd2.dwBackBufferCount = 2;\n ddsd2.ddsCaps.dwCaps |= DDSCAPS_FLIP | DDSCAPS_COMPLEX;\n }\n\n HRESULT hRes = lpDD->CreateSurface(&ddsd2, &DD_Primary, NULL);\n\n if (FullScreen == 1)\n {\n if ((hRes == DDERR_OUTOFMEMORY) || (hRes == DDERR_OUTOFVIDEOMEMORY))\n {\n ddsd2.dwBackBufferCount = 1;\n hRes = lpDD->CreateSurface(&ddsd2, &DD_Primary, NULL);\n }\n }\n\n if (hRes != DD_OK)\n {\n MessageBox(NULL, \"IDirectDraw7::CreateSurface failed.\", \"DirectDraw Error\", MB_ICONERROR);\n 
return FALSE;\n }\n\n if (FullScreen == 1)\n {\n ddsd2.ddsCaps.dwCaps = DDSCAPS_BACKBUFFER;\n if (DD_Primary->GetAttachedSurface(&ddsd2.ddsCaps, &DD_BackBuffer) != DD_OK)\n {\n MessageBox(NULL, \"IDirectDrawSurface7::GetAttachedSurface failed.\", \"DirectDraw Error\",\n MB_ICONERROR);\n return FALSE;\n }\n }\n else\n {\n if (lpDD->CreateClipper(0, &lpDDClipper, NULL) != DD_OK)\n {\n lpDD->Release();\n lpDD = NULL;\n return FALSE;\n }\n\n if (lpDDClipper->SetHWnd(0, hMainWindow) != DD_OK)\n {\n lpDD->Release();\n lpDD = NULL;\n return FALSE;\n }\n\n if (DD_Primary->SetClipper(lpDDClipper) != DD_OK)\n {\n return FALSE;\n }\n }\n\n format.dwSize = sizeof(DDPIXELFORMAT);\n\n if (DD_Primary->GetPixelFormat(&format) != DD_OK)\n {\n MessageBox(NULL, \"IDirectDrawSurface7::GetPixelFormat failed.\", \"DirectDraw Error\",\n MB_ICONERROR);\n return FALSE;\n }\n\n BitDepth = format.dwRGBBitCount;\n GBitMask = format.dwGBitMask; // 0x07E0 or not\n\n if (BitDepth == 24)\n {\n MessageBox(\n NULL,\n \"ZSNESw does not support 24bit color.\\nPlease change your resolution to either 16bit or 32bit color\",\n \"Error\", MB_OK);\n zexit_error();\n }\n\n converta = (BitDepth == 16 && GBitMask != 0x07E0);\n\n ddsd2.dwSize = sizeof(ddsd2);\n ddsd2.dwFlags = DDSD_CAPS | DDSD_HEIGHT | DDSD_WIDTH;\n ddsd2.ddsCaps.dwCaps = DDSCAPS_OFFSCREENPLAIN;\n ddsd2.dwWidth = SurfaceX;\n ddsd2.dwHeight = SurfaceY;\n\n // create drawing surface\n if (lpDD->CreateSurface(&ddsd2, &DD_CFB, NULL) != DD_OK)\n {\n MessageBox(NULL, \"IDirectDraw7::CreateSurface failed.\", \"DirectDraw Error\", MB_ICONERROR);\n return FALSE;\n }\n\n AltSurface = 0;\n\n // create alt. drawing surface\n if (BitDepth == 32)\n {\n if (DMode == 1 && HQMode == 0)\n {\n ddsd2.ddsCaps.dwCaps |= DDSCAPS_VIDEOMEMORY;\n }\n ddsd2.dwFlags |= DDSD_PIXELFORMAT;\n ddsd2.ddpfPixelFormat.dwSize = sizeof(DDPIXELFORMAT);\n ddsd2.ddpfPixelFormat.dwFlags = DDPF_RGB;\n ddsd2.ddpfPixelFormat.dwRGBBitCount = 16;\n ddsd2.ddpfPixelFormat.dwRBitMask = 0xF800;\n ddsd2.ddpfPixelFormat.dwGBitMask = 0x07E0;\n ddsd2.ddpfPixelFormat.dwBBitMask = 0x001F;\n\n if (lpDD->CreateSurface(&ddsd2, &DD_CFB16, NULL) != DD_OK)\n {\n MessageBox(\n NULL,\n \"IDirectDraw7::CreateSurface failed. You should update your video card drivers. 
Alternatively, you could use a 16-bit desktop or use a non-D mode.\",\n \"DirectDraw Error\", MB_ICONERROR);\n return FALSE;\n }\n\n if (((SurfaceX == 512) || (SurfaceX == 602) || (SurfaceX == 640)) && (HQMode == 0))\n {\n AltSurface = 1;\n }\n }\n\n return TRUE;\n}\n\nvoid ReleaseDirectDraw()\n{\n if (DD_CFB)\n {\n DD_CFB->Release();\n DD_CFB = NULL;\n }\n\n if (DD_CFB16)\n {\n DD_CFB16->Release();\n DD_CFB16 = NULL;\n }\n\n if (lpDDClipper)\n {\n lpDDClipper->Release();\n lpDDClipper = NULL;\n }\n\n if (DD_Primary)\n {\n DD_Primary->Release();\n DD_Primary = NULL;\n }\n\n if (lpDD)\n {\n lpDD->Release();\n lpDD = NULL;\n }\n}\n\nvoid clear_ddraw()\n{\n if (FullScreen == 1)\n {\n DDBLTFX ddbltfx;\n\n ddbltfx.dwSize = sizeof(ddbltfx);\n ddbltfx.dwFillColor = 0;\n\n if (TripleBufferWin == 1)\n {\n if ((DD_Primary != NULL) && (DD_BackBuffer != NULL))\n {\n if (DD_BackBuffer->Blt(NULL, NULL, NULL, DDBLT_COLORFILL | DDBLT_WAIT, &ddbltfx) ==\n DDERR_SURFACELOST)\n {\n DD_Primary->Restore();\n }\n\n if (DD_Primary->Flip(NULL, DDFLIP_WAIT) == DDERR_SURFACELOST)\n {\n DD_Primary->Restore();\n }\n\n if (DD_BackBuffer->Blt(NULL, NULL, NULL, DDBLT_COLORFILL | DDBLT_WAIT, &ddbltfx) ==\n DDERR_SURFACELOST)\n {\n DD_Primary->Restore();\n }\n\n if (DD_Primary->Flip(NULL, DDFLIP_WAIT) == DDERR_SURFACELOST)\n {\n DD_Primary->Restore();\n }\n\n if (DD_BackBuffer->Blt(NULL, NULL, NULL, DDBLT_COLORFILL | DDBLT_WAIT, &ddbltfx) ==\n DDERR_SURFACELOST)\n {\n DD_Primary->Restore();\n }\n }\n }\n else\n {\n if (DD_Primary != NULL)\n {\n if (vsyncon == 1)\n {\n if (lpDD->WaitForVerticalBlank(DDWAITVB_BLOCKBEGIN, NULL) != DD_OK)\n {\n DDrawError();\n }\n }\n if (DD_Primary->Blt(NULL, NULL, NULL, DDBLT_COLORFILL | DDBLT_WAIT, &ddbltfx) ==\n DDERR_SURFACELOST)\n {\n DD_Primary->Restore();\n }\n }\n }\n }\n}\n" }, { "alpha_fraction": 0.505286455154419, "alphanum_fraction": 0.5419228076934814, "avg_line_length": 25.409090042114258, "blob_id": "e47c44d06f3b5a658942f5044437458765087076", "content_id": "ea1a64c60ba29831e49c000279f7637d22624287", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 4067, "license_type": "no_license", "max_line_length": 82, "num_lines": 154, "path": "/src/cpu/zspc/spc_bootrom.c", "repo_name": "TeoTwawki/zsnes-archive", "src_encoding": "UTF-8", "text": "/* Copyright (C) 2007 Shay Green. This module is free software; you\ncan redistribute it and/or modify it under the terms of the GNU Lesser\nGeneral Public License as published by the Free Software Foundation; either\nversion 2.1 of the License, or (at your option) any later version. This\nmodule is distributed in the hope that it will be useful, but WITHOUT ANY\nWARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS\nFOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more\ndetails. 
You should have received a copy of the GNU Lesser General Public\nLicense along with this module; if not, write to the Free Software Foundation,\nInc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */\n\n#include <assert.h>\n#include <string.h>\n\nstruct instr_t { int n; const char* str; };\n\n/* Instructions supported by assembler */\nstatic struct instr_t const instrs [] =\n{\n\t{0xD0,\"BNE\t@`\"},\n\t{0x10,\"BPL\t@`\"},\n\t{0x2F,\"BRA\t@`\"},\n\t{0x78,\"CMP\t#$~~, $~~\"},\n\t{0x7E,\"CMP\tY, $~~\"},\n\t{0x1D,\"DEC\tX\"},\n\t{0xAB,\"INC\t$~~\"},\n\t{0xFC,\"INC\tY\"},\n\t{0x1F,\"JMP\t[!$~~~~+X]\"},\n\t{0x8F,\"MOV\t$~~, #$~~\"},\n\t{0xC4,\"MOV\t$~~, A\"},\n\t{0xCB,\"MOV\t$~~, Y\"},\n\t{0xC6,\"MOV\t(X), A\"},\n\t{0xE8,\"MOV\tA, #$~~\"},\n\t{0xE4,\"MOV\tA, $~~\"},\n\t{0xDD,\"MOV\tA, Y\"},\n\t{0xBD,\"MOV\tSP, X\"},\n\t{0xCD,\"MOV\tX, #$~~\"},\n\t{0x5D,\"MOV\tX, A\"},\n\t{0xEB,\"MOV\tY, $~~\"},\n\t{0xD7,\"MOV\t[$~~]+Y, A\"},\n\t{0xDA,\"MOVW\t$~~, YA\"},\n\t{0xBA,\"MOVW\tYA, $~~\"},\n\t{0xC0,\"$C0\"},\n\t{0xFF,\"$FF\"},\n\t{-1,0}\n};\n\n/* Assembles SPC-700 assembly into machine code. Source points to array of string\npointers to source lines, terminated by a NULL string pointer. Very strict syntax,\nno error reporting. */\nstatic int assemble( const char* const* const source, unsigned char* const out )\n{\n\tint labels [16] = { 0 };\n\tint pass;\n\tint addr;\n\tfor ( pass = 2; pass--; )\n\t{\n\t\tint line;\n\t\taddr = 0;\n\t\tfor ( line = 0; source [line]; line++ )\n\t\t{\n\t\t\tstruct instr_t const* instr;\n\t\t\tconst char* in = source [line];\n\t\t\tif ( *in++ == '@' )\n\t\t\t{\n\t\t\t\tlabels [(*in - '0') & 0x0F] = addr;\n\t\t\t\tin += 3;\n\t\t\t}\n\n\t\t\tfor ( instr = instrs; instr->str; instr++ )\n\t\t\t{\n\t\t\t\tint data = 1;\n\t\t\t\tint i = 0;\n\t\t\t\tfor ( ; instr->str [i] && in [i]; i++ )\n\t\t\t\t{\n\t\t\t\t\tif ( instr->str [i] == '~' )\n\t\t\t\t\t{\n\t\t\t\t\t\tint n = in [i] - '0';\n\t\t\t\t\t\tif ( n > 9 )\n\t\t\t\t\t\t\tn -= 'A' - '9' - 1;\n\t\t\t\t\t\tdata = data * 0x10 + n;\n\t\t\t\t\t}\n\t\t\t\t\telse if ( instr->str [i] == '`' )\n\t\t\t\t\t{\n\t\t\t\t\t\tdata = ((labels [(in [i] - '0') & 0x0F] - 2 - addr) & 0xFF) | 0x100;\n\t\t\t\t\t}\n\t\t\t\t\telse if ( instr->str [i] != in [i] )\n\t\t\t\t\t{\n\t\t\t\t\t\tbreak;\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tif ( instr->str [i] == in [i] )\n\t\t\t\t{\n\t\t\t\t\tout [addr++] = instr->n;\n\t\t\t\t\twhile ( data > 1 )\n\t\t\t\t\t{\n\t\t\t\t\t\tout [addr++] = data & 0xFF;\n\t\t\t\t\t\tdata >>= 8;\n\t\t\t\t\t}\n\t\t\t\t\tbreak;\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn addr;\n}\n\n/* Source code to bootloader for SPC-700 */\nstatic const char* const bootrom_source [] =\n{\n\t\"\tMOV\tX, #$EF\",\t/* Initialize stack pointer */\n\t\"\tMOV\tSP, X\",\n\t\"\tMOV\tA, #$00\",\t/* Clear page 0 */\n\t\"@1:\tMOV\t(X), A\",\n\t\"\tDEC\tX\",\n\t\"\tBNE\t@1\",\n\t\"\tMOV\t$F4, #$AA\",\t/* Tell SNES we're ready */\n\t\"\tMOV\t$F5, #$BB\",\n\t\"@2:\tCMP\t#$F4, $CC\",\t/* Wait for signal from SNES */\n\t\"\tBNE\t@2\",\n\t\"\tBRA\t@6\",\t\t/* Get addr and command */\n\t\"@3:\tMOV\tY, $F4\",\t/* Wait for signal from SNES */\n\t\"\tBNE\t@3\",\n\t\"@4:\tCMP\tY, $F4\",\t/* Signal should be low byte of addr\t */\n\t\"\tBNE\t@5\",\n\t\"\tMOV\tA, $F5\",\t/* Get byte to write */\n\t\"\tMOV\t$F4, Y\",\t/* Acknowledge to SNES */\n\t\"\tMOV\t[$00]+Y, A\",\t/* Write to destination */\n\t\"\tINC\tY\",\t\t/* Increment addr */\n\t\"\tBNE\t@4\",\n\t\"\tINC\t$01\",\t\t/* Increment high byte of addr */\n\t\"@5:\tBPL\t@4\",\t\t/* Keep waiting if <= low byte 
of addr */\n\t\"\tCMP\tY, $F4\",\t/* Stop if signal > low byte of addr */\n\t\"\tBPL\t@4\",\n\t\"@6:\tMOVW\tYA, $F6\",\t/* Get addr */\n\t\"\tMOVW\t$00, YA\",\n\t\"\tMOVW\tYA, $F4\",\t/* Get command */\n\t\"\tMOV\t$F4, A\",\t/* Acknowledge to SNES */\n\t\"\tMOV\tA, Y\",\n\t\"\tMOV\tX, A\",\n\t\"\tBNE\t@3\",\t\t/* non-zero = transfer */\n\t\"\tJMP\t[!$0000+X]\",\t/* zero = execute */\n\t\"\t$C0\",\t\t\t/* reset vector */\n\t\"\t$FF\",\n\t0\n};\n\n/* Assembles SPC-700 IPL bootrom and writes to out */\nstatic void assemble_bootrom( unsigned char out [0x40] )\n{\n\tint len = assemble( bootrom_source, out );\n\tassert( len == 0x40 );\n}\n" }, { "alpha_fraction": 0.726457417011261, "alphanum_fraction": 0.7902342081069946, "avg_line_length": 41.70212936401367, "blob_id": "79821607bf1a26dcf2cae9bb3d750ea30a65df3a", "content_id": "b38f03b02b0f72fbdb4cc35dfb4a56a9a197e39f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 2007, "license_type": "no_license", "max_line_length": 89, "num_lines": 47, "path": "/src/gblvars.h", "repo_name": "TeoTwawki/zsnes-archive", "src_encoding": "UTF-8", "text": "/*\nCopyright (C) 1997-2008 ZSNES Team ( zsKnight, _Demo_, pagefault, Nach )\n\nhttp://www.zsnes.com\nhttp://sourceforge.net/projects/zsnes\nhttps://zsnes.bountysource.com\n\nThis program is free software; you can redistribute it and/or\nmodify it under the terms of the GNU General Public License\nversion 2 as published by the Free Software Foundation.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with this program; if not, write to the Free Software\nFoundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.\n*/\n\n#include <stdbool.h>\n#include <stdint.h>\n\nextern uint32_t cycpbl;\nextern uint32_t PHspcsave, PHdspsave;\nextern uint32_t PHnum2writesa1reg, SA1Mode, prevedi, SA1xpc, sa1dmaptr;\nextern uint32_t soundcycleft, spc700read, timer2upd, xa, PHnum2writesfxreg;\nextern uint32_t opcd, HIRQCycNext, oamaddr, curexecstate, nmiprevaddrl;\nextern uint32_t nmirept, nmiprevline, nmistatus, joycontren;\nextern uint32_t SfxR0, ramsize, nmiprevaddrh;\nextern uint32_t tempesi, tempedi, tempedx, tempebp;\nextern uint32_t SPCMultA, PHnum2writespc7110reg, PHdspsave2;\n\nextern uint32_t *wramdata, *vram, *C4Ram, *sfxramdata, *setaramdata, *sram;\nextern uint8_t *romdata, *SA1RAMArea;\n\nextern unsigned char sndrot, SPCRAM[65472], DSPMem[256], SA1Status;\nextern unsigned char DSP1Enable, DSP1COp, prevoamptr, BRRBuffer[32];\nextern unsigned char curcyc, echoon0, spcnumread, NextLineCache, HIRQNextExe;\nextern unsigned char vidmemch4[4096], vidmemch8[4096], vidmemch2[4096];\n\nextern bool C4Enable, SFXEnable, SA1Enable, SPC7110Enable, SETAEnable, DSP4Enable, spcon;\n\nextern int16_t C4WFXVal, C41FXVal, Op00Multiplicand, Op04Angle, Op08X, Op18X;\nextern int16_t Op28X, Op0CA, Op02FX, Op0AVS, Op06X, Op01m, Op0DX, Op03F, Op14Zr;\nextern int16_t Op0EH, Op10Coefficient;\n" }, { "alpha_fraction": 0.4180789291858673, "alphanum_fraction": 0.6339715123176575, "avg_line_length": 28.939966201782227, "blob_id": "a01c127d629828bdb7a6bc97b5d13fbd75028f58", "content_id": "f3b70b9055bb4e3ecaac4eb2308f21e1cfe215c1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 36407, 
"license_type": "no_license", "max_line_length": 151, "num_lines": 1216, "path": "/src/chips/dsp1emu.c", "repo_name": "TeoTwawki/zsnes-archive", "src_encoding": "UTF-8", "text": "/*\nCopyright (C) 1997-2008 ZSNES Team ( zsKnight, _Demo_, pagefault, Nach )\n\nhttp://www.zsnes.com\nhttp://sourceforge.net/projects/zsnes\nhttps://zsnes.bountysource.com\n\nThis program is free software; you can redistribute it and/or\nmodify it under the terms of the GNU General Public License\nversion 2 as published by the Free Software Foundation.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with this program; if not, write to the Free Software\nFoundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.\n*/\n\n#define __ZSNES__\n\n#if (defined __ZSNES__ && __UNIXSDL__)\n#include \"../gblhdr.h\"\n#else\n\n#include <stdio.h>\n#include <string.h>\n#include <stdlib.h>\n#endif\n//#define DebugDSP1\n\n#ifdef DebugDSP1\n\nFILE * LogFile = NULL;\n\nvoid Log_Message (char *Message, ...)\n{\n\tchar Msg[400];\n\tva_list ap;\n\n va_start(ap,Message);\n vsprintf(Msg,Message,ap );\n va_end(ap);\n\n strcat(Msg,\"\\r\\n\\0\");\n fwrite(Msg,strlen(Msg),1,LogFile);\n fflush (LogFile);\n}\n\nvoid Start_Log ()\n{\n\tchar LogFileName[255];\n// [4/15/2001]\tchar *p;\n\n strcpy(LogFileName,\"dsp1emu.log\\0\");\n\n LogFile = fopen(LogFileName,\"wb\");\n}\n\nvoid Stop_Log ()\n{\n if (LogFile)\n {\n fclose(LogFile);\n LogFile = NULL;\n }\n}\n\n#endif\n\nconst unsigned short DSP1ROM[1024] = {\n\t0x0000,\t0x0000,\t0x0000,\t0x0000,\t0x0000,\t0x0000,\t0x0000,\t0x0000,\n\t0x0000,\t0x0000,\t0x0000,\t0x0000,\t0x0000,\t0x0000,\t0x0000,\t0x0000,\n\t0x0000,\t0x0000,\t0x0000,\t0x0000,\t0x0000,\t0x0000,\t0x0000,\t0x0000,\n\t0x0000,\t0x0000,\t0x0000,\t0x0000,\t0x0000,\t0x0000,\t0x0000,\t0x0000,\n\t0x0000,\t0x0000,\t0x0001,\t0x0002,\t0x0004,\t0x0008,\t0x0010,\t0x0020,\n\t0x0040,\t0x0080,\t0x0100,\t0x0200,\t0x0400,\t0x0800,\t0x1000,\t0x2000,\n\t0x4000,\t0x7fff,\t0x4000,\t0x2000,\t0x1000,\t0x0800,\t0x0400,\t0x0200,\n\t0x0100,\t0x0080,\t0x0040,\t0x0020,\t0x0001,\t0x0008,\t0x0004,\t0x0002,\n\t0x0001,\t0x0000,\t0x0000,\t0x0000,\t0x0000,\t0x0000,\t0x0000,\t0x0000,\n\t0x0000,\t0x0000,\t0x0000,\t0x0000,\t0x0000,\t0x0000,\t0x0000,\t0x0000,\n\t0x0000,\t0x0000,\t0x0000,\t0x0000,\t0x0000,\t0x0000,\t0x0000,\t0x0000,\n\t0x0000,\t0x0000,\t0x0000,\t0x0000,\t0x0000,\t0x0000,\t0x0000,\t0x0000,\n\t0x0000,\t0x0000,\t0x8000,\t0xffe5,\t0x0100,\t0x7fff,\t0x7f02,\t0x7e08,\n\t0x7d12,\t0x7c1f,\t0x7b30,\t0x7a45,\t0x795d,\t0x7878,\t0x7797,\t0x76ba,\n\t0x75df,\t0x7507,\t0x7433,\t0x7361,\t0x7293,\t0x71c7,\t0x70fe,\t0x7038,\n\t0x6f75,\t0x6eb4,\t0x6df6,\t0x6d3a,\t0x6c81,\t0x6bca,\t0x6b16,\t0x6a64,\n\t0x69b4,\t0x6907,\t0x685b,\t0x67b2,\t0x670b,\t0x6666,\t0x65c4,\t0x6523,\n\t0x6484,\t0x63e7,\t0x634c,\t0x62b3,\t0x621c,\t0x6186,\t0x60f2,\t0x6060,\n\t0x5fd0,\t0x5f41,\t0x5eb5,\t0x5e29,\t0x5d9f,\t0x5d17,\t0x5c91,\t0x5c0c,\n\t0x5b88,\t0x5b06,\t0x5a85,\t0x5a06,\t0x5988,\t0x590b,\t0x5890,\t0x5816,\n\t0x579d,\t0x5726,\t0x56b0,\t0x563b,\t0x55c8,\t0x5555,\t0x54e4,\t0x5474,\n\t0x5405,\t0x5398,\t0x532b,\t0x52bf,\t0x5255,\t0x51ec,\t0x5183,\t0x511c,\n\t0x50b6,\t0x5050,\t0x4fec,\t0x4f89,\t0x4f26,\t0x4ec5,\t0x4e64,\t0x4e05,\n\t0x4da6,\t0x4d48,\t0x4cec,\t0x4c90,\t0x4c34,\t0x4bda,\t0x4b81,\t0x4b28,\n\t0x4ad0,\t0x4a79,\t0x4a23,\t0x49cd,\t
0x4979,\t0x4925,\t0x48d1,\t0x487f,\n\t0x482d,\t0x47dc,\t0x478c,\t0x473c,\t0x46ed,\t0x469f,\t0x4651,\t0x4604,\n\t0x45b8,\t0x456c,\t0x4521,\t0x44d7,\t0x448d,\t0x4444,\t0x43fc,\t0x43b4,\n\t0x436d,\t0x4326,\t0x42e0,\t0x429a,\t0x4255,\t0x4211,\t0x41cd,\t0x4189,\n\t0x4146,\t0x4104,\t0x40c2,\t0x4081,\t0x4040,\t0x3fff,\t0x41f7,\t0x43e1,\n\t0x45bd,\t0x478d,\t0x4951,\t0x4b0b,\t0x4cbb,\t0x4e61,\t0x4fff,\t0x5194,\n\t0x5322,\t0x54a9,\t0x5628,\t0x57a2,\t0x5914,\t0x5a81,\t0x5be9,\t0x5d4a,\n\t0x5ea7,\t0x5fff,\t0x6152,\t0x62a0,\t0x63ea,\t0x6530,\t0x6672,\t0x67b0,\n\t0x68ea,\t0x6a20,\t0x6b53,\t0x6c83,\t0x6daf,\t0x6ed9,\t0x6fff,\t0x7122,\n\t0x7242,\t0x735f,\t0x747a,\t0x7592,\t0x76a7,\t0x77ba,\t0x78cb,\t0x79d9,\n\t0x7ae5,\t0x7bee,\t0x7cf5,\t0x7dfa,\t0x7efe,\t0x7fff,\t0x0000,\t0x0324,\n\t0x0647,\t0x096a,\t0x0c8b,\t0x0fab,\t0x12c8,\t0x15e2,\t0x18f8,\t0x1c0b,\n\t0x1f19,\t0x2223,\t0x2528,\t0x2826,\t0x2b1f,\t0x2e11,\t0x30fb,\t0x33de,\n\t0x36ba,\t0x398c,\t0x3c56,\t0x3f17,\t0x41ce,\t0x447a,\t0x471c,\t0x49b4,\n\t0x4c3f,\t0x4ebf,\t0x5133,\t0x539b,\t0x55f5,\t0x5842,\t0x5a82,\t0x5cb4,\n\t0x5ed7,\t0x60ec,\t0x62f2,\t0x64e8,\t0x66cf,\t0x68a6,\t0x6a6d,\t0x6c24,\n\t0x6dca,\t0x6f5f,\t0x70e2,\t0x7255,\t0x73b5,\t0x7504,\t0x7641,\t0x776c,\n\t0x7884,\t0x798a,\t0x7a7d,\t0x7b5d,\t0x7c29,\t0x7ce3,\t0x7d8a,\t0x7e1d,\n\t0x7e9d,\t0x7f09,\t0x7f62,\t0x7fa7,\t0x7fd8,\t0x7ff6,\t0x7fff,\t0x7ff6,\n\t0x7fd8,\t0x7fa7,\t0x7f62,\t0x7f09,\t0x7e9d,\t0x7e1d,\t0x7d8a,\t0x7ce3,\n\t0x7c29,\t0x7b5d,\t0x7a7d,\t0x798a,\t0x7884,\t0x776c,\t0x7641,\t0x7504,\n\t0x73b5,\t0x7255,\t0x70e2,\t0x6f5f,\t0x6dca,\t0x6c24,\t0x6a6d,\t0x68a6,\n\t0x66cf,\t0x64e8,\t0x62f2,\t0x60ec,\t0x5ed7,\t0x5cb4,\t0x5a82,\t0x5842,\n\t0x55f5,\t0x539b,\t0x5133,\t0x4ebf,\t0x4c3f,\t0x49b4,\t0x471c,\t0x447a,\n\t0x41ce,\t0x3f17,\t0x3c56,\t0x398c,\t0x36ba,\t0x33de,\t0x30fb,\t0x2e11,\n\t0x2b1f,\t0x2826,\t0x2528,\t0x2223,\t0x1f19,\t0x1c0b,\t0x18f8,\t0x15e2,\n\t0x12c8,\t0x0fab,\t0x0c8b,\t0x096a,\t0x0647,\t0x0324,\t0x7fff,\t0x7ff6,\n\t0x7fd8,\t0x7fa7,\t0x7f62,\t0x7f09,\t0x7e9d,\t0x7e1d,\t0x7d8a,\t0x7ce3,\n\t0x7c29,\t0x7b5d,\t0x7a7d,\t0x798a,\t0x7884,\t0x776c,\t0x7641,\t0x7504,\n\t0x73b5,\t0x7255,\t0x70e2,\t0x6f5f,\t0x6dca,\t0x6c24,\t0x6a6d,\t0x68a6,\n\t0x66cf,\t0x64e8,\t0x62f2,\t0x60ec,\t0x5ed7,\t0x5cb4,\t0x5a82,\t0x5842,\n\t0x55f5,\t0x539b,\t0x5133,\t0x4ebf,\t0x4c3f,\t0x49b4,\t0x471c,\t0x447a,\n\t0x41ce,\t0x3f17,\t0x3c56,\t0x398c,\t0x36ba,\t0x33de,\t0x30fb,\t0x2e11,\n\t0x2b1f,\t0x2826,\t0x2528,\t0x2223,\t0x1f19,\t0x1c0b,\t0x18f8,\t0x15e2,\n\t0x12c8,\t0x0fab,\t0x0c8b,\t0x096a,\t0x0647,\t0x0324,\t0x0000,\t0xfcdc,\n\t0xf9b9,\t0xf696,\t0xf375,\t0xf055,\t0xed38,\t0xea1e,\t0xe708,\t0xe3f5,\n\t0xe0e7,\t0xdddd,\t0xdad8,\t0xd7da,\t0xd4e1,\t0xd1ef,\t0xcf05,\t0xcc22,\n\t0xc946,\t0xc674,\t0xc3aa,\t0xc0e9,\t0xbe32,\t0xbb86,\t0xb8e4,\t0xb64c,\n\t0xb3c1,\t0xb141,\t0xaecd,\t0xac65,\t0xaa0b,\t0xa7be,\t0xa57e,\t0xa34c,\n\t0xa129,\t0x9f14,\t0x9d0e,\t0x9b18,\t0x9931,\t0x975a,\t0x9593,\t0x93dc,\n\t0x9236,\t0x90a1,\t0x8f1e,\t0x8dab,\t0x8c4b,\t0x8afc,\t0x89bf,\t0x8894,\n\t0x877c,\t0x8676,\t0x8583,\t0x84a3,\t0x83d7,\t0x831d,\t0x8276,\t0x81e3,\n\t0x8163,\t0x80f7,\t0x809e,\t0x8059,\t0x8028,\t0x800a,\t0x6488,\t0x0080,\n\t0x03ff,\t0x0116,\t0x0002,\t0x0080,\t0x4000,\t0x3fd7,\t0x3faf,\t0x3f86,\n\t0x3f5d,\t0x3f34,\t0x3f0c,\t0x3ee3,\t0x3eba,\t0x3e91,\t0x3e68,\t0x3e40,\n\t0x3e17,\t0x3dee,\t0x3dc5,\t0x3d9c,\t0x3d74,\t0x3d4b,\t0x3d22,\t0x3cf9,\n\t0x3cd0,\t0x3ca7,\t0x3c7f,\t0x3c56,\t0x3c2d,\t0x3c04,\t0x3bdb,\t0x3bb2,\n\t0x3b89,\t0x3b60,\t0x3b37,\t0x3b0e,\t0x3ae5,\t0x3abc,\t0x3a93,\t0x3a69,\n\t0x3a40,\t0x3a17,\t0x39ee,\t0x39c5,\t0x3
99c,\t0x3972,\t0x3949,\t0x3920,\n\t0x38f6,\t0x38cd,\t0x38a4,\t0x387a,\t0x3851,\t0x3827,\t0x37fe,\t0x37d4,\n\t0x37aa,\t0x3781,\t0x3757,\t0x372d,\t0x3704,\t0x36da,\t0x36b0,\t0x3686,\n\t0x365c,\t0x3632,\t0x3609,\t0x35df,\t0x35b4,\t0x358a,\t0x3560,\t0x3536,\n\t0x350c,\t0x34e1,\t0x34b7,\t0x348d,\t0x3462,\t0x3438,\t0x340d,\t0x33e3,\n\t0x33b8,\t0x338d,\t0x3363,\t0x3338,\t0x330d,\t0x32e2,\t0x32b7,\t0x328c,\n\t0x3261,\t0x3236,\t0x320b,\t0x31df,\t0x31b4,\t0x3188,\t0x315d,\t0x3131,\n\t0x3106,\t0x30da,\t0x30ae,\t0x3083,\t0x3057,\t0x302b,\t0x2fff,\t0x2fd2,\n\t0x2fa6,\t0x2f7a,\t0x2f4d,\t0x2f21,\t0x2ef4,\t0x2ec8,\t0x2e9b,\t0x2e6e,\n\t0x2e41,\t0x2e14,\t0x2de7,\t0x2dba,\t0x2d8d,\t0x2d60,\t0x2d32,\t0x2d05,\n\t0x2cd7,\t0x2ca9,\t0x2c7b,\t0x2c4d,\t0x2c1f,\t0x2bf1,\t0x2bc3,\t0x2b94,\n\t0x2b66,\t0x2b37,\t0x2b09,\t0x2ada,\t0x2aab,\t0x2a7c,\t0x2a4c,\t0x2a1d,\n\t0x29ed,\t0x29be,\t0x298e,\t0x295e,\t0x292e,\t0x28fe,\t0x28ce,\t0x289d,\n\t0x286d,\t0x283c,\t0x280b,\t0x27da,\t0x27a9,\t0x2777,\t0x2746,\t0x2714,\n\t0x26e2,\t0x26b0,\t0x267e,\t0x264c,\t0x2619,\t0x25e7,\t0x25b4,\t0x2581,\n\t0x254d,\t0x251a,\t0x24e6,\t0x24b2,\t0x247e,\t0x244a,\t0x2415,\t0x23e1,\n\t0x23ac,\t0x2376,\t0x2341,\t0x230b,\t0x22d6,\t0x229f,\t0x2269,\t0x2232,\n\t0x21fc,\t0x21c4,\t0x218d,\t0x2155,\t0x211d,\t0x20e5,\t0x20ad,\t0x2074,\n\t0x203b,\t0x2001,\t0x1fc7,\t0x1f8d,\t0x1f53,\t0x1f18,\t0x1edd,\t0x1ea1,\n\t0x1e66,\t0x1e29,\t0x1ded,\t0x1db0,\t0x1d72,\t0x1d35,\t0x1cf6,\t0x1cb8,\n\t0x1c79,\t0x1c39,\t0x1bf9,\t0x1bb8,\t0x1b77,\t0x1b36,\t0x1af4,\t0x1ab1,\n\t0x1a6e,\t0x1a2a,\t0x19e6,\t0x19a1,\t0x195c,\t0x1915,\t0x18ce,\t0x1887,\n\t0x183f,\t0x17f5,\t0x17ac,\t0x1761,\t0x1715,\t0x16c9,\t0x167c,\t0x162e,\n\t0x15df,\t0x158e,\t0x153d,\t0x14eb,\t0x1497,\t0x1442,\t0x13ec,\t0x1395,\n\t0x133c,\t0x12e2,\t0x1286,\t0x1228,\t0x11c9,\t0x1167,\t0x1104,\t0x109e,\n\t0x1036,\t0x0fcc,\t0x0f5f,\t0x0eef,\t0x0e7b,\t0x0e04,\t0x0d89,\t0x0d0a,\n\t0x0c86,\t0x0bfd,\t0x0b6d,\t0x0ad6,\t0x0a36,\t0x098d,\t0x08d7,\t0x0811,\n\t0x0736,\t0x063e,\t0x0519,\t0x039a,\t0x0000,\t0x7fff,\t0x0100,\t0x0080,\n\t0x021d,\t0x00c8,\t0x00ce,\t0x0048,\t0x0a26,\t0x277a,\t0x00ce,\t0x6488,\n\t0x14ac,\t0x0001,\t0x00f9,\t0x00fc,\t0x00ff,\t0x00fc,\t0x00f9,\t0xffff,\n\t0xffff,\t0xffff,\t0xffff,\t0xffff,\t0xffff,\t0xffff,\t0xffff,\t0xffff,\n\t0xffff,\t0xffff,\t0xffff,\t0xffff,\t0xffff,\t0xffff,\t0xffff,\t0xffff,\n\t0xffff,\t0xffff,\t0xffff,\t0xffff,\t0xffff,\t0xffff,\t0xffff,\t0xffff,\n\t0xffff,\t0xffff,\t0xffff,\t0xffff,\t0xffff,\t0xffff,\t0xffff,\t0xffff,\n\t0xffff,\t0xffff,\t0xffff,\t0xffff,\t0xffff,\t0xffff,\t0xffff,\t0xffff,\n\t0xffff,\t0xffff,\t0xffff,\t0xffff,\t0xffff,\t0xffff,\t0xffff,\t0xffff,\n\t0xffff,\t0xffff,\t0xffff,\t0xffff,\t0xffff,\t0xffff,\t0xffff,\t0xffff,\n\t0xffff,\t0xffff,\t0xffff,\t0xffff,\t0xffff,\t0xffff,\t0xffff,\t0xffff,\n\t0xffff,\t0xffff,\t0xffff,\t0xffff,\t0xffff,\t0xffff,\t0xffff,\t0xffff,\n\t0xffff,\t0xffff,\t0xffff,\t0xffff,\t0xffff,\t0xffff,\t0xffff,\t0xffff,\n\t0xffff,\t0xffff,\t0xffff,\t0xffff,\t0xffff,\t0xffff,\t0xffff,\t0xffff,\n\t0xffff,\t0xffff,\t0xffff,\t0xffff,\t0xffff,\t0xffff,\t0xffff,\t0xffff,\n\t0xffff,\t0xffff,\t0xffff,\t0xffff,\t0xffff,\t0xffff,\t0xffff,\t0xffff,\n\t0xffff,\t0xffff,\t0xffff,\t0xffff,\t0xffff,\t0xffff,\t0xffff,\t0xffff,\n\t0xffff,\t0xffff,\t0xffff,\t0xffff,\t0xffff,\t0xffff,\t0xffff,\t0xffff,\n\t0xffff,\t0xffff,\t0xffff,\t0xffff,\t0xffff,\t0xffff,\t0xffff,\t0xffff,\n\t0xffff,\t0xffff,\t0xffff,\t0xffff,\t0xffff,\t0xffff,\t0xffff,\t0xffff,\n\t0xffff,\t0xffff,\t0xffff,\t0xffff,\t0xffff,\t0xffff,\t0xffff,\t0xffff,\n\t0xffff,\t0xffff,\t0xffff,\t0xffff,\t0xffff
,\t0xffff,\t0xffff,\t0xffff,\n\t0xffff,\t0xffff,\t0xffff,\t0xffff,\t0xffff,\t0xffff,\t0xffff,\t0xffff,\n\t0xffff,\t0xffff,\t0xffff,\t0xffff,\t0xffff,\t0xffff,\t0xffff,\t0xffff,\n\t0xffff,\t0xffff,\t0xffff,\t0xffff,\t0xffff,\t0xffff,\t0xffff,\t0xffff,\n\t0xffff,\t0xffff,\t0xffff,\t0xffff,\t0xffff,\t0xffff,\t0xffff,\t0xffff,\n\t0xffff,\t0xffff,\t0xffff,\t0xffff,\t0xffff,\t0xffff,\t0xffff,\t0xffff,\n\t0xffff,\t0xffff,\t0xffff,\t0xffff,\t0xffff,\t0xffff,\t0xffff,\t0xffff,\n\t0xffff,\t0xffff,\t0xffff,\t0xffff,\t0xffff,\t0xffff,\t0xffff,\t0xffff};\n\n/***************************************************************************\\\n* DSP1 code *\n\\***************************************************************************/\n\nvoid InitDSP()\n{\n#ifdef DebugDSP1\n\tStart_Log();\n#endif\n}\n\nshort Op00Multiplicand;\nshort Op00Multiplier;\nshort Op00Result;\n\nvoid DSPOp00()\n{\n Op00Result= Op00Multiplicand * Op00Multiplier >> 15;\n\n #ifdef DebugDSP1\n Log_Message(\"OP00 MULT %d*%d/32768=%d\",Op00Multiplicand,Op00Multiplier,Op00Result);\n #endif\n}\n\nshort Op20Multiplicand;\nshort Op20Multiplier;\nshort Op20Result;\n\nvoid DSPOp20()\n{\n Op20Result= Op20Multiplicand * Op20Multiplier >> 15;\n Op20Result++;\n\n #ifdef DebugDSP1\n Log_Message(\"OP20 MULT %d*%d/32768=%d\",Op20Multiplicand,Op20Multiplier,Op20Result);\n #endif\n}\n\nsigned short Op10Coefficient;\nsigned short Op10Exponent;\nsigned short Op10CoefficientR;\nsigned short Op10ExponentR;\n\nvoid DSP1_Inverse(short Coefficient, short Exponent, short *iCoefficient, short *iExponent)\n{\n\t// Step One: Division by Zero\n\tif (Coefficient == 0x0000)\n\t{\n\t\t*iCoefficient = 0x7fff;\n\t\t*iExponent = 0x002f;\n\t}\n\telse\n\t{\n\t\tshort Sign = 1;\n\n\t\t// Step Two: Remove Sign\n\t\tif (Coefficient < 0)\n\t\t{\n\t\t\tif (Coefficient < -32767) Coefficient = -32767;\n\t\t\tCoefficient = -Coefficient;\n\t\t\tSign = -1;\n\t\t}\n\n\t\t// Step Three: Normalize\n\t\twhile (Coefficient < 0x4000)\n\t\t{\n\t\t\tCoefficient <<= 1;\n\t\t\tExponent--;\n\t\t}\n\n\t\t// Step Four: Special Case\n\t\tif (Coefficient == 0x4000)\n\t\t\tif (Sign == 1) *iCoefficient = 0x7fff;\n\t\t\telse {\n\t\t\t\t*iCoefficient = -0x4000;\n\t\t\t\tExponent--;\n\t\t\t}\n\t\telse {\n\t\t\t// Step Five: Initial Guess\n\t\t\tshort i = DSP1ROM[((Coefficient - 0x4000) >> 7) + 0x0065];\n\n\t\t\t// Step Six: Iterate \"estimated\" Newton's Method\n\t\t\ti = (i + (-i * (Coefficient * i >> 15) >> 15)) << 1;\n\t\t\ti = (i + (-i * (Coefficient * i >> 15) >> 15)) << 1;\n\n\t\t\t*iCoefficient = i * Sign;\n\t\t}\n\n\t\t*iExponent = 1 - Exponent;\n\t}\n}\n\nvoid DSPOp10()\n{\n\tDSP1_Inverse(Op10Coefficient, Op10Exponent, &Op10CoefficientR, &Op10ExponentR);\n\t#ifdef DebugDSP1\n Log_Message(\"OP10 INV %d*2^%d = %d*2^%d\", Op10Coefficient, Op10Exponent, Op10CoefficientR, Op10ExponentR);\n\t#endif\n}\n\nshort Op04Angle;\nshort Op04Radius;\nshort Op04Sin;\nshort Op04Cos;\n\nconst short DSP1_MulTable[256] = {\n\t 0x0000, 0x0003, 0x0006, 0x0009, 0x000c, 0x000f, 0x0012, 0x0015,\n\t 0x0019, 0x001c, 0x001f, 0x0022, 0x0025, 0x0028, 0x002b, 0x002f,\n\t 0x0032, 0x0035, 0x0038, 0x003b, 0x003e, 0x0041, 0x0045, 0x0048,\n\t 0x004b, 0x004e, 0x0051, 0x0054, 0x0057, 0x005b, 0x005e, 0x0061,\n\t 0x0064, 0x0067, 0x006a, 0x006d, 0x0071, 0x0074, 0x0077, 0x007a,\n\t 0x007d, 0x0080, 0x0083, 0x0087, 0x008a, 0x008d, 0x0090, 0x0093,\n\t 0x0096, 0x0099, 0x009d, 0x00a0, 0x00a3, 0x00a6, 0x00a9, 0x00ac,\n\t 0x00af, 0x00b3, 0x00b6, 0x00b9, 0x00bc, 0x00bf, 0x00c2, 0x00c5,\n\t 0x00c9, 0x00cc, 0x00cf, 0x00d2, 0x00d5, 0x00d8, 0x00db, 
0x00df,\n\t 0x00e2, 0x00e5, 0x00e8, 0x00eb, 0x00ee, 0x00f1, 0x00f5, 0x00f8,\n\t 0x00fb, 0x00fe, 0x0101, 0x0104, 0x0107, 0x010b, 0x010e, 0x0111,\n\t 0x0114, 0x0117, 0x011a, 0x011d, 0x0121, 0x0124, 0x0127, 0x012a,\n\t 0x012d, 0x0130, 0x0133, 0x0137, 0x013a, 0x013d, 0x0140, 0x0143,\n\t 0x0146, 0x0149, 0x014d, 0x0150, 0x0153, 0x0156, 0x0159, 0x015c,\n\t 0x015f, 0x0163, 0x0166, 0x0169, 0x016c, 0x016f, 0x0172, 0x0175,\n\t 0x0178, 0x017c, 0x017f, 0x0182, 0x0185, 0x0188, 0x018b, 0x018e,\n\t 0x0192, 0x0195, 0x0198, 0x019b, 0x019e, 0x01a1, 0x01a4, 0x01a8,\n\t 0x01ab, 0x01ae, 0x01b1, 0x01b4, 0x01b7, 0x01ba, 0x01be, 0x01c1,\n\t 0x01c4, 0x01c7, 0x01ca, 0x01cd, 0x01d0, 0x01d4, 0x01d7, 0x01da,\n\t 0x01dd, 0x01e0, 0x01e3, 0x01e6, 0x01ea, 0x01ed, 0x01f0, 0x01f3,\n\t 0x01f6, 0x01f9, 0x01fc, 0x0200, 0x0203, 0x0206, 0x0209, 0x020c,\n\t 0x020f, 0x0212, 0x0216, 0x0219, 0x021c, 0x021f, 0x0222, 0x0225,\n\t 0x0228, 0x022c, 0x022f, 0x0232, 0x0235, 0x0238, 0x023b, 0x023e,\n\t 0x0242, 0x0245, 0x0248, 0x024b, 0x024e, 0x0251, 0x0254, 0x0258,\n\t 0x025b, 0x025e, 0x0261, 0x0264, 0x0267, 0x026a, 0x026e, 0x0271,\n\t 0x0274, 0x0277, 0x027a, 0x027d, 0x0280, 0x0284, 0x0287, 0x028a,\n\t 0x028d, 0x0290, 0x0293, 0x0296, 0x029a, 0x029d, 0x02a0, 0x02a3,\n\t 0x02a6, 0x02a9, 0x02ac, 0x02b0, 0x02b3, 0x02b6, 0x02b9, 0x02bc,\n\t 0x02bf, 0x02c2, 0x02c6, 0x02c9, 0x02cc, 0x02cf, 0x02d2, 0x02d5,\n\t 0x02d8, 0x02db, 0x02df, 0x02e2, 0x02e5, 0x02e8, 0x02eb, 0x02ee,\n\t 0x02f1, 0x02f5, 0x02f8, 0x02fb, 0x02fe, 0x0301, 0x0304, 0x0307,\n\t 0x030b, 0x030e, 0x0311, 0x0314, 0x0317, 0x031a, 0x031d, 0x0321};\n\nconst short DSP1_SinTable[256] = {\n\t 0x0000, 0x0324, 0x0647, 0x096a, 0x0c8b, 0x0fab, 0x12c8, 0x15e2,\n\t 0x18f8, 0x1c0b, 0x1f19, 0x2223, 0x2528, 0x2826, 0x2b1f, 0x2e11,\n\t 0x30fb, 0x33de, 0x36ba, 0x398c, 0x3c56, 0x3f17, 0x41ce, 0x447a,\n\t 0x471c, 0x49b4, 0x4c3f, 0x4ebf, 0x5133, 0x539b, 0x55f5, 0x5842,\n\t 0x5a82, 0x5cb4, 0x5ed7, 0x60ec, 0x62f2, 0x64e8, 0x66cf, 0x68a6,\n\t 0x6a6d, 0x6c24, 0x6dca, 0x6f5f, 0x70e2, 0x7255, 0x73b5, 0x7504,\n\t 0x7641, 0x776c, 0x7884, 0x798a, 0x7a7d, 0x7b5d, 0x7c29, 0x7ce3,\n\t 0x7d8a, 0x7e1d, 0x7e9d, 0x7f09, 0x7f62, 0x7fa7, 0x7fd8, 0x7ff6,\n\t 0x7fff, 0x7ff6, 0x7fd8, 0x7fa7, 0x7f62, 0x7f09, 0x7e9d, 0x7e1d,\n\t 0x7d8a, 0x7ce3, 0x7c29, 0x7b5d, 0x7a7d, 0x798a, 0x7884, 0x776c,\n\t 0x7641, 0x7504, 0x73b5, 0x7255, 0x70e2, 0x6f5f, 0x6dca, 0x6c24,\n\t 0x6a6d, 0x68a6, 0x66cf, 0x64e8, 0x62f2, 0x60ec, 0x5ed7, 0x5cb4,\n\t 0x5a82, 0x5842, 0x55f5, 0x539b, 0x5133, 0x4ebf, 0x4c3f, 0x49b4,\n\t 0x471c, 0x447a, 0x41ce, 0x3f17, 0x3c56, 0x398c, 0x36ba, 0x33de,\n\t 0x30fb, 0x2e11, 0x2b1f, 0x2826, 0x2528, 0x2223, 0x1f19, 0x1c0b,\n\t 0x18f8, 0x15e2, 0x12c8, 0x0fab, 0x0c8b, 0x096a, 0x0647, 0x0324,\n\t -0x0000, -0x0324, -0x0647, -0x096a, -0x0c8b, -0x0fab, -0x12c8, -0x15e2,\n\t -0x18f8, -0x1c0b, -0x1f19, -0x2223, -0x2528, -0x2826, -0x2b1f, -0x2e11,\n\t -0x30fb, -0x33de, -0x36ba, -0x398c, -0x3c56, -0x3f17, -0x41ce, -0x447a,\n\t -0x471c, -0x49b4, -0x4c3f, -0x4ebf, -0x5133, -0x539b, -0x55f5, -0x5842,\n\t -0x5a82, -0x5cb4, -0x5ed7, -0x60ec, -0x62f2, -0x64e8, -0x66cf, -0x68a6,\n\t -0x6a6d, -0x6c24, -0x6dca, -0x6f5f, -0x70e2, -0x7255, -0x73b5, -0x7504,\n\t -0x7641, -0x776c, -0x7884, -0x798a, -0x7a7d, -0x7b5d, -0x7c29, -0x7ce3,\n\t -0x7d8a, -0x7e1d, -0x7e9d, -0x7f09, -0x7f62, -0x7fa7, -0x7fd8, -0x7ff6,\n\t -0x7fff, -0x7ff6, -0x7fd8, -0x7fa7, -0x7f62, -0x7f09, -0x7e9d, -0x7e1d,\n\t -0x7d8a, -0x7ce3, -0x7c29, -0x7b5d, -0x7a7d, -0x798a, -0x7884, -0x776c,\n\t -0x7641, -0x7504, -0x73b5, -0x7255, -0x70e2, -0x6f5f, -0x6dca, -0x6c24,\n\t -0x6a6d, 
-0x68a6, -0x66cf, -0x64e8, -0x62f2, -0x60ec, -0x5ed7, -0x5cb4,\n\t -0x5a82, -0x5842, -0x55f5, -0x539b, -0x5133, -0x4ebf, -0x4c3f, -0x49b4,\n\t -0x471c, -0x447a, -0x41ce, -0x3f17, -0x3c56, -0x398c, -0x36ba, -0x33de,\n\t -0x30fb, -0x2e11, -0x2b1f, -0x2826, -0x2528, -0x2223, -0x1f19, -0x1c0b,\n\t -0x18f8, -0x15e2, -0x12c8, -0x0fab, -0x0c8b, -0x096a, -0x0647, -0x0324};\n\nshort DSP1_Sin(short Angle)\n{\n int S;\n\tif (Angle < 0) {\n\t\tif (Angle == -32768) return 0;\n\t\treturn -DSP1_Sin(-Angle);\n\t}\n S = DSP1_SinTable[Angle >> 8] + (DSP1_MulTable[Angle & 0xff] * DSP1_SinTable[0x40 + (Angle >> 8)] >> 15);\n\tif (S > 32767) S = 32767;\n\treturn (short) S;\n}\n\nshort DSP1_Cos(short Angle)\n{\n int S;\n\tif (Angle < 0) {\n\t\tif (Angle == -32768) return -32768;\n\t\tAngle = -Angle;\n\t}\n\tS = DSP1_SinTable[0x40 + (Angle >> 8)] - (DSP1_MulTable[Angle & 0xff] * DSP1_SinTable[Angle >> 8] >> 15);\n\tif (S < -32768) S = -32767;\n\treturn (short) S;\n}\n\nvoid DSP1_Normalize(short m, short *Coefficient, short *Exponent)\n{\n\tshort i = 0x4000;\n\tshort e = 0;\n\n\tif (m < 0)\n\t\twhile ((m & i) && i) {\n\t\t\ti >>= 1;\n\t\t\te++;\n\t\t}\n\telse\n\t\twhile (!(m & i) && i) {\n\t\t\ti >>= 1;\n\t\t\te++;\n\t\t}\n\n\tif (e > 0)\n\t\t*Coefficient = m * DSP1ROM[0x21 + e] << 1;\n\telse\n\t\t*Coefficient = m;\n\n\t*Exponent -= e;\n}\n\nvoid DSP1_NormalizeDouble(int Product, short *Coefficient, short *Exponent)\n{\n\tshort n = Product & 0x7fff;\n\tshort m = Product >> 15;\n\tshort i = 0x4000;\n\tshort e = 0;\n\n\tif (m < 0)\n\t\twhile ((m & i) && i) {\n\t\t\ti >>= 1;\n\t\t\te++;\n\t\t}\n\telse\n\t\twhile (!(m & i) && i) {\n\t\t\ti >>= 1;\n\t\t\te++;\n\t\t}\n\n\tif (e > 0)\n\t{\n\t\t*Coefficient = m * DSP1ROM[0x0021 + e] << 1;\n\n\t\tif (e < 15)\n\t\t\t*Coefficient += n * DSP1ROM[0x0040 - e] >> 15;\n\t\telse\n\t\t{\n\t\t\ti = 0x4000;\n\n\t\t\tif (m < 0)\n\t\t\t\twhile ((n & i) && i) {\n\t\t\t\t\ti >>= 1;\n\t\t\t\t\te++;\n\t\t\t\t}\n\t\t\telse\n\t\t\t\twhile (!(n & i) && i) {\n\t\t\t\t\ti >>= 1;\n\t\t\t\t\te++;\n\t\t\t\t}\n\n\t\t\tif (e > 15)\n\t\t\t\t*Coefficient = n * DSP1ROM[0x0012 + e] << 1;\n\t\t\telse\n\t\t\t\t*Coefficient += n;\n\t\t}\n\t}\n\telse\n\t\t*Coefficient = m;\n\n\t*Exponent = e;\n}\n\nshort DSP1_Truncate(short C, short E)\n{\n\tif (E > 0) {\n\t\tif (C > 0) return 32767; else if (C < 0) return -32767;\n\t} else {\n\t\tif (E < 0) return C * DSP1ROM[0x0031 + E] >> 15;\n\t}\n\treturn C;\n}\n\nvoid DSPOp04()\n{\n\tOp04Sin = DSP1_Sin(Op04Angle) * Op04Radius >> 15;\n\tOp04Cos = DSP1_Cos(Op04Angle) * Op04Radius >> 15;\n}\n\nshort Op0CA;\nshort Op0CX1;\nshort Op0CY1;\nshort Op0CX2;\nshort Op0CY2;\n\nvoid DSPOp0C()\n{\n\tOp0CX2 = (Op0CY1 * DSP1_Sin(Op0CA) >> 15) + (Op0CX1 * DSP1_Cos(Op0CA) >> 15);\n\tOp0CY2 = (Op0CY1 * DSP1_Cos(Op0CA) >> 15) - (Op0CX1 * DSP1_Sin(Op0CA) >> 15);\n}\n\nshort CentreX;\nshort CentreY;\nshort VOffset;\n\nshort VPlane_C;\nshort VPlane_E;\n\n// Azimuth and Zenith angles\nshort SinAas;\nshort CosAas;\nshort SinAzs;\nshort CosAzs;\n\n// Clipped Zenith angle\nshort SinAZS;\nshort CosAZS;\nshort SecAZS_C1;\nshort SecAZS_E1;\nshort SecAZS_C2;\nshort SecAZS_E2;\n\nshort Nx, Ny, Nz;\nshort Gx, Gy, Gz;\nshort C_Les, E_Les, G_Les;\n\nconst short MaxAZS_Exp[16] = {\n\t0x38b4, 0x38b7, 0x38ba, 0x38be, 0x38c0, 0x38c4, 0x38c7, 0x38ca,\n\t0x38ce,\t0x38d0, 0x38d4, 0x38d7, 0x38da, 0x38dd, 0x38e0, 0x38e4\n};\n\nvoid DSP1_Parameter(short Fx, short Fy, short Fz, short Lfe, short Les, short Aas, short Azs, short *Vof, short *Vva, short *Cx, short *Cy)\n{\n short CSec, C, E, MaxAZS, Aux;\n short 
LfeNx, LfeNy, LfeNz;\n short LesNx, LesNy, LesNz;\n short CentreZ;\n\n\t// Copy Zenith angle for clipping\n short AZS = Azs;\n\n\t// Store Sine and Cosine of Azimuth and Zenith angle\n SinAas = DSP1_Sin(Aas);\n CosAas = DSP1_Cos(Aas);\n SinAzs = DSP1_Sin(Azs);\n CosAzs = DSP1_Cos(Azs);\n\n Nx = SinAzs * -SinAas >> 15;\n Ny = SinAzs * CosAas >> 15;\n Nz = CosAzs * 0x7fff >> 15;\n\n LfeNx = Lfe*Nx>>15;\n LfeNy = Lfe*Ny>>15;\n LfeNz = Lfe*Nz>>15;\n\n\t// Center of Projection\n CentreX = Fx+LfeNx;\n CentreY = Fy+LfeNy;\n CentreZ = Fz+LfeNz;\n\n LesNx = Les*Nx>>15;\n LesNy = Les*Ny>>15;\n LesNz = Les*Nz>>15;\n\n Gx=CentreX-LesNx;\n Gy=CentreY-LesNy;\n Gz=CentreZ-LesNz;\n\n E_Les=0;\n DSP1_Normalize(Les, &C_Les, &E_Les);\n G_Les = Les;\n\n E = 0;\n DSP1_Normalize(CentreZ, &C, &E);\n\n VPlane_C = C;\n VPlane_E = E;\n\n\t// Determine clip boundary and clip Zenith angle if necessary\n MaxAZS = MaxAZS_Exp[-E];\n\n if (AZS < 0) {\n MaxAZS = -MaxAZS;\n if (AZS < MaxAZS + 1) AZS = MaxAZS + 1;\n } else {\n if (AZS > MaxAZS) AZS = MaxAZS;\n }\n\n\t// Store Sine and Cosine of clipped Zenith angle\n SinAZS = DSP1_Sin(AZS);\n CosAZS = DSP1_Cos(AZS);\n\n DSP1_Inverse(CosAZS, 0, &SecAZS_C1, &SecAZS_E1);\n DSP1_Normalize(C * SecAZS_C1 >> 15, &C, &E);\n E += SecAZS_E1;\n\n C = DSP1_Truncate(C, E) * SinAZS >> 15;\n\n CentreX += C * SinAas >> 15;\n CentreY -= C * CosAas >> 15;\n\n *Cx = CentreX;\n *Cy = CentreY;\n\n\t// Raster number of imaginary center and horizontal line\n *Vof = 0;\n\n if ((Azs != AZS) || (Azs == MaxAZS))\n {\n if (Azs == -32768) Azs = -32767;\n\n C = Azs - MaxAZS;\n if (C >= 0) C--;\n Aux = ~(C << 2);\n\n C = Aux * DSP1ROM[0x0328] >> 15;\n C = (C * Aux >> 15) + DSP1ROM[0x0327];\n *Vof -= (C * Aux >> 15) * Les >> 15;\n\n C = Aux * Aux >> 15;\n Aux = (C * DSP1ROM[0x0324] >> 15) + DSP1ROM[0x0325];\n CosAZS += (C * Aux >> 15) * CosAZS >> 15;\n }\n\n VOffset = Les * CosAZS >> 15;\n\n DSP1_Inverse(SinAZS, 0, &CSec, &E);\n DSP1_Normalize(VOffset, &C, &E);\n DSP1_Normalize(C * CSec >> 15, &C, &E);\n\n if (C == -32768) { C >>= 1; E++; }\n\n *Vva = DSP1_Truncate(-C, E);\n\n\t// Store Secant of clipped Zenith angle\n DSP1_Inverse(CosAZS, 0, &SecAZS_C2, &SecAZS_E2);\n}\n\nvoid DSP1_Raster(short Vs, short *An, short *Bn, short *Cn, short *Dn)\n{\n\tshort C, E, C1, E1;\n\n\tDSP1_Inverse((Vs * SinAzs >> 15) + VOffset, 7, &C, &E);\n\tE += VPlane_E;\n\n\tC1 = C * VPlane_C >> 15;\n\tE1 = E + SecAZS_E2;\n\n\tDSP1_Normalize(C1, &C, &E);\n\n\tC = DSP1_Truncate(C, E);\n\n\t*An = C * CosAas >> 15;\n\t*Cn = C * SinAas >> 15;\n\n\tDSP1_Normalize(C1 * SecAZS_C2 >> 15, &C, &E1);\n\n\tC = DSP1_Truncate(C, E1);\n\n\t*Bn = C * -SinAas >> 15;\n\t*Dn = C * CosAas >> 15;\n}\n\nshort Op02FX;\nshort Op02FY;\nshort Op02FZ;\nshort Op02LFE;\nshort Op02LES;\nshort Op02AAS;\nshort Op02AZS;\nshort Op02VOF;\nshort Op02VVA;\nshort Op02CX;\nshort Op02CY;\n\nvoid DSPOp02()\n{\n\tDSP1_Parameter(Op02FX, Op02FY, Op02FZ, Op02LFE, Op02LES, Op02AAS, Op02AZS, &Op02VOF, &Op02VVA, &Op02CX, &Op02CY);\n}\n\nshort Op0AVS;\nshort Op0AA;\nshort Op0AB;\nshort Op0AC;\nshort Op0AD;\n\nvoid DSPOp0A()\n{\n\tDSP1_Raster(Op0AVS, &Op0AA, &Op0AB, &Op0AC, &Op0AD);\n\tOp0AVS++;\n}\n\n\nshort DSP1_ShiftR(short C, short E)\n{\n return (C * DSP1ROM[0x0031 + E] >> 15);\n}\n\nvoid DSP1_Project(short X, short Y, short Z, short *H, short *V, short *M)\n{\n int aux, aux4;\n short E, E2, E3, E4, E5, refE, E6, E7;\n short C2, C4, C6, C8, C9, C10, C11, C12, C16, C17, C18, C19, C20, C21, C22, C23, C24, C25, C26;\n short Px, Py, Pz;\n\n E4=E3=E2=E=E5=0;\n\n 
DSP1_NormalizeDouble((int)X-Gx, &Px, &E4);\n DSP1_NormalizeDouble((int)Y-Gy, &Py, &E);\n DSP1_NormalizeDouble((int)Z-Gz, &Pz, &E3);\n Px>>=1; E4--; // to avoid overflows when calculating the scalar products\n Py>>=1; E--;\n Pz>>=1; E3--;\n\n refE = (E<E3)?E:E3;\n refE = (refE<E4)?refE:E4;\n\n Px=DSP1_ShiftR(Px,E4-refE); // normalize them to the same exponent\n Py=DSP1_ShiftR(Py,E-refE);\n Pz=DSP1_ShiftR(Pz,E3-refE);\n\n C11=- (Px*Nx>>15);\n C8=- (Py*Ny>>15);\n C9=- (Pz*Nz>>15);\n C12=C11+C8+C9; // this cannot overflow!\n\n aux4=C12; // de-normalization with 32-bits arithmetic\n refE = 16-refE; // refE can be up to 3\n if (refE>=0)\n aux4 <<=(refE);\n else\n aux4 >>=-(refE);\n if (aux4==-1) aux4 = 0; // why?\n aux4>>=1;\n\n aux = ((unsigned short)G_Les) + aux4; // Les - the scalar product of P with the normal vector of the screen\n DSP1_NormalizeDouble(aux, &C10, &E2);\n E2 = 15-E2;\n\n DSP1_Inverse(C10, 0, &C4, &E4);\n C2=C4*C_Les>>15; // scale factor\n\n\n // H\n E7=0;\n C16= (Px*(CosAas*0x7fff>>15)>>15);\n C20= (Py*(SinAas*0x7fff>>15)>>15);\n C17=C16+C20; // scalar product of P with the normalized horizontal vector of the screen...\n\n C18=C17*C2>>15; // ... multiplied by the scale factor\n DSP1_Normalize(C18, &C19, &E7);\n *H=DSP1_Truncate(C19, E_Les-E2+refE+E7);\n\n // V\n E6=0;\n C21 = Px*(CosAzs*-SinAas>>15)>>15;\n C22 = Py*(CosAzs*CosAas>>15)>>15;\n C23 = Pz*(-SinAzs*0x7fff>>15)>>15;\n C24=C21+C22+C23; // scalar product of P with the normalized vertical vector of the screen...\n\n C26=C24*C2>>15; // ... multiplied by the scale factor\n DSP1_Normalize(C26, &C25, &E6);\n *V=DSP1_Truncate(C25, E_Les-E2+refE+E6);\n\n // M\n DSP1_Normalize(C2, &C6, &E4);\n *M=DSP1_Truncate(C6, E4+E_Les-E2-7); // M is the scale factor divided by 2^7\n}\n\nshort Op06X;\nshort Op06Y;\nshort Op06Z;\nshort Op06H;\nshort Op06V;\nshort Op06M;\n\nvoid DSPOp06()\n{\n DSP1_Project(Op06X, Op06Y, Op06Z, &Op06H, &Op06V, &Op06M);\n}\n\n\nshort matrixC[3][3];\nshort matrixB[3][3];\nshort matrixA[3][3];\n\nshort Op01m;\nshort Op01Zr;\nshort Op01Xr;\nshort Op01Yr;\nshort Op11m;\nshort Op11Zr;\nshort Op11Xr;\nshort Op11Yr;\nshort Op21m;\nshort Op21Zr;\nshort Op21Xr;\nshort Op21Yr;\n\nvoid DSPOp01()\n{\n\tshort SinAz = DSP1_Sin(Op01Zr);\n\tshort CosAz = DSP1_Cos(Op01Zr);\n\tshort SinAy = DSP1_Sin(Op01Yr);\n\tshort CosAy = DSP1_Cos(Op01Yr);\n\tshort SinAx = DSP1_Sin(Op01Xr);\n\tshort CosAx = DSP1_Cos(Op01Xr);\n\n\tOp01m >>= 1;\n\n\tmatrixA[0][0] = (Op01m * CosAz >> 15) * CosAy >> 15;\n\tmatrixA[0][1] = -((Op01m * SinAz >> 15) * CosAy >> 15);\n\tmatrixA[0][2] = Op01m * SinAy >> 15;\n\n\tmatrixA[1][0] = ((Op01m * SinAz >> 15) * CosAx >> 15) + (((Op01m * CosAz >> 15) * SinAx >> 15) * SinAy >> 15);\n\tmatrixA[1][1] = ((Op01m * CosAz >> 15) * CosAx >> 15) - (((Op01m * SinAz >> 15) * SinAx >> 15) * SinAy >> 15);\n\tmatrixA[1][2] = -((Op01m * SinAx >> 15) * CosAy >> 15);\n\n\tmatrixA[2][0] = ((Op01m * SinAz >> 15) * SinAx >> 15) - (((Op01m * CosAz >> 15) * CosAx >> 15) * SinAy >> 15);\n\tmatrixA[2][1] = ((Op01m * CosAz >> 15) * SinAx >> 15) + (((Op01m * SinAz >> 15) * CosAx >> 15) * SinAy >> 15);\n\tmatrixA[2][2] = (Op01m * CosAx >> 15) * CosAy >> 15;\n}\n\nvoid DSPOp11()\n{\n\tshort SinAz = DSP1_Sin(Op11Zr);\n\tshort CosAz = DSP1_Cos(Op11Zr);\n\tshort SinAy = DSP1_Sin(Op11Yr);\n\tshort CosAy = DSP1_Cos(Op11Yr);\n\tshort SinAx = DSP1_Sin(Op11Xr);\n\tshort CosAx = DSP1_Cos(Op11Xr);\n\n\tOp11m >>= 1;\n\n\tmatrixB[0][0] = (Op11m * CosAz >> 15) * CosAy >> 15;\n\tmatrixB[0][1] = -((Op11m * SinAz >> 15) * CosAy >> 15);\n\tmatrixB[0][2] 
= Op11m * SinAy >> 15;\n\n\tmatrixB[1][0] = ((Op11m * SinAz >> 15) * CosAx >> 15) + (((Op11m * CosAz >> 15) * SinAx >> 15) * SinAy >> 15);\n\tmatrixB[1][1] = ((Op11m * CosAz >> 15) * CosAx >> 15) - (((Op11m * SinAz >> 15) * SinAx >> 15) * SinAy >> 15);\n\tmatrixB[1][2] = -((Op11m * SinAx >> 15) * CosAy >> 15);\n\n\tmatrixB[2][0] = ((Op11m * SinAz >> 15) * SinAx >> 15) - (((Op11m * CosAz >> 15) * CosAx >> 15) * SinAy >> 15);\n\tmatrixB[2][1] = ((Op11m * CosAz >> 15) * SinAx >> 15) + (((Op11m * SinAz >> 15) * CosAx >> 15) * SinAy >> 15);\n\tmatrixB[2][2] = (Op11m * CosAx >> 15) * CosAy >> 15;\n}\n\nvoid DSPOp21()\n{\n\tshort SinAz = DSP1_Sin(Op21Zr);\n\tshort CosAz = DSP1_Cos(Op21Zr);\n\tshort SinAy = DSP1_Sin(Op21Yr);\n\tshort CosAy = DSP1_Cos(Op21Yr);\n\tshort SinAx = DSP1_Sin(Op21Xr);\n\tshort CosAx = DSP1_Cos(Op21Xr);\n\n\tOp21m >>= 1;\n\n\tmatrixC[0][0] = (Op21m * CosAz >> 15) * CosAy >> 15;\n\tmatrixC[0][1] = -((Op21m * SinAz >> 15) * CosAy >> 15);\n\tmatrixC[0][2] = Op21m * SinAy >> 15;\n\n\tmatrixC[1][0] = ((Op21m * SinAz >> 15) * CosAx >> 15) + (((Op21m * CosAz >> 15) * SinAx >> 15) * SinAy >> 15);\n\tmatrixC[1][1] = ((Op21m * CosAz >> 15) * CosAx >> 15) - (((Op21m * SinAz >> 15) * SinAx >> 15) * SinAy >> 15);\n\tmatrixC[1][2] = -((Op21m * SinAx >> 15) * CosAy >> 15);\n\n\tmatrixC[2][0] = ((Op21m * SinAz >> 15) * SinAx >> 15) - (((Op21m * CosAz >> 15) * CosAx >> 15) * SinAy >> 15);\n\tmatrixC[2][1] = ((Op21m * CosAz >> 15) * SinAx >> 15) + (((Op21m * SinAz >> 15) * CosAx >> 15) * SinAy >> 15);\n\tmatrixC[2][2] = (Op21m * CosAx >> 15) * CosAy >> 15;\n}\n\nshort Op0DX;\nshort Op0DY;\nshort Op0DZ;\nshort Op0DF;\nshort Op0DL;\nshort Op0DU;\nshort Op1DX;\nshort Op1DY;\nshort Op1DZ;\nshort Op1DF;\nshort Op1DL;\nshort Op1DU;\nshort Op2DX;\nshort Op2DY;\nshort Op2DZ;\nshort Op2DF;\nshort Op2DL;\nshort Op2DU;\n\nvoid DSPOp0D()\n{\n Op0DF = (Op0DX * matrixA[0][0] >> 15) + (Op0DY * matrixA[0][1] >> 15) + (Op0DZ * matrixA[0][2] >> 15);\n\tOp0DL = (Op0DX * matrixA[1][0] >> 15) + (Op0DY * matrixA[1][1] >> 15) + (Op0DZ * matrixA[1][2] >> 15);\n\tOp0DU = (Op0DX * matrixA[2][0] >> 15) + (Op0DY * matrixA[2][1] >> 15) + (Op0DZ * matrixA[2][2] >> 15);\n\n\t#ifdef DebugDSP1\n\t\tLog_Message(\"OP0D X: %d Y: %d Z: %d / F: %d L: %d U: %d\",Op0DX,Op0DY,Op0DZ,Op0DF,Op0DL,Op0DU);\n\t#endif\n}\n\nvoid DSPOp1D()\n{\n\tOp1DF = (Op1DX * matrixB[0][0] >> 15) + (Op1DY * matrixB[0][1] >> 15) + (Op1DZ * matrixB[0][2] >> 15);\n\tOp1DL = (Op1DX * matrixB[1][0] >> 15) + (Op1DY * matrixB[1][1] >> 15) + (Op1DZ * matrixB[1][2] >> 15);\n\tOp1DU = (Op1DX * matrixB[2][0] >> 15) + (Op1DY * matrixB[2][1] >> 15) + (Op1DZ * matrixB[2][2] >> 15);\n\n\t#ifdef DebugDSP1\n\t\tLog_Message(\"OP1D X: %d Y: %d Z: %d / F: %d L: %d U: %d\",Op1DX,Op1DY,Op1DZ,Op1DF,Op1DL,Op1DU);\n\t#endif\n}\n\nvoid DSPOp2D()\n{\n\tOp2DF = (Op2DX * matrixC[0][0] >> 15) + (Op2DY * matrixC[0][1] >> 15) + (Op2DZ * matrixC[0][2] >> 15);\n\tOp2DL = (Op2DX * matrixC[1][0] >> 15) + (Op2DY * matrixC[1][1] >> 15) + (Op2DZ * matrixC[1][2] >> 15);\n\tOp2DU = (Op2DX * matrixC[2][0] >> 15) + (Op2DY * matrixC[2][1] >> 15) + (Op2DZ * matrixC[2][2] >> 15);\n\n\t#ifdef DebugDSP1\n\t\tLog_Message(\"OP2D X: %d Y: %d Z: %d / F: %d L: %d U: %d\",Op2DX,Op2DY,Op2DZ,Op2DF,Op2DL,Op2DU);\n\t#endif\n}\n\nshort Op03F;\nshort Op03L;\nshort Op03U;\nshort Op03X;\nshort Op03Y;\nshort Op03Z;\nshort Op13F;\nshort Op13L;\nshort Op13U;\nshort Op13X;\nshort Op13Y;\nshort Op13Z;\nshort Op23F;\nshort Op23L;\nshort Op23U;\nshort Op23X;\nshort Op23Y;\nshort Op23Z;\n\nvoid DSPOp03()\n{\n\tOp03X = 
(Op03F * matrixA[0][0] >> 15) + (Op03L * matrixA[1][0] >> 15) + (Op03U * matrixA[2][0] >> 15);\n\tOp03Y = (Op03F * matrixA[0][1] >> 15) + (Op03L * matrixA[1][1] >> 15) + (Op03U * matrixA[2][1] >> 15);\n\tOp03Z = (Op03F * matrixA[0][2] >> 15) + (Op03L * matrixA[1][2] >> 15) + (Op03U * matrixA[2][2] >> 15);\n\n\t#ifdef DebugDSP1\n\t\tLog_Message(\"OP03 F: %d L: %d U: %d / X: %d Y: %d Z: %d\",Op03F,Op03L,Op03U,Op03X,Op03Y,Op03Z);\n\t#endif\n}\n\nvoid DSPOp13()\n{\n\tOp13X = (Op13F * matrixB[0][0] >> 15) + (Op13L * matrixB[1][0] >> 15) + (Op13U * matrixB[2][0] >> 15);\n\tOp13Y = (Op13F * matrixB[0][1] >> 15) + (Op13L * matrixB[1][1] >> 15) + (Op13U * matrixB[2][1] >> 15);\n\tOp13Z = (Op13F * matrixB[0][2] >> 15) + (Op13L * matrixB[1][2] >> 15) + (Op13U * matrixB[2][2] >> 15);\n\n\t#ifdef DebugDSP1\n\t\tLog_Message(\"OP13 F: %d L: %d U: %d / X: %d Y: %d Z: %d\",Op13F,Op13L,Op13U,Op13X,Op13Y,Op13Z);\n\t#endif\n}\n\nvoid DSPOp23()\n{\n\tOp23X = (Op23F * matrixC[0][0] >> 15) + (Op23L * matrixC[1][0] >> 15) + (Op23U * matrixC[2][0] >> 15);\n\tOp23Y = (Op23F * matrixC[0][1] >> 15) + (Op23L * matrixC[1][1] >> 15) + (Op23U * matrixC[2][1] >> 15);\n\tOp23Z = (Op23F * matrixC[0][2] >> 15) + (Op23L * matrixC[1][2] >> 15) + (Op23U * matrixC[2][2] >> 15);\n\n\t#ifdef DebugDSP1\n\t\tLog_Message(\"OP23 F: %d L: %d U: %d / X: %d Y: %d Z: %d\",Op23F,Op23L,Op23U,Op23X,Op23Y,Op23Z);\n\t#endif\n}\n\nshort Op14Zr;\nshort Op14Xr;\nshort Op14Yr;\nshort Op14U;\nshort Op14F;\nshort Op14L;\nshort Op14Zrr;\nshort Op14Xrr;\nshort Op14Yrr;\n\nvoid DSPOp14()\n{\n\tshort CSec, ESec, CTan, CSin, C, E;\n\n\tDSP1_Inverse(DSP1_Cos(Op14Xr), 0, &CSec, &ESec);\n\n\t// Rotation Around Z\n\tDSP1_NormalizeDouble(Op14U * DSP1_Cos(Op14Yr) - Op14F * DSP1_Sin(Op14Yr), &C, &E);\n\n\tE = ESec - E;\n\n\tDSP1_Normalize(C * CSec >> 15, &C, &E);\n\n\tOp14Zrr = Op14Zr + DSP1_Truncate(C, E);\n\n\t// Rotation Around X\n\tOp14Xrr = Op14Xr + (Op14U * DSP1_Sin(Op14Yr) >> 15) + (Op14F * DSP1_Cos(Op14Yr) >> 15);\n\n\t// Rotation Around Y\n\tDSP1_NormalizeDouble(Op14U * DSP1_Cos(Op14Yr) + Op14F * DSP1_Sin(Op14Yr), &C, &E);\n\n\tE = ESec - E;\n\n\tDSP1_Normalize(DSP1_Sin(Op14Xr), &CSin, &E);\n\n\tCTan = CSec * CSin >> 15;\n\n\tDSP1_Normalize(-(C * CTan >> 15), &C, &E);\n\n\tOp14Yrr = Op14Yr + DSP1_Truncate(C, E) + Op14L;\n}\n\nvoid DSP1_Target(short H, short V, short *X, short *Y)\n{\n\tshort C, E, C1, E1;\n\n\tDSP1_Inverse((V * SinAzs >> 15) + VOffset, 8, &C, &E);\n\tE += VPlane_E;\n\n\tC1 = C * VPlane_C >> 15;\n\tE1 = E + SecAZS_E1;\n\n\tH <<= 8;\n\n\tDSP1_Normalize(C1, &C, &E);\n\n\tC = DSP1_Truncate(C, E) * H >> 15;\n\n\t*X = CentreX + (C * CosAas >> 15);\n\t*Y = CentreY - (C * SinAas >> 15);\n\n\tV <<= 8;\n\n\tDSP1_Normalize(C1 * SecAZS_C1 >> 15, &C, &E1);\n\n\tC = DSP1_Truncate(C, E1) * V >> 15;\n\n\t*X += C * -SinAas >> 15;\n\t*Y += C * CosAas >> 15;\n}\n\nshort Op0EH;\nshort Op0EV;\nshort Op0EX;\nshort Op0EY;\n\nvoid DSPOp0E()\n{\n\tDSP1_Target(Op0EH, Op0EV, &Op0EX, &Op0EY);\n}\n\nshort Op0BX;\nshort Op0BY;\nshort Op0BZ;\nshort Op0BS;\nshort Op1BX;\nshort Op1BY;\nshort Op1BZ;\nshort Op1BS;\nshort Op2BX;\nshort Op2BY;\nshort Op2BZ;\nshort Op2BS;\n\nvoid DSPOp0B()\n{\n Op0BS = (Op0BX * matrixA[0][0] + Op0BY * matrixA[0][1] + Op0BZ * matrixA[0][2]) >> 15;\n\n\t#ifdef DebugDSP1\n\t\tLog_Message(\"OP0B\");\n\t#endif\n}\n\nvoid DSPOp1B()\n{\n Op1BS = (Op1BX * matrixB[0][0] + Op1BY * matrixB[0][1] + Op1BZ * matrixB[0][2]) >> 15;\n\n\t#ifdef DebugDSP1\n\t\tLog_Message(\"OP1B X: %d Y: %d Z: %d S: %d\",Op1BX,Op1BY,Op1BZ,Op1BS);\n\t\tLog_Message(\" MX: %d 
MY: %d MZ: %d Scale: %d\",(short)(matrixB[0][0]*100),(short)(matrixB[0][1]*100),(short)(matrixB[0][2]*100),(short)(sc2*100));\n\t#endif\n}\n\nvoid DSPOp2B()\n{\n Op2BS = (Op2BX * matrixC[0][0] + Op2BY * matrixC[0][1] + Op2BZ * matrixC[0][2]) >> 15;\n\n\t#ifdef DebugDSP1\n\t\tLog_Message(\"OP2B\");\n\t#endif\n}\n\nshort Op08X,Op08Y,Op08Z,Op08Ll,Op08Lh;\n\nvoid DSPOp08()\n{\n\tint Op08Size = (Op08X * Op08X + Op08Y * Op08Y + Op08Z * Op08Z) << 1;\n\tOp08Ll = Op08Size & 0xffff;\n\tOp08Lh = (Op08Size >> 16) & 0xffff;\n\n\t#ifdef DebugDSP1\n\t\tLog_Message(\"OP08 %d,%d,%d\",Op08X,Op08Y,Op08Z);\n\t\tLog_Message(\"OP08 ((Op08X^2)+(Op08Y^2)+(Op08Z^2))=%x\",Op08Size );\n\t#endif\n}\n\nshort Op18X,Op18Y,Op18Z,Op18R,Op18D;\n\nvoid DSPOp18()\n{\n Op18D = (Op18X * Op18X + Op18Y * Op18Y + Op18Z * Op18Z - Op18R * Op18R) >> 15;\n\n #ifdef DebugDSP1\n Log_Message(\"Op18 X: %d Y: %d Z: %d R: %d DIFF %d\",Op18X,Op18Y,Op18Z,Op18R,Op18D);\n #endif\n}\n\nshort Op38X,Op38Y,Op38Z,Op38R,Op38D;\n\nvoid DSPOp38()\n{\n Op38D = (Op38X * Op38X + Op38Y * Op38Y + Op38Z * Op38Z - Op38R * Op38R) >> 15;\n Op38D++;\n\n #ifdef DebugDSP1\n Log_Message(\"OP38 X: %d Y: %d Z: %d R: %d DIFF %d\",Op38X,Op38Y,Op38Z,Op38R,Op38D);\n #endif\n}\n\nshort Op28X;\nshort Op28Y;\nshort Op28Z;\nshort Op28R;\n\nvoid DSPOp28()\n{\n\tint Radius = Op28X * Op28X + Op28Y * Op28Y + Op28Z * Op28Z;\n\n\tif (Radius == 0) Op28R = 0;\n\telse\n\t{\n\t\tshort C, E, Pos, Node1, Node2;\n\t\tDSP1_NormalizeDouble(Radius, &C, &E);\n\t\tif (E & 1) C = C * 0x4000 >> 15;\n\n\t\tPos = C * 0x0040 >> 15;\n\n\t\tNode1 = DSP1ROM[0x00d5 + Pos];\n\t\tNode2 = DSP1ROM[0x00d6 + Pos];\n\n\t\tOp28R = ((Node2 - Node1) * (C & 0x1ff) >> 9) + Node1;\n\t\tOp28R >>= (E >> 1);\n\t}\n\n #ifdef DebugDSP1\n Log_Message(\"OP28 X:%d Y:%d Z:%d\",Op28X,Op28Y,Op28Z);\n Log_Message(\"OP28 Vector Length %d\",Op28R);\n #endif\n}\n\nshort Op1CX,Op1CY,Op1CZ;\nshort Op1CXBR,Op1CYBR,Op1CZBR,Op1CXAR,Op1CYAR,Op1CZAR;\nshort Op1CX1;\nshort Op1CY1;\nshort Op1CZ1;\nshort Op1CX2;\nshort Op1CY2;\nshort Op1CZ2;\n\nvoid DSPOp1C()\n{\n\t// Rotate Around Op1CZ1\n\tOp1CX1 = (Op1CYBR * DSP1_Sin(Op1CZ) >> 15) + (Op1CXBR * DSP1_Cos(Op1CZ) >> 15);\n\tOp1CY1 = (Op1CYBR * DSP1_Cos(Op1CZ) >> 15) - (Op1CXBR * DSP1_Sin(Op1CZ) >> 15);\n\tOp1CXBR = Op1CX1; Op1CYBR = Op1CY1;\n\n\t// Rotate Around Op1CY1\n\tOp1CZ1 = (Op1CXBR * DSP1_Sin(Op1CY) >> 15) + (Op1CZBR * DSP1_Cos(Op1CY) >> 15);\n\tOp1CX1 = (Op1CXBR * DSP1_Cos(Op1CY) >> 15) - (Op1CZBR * DSP1_Sin(Op1CY) >> 15);\n\tOp1CXAR = Op1CX1; Op1CZBR = Op1CZ1;\n\n\t// Rotate Around Op1CX1\n\tOp1CY1 = (Op1CZBR * DSP1_Sin(Op1CX) >> 15) + (Op1CYBR * DSP1_Cos(Op1CX) >> 15);\n\tOp1CZ1 = (Op1CZBR * DSP1_Cos(Op1CX) >> 15) - (Op1CYBR * DSP1_Sin(Op1CX) >> 15);\n\tOp1CYAR = Op1CY1; Op1CZAR = Op1CZ1;\n\n\t#ifdef DebugDSP1\n\t\tLog_Message(\"OP1C Apply Matrix CX:%d CY:%d CZ:%d\",Op1CXAR,Op1CYAR,Op1CZAR);\n\t#endif\n}\n\nunsigned short Op0FRamsize;\nunsigned short Op0FPass;\n\nvoid DSPOp0F()\n{\n Op0FPass = 0x0000;\n\n #ifdef DebugDSP1\n Log_Message(\"OP0F RAM Test Pass:%d\", Op0FPass);\n #endif\n}\n\nshort Op2FUnknown;\nshort Op2FSize;\n\nvoid DSPOp2F()\n{\n\tOp2FSize=0x100;\n}\n" }, { "alpha_fraction": 0.6827262043952942, "alphanum_fraction": 0.7038778066635132, "avg_line_length": 33.040000915527344, "blob_id": "def5750c68579f958bf2d9da192805dc78507785", "content_id": "50a9bf29067fe900824dce180602ccccbcc2c8f1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1702, "license_type": "no_license", "max_line_length": 132, "num_lines": 50, "path": 
"/src/jma/7zlzma.cpp", "repo_name": "TeoTwawki/zsnes-archive", "src_encoding": "UTF-8", "text": "/*\nCopyright (C) 2005-2008 NSRT Team ( http://nsrt.edgeemu.com )\nCopyright (C) 2002 Andrea Mazzoleni ( http://advancemame.sf.net )\n\nThis library is free software; you can redistribute it and/or\nmodify it under the terms of the GNU Lesser General Public\nLicense version 2.1 as published by the Free Software Foundation.\n\nThis library is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU\nLesser General Public License for more details.\n\nYou should have received a copy of the GNU Lesser General Public\nLicense along with this library; if not, write to the Free Software\nFoundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA\n*/\n\n#include \"7z.h\"\n\n#include \"lzmadec.h\"\n\nbool decompress_lzma_7z(ISequentialInStream& in, unsigned in_size, ISequentialOutStream& out, unsigned out_size) throw ()\n{\n try\n {\n NCompress::NLZMA::CDecoder cc;\n\n UINT64 in_size_l = in_size;\n UINT64 out_size_l = out_size;\n\n if (cc.ReadCoderProperties(&in) != S_OK) { return(false); }\n if (cc.Code(&in, &out, &in_size_l, &out_size_l) != S_OK) { return(false); }\n if (out.size_get() != out_size || out.overflow_get()) { return(false); }\n\n return(true);\n }\n catch (...)\n {\n return(false);\n }\n}\n\nbool decompress_lzma_7z(const unsigned char* in_data, unsigned int in_size, unsigned char* out_data, unsigned int out_size) throw ()\n{\n ISequentialInStream_Array in(reinterpret_cast<const char*>(in_data), in_size);\n ISequentialOutStream_Array out(reinterpret_cast<char*>(out_data), out_size);\n\n return(decompress_lzma_7z(in, in_size, out, out_size));\n}\n" }, { "alpha_fraction": 0.6819277405738831, "alphanum_fraction": 0.7036144733428955, "avg_line_length": 30.320755004882812, "blob_id": "b561951662124f0b2def1d8ba84fc3ab99dac772", "content_id": "18e15e29710fbefa47f24eab1a35cd481b0a22cb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1660, "license_type": "no_license", "max_line_length": 98, "num_lines": 53, "path": "/src/tools/strutil.h", "repo_name": "TeoTwawki/zsnes-archive", "src_encoding": "UTF-8", "text": "/*\nCopyright (C) 2005-2008 Nach, grinvader ( http://www.zsnes.com )\n\nThis program is free software; you can redistribute it and/or\nmodify it under the terms of the GNU General Public License\nversion 2 as published by the Free Software Foundation.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with this program; if not, write to the Free Software\nFoundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.\n*/\n\n/*\nThis is part of a toolkit used to assist in ZSNES development\n*/\n\n#ifndef STRUTIL_H\n#define STRUTIL_H\n\n#include <string>\n#include <vector>\n#include <cctype>\n#include <cstring>\n\nstruct ci_char_traits : public std::char_traits<char>\n{\n static bool eq(char c1, char c2) { return(tolower(c1) == tolower(c2)); }\n static bool ne(char c1, char c2) { return(tolower(c1) != tolower(c2)); }\n static bool lt(char c1, char c2) { return(tolower(c1) < tolower(c2)); }\n static int compare(const char* s1, const char* s2, size_t n) { return(strncasecmp(s1, s2, n)); }\n\n static const char* find(const char* s, int n, char a)\n {\n while (n-- > 0 && tolower(*s) != tolower(a))\n {\n s++;\n }\n return(n >= 0 ? s : 0);\n }\n};\n\ntypedef std::basic_string<char, ci_char_traits> string_ci;\n\nvoid Tokenize(const std::string&, std::vector<std::string>&, const std::string&);\nvoid Tokenize(const string_ci&, std::vector<string_ci>&, const string_ci&);\nbool all_whitespace(const char *);\n\n#endif\n" }, { "alpha_fraction": 0.7100059986114502, "alphanum_fraction": 0.7345715761184692, "avg_line_length": 26.816667556762695, "blob_id": "8949327c303812b002ed3f1f440a3b793f1fc519", "content_id": "a617ea8c2fbd83c2b5aa38f0e8fd7c0abe6b979f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1669, "license_type": "no_license", "max_line_length": 84, "num_lines": 60, "path": "/src/jma/inbyte.cpp", "repo_name": "TeoTwawki/zsnes-archive", "src_encoding": "UTF-8", "text": "/*\nCopyright (C) 2005-2008 NSRT Team ( http://nsrt.edgeemu.com )\nCopyright (C) 2002 Andrea Mazzoleni ( http://advancemame.sf.net )\nCopyright (C) 2001-4 Igor Pavlov ( http://www.7-zip.org )\n\nThis library is free software; you can redistribute it and/or\nmodify it under the terms of the GNU Lesser General Public\nLicense version 2.1 as published by the Free Software Foundation.\n\nThis library is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU\nLesser General Public License for more details.\n\nYou should have received a copy of the GNU Lesser General Public\nLicense along with this library; if not, write to the Free Software\nFoundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA\n*/\n\n#include \"inbyte.h\"\n\nnamespace NStream{\n\nCInByte::CInByte(UINT32 aBufferSize):\n m_BufferBase(0),\n m_BufferSize(aBufferSize)\n{\n m_BufferBase = new BYTE[m_BufferSize];\n}\n\nCInByte::~CInByte()\n{\n delete []m_BufferBase;\n}\n\nvoid CInByte::Init(ISequentialInStream *aStream)\n{\n m_Stream = aStream;\n m_ProcessedSize = 0;\n m_Buffer = m_BufferBase;\n m_BufferLimit = m_Buffer;\n m_StreamWasExhausted = false;\n}\n\nbool CInByte::ReadBlock()\n{\n if (m_StreamWasExhausted)\n return false;\n m_ProcessedSize += (m_Buffer - m_BufferBase);\n UINT32 aNumProcessedBytes;\n HRESULT aResult = m_Stream->Read(m_BufferBase, m_BufferSize, &aNumProcessedBytes);\n if (aResult != S_OK)\n throw aResult;\n m_Buffer = m_BufferBase;\n m_BufferLimit = m_Buffer + aNumProcessedBytes;\n m_StreamWasExhausted = (aNumProcessedBytes == 0);\n return (!m_StreamWasExhausted);\n}\n\n}\n" } ]
57
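The dsp1emu.c file in the record above uses a 16-bit binary angle throughout (0x8000 = 180 degrees, so 0x4000 = 90 and 0x2000 = 45): DSP1_Sin() takes the top 8 bits as an index into the 256-entry DSP1_SinTable and linearly interpolates the low 8 bits via DSP1_MulTable, while DSP1_Cos() is the same lookup shifted by 0x40 entries. A small floating-point sketch of that convention, useful for cross-checking the tables (illustrative only, not part of the repo):

#include <math.h>
#include <stdio.h>

/* Reference value in the same scale the DSP1 tables use:
   the angle is 16-bit with 0x8000 == pi, the result scaled to +/-0x7fff. */
static double dsp1_sin_reference( short angle )
{
	const double pi = acos( -1.0 );
	return 32767.0 * sin( (double) angle * pi / 32768.0 );
}

int main( void )
{
	/* 0x2000 = 45 degrees: table index 0x20, interpolation fraction 0.
	   DSP1_SinTable[0x20] is 0x5a82 = 23170, and the reference gives
	   sin(45 deg) * 32767 = 23169.8 - the table entry agrees to one LSB. */
	printf( "sin(0x2000) reference = %.1f, table = %d\n",
	        dsp1_sin_reference( 0x2000 ), 0x5a82 );
	return 0;
}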
igorbannicov/jbb
https://github.com/igorbannicov/jbb
db02aed47d93aa726f96bc052fe48c28cdcb6263
8ffb68ff177f58fd30e944815299985cf66350a7
1dadbb829db0d5310909b03f179486f3e23efb2c
refs/heads/master
2022-03-05T23:54:22.583730
2019-10-09T07:20:57
2019-10-09T07:20:57
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7503876090049744, "alphanum_fraction": 0.7503876090049744, "avg_line_length": 14.731707572937012, "blob_id": "d0667f164ff91c40423aa3a53294cd3cd7e1a7c0", "content_id": "8b3d9dfabb39f3fd5fbe49c133029a0925a03a06", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "INI", "length_bytes": 645, "license_type": "no_license", "max_line_length": 59, "num_lines": 41, "path": "/config.ini", "repo_name": "igorbannicov/jbb", "src_encoding": "UTF-8", "text": "[loggers]\nkeys=root,APP\n \n[handlers]\nkeys=fileHandler\n \n[formatters]\nkeys=myFormatter\n \n[logger_root]\nlevel=INFO\nhandlers=fileHandler\n\n[logger_APP]\nlevel=INFO\nhandlers=fileHandler\nqualname=exampleApp\n\n[handler_fileHandler]\nclass=FileHandler\nformatter=myFormatter\nargs=(\"logs/main.log\",)\n\n[formatter_myFormatter]\nformat=%(asctime)s - %(name)s - %(levelname)s - %(message)s\ndatefmt=\n\n[bitbucket]\nurl = https://bitbucket.company.com\nuser = bbuser\npassword = bbuserpass\n\n[jenkins]\nurl = https://jenkins.company.com\nuser = jkuser\npassword = jkuserpass\n\n[jenkins_import]\nurl = https://jenkins-beta.company.com\nuser = beta-jkuser\npassword = beta-jkpass\n" }, { "alpha_fraction": 0.47590360045433044, "alphanum_fraction": 0.5662650465965271, "avg_line_length": 24.121212005615234, "blob_id": "273c96d78b6272afeb94c1d224771c14471d6700", "content_id": "71e6aeecc343eb5e67c31c245efb364c0aeaaa3b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 830, "license_type": "no_license", "max_line_length": 63, "num_lines": 33, "path": "/lib/utils.py", "repo_name": "igorbannicov/jbb", "src_encoding": "UTF-8", "text": "import sys, tty, termios\n\nCOLORS = {\n\t'none'\t\t: '\\033[0m',\n\t'red'\t\t: '\\033[31m',\n\t'green'\t\t: '\\033[32m',\n\t'orange'\t: '\\033[33m',\n\t'blue'\t\t: '\\033[34m',\n\t'purple'\t: '\\033[35m',\n\t'cyan'\t\t: '\\033[36m',\n\t'lightgrey'\t: '\\033[37m',\n\t'darkgrey'\t: '\\033[90m',\n\t'lightred'\t: '\\033[91m',\n\t'lightgreen': '\\033[92m',\n\t'yellow'\t: '\\033[93m',\n\t'lightblue'\t: '\\033[94m',\n\t'pink'\t\t: '\\033[95m',\n\t'lightcyan'\t: '\\033[96m'\n}\n\ndef getch():\n \"\"\"Get a single character from stdin, Unix version\"\"\"\n fd = sys.stdin.fileno()\n old_settings = termios.tcgetattr(fd)\n try:\n tty.setraw(sys.stdin.fileno()) # Raw read\n ch = sys.stdin.read(1)\n finally:\n termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)\n return ch\n\ndef cprint(string, color):\n\tprint(COLORS[color] + format(string) + COLORS['none'], end='') " }, { "alpha_fraction": 0.6392220258712769, "alphanum_fraction": 0.6451414227485657, "avg_line_length": 30.194244384765625, "blob_id": "fd0886531e30512c183cd34ebf03556d99aad0f9", "content_id": "7291df4f57d96174b67becdf668b68d98ab658a0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 13008, "license_type": "no_license", "max_line_length": 256, "num_lines": 417, "path": "/lib/jenkinsinstance.py", "repo_name": "igorbannicov/jbb", "src_encoding": "UTF-8", "text": "import os\nimport ast\nimport random\nimport string\nimport json\nimport hashlib\nimport requests\nimport logging\nimport xmltodict\nfrom xml.etree import ElementTree as et\nfrom .utils import *\n\n\nclass JenkinsInstance:\n\tJOB_TYPES = ['NONE', 'PR', 'CI-DEV', 'CI-QA', 'FOLDER']\n\n\t# Initialize connection\n\tdef __init__(self, url='http://localhost:8080', user = 'admin', password = 'admin'):\n\t\tself.logger = 
logging.getLogger(\"M::Jenkins\")\n\t\tself.url = url\n\t\tself.logger.info(\"Module initialized\")\n\t\tself.session = requests.Session()\n\t\tself.session.auth = (user, password)\n\n\tdef crumb(self):\n\t\turl = self.url + '/crumbIssuer/api/xml?xpath=(//crumb)'\n\t\trq = self.session.get(url)\n\t\tcrumb = xmltodict.parse(rq.text)['crumb']\n\t\tself.logger.info(crumb)\n\t\treturn crumb\n\n\t# Returns a Jenkins views list\n\tdef getViews(self):\n\t\tself.logger = logging.getLogger(\"M::Jenkins::GetViews\")\n\t\tself.logger.info(\"Starting\")\n\t\ttry:\n\t\t\turl = self.url + '/api/json'\n\t\t\trq = self.session.get(url)\n\t\t\treturn rq.json()['views']\n\t\texcept Exception as e:\n\t\t\tself.logger.error(\"An error occured: \" + str(e))\n\t\t\texit()\n\n\tdef getJobs(self):\n\t\tself.logger = logging.getLogger(\"M::Jenkins::GetJobs\")\n\t\tself.logger.info(\"Starting\")\n\t\ttry:\n\t\t\tjobs = []\n\t\t\turl = self.url + '/api/json'\n\t\t\trq = self.session.get(url)\n\t\t\tfor job in rq.json()['jobs']:\n\t\t\t\tif job['_class'] != 'com.cloudbees.hudson.plugins.folder.Folder':\n\t\t\t\t\tjobs.append(job)\n\t\t\treturn jobs\n\t\texcept Exception as e:\n\t\t\tself.logger.error(\"An error occured: \" + str(e))\n\t\t\texit()\n\n\tdef getFolders(self):\n\t\tself.logger = logging.getLogger(\"M::Jenkins::GetFolders\")\n\t\tself.logger.info(\"Starting\")\n\t\ttry:\n\t\t\tfolders = []\n\t\t\turl = self.url + '/api/json'\n\t\t\trq = self.session.get(url)\n\t\t\tfor job in rq.json()['jobs']:\n\t\t\t\tif job['_class'] == 'com.cloudbees.hudson.plugins.folder.Folder':\n\t\t\t\t\tfolders.append(job)\n\t\t\treturn folders\n\t\texcept Exception as e:\n\t\t\tself.logger.error(\"An error occured: \" + str(e))\n\t\t\texit()\n\n\t# Returns a list of Jenkins jobs from Jenkins view config (XML)\n\tdef getJobsFromView(self, view):\n\t\tself.logger = logging.getLogger(\"M::Jenkins::GetJobsFromView\")\n\t\tself.logger.info(\"Starting\")\n\t\ttry:\n\t\t\tjobList = []\n\t\t\turl = self.url + '/view/' + view + '/api/json'\n\t\t\trq = self.session.get(url)\n\t\t\tfor job in rq.json()['jobs']:\n\t\t\t\tjobList.append(job)\n\t\t\tself.logger.info(\"Done getting job list\")\n\t\t\treturn jobList\n\t\texcept Exception as e:\n\t\t\tself.logger.error(\"An error occured: \" + str(e))\n\t\t\texit()\n\n\t# Returns a list of Jenkins jobs from Jenkins folder\n\tdef getJobsFromFolder(self, folder):\n\t\tself.logger = logging.getLogger(\"M::Jenkins::GetJobsFromFolder\")\n\t\tself.logger.info(\"Starting\")\n\t\ttry:\n\t\t\tjobList = []\n\t\t\tfolderList = []\n\t\t\turl = folder['url'] + '/api/json'\n\t\t\trq = self.session.get(url)\n\t\t\tfor item in rq.json()['jobs']:\n\t\t\t\tif item['_class'] != 'com.cloudbees.hudson.plugins.folder.Folder':\n\t\t\t\t\tjobList.append(item)\n\t\t\t\tif item['_class'] == 'com.cloudbees.hudson.plugins.folder.Folder':\n\t\t\t\t\tfolderList.append(item)\n\t\t\tself.logger.info(\"Done getting job list\")\n\t\t\treturn jobList, folderList\n\t\texcept Exception as e:\n\t\t\tself.logger.error(\"An error occured: \" + str(e))\n\t\t\texit()\n\n\t# Returns a list of Jenkins jobs from Jenkins path\n\tdef getJobsFromPath(self, path):\n\t\tself.logger = logging.getLogger(\"M::Jenkins::GetJobsFromPath\")\n\t\tself.logger.info(\"Starting\")\n\t\ttry:\n\t\t\tjobList = []\n\t\t\turl = self.url + path + '/api/json'\n\t\t\tprint(url)\n\t\t\trq = self.session.get(url)\n\t\t\tfor job in rq.json()['jobs']:\n\t\t\t\tjobList.append(job)\n\t\t\tself.logger.info(\"Done getting job list\")\n\t\t\treturn jobList\n\t\texcept 
Exception as e:\n\t\t\tself.logger.error(\"An error occured: \" + str(e))\n\t\t\texit()\n\n\t# Select Jenkins view dialog\n\tdef selectView(self):\n\t\tself.logger = logging.getLogger(\"M::Jenkins::SelectView\")\n\t\tself.logger.info(\"Starting\")\n\t\ttry:\n\t\t\tos.system('clear')\n\t\t\tcprint(\"Jenkins views list:\\n\", \"green\")\n\t\t\tviews = self.getViews()\n\t\t\tfor x in range(len(views)):\n\t\t\t\tprint('{:3}'.format(str(x)) + \" -> \" + views[x]['name'])\n\t\t\torder = input(\"Select a view where your Jenkins job is located: \")\n\t\t\ttry:\n\t\t\t\tview = views[int(order)]\n\t\t\t\treturn view\n\t\t\texcept Exception as exc:\n\t\t\t\tself.logger.error(\"An error occured: \" + str(exc))\n\t\t\t\tcprint(\"An error occured.\\n\", 'red')\n\t\t\t\tcprint(str(exc) + '\\n', 'red')\n\t\t\t\texit()\n\t\texcept Exception as e:\n\t\t\tself.logger.error(\"An error occured: \" + str(e))\n\t\t\texit()\n\n\t# Select Jenkins job dialog\n\tdef selectJob(self, view):\n\t\tself.logger = logging.getLogger(\"M::Jenkins::SelectJob\")\n\t\tself.logger.info(\"Starting\")\n\t\ttry:\n\t\t\tfound = False\n\t\t\tos.system('clear')\n\t\t\tcprint(\"Jenkins jobs selection:\\n\",\"green\")\n\t\t\tjobs = self.getJobsFromView(view)\n\t\t\twhile not found:\n\t\t\t\tfor x in range(len(jobs)):\n\t\t\t\t\tprint('{:3}'.format(str(x)) + \" -> \" + jobs[x]['name'], end='')\n\t\t\t\t\tif jobs[x]['_class'] == 'com.cloudbees.hudson.plugins.folder.Folder':\n\t\t\t\t\t\tcprint(\" (F)\\n\", \"orange\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tprint()\n\t\t\t\torder = input(\"Select a Jenkins job: \")\n\t\t\t\ttry:\n\t\t\t\t\tjob = jobs[int(order)]\n\t\t\t\t\tif job['_class'] != 'com.cloudbees.hudson.plugins.folder.Folder':\n\t\t\t\t\t\tself.logger.info(\"Got job \" + job['name'])\n\t\t\t\t\t\tfound = True\n\t\t\t\t\t\treturn job\n\t\t\t\t\telse:\n\t\t\t\t\t\turl = self.url + '/view/' + view + '/job/' + job['name'] + '/api/json'\n\t\t\t\t\t\trq = self.session.get(url)\n\t\t\t\t\t\tjobs = rq.json()['jobs']\n\t\t\t\texcept Exception as e:\n\t\t\t\t\tself.logger.error(\"An error occured: \" + str(e))\n\t\t\t\t\texit()\n\t\texcept Exception as e:\n\t\t\tself.logger.error(\"An error occured: \" + str(e))\n\t\t\texit()\n\n\t# Select path in Jenkins\n\tdef selectPath(self):\n\t\tself.logger = logging.getLogger(\"M::Jenkins::SelectPath\")\n\t\tself.logger.info(\"Starting\")\n\t\tpath = ''\n\t\tfound = False\n\t\tself.logger.info(\"Select view \")\n\t\ttry:\n\t\t\tos.system('clear')\n\t\t\tcprint(\"Jenkins views list:\\n\", \"green\")\n\t\t\tviews = self.getViews()\n\t\t\tself.logger.info(\"Got views list\")\n\t\t\tfor x in range(len(views)):\n\t\t\t\tprint('{:3}'.format(str(x)) + \" -> \" + views[x]['name'])\n\t\t\torder = input(\"Select a view where your Jenkins job is located: \")\n\t\t\tview = views[int(order)]\n\t\t\tself.logger.info(\"Selected view\")\n\t\t\tif view['name'] != 'all':\n\t\t\t\tpath = path + '/view/' + view['name']\n\t\t\tjobs = self.getJobsFromView(view['name'])\n\n\t\t\twhile not found:\n\t\t\t\tfolders = []\n\t\t\t\tfor x in range(len(jobs)):\n\t\t\t\t\tif jobs[x]['_class'] == 'com.cloudbees.hudson.plugins.folder.Folder':\n\t\t\t\t\t\tfolders.append(jobs[x])\n\t\t\t\t\t\tprint('{:3}'.format(str(x)) + \" -> (Folder) -> \" + jobs[x]['name'])\n\t\t\t\tif len(folders) > 0:\n\t\t\t\t\torder = input(\"Select a folder or press H to create a job here \")\n\t\t\t\t\tkey = order.upper()\n\t\t\t\t\tif key != 'H':\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\tfolder = jobs[int(order)]\n\t\t\t\t\t\t\tpath = '/job/' + folder['name']\n\t\t\t\t\t\t\tjobs 
= self.getJobsFromPath(path)\n\t\t\t\t\t\t\tself.logger = logging.getLogger(\"M::Jenkins::SelectPath\")\n\t\t\t\t\t\texcept:\n\t\t\t\t\t\t\tpass\n\t\t\t\t\telse:\n\t\t\t\t\t\tfound = True\n\t\t\t\t\t\treturn path\n\t\t\t\telse:\n\t\t\t\t\tcprint(\"Create a job here? (Y/N) \", \"green\")\n\t\t\t\t\torder = input()\n\t\t\t\t\tif order.upper() == 'Y':\n\t\t\t\t\t\tfound = True\n\t\t\t\t\t\treturn path\n\t\t\t\t\telse:\n\t\t\t\t\t\tcprint(\"WHOA! I don't get the idea... Bye.\\n\", \"red\")\n\t\t\t\t\t\texit()\n\t\texcept Exception as e:\n\t\t\tself.logger.error(\"An error occured in creation job: \" + str(e))\n\t\t\texit()\n\n\tdef getJobDetails(self, job):\n\t\tself.logger = logging.getLogger(\"M::Jenkins::GetJobDetails\")\n\t\tself.logger.info(\"Starting\")\n\t\ttry:\n\t\t\turl = job['url'] + '/config.xml'\n\t\t\trq = self.session.get(url)\n\t\t\t#data = xmltodict.parse(rq.text)\n\t\t\treturn rq.text\n\t\texcept Exception as e:\n\t\t\tself.logger.error(\"An error occured: \" + str(e))\n\t\t\texit()\n\n\tdef createFolder(self, name, path):\n\t\t#curl -XPOST 'http://jenkins/createItem?\n\t\t#name=FolderName&\n\t\t#mode=com.cloudbees.hudson.plugins.folder.Folder&\n\t\t#from=&json=%7B%22name%22%3A%22FolderName%22%2C%22mode%22%3A%22com.cloudbees.hudson.plugins.folder.Folder%22%2C%22from%22%3A%22%22%2C%22Submit%22%3A%22OK%22%7D&Submit=OK' --user 'user.name:YourAPIToken' -H \"Content-Type:application/x-www-form-urlencoded\"\n\t\ttry:\n\t\t\theaders = {'Content-Type': 'application/json'}\n\t\t\tself.logger = logging.getLogger(\"M::Jenkins::CreateListView\")\n\t\t\turl = self.url + path +\"/createItem?name=\"+name+\"&mode=com.cloudbees.hudson.plugins.folder.Folder&Submit=OK\"\n\t\t\tself.logger.info(url)\n\t\t\tself.logger.info(headers)\n\t\t\trq = self.session.post(url, headers=headers)\n\t\t\tresponse = rq.status_code\n\t\t\tself.logger.info(rq.status_code)\n\t\t\tif response == 200:\n\t\t\t\treturn True\n\t\t\telse:\n\t\t\t\treturn False\n\t\texcept Exception as e:\n\t\t\tself.logger.error(\"An error occured: \" + str(e))\n\t\t\texit()\n\n\tdef createNewJob(self, name, dirpath, urlpath):\n\t\tself.logger = logging.getLogger(\"M::Jenkins::CreateNewJob\")\n\t\tself.logger.info(\"Starting\")\n\t\ttry:\n\t\t\ttree = et.parse(dirpath)\n\t\t\txml = tree.getroot()\n\t\t\tdata = et.tostring(xml, encoding='utf8', method='xml')\n\t\t\tself.logger = logging.getLogger(\"M::Jenkins::CreateNewJob\")\n\t\t\theaders = {'Content-Type': 'application/xml'}\n\t\t\tself.logger = logging.getLogger(\"M::Jenkins::CreateNewJob\")\n\t\t\turl = self.url + urlpath + \"/createItem?name=\" + name\n\t\t\tself.logger.info(url)\n\t\t\tself.logger.info(headers)\n\t\t\trq = self.session.post(url, data=data, headers=headers)\n\t\t\tresponse = rq.status_code\n\t\t\tself.logger.info(rq.status_code)\n\t\t\tself.logger.info(rq.text)\n\t\t\tif response == 200:\n\t\t\t\treturn True\n\t\t\telse:\n\t\t\t\treturn False\n\t\texcept Exception as e:\n\t\t\tself.logger.error(\"An error occured: \" + str(e))\n\t\t\texit()\n\n\n\tdef createJob(self):\n\t\tself.logger = logging.getLogger(\"M::Jenkins::CreateJob\")\n\t\tself.logger.info(\"Starting\")\n\t\ttry:\n\t\t\tos.system('clear')\n\t\t\tcprint('Enter new job name: ', 'green')\n\t\t\tjname = input()\n\t\t\tos.system('clear')\n\t\t\tcprint('Jenkins job types: \\n', 'green')\n\t\t\tfor x in range(len(self.JOB_TYPES)):\n\t\t\t\tprint('{:3}'.format(str(x)) + \" -> \" + self.JOB_TYPES[x])\n\t\t\torder = input(\"Select a Jenkins job type: \")\n\t\t\ttry:\n\t\t\t\tjtype = 
self.JOB_TYPES[int(order)]\n\t\t\texcept Exception as e:\n\t\t\t\tself.logger.error(e)\n\t\t\txml = self.parseJobXML(jtype)\n\t\t\tjdata = et.tostring(xml, encoding='utf8', method='xml')\n\t\t\tself.logger = logging.getLogger(\"M::Jenkins::CreateJob\")\n\t\t\tjpath = self.selectPath()\n\t\t\tself.logger = logging.getLogger(\"M::Jenkins::CreateJob\")\n\t\t\theaders = {'Content-Type': 'application/xml'}\n\t\t\tself.logger = logging.getLogger(\"M::Jenkins::CreateJob\")\n\t\t\turl = self.url + jpath + \"/createItem?name=\" + jname\n\t\t\tself.logger.info(url)\n\t\t\tself.logger.info(headers)\n\t\t\tself.logger.info(xmltodict.parse(jdata))\n\t\t\trq = self.session.post(url, data=jdata, headers=headers)\n\t\t\tresponse = rq.status_code\n\t\t\tself.logger.info(rq.status_code)\n\t\t\tself.logger.info(rq.text)\n\t\t\tif response == 200:\n\t\t\t\treturn True\n\t\t\telse:\n\t\t\t\treturn False\n\t\texcept Exception as e:\n\t\t\tself.logger.error(\"An error occured: \" + str(e))\n\t\t\texit()\n\n\tdef createListView(self, name):\n\t\tself.logger = logging.getLogger(\"M::Jenkins::CreateListView\")\n\t\tself.logger.info(\"Starting\")\n\t\ttry:\n\t\t\theaders = {'Content-Type': 'application/json'}\n\t\t\turl = self.url + \"/createView?name=\"+name+\"&mode=hudson.model.ListView&Submit=OK\"\n\t\t\tself.logger.info(url)\n\t\t\tself.logger.info(headers)\n\t\t\tself.logger.info(\"Sending requests\")\n\t\t\trq = self.session.post(url, headers=headers)\n\t\t\tresponse = rq.status_code\n\t\t\tself.logger.info(rq.status_code)\n\t\t\tself.logger.info(rq.text)\n\t\t\tif response == 200:\n\t\t\t\treturn True\n\t\t\telse:\n\t\t\t\treturn False\n\t\texcept Exception as e:\n\t\t\tself.logger.error(\"An error occured: \" + str(e))\n\t\t\texit()\n\n\n\tdef deleteJob(self, job):\n\t\tself.logger = logging.getLogger(\"M::Jenkins::DeleteJob\")\n\t\tself.logger.info(\"Starting\")\n\t\ttry:\n\t\t\theaders = {'Content-Type': 'application/xml'}\n\t\t\turl = job['url'] + \"/doDelete\"\n\t\t\trq = self.session.post(url, headers=headers)\n\t\t\tresponse = rq.status_code\n\t\t\tif response == 200:\n\t\t\t\treturn True\n\t\t\telse:\n\t\t\t\treturn False\n\t\texcept Exception as e:\n\t\t\tself.logger.error(\"An error occured: \" + str(e))\n\t\t\texit()\n\n\tdef deleteView(self, view):\n\t\tself.logger = logging.getLogger(\"M::Jenkins::DeleteView\")\n\t\tself.logger.info(\"Starting\")\n\t\ttry:\n\t\t\theaders = {'Content-Type': 'application/xml; charset=utf-8', 'Content-Type': 'text/xml; charset=utf-8'}\n\t\t\turl = view['url'] + \"/doDelete\"\n\t\t\trq = self.session.post(url, headers=headers)\n\t\t\tresponse = rq.status_code\n\t\t\tif response == 200:\n\t\t\t\treturn True\n\t\t\telse:\n\t\t\t\treturn False\n\t\texcept Exception as e:\n\t\t\tself.logger.error(\"An error occured: \" + str(e))\n\t\t\texit()\n\n\tdef parseJobXML(self, type):\n\t\tself.logger = logging.getLogger(\"M::Jenkins::ParseJobXML\")\n\t\tself.logger.info(\"Starting\")\n\t\ttry:\n\t\t\tif type == 'NONE':\n\t\t\t\ttpath = 'templates/Empty.xml'\n\t\t\tif type == 'PR':\n\t\t\t\ttpath = 'templates/PR.xml'\n\t\t\tif type == 'CI-DEV':\n\t\t\t\ttpath = 'templates/CI-DEV.xml'\n\t\t\tif type == 'CI-QA':\n\t\t\t\ttpath = 'templates/CI-QA.xml'\n\t\t\tif type == 'FOLDER':\n\t\t\t\ttpath = 'templates/Folder.xml'\n\t\t\ttree = et.parse(tpath)\n\t\t\ttree = tree.getroot()\n\t\t\tm = hashlib.md5()\n\t\t\tletters = string.ascii_lowercase\n\t\t\trdata = ''.join(random.choice(letters) for i in range(12)).encode('utf-8')\n\t\t\tm.update(rdata)\n\t\t\ttoken = 
m.hexdigest()\n\t\t\ttree.find('authToken').text = token\n\t\t\tself.logger.info('XML Parsed')\n\t\t\treturn tree\n\t\texcept Exception as e:\n\t\t\tself.logger.error(\"An error occurred: \" + str(e))\n\t\t\texit()\n" }, { "alpha_fraction": 0.7021182775497437, "alphanum_fraction": 0.7025595903396606, "avg_line_length": 26.634145736694336, "blob_id": "7b56bb4d45a8c25f16a624a1dc1e44f452749420", "content_id": "bad41e0c37b8ad0416477eeae0ecab3260ca29d4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2266, "license_type": "no_license", "max_line_length": 68, "num_lines": 82, "path": "/view_export.py", "repo_name": "igorbannicov/jbb", "src_encoding": "UTF-8", "text": "#!/usr/local/bin/python3\n\nimport os\nimport re\nimport logging\nimport logging.config\nimport configparser\nfrom pprint import pprint as pp\nfrom lib.jenkinsinstance import JenkinsInstance as JI\nfrom lib.utils import *\n\n\nconfig = configparser.ConfigParser()\nconfig.read('config.ini')\nlogging.config.fileConfig('config.ini')\nmylogger = logging.getLogger('CORE')\nmylogger.info(\"Starting application\")\n\nimportsPath = os.getcwd()+'/imports/'\njobs = []\nfolders = []\n\n## Init jenkins object\ntry:\n\tjenkins = JI(url=config['jenkins']['url'], \n\t\t\t user=config['jenkins']['user'], \n\t\t\t password=config['jenkins']['password'])\nexcept Exception as e:\n\tcprint(\"An error occurred: \\n\", \"red\")\n\tcprint(str(e) + \"\\n\", \"red\")\n\tmylogger.error(\"An error occurred at startup: \" + str(e))\n\texit()\n\n## Create folder procedure\ndef make_folder(path):\n\ttry:\n\t\tif not os.path.exists(path):\n\t\t\tos.makedirs(path)\n\t\t\tprint(\"Successfully created the directory %s \" % path)\n\t\telse:\n\t\t\tprint(\"Directory %s already exists\" % path)\n\texcept OSError:\n\t print (\"Creation of the directory %s failed\" % path)\n\n## Transform object url to os path\ndef urlToPath(view, url):\n\tcleanUrl = re.sub(jenkins.url, '', url)\n\tcleanPath = re.sub('/job', '', cleanUrl)\n\tpath = importsPath+view+cleanPath\n\treturn(path)\n\ndef processFolder(view, folder):\n\tfolderPath = urlToPath(view, folder['url'])\n\tmake_folder(folderPath)\n\tjobItems, folderItems = jenkins.getJobsFromFolder(folder)\n\tfor itemFolder in folderItems:\n\t\tprocessFolder(view, itemFolder)\n\tfor jobItem in jobItems:\n\t\tjobPath = folderPath+'/'+jobItem['name'] + '.xml'\n\t\tf = open(jobPath,\"w\")\n\t\tjobcfg = jenkins.getJobDetails(jobItem)\n\t\tf.write(jobcfg)\n\t\tf.close()\n\nmylogger.info(\"Dumping Jenkins view\")\ntry:\n\tview = jenkins.selectView()\n\titems = jenkins.getJobsFromView(view['name'])\n\tviewPath = importsPath+view['name']\n\tmake_folder(viewPath)\n\tfor item in items:\n\t\tif item['_class'] != 'com.cloudbees.hudson.plugins.folder.Folder':\n\t\t\tjobPath = viewPath + '/' + item['name'] + '.xml'\n\t\t\tf = open(jobPath,\"w\")\n\t\t\tjobcfg = jenkins.getJobDetails(item)\n\t\t\tf.write(jobcfg)\n\t\t\tf.close()\n\t\tif item['_class'] == 'com.cloudbees.hudson.plugins.folder.Folder':\n\t\t\tprocessFolder(view['name'], item)\nexcept Exception as e:\n\tmylogger.error(\"An error occurred: \" + str(e))\n\texit()\n" }, { "alpha_fraction": 0.6404485106468201, "alphanum_fraction": 0.6432723999023438, "avg_line_length": 33.59770202636719, "blob_id": "1809fa03b2b33b661684bb20595d2befe514461e", "content_id": "d0b74323c65f3363d0fc4afb44e9b56d960a6c3e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12040, "license_type": "no_license", 
"max_line_length": 142, "num_lines": 348, "path": "/lib/bitbucketinstance.py", "repo_name": "igorbannicov/jbb", "src_encoding": "UTF-8", "text": "import os\nimport requests\nimport json\nimport logging\nfrom urllib.parse import urlparse\nfrom pprint import pprint as pp\nfrom .utils import *\n\n\ntriggers_list = ['APPROVED', \n\t\t\t\t 'COMMENTED',\n \t\t\t\t 'DECLINED',\n \t\t\t\t 'DELETED',\n \t\t\t\t 'MERGED',\n \t\t\t\t 'OPENED',\n \t\t\t\t 'BUTTON_TRIGGER',\n \t\t\t\t 'REOPENED',\n 'RESCOPED_FROM',\n \t\t\t 'RESCOPED_TO',\n \t\t\t\t 'REVIEWED',\n \t\t\t\t 'UNAPPROVED',\n 'UPDATED']\n\nfilters_list = ['NONE', 'PULL_REQUEST_TO_BRANCH', 'BUTTON_TRIGGER_TITLE']\n\nclass BitBucketInstance:\n\n\n\tdef __init__(self, url='http://localhost:8080', user = 'admin', password = 'admin'):\n\t\tu = urlparse(url)\n\t\tself.logger = logging.getLogger(\"M::Bitbucket\")\n\t\tself._url_ = u.netloc\n\t\tself._user_ = user\n\t\tself._password_ = password\n\t\tself._api_ = u.scheme + '://' + u.netloc + '/rest/api/1.0/' # BitBucket Stash API URL\n\t\tself._napi_ = u.scheme + '://' + u.netloc + '/rest/prnfb-admin/1.0/settings/notifications' # BitBucket Stash Pull Request Notifier API URL\n\t\tself._bapi_ = u.scheme + '://' + u.netloc + '/rest/prnfb-admin/1.0/settings/buttons' # BitBucket Stash Pull Request Notifier Buttons API URL\n\t\tself.logger.info(\"Module initialized\")\n\n\t# Get list of all available projects\n\tdef getProjects(self):\n\t\tself.logger = logging.getLogger(\"M::Bitbucket::GetProjects\")\n\t\tself.logger.info(\"Starting\")\n\t\ttry:\n\t\t\tplist = []\n\t\t\turl = self._api_ + 'projects/'\n\t\t\trq = requests.get(url, auth=(self._user_, self._password_))\n\t\t\tx = 0\n\t\t\tfor item in rq.json()['values']:\n\t\t\t\tplist.append(item['key'])\n\t\t\treturn plist\n\t\texcept Exception as e:\n\t\t\tself.logger.error(\"An error occurred: \" + str(e))\n\t\t\texit()\n\n\t# Get list of all available repos in a project\n\tdef getRepos(self, project):\n\t\tself.logger = logging.getLogger(\"M::Bitbucket::GetRepos\")\n\t\tself.logger.info(\"Starting\")\n\t\ttry:\n\t\t\trlist = []\n\t\t\turl = self._api_ + 'projects/' + project + '/repos?limit=999'\n\t\t\trq = requests.get(url, auth=(self._user_, self._password_))\n\t\t\tx = 0\n\t\t\tfor item in rq.json()['values']:\n\t\t\t\trlist.append(item['slug'])\n\t\t\treturn rlist\n\t\texcept Exception as e:\n\t\t\tself.logger.error(\"An error occurred: \" + str(e))\n\t\t\texit()\n\n\t# Get list of all available branches in a repo\n\tdef getBranches(self, project, repo):\n\t\tself.logger = logging.getLogger(\"M::Bitbucket::GetBranches\")\n\t\tself.logger.info(\"Starting\")\n\t\ttry:\n\t\t\tbrlist = []\n\t\t\turl = self._api_ + 'projects/' + project + '/repos/' + repo + '/branches'\n\t\t\trq = requests.get(url, auth=(self._user_, self._password_))\n\t\t\tx = 0\n\t\t\tfor item in rq.json()['values']:\n\t\t\t\tbrlist.append(item['displayId'])\n\t\t\treturn brlist\n\t\texcept Exception as e:\n\t\t\tself.logger.error(\"An error occurred: \" + str(e))\n\t\t\texit()\n\n\t# Get list of all available PR notifications for a repo\n\tdef getNotifications(self, project, repo):\n\t\tself.logger = logging.getLogger(\"M::Bitbucket::GetNotifications\")\n\t\tself.logger.info(\"Starting\")\n\t\ttry:\n\t\t\tnotifications = []\n\t\t\trq = requests.get(self._napi_, auth=(self._user_, self._password_))\n\t\t\tfor item in rq.json():\n\t\t\t\tif item[\"repositorySlug\"] == repo and item[\"projectKey\"] == project:\n\t\t\t\t\tnotifications.append(item)\n\t\t\treturn 
notifications\n\t\texcept Exception as e:\n\t\t\tself.logger.error(\"An error occurred: \" + str(e))\n\t\t\texit()\n\n\t# Get list of all available PR buttons for a repo\n\tdef getButtons(self, project, repo):\n\t\tself.logger = logging.getLogger(\"M::Bitbucket::GetButtons\")\n\t\tself.logger.info(\"Starting\")\n\t\ttry:\n\t\t\tbuttons = []\n\t\t\trq = requests.get(self._bapi_, auth=(self._user_, self._password_))\n\t\t\tfor item in rq.json():\n\t\t\t\tif item[\"repositorySlug\"] == repo and item[\"projectKey\"] == project:\n\t\t\t\t\tbuttons.append(item)\n\t\t\treturn buttons\n\t\texcept Exception as e:\n\t\t\tself.logger.error(\"An error occurred: \" + str(e))\n\t\t\texit()\n\n\t# Delete PR notifications by uuid\n\tdef deleteNotification(self, uuid):\n\t\tself.logger = logging.getLogger(\"M::Bitbucket::DeleteNotification\")\n\t\tself.logger.info(\"Starting\")\n\t\ttry:\n\t\t\turl = self._napi_ + \"/\" + uuid\n\t\t\trq = requests.delete(url, auth=(self._user_, self._password_))\n\t\texcept Exception as e:\n\t\t\tself.logger.error(\"An error occurred: \" + str(e))\n\t\t\texit()\n\n\t# Update PR notifications by uuid\n\tdef updateNotification(self, uuid, payload):\n\t\tself.logger = logging.getLogger(\"M::Bitbucket::UpdateNotification\")\n\t\tself.logger.info(\"Starting\")\n\t\ttry:\n\t\t\turl = self._napi_ + \"/\" + uuid\n\t\t\trq = requests.post(url, auth=(self._user_, self._password_), json=payload)\n\t\texcept Exception as e:\n\t\t\tself.logger.error(\"An error occurred: \" + str(e))\n\t\t\texit()\n\n\t# Function to select triggers from list\n\tdef selectTriggers(self):\n\t\tself.logger = logging.getLogger(\"M::Bitbucket::SelectTriggers\")\n\t\tself.logger.info(\"Starting\")\n\t\tos.system('clear')\n\t\ttry:\n\t\t\tready = False\n\t\t\ttlist = []\n\t\t\twhile not ready:\n\t\t\t\tcprint(\"Triggers: \\n\", \"green\")\n\t\t\t\tfor x in range(len(triggers_list)):\n\t\t\t\t\tprint('{:3}'.format(str(x)) + \" -> \" + triggers_list[x])\n\t\t\t\torder = input(\"Select a trigger from list to add: \")\n\t\t\t\ttry:\n\t\t\t\t\ttlist.append(triggers_list[int(order)])\n\t\t\t\t\tconfirm = input(\"Select another? 
(Y/N) \").upper()\n\t\t\t\t\tif confirm != 'Y':\n\t\t\t\t\t\tready = True\n\t\t\t\texcept Exception as e:\n\t\t\t\t\tself.logger.error(\"An error occurred: \" + str(e))\n\t\t\t\t\texit()\n\t\t\treturn tlist\n\t\texcept Exception as e:\n\t\t\tself.logger.error(\"An error occurred: \" + str(e))\n\t\t\texit()\n\n\t# Function to select filters from list\n\tdef selectFilters(self, project, repo):\n\t\tself.logger = logging.getLogger(\"M::Bitbucket::SelectFilters\")\n\t\tself.logger.info(\"Starting\")\n\t\ttry:\n\t\t\tready = False\n\t\t\twhile not ready:\n\t\t\t\tos.system('clear')\n\t\t\t\tcprint(\"Filters available: \\n\", \"green\")\n\t\t\t\tfor x in range(len(filters_list)):\n\t\t\t\t\tprint('{:3}'.format(str(x)) + \" -> \" + filters_list[x])\n\t\t\t\tforder = input(\"Select a filter from the list: \")\n\t\t\t\ttry:\n\t\t\t\t\t# In case we want to filter by buttons\n\t\t\t\t\tif filters_list[int(forder)] == 'BUTTON_TRIGGER_TITLE':\n\t\t\t\t\t\tos.system('clear')\n\t\t\t\t\t\tbuttons_list = self.getButtons(project, repo)\n\t\t\t\t\t\tcprint(\"\\tButtons available: \\n\", \"green\")\n\t\t\t\t\t\tfor b in range(len(buttons_list)):\n\t\t\t\t\t\t\tprint('{:3}'.format(str(b)) + \" -> \" + buttons_list[b]['name'])\n\t\t\t\t\t\torder = input(\"Select a button from list to add: \")\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\tbutton = buttons_list[int(order)]['name']\n\t\t\t\t\t\t\treturn filters_list[int(forder)], button\n\t\t\t\t\t\texcept Exception as e:\n\t\t\t\t\t\t\tself.logger.error(\"An error occurred: \" + str(e))\n\t\t\t\t\t\t\texit()\n\n\t\t\t\t\t# In case we want to filter by target branch\n\t\t\t\t\tif filters_list[int(forder)] == 'PULL_REQUEST_TO_BRANCH':\n\t\t\t\t\t\tbranches_list = self.getBranches(project, repo)\n\t\t\t\t\t\tos.system('clear')\n\t\t\t\t\t\tcprint(\"\\tBranches available: \\n\", \"green\")\n\t\t\t\t\t\tfor br in range(len(branches_list)):\n\t\t\t\t\t\t\tprint('{:3}'.format(str(br)) + \" -> \" + branches_list[br])\n\t\t\t\t\t\torder = input(\"Select a branch from list to add: \")\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\tbranch = branches_list[int(order)]\n\t\t\t\t\t\t\treturn filters_list[int(forder)], branch\n\t\t\t\t\t\texcept Exception as e:\n\t\t\t\t\t\t\tself.logger.error(\"An error occurred: \" + str(e))\n\t\t\t\t\t\t\texit()\n\t\t\t\t\tif filters_list[int(forder)] == 'NONE':\n\t\t\t\t\t\treturn None, None\n\t\t\t\texcept Exception as e:\n\t\t\t\t\tself.logger.error(\"An error occurred: \" + str(e))\n\t\t\t\t\texit()\n\t\texcept Exception as e:\n\t\t\tself.logger.error(\"An error occurred: \" + str(e))\n\t\t\texit()\n\t\t\n\t# Function to select a project\n\tdef selectProject(self):\n\t\tself.logger = logging.getLogger(\"M::Bitbucket::SelectProject\")\n\t\tself.logger.info(\"Starting\")\n\t\ttry:\n\t\t\tos.system('clear')\n\t\t\tcprint(\"Projects list:\\n\", \"green\")\n\t\t\tprojects = self.getProjects()\n\t\t\tfor p in range(len(projects)):\n\t\t\t\tprint('{:3}'.format(str(p)) + \" -> \" + projects[p])\n\t\t\torder = input(\"Select a Bitbucket project from list: \")\n\t\t\ttry:\n\t\t\t\treturn projects[int(order)]\n\t\t\texcept Exception as e:\n\t\t\t\tself.logger.error(\"An error occurred: \" + str(e))\n\t\t\t\texit()\n\t\texcept Exception as e:\n\t\t\tself.logger.error(\"An error occurred: \" + str(e))\n\t\t\texit()\n\n\t# Function to select a repo\n\tdef selectRepo(self, project):\n\t\tself.logger = logging.getLogger(\"M::Bitbucket::SelectRepo\")\n\t\tself.logger.info(\"Starting\")\n\t\ttry:\n\t\t\tos.system('clear')\n\t\t\tcprint(\"Repositories list:\\n\", \"green\")\n\t\t\trepos = 
self.getRepos(project)\n\t\t\tfor r in range(len(repos)):\n\t\t\t\tprint('{:3}'.format(str(r)) + \" -> \" + repos[r])\n\t\t\torder = input(\"Select a Bitbucket repo from list: \")\n\t\t\ttry:\n\t\t\t\treturn repos[int(order)]\n\t\t\texcept Exception as e:\n\t\t\t\tself.logger.error(\"An error occurred: \" + str(e))\n\t\t\t\texit()\n\t\texcept Exception as e:\n\t\t\tself.logger.error(\"An error occurred: \" + str(e))\n\t\t\texit()\n\n\t# Function to select a notification\n\tdef selectNotification(self, project, repo):\n\t\tself.logger = logging.getLogger(\"M::Bitbucket::SelectNotification\")\n\t\tself.logger.info(\"Starting\")\n\t\ttry:\n\t\t\tos.system('clear')\n\t\t\tcprint(\"Notifications list:\\n\", \"green\")\n\t\t\tnotifications = self.getNotifications(project=project, repo=repo)\n\t\t\tfor x in range(len(notifications)):\n\t\t\t\tprint('{:3}'.format(str(x)) + '{:30}'.format(notifications[x]['name']) + notifications[x]['uuid'])\n\t\t\torder = input(\"Select a notification from list: \")\n\t\t\ttry:\n\t\t\t\treturn notifications[int(order)]\n\t\t\texcept Exception as e:\n\t\t\t\tself.logger.error(\"An error occurred: \" + str(e))\n\t\t\t\texit()\n\t\texcept Exception as e:\n\t\t\tself.logger.error(\"An error occurred: \" + str(e))\n\t\t\texit()\n\n\t# Function to add a notification\n\tdef addNotification(self, url, user, password):\n\t\tself.logger = logging.getLogger(\"M::Bitbucket::AddNotification\")\n\t\tself.logger.info(\"Starting\")\n\t\ttry:\n\t\t\tu = urlparse(url)\n\t\t\tjenkinsurl = u.scheme + '://' + u.netloc + '/view/crumbIssuer/api/xml?xpath=//crumb/text()'\n\t\t\tnotification = {}\n\t\t\tos.system('clear')\n\t\t\tcprint(\"Enter the data for new PR notification:\\n\", \"green\")\n\t\t\tname = input(\"Name: \")\n\t\t\tif name != \"\":\n\t\t\t\tnotification['name'] = name\n\t\t\telse:\n\t\t\t\tself.logger.error(\"Creating notification failed - name not specified\")\n\t\t\t\tcprint(\"Notification name cannot be empty.\\n\", \"red\")\n\t\t\t\texit()\n\n\t\t\t# Creating notification\n\t\t\tnotification['uuid'] = \"\"\n\t\t\tnotification['url'] = url\n\t\t\tnotification['method'] = 'GET'\n\t\t\tnotification['triggerIfCanMerge'] = 'ALWAYS'\n\t\t\tnotification['triggerIgnoreStateList'] = []\n\t\t\tnotification['updatePullRequestRefs'] = False\n\t\t\tnotification['postContentEncoding'] = 'NONE'\n\t\t\tnotification['httpVersion'] = 'HTTP_1_0'\n\t\t\tnotification['headers'] = []\n\t\t\tnotification['injectionUrl'] = jenkinsurl\n\t\t\tnotification['injectionUrlRegexp'] = '<crumb>([^<]*)</crumb>'\n\t\t\tnotification['user'] = user\n\t\t\tnotification['password'] = password\n\t\t\tnotification['triggers'] = self.selectTriggers()\n\t\t\tproject = self.selectProject()\n\t\t\trepo = self.selectRepo(project)\n\t\t\tnotification['projectKey'] = project\n\t\t\tnotification['repositorySlug'] = repo\n\t\t\tfstring, fregexp = self.selectFilters(project, repo)\n\t\t\tif fstring == None:\n\t\t\t\tnotification['filterString'] = ''\n\t\t\telse:\n\t\t\t\tnotification['filterString'] = '${' + fstring + '}'\n\t\t\tif fregexp == None:\n\t\t\t\tnotification['filterRegexp'] = ''\n\t\t\telse:\n\t\t\t\tnotification['filterRegexp'] = fregexp\n\t\t\t\n\t\t\ttry:\n\t\t\t\theaders = {'Content-Type': 'application/json; charset=UTF-8', 'Accept': 'application/json, text/javascript, */*; q=0.01'}\n\t\t\t\tdata = json.dumps(notification, sort_keys=True)\n\t\t\t\tos.system('clear')\n\t\t\t\tcprint(\"Trying to create notification: \", \"yellow\")\n\t\t\t\trq = requests.post(self._napi_, auth=(self._user_, self._password_), 
data=data, headers=headers)\n\t\t\t\tif rq.status_code == 200:\n\t\t\t\t\tcprint(\"OK.\\n\", \"green\")\n\t\t\t\t\tself.logger.info(\"Notification \" + notification['name'] + \" created for project \" + project + \", repo \" + repo)\n\t\t\t\telse:\n\t\t\t\t\tcprint(\"FAIL!\\n\", \"red\")\n\t\t\t\t\tcprint(str(rq.status_code) + \"\\n\", \"red\")\n\t\t\t\t\tself.logger.error(\"Creating notification failed: HTTP response code \" + str(rq.status_code))\n\t\t\t\t\tself.logger.error(\"HTTP response body \" + str(rq.text))\n\t\t\t\tcprint(\"Press any key to continue\", \"yellow\")\n\t\t\texcept Exception as e:\n\t\t\t\tcprint(\"Notification could not be created.\\n\", \"red\")\n\t\t\t\tpp(notification)\n\t\t\t\tcprint(\"An error occurred.\\n\", 'red')\n\t\t\t\tcprint(str(e) + '\\n', 'red')\n\t\t\t\tself.logger.error(\"Creating notification failed: \" + str(e))\n\t\t\t\texit()\n\t\texcept Exception as e:\n\t\t\tself.logger.error(\"An error occurred: \" + str(e))\n\t\t\texit()\n" }, { "alpha_fraction": 0.755359411239624, "alphanum_fraction": 0.755359411239624, "avg_line_length": 48.5625, "blob_id": "e5dd6f231ce449f44793861745bdde4222de51c5", "content_id": "fb5ae06c17941228759361b73a460fcfba6ec9c2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 793, "license_type": "no_license", "max_line_length": 167, "num_lines": 16, "path": "/README.md", "repo_name": "igorbannicov/jbb", "src_encoding": "UTF-8", "text": "# jbb\nJenkins + Bitbucket stash scripts\n\nIf anyone finds this useful - feel free to use it!\n\n_clean_all.py_ - deletes ALL jobs, folders and views from Jenkins\n\n_clean_view.py_ - deletes ALL jobs and folders from a view, and after that deletes the view\n\n_view_export.py_ - lets you select a view and creates a full dump of the ordered(!) folders and jobs structure of that view locally (in \"imports\" subfolder)\n\n_view_import.py_ - imports the structure of all(!) 
views in subfolder \"imports\" to Jenkins (creates views, folders and jobs)\n\n_main.py_ - lets you view, create from template and delete Jenkins jobs/folders, and also lets you view, create or delete a PR notification in Bitbucket repositories.\n\n__The code is not ideal at all, it was written just to work. Feel free to improve it!__\n" }, { "alpha_fraction": 0.6565328240394592, "alphanum_fraction": 0.6609421968460083, "avg_line_length": 25.598766326904297, "blob_id": "50591fc15f76d2fc8be39cad0a12ac1bb289c84e", "content_id": "27f86720b2607fa5c665c542426d60d1232f8c1e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4309, "license_type": "no_license", "max_line_length": 132, "num_lines": 162, "path": "/main.py", "repo_name": "igorbannicov/jbb", "src_encoding": "UTF-8", "text": "#!/usr/local/bin/python3\n\nimport os\nimport logging\nimport logging.config\nimport configparser\nfrom pprint import pprint as pp\nfrom lib.jenkinsinstance import JenkinsInstance as JI\nfrom lib.bitbucketinstance import BitBucketInstance as BI\nfrom lib.utils import *\n\n\nfinished = False\nconfig = configparser.ConfigParser()\nconfig.read('config.ini')\nlogging.config.fileConfig('config.ini')\nmylogger = logging.getLogger('CORE')\nmylogger.info(\"Starting application\")\n\ntry:\n\tbucket = BI(url=config['bitbucket']['url'],\n\t\t\t\tuser=config['bitbucket']['user'], \n\t\t\t\tpassword=config['bitbucket']['password'])\n\tjenkins = JI(url=config['jenkins']['url'], \n\t\t\t user=config['jenkins']['user'], \n\t\t\t password=config['jenkins']['password'])\nexcept Exception as e:\n\tcprint(\"An error occurred: \\n\", \"red\")\n\tcprint(str(e) + \"\\n\", \"red\")\n\tmylogger.error(\"An error occurred at startup: \" + str(e))\n\texit()\n\nmenu_items = ['View Jenkins job', \t\t\t#1\n 'Create Jenkins job', \t\t#2\n 'Delete Jenkins job', \t\t#3\n 'Delete Jenkins view',\t\t#4\n 'View PR notifications',\t\t#5\n 'Create PR notification',\t\t#6\n 'Delete PR notification', \t#7\n 'Quit']\t\t\t\t\t\t#8\n\ndef viewJenkinsJob():\n\tmylogger.info(\"View Jenkins job\")\n\ttry:\n\t\tview = jenkins.selectView()\n\t\tjob = jenkins.selectJob(view['name'])\n\t\tjobcfg = jenkins.getJobDetails(job)\n\t\tpp(jobcfg)\n\texcept Exception as e:\n\t\tmylogger.error(\"An error occurred: \" + str(e))\n\t\texit()\n\ndef createJenkinsJob():\n\tmylogger.info(\"Create Jenkins job\")\n\ttry:\n\t\tjenkins.createJob()\n\texcept Exception as e:\n\t\tmylogger.error(\"An error occurred: \" + str(e))\n\t\texit()\n\ndef deleteJenkinsJob():\n\tmylogger.info(\"Delete Jenkins job\")\n\ttry:\n\t\tview = jenkins.selectView()\n\t\tjob = jenkins.selectJob(view['name'])\n\t\tjobcfg = jenkins.getJobDetails(job)\n\t\tjenkins.deleteJob(job)\n\texcept Exception as e:\n\t\tmylogger.error(\"An error occurred: \" + str(e))\n\t\texit()\n\ndef notificationView():\n\tmylogger.info(\"Notifications view\")\n\ttry:\n\t\tproject = bucket.selectProject()\n\t\trepo = bucket.selectRepo(project)\n\t\tnotification = bucket.selectNotification(project, repo)\n\t\tpp(notification)\n\t\tcprint(\"Press any key to continue\", \"green\")\n\texcept Exception as e:\n\t\tmylogger.error(\"An error occurred: \" + str(e))\n\t\texit()\n\ndef notificationCreate():\n\tmylogger.info(\"Create notification\")\n\ttry:\n\t\tview = jenkins.selectView()\n\t\tjob = jenkins.selectJob(view['name'])\n\t\ttoken = ''  # default so the URL below still builds for non-pipeline jobs\n\t\tif job['_class'] == \"org.jenkinsci.plugins.workflow.job.WorkflowJob\":\n\t\t\tmylogger.info(\"Pipeline detected. 
Getting token if exists.\")\n\t\t\ttoken = str(jenkins.getJobDetails(job)['flow-definition']['authToken'])\n\t\t\tmylogger.info(token)\n\t\turl = jenkins.url + '/view/' + view['name'] + '/job/' + job['name'] + '/buildWithParameters?token=' + token + '&${EVERYTHING_URL}'\n\t\tmylogger.info(\"URL generated: \" + url)\n\t\tbucket.addNotification(url, config['jenkins']['user'], config['jenkins']['password'])\n\texcept Exception as e:\n\t\tmylogger.error(\"An error occurred: \" + str(e))\n\t\texit()\n\ndef notificationDelete():\n\tmylogger.info(\"Delete Notification\")\n\ttry:\n\t\tproject = bucket.selectProject()\n\t\trepo = bucket.selectRepo(project)\n\t\tnotification = bucket.selectNotification(project, repo)\n\t\tpp(notification)\n\t\tcprint(\"Are you sure you want to delete this PR Notification? (Y/N) \", \"red\")\n\t\tconfirm = input()\n\t\tconfirm = confirm.upper()\n\t\tif confirm == \"Y\":\n\t\t\tbucket.deleteNotification(notification['uuid'])\n\texcept Exception as e:\n\t\tmylogger.error(\"An error occurred: \" + str(e))\n\t\texit()\n\n\n\nwhile not finished:\n\tos.system('clear')\n\tprint('\\t Select an option\\n')\n\tfor x in range(len(menu_items)):\n\t\tcprint('{:2}'.format(str(x + 1)), \"orange\")\n\t\tprint(\" ...... \" + menu_items[x])\n\tprint('\\n')\n\tkey = getch()\n\ttry:\n\t\toption = int(key)\n\t\tif option == 1:\n\t\t\tviewJenkinsJob()\n\t\t\tgetch()\n\t\telif option == 2:\n\t\t\tcreateJenkinsJob()\n\t\t\tgetch()\n\t\telif option == 3:\n\t\t\tdeleteJenkinsJob()\n\t\t\tgetch()\n\t\telif option == 4:\n\t\t\tdeleteJenkinsView()\n\t\t\tgetch()\n\t\telif option == 5:\n\t\t\tnotificationView()\n\t\t\tgetch()\n\t\telif option == 6:\n\t\t\tnotificationCreate()\n\t\t\tgetch()\n\t\telif option == 7:\n\t\t\tnotificationDelete()\n\t\t\tgetch()\n\t\telif option == 8:\n\t\t\tmylogger.info(\"Exit\")\n\t\t\tfinished = True\n\t\t\tos.system('clear')\n\t\t\texit()\n\t\telse:\n\t\t\tpass\n\texcept:\n\t\tif key == 'q':\n\t\t\tmylogger.info(\"Exit\")\n\t\t\tos.system('clear')\n\t\t\texit()\n\t\telse:\n\t\t\tpass\n" }, { "alpha_fraction": 0.668653130531311, "alphanum_fraction": 0.6716328859329224, "avg_line_length": 24.691177368164062, "blob_id": "cda9d4cbe783a23300d629abc5085501f969dec6", "content_id": "1a07cb33ef053c687b44f6c39aa56be6891fd09d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1678, "license_type": "no_license", "max_line_length": 57, "num_lines": 68, "path": "/view_import.py", "repo_name": "igorbannicov/jbb", "src_encoding": "UTF-8", "text": "#!/usr/local/bin/python3\n\nimport os\nimport re\nimport logging\nimport logging.config\nimport configparser\nfrom pprint import pprint as pp\nfrom lib.jenkinsinstance import JenkinsInstance as JI\nfrom lib.utils import *\n\n\nconfig = configparser.ConfigParser()\nconfig.read('config.ini')\nlogging.config.fileConfig('config.ini')\nmylogger = logging.getLogger('CORE')\nmylogger.info(\"Starting application\")\n\nimportsPath = os.getcwd()+'/imports/'\n\ntry:\n\tjenkins = JI(url=config['jenkins_import']['url'], \n\t\t\t user=config['jenkins_import']['user'], \n\t\t\t password=config['jenkins_import']['password'])\nexcept Exception as e:\n\tcprint(\"An error occurred: \\n\", \"red\")\n\tcprint(str(e) + \"\\n\", \"red\")\n\tmylogger.error(\"An error occurred at startup: \" + str(e))\n\texit()\n\ndef jobPathToUrl(path):\n\turlPath = ''\n\tpath = re.sub(importsPath, '', path)\n\tif path[0] == '/':\n\t\tpath = path[1:]\n\tfolders = path.split('/')\n\tfor item in folders:\n\t\tif item != '':\n\t\t\tif 
folders.index(item) == 0:\n\t\t\t\turlPath = '/view/' + item\n\t\t\telse:\n\t\t\t\tif item.endswith('.xml'):\n\t\t\t\t\tpass\n\t\t\t\telse:\n\t\t\t\t\turlPath = urlPath + '/job/' + item\n\treturn(urlPath)\n\ndef parseFolder(view, path):\n\tfor item in os.listdir(path):\n\t\tmyPath = path + '/' + item\n\t\tif os.path.isdir(myPath):\n\t\t\tprint (\"Found folder %s\" % item)\n\t\t\tjenkins.createFolder(item, jobPathToUrl(path))\n\t\t\tparseFolder(view, myPath)\n\t\telse:\n\t\t\tprint(\"Found job %s\" % item)\n\t\t\turlpath = jobPathToUrl(myPath)\n\t\t\tname = item.split('.')[0]\n\t\t\tjenkins.createNewJob(name, myPath, urlpath)\n\n\n\nfor item in os.listdir(importsPath):\n\tprint(\"Creating view %s \" % item)\n\tjenkins.createListView(item)\n\tprint(\"Done\")\n\tviewPath = importsPath + '/' + item\n\tparseFolder(item, viewPath)" }, { "alpha_fraction": 0.6791045069694519, "alphanum_fraction": 0.6796785593032837, "avg_line_length": 25.799999237060547, "blob_id": "b9c3c1a45e1c75f2f36bebf7421e9072860b059b", "content_id": "e3bef026c509525c553ba34ee69320b6a2c19102", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1742, "license_type": "no_license", "max_line_length": 70, "num_lines": 65, "path": "/clean_all.py", "repo_name": "igorbannicov/jbb", "src_encoding": "UTF-8", "text": "#!/usr/local/bin/python3\n\nimport os\nimport re\nimport logging\nimport logging.config\nimport configparser\nfrom pprint import pprint as pp\nfrom lib.jenkinsinstance import JenkinsInstance as JI\nfrom lib.utils import *\n\n\nconfig = configparser.ConfigParser()\nconfig.read('config.ini')\nlogging.config.fileConfig('config.ini')\nmylogger = logging.getLogger('CORE')\nmylogger.info(\"Starting application\")\n\njobs = []\nfolders = []\n\ntry:\n\tjenkins = JI(url=config['jenkins_import']['url'], \n\t\t\t user=config['jenkins_import']['user'], \n\t\t\t password=config['jenkins_import']['password'])\nexcept Exception as e:\n\tcprint(\"An error occured: \\n\", \"red\")\n\tcprint(str(e) + \"\\n\", \"red\")\n\tmylogger.error(\"An error occured at startup: \" + str(e))\n\texit()\n\n\ndef parseFolder(folder):\n\titems = ()\n\titems = jenkins.getJobsFromFolder(folder)\n\tif items != ():\n\t\tfor entry in items:\n\t\t\tfor item in entry:\n\t\t\t\tif item['_class'] == 'com.cloudbees.hudson.plugins.folder.Folder':\n\t\t\t\t\tprint (\"Found folder %s\" % item['name'])\n\t\t\t\t\tparseFolder(item)\n\t\t\t\telse:\n\t\t\t\t\tprint(\"Found job %s\" % item['name'])\n\t\t\t\t\tjenkins.deleteJob(item)\n\tjenkins.deleteJob(folder)\n\n\nmylogger.info(\"Dumping Jenkins view\")\ntry:\n\tviews = jenkins.getViews()\n\tfor view in views:\n\t\tprint(\"Deleting view %s \" % view['name'])\n\t\tpath = '/view/'+view['name']\n\t\titems = jenkins.getJobsFromView(view['name'])\n\t\tfor item in items:\n\t\t\tpath = path + '/job/'+item['name']\n\t\t\tif item['_class'] == 'com.cloudbees.hudson.plugins.folder.Folder':\n\t\t\t\tparseFolder(item)\n\t\t\tif item['_class'] != 'com.cloudbees.hudson.plugins.folder.Folder':\n\t\t\t\tprint('Deleting job %s ' % item['name'])\n\t\t\t\tjenkins.deleteJob(item)\n\t\tjenkins.deleteView(view)\nexcept Exception as e:\n\tmylogger.error(\"An error occured: \" + str(e))\n\texit()\n" } ]
9
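The jbb record above drives Jenkins almost entirely through its createItem endpoint. A minimal standalone sketch of that call pattern follows; the host, user name and token are hypothetical placeholders, not values from the record, and a real server may additionally require a CSRF crumb, as the record's notification code hints:

import requests

jenkins_url = "http://jenkins.example.com"   # hypothetical host
session = requests.Session()
session.auth = ("user.name", "api-token")    # hypothetical credentials

def create_folder(name, path=""):
    # Same call shape as createFolder in lib/jenkinsinstance.py:
    # POST <path>/createItem with the CloudBees Folder mode.
    url = (jenkins_url + path + "/createItem?name=" + name
           + "&mode=com.cloudbees.hudson.plugins.folder.Folder&Submit=OK")
    rq = session.post(url, headers={"Content-Type": "application/json"})
    return rq.status_code == 200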
AntoData/PyDesktopBrowserRecorder
https://github.com/AntoData/PyDesktopBrowserRecorder
e27cc7a50703aa98adc3a55e5f745f32a43b633a
d88ff7d5ae7fb0379de8038ebc9e6f69e642a244
34a8d342d010fff3031b8a791f537694c85dae7d
refs/heads/master
2022-02-27T17:37:48.529132
2022-02-10T20:28:32
2022-02-10T20:28:32
244,056,721
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.7544150352478027, "alphanum_fraction": 0.7665562629699707, "avg_line_length": 33.86538314819336, "blob_id": "3f06f637027b1829c3d5203b9bdc99363eea7985", "content_id": "3e4ace801959889cca14dda5117a247f0ffe0390", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1812, "license_type": "permissive", "max_line_length": 129, "num_lines": 52, "path": "/pydesktopbrowerrecorder/README.md", "repo_name": "AntoData/PyDesktopBrowserRecorder", "src_encoding": "UTF-8", "text": "# PyDesktopBrowserRecorder\n This project allows you to record your desktop or the browser during an automated test using selenium's webdriver\n\n To install it, just open a terminal and execute:\n\n pip install pydesktopbrowserrecorder\n\n PyPI Project: https://pypi.org/project/pydesktopbrowserrecorder/\n\n Once installed, you just have to make the following import:\n \n from selenium_browser_desktop_recorder import SeleniumBrowserRecorder\n \n Then you create a SeleniumBrowserRecorder object, you have two modes.\n\n First mode:\n\n obj = SeleniumBrowserRecorder(folder,encoding)\n\n - folder: Folder where we want to create the folder that will contain the videos for our recording session\n - encoding: Encoding of the video. We can only guarantee that \".mp4\" will work\n This way, when you start the recording session, the desktop will be recorded until we stop the recording session\n \n But you can also provide a third parameter:\n\n obj = SeleniumBrowserRecorder(folder,encoding,driver)\n - driver: A webdriver object\n \n In this case, we will record only the browser window(s) that are being controlled by that webdriver object\n \n To start the recording session once we build the object we only have to use this method:\n \n obj.start_recording_session()\n \n And our object will start recording in a parallel thread\n \n To stop the recording session we only have to:\n\n obj.stop_recording_session()\n\n The video will be saved and the threads finished\n \n NOTE: If we are recording a browser and the size of it changes, we will stop the current video and start a new one with the new\n size of the window\n \nNOTE: This was developed using the following versions of the following external libraries:\n- imageio_ffmpeg = 0.4.5 \n- numpy~=1.22.2 \n- imageio==2.15.0 \n- PyAutoGUI~=0.9.53 \n- selenium~=4.1.0 \n- Pillow~=9.0.1" }, { "alpha_fraction": 0.52173912525177, "alphanum_fraction": 0.717391312122345, "avg_line_length": 15.5, "blob_id": "de67aab670df3d70be2b90150caf7ddc801f4456", "content_id": "b3a47a30fbcb83f3be2ebb56484b43031d2e1472", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 92, "license_type": "permissive", "max_line_length": 17, "num_lines": 6, "path": "/pydesktopbrowerrecorder/requirements.txt", "repo_name": "AntoData/PyDesktopBrowserRecorder", "src_encoding": "UTF-8", "text": "imageio-ffmpeg\nnumpy~=1.22.2\nimageio==2.15.0\nPyAutoGUI~=0.9.53\nselenium~=4.1.0\nPillow~=9.0.1" }, { "alpha_fraction": 0.6140486001968384, "alphanum_fraction": 0.6165573000907898, "avg_line_length": 35.751773834228516, "blob_id": "3c9d3967f923a42ef6deb81f440adf09170880e7", "content_id": "572fa90af9a06bd964ca273967c89d2778258d1b", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10364, "license_type": "permissive", "max_line_length": 79, "num_lines": 282, "path": 
"/pydesktopbrowerrecorder/selenium_browser_desktop_recorder.py", "repo_name": "AntoData/PyDesktopBrowserRecorder", "src_encoding": "UTF-8", "text": "\"\"\"\nContains the basic functionality that records the videos using an instance of\nwebdriver provided by the user that we clone or the whole desktop if not\nprovided\n\nClasses:\n    SeleniumBrowserRecorder\n\nExceptions:\n    SessionStartedException\n    NoSessionStartedException\n\n\"\"\"\nfrom __future__ import annotations\n\nimport io\nimport threading\nimport os\nimport time\nfrom datetime import datetime\nimport numpy as np\nfrom selenium import webdriver\nimport imageio\nimport pyautogui\nfrom PIL import Image\n\n\nclass SeleniumBrowserRecorder:\n    \"\"\"\n    Creates one or several videos using frames taken from a browser using\n    Selenium webdriver or the desktop\n\n    :ivar driver: Instance of webdriver we use to record a browser session\n    (if None we record the desktop)\n    :type driver: webdriver.remote.webdriver.WebDriver\n    :ivar __folder: Contains the path where we will create folders that contain\n    the videos created during the session\n    :type __folder: str\n    :ivar __session_path: Contains the path to the folder where we save the\n    videos of the recording session\n    :type __session_path: str\n    :ivar __encoding: Encoding of the video\n    :type __encoding: str\n    :ivar __keep_recording: Flag to keep adding frames to the video or stop the\n    session\n    :type __keep_recording: bool\n    :ivar frame: Frame to be added to the video\n    :type frame: np.typing.ArrayLike\n    :ivar __thread: Contains the thread that will start adding frames to the\n    video\n    :type __thread: threading.Thread\n    \"\"\"\n\n    def __init__(self, folder: str, encoding: str, driver: webdriver = None):\n        \"\"\"\n        Class constructor\n\n        :param folder: Contains the path where we will create folders that\n        contain the videos created during the session\n        :type folder: str\n        :param encoding: Encoding of the video\n        :type encoding: str\n        :param driver: Instance of webdriver we use to record a browser session\n        (if None we record the desktop)\n        :type driver: webdriver.remote.webdriver.WebDriver\n        \"\"\"\n        self.driver: webdriver.remote.webdriver.WebDriver = driver\n        self.__folder: str = folder\n        self.__session_path: str = \"\"\n        self.__encoding: str = encoding\n        self.__keep_recording: bool = False\n        self.frame: np.typing.ArrayLike | None = None\n        self.__thread: threading.Thread | None = None\n\n    # noinspection PyTypeChecker\n    def __thread_take_screenshot(self):\n        \"\"\"\n        Takes a screenshot to be added to the video in a parallel thread\n\n        :return: None\n        \"\"\"\n        # We take a screenshot every few ms and keep it in the attribute\n        # frame; it will be added to the video while the variable\n        # __keep_recording is true (we are recording)\n        #\n        # If we provided a webdriver (the variable driver is not None)\n        # the screenshot will be taken from the browser\n        # by selenium. 
Otherwise, it will be taken of the desktop\n        # by pyautogui.\n        #\n        # That screenshot will be saved in the attribute frame to be\n        # added as a frame to the video.\n\n        while self.__keep_recording:\n            if self.driver is not None:\n                self.frame = np.asarray(Image.open(io.BytesIO(self.driver\n                                                  .get_screenshot_as_png())))\n            else:\n                self.frame = np.asarray((pyautogui.screenshot()))\n\n            time.sleep(1 / 25)  # sleep 1/25 s between captures\n\n    def __build_writer(self) -> imageio.core.format.Writer:\n        \"\"\"\n        Creates the object that will be used to build our video frame by frame\n\n        :return: Video writer\n        :rtype: imageio.core.format.Writer\n        \"\"\"\n        # We first get the current date and time which we join to\n        # the attribute encoding (which contains the extension our\n        # video file should have) to get the name of the video file\n        # for this video session we are recording\n        #\n        # Then we create the video writer that will build the video\n        # frame by frame. Finally, we return that object\n        now: datetime = datetime.now()\n        file_name: str = now.strftime(\"%d-%m-%Y_%H-%M-%S\") + self.__encoding\n\n        writer: imageio.core.format.Writer = imageio.\\\n            get_writer(self.__session_path + \"/\" + file_name, fps=20)\n        return writer\n\n    def __main_thread_recording_session(self):\n        \"\"\"\n        Builds the video in the main thread adding frame by frame\n\n        :return: None\n        \"\"\"\n        # First we build the folder that will contain our video (or\n        # videos in case our recording session crashes\n        # so this module starts another session immediately).\n        #\n        # We do this using the current date and time and the attribute\n        # __folder where the user set which folder\n        # should contain our recording sessions. Then, we create it\n        now: datetime = datetime.now()\n        self.__session_path: str = self.__folder + \"\\\\\" + now.\\\n            strftime(\"%d-%m-%Y_%H-%M-%S\")\n        try:\n            os.mkdir(self.__session_path)\n        except (NotADirectoryError, Exception) as e:\n            print(e)\n\n        # We create the thread that will take the screenshots for the video\n        screenshots_thread: threading.Thread = threading.\\\n            Thread(target=self.__thread_take_screenshot, args=())\n\n        writer = self.__build_writer()  # To get the video writer\n\n        screenshots_thread.start()  # Start taking screenshots\n\n        # While the flag __keep_recording is True, we will keep adding\n        # frames to our video, or create a new video in case our\n        # recording session crashes.\n        #\n        # If the frame is not None, which means we already have a\n        # frame, we add it to the video\n        while self.__keep_recording:\n            try:\n                if self.frame is not None:\n                    writer.append_data(self.frame)\n            except (BufferError, Exception):\n                # In case there is an exception when trying to add the\n                # frame to the video we close the writer. After that,\n                # we create a new one immediately.\n                #\n                # Then, we try to add the new frame to the new video\n                # created, if it is not None\n                #\n                # This is the best way to prevent problems when the\n                # size of a browser changes. 
In that case, we just\n                # start a new video to keep recording\n                writer = self.__build_writer()\n                if self.frame is not None:\n                    writer.append_data(self.frame)\n\n            time.sleep(1 / 25)  # sleep 1/25 s between appended frames\n\n        # Once the flag __keep_recording is False we stop our recording\n        # session, so we close our video writer and stop the thread\n        # taking the screenshots\n        writer.close()\n        screenshots_thread.join()\n\n    def start_recording_session(self):\n        \"\"\"\n        Kicks off the recording session (main thread and screenshots thread)\n\n        :return: None\n        \"\"\"\n        if self.__keep_recording:  # Session already started, exception\n            raise SessionStartedException(\"There is an existing running \"\n                                          \"recording session for this object\")\n        self.__keep_recording = True  # New recording session\n\n        # We create the main __thread that runs the algorithm and assign\n        # it to attribute __thread\n        #\n        # Then, we start the thread to start the session\n        self.__thread = threading.\\\n            Thread(target=self.__main_thread_recording_session, args=())\n        self.__thread.start()\n\n    def stop_recording_session(self):\n        \"\"\"\n        Stops the recording session\n\n        :return: None\n        \"\"\"\n        if not self.__keep_recording:  # No session, so exception\n            raise NoSessionStartedException(\"There is no current recording \"\n                                            \"session\")\n\n        # We set the flag __keep_recording to false to stop the\n        # recording process and stop the thread\n        #\n        # We also set the attribute __thread to None as the recording\n        # session is finished\n        self.__keep_recording = False\n        self.__thread.join()\n        self.__thread: threading.Thread | None = None\n\n\nclass SessionStartedException(Exception):\n    \"\"\"\n    Custom exception that is raised when we try to start a session when there\n    is already one running\n    \"\"\"\n    pass\n\n\nclass NoSessionStartedException(Exception):\n    \"\"\"\n    Custom exception that is raised when we try to stop a session but there is\n    not one in process\n    \"\"\"\n    pass\n\n\nif __name__ == \"__main__\":\n    from selenium.webdriver.chrome.service import Service\n    # Selenium webdriver instance\n    chrome = Service(\"./chromedriver.exe\")\n    # noinspection PyArgumentList\n    browser = webdriver.Chrome(service=chrome)\n    # Creating instance of SeleniumBrowserRecorder\n    browser_recorder = SeleniumBrowserRecorder(\"./\", \".mp4\", browser)\n\n    try:\n        # Starting recording session\n        browser_recorder.start_recording_session()\n\n        # Browsing and performing automated tasks\n        browser.get(\"https://www.google.es/\")\n        browser.find_element(webdriver.common.by.By.ID, \"L2AGLb\").click()\n        query = browser.find_element(webdriver.common.by.By.NAME, \"q\")\n        query.send_keys(\"pypi stats privateattributesdecorator\")\n        query.send_keys(webdriver.common.keys.Keys.ENTER)\n        time.sleep(10)\n        result = browser.find_element(webdriver.common.by.By.XPATH,\n                                      \"//h3[contains(text(), \"\n                                      \"'PrivateAttributesDecorator - PyPI')]\")\\\n            .click()\n\n    finally:\n        # We always stop the recording session\n        browser_recorder.stop_recording_session()\n        browser.close()\n\n    desktop_recorder = SeleniumBrowserRecorder(\"./\", \".mp4\")\n\n    try:\n        # Starting recording session\n        desktop_recorder.start_recording_session()\n\n        time.sleep(30)\n\n    finally:\n        # We always stop the recording session\n        desktop_recorder.stop_recording_session()\n" } ]
3
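The README in the record above documents the recorder's whole public API as a start/stop pair. A minimal sketch of wrapping that pair in a context manager so that a crashed test still closes its video; the wrapper itself is an assumption, not part of the package:

import contextlib
from selenium_browser_desktop_recorder import SeleniumBrowserRecorder

@contextlib.contextmanager
def recording(folder, encoding=".mp4", driver=None):
    # Hypothetical convenience wrapper around the documented API.
    recorder = SeleniumBrowserRecorder(folder, encoding, driver)
    recorder.start_recording_session()
    try:
        yield recorder
    finally:
        # stop_recording_session() signals the worker threads to finish,
        # which closes the writer and saves the video file
        recorder.stop_recording_session()

Usage would then be: with recording("./videos", driver=browser): followed by the test body inside the with-block.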
Mine4Phantom/IART
https://github.com/Mine4Phantom/IART
3faec9635316800bc1f25f693f73f79fc4d15975
f69543d504e9381eb9359e0d5063275ef18723a6
c6c79447f0db29907fb42509bb7bccf62cc7c2e2
refs/heads/main
2023-03-17T15:45:33.906651
2021-03-03T23:52:47
2021-03-03T23:52:47
344,294,656
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.4859922230243683, "alphanum_fraction": 0.524319052696228, "avg_line_length": 24.131454467773438, "blob_id": "4e9a22fb5013b91d61045ba2ca4209d9a5a85919", "content_id": "c9965d179703297548ee563258451d2e06fa86ea", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5140, "license_type": "no_license", "max_line_length": 86, "num_lines": 213, "path": "/Praticas2/n_puzzle.py", "repo_name": "Mine4Phantom/IART", "src_encoding": "UTF-8", "text": "#a)\nfrom copy import deepcopy\nimport sys\n\n\n\n#end_matrix = [[1, 2, 3], [4, 5, 6], [7, 8, 0]]\n\nend_matrix = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 0]] \n\nclass Node:\n    def __init__(self, currentPos, parent):\n        self.matrix = currentPos\n        self.parent = parent\n        self.children = []\n        if parent != None:\n            self.cost = parent.getCost() + 1\n        else:\n            self.cost = 0\n\n    def getCost(self):\n        return self.cost\n\n    def addChild(self, node):\n        self.children.append(node)\n\n    def compareAncient(self, parentnode):\n        if self.matrix == parentnode.matrix:\n            return False\n        elif parentnode.parent == None:\n            return True\n        else:\n            return self.compareAncient(parentnode.parent)\n\n\ndef find_empty(matrix):\n    for i in range(len(matrix)):\n        for j in range(len(matrix)):\n            if (matrix[i][j] == 0):\n                return [j,i]\n\ndef up(matrix):\n    pos = find_empty(matrix)\n    xs = pos[0]\n    ys = pos[1]\n    if ys < 1:\n        return False\n    matrix[ys][xs] = matrix[ys - 1][xs]\n    matrix[ys - 1][xs] = 0\n    ys -= 1\n\n\ndef down(matrix):\n    pos = find_empty(matrix)\n    xs = pos[0]\n    ys = pos[1]\n    if ys > len(matrix) - 2:\n        return False\n    matrix[ys][xs] = matrix[ys + 1][xs]\n    matrix[ys + 1][xs] = 0\n    ys += 1 \n\n\ndef left(matrix):\n    pos = find_empty(matrix)\n    xs = pos[0]\n    ys = pos[1]\n    if xs < 1:\n        return False\n    matrix[ys][xs] = matrix[ys][xs - 1]\n    matrix[ys][xs - 1] = 0\n    xs -= 1\n\n\ndef right(matrix):\n    pos = find_empty(matrix)\n    xs = pos[0]\n    ys = pos[1]\n    if xs > len(matrix) - 2:\n        return False\n    matrix[ys][xs] = matrix[ys][xs + 1]\n    matrix[ys][xs + 1] = 0\n    xs += 1\n\n\ndef check (matrix, end_matrix):\n    return matrix == end_matrix\n\n\nqueue = []\n\n\ndef breadth_first_search():\n    initial = queue.pop(0)\n    '''\n    x = input()\n    print(\"\\n\")\n    print(initial.matrix)\n    print(\"\\n\")\n    '''\n    i = 0\n    while i < 4:\n        matrix = deepcopy(initial.matrix)\n        if i == 0:\n            down(matrix)\n        elif i == 1:\n            right(matrix)\n        elif i == 2:\n            up(matrix)\n        elif i == 3:\n            left(matrix)\n\n        print(matrix)\n        \n        node = Node(matrix, initial)\n        initial.addChild(node)\n\n        if (check(matrix, end_matrix)): \n            return 1\n        queue.append(node)\n        i += 1\n    return breadth_first_search() + 1\n\ndef greedy_search(heuristic):\n    children = []\n    initial = queue.pop(0)\n    i = 0\n    while i < 4:\n        matrix = deepcopy(initial.matrix)\n        if i == 0:\n            up(matrix)\n        elif i == 1:\n            left(matrix)\n        elif i == 2:\n            down(matrix)\n        elif i == 3:\n            right(matrix)\n\n        node = Node(matrix, initial)\n        initial.addChild(node)\n\n        if (check(matrix, end_matrix)): \n            return 1\n        children.append(node)\n        i += 1\n\n    # Applies our heuristic to sort the children by cost\n    children.sort(key = heuristic)\n    for childrennode in children:\n        if childrennode.compareAncient(childrennode.parent):\n            #print(childrennode.matrix)\n            queue.append(childrennode)\n    return greedy_search(heuristic) + 1\n\n\ndef heuristic1(node):\n    off_pos = 0\n    matrix = node.matrix\n    for i in range(len(matrix)):\n        for j in range(len(matrix)):\n            if 
(matrix[j][i] == end_matrix[j][i]):\n                continue\n            else:\n                off_pos += 1\n    return node.getCost() + off_pos\n\n\ndef heuristic2(node):\n    manhattan_sum = 0\n    matrix = node.matrix\n    for i in range(len(matrix)):\n        for j in range(len(matrix)):\n            if (matrix[j][i] == end_matrix[j][i]):\n                continue\n            else:\n                for a in range(len(matrix)):\n                    for b in range(len(matrix)):\n                        if (matrix[b][a] == end_matrix[j][i]):\n                            manhattan_sum += abs(a-i) + abs(b-j)\n    return manhattan_sum + node.getCost()\n\n    \n\nsys.setrecursionlimit(10000)\n\ninitial1 = Node([[1, 3, 6], [5, 2, 0], [4, 7, 8]], None)\ninitial2 = Node([[1, 6, 2], [5, 7, 3], [0, 4, 8]], None)\ninitial3 = Node([[5, 1, 3, 4], [2, 0, 7, 8], [10, 6, 11, 12], [9, 13, 14, 15]], None)\n#initial4 = Node([[2, 11, 5, 4], [1, 6, 3, 10], [9, 14, 8, 15], [13, 12, 0, 7]], None)\n#initial5 = Node([[13, 9, 4, 0], [6, 15, 1, 8], [10, 11, 3, 5], [14, 12, 7, 2]], None)\n\n#print(\"Breadth first search : \" + str(breadth_first_search()))\n\nqueue.clear()\nqueue.append(initial3)\nprint( \"Test 1 with Greedy Search with H1: \" + str(greedy_search(heuristic1)))\nqueue.clear()\nqueue.append(initial3)\nprint( \"Test 1 with Greedy Search with H2: \" + str(greedy_search(heuristic2)))\n'''\nqueue.clear()\nqueue.append(initial2)\nprint( \"Test 2 with Greedy Search with H1: \" + str(greedy_search(heuristic1)))\nqueue.clear()\nqueue.append(initial2)\nprint( \"Test 2 with Greedy Search with H2: \" + str(greedy_search(heuristic2)))\n\n'''\n" }, { "alpha_fraction": 0.5296279191970825, "alphanum_fraction": 0.5617822408676147, "avg_line_length": 20.97979736328125, "blob_id": "ca57b6133f3be1e24ab014a34432d41735256992", "content_id": "65b9c0a524a998951b4b01d783ac2d8b50776ea6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2177, "license_type": "no_license", "max_line_length": 62, "num_lines": 99, "path": "/Praticas1/baldes.py", "repo_name": "Mine4Phantom/IART", "src_encoding": "UTF-8", "text": "from dataclasses import dataclass \nfrom copy import deepcopy\n\n@dataclass\nclass Bucket:\n    maximum : int\n    value : int\n\n\nclass Node:\n    def __init__(self, bucket1, bucket2, parent):\n        self.b1 = bucket1\n        self.b2 = bucket2\n        self.parent = parent\n        self.children = []\n    \n    def addChild(self, node):\n        self.children.append(node)\n\n    def print(self):\n        if (self.parent != None):\n            self.parent.print()\n        print(self.b1, self.b2)\n\n    \n\ndef emp(bucket):\n    if bucket.value > 0:\n        bucket.value = 0\n        return True\n    else:\n        return False\n\ndef fill(bucket):\n    bucket.value = bucket.maximum\n\ndef pour(bucket1, bucket2):\n    if bucket1.value > 0 and bucket2.value < bucket2.maximum:\n        if (bucket2.value + bucket1.value) <= bucket2.maximum:\n            bucket2.value += bucket1.value\n            bucket1.value = 0\n        else:\n            bucket1.value -= bucket2.maximum - bucket2.value\n            bucket2.value = bucket2.maximum\n        return True\n    else:\n        return False\n\ndef check(bucket1, value):\n    return bucket1.value == value\n\n\nqueue = []\nvalue = 2 # target amount; change as needed\n\ndef breadthFirstSearch():\n    initial = queue.pop(0)\n    i = 0\n    while i < 6:\n        bucket1 = deepcopy(initial.b1)\n        bucket2 = deepcopy(initial.b2)\n        if i == 0:\n            fill(bucket1)\n        elif i == 1:\n            fill(bucket2)\n        elif i == 2:\n            emp(bucket1)\n        elif i == 3:\n            emp(bucket2)\n        elif i == 4:\n            pour(bucket1, bucket2)\n        elif i == 5:\n            pour(bucket2, bucket1)\n\n\n        node = Node(bucket1, bucket2, initial)\n        initial.addChild(node)\n        \n        if (check(bucket1, value)):\n            node.print()\n            
return\n        queue.append(node)\n        i += 1\n    breadthFirstSearch()\n    \nsize1 = input(\"First container size:\")\nsize2 = input(\"Second container size:\")\n\n\nb1 = Bucket(int(size1), 0)\nb2 = Bucket(int(size2), 0)\n\ninitial = Node(b1, b2, None)\nqueue.append(initial)\n\nbreadthFirstSearch()\n\n" } ]
2
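heuristic2 in the n_puzzle record above scores a board by summed Manhattan distances plus path cost; note it also walks the blank tile. A quick standalone check of the distance part in its standard blank-excluding form, using the record's commented 3x3 goal layout as an assumption:

goal = [[1, 2, 3], [4, 5, 6], [7, 8, 0]]
board = [[1, 2, 3], [4, 5, 6], [7, 0, 8]]   # only tile 8 is one column off

def manhattan(board, goal):
    # Map each tile value to its goal coordinates, then sum the offsets;
    # correctly placed tiles contribute 0 and the blank (0) is skipped.
    pos = {v: (r, c) for r, row in enumerate(goal) for c, v in enumerate(row)}
    return sum(abs(r - pos[v][0]) + abs(c - pos[v][1])
               for r, row in enumerate(board)
               for c, v in enumerate(row) if v != 0)

assert manhattan(board, goal) == 1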
reginalin/game-backend
https://github.com/reginalin/game-backend
74a07784a3ebe10cf112a7c16e2d012ca74c13cd
ffc9ed9b9d9eb33868e63aedd1debbef65a9778c
102e3b08741e2b9ccb1df717f07e0efbfe005ea8
refs/heads/master
2020-07-19T06:40:03.735413
2019-09-04T20:37:42
2019-09-04T20:37:42
205,248,030
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6718346476554871, "alphanum_fraction": 0.6770026087760925, "avg_line_length": 31.22222137451172, "blob_id": "05ef96061aed23abc268584de8859b40664c1720", "content_id": "504caef7b347e4b65d956ae43596033c5b28e72b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1161, "license_type": "no_license", "max_line_length": 99, "num_lines": 36, "path": "/backend/backend.py", "repo_name": "reginalin/game-backend", "src_encoding": "UTF-8", "text": "import os\nfrom flask import Flask\nfrom flask import render_template\nfrom flask import request\nfrom flask_sqlalchemy import SQLAlchemy\n\nproject_dir = os.path.dirname(os.path.abspath(__file__))\ndatabase_file = \"sqlite:///{}\".format(os.path.join(project_dir, \"highscores.db\"))\napp = Flask(__name__)\napp.config[\"SQLALCHEMY_DATABASE_URI\"] = database_file\ndb = SQLAlchemy(app)\n\nclass Score(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(80), nullable=False)\n scoreValue = db.Column(db.Integer, nullable=False)\n\n def __repr__(self):\n return \"<Name: {}>\".format(self.name)\n\[email protected](\"/\", methods=[\"GET\", \"POST\"])\ndef home():\n if request.form:\n userscore = Score(name=request.form.get(\"name\"), scoreValue=request.form.get(\"scoreValue\"))\n db.session.add(userscore)\n db.session.commit()\n # scores = Score.query.order_by(Score.scoreValue.desc())\n scores = topScores(10)\n return render_template(\"home.html\", scores=scores)\n\n# get top 10 scores\ndef topScores(n): \n return Score.query.order_by(Score.scoreValue.desc()).limit(n)\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n\n" }, { "alpha_fraction": 0.4953271150588989, "alphanum_fraction": 0.6962617039680481, "avg_line_length": 15.461538314819336, "blob_id": "37a7c6d075845a0deef6fe1ae682d5de49d4cc7e", "content_id": "7f2cf06d589574317f6cb20f3a5200e668bd7187", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 214, "license_type": "no_license", "max_line_length": 23, "num_lines": 13, "path": "/requirements.txt", "repo_name": "reginalin/game-backend", "src_encoding": "UTF-8", "text": "Click==7.0\nFlask==1.1.1\nFlask-SQLAlchemy==2.4.0\ngreenlet==0.4.15\nitsdangerous==1.1.0\nJinja2==2.10.1\nMarkupSafe==1.1.1\nmsgpack==0.6.1\npynvim==0.3.2\nredis==3.3.8\nSQLAlchemy==1.3.8\nvirtualenv==16.7.4\nWerkzeug==0.15.5\n" } ]
2
Futrell/lc-surprisal
https://github.com/Futrell/lc-surprisal
13d30efa6d485178903004c94ddc5a362a2c09c5
f973599a628c77681eeab840a516b3cff9b9b584
e971dafc8d4b24c1d0690c6a03ef46d1d7468d71
refs/heads/master
2020-03-24T11:55:03.353619
2020-02-27T17:53:52
2020-02-27T17:53:52
142,698,397
1
1
null
null
null
null
null
[ { "alpha_fraction": 0.5234568119049072, "alphanum_fraction": 0.5526748895645142, "avg_line_length": 28.573171615600586, "blob_id": "517d370d244b9d0102d6b1716444d236fdafcd02", "content_id": "2aa4e76050b7a32fbb1fa6b3e8c9ee6d88113464", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2430, "license_type": "no_license", "max_line_length": 116, "num_lines": 82, "path": "/syntngrams_depmi.py", "repo_name": "Futrell/lc-surprisal", "src_encoding": "UTF-8", "text": "\"\"\" Calculate dependency pair MI from Syntactic N-grams \"\"\"\n0;95;0c\nimport sys\nimport itertools\nimport operator\n\nYEAR_RANGE = range(1960, 2000+1)\n\n# I got all the deptypes from Google Books by running\n# zcat arcs.48-of-99.gz | awk '{print $2}' | awk -F\"/\" '{print $3}' | uniq | sort | uniq\n# Then looked at them and selected the following as function word deptypes based on Stanford Dependencies standards.\nFW_DEPTYPES = {\n 'abbrev',\n 'cc',\n 'conj',\n 'expl',\n 'mark',\n 'measure',\n 'neg',\n 'poss',\n 'pobj',\n 'prep',\n 'dep',\n}\n\n\ndef sum_over_years(years):\n def gen():\n for part in years:\n year, count = part.split(\",\")\n if int(year) in YEAR_RANGE:\n yield int(count)\n return sum(gen())\n\ndef normalize(partcode):\n # 01 -> 01\n # 20 -> 01\n # 202 -> 011\n # 011 -> 011\n # 330 -> 011\n # 012 -> 012\n # 201 -> 012\n # 310 -> 012\n state = itertools.count()\n seen = {}\n for thing in partcode:\n if thing in seen:\n yield seen[thing]\n else:\n result = seen[thing] = next(state)\n yield result\n\ndef read_lines(lines, match_code, get_code, ignore_fw=False):\n def read_line(line):\n _, phrase, _, *years = line.strip().split(\"\\t\")\n phrase = [part.split(\"/\") for part in phrase.split()]\n phrase = sorted(phrase, key=operator.itemgetter(-1))\n partcode = tuple(normalize(sorted(int(part[-1]) for part in phrase)))\n if partcode != match_code:\n #print(\"Rejected %s\" % phrase, file=sys.stderr)\n return None\n else:\n relevant_parts = [(phrase[i][0].lower(), phrase[i][2]) for i in get_code]\n if ignore_fw and any(part[1] in FW_DEPTYPES for part in relevant_parts[1:]):\n return None\n else:\n count = sum_over_years(years)\n if count:\n return [part[0] for part in relevant_parts], count\n else:\n return None\n return filter(None, map(read_line, lines))\n\ndef main(match_code, get_code, ignore_fw=False):\n match_code = tuple(map(int, match_code))\n get_code = tuple(map(int, get_code))\n lines = read_lines(sys.stdin, match_code, get_code, ignore_fw=ignore_fw)\n for parts, count in lines:\n print(\" \".join(parts), count, sep=\"\\t\")\n\nif __name__ == '__main__':\n main(*sys.argv[1:])\n \n" } ]
1
edwinkost/discharge_analysis_IWMI
https://github.com/edwinkost/discharge_analysis_IWMI
06c1d028abb44b8c79289892963d9466dc79a39b
450a5510f2a8556a6d34e9a7011b99caba7262ae
d89db0fa3d717dcff023d8da80508d166bc3c513
refs/heads/master
2021-01-23T08:43:32.434421
2015-07-03T14:09:32
2015-07-03T14:09:32
38,494,405
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5159615278244019, "alphanum_fraction": 0.5189430713653564, "avg_line_length": 50.87466812133789, "blob_id": "f43d8ec1da88989fa21bb336dad012aa9c6dbfe8", "content_id": "e70d27d8b9bfb3435be7ff54dc604fb6fa07da76", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 19453, "license_type": "no_license", "max_line_length": 199, "num_lines": 375, "path": "/baseflowIWMI.py", "repo_name": "edwinkost/discharge_analysis_IWMI", "src_encoding": "UTF-8", "text": "import os\nimport re\nimport glob\nimport datetime\n\nimport netCDF4 as nc\nimport numpy as np\n\nimport pcraster as pcr\n\nimport virtualOS as vos\n\nimport logging\n# logger object\nlogger = logging.getLogger(__name__)\n\n# the following dictionary is needed to avoid open and closing files\nfilecache = dict()\n\nclass BaseflowEvaluation(object):\n\n def __init__(self, modelOutputFolder,startDate=None,endDate=None,temporary_directory=None):\n object.__init__(self)\n \n logger.info('Evaluating the model results (annual baseflow) stored in %s.', modelOutputFolder)\n \n self.startDate = startDate\n self.endDate = endDate\n if (self.startDate != None) and (self.endDate != None):\n self.startDate = datetime.datetime.strptime(str(startDate),'%Y-%m-%d')\n self.endDate = datetime.datetime.strptime(str( endDate),'%Y-%m-%d')\n logger.info(\"Only results from \"+str(self.startDate)+\" to \"+str(self.endDate)+\" are analyzed to available observation data.\")\n else:\n logger.info(\"Entire model results will be analyzed to available observation data.\")\n\n self.tmpDir = \"/dev/shm/\"\n if temporary_directory != None: self.tmpDir = temporary_directory\n\n # initiating a dictionary that will contain all GRDC attributes:\n self.attributeGRDC = {}\n #\n # initiating keys in GRDC dictionary \n self.grdc_dict_keys = \\\n [\"id_from_grdc\", \n \"iwmi_annual_baseflow_file_name\", \n \"river_name\", \n \"station_name\", \n \"country_code\", \n \"grdc_catchment_area_in_km2\", \n \"grdc_latitude_in_arc_degree\", \n \"grdc_longitude_in_arc_degree\", \n \"model_catchment_area_in_km2\", \n \"model_latitude_in_arc_degree\", \n \"model_longitude_in_arc_degree\",\n \"model_landmask\", \n \"num_of_annual_pairs\", \n \"table_file_name\", \n \"chart_file_name\", \n \"average_iwmi_opt_baseflow\", \n \"average_iwmi_max_baseflow\", \n \"average_iwmi_min_baseflow\", \n \"average_model\", \n \"bias\", \n \"correlation\", \n \"R2\", \n \"R2_adjusted\", \n \"rmse\", \n \"mae\", \n \"ns_efficiency\", \n \"ns_efficiency_log\",\n \"avg_baseflow_deviation\"]\n #\n for key in self.grdc_dict_keys: self.attributeGRDC[key] = {} \n\n # initiating a list that will contain all grdc ids that will be used\n self.list_of_grdc_ids = [] \n\n # initiating a list that will contain random (temporary) directories \n # (this list should be empty at the end of the calculation):\n self.randomDirList = [] \n\n def makeRandomDir(self,tmpDir):\n\n # make a random (temporary) directory (default: in the memory)\n randomDir = tmpDir + vos.get_random_word()\n directoryExist = True\n while directoryExist:\n try:\n os.makedirs(randomDir)\n directoryExist = False\n self.randomDirList.append(randomDir) \n except:\n # generate another random directory\n randomDir = tmpDir + vos.get_random_word()\n return randomDir \n # PS: do not forget to delete this random directory.\n\n def cleanRandomDir(self,randomDir):\n\n # clean randomDir\n cmd = 'rm -r '+randomDir+\"*\"\n print(cmd); os.system(cmd)\n self.randomDirList.remove(randomDir)\n if 
self.randomDirList != []: print \"WARNING!: randomDir(s) found: \", self.randomDirList \n\n def get_grdc_attributes(self, attributeDischargeGRDC, baseflowFolderIWMI):\n \n for id_from_grdc in attributeDischargeGRDC[\"id_from_grdc\"].keys():\n \n self.attributeGRDC[\"id_from_grdc\"][id_from_grdc] = attributeDischargeGRDC[\"id_from_grdc\"][id_from_grdc]\n self.attributeGRDC[\"river_name\"][id_from_grdc] = attributeDischargeGRDC[\"river_name\"][id_from_grdc] \n self.attributeGRDC[\"station_name\"][id_from_grdc] = attributeDischargeGRDC[\"station_name\"][id_from_grdc] \n self.attributeGRDC[\"country_code\"][id_from_grdc] = attributeDischargeGRDC[\"country_code\"][id_from_grdc] \n self.attributeGRDC[\"grdc_latitude_in_arc_degree\"][id_from_grdc] = attributeDischargeGRDC[\"grdc_latitude_in_arc_degree\"][id_from_grdc]\n self.attributeGRDC[\"grdc_longitude_in_arc_degree\"][id_from_grdc] = attributeDischargeGRDC[\"grdc_longitude_in_arc_degree\"][id_from_grdc]\n self.attributeGRDC[\"grdc_catchment_area_in_km2\"][id_from_grdc] = attributeDischargeGRDC[\"grdc_catchment_area_in_km2\"][id_from_grdc] \n \n self.attributeGRDC[\"model_longitude_in_arc_degree\"][id_from_grdc] = attributeDischargeGRDC[\"model_longitude_in_arc_degree\"][id_from_grdc]\n self.attributeGRDC[\"model_latitude_in_arc_degree\"][id_from_grdc] = attributeDischargeGRDC[\"model_latitude_in_arc_degree\"][id_from_grdc] \n self.attributeGRDC[\"model_catchment_area_in_km2\"][id_from_grdc] = attributeDischargeGRDC[\"model_catchment_area_in_km2\"][id_from_grdc] \n self.attributeGRDC[\"model_landmask\"][id_from_grdc] = attributeDischargeGRDC[\"model_landmask\"][id_from_grdc] \n\n iwmi_annual_baseflow_file_name = str(os.path.abspath(baseflowFolderIWMI+\"/\"+str(id_from_grdc)+\".out\"))\n self.attributeGRDC[\"iwmi_annual_baseflow_file_name\"][id_from_grdc] = iwmi_annual_baseflow_file_name\n\n logger.info(\"IWMI annual baseflow time series \"+str(iwmi_annual_baseflow_file_name)+\" will be used.\")\n # add grdc id to the list (that will be processed later)\n self.list_of_grdc_ids.append(int(id_from_grdc))\n\n def evaluateAllBaseflowResults(self,globalCloneMapFileName,\\\n catchmentClassFileName,\\\n lddMapFileName,\\\n cellAreaMapFileName,\\\n pcrglobwb_output,\\\n analysisOutputDir=\"\",\\\n tmpDir = None): \n\n # temporary directory\n if tmpDir == None: tmpDir = self.tmpDir+\"/edwin_iwmi_\"\n\n # output directory for all analyses for all stations\n analysisOutputDir = str(analysisOutputDir)\n self.chartOutputDir = analysisOutputDir+\"/chart/\"\n self.tableOutputDir = analysisOutputDir+\"/table/\"\n #\n if analysisOutputDir == \"\": self.chartOutputDir = \"chart/\"\n if analysisOutputDir == \"\": self.tableOutputDir = \"table/\"\n #\n # make the chart and table directories:\n os.system('rm -r '+self.chartOutputDir+\"*\")\n os.system('rm -r '+self.tableOutputDir+\"*\")\n os.makedirs(self.chartOutputDir)\n os.makedirs(self.tableOutputDir)\n \n # cloneMap for all pcraster operations\n pcr.setclone(globalCloneMapFileName)\n cloneMap = pcr.boolean(1)\n \n lddMap = pcr.lddrepair(pcr.readmap(lddMapFileName))\n cellArea = pcr.scalar(pcr.readmap(cellAreaMapFileName))\n \n # The landMaskClass map contains the nominal classes for all landmask regions. 
\n landMaskClass = pcr.nominal(cloneMap) # default: if catchmentClassFileName is not given\n if catchmentClassFileName != None:\n landMaskClass = pcr.nominal(pcr.readmap(catchmentClassFileName))\n\n for id in self.list_of_grdc_ids: \n\n logger.info(\"Evaluating simulated annual baseflow time series to IWMI baseflow time series at \"+str(self.attributeGRDC[\"id_from_grdc\"][str(id)])+\".\")\n \n # evaluate model results to GRDC data\n self.evaluateBaseflowResult(str(id),pcrglobwb_output,catchmentClassFileName,tmpDir)\n \n # write the summary to a table \n summary_file = analysisOutputDir+\"baseflow_summary.txt\"\n #\n logger.info(\"Writing the summary for all stations to the file: \"+str(summary_file)+\".\")\n #\n # prepare the file:\n summary_file_handle = open(summary_file,\"w\")\n #\n # write the header\n summary_file_handle.write( \";\".join(self.grdc_dict_keys)+\"\\n\")\n #\n # write the content\n for id in self.list_of_grdc_ids:\n rowLine = \"\"\n for key in self.grdc_dict_keys: rowLine += str(self.attributeGRDC[key][str(id)]) + \";\" \n rowLine = rowLine[0:-1] + \"\\n\"\n summary_file_handle.write(rowLine)\n summary_file_handle.close() \n\n def evaluateBaseflowResult(self,id,pcrglobwb_output,catchmentClassFileName,tmpDir):\n \n # open and crop the netcdf file that contains the result\n ncFile = pcrglobwb_output['folder']+\"/\"+pcrglobwb_output[\"netcdf_file_name\"] \n\n # for high resolution output, the netcdf files are usually splitted in several files\n if catchmentClassFileName != None:\n \n # identify the landmask\n landmaskCode = str(self.attributeGRDC[\"model_landmask\"][str(id)])\n if int(landmaskCode) < 10: landmaskCode = \"0\"+landmaskCode \n\n # identify the landmask - # TODO: THIS MUST BE FIXED\n ncFile = \"/projects/wtrcycle/users/edwinhs/two_layers_with_demand_one_degree_zonation_cruts3.21-era_interim_5arcmin_but_30minArno\"+\"/M\"+landmaskCode+\"/netcdf/discharge_monthAvg_output.nc\"\n \n logger.info(\"Reading and evaluating the model result for the grdc station \"+str(id)+\" from \"+ncFile)\n \n if ncFile in filecache.keys():\n f = filecache[ncFile]\n print \"Cached: \", ncFile\n else:\n f = nc.Dataset(ncFile)\n filecache[ncFile] = f\n print \"New: \", ncFile\n\n #\n varName = pcrglobwb_output[\"netcdf_variable_name\"]\n try:\n f.variables['lat'] = f.variables['latitude']\n f.variables['lon'] = f.variables['longitude']\n except:\n pass\n\n # identify row and column indexes:\n #\n lon = float(self.attributeGRDC[\"model_longitude_in_arc_degree\"][str(id)])\n minX = min(abs(f.variables['lon'][:] - lon))\n xStationIndex = int(np.where(abs(f.variables['lon'][:] - lon) == minX)[0]) \n #\n lat = float(self.attributeGRDC[\"model_latitude_in_arc_degree\"][str(id)])\n minY = min(abs(f.variables['lat'][:] - lat))\n yStationIndex = int(np.where(abs(f.variables['lat'][:] - lat) == minY)[0]) \n\n # cropping the data:\n cropData = f.variables[varName][:,yStationIndex,xStationIndex]\n\n # select specific ranges of date/year\n nctime = f.variables['time'] # A netCDF time variable object. 
\n cropTime = nctime[:]\n\n if (self.startDate != None) and (self.endDate != None):\n idx_start = nc.date2index(self.startDate, \\\n nctime, \\\n calendar = nctime.calendar, \\\n select = 'exact')\n idx_end = nc.date2index(self.endDate, \\\n nctime, \\\n calendar = nctime.calendar, \\\n select = 'exact')\n cropData = cropData[int(idx_start):int(idx_end+1)]\n cropTime = cropTime[int(idx_start):int(idx_end+1)]\n\n cropData = np.column_stack((cropTime,cropData))\n print(cropData)\n \n # make a randomDir containing txt files (attribute and model result):\n randomDir = self.makeRandomDir(tmpDir) \n txtModelFile = randomDir+\"/\"+vos.get_random_word()+\".txt\"\n \n # write important attributes to a .atr file \n #\n atrModel = open(txtModelFile+\".atr\",\"w\")\n atrModel.write(\"# grdc_id: \" +str(self.attributeGRDC[\"id_from_grdc\"][str(id)])+\"\\n\")\n atrModel.write(\"# country_code: \" +str(self.attributeGRDC[\"country_code\"][str(id)])+\"\\n\")\n atrModel.write(\"# river_name: \" +str(self.attributeGRDC[\"river_name\"][str(id)])+\"\\n\") \n atrModel.write(\"# station_name: \" +str(self.attributeGRDC[\"station_name\"][str(id)])+\"\\n\") \n atrModel.write(\"# grdc_catchment_area_in_km2: \" +str(self.attributeGRDC[\"grdc_catchment_area_in_km2\"][str(id)])+\"\\n\") \n #\n atrModel.write(\"# model_landmask: \" +str(self.attributeGRDC[\"model_landmask\"][str(id)])+\"\\n\") \n atrModel.write(\"# model_latitude: \" +str(self.attributeGRDC[\"model_latitude_in_arc_degree\"][str(id)])+\"\\n\") \n atrModel.write(\"# model_longitude: \" +str(self.attributeGRDC[\"model_longitude_in_arc_degree\"][str(id)])+\"\\n\") \n atrModel.write(\"# model_catchment_area_in_km2: \"+str(self.attributeGRDC[\"model_catchment_area_in_km2\"][str(id)])+\"\\n\") \n atrModel.write(\"####################################################################################\\n\") \n atrModel.close()\n \n # save cropData to a .txt file:\n txtModel = open(txtModelFile,\"w\")\n np.savetxt(txtModelFile,cropData,delimiter=\";\") # two columns with date and model_result\n txtModel.close()\n \n # run R for evaluation\n cmd = 'R -f evaluateAnnualBaseflow.R '+self.attributeGRDC[\"iwmi_annual_baseflow_file_name\"][str(id)]+' '+txtModelFile\n print(cmd); os.system(cmd)\n \n # get model performance: read the output file (from R)\n try: \n outputFile = txtModelFile+\".out\"\n f = open(outputFile) ; allLines = f.read() ; f.close()\n # split the content of the file into several lines\n allLines = allLines.replace(\"\\r\",\"\"); allLines = allLines.split(\"\\n\")\n \n # performance values\n performance = allLines[2].split(\";\")\n\n print performance\n\n #\n nPairs = float(performance[0])\n avg_opt_obs = float(performance[1])\n avg_max_obs = float(performance[2])\n avg_min_obs = float(performance[3])\n avg_sim = float(performance[4])\n NSeff = float(performance[5])\n NSeff_log = float(performance[6])\n rmse = float(performance[7])\n mae = float(performance[8])\n bias = float(performance[9])\n R2 = float(performance[10])\n R2ad = float(performance[11])\n correlation = float(performance[12])\n avg_baseflow_deviation = float(performance[13])\n\n table_file_name = self.tableOutputDir+\"/\"+\\\n str(self.attributeGRDC[\"country_code\"][str(id)])+\"_\"+\\\n str(self.attributeGRDC[\"river_name\"][str(id)]) +\"_\"+\\\n str(self.attributeGRDC[\"id_from_grdc\"][str(id)])+\"_\"+\\\n str(self.attributeGRDC[\"station_name\"][str(id)])+\"_\"+\\\n \"table.txt\"\n cmd = 'cp '+txtModelFile+\".out \"+table_file_name\n print(cmd); os.system(cmd)\n logger.info(\"Copying 
the model result for the grdc station \"+str(id)+\" to a column/txt file: \"+str(table_file_name)+\".\")\n\n chart_file_name = self.chartOutputDir+\"/\"+\\\n str(self.attributeGRDC[\"country_code\"][str(id)])+\"_\"+\\\n str(self.attributeGRDC[\"river_name\"][str(id)]) +\"_\"+\\\n str(self.attributeGRDC[\"id_from_grdc\"][str(id)])+\"_\"+\\\n str(self.attributeGRDC[\"station_name\"][str(id)])+\"_\"+\\\n \"chart.pdf\"\n cmd = 'cp '+txtModelFile+\".out.pdf \"+chart_file_name\n print(cmd); os.system(cmd)\n logger.info(\"Saving the time series plot for the grdc station \"+str(id)+\" to a pdf file: \"+str(chart_file_name)+\".\")\n \n except: \n\n nPairs = \"NA\"\n avg_opt_obs = \"NA\"\n avg_max_obs = \"NA\"\n avg_min_obs = \"NA\"\n avg_sim = \"NA\"\n NSeff = \"NA\"\n NSeff_log = \"NA\"\n rmse = \"NA\"\n mae = \"NA\"\n bias = \"NA\"\n R2 = \"NA\"\n R2ad = \"NA\"\n correlation = \"NA\"\n avg_baseflow_deviation = \"NA\"\n \n chart_file_name = \"NA\"\n table_file_name = \"NA\"\n \n logger.info(\"Evaluation baseflow time series can NOT be performed.\")\n\n # clean (random) temporary directory\n self.cleanRandomDir(randomDir)\n \n self.attributeGRDC[\"num_of_annual_pairs\"][str(id)] = nPairs \n self.attributeGRDC[\"average_iwmi_opt_baseflow\"][str(id)] = avg_opt_obs \n self.attributeGRDC[\"average_iwmi_max_baseflow\"][str(id)] = avg_max_obs \n self.attributeGRDC[\"average_iwmi_min_baseflow\"][str(id)] = avg_min_obs \n self.attributeGRDC[\"average_model\"][str(id)] = avg_sim \n self.attributeGRDC[\"ns_efficiency\"][str(id)] = NSeff \n self.attributeGRDC[\"ns_efficiency_log\"][str(id)] = NSeff_log \n self.attributeGRDC[\"rmse\"][str(id)] = rmse \n self.attributeGRDC[\"mae\"][str(id)] = mae \n self.attributeGRDC[\"bias\"][str(id)] = bias \n self.attributeGRDC[\"R2\"][str(id)] = R2 \n self.attributeGRDC[\"R2_adjusted\"][str(id)] = R2ad \n self.attributeGRDC[\"correlation\"][str(id)] = correlation \n self.attributeGRDC[\"chart_file_name\"][str(id)] = chart_file_name \n self.attributeGRDC[\"table_file_name\"][str(id)] = table_file_name \n self.attributeGRDC[\"avg_baseflow_deviation\"][str(id)] = avg_baseflow_deviation\n" }, { "alpha_fraction": 0.619448721408844, "alphanum_fraction": 0.6297090649604797, "avg_line_length": 47.73134231567383, "blob_id": "4bb91c425c6e4535510b641740bdf7b4628d48b7", "content_id": "dbad438fe1e664d1d99d11d36f119c8fe85b80c4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6530, "license_type": "no_license", "max_line_length": 138, "num_lines": 134, "path": "/0_main_analyze_discharge.py", "repo_name": "edwinkost/discharge_analysis_IWMI", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# Edwin Husni Sutanudjaja (EHS, 06 Jun 2014): This is script for evaluating monthly simulated discharge to GRDC discharge data.\n# Edwin Husni Sutanudjaja (EHS, 10 Jun 2014): I modify this script such that it can also be used for evaluating 30 arc-min results. \n# Edwin Husni Sutanudjaja (EHS, 26 Jun 2014): I modify this script such that it can also be used for evaluating a certain period of time series. 
\n\nimport os\nimport sys\nimport glob\n\nimport dischargeGRDC\nimport baseflowIWMI\n\nimport logging\nfrom logger import Logger\n# get name for the logger\nlogger = logging.getLogger(\"main_script\")\n\n# PCR-GLOBWB results: model output directory, \npcrglobwb_output = {}\npcrglobwb_output[\"folder\"] = None # \"/scratch/edwin/IWMI_run_20_nov/without_fossil_limit_with_pumping_limit_CRU/netcdf/\"\npcrglobwb_output[\"netcdf_file_name\"] = None # \"netcdf/discharge_monthAvg_output.nc\" # \"discharge_monthAvg_output.nc\" \npcrglobwb_output[\"netcdf_variable_name\"] = None # \"discharge\" \n\n# output directory storing analysis results (results from this script)\nglobalAnalysisOutputDir = None # \"/scratch/edwin/IWMI_run_20_nov/without_fossil_limit_with_pumping_limit_CRU/analysis/monthly_discharge/\"\ncleanOutputDir = True # option to clean analysisOutputDir \t\t\n\n# optional: PCR-GLOBWB output and analysis output folders are given in the system argument\nif len(sys.argv) > 1:\n pcrglobwb_output[\"folder\"] = str(sys.argv[1])\n globalAnalysisOutputDir = str(sys.argv[1])+\"/analysis/\"\ntry:\n os.makedirs(globalAnalysisOutputDir) \nexcept:\n pass \n\n# time range for analyses\nstartDate = None # \"1960-01-31\" #YYYY-MM-DD # None \nendDate = None # \"2010-12-31\" #YYYY-MM-DD # None \n\n# directory for GRDC files:\n#~ globalDirectoryGRDC = \"/projects/wtrcycle/users/edwinhs/observation_data/IWMI_calibration/monthly_discharge/for_calibration/\"\n#~ globalDirectoryGRDC = \"/scratch/edwin/observation_data/IWMI_calibration/monthly_discharge/for_validation/\"\nglobalDirectoryGRDC = \"/scratch/edwin/observation_data/IWMI_calibration/monthly_discharge/for_calibration/\"\n\n# directory for baseflow files:\n#~ baseflowFolderIWMI = \"/projects/wtrcycle/users/edwinhs/observation_data/IWMI_calibration/annual_baseflow/for_calibration/\"\n#~ baseflowFolderIWMI = \"/scratch/edwin/observation_data/IWMI_calibration/annual_baseflow/for_validation/\"\nbaseflowFolderIWMI = \"/scratch/edwin/observation_data/IWMI_calibration/annual_baseflow/for_calibration/\"\n\n# clone, ldd and cell area maps, for 30min results (of PCR-GLOBWB 2.0)\nglobalCloneMapFileName = \"/data/hydroworld/PCRGLOBWB20/input30min/global/Global_CloneMap_30min.map\"\nlddMapFileName = \"/data/hydroworld/PCRGLOBWB20/input30min/routing/lddsound_30min.map\"\ncellAreaMapFileName = \"/data/hydroworld/PCRGLOBWB20/input30min/routing/cellarea30min.map\"\n\n# the following is needed for evaluating model results with 5 arcmin resolution\ncatchmentClassFileName = None \n\n# temporary directory (note that it is NOT a good idea to store temporary files in the memory (/dev/shm))\ntemporary_directory = globalAnalysisOutputDir+\"/tmp/\"\ntry:\n os.makedirs(temporary_directory) \nexcept:\n os.system('rm -r '+temporary_directory+\"/*\") # make sure that temporary directory is clean \n\ndef main():\n\n # discharge analysis\n ####################################################################################################\n #\n # make analysisOutputDir\n analysisOutputDir = globalAnalysisOutputDir+\"/calibration/monthly_discharge/\"\n try:\n os.makedirs(analysisOutputDir) \n except:\n if cleanOutputDir == True: os.system('rm -r '+analysisOutputDir+\"/*\") \n #\n # logger object for discharge analysis\n logger = Logger(analysisOutputDir)\n #\n # monthly discharge evaluation (based on GRDC data)\n dischargeEvaluation = dischargeGRDC.DischargeEvaluation(pcrglobwb_output[\"folder\"],\\\n startDate,endDate,temporary_directory)\n # - get GRDC attributes of all stations:\n 
dischargeEvaluation.get_grdc_attributes(directoryGRDC = globalDirectoryGRDC)\n #\n # - evaluate monthly discharge results\n pcrglobwb_output[\"netcdf_file_name\"] = \"netcdf/discharge_monthAvg_output.nc\"\n pcrglobwb_output[\"netcdf_variable_name\"] = \"discharge\"\n dischargeEvaluation.evaluateAllModelResults(globalCloneMapFileName,\\\n catchmentClassFileName,\\\n lddMapFileName,\\\n cellAreaMapFileName,\\\n pcrglobwb_output,\\\n analysisOutputDir) \n ####################################################################################################\n \n\n # baseflow analysis\n ####################################################################################################\n #\n # make analysisOutputDir\n analysisOutputDir = globalAnalysisOutputDir+\"/calibration/annual_baseflow/\"\n try:\n os.makedirs(analysisOutputDir) \n except:\n if cleanOutputDir == True: os.system('rm -r '+analysisOutputDir+\"/*\") \n #\n # logger object for baseflow analysis\n logger = Logger(analysisOutputDir)\n #\n # annual baseflow evaluation (based on baseflow data)\n baseflowEvaluation = baseflowIWMI.BaseflowEvaluation(pcrglobwb_output[\"folder\"],\\\n startDate,endDate,temporary_directory)\n # - get GRDC attributes of all stations \n # (based on the previous analysis on monthly discharge)\n baseflowEvaluation.get_grdc_attributes(dischargeEvaluation.attributeGRDC, baseflowFolderIWMI)\n #\n # - evaluate annual baseflow time series\n pcrglobwb_output[\"netcdf_file_name\"] = \"netcdf/accuBaseflow_annuaAvg_output.nc\"\n pcrglobwb_output[\"netcdf_variable_name\"] = \"accumulated_land_surface_baseflow\"\n baseflowEvaluation.evaluateAllBaseflowResults(globalCloneMapFileName,\\\n catchmentClassFileName,\\\n lddMapFileName,\\\n cellAreaMapFileName,\\\n pcrglobwb_output,\\\n analysisOutputDir) \n ####################################################################################################\n \n\nif __name__ == '__main__':\n sys.exit(main())\n" }, { "alpha_fraction": 0.5943358540534973, "alphanum_fraction": 0.6209812760353088, "avg_line_length": 49.13999938964844, "blob_id": "2f5b771819a3502b43816d8cc24ed3e94dbf2f9c", "content_id": "e7c91bb36b0ba60ca660067cc45b58b3bc524cad", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 12535, "license_type": "no_license", "max_line_length": 219, "num_lines": 250, "path": "/evaluateAnnualBaseflow.R", "repo_name": "edwinkost/discharge_analysis_IWMI", "src_encoding": "UTF-8", "text": "# This scripts \n\n# clear the memory\nrm(list=ls());ls()\n\n# packages needed:\nrequire('ggplot2'); require('RColorBrewer')\n\n# set minimum number of pairs that will be analyzed:\nminPairs = 2\n\n# functions:\n#\nnPairs_function <- function(obs, pred) length(pred[which(!is.na(obs) & !is.na(pred))])\n#\navg_baseflow_deviation_function <- function (Qopt_obs, Qmax_obs, Qmin_obs, Qsim) {\n # original data\n Qopt_obs_ori <- Qopt_obs\n Qmax_obs_ori <- Qmax_obs\n Qmin_obs_ori <- Qmin_obs\n Qsim_ori <- Qsim\n # throw away missing values (both obs and sim must have values)\n Qopt_obs <- Qopt_obs_ori[!is.na(Qopt_obs_ori) & !is.na(Qmax_obs_ori) & !is.na(Qmin_obs_ori) & !is.na(Qsim_ori)]\n Qmax_obs <- Qmax_obs_ori[!is.na(Qopt_obs_ori) & !is.na(Qmax_obs_ori) & !is.na(Qmin_obs_ori) & !is.na(Qsim_ori)]\n Qmin_obs <- Qmin_obs_ori[!is.na(Qopt_obs_ori) & !is.na(Qmax_obs_ori) & !is.na(Qmin_obs_ori) & !is.na(Qsim_ori)]\n Qsim <- Qsim_ori [!is.na(Qopt_obs_ori) & !is.na(Qmax_obs_ori) & !is.na(Qmin_obs_ori) & !is.na(Qsim_ori)]\n if (length(Qsim) == 0) \n 
return(NA)\n baseflow_deviation = apply(cbind(0, Qsim -Qmax_obs), 1, max) + apply(cbind(0, Qmin_obs - Qsim), 1, max) \n avg_baseflow_deviation = mean(baseflow_deviation)\n return(avg_baseflow_deviation)\n}\n#\nNSeff_function <- function (Qobs, Qsim) {\n # original data:\n Qobs_ori <-Qobs \n Qsim_ori <-Qsim \n # throw away missing values (both obs and sim must have values)\n Qsim <- Qsim_ori[!is.na(Qobs_ori) & !is.na(Qsim_ori)]\n Qobs <- Qobs_ori[!is.na(Qobs_ori) & !is.na(Qsim_ori)]\n if (length(Qobs) == 0 || length(Qsim) == 0) \n return(NA)\n NS <- 1 - (sum((Qobs - Qsim)^2)/sum((Qobs - mean(Qobs))^2))\n return(NS)\n}\n#\nNSeff_log_function <- function (Qobs, Qsim) {\n # avoid zero and negative discharge values\n Qobs[which(Qobs<=1)] = 1\n Qsim[which(Qsim<=1)] = 1\n # convert to become log values\n Qobs = log(Qobs)\n Qsim = log(Qsim)\n # original data:\n Qobs_ori <-Qobs \n Qsim_ori <-Qsim \n # throw away missing values (both obs and sim must have values)\n Qsim <- Qsim_ori[!is.na(Qobs_ori) & !is.na(Qsim_ori)]\n Qobs <- Qobs_ori[!is.na(Qobs_ori) & !is.na(Qsim_ori)]\n if (length(Qobs) == 0 || length(Qsim) == 0) \n return(NA)\n NS <- 1 - (sum((Qobs - Qsim)^2)/sum((Qobs - mean(Qobs))^2))\n return(NS)\n}\n#\navg_obs_function <- function(obs, sim) mean(obs[which(!is.na(obs) & !is.na(sim))]) # PS: While calculating average we consider only complete pairs.\navg_sim_function <- function(obs, sim) mean(sim[which(!is.na(obs) & !is.na(sim))]) # PS: While calculating average we consider only complete pairs. \n#\nrmse_function <- function(obs, pred) sqrt(mean((obs-pred)^2 ,na.rm=T))\n mae_function <- function(obs, pred) mean(abs(obs-pred),na.rm=T)\nbias_function <- function(obs, pred) mean(pred[which(!is.na(obs) & !is.na(pred))]) - mean(obs[which(!is.na(obs) & !is.na(pred))]) # POSITIVE indicates that the average prediction is higher than average observation. 
\nR2_function <- function(obs, pred) summary(lm(obs ~ pred))$r.squared\nR2ad_function <- function(obs, pred) summary(lm(obs ~ pred))$adj.r.squared\n\n# read the arguments\nargs <- commandArgs()\ngrdcFile = args[4]\nmodelFile = args[5]\n\n# load the model result\nmodelTable = read.table(modelFile,header=F,sep=\";\")\t\nmodelTable[,1] = as.character(as.Date(modelTable[,1],origin=\"1901-01-01\"))\n#\n\nsimulation = data.frame(modelTable[,1], modelTable[,2])\nnames(simulation)[1] <- \"date\"\nnames(simulation)[2] <- \"simulation\"\nsimulation$date = paste(substr(as.character(modelTable[,1]), 1,4),\"-01-01\",sep=\"\") # simulation date -> assume day = 1 January\nsimulation$date = as.Date(simulation$date,\"%Y-%m-%d\")\n\n# load the baseflow data provided by IWMI\niwmiTableOriginal <- read.table(grdcFile,header=F,skip=17) # assume that there are 17 lines to be skipped\n#\nnames(iwmiTableOriginal)[1] <- \"date\"\nnames(iwmiTableOriginal)[2] <- \"annual_discharge\" # unit: m3/year\nnames(iwmiTableOriginal)[3] <- \"max_baseflow\" # unit: m3/year\nnames(iwmiTableOriginal)[4] <- \"opt_baseflow\" # unit: m3/year\nnames(iwmiTableOriginal)[5] <- \"min_baseflow\" # unit: m3/year\nnames(iwmiTableOriginal)[6] <- \"record_days\" # unit: days\n#\n# ignore data with record_days <= 200\niwmiTable = iwmiTableOriginal[which(iwmiTableOriginal$record_days > 200),]\n\n# convert unit from m3/year to m3/s\n# - number of days in a year\nthis_year_1_jan = as.Date(paste(as.character(iwmiTable$date ),\"-01-01\",sep=\"\"), \"%Y-%m-%d\")\nnext_year_1_jan = as.Date(paste(as.character(iwmiTable$date+1),\"-01-01\",sep=\"\"), \"%Y-%m-%d\")\ndays_in_a_year = as.numeric(next_year_1_jan - this_year_1_jan) ; print(days_in_a_year)\n# - convert annual discharge, maximum baseflow and minimum baseflow\niwmiTable$annual_discharge = iwmiTable$annual_discharge / (days_in_a_year * 24 * 3600)\niwmiTable$max_baseflow = iwmiTable$max_baseflow / (days_in_a_year * 24 * 3600)\niwmiTable$opt_baseflow = iwmiTable$opt_baseflow / (days_in_a_year * 24 * 3600)\niwmiTable$min_baseflow = iwmiTable$min_baseflow / (days_in_a_year * 24 * 3600)\n\n# convert year to date\niwmiTable$date = paste(substr(as.character(iwmiTable$date), 1,4),\"-01-01\",sep=\"\") # observation date -> assume day = 1 January\niwmiTable$date = as.Date(as.character(iwmiTable$date), \"%Y-%m-%d\")\n\n# merging model and iwmi tables\nmergedTable = merge(simulation,iwmiTable,by=\"date\",all.x=TRUE)\n\nprint(mergedTable)\n\n# available observation data\nlength_observation = length(mergedTable[,1])\n\nif (length_observation < minPairs) {\nprint(paste(\"ONLY FEW OBSERVATION DATA ARE AVAILABLE: \",length_observation,sep=\"\"))} else {\n\n# evaluating model performance:\n#\nnPairs = length_observation\n#\navg_opt_obs = avg_obs_function(mergedTable$opt_baseflow, mergedTable$simulation)\navg_max_obs = avg_obs_function(mergedTable$max_baseflow, mergedTable$simulation)\navg_min_obs = avg_obs_function(mergedTable$min_baseflow, mergedTable$simulation)\n#\navg_sim = avg_sim_function(mergedTable$simulation , mergedTable$simulation)\n#\nNSeff = NSeff_function(mergedTable$opt_baseflow, mergedTable$simulation)\nNSeff_log = NSeff_log_function(mergedTable$opt_baseflow, mergedTable$simulation)\nrmse = rmse_function(mergedTable$opt_baseflow, mergedTable$simulation)\nmae = mae_function(mergedTable$opt_baseflow, mergedTable$simulation)\nbias = bias_function(mergedTable$opt_baseflow, mergedTable$simulation)\nR2 = R2_function(mergedTable$opt_baseflow, mergedTable$simulation) \nR2ad = 
R2ad_function(mergedTable$opt_baseflow, mergedTable$simulation)\ncorrelation = cor(mergedTable$opt_baseflow, mergedTable$simulation, use = \"na.or.complete\")\n#\navg_baseflow_deviation = avg_baseflow_deviation_function(Qopt_obs = mergedTable$opt_baseflow, \n Qmax_obs = mergedTable$max_baseflow, \n Qmin_obs = mergedTable$min_baseflow, \n Qsim = mergedTable$simulation)\n\nprint(avg_baseflow_deviation)\n\nperformance = c(\nnPairs, \navg_opt_obs, avg_max_obs, avg_min_obs, \navg_sim, \nNSeff, NSeff_log, \nrmse, mae, bias, \nR2, R2ad, correlation,\navg_baseflow_deviation)\n#\nperformance_character = paste(\nnPairs, \navg_opt_obs, avg_max_obs, avg_min_obs, \navg_sim, \nNSeff, NSeff_log, \nrmse, mae, bias, \nR2, R2ad, correlation,\navg_baseflow_deviation,\nsep=\";\")\n\n# saving model performance to outputFile (in the memory)\noutputFile = paste(modelFile,\".out\",sep=\"\")\nprint(outputFile)\ncat(\"observation file: \",grdcFile,\"\\n\",sep=\"\",file=outputFile)\ncat(\n\"nPairs\", \n\"avg_opt_obs\", \"avg_max_obs\", \"avg_min_obs\", \n\"avg_sim\", \n\"NSeff\", \"NSeff_log\", \n\"rmse\", \"mae\", \"bias\", \n\"R2\", \"R2ad\", \"correlation\",\n\"avg_baseflow_deviation\",\n\"\\n\",sep=\"\",file=outputFile,append=TRUE)\ncat(performance_character,\"\\n\",sep=\"\",file=outputFile,append=TRUE)\nwrite.table(mergedTable,file=outputFile,sep=\";\",quote=FALSE,append=TRUE,row.names=FALSE)\n\nprint(performance_character)\n\n# read attribute information of station location\nattributeStat = readLines(paste(modelFile,\".atr\",sep=\"\"))\n\n# Plotting the monthly chart !\n####################################################################################################################################\n#\n# x and y- axis scales:\ny_min = 0\ny_max_obs = max(mergedTable$max_baseflow,na.rm=T)\ny_max_sim = max(mergedTable$simulation ,na.rm=T)\ny_max = max(y_max_obs, y_max_sim)\nif (y_max > 50) {y_max = ceiling((y_max+75)/100)*100} else {y_max = 50}\n#\nx_min = min(mergedTable$date,na.rm=T) - 365*5\nx_max = max(mergedTable$date,na.rm=T)\n#\nx_info_text = x_min - 365*0.5\n\noutplott <- ggplot()\noutplott <- outplott +\n layer(data = mergedTable, mapping = aes(x = date, y = opt_baseflow), geom = \"line\", colour = \"red\", size = 0.90) + # measurement\n layer(data = mergedTable, mapping = aes(x = date, y = max_baseflow), geom = \"line\", colour = \"black\", size = 0.45, linetype = \"dashed\") + # model results\n layer(data = mergedTable, mapping = aes(x = date, y = min_baseflow), geom = \"line\", colour = \"black\", size = 0.45, linetype = \"dashed\") + # model results\n layer(data = mergedTable, mapping = aes(x = date, y = simulation ), geom = \"line\", colour = \"blue\", size = 0.35 ) + # model results\n#\n geom_text(aes(x = x_info_text, y = 1.00*y_max, label = attributeStat[1]), size = 2.5,hjust = 0) +\n geom_text(aes(x = x_info_text, y = 0.95*y_max, label = attributeStat[2]), size = 2.5,hjust = 0) +\n geom_text(aes(x = x_info_text, y = 0.90*y_max, label = attributeStat[3]), size = 2.5,hjust = 0) +\n geom_text(aes(x = x_info_text, y = 0.85*y_max, label = attributeStat[4]), size = 2.5,hjust = 0) +\n geom_text(aes(x = x_info_text, y = 0.80*y_max, label = attributeStat[5]), size = 2.5,hjust = 0) +\n geom_text(aes(x = x_info_text, y = 0.75*y_max, label = attributeStat[6]), size = 2.5,hjust = 0) +\n geom_text(aes(x = x_info_text, y = 0.70*y_max, label = attributeStat[7]), size = 2.5,hjust = 0) +\n geom_text(aes(x = x_info_text, y = 0.65*y_max, label = attributeStat[8]), size = 2.5,hjust = 0) +\n geom_text(aes(x = 
x_info_text, y = 0.60*y_max, label = attributeStat[9]), size = 2.5,hjust = 0) +\n#\n geom_text(aes(x = x_info_text, y = 0.55*y_max, label = paste(\" nPairs = \", round(performance[1] ,2),sep=\"\")), size = 2.5,hjust = 0) +\n geom_text(aes(x = x_info_text, y = 0.50*y_max, label = paste(\" avg_opt_obs = \", round(performance[2] ,2),sep=\"\")), size = 2.5,hjust = 0) +\n geom_text(aes(x = x_info_text, y = 0.45*y_max, label = paste(\" avg_max_obs = \", round(performance[3] ,2),sep=\"\")), size = 2.5,hjust = 0) +\n geom_text(aes(x = x_info_text, y = 0.40*y_max, label = paste(\" avg_min_obs = \", round(performance[4] ,2),sep=\"\")), size = 2.5,hjust = 0) +\n geom_text(aes(x = x_info_text, y = 0.35*y_max, label = paste(\" avg_sim = \" , round(performance[5] ,2),sep=\"\")), size = 2.5,hjust = 0) +\n geom_text(aes(x = x_info_text, y = 0.30*y_max, label = paste(\" avg_bf_dev = \" , round(performance[14],2),sep=\"\")), size = 2.5,hjust = 0) +\n geom_text(aes(x = x_info_text, y = 0.25*y_max, label = paste(\" rmse = \", round(performance[8] ,2),sep=\"\")), size = 2.5,hjust = 0) +\n geom_text(aes(x = x_info_text, y = 0.20*y_max, label = paste(\" mae = \", round(performance[9] ,2),sep=\"\")), size = 2.5,hjust = 0) +\n geom_text(aes(x = x_info_text, y = 0.15*y_max, label = paste(\" bias = \", round(performance[10],2),sep=\"\")), size = 2.5,hjust = 0) +\n geom_text(aes(x = x_info_text, y = 0.10*y_max, label = paste(\" R2 = \", round(performance[11],2),sep=\"\")), size = 2.5,hjust = 0) +\n geom_text(aes(x = x_info_text, y = 0.05*y_max, label = paste(\" R2ad = \", round(performance[12],2),sep=\"\")), size = 2.5,hjust = 0) +\n geom_text(aes(x = x_info_text, y = 0.00*y_max, label = paste(\" correlation = \", round(performance[13],2),sep=\"\")), size = 2.5,hjust = 0) +\n#\n scale_y_continuous(\"baseflow (m3/s)\",limits=c(y_min,y_max)) +\n scale_x_date('',limits=c(x_min,x_max)) +\n theme(legend.position = \"none\") \n#ggsave(\"screen.pdf\", plot = outplott,width=30,height=8.25,units='cm')\n ggsave(paste(outputFile,\".pdf\",sep=\"\"), plot = outplott,width=27,height=7,units='cm')\n#\nrm(outplott)\n####################################################################################################################################\n\n}\n" }, { "alpha_fraction": 0.747960090637207, "alphanum_fraction": 0.768812358379364, "avg_line_length": 46.869564056396484, "blob_id": "f4ab83321c3dea28a264f0d879f0e127760bc37b", "content_id": "e20e65944c58bfec35f3766569c685bcfc25ad95", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 1103, "license_type": "no_license", "max_line_length": 113, "num_lines": 23, "path": "/check_a_specific_run.R", "repo_name": "edwinkost/discharge_analysis_IWMI", "src_encoding": "UTF-8", "text": "\n# folder for a specific scenario\nfolder = \"/scratch/edwin/IWMI_calibration/version_01_dec_2014/uncalibrated/code__a__0/analysis/calibration/\"\n\n# read table containing discharge analysis\ndischarge_table_file = paste(folder, \"monthly_discharge/summary.txt\", sep=\"\") \ndischarge_table = read.table(discharge_table_file, header=T, sep= \";\")\n\n# read table containing baseflow analysis\nbaseflow_table_file = paste(folder, \"annual_baseflow/baseflow_summary.txt\", sep=\"\") \nbaseflow_table = read.table(baseflow_table_file, header=T, sep= \";\")\n\n# calculate performance values\n#\nns_discharge = discharge_table$ns_efficiency\nns_discharge[which(ns_discharge < 0.00)] = 0.00\naverage_ns_discharge = mean(ns_discharge, na.rm = 
TRUE)\n#\nbaseflow_deviation_relative = abs(baseflow_table$avg_baseflow_deviation/baseflow_table$average_iwmi_opt_baseflow)\nbaseflow_deviation_relative[which(baseflow_deviation_relative > 1.50)] = 1.50\nbaseflow_deviation = mean(baseflow_deviation_relative, na.rm = TRUE)\nbaseflow_deviation = floor(baseflow_deviation*10)/10\n#\ngeneral_performance = average_ns_discharge / (baseflow_deviation)\n\n" }, { "alpha_fraction": 0.7250911593437195, "alphanum_fraction": 0.746051013469696, "avg_line_length": 45.30986022949219, "blob_id": "45ddb13d27c375da89f8de27ffb6a61e0e2b5594", "content_id": "8ed72f968a2c953b1577659f8a60fa88c7dad6b7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 3292, "license_type": "no_license", "max_line_length": 154, "num_lines": 71, "path": "/find_the_best_runs.R", "repo_name": "edwinkost/discharge_analysis_IWMI", "src_encoding": "UTF-8", "text": "\n# global folder for all scenarios\nglobal_folder = \"/projects/wtrcycle/users/edwinhs/IWMI_calibration/\"\n\nfirst_run_to_be_analyzed = TRUE\nfor (i in seq(0,224,1)) {\n\n# run code/name:\nrun_code = paste(\"code__a__\",i,sep=\"\")\n\n# read table containing discharge analysis\ndischarge_table_file =paste(global_folder, run_code,\"/analysis/calibration/monthly_discharge/summary.txt\",sep=\"\") \ndischarge_table = read.table(discharge_table_file, header=T, sep= \";\")\n\n# read table containing baseflow analysis\nbaseflow_table_file =paste(global_folder, run_code,\"/analysis/calibration/annual_baseflow/baseflow_summary.txt\",sep=\"\") \nbaseflow_table = read.table(baseflow_table_file, header=T, sep= \";\")\n\n# calculate performance values\n#\nns_discharge = discharge_table$ns_efficiency\nns_discharge[which(ns_discharge < 0.00)] = 0.00\naverage_ns_discharge = mean(ns_discharge, na.rm = TRUE)\n#\nbaseflow_deviation_relative = abs(baseflow_table$avg_baseflow_deviation/baseflow_table$average_iwmi_opt_baseflow)\nbaseflow_deviation_relative[which(baseflow_deviation_relative > 1.50)] = 1.50\nbaseflow_deviation = mean(baseflow_deviation_relative, na.rm = TRUE)\nbaseflow_deviation = floor(baseflow_deviation*10)/10\n#\ngeneral_performance = average_ns_discharge / (baseflow_deviation)\n\nif (first_run_to_be_analyzed == TRUE) {\nsummary = cbind(run_code, average_ns_discharge, baseflow_deviation, general_performance)\nfirst_run_to_be_analyzed = FALSE\n} else {\nsummary = rbind(summary, \n cbind(run_code, average_ns_discharge, baseflow_deviation, general_performance))\n}\n}\n\n# convert summary table to data frame\nsummary_df = data.frame(summary)\nsummary_df$run_code = as.character(summary_df$run_code)\nsummary_df$baseflow_deviation = as.numeric(as.character(summary_df$baseflow_deviation))\nsummary_df$average_ns_discharge = as.numeric(as.character(summary_df$average_ns_discharge))\nsummary_df$general_performance = as.numeric(as.character(summary_df$general_performance))\n\n# merge it to parameters table\nparameter_table_1st = read.table(\"/home/edwinhs/github/edwinkost/calibration_log_IWMI/runs_for_IWMI_calibration_code_A/table_24_nov_2014.txt\",header=TRUE)\nparameter_table_2nd = read.table(\"/home/edwinhs/github/edwinkost/calibration_log_IWMI/runs_for_IWMI_calibration_code_A/table_26_nov_2014.txt\",header=TRUE)\nreference_run = c(\"code__a__0\",1.0,0.0,0.0,1.0)\n#\nparameter_table = rbind(reference_run, parameter_table_1st, parameter_table_2nd) \nparameter_table[ ,1] <- as.character(parameter_table[,1])\nparameter_table[1,1] <- \"code__a__0\"\nparameter_table[ ,2] <- 
as.numeric(parameter_table[,2])\nparameter_table[ ,3] <- as.numeric(parameter_table[,3])\nparameter_table[ ,4] <- as.numeric(parameter_table[,4])\nnames(parameter_table)[1] <- \"run_code\"\nnames(parameter_table)[2] <- \"min_soil_depth_frac\"\nnames(parameter_table)[3] <- \"log_ksat\"\nnames(parameter_table)[4] <- \"log_recession_coef\"\nnames(parameter_table)[5] <- \"degree_day_factor\"\n#\nfinal_table = merge(parameter_table, summary_df, by = \"run_code\")\n\n# order it based on performance values\n#~ final_table = final_table[order(final_table$general_performance), ]\nfinal_table = final_table[order(-final_table$general_performance), ]\nfinal_table[1:15,]\n\nwrite.table(final_table,file = \"scatterplot_dec_2014.txt\", col.names=TRUE,row.names=FALSE,sep=\";\")\n\n\n\n" }, { "alpha_fraction": 0.7520661354064941, "alphanum_fraction": 0.7887362241744995, "avg_line_length": 95.65968322753906, "blob_id": "5120a1d03b4e10cf112a7c16e2d012ca74c13cd", "content_id": "dd370bba5d06e34f183addcfeb07fd807209accc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 18271, "license_type": "no_license", "max_line_length": 203, "num_lines": 191, "path": "/calibration_run_045_to_224.sh", "repo_name": "edwinkost/discharge_analysis_IWMI", "src_encoding": "UTF-8", "text": "#!/bin/bash #SBATCH -N 1 \n#SBATCH -t 59:00 \n#SBATCH -p short \n \npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__45 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__46 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__47 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__48 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__49 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__50 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__51 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__52 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__53 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__54 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__55 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__56 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__57 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__58 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__59 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__60 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__61 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__62 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__63 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__64 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__65 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__66 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__67 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__68 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__69 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__70 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__71 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__72 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__73 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__74 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__75 &\nwait\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__76 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__77 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__78 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__79 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__80 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__81 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__82 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__83 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__84 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__85 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__86 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__87 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__88 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__89 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__90 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__91 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__92 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__93 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__94 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__95 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__96 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__97 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__98 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__99 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__100 &\nwait\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__101 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__102 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__103 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__104 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__105 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__106 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__107 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__108 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__109 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__110 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__111 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__112 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__113 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__114 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__115 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__116 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__117 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__118 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__119 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__120 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__121 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__122 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__123 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__124 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__125 &\nwait\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__126 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__127 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__128 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__129 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__130 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__131 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__132 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__133 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__134 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__135 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__136 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__137 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__138 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__139 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__140 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__141 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__142 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__143 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__144 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__145 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__146 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__147 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__148 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__149 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__150 &\nwait\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__151 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__152 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__153 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__154 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__155 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__156 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__157 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__158 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__159 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__160 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__161 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__162 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__163 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__164 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__165 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__166 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__167 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__168 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__169 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__170 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__171 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__172 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__173 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__174 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__175 &\nwait\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__176 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__177 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__178 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__179 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__180 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__181 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__182 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__183 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__184 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__185 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__186 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__187 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__188 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__189 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__190 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__191 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__192 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__193 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__194 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__195 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__196 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__197 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__198 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__199 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__200 &\nwait\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__201 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__202 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__203 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__204 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__205 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__206 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__207 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__208 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__209 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__210 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__211 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__212 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__213 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__214 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__215 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__216 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__217 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__218 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__219 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__220 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__221 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__222 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__223 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__224 &\nwait\n" }, { "alpha_fraction": 0.8111110925674438, "alphanum_fraction": 0.8111110925674438, "avg_line_length": 43.5, "blob_id": "d81cee7e1855b54913d46042dcfa4e46dfaeb1ea", "content_id": "b2e860e56f0ddbe635fc8ce173d0785161fba47e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 90, "license_type": "no_license", "max_line_length": 62, "num_lines": 2, "path": "/README.md", "repo_name": "edwinkost/discharge_analysis_IWMI", "src_encoding": "UTF-8", "text": "# discharge_analysis_IWMI\nThe scripts to analyze model performance for the IWMI project. 
\n" }, { "alpha_fraction": 0.7321768403053284, "alphanum_fraction": 0.7596784234046936, "avg_line_length": 91.68627166748047, "blob_id": "8709eefce81a21180d423387b91a55acca7c18d2", "content_id": "f4f4e11e0d1109f2b8fb8b10de01d2969159dea4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 4727, "license_type": "no_license", "max_line_length": 203, "num_lines": 51, "path": "/calibration_run_000_to_044.sh", "repo_name": "edwinkost/discharge_analysis_IWMI", "src_encoding": "UTF-8", "text": "#!/bin/bash #SBATCH -N 1 \n#SBATCH -t 59:00 \n#SBATCH -p short \n\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__0 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__1 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__2 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__3 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__4 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__5 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__6 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__7 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__8 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__9 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__10 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__11 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__12 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__13 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__14 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__15 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__16 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__17 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__18 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__19 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__20 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__21 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__22 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__23 &\nwait\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__24 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__25 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__26 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__27 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__28 &\npython 0_main_analyze_discharge.py 
/projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__29 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__30 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__31 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__32 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__33 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__34 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__35 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__36 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__37 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__38 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__39 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__40 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__41 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__42 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__43 &\npython 0_main_analyze_discharge.py /projects/wtrcycle/users/edwinhs/IWMI_calibration/code__a__44 &\nwait\n" } ]
8
darmattt/Naruto-labirint
https://github.com/darmattt/Naruto-labirint
26eea8372a0f64dd7c5e302a5ea866ad31aadaef
375b3774ce5aca9b3d37c7ed628c5f102e731988
0e69c6b753c826ef83e42f8db6605167d2e7776f
refs/heads/main
2023-04-09T23:38:23.433904
2021-04-18T09:05:45
2021-04-18T09:05:45
359,092,955
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5105162262916565, "alphanum_fraction": 0.5530061721801758, "avg_line_length": 31.161972045898438, "blob_id": "39b9f492e845feb95aa246158438321f44d54666", "content_id": "ed3caf664c730e53148959602f290629fcadd463", "detected_licenses": [ "CC0-1.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4707, "license_type": "permissive", "max_line_length": 231, "num_lines": 142, "path": "/maze.py", "repo_name": "darmattt/Naruto-labirint", "src_encoding": "UTF-8", "text": "from pygame import *\r\n\r\nclass GameSprite(sprite.Sprite):\r\n def __init__(self, player_image, player_x, player_y, player_speed, height_sprite, width_sprite):\r\n super().__init__()\r\n self.image = transform.scale(image.load(player_image), (height_sprite, width_sprite))\r\n self.speed = player_speed\r\n self.rect = self.image.get_rect()\r\n self.rect.x = player_x\r\n self.rect.y = player_y\r\n def reset(self):\r\n window.blit(self.image, (self.rect.x, self.rect.y))\r\n\r\nclass Player(GameSprite):\r\n def update(self):\r\n keys = key.get_pressed()\r\n if keys[K_LEFT] and self.rect.x > 5:\r\n self.rect.x -= self.speed\r\n if keys[K_RIGHT] and self.rect.x < win_width - 80:\r\n self.rect.x += self.speed\r\n if keys[K_UP] and self.rect.y > 5:\r\n self.rect.y -= self.speed\r\n if keys[K_DOWN] and self.rect.y < win_height - 80:\r\n self.rect.y += self.speed\r\n\r\nclass Enemy(GameSprite):\r\n direction = \"left\"\r\n def update(self):\r\n if self.rect.x <= 490:\r\n self.direction = \"right\"\r\n if self.rect.x >= win_width - 65:\r\n self.direction = \"left\"\r\n if self.direction == \"left\":\r\n self.rect.x -= self.speed\r\n else:\r\n self.rect.x += self.speed\r\n\r\nclass Coach(GameSprite):\r\n direction = \"left\"\r\n def update(self):\r\n if self.rect.x <= 180:\r\n self.direction = \"right\"\r\n if self.rect.x >= win_width - 400:\r\n self.direction = \"left\"\r\n if self.direction == \"left\":\r\n self.rect.x -= self.speed\r\n else:\r\n self.rect.x += self.speed\r\n \r\nclass Star(GameSprite):\r\n direction = \"left\"\r\n def update(self):\r\n if self.rect.x <= 360:\r\n self.direction = \"right\"\r\n if self.rect.x >= win_width - 240:\r\n self.direction = \"left\"\r\n if self.direction == \"left\":\r\n self.rect.x -= self.speed\r\n else:\r\n self.rect.x += self.speed\r\n\r\nclass Wall(sprite.Sprite):\r\n def __init__(self, color_1, color_2, color_3, wall_x, wall_y, wall_width, wall_height):\r\n super().__init__()\r\n self.color_1 = color_1\r\n self.color_2 = color_2\r\n self.color_3 = color_3\r\n self.width = wall_width\r\n self.height = wall_height\r\n self.image = Surface((self.width, self.height))\r\n self.image.fill((color_1, color_2, color_3))\r\n self.rect = self.image.get_rect()\r\n self.rect.x = wall_x\r\n self.rect.y = wall_y\r\n def draw_wall(self):\r\n window.blit(self.image, (self.rect.x, self.rect.y))\r\n #draw.rect(window, (self.color_1, self.color_2, self.color_3), (self.rect.x, self.rect.y, self.width, self.height))5\r\nwin_width = 700\r\nwin_height = 500\r\nwindow = display.set_mode((win_width, win_height))\r\ndisplay.set_caption(\"Maze\")\r\nbackground = transform.scale(image.load(\"background.jpg\"), (win_width, win_height))\r\n\r\nmixer.init()\r\nmixer.music.load('Maze2.mp3')\r\nmixer.music.play()\r\n\r\nplayer = Player('sprite2.png', 20, 350, 5, 100, 100)\r\nvillian = Enemy('sprite1.png', 480, 200, 1, 100, 150)\r\ncoach = Coach('sprite3.png', 200, 180, 1, 50, 100)\r\nfinal = GameSprite('money.png', 580, 400, 0, 90, 90)\r\nstar = Star('ninjastar.png', 
360, 180, 3, 40, 40)\r\n\r\nw1 = Wall(99, 185, 85, 170, 150, 10, 420)\r\nw2 = Wall(99, 185, 85, 350, 0, 10, 350)\r\nw3 = Wall(99, 185, 85, 500, 150, 10, 420)\r\n\r\nfont.init()\r\nfont = font.Font(None, 70)\r\nwin = font.render('YOU WIN!', True, (255, 215, 0))\r\nlose = font.render('YOU LOSE!', True, (180, 0, 0))\r\n\r\nmoney = mixer.Sound('money.ogg')\r\nkick = mixer.Sound('kick.ogg')\r\n\r\nclock = time.Clock()\r\nFPS = 60\r\ngame = True\r\nfinish = False\r\nwhile game:\r\n    for e in event.get():\r\n        if e.type == QUIT:\r\n            game = False\r\n    if finish != True:\r\n        window.blit(background, (0, 0))\r\n        player.update()\r\n        villian.update()\r\n        coach.update()\r\n        star.update()\r\n\r\n        player.reset()\r\n        villian.reset()\r\n        coach.reset()\r\n        final.reset()\r\n        star.reset() \r\n\r\n        w1.draw_wall()\r\n        w2.draw_wall()\r\n        w3.draw_wall()\r\n\r\n        '''if sprite.collide_rect(player, villian) or sprite.collide_rect(player, coach) or sprite.collide_rect(player, star) or sprite.collide_rect(player, w1) or sprite.collide_rect(player, w2) or sprite.collide_rect(player, w3):\r\n            finish = True\r\n            window.blit(lose, (200, 200))\r\n            kick.play()'''\r\n        \r\n        if sprite.collide_rect(player, final):\r\n            finish = True\r\n            window.blit(win, (200, 200))\r\n            money.play()\r\n\r\n    display.update()\r\n    clock.tick(FPS)" }, { "alpha_fraction": 0.8064516186714172, "alphanum_fraction": 0.8064516186714172, "avg_line_length": 30, "blob_id": "c4913ac01f69f94fe405b7d2064fa95a39c39cd4", "content_id": "f8c55050ff4f3afd630ed0c4c5bbfb8b376fc5bd", "detected_licenses": [ "CC0-1.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 98, "license_type": "permissive", "max_line_length": 43, "num_lines": 2, "path": "/README.md", "repo_name": "darmattt/Naruto-labirint", "src_encoding": "UTF-8", "text": "# Naruto-labirint\nA super-duper cool Naruto-style maze!\n" } ]
2
elmahdy-intake37/timber_base
https://github.com/elmahdy-intake37/timber_base
46388e29666a2c21a2d63e1860792d8489bc2ad0
71e475cf1fa497e520a15bafd889c11e5c0680a5
cba7701ee2603fab529d5015824e96a2620570eb
refs/heads/master
2022-12-10T03:47:02.270131
2019-12-23T20:29:59
2019-12-23T20:29:59
229,812,793
0
0
null
2019-12-23T19:40:11
2019-12-23T20:30:18
2022-12-08T03:20:51
Python
[ { "alpha_fraction": 0.673521876335144, "alphanum_fraction": 0.673521876335144, "avg_line_length": 34.3636360168457, "blob_id": "6c05acdac4ea8c8d36891b5f76dffa65c0674ce5", "content_id": "1c8caf9daa83c01da7abe374be65441e690b6fd0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1167, "license_type": "no_license", "max_line_length": 65, "num_lines": 33, "path": "/timber_base/apps/invoices/api/v1/views.py", "repo_name": "elmahdy-intake37/timber_base", "src_encoding": "UTF-8", "text": "from rest_framework import viewsets, mixins\nfrom .serializers import InvoiceSerializer, InvoiceItemSerializer\nfrom rest_framework.permissions import IsAuthenticated\nfrom apps.invoices.models import Invoice, InvoiceLineItem\n\nclass InvoiceViewSet(mixins.CreateModelMixin,\n mixins.RetrieveModelMixin,\n mixins.UpdateModelMixin,\n mixins.DestroyModelMixin,\n mixins.ListModelMixin,\n viewsets.GenericViewSet):\n\n permission_classes = [IsAuthenticated]\n serializer_class = InvoiceSerializer\n queryset = Invoice.objects.all()\n\n\nclass InvoiceItemViewSet(mixins.CreateModelMixin,\n mixins.RetrieveModelMixin,\n mixins.UpdateModelMixin,\n mixins.DestroyModelMixin,\n mixins.ListModelMixin,\n viewsets.GenericViewSet):\n\n permission_classes = [IsAuthenticated]\n serializer_class = InvoiceItemSerializer\n queryset = InvoiceLineItem.objects.all()\n\n '''\n here we will override peform create\n to create invoice item with calc all amount and sub total\n also create client \n '''\n" }, { "alpha_fraction": 0.800000011920929, "alphanum_fraction": 0.8041666746139526, "avg_line_length": 23, "blob_id": "751cf7c1b7559e88979ff7ad75ad39f2e1cfeee5", "content_id": "944760feb01540f19c1e5f6907168e3a377c3984", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 240, "license_type": "no_license", "max_line_length": 54, "num_lines": 10, "path": "/timber_base/apps/clients/api/v1/urls.py", "repo_name": "elmahdy-intake37/timber_base", "src_encoding": "UTF-8", "text": "from django.urls import path\nfrom rest_framework.routers import DefaultRouter\nfrom apps.clients.api.v1.views import ClientViewSet\n\nrouter = DefaultRouter()\n\n\nrouter.register('', ClientViewSet, basename='clients')\n\nurlpatterns = router.urls\n" }, { "alpha_fraction": 0.5077720284461975, "alphanum_fraction": 0.5194300413131714, "avg_line_length": 29.8799991607666, "blob_id": "7da7be5a9a4822cc7a75d45feba6bd4e5690155b", "content_id": "45518098fb55c09ba03d7261b786c37bcfffe700", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 772, "license_type": "no_license", "max_line_length": 65, "num_lines": 25, "path": "/fron_end/js/invoice.js", "repo_name": "elmahdy-intake37/timber_base", "src_encoding": "UTF-8", "text": "$(document).ready(function(){\n$.ajax({\n url: \"http://localhost:8000/api/v1/client/\",\n dataType: 'json',\n type: 'GET',\n headers: {\n 'Authorization': 'Token '+ getCookie('Token')\n },\n // processData: false,\n timeout: 5000,\n success: function( data, textStatus, jQxhr ){\n console.log('data', data);\n\n // $('#response pre').html( JSON.stringify( data ) );\n },\n error: function(xhr, textStatus, error){\n console.log(\"readyState: \" + xhr.readyState);\n console.log(\"responseText: \"+ xhr.responseText);\n console.log(\"status: \" + xhr.status);\n console.log(\"text status: \" + textStatus);\n console.log(\"error: \" + error);\n\n }\n });\n });\n" }, { "alpha_fraction": 
0.5353901982307434, "alphanum_fraction": 0.7223230600357056, "avg_line_length": 18, "blob_id": "6f222d6371225d7a3fe27c882860583ef5eeced6", "content_id": "b0e45eedac5a114e28c7dbbab23a0f5e9ccbe385", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 551, "license_type": "no_license", "max_line_length": 31, "num_lines": 29, "path": "/timber_base/requirements/requirements.txt", "repo_name": "elmahdy-intake37/timber_base", "src_encoding": "UTF-8", "text": "asgiref==3.2.3\nBabel==2.7.0\ncertifi==2019.11.28\nchardet==3.0.4\ncoreapi==2.3.3\ncoreschema==0.0.4\nDjango==2.2.2\ndjango-cors-headers==3.2.0\ndjango-environ==0.4.5\ndjango-localflavor==2.2\ndjango-phonenumber-field==3.0.1\ndjango-rest-auth==0.9.5\ndjango-rest-swagger==2.2.0\ndjangorestframework==3.11.0\nidna==2.8\nitypes==1.1.0\nJinja2==2.10.3\nMarkupSafe==1.1.1\nopenapi-codec==1.3.2\nphonenumbers==8.11.1\nphonenumberslite==8.10.14\npython-stdnum==1.12\npytz==2019.3\nrequests==2.22.0\nsimplejson==3.17.0\nsix==1.13.0\nsqlparse==0.3.0\nuritemplate==3.0.1\nurllib3==1.25.7\n" }, { "alpha_fraction": 0.5811320543289185, "alphanum_fraction": 0.6396226286888123, "avg_line_length": 26.894737243652344, "blob_id": "ae955b8cbf3cdb1f1baac456766582393773ab8f", "content_id": "85ed1e925fd9b550b6f359d88dd893d825be2c02", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 530, "license_type": "no_license", "max_line_length": 163, "num_lines": 19, "path": "/timber_base/apps/users/migrations/0003_auto_20191222_0711.py", "repo_name": "elmahdy-intake37/timber_base", "src_encoding": "UTF-8", "text": "# Generated by Django 2.2.2 on 2019-12-22 07:11\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('users', '0002_auto_20191222_0705'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='profile',\n name='sales',\n field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='clients', to='clients.Client', verbose_name='Sales Man'),\n ),\n ]\n" }, { "alpha_fraction": 0.52173912525177, "alphanum_fraction": 0.573369562625885, "avg_line_length": 19.44444465637207, "blob_id": "c7ddc36873e256699d45ed6e5cbde4540943de39", "content_id": "7e2a72ef48b4c40ece01ef66a7aa22da5ecf3378", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 368, "license_type": "no_license", "max_line_length": 50, "num_lines": 18, "path": "/timber_base/apps/clients/migrations/0003_auto_20191223_0733.py", "repo_name": "elmahdy-intake37/timber_base", "src_encoding": "UTF-8", "text": "# Generated by Django 2.2.2 on 2019-12-23 07:33\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('clients', '0002_remove_client_invoice'),\n ]\n\n operations = [\n migrations.RenameField(\n model_name='client',\n old_name='Counrty',\n new_name='country',\n ),\n ]\n" }, { "alpha_fraction": 0.6685134172439575, "alphanum_fraction": 0.6888273358345032, "avg_line_length": 59.16666793823242, "blob_id": "67ee4a05be59a1ca58a821ca500253c012279d88", "content_id": "36210822d4f590e5c5cfb9cfb5994bc9642e3652", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1084, "license_type": "no_license", "max_line_length": 107, "num_lines": 18, "path": "/timber_base/apps/clients/models.py", "repo_name": "elmahdy-intake37/timber_base", 
"src_encoding": "UTF-8", "text": "from django.db import models\n# from apps.invoices.models import Invoice\nfrom phonenumber_field.modelfields import PhoneNumberField\nfrom django.utils.translation import gettext_lazy as _\n\n# Create your models here.\nclass Client(models.Model):\n # invoice = models.ForeignKey(Invoice, verbose_name=('Invoices'),\n # related_name='clients_invoices', on_delete=models.CASCADE)\n email = models.EmailField(_('email address'), unique=True)\n server_url = models.URLField(_('Server url'), null=True)\n address_1 = models.CharField(_(\"address\"), max_length=128)\n address_2 = models.CharField(_(\"address cont'd\"), max_length=128, blank=True)\n city = models.CharField(_(\"city\"), max_length=64, default=\"Düsseldorf\")\n country = models.CharField(_(\"country\"), max_length=64, default=\"Germany\")\n state = models.CharField(_(\"state\"), max_length=64, default=\"\")\n zip_code = models.CharField(_(\"zip code\"), max_length=5, default=\"43701\")\n phone = PhoneNumberField(_('Mobile'), help_text=('with country code (eg. +48)'), blank=True, null=True)\n" }, { "alpha_fraction": 0.6313087940216064, "alphanum_fraction": 0.6338751316070557, "avg_line_length": 40.75, "blob_id": "d381ce4ff3c1fe700dd0ce1f734fff43a1d676d3", "content_id": "58fa361a4cce15d9ba809693f1821156e1ccd518", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1169, "license_type": "no_license", "max_line_length": 89, "num_lines": 28, "path": "/timber_base/apps/users/utils.py", "repo_name": "elmahdy-intake37/timber_base", "src_encoding": "UTF-8", "text": "from django.contrib.auth import authenticate\nfrom rest_framework import serializers\nfrom django.contrib.auth import get_user_model\nfrom apps.users.models import Profile\n\ndef get_and_authenticate_user(username, password):\n user = authenticate(username=username, password=password)\n if user is None:\n raise serializers.ValidationError(\"Invalid username/password. 
Please try again!\")\n return user\n\n\ndef create_user_account(email, password, first_name=\"\",\n last_name=\"\", **extra_fields):\n user = get_user_model().objects.create_user(\n email=email, password=password, first_name=first_name,\n last_name=last_name, **extra_fields)\n return user\n\ndef create_profile_user(address_1, city, country, mobile, server_url, state,user,\n **extra_fields):\n print('extra_fields', extra_fields)\n print('user', user)\n profile = Profile.objects.create(address_1=address_1, city=city,\n country=country, mobile=mobile,\n server_url=server_url, state=state,\n **extra_fields)\n return profile\n" }, { "alpha_fraction": 0.6673511266708374, "alphanum_fraction": 0.6824092864990234, "avg_line_length": 51.17856979370117, "blob_id": "9b7e63bc84dd7ee66796551a5e1affbdfc005aaf", "content_id": "9661cd608b0eef18620b967d5c7813cf03bff269", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1462, "license_type": "no_license", "max_line_length": 108, "num_lines": 28, "path": "/timber_base/apps/users/models.py", "repo_name": "elmahdy-intake37/timber_base", "src_encoding": "UTF-8", "text": "from django.db import models\nfrom django.contrib.auth.models import User\nfrom phonenumber_field.modelfields import PhoneNumberField\nfrom django.utils.translation import gettext_lazy as _\n\nfrom apps.clients.models import Client\nfrom django.db.models.signals import post_save\nfrom django.dispatch import receiver\n# Create your models here.\nclass Profile(models.Model):\n user = models.OneToOneField(User,null=True, on_delete=models.CASCADE)\n sales = models.ForeignKey(Client, verbose_name=('Sales Man'),\n related_name='clients',null=True,\n on_delete=models.CASCADE)\n server_url = models.URLField(_('Server url'), null=True)\n address_1 = models.CharField(_(\"address\"), max_length=128)\n address_2 = models.CharField(_(\"address cont'd\"), max_length=128, blank=True)\n city = models.CharField(_(\"city\"), max_length=64, default=\"Düsseldorf\")\n country = models.CharField(_(\"Country\"), max_length=64, default=\"Germany\")\n state = models.CharField(_(\"state\"), max_length=64, default=\"\")\n zip_code = models.CharField(_(\"zip code\"), max_length=5, default=\"43701\")\n mobile = PhoneNumberField(_('Mobile'), help_text=('with country code (eg. 
+48)'), blank=True, null=True)\n\n # @receiver(post_save, sender=User)\n # def update_user_profile(sender, instance, created, **kwargs):\n # if created:\n # Profile.objects.create(user=instance)\n # instance.profile.save()\n" }, { "alpha_fraction": 0.6097832322120667, "alphanum_fraction": 0.6342412233352661, "avg_line_length": 50.400001525878906, "blob_id": "d8644096613945afa0758a7079470d534a8a4cc3", "content_id": "befe63b22f5baa91f18f2489c8dabe0ca9124631", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1800, "license_type": "no_license", "max_line_length": 191, "num_lines": 35, "path": "/timber_base/apps/users/migrations/0001_initial.py", "repo_name": "elmahdy-intake37/timber_base", "src_encoding": "UTF-8", "text": "# Generated by Django 2.2.2 on 2019-12-21 20:59\n\nfrom django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\nimport phonenumber_field.modelfields\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ('clients', '0001_initial'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Profile',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('server_url', models.URLField(null=True, verbose_name='Server url')),\n ('address_1', models.CharField(max_length=128, verbose_name='address')),\n ('address_2', models.CharField(blank=True, max_length=128, verbose_name=\"address cont'd\")),\n ('city', models.CharField(default='Düsseldorf', max_length=64, verbose_name='city')),\n ('Counrty', models.CharField(default='Germany', max_length=64, verbose_name='country')),\n ('state', models.CharField(default='', max_length=64, verbose_name='state')),\n ('zip_code', models.CharField(default='43701', max_length=5, verbose_name='zip code')),\n ('mobile', phonenumber_field.modelfields.PhoneNumberField(blank=True, help_text='with country code (eg. 
+48)', max_length=128, null=True, region=None, verbose_name='Mobile')),\n ('sales', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='clients', to='clients.Client', verbose_name='Sales Man')),\n ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),\n ],\n ),\n ]\n" }, { "alpha_fraction": 0.6998223662376404, "alphanum_fraction": 0.6998223662376404, "avg_line_length": 36.53333282470703, "blob_id": "cea6a5f34a66324baa5192313389f01403d8b9d2", "content_id": "46bd7b6ee9896e53d20cd271c9dbc4a78ca02719", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 563, "license_type": "no_license", "max_line_length": 54, "num_lines": 15, "path": "/timber_base/apps/clients/api/v1/views.py", "repo_name": "elmahdy-intake37/timber_base", "src_encoding": "UTF-8", "text": "from rest_framework import viewsets, mixins\nfrom .serializers import ClientSerializer\nfrom rest_framework.permissions import IsAuthenticated\nfrom apps.clients.models import Client\n\nclass ClientViewSet(mixins.CreateModelMixin,\n mixins.RetrieveModelMixin,\n mixins.UpdateModelMixin,\n mixins.DestroyModelMixin,\n mixins.ListModelMixin,\n viewsets.GenericViewSet):\n\n permission_classes = [IsAuthenticated]\n serializer_class = ClientSerializer\n queryset = Client.objects.all()\n" }, { "alpha_fraction": 0.682572603225708, "alphanum_fraction": 0.6970954537391663, "avg_line_length": 29.125, "blob_id": "d011fe58a26529e5eab24b7de3c46fe61d72c664", "content_id": "442d29527a58029bfe461f1ad9c46dbffc8db642", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 482, "license_type": "no_license", "max_line_length": 69, "num_lines": 16, "path": "/timber_base/timber_base/urls.py", "repo_name": "elmahdy-intake37/timber_base", "src_encoding": "UTF-8", "text": "from django.contrib import admin\nfrom django.urls import path, include\nfrom rest_framework_swagger.views import get_swagger_view\n\nschema_view = get_swagger_view(title='Temper Base API Documentation')\n\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('api/v1/', include('apps.invoices.api.v1.urls')),\n path('api/v1/auth/', include('apps.users.api.v1.urls')),\n path('api/v1/docs/', schema_view),\n path('api/v1/client/', include('apps.clients.api.v1.urls'))\n\n\n]\n" }, { "alpha_fraction": 0.800000011920929, "alphanum_fraction": 0.800000011920929, "avg_line_length": 54, "blob_id": "0c1b0153794f853c2ce6cb5b1eae345839c23f38", "content_id": "3e5ec482af358eff0a22350112966bca202b78f5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 55, "license_type": "no_license", "max_line_length": 54, "num_lines": 1, "path": "/timber_base/apps/clients/__init__.py", "repo_name": "elmahdy-intake37/timber_base", "src_encoding": "UTF-8", "text": "default_app_config = 'apps.clients.apps.ClientsConfig'\n" }, { "alpha_fraction": 0.5965741276741028, "alphanum_fraction": 0.624335527420044, "avg_line_length": 50.30303192138672, "blob_id": "38bb4af640492d0ad25c0af8262ed86045dc25ec", "content_id": "8a60c5d86d95e26c74d4fa09d5bfe7740850bcad", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1694, "license_type": "no_license", "max_line_length": 190, "num_lines": 33, "path": "/timber_base/apps/clients/migrations/0001_initial.py", "repo_name": "elmahdy-intake37/timber_base", "src_encoding": "UTF-8", "text": "# Generated by 
Django 2.2.2 on 2019-12-21 20:59\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\nimport phonenumber_field.modelfields\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ('invoices', '0001_initial'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Client',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('email', models.EmailField(max_length=254, unique=True, verbose_name='email address')),\n ('server_url', models.URLField(null=True, verbose_name='Server url')),\n ('address_1', models.CharField(max_length=128, verbose_name='address')),\n ('address_2', models.CharField(blank=True, max_length=128, verbose_name=\"address cont'd\")),\n ('city', models.CharField(default='Düsseldorf', max_length=64, verbose_name='city')),\n ('Counrty', models.CharField(default='Germany', max_length=64, verbose_name='country')),\n ('state', models.CharField(default='', max_length=64, verbose_name='state')),\n ('zip_code', models.CharField(default='43701', max_length=5, verbose_name='zip code')),\n ('phone', phonenumber_field.modelfields.PhoneNumberField(blank=True, help_text='with country code (eg. +48)', max_length=128, null=True, region=None, verbose_name='Mobile')),\n ('invoice', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='clients_invoices', to='invoices.Invoice', verbose_name='Invoices')),\n ],\n ),\n ]\n" }, { "alpha_fraction": 0.6745406985282898, "alphanum_fraction": 0.6771653294563293, "avg_line_length": 21.41176414489746, "blob_id": "124f72cf9932561876ee77214bc9c6b4ecb0e764", "content_id": "ebdbc45d540c432752dde791667da30d542a4889", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 381, "license_type": "no_license", "max_line_length": 57, "num_lines": 17, "path": "/timber_base/apps/invoices/api/v1/serializers.py", "repo_name": "elmahdy-intake37/timber_base", "src_encoding": "UTF-8", "text": "from rest_framework import serializers\nfrom apps.invoices.models import (\n Invoice, InvoiceLineItem\n)\n\n\nclass InvoiceSerializer(serializers.ModelSerializer):\n class Meta:\n model = Invoice\n fields = '__all__'\n\n\nclass InvoiceItemSerializer(serializers.ModelSerializer):\n class Meta:\n model = InvoiceLineItem\n fields = '__all__'\n depth = 1\n" }, { "alpha_fraction": 0.6817660331726074, "alphanum_fraction": 0.6938073635101318, "avg_line_length": 55.25806427001953, "blob_id": "124564a799186f870e87552eaa7902948e59c951", "content_id": "f7ebed893e4b4dc6e04185c93d01b862655f210b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1744, "license_type": "no_license", "max_line_length": 119, "num_lines": 31, "path": "/timber_base/apps/invoices/models.py", "repo_name": "elmahdy-intake37/timber_base", "src_encoding": "UTF-8", "text": "from django.db import models\nfrom django.contrib.auth.models import User\nimport uuid\nfrom django.utils.translation import gettext_lazy as _\nfrom apps.clients.models import Client\n# Create your models here.\n\nclass Invoice(models.Model):\n clients = models.ForeignKey(Client, verbose_name=_('Invoice Client '),\n related_name='clients_invoices',\n on_delete=models.CASCADE)\n admin = models.ForeignKey(User, verbose_name=('Manager'),\n related_name='Manager', on_delete=models.CASCADE)\n sub_total = models.DecimalField(max_digits=7, decimal_places=2)\n tax = models.DecimalField(max_digits=7, 
decimal_places=2)\n discount = models.DecimalField(max_digits=7, decimal_places=2)\n total = models.DecimalField(max_digits=7, decimal_places=2)\n invoice_no = models.UUIDField(_('Invoice Number'), default=uuid.uuid4)\n amount = models.DecimalField(max_digits=6, decimal_places=2)\n terms = models.TextField(blank=True, default=\"\")\n du_date = models.DateTimeField(_(\"Du Date\"), auto_now_add=True)\n invoice_date = models.DateTimeField(_(\"Invoice Date\"), auto_now=True, null=True)\n\nclass InvoiceLineItem(models.Model):\n invoices = models.ForeignKey(Invoice, verbose_name=('Invoices'), related_name='invoices', on_delete=models.CASCADE)\n code = models.UUIDField(_('Code'), default=uuid.uuid4)\n name = models.CharField(_('Name Of Item'), max_length=250, blank=True, null=True)\n rate = models.DecimalField(max_digits=6, decimal_places=2)\n discount = models.DecimalField(max_digits=7, decimal_places=2)\n cost = models.DecimalField(max_digits=6, decimal_places=2)\n description = models.TextField(blank=True, default=\"\")\n" }, { "alpha_fraction": 0.5914652943611145, "alphanum_fraction": 0.599571704864502, "avg_line_length": 31.366336822509766, "blob_id": "7357fa4d7f668b5788d12ddd631265d62d302cf5", "content_id": "e473958f9d19b2810c38ac414937e0a1c26efd59", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 6538, "license_type": "no_license", "max_line_length": 949, "num_lines": 202, "path": "/fron_end/js/dashboard.js", "repo_name": "elmahdy-intake37/timber_base", "src_encoding": "UTF-8", "text": "var txt = document.getElementById('brand').innerText = \"Welcome \"+ localStorage.getItem(\"username\")\n\n$(document).ready(function(){\n$.ajax({\n url: \"http://localhost:8000/api/v1/client/\",\n dataType: 'json',\n type: 'GET',\n headers: {\n 'Authorization': 'Token '+ getCookie('Token')\n },\n // processData: false,\n timeout: 5000,\n success: function( data, textStatus, jQxhr ){\n console.log('data', data);\n\n // $('#response pre').html( JSON.stringify( data ) );\n },\n error: function(xhr, textStatus, error){\n console.log(\"readyState: \" + xhr.readyState);\n console.log(\"responseText: \"+ xhr.responseText);\n console.log(\"status: \" + xhr.status);\n console.log(\"text status: \" + textStatus);\n console.log(\"error: \" + error);\n\n }\n });\n });\n\njQuery(function ($) {\n\n $(\".sidebar-dropdown > a\").click(function () {\n $(\".sidebar-submenu\").slideUp(200);\n if ($(this).parent().hasClass(\"active\")) {\n $(\".sidebar-dropdown\").removeClass(\"active\");\n $(this).parent().removeClass(\"active\");\n } else {\n $(\".sidebar-dropdown\").removeClass(\"active\");\n $(this).next(\".sidebar-submenu\").slideDown(200);\n $(this).parent().addClass(\"active\");\n }\n\n });\n\n $(\"#toggle-sidebar\").click(function () {\n $(\".page-wrapper\").toggleClass(\"toggled\");\n });\n\n\n});\n\n\ndocument.getElementById(\"clickMe\").onclick = function () {\n var headers = {\n 'Authorization': 'Token '+ getCookie('Token')\n }\n console.log(headers)\n $.ajax({\n url:'http://localhost:8000/api/v1/invoice/',\n dataType: 'json',\n type: 'GET',\n headers: {\n 'Authorization': 'Token '+ getCookie('Token')\n },\n // processData: false,\n timeout: 5000,\n success: function( data, textStatus, jQxhr ){\n console.log('data', data);\n window.location.href = 'invoice.html';\n\n // $('#response pre').html( JSON.stringify( data ) );\n },\n error: function(xhr, textStatus, error){\n console.log(\"readyState: \" + xhr.readyState);\n console.log(\"responseText: \"+ 
xhr.responseText);\n console.log(\"status: \" + xhr.status);\n console.log(\"text status: \" + textStatus);\n console.log(\"error: \" + error);\n\n }\n });\n };\n\n\n $(function() {\n\n // contact form animations\n $('#invoice').click(function() {\n $('#invoiceForm').fadeToggle();\n })\n $(document).mouseup(function (e) {\n var container = $(\"#invoiceForm\");\n\n if (!container.is(e.target) // if the target of the click isn't the container...\n && container.has(e.target).length === 0) // ... nor a descendant of the container\n {\n container.fadeOut();\n }\n });\n\n });\n\n\nvar room = 1;\nfunction education_fields() {\n\n room++;\n var objTo = document.getElementById('education_fields')\n var divtest = document.createElement(\"div\");\n\tdivtest.setAttribute(\"class\", \"form-group removeclass\"+room);\n\tvar rdiv = 'removeclass'+room;\n divtest.innerHTML = '<div class=\"col-sm-9 nopadding\"><div class=\"form-group\"> <input type=\"text\" class=\"form-control\" id=\"description\" name=\"description[]\" value=\"\" placeholder=\"description\"></div></div><div class=\"col-sm-9 nopadding\"><div class=\"form-group\"> <input type=\"text\" class=\"form-control\" id=\"cost\" name=\"cost[]\" value=\"\" placeholder=\"cost\"></div></div><div class=\"col-sm-9 nopadding\"><div class=\"form-group\"> <input type=\"text\" class=\"form-control\" id=\"qty\" name=\"Degree[]\" value=\"\" placeholder=\"Degree\"></div></div><div class=\"col-sm-9 nopadding\"><div class=\"form-group\"> <input type=\"text\" class=\"form-control\" id=\"rate\" name=\"rate[]\" value=\"\" placeholder=\"rate\"></div></div><div class=\"input-group-btn\"> <button class=\"btn btn-danger\" type=\"button\" onclick=\"remove_education_fields('+ room +');\"> <span class=\"glyphicon glyphicon-minus\" aria-hidden=\"true\"></span> remove</button></div></div></div></div><div class=\"clear\"></div>';\n\n objTo.appendChild(divtest)\n}\n function remove_education_fields(rid) {\n\t $('.removeclass'+rid).remove();\n }\n // this need to handle\nfunction send_req(){\n var desc_invoice = {\n 'desc':'',\n 'cost':'',\n 'tax':'',\n 'qty':''\n }\n var invoice_list = []\n\n var des = document.querySelectorAll('[id=description]'),\n cost = document.querySelectorAll('[id=cost]'),\n tax = document.querySelectorAll('[id=tax]'),\n qty = document.querySelectorAll('[id=qty]'),\n clientName = document.getElementById('clientName'),\n zipCode = document.getElementById('zipCode'),\n city = document.getElementById('city'),\n state = document.getElementById('state'),\n country = document.getElementById('country'),\n terms = document.getElementById('terms')\n\nvar data = {\n 'desc_invoice':desc_invoice,\n 'clinet': clientName,\n 'zipCode': zipCode,\n 'city': city,\n 'state': state,\n 'country': country,\n 'terms': terms\n}\n\nfor(var i = 0; i < des.length; i++){\ndesc_invoice['desc'] = des[i].value\ninvoice_list.push(desc_invoice)\n}\nfor(var i = 0; i < cost.length; i++){\ndesc_invoice['cost'] = cost[i].value\ninvoice_list.push(desc_invoice)\n}\nfor(var i = 0; i < tax.length; i++){\ndesc_invoice['tax'] = tax[i].value\ninvoice_list.push(desc_invoice)\n}\nfor(var i = 0; i < qty.length; i++){\ndesc_invoice['qty'] = qty[i].value\ninvoice_list.push(desc_invoice)\n}\n\n$.ajax({\n url: 'http://localhost:8000/api/v1/invoice_item/',\n dataType: 'json',\n type: 'POST',\n \"Content-type\": 'application/json',\n data: {data:JSON.stringify(data)},\n // processData: false,\n timeout: 5000,\n success: function( data, textStatus, jQxhr ){\n // console.log('data', data['auth_token']);\n // 
document.cookie=\"username=\"+data['username']+\";max-age=\"+ 30*24*60*60;\n var cookieName = 'Token',\n cookieValue = data['auth_token'],\n myDate = new Date();\n myDate.setMonth(myDate.getMonth() + 12);\n document.cookie = cookieName +\"=\" + cookieValue + \";expires=\" + myDate\n + \";path=/\";\n localStorage.setItem(\"username\",data['username']);\n\n // $('#response pre').html( JSON.stringify( data ) );\n window.location.href = '../../html/dashboard/dashboard.html';\n },\n error: function(xhr, textStatus, error){\n console.log(\"readyState: \" + xhr.readyState);\n console.log(\"responseText: \"+ xhr.responseText);\n console.log(\"status: \" + xhr.status);\n console.log(\"text status: \" + textStatus);\n console.log(\"error: \" + error);\n\n }\n});\n\n\n\n\n\n}\n" }, { "alpha_fraction": 0.7387387156486511, "alphanum_fraction": 0.7537537813186646, "avg_line_length": 22.785715103149414, "blob_id": "4126d435b5a151bfba39be456924b601f860642e", "content_id": "4ffb96993599b209988e0274d27bf2de0ff0097e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 333, "license_type": "no_license", "max_line_length": 78, "num_lines": 14, "path": "/README.md", "repo_name": "elmahdy-intake37/timber_base", "src_encoding": "UTF-8", "text": "# timber_base\n> this app to create invoice\n\n>open folder html/auth/login_and_register\n\n__open django server__\n_make migration, migrate_\nhttp://localhost:8000 - atomatic!\n\n`all api under api/v1`\n\n**after register login by username and password from file login_and_register**\n\n>for now index file empty it will be for index fronnt end\n" }, { "alpha_fraction": 0.5952283143997192, "alphanum_fraction": 0.6100370287895203, "avg_line_length": 50.72340393066406, "blob_id": "f8ba32796c1e5bae7c8c19a9127e9ad5a415758e", "content_id": "970b4794d59cdd6c27d8510096450dff02930f99", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2431, "license_type": "no_license", "max_line_length": 167, "num_lines": 47, "path": "/timber_base/apps/invoices/migrations/0001_initial.py", "repo_name": "elmahdy-intake37/timber_base", "src_encoding": "UTF-8", "text": "# Generated by Django 2.2.2 on 2019-12-21 20:59\n\nfrom django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\nimport uuid\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Invoice',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('sub_total', models.DecimalField(decimal_places=2, max_digits=7)),\n ('tax', models.DecimalField(decimal_places=2, max_digits=7)),\n ('discount', models.DecimalField(decimal_places=2, max_digits=7)),\n ('total', models.DecimalField(decimal_places=2, max_digits=7)),\n ('invoice_no', models.UUIDField(default=uuid.uuid4, verbose_name='Invoice Number')),\n ('amount', models.DecimalField(decimal_places=2, max_digits=6)),\n ('terms', models.TextField(blank=True, default='')),\n ('du_date', models.DateTimeField(auto_now_add=True, verbose_name='Du Date')),\n ('invoice_date', models.DateTimeField(auto_now=True, null=True, verbose_name='Invoice Date')),\n ('admin', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='Manager', to=settings.AUTH_USER_MODEL, verbose_name='Manager')),\n ],\n ),\n migrations.CreateModel(\n 
name='InvoiceLineItem',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('code', models.UUIDField(default=uuid.uuid4, verbose_name='Code')),\n ('name', models.CharField(blank=True, max_length=250, null=True, verbose_name='Name Of Item')),\n ('rate', models.DecimalField(decimal_places=2, max_digits=6)),\n ('discount', models.DecimalField(decimal_places=2, max_digits=7)),\n ('cost', models.DecimalField(decimal_places=2, max_digits=6)),\n ('description', models.TextField(blank=True, default='')),\n ('invoices', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='invoices', to='invoices.Invoice', verbose_name='Invoices')),\n ],\n ),\n ]\n" }, { "alpha_fraction": 0.8063660264015198, "alphanum_fraction": 0.8090185523033142, "avg_line_length": 30.41666603088379, "blob_id": "c779f52c26917c9f4650f2bf11e766af376ddbd2", "content_id": "912dcfae43459b97f06e7d78a232aabc90afcf9b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 377, "license_type": "no_license", "max_line_length": 76, "num_lines": 12, "path": "/timber_base/apps/invoices/api/v1/urls.py", "repo_name": "elmahdy-intake37/timber_base", "src_encoding": "UTF-8", "text": "from django.urls import path\nfrom rest_framework.routers import DefaultRouter\nfrom apps.invoices.api.v1.views import InvoiceViewSet, InvoiceItemViewSet\n\nrouter = DefaultRouter()\n\n\nrouter.register('invoice', InvoiceViewSet, basename='invoices')\nrouter.register('invoice_item', InvoiceItemViewSet, basename='invoice_itme')\n# urlpatterns += router.urls\n\nurlpatterns = router.urls\n" } ]
20
techniqueFirst/techniquefirst
https://github.com/techniqueFirst/techniquefirst
fa4bdae45432adc423e44c0f08c3939167d28a13
14f162e0b59a73e71e73f5e273a62fdce02fc37d
664bacf13c0d1ee868f43cadee2afeb720762d63
refs/heads/master
2018-07-24T14:54:10.443860
2018-07-22T19:59:45
2018-07-22T19:59:45
121,247,972
0
0
null
2018-02-12T13:09:47
2019-03-02T21:17:44
2019-04-08T12:13:55
HTML
[ { "alpha_fraction": 0.7099664211273193, "alphanum_fraction": 0.7122060656547546, "avg_line_length": 34.7599983215332, "blob_id": "88b077a6ff90000950e2aba5a80631a7cfd115a1", "content_id": "334b4dbbca1cc91c0efc16360094b9b830b6df77", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 893, "license_type": "no_license", "max_line_length": 76, "num_lines": 25, "path": "/techniquefirst/tests/test_blog.py", "repo_name": "techniqueFirst/techniquefirst", "src_encoding": "UTF-8", "text": "from django.test import TestCase\nfrom django.urls import reverse\n\nclass TestBlogPage(TestCase):\n \n # Testing template loading\n def test_blog_uses_base(self):\n response = self.client.get(reverse(\"blog\"))\n self.assertTemplateUsed(response, \"base.html\")\n\n def test_blog_uses_bloghome_template(self):\n \tresponse = self.client.get(reverse(\"blog\"))\n \tself.assertTemplateUsed(response, \"techniquefirst/blog-home.html\")\n\n def test_blog_post_uses_correct_template_1(self):\n \tresponse = self.client.get(reverse(\"blog\", args=['how_you_move']))\n \tself.assertTemplateUsed(response, \"techniquefirst/blog.html\")\n\n def test_blog_post_uses_correct_template_2(self):\n \tresponse = self.client.get(reverse(\"blog\", args=['abc_weightlifting']))\n \tself.assertTemplateUsed(response, \"techniquefirst/blog.html\")\n\n \n\t\t\n #Testing parameter forwarding in render() call" }, { "alpha_fraction": 0.6511891484260559, "alphanum_fraction": 0.6613816618919373, "avg_line_length": 33, "blob_id": "ef3f938788b8ba8313ba8af07c3c91a7799a2aee", "content_id": "13057af325544f018d6f673e8d84e9e20ddaacf1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 883, "license_type": "no_license", "max_line_length": 73, "num_lines": 26, "path": "/functional_tests/test_home.py", "repo_name": "techniqueFirst/techniquefirst", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nfrom selenium import webdriver\nfrom django.urls import reverse\nfrom django.contrib.staticfiles.testing import StaticLiveServerTestCase \n \n \nclass HomeNewVisitorTest(StaticLiveServerTestCase): \n def setUp(self):\n self.browser = webdriver.Chrome()\n self.browser.implicitly_wait(3)\n \n def tearDown(self):\n self.browser.quit()\n \n def get_full_url(self, namespace):\n return self.live_server_url + reverse(namespace)\n \n def test_home_title(self):\n self.browser.get(self.get_full_url(\"home\"))\n self.assertIn(\"TechniqueFirst\", self.browser.title)\n\n def test_navbar_css(self):\n self.browser.get(self.get_full_url(\"home\"))\n navBar = self.browser.find_element_by_id(\"navbarResponsive\")\n self.assertEqual(navBar.value_of_css_property(\"color\"), \n \"rgba(33, 37, 41, 1)\")" }, { "alpha_fraction": 0.7200000286102295, "alphanum_fraction": 0.7563636302947998, "avg_line_length": 33.375, "blob_id": "5c85a59e23e6f2c45efb55e9a0700378f2b1b18d", "content_id": "f05b15a7a72d53263d1ab42666b9797f04b616e4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 275, "license_type": "no_license", "max_line_length": 151, "num_lines": 8, "path": "/techniquefirst/settings/production.py", "repo_name": "techniqueFirst/techniquefirst", "src_encoding": "UTF-8", "text": "from .base import *\n\nALLOWED_HOSTS = ['localhost','vps174256.vps.ovh.ca', 'techniquefirst.ca', 'techniquefirst.net', 'www.techniquefirst.ca', 'integratedweightlifting.com',\n'www.integratedweightlifting.com']\n\nSTATIC_ROOT = 
'/home/sam8401/website_nginx/static'\n\nDEBUG = False\n" }, { "alpha_fraction": 0.6745380163192749, "alphanum_fraction": 0.6745380163192749, "avg_line_length": 20.954545974731445, "blob_id": "5779ee88a6512f03f1778a833cdef581390abe2f", "content_id": "a3985160b6b8835f1e7271661c176e4b39c38c01", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 974, "license_type": "no_license", "max_line_length": 84, "num_lines": 44, "path": "/techniquefirst/db.py", "repo_name": "techniqueFirst/techniquefirst", "src_encoding": "UTF-8", "text": "# Module to read and return the contents in the blog/ directory (including blog.yml)\nfrom django.conf import settings\nimport os\nimport yaml\n\nBLOG_YML = \"blog.yml\" ;\n\n\nclass BlogDB():\n\n\tdef initiate(self):\n\t\tdetails_file_name = os.path.join(settings.BLOG_DIRECTORY, BLOG_YML);\n\t\twith open(details_file_name, 'r') as stream:\n\t\t\ttry:\n\t\t\t\tcontent = yaml.load(stream)\n\n\t\t\t\tself.titles = content['titles']\n\t\t\t\tself.summaries = content['summaries']\n\t\t\t\tself.images = content['images']\n\t\t\t\tself.dates = content['dates']\n\n\t\t\texcept yaml.YAMLError as ex:\n\t\t\t\tprint(ex)\n\n\t\t\treturn None;\n\t\t\t\n\tdef get_images(self):\n\t\treturn self.images;\t\t\n\t\n\tdef get_summaries(self):\n\t\treturn self.summaries;\t\n\n\tdef get_titles(self):\n\t\treturn self.titles;\n\n\tdef get_dates(self):\n\t\treturn self.dates\n\t\t\n\tdef get_full_post(self, key):\n\t\tpost_file_name = '{}.html'.format(key)\n\t\tpost_file = os.path.join(settings.BLOG_DIRECTORY, post_file_name)\n\t\tfile = open(post_file, \"r\")\n\n\t\treturn file.read();\n\t\t \n\n\n\n\n" }, { "alpha_fraction": 0.6609195470809937, "alphanum_fraction": 0.6685823798179626, "avg_line_length": 19.8799991607666, "blob_id": "b859075206226a66f040608eff7246d27fcc5c0c", "content_id": "2f93a4b409ae6a19a027ecbcf5e92dcd0fd75fb0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1044, "license_type": "no_license", "max_line_length": 51, "num_lines": 50, "path": "/techniquefirst/models.py", "repo_name": "techniqueFirst/techniquefirst", "src_encoding": "UTF-8", "text": "from django.db import models\nfrom .db import BlogDB\nimport datetime\n\n'''\nManager for a post class\n'''\n\nclass PostManager(models.Manager):\n\tdef get(self, key, blogDB = None):\n\t\tif blogDB is None:\n\t\t\tblogDB = BlogDB()\n\t\t\tblogDB.initiate();\n\n\t\tpost = {};\n\t\tpost['key'] = key;\n\t\tpost['title'] = blogDB.get_titles().get(key);\n\t\tpost['image'] = blogDB.get_images().get(key)\n\t\tpost['summary'] = blogDB.get_summaries().get(key)\n\t\tpost['date'] = blogDB.get_dates().get(key)\n\n\t\treturn post; \n\t\n\tdef all(self):\n\t\tblogDB = BlogDB()\n\t\tblogDB.initiate();\n\n\t\tposts = []\n\t\tfor key in blogDB.get_dates():\n\t\t\tpost = self.get(key, blogDB)\n\t\t\tif post is not None:\n\t\t\t\tposts.append(post)\n\n\t\treturn posts;\t\t\n\n\tdef content(self, key):\n\t\tblogDB = BlogDB()\n\t\treturn blogDB.get_full_post(key)\n\n\t\n\n\nclass Post(models.Model):\n\tpost_title = models.CharField(max_length=50)\n\tpost_date = models.CharField(max_length=15)\n\tpost_summary = models.TextField()\n\tpost_file_name = models.CharField(max_length=30)\n\tpost_image = models.CharField(max_length=50)\n\n\tposts = PostManager()\n" }, { "alpha_fraction": 0.7182235717773438, "alphanum_fraction": 0.7182235717773438, "avg_line_length": 26.25, "blob_id": "8a808a0a374cb700ddbe1475c961ccde92893fd1", "content_id": 
"2f9255e5f592262ec019c84aa7362b141e02806e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 653, "license_type": "no_license", "max_line_length": 101, "num_lines": 24, "path": "/techniquefirst/views.py", "repo_name": "techniqueFirst/techniquefirst", "src_encoding": "UTF-8", "text": "from django.shortcuts import render\nfrom django.conf import settings\nfrom .models import Post\n\n# Landing page\ndef home(request):\n\treturn render(request, \"techniquefirst/index.html\", {})\t\n\n# /blog and /blog/${post} endpoint\ndef blog(request, post=None):\n\tif post is None:\n\t\tall_posts = Post.posts.all()\n\t\treturn render(request, \"techniquefirst/blog-home.html\", {'posts' : all_posts})\n\t\n\telse:\n\n\t\tpost_html = Post.posts.content(post);\n\t\tpost_details = Post.posts.get(post);\n\n\t\treturn render(request, \"techniquefirst/blog.html\", {'post': post_details, 'post_html' : post_html})\n\n\ndef about(request):\n\treturn render(request, \"techniquefirst/about.html\", {})" }, { "alpha_fraction": 0.7591768503189087, "alphanum_fraction": 0.7591768503189087, "avg_line_length": 140.9736785888672, "blob_id": "54aec4c712a4f6381040ef807d25e9b09d2d1930", "content_id": "0a39d4ee28ed54d6e7d3483a0655e9ba6712d67d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 5394, "license_type": "no_license", "max_line_length": 1101, "num_lines": 38, "path": "/techniquefirst/blog/proprioception.html", "repo_name": "techniqueFirst/techniquefirst", "src_encoding": "UTF-8", "text": "<!-- Post Content -->\n <p>In our earlier <a href=\"../how_you_move\">post</a>, we established that the quality (i.e. technique and form) of movement is of significantly more importance than the quantity (repetition or load). When we shift our focus from short term to long term effect of training on our bodies, it is imperative that the time spent doing quality movements should dictate our training. Thus, as one gets farther away from his/ her prime or has had a late initiation into physical training (and is still looking to squeeze true athletic potential), quantity shouldn't hold any meaning if it comes at even a small sacrifice of quality (a PR day here and there is fine). In the next paragraphs, we dive into a key aspect of improving lifting quality, which is the focus of this post. </p>\n\n <p>With all the hype around mobility and flexibility these days, when thinking about movement quality we have a tendency to overemphasize the role of these two. The ubiquitous SMR tools in strength/ Oly gyms, CrossFit boxes and, even yoga studios, seem to convey the message that a bit of pre/post/during (training) mobility is enough to achieve both quantitative and qualitative progress in training. Amongst all this, something that we often ignore (or worse, not even think of) is the concept of proprioception. </p>\n\n <p>Wikipedia defines it as :</p>\n\n <blockquote class=\"blockquote\">\n \"Proprioception is the sense of the relative position of one's own parts of the body and strength of effort being employed in movement\"\n </blockquote>\n\n <p>In other words, being proprioceptively sound and body aware would imply that there is no difference between (a) \"How one is moving\" and (b) \"how they think they're moving\". 
Conversely, the existence of such a difference between the two would mean the athlete in question can't really be classified as a perfect athlete* and, if not already in pain or injured, the same is bound to happen at some point in the training cycle.</p>\n\n          <p>Here's how an unchecked proprioceptive issue manifests itself as one or more injuries (due to one or more muscle compensation patterns along the motor chain):\n\n          Say an athlete is doing a back squat. Let's assume that due to an existing condition (which the athlete is unaware of) the athlete favors his right glute more than his left. Thus, looking from the front, as the athlete descends into the squat, it would appear that instead of solely going straight down, he/she also shifts horizontally to the *left* (knees out more on the right, hence the shift left). It is highly likely that the athlete \"feels\" fine doing such a movement. The irony is that at a heavier weight, when the \"stakes\" are high and the shifting would be more pronounced, the athlete is actually *less* likely to notice or experience the shift. Why? Because as the weight goes up, more and more of the mental energy is focussed on the movement itself (no one wants to get crushed and/or miss a PR) and less of it (if at all) on being aware of the movement pattern. As you can see, the problem gets worse fairly quickly over time. However, depending upon the severity of the initial condition, it may take months or years for the issue to surface as a full-blown injury.</p>\n\n          <p>\n          Problems related to proprioception and insufficient body awareness can be divided into two categories. The first is when athletes/coaches aren't even aware that the problem exists, as it is sometimes really hard to spot in an environment dominated by noise (more likely in CrossFit boxes).</p>\n\n          <p>And the second (more frequent) is when the observation is made but attributed to something commonplace (e.g. \"Yeah, I know my right side is stronger\"). The issue is (usually) diagnosed incorrectly and the athlete is made to do incorrect \"corrective exercises\".</p> \n\n          <p>Furthermore, more often than not, proprioceptive issues are the hardest to work on; it is almost impossible for progress not to stall while we take the time out to address them.</p>\n\n<p> \nConsequences of a proprioceptive issue left unchecked:\n<ul>\n  <li>As the saying goes, \"practice makes permanent\": if the principles of progressive overload are applied while being unaware of the above situation, one is essentially progressing towards an inefficient and injury-prone athletic state (of strength, speed, agility etc.).</li> \n\n  <li> The bigger the \"proprioceptive difference\", the more \"anti-progress\" is made instead of true progress.</li>\n\n  <li> As the weights get heavier and the reps increase over time, the rate at which \"anti-training\"/\"anti-progress\" happens increases.</li> \n \n  <li>If unchecked, the problem gets worse and finally enters a zone where the imbalance is so bad that the athlete/lifter begins to \"feel\" the issue (either through pain or difficulty in making a lift).</li>\n</ul>\n</p>\n\n          <p>*A \"perfect athlete\" in this context would be one who is well balanced, mobile & injury free. Such an athlete in most scenarios won't have to bother with proprioceptive issues and would be okay focussing their time on reaping gains (yes, blame it on their genetics and/or an athletic childhood). I've used an asterisk in the original sentence above since \"well balanced, mobile\" athletes are few and far between. 
In fact, a lot of coaches believe it to be a myth.</p>" }, { "alpha_fraction": 0.7248322367668152, "alphanum_fraction": 0.744966447353363, "avg_line_length": 36.375, "blob_id": "b57369fbb48eb9016cc8bd529792fcbad6290e67", "content_id": "92e006fc91b5f96964c63bbf572a558278fa1603", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 298, "license_type": "no_license", "max_line_length": 168, "num_lines": 8, "path": "/techniquefirst/settings/development.py", "repo_name": "techniqueFirst/techniquefirst", "src_encoding": "UTF-8", "text": "from .base import *\n\nALLOWED_HOSTS = ['localhost','vps174256.vps.ovh.ca', 'techniquefirst.ca', 'techniquefirst.com', 'www.techniquefirst.com', 'techniquefirst.net', 'www.techniquefirst.ca']\n\nSTATIC_ROOT = '/home/suman/Desktop/static'\n\n#SECURE_SSL_REDIRECT = True\n#INSTALLED_APPS.append('sslserver')" }, { "alpha_fraction": 0.7284894585609436, "alphanum_fraction": 0.7590821981430054, "avg_line_length": 18.034482955932617, "blob_id": "a9201804c28da73d1f21f5ed6bbcadb69a794587", "content_id": "b41e529fa8e5f509e1141e7b929c16830ef26720", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "INI", "length_bytes": 523, "license_type": "no_license", "max_line_length": 65, "num_lines": 29, "path": "/mysite_uwsgi.ini", "repo_name": "techniqueFirst/techniquefirst", "src_encoding": "UTF-8", "text": "[uwsgi]\n\n#project home directory\nchdir\t\t= /home/sam8401/website_nginx/techniquefirst\n\n# the wsgi file\nmodule\t\t= techniquefirst.wsgi\n\n# the virtualenv\nhome \t\t= /home/sam8401/tf_env\n\n# exports settings module\nenv = DJANGO_SETTINGS_MODULE=techniquefirst.settings.production\n\n# exports the secret key (real secrets should come from the host\n# environment rather than a committed file)\nenv = SECRET_KEY='gh1(ldul8sj(h76l8-p3-)(@xdco9b&7t9y)lau%rh+zq$m8s*'\n\nmaster\t\t= true\n\nprocesses\t= 1\n\nsocket \t\t= /home/sam8401/website_nginx/techniquefirst/mysite.sock\n\nchmod-socket= 666\n\nvacuum\t\t= true\n\n#background the process and log it\ndaemonize = techniquefirst_uwsgi.log\n" }, { "alpha_fraction": 0.6965517401695251, "alphanum_fraction": 0.7655172348022461, "avg_line_length": 28.200000762939453, "blob_id": "b6fe4f2c97a224afb5a886ab391d4ed39d99dfa7", "content_id": "da326029d15f8d2dc9d11b809d6cf2c9e7113595", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 145, "license_type": "no_license", "max_line_length": 60, "num_lines": 5, "path": "/requirements.txt", "repo_name": "techniqueFirst/techniquefirst", "src_encoding": "UTF-8", "text": "#adding requirements.txt to make sure this works with aws eb\nDjango==2.0\npyaml==17.12.1\n# Need to figure out SSL properly\n#django-sslserver==0.20" } ]
10
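The db.py and models.py records above describe a blog backed by a blog.yml index plus one HTML file per post, rather than database rows. Below is a standalone sketch (not the project's code) of a blog.yml shaped the way BlogDB.initiate expects it, assembling a post dict the way PostManager.get does; the slug how_you_move comes from the tests above, while every title, date and image value here is a made-up placeholder.

import yaml

# Index shaped like the blog.yml that BlogDB.initiate reads: four top-level
# mappings, each keyed by the post slug (the values below are illustrative only).
BLOG_YML_TEXT = """
titles:
  how_you_move: "It's how you move"
summaries:
  how_you_move: "Why movement quality should dictate training."
images:
  how_you_move: "img/how_you_move.jpg"
dates:
  how_you_move: "2018-05-01"
"""

content = yaml.safe_load(BLOG_YML_TEXT)
key = 'how_you_move'
# Same shape as the dict returned by PostManager.get.
post = {
    'key': key,
    'title': content['titles'].get(key),
    'image': content['images'].get(key),
    'summary': content['summaries'].get(key),
    'date': content['dates'].get(key),
}
print(post['date'], '-', post['title'])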
Albertbmm/progjar
https://github.com/Albertbmm/progjar
48d9dd582cf7246cc3bd4166019f4f7ee363660f
d55fe95ba004a3078f12a31302fc32ed321d8ff7
99dae397d94f203fc7c775befc1dc747684a1b98
refs/heads/master
2021-01-13T17:05:24.815660
2015-12-01T05:17:09
2015-12-01T05:17:09
43,325,611
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5922421813011169, "alphanum_fraction": 0.616840124130249, "avg_line_length": 25.8157901763916, "blob_id": "e2a44b52e376fa12fa67c8e2e020a999c99baddd", "content_id": "f71d317629bcbff4ebef8940e37e628d765727bb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1057, "license_type": "no_license", "max_line_length": 73, "num_lines": 38, "path": "/webserver2.py", "repo_name": "Albertbmm/progjar", "src_encoding": "UTF-8", "text": "import sys\r\nimport socket\r\nimport io\r\n\r\n\r\nHOST, PORT = '127.0.0.1', 8182\r\n# Create a TCP/IP socket\r\nsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n#sock.setsockopt(socket.SOL_SOCKET,socket.SO_REUSEADDR,1)\r\n# Bind the socket to the port\r\nsock.bind((HOST,PORT))\r\nsock.listen(1)\r\nprint 'starting up on port %s' % PORT\r\n# sock.bind(server_address)\r\n\r\n# Listen for incoming connections\r\n\r\nwhile True:\r\n \tconnection, client_address = sock.accept()\r\n data = connection.recv(1024)\r\n\tgambarku = data.split()\r\n\tgambar1 =gambarku[1]\t\r\n\tgambar2 =gambar1[1:]\r\n\r\n\tdatagambar = 'sukses'\r\n\thttp_response = \"\\HTTP/1.1 200 OK \\n\\n%s\"%datagambar\r\n\r\n\r\n\t\t# print >>sys.stderr, 'received \"%s\"' % data\r\n \t# if data:\r\n #\tprint >>sys.stderr, 'sending data back to the client'\r\n #\tconnection.sendall(data)\r\n \t# else:\r\n #\tprint >>sys.stderr, 'no more data from', client_address\r\n #\tbreak\r\n # Clean up the connection\r\n\tconnection.sendall(http_response)\r\n\tconnection.close()\r\n" }, { "alpha_fraction": 0.5166997909545898, "alphanum_fraction": 0.5318091511726379, "avg_line_length": 24.61375617980957, "blob_id": "1e4a0be918bf5208e77bc54bb93cb479f8ab8465", "content_id": "a794fe57f18ea88d515a47790833124b117aeefc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5030, "license_type": "no_license", "max_line_length": 91, "num_lines": 189, "path": "/Server.py", "repo_name": "Albertbmm/progjar", "src_encoding": "UTF-8", "text": "import sys\r\nimport socket\r\nimport select\r\nimport pickle\r\nimport string\r\n\r\nHOST = 'localhost' \r\nSOCKET_LIST = []\r\nLIST_NAMA = []\r\nRECV_BUFFER = 3999 \r\nPORT = 12000\r\n\r\n\r\ndef chat_server():\r\n\r\n\t#sys.stdout.write('Port : ')\r\n\t#PORT = int(sys.stdin.readline())\r\n\t\r\n\t#creating TCP/IP socket\r\n\tserver_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n\tserver_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\r\n\r\n\t# binding the socket\r\n\tserver_socket.bind((HOST, PORT))\r\n\tserver_socket.listen(10)\r\n\r\n\t# add server socket object to the list of readable connections\r\n\tSOCKET_LIST.append(server_socket)\r\n\r\n\tprint \"The chat server is started on Port \" + str(PORT)\r\n\t#print \"and the Host is \" + str(HOST)\r\n\r\n\twhile True:\r\n\t\t# get the list sockets which are ready to be read through select\r\n\t\t# 4th arg, time_out = 0 : poll and never block\r\n\t\tready_to_read,ready_to_write,in_error = select.select(SOCKET_LIST,[],[],0)\r\n\t \r\n\t\tfor sock in ready_to_read:\r\n\t\t\t# when new connection request received\r\n\t\t\tif sock == server_socket: \r\n\t\t\t\tsockfd, addr = server_socket.accept()\r\n\t\t\t\tSOCKET_LIST.append(sockfd)\r\n\t\t\t\tprint \"Client (%s, %s) is connected\" % addr\r\n\t\t\t\t \r\n\t\t\t\tbroadcast(server_socket, sockfd, \"[%s:%s] has joined the chat\\n\" % addr)\r\n\t\t\t \r\n\t\t\t# a message from a client, not a new 
connection\r\n\t\t\telse:\r\n\t\t\t\t# process data received from client, \r\n\t\t\t\ttry:\r\n\t\t\t\t\t# receiving data from the socket.\r\n\t\t\t\t\tdata = sock.recv(RECV_BUFFER)\r\n\t\t\t\t\t#data = pickle.loads(data)\r\n\t\t\t\t\tif data:\r\n\t\t\t\t\t\t#broadcast(server_socket, sock, \"\\r\" + '[' + str(sock.getpeername()) + '] ' + data) \r\n\t\t\t\t\t\ttemp1 = string.split(data[:-1])\r\n\t\t\t\t\r\n\t\t\t\t\t\td=len(temp1)\r\n\t\t\t\t\t\tif temp1[0]==\"login\" :\r\n\t\t\t\t\t\t\tlog_in(sock, str(temp1[1]))\r\n\t\t\t\t\t\t\t\t\r\n\t\t\t\t\t\telif temp1[0]==\"send\" :\r\n\t\t\t\t\t\t\t\r\n\t\t\t\t\t\t\tlogged = 0\r\n\t\t\t\t\t\t\tuser = \"\"\r\n\t\t\t\t\t\t\tfor x in range (len(LIST_NAMA)):\r\n\t\t\t\t\t\t\t\tif LIST_NAMA[x]==sock:\r\n\t\t\t\t\t\t\t\t\tlogged=1\r\n\t\t\t\t\t\t\t\t\tuser=LIST_NAMA[x+1]\r\n\t\t\t\t\t\t\t\r\n\t\t\t\t\t\t\tif logged==0:\r\n\t\t\t\t\t\t\t\tsend_msg(sock, \"Please login first\\n\")\r\n\t\t\t\t\t\t\t\r\n\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\ttemp2=\"\"\r\n\t\t\t\t\t\t\t\tfor x in range (len(temp1)):\r\n\t\t\t\t\t\t\t\t\tif x>1:\r\n\t\t\t\t\t\t\t\t\t\tif not temp2:\r\n\t\t\t\t\t\t\t\t\t\t\ttemp2+=str(temp1[x])\r\n\t\t\t\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\t\t\ttemp2+=\" \"\r\n\t\t\t\t\t\t\t\t\t\t\ttemp2+=str(temp1[x])\r\n\t\t\t\t\t\t\t\t\r\n\t\t\t\t\t\t\t\tfor x in range (len(LIST_NAMA)):\r\n\t\t\t\t\t\t\t\t\tif LIST_NAMA[x]==temp1[1]:\r\n\t\t\t\t\t\t\t\t\t\tsend_msg(LIST_NAMA[x-1], \"[\"+user+\"] : \"+temp2+\"\\n\")\r\n\t\t\t\t\r\n\t\t\t\t\t\telif temp1[0]==\"sendall\" :\r\n\t\t\t\t\t\t\t\r\n\t\t\t\t\t\t\tlogged = 0\r\n\t\t\t\t\t\t\tuser = \"\"\r\n\t\t\t\t\t\t\tfor x in range (len(LIST_NAMA)):\r\n\t\t\t\t\t\t\t\tif LIST_NAMA[x]==sock:\r\n\t\t\t\t\t\t\t\t\tlogged=1\r\n\t\t\t\t\t\t\t\t\tuser=LIST_NAMA[x+1]\r\n\t\t\t\t\t\t\t\r\n\t\t\t\t\t\t\tif logged==0:\r\n\t\t\t\t\t\t\t\tsend_msg(sock, \"Please login first\\n\")\r\n\t\t\t\t\t\t\t\r\n\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\ttemp2=\"\"\r\n\t\t\t\t\t\t\t\tfor x in range(len(temp1)):\r\n\t\t\t\t\t\t\t\t\tif x!=0:\r\n\t\t\t\t\t\t\t\t\t\tif not temp2:\r\n\t\t\t\t\t\t\t\t\t\t\ttemp2=str(temp1[x])\r\n\t\t\t\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\t\t\ttemp2+=\" \"\r\n\t\t\t\t\t\t\t\t\t\t\ttemp2+=temp1[x]\r\n\t\t\t\t\t\t\t\tbroadcast(server_socket, sock, \"[\"+user+\"] : \"+temp2+\"\\n\")\r\n\t\t\t\t\t\t\t\r\n\t\t\t\t\t\telif temp1[0]==\"list\" :\r\n\t\t\t\t\t\t\t#send_msg(sock, \"cobo\\n\")\r\n\t\t\t\t\t\t\tlogged = 0\r\n\t\t\t\t\t\t\tfor x in range (len(LIST_NAMA)):\r\n\t\t\t\t\t\t\t\tif LIST_NAMA[x]==sock:\r\n\t\t\t\t\t\t\t\t\tlogged=1\r\n\t\t\t\t\t\t\t\r\n\t\t\t\t\t\t\tif logged==0:\r\n\t\t\t\t\t\t\t\tsend_msg(sock, \"Please login first\\n\")\r\n\t\t\t\t\t\t\t\r\n\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\ttemp2=\"\"\r\n\t\t\t\t\t\t\t\tfor x in range (len(LIST_NAMA)):\r\n\t\t\t\t\t\t\t\t\tif x%2==1:\r\n\t\t\t\t\t\t\t\t\t\ttemp2+=\" \"\r\n\t\t\t\t\t\t\t\t\t\ttemp2+=str(LIST_NAMA[x])\r\n\t\t\t\t\t\t\t\tsend_msg(sock, \"[List_User] : \"+temp2+\"\\n\")\r\n\t\t\t\t\t\t\t\r\n\t\t\t\t\t\t\r\n\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\tprint ('Invalid Command')\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\t# remove the socket that's broken \r\n\t\t\t\t\t\tif sock in SOCKET_LIST:\r\n\t\t\t\t\t\t\tSOCKET_LIST.remove(sock)\r\n\r\n\t\t\t\t\t\t# at this stage, no data means probably the connection has been broken\r\n\t\t\t\t\t\tbroadcast(server_socket, sock, \"Client (%s, %s) is offline\\n\" % addr) \r\n\r\n\t\t\t\t# exception \r\n\t\t\t\texcept:\r\n\t\t\t\t\tbroadcast(server_socket, sock, \"Client (%s, %s) is offline\\n\" % 
addr)\r\n\t\t\t\t\tcontinue\r\n\r\n\tserver_socket.close()\r\n \r\n# broadcast chat messages to all connected clients\r\ndef broadcast (server_socket, sock, message):\r\n for x in range (len(LIST_NAMA)):\r\n\t\t\r\n # send the message only to peer\r\n if LIST_NAMA[x] != server_socket and LIST_NAMA[x] != sock and x%2==0 :\r\n try :\r\n LIST_NAMA[x].send(message)\r\n except :\r\n # broken socket connection\r\n LIST_NAMA[x].close()\r\n # broken socket, remove it\r\n if LIST_NAMA[x] in SOCKET_LIST:\r\n SOCKET_LIST.remove(LIST_NAMA[x])\r\n \r\ndef send_msg (sock, message):\r\n\ttry:\r\n\t\tsock.send(message)\r\n\texcept:\r\n\t\tsock.close()\r\n\t\t\r\n\t\tif sock in SOCKET_LIST:\r\n\t\t\tSOCKET_LIST.remove(sock)\r\n\r\ndef log_in (sock, user):\r\n\tg = 0\r\n\tf = 0\r\n\tfor name in LIST_NAMA:\r\n\t\tif name == user:\r\n\t\t\tg = 1\r\n\t\tif name == sock:\r\n\t\t\tf = 1\r\n\t\r\n\tif f==1:\r\n\t\tsend_msg(sock, \"You already has a username\\n\")\r\n\telif g==1:\r\n\t\tsend_msg(sock, \"Username already exist\\n\")\r\n\telse:\r\n\t\tLIST_NAMA.append(sock)\r\n\t\tLIST_NAMA.append(user)\r\n\t\tsend_msg(sock, \"Login success\\n\")\r\n\t\r\nchat_server()\r\n" } ]
2
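Server.py above implements a line-oriented text protocol: each client message is split on whitespace, dispatched on its first word (login, send, sendall or list), and stripped of its final character, so every command should end with a newline. Below is a minimal client sketch under those assumptions; it presumes the server is already running on localhost:12000 (the HOST/PORT constants above), and the username alice is arbitrary.

import socket

# Connect to the chat server from Server.py (HOST = 'localhost', PORT = 12000).
conn = socket.create_connection(('localhost', 12000))

# Register a username; the server replies 'Login success\n'.
conn.sendall(b'login alice\n')
print(conn.recv(4096).decode().strip())

# Ask who is logged in; the server replies '[List_User] : ...'.
conn.sendall(b'list\n')
print(conn.recv(4096).decode().strip())

# 'send <user> <message>' delivers to a single user; sending to ourselves
# guarantees the reply arrives on this same socket.
conn.sendall(b'send alice hello\n')
print(conn.recv(4096).decode().strip())

conn.close()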
Dawidkubis/self-info
https://github.com/Dawidkubis/self-info
f580e34aa5680be4b160d1824c542b23137abe61
ff74959cf90db6694e5bea9cc98889679fc22da4
e388457fb822cf77f2401264ee25d0308e7e9771
refs/heads/master
2020-06-16T12:56:15.057962
2019-09-19T22:30:38
2019-09-19T22:30:38
195,582,071
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5177589058876038, "alphanum_fraction": 0.5262631177902222, "avg_line_length": 21.460674285888672, "blob_id": "604d8c9ed8a5108f409adb4aa944953a90a67d0a", "content_id": "44d3684520fdd0bfca86e61720af7ba3cf2a7afc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1999, "license_type": "no_license", "max_line_length": 82, "num_lines": 89, "path": "/self-info.py", "repo_name": "Dawidkubis/self-info", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\n'''\na simple script for managing information\n'''\n\nimport shelve\nfrom functools import reduce\nimport sys\nimport os\n\nclass Info:\n '''\n a piece of information, contains `name`, `text`(optional) and `date`(optional)\n '''\n def __init__(self, name, text=None, date=None):\n self.name, self.text, self.date = name, text, date\n\n def __str__(self):\n head = (self.date + ' | ') if self.date else ''\n body = self.name\n back = (' : ' + self.text) if self.text else ''\n\n return head + body + back\n\ndef parse_args(argv, shv):\n\n if len(argv) == 0:\n return ('', '')\n \n assert(len(argv) == 2), f'invalid nuber of arguments : {argv}'\n\n argv = [i.lower() for i in argv]\n\n if argv[0] == 'add':\n pass\n elif argv[0] == 'del':\n assert(argv[1] in shv), f'{argv[1]} does not exist'\n elif argv[0] == 'help':\n pass\n else:\n raise AssertionError(f'invalid argument : {argv[0]}')\n\n return argv\n \ndef parse_contents(txt):\n\n name, text, date = None, None, None\n \n rest = txt.split('|')\n \n if len(rest) == 1:\n rest = rest[0]\n else:\n date = rest[0]\n rest = rest[1]\n\n rest = rest.split(':')\n\n if len(rest) == 1:\n name = rest[0]\n else:\n text = rest[1]\n name = rest[0]\n\n #print('name:',name,'text:',text,'date',date)\n\n return (name, text, date)\n\ndef main():\n shv_root = os.path.join(os.environ['HOME'], '.config/self-info/')\n\n if not os.path.exists(shv_root):\n os.makedirs(shv_root)\n\n with shelve.open(os.path.join(shv_root, 'shv')) as shv:\n option, item = parse_args(sys.argv[1:], shv)\n \n if option == 'add':\n name, text, date = parse_contents(item)\n\n shv[name] = Info(name, text, date)\n elif option == 'del':\n print(f'removed item : {shv.pop(item)}')\n else:\n [print(shv[i]) for i in shv]\n\nif __name__ == '__main__':\n main()\n" } ]
1
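parse_contents above accepts entries in a small "date | name : text" grammar in which both the date and the text parts are optional. The following is a standalone re-implementation for illustration (not an import of the script), showing the tuples the parser is meant to produce; the example entry strings are made up.

def parse(entry):
    # Split off an optional 'date |' prefix, then an optional ': text' suffix.
    date, _, rest = entry.partition('|')
    if not rest:  # no '|' present: the whole entry is the name part
        date, rest = '', entry
    name, _, text = rest.partition(':')
    return (name.strip(), text.strip() or None, date.strip() or None)

assert parse('2019-07-01 | dentist : bring the referral') == \
    ('dentist', 'bring the referral', '2019-07-01')
assert parse('groceries') == ('groceries', None, None)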
SamuelFelipe/AirBnB_clone_v2
https://github.com/SamuelFelipe/AirBnB_clone_v2
f3a89ed0005f090422f34a82e02ee3d45fe7cea8
1557c75c6d234e9b8f34a7af02b8c61b4c2bdfca
c01b2ed3c63393747868ccd2bf2fe63a259c5e12
refs/heads/master
2023-08-04T14:59:26.719592
2021-09-14T03:53:29
2021-09-14T03:53:29
352,477,151
0
1
null
null
null
null
null
[ { "alpha_fraction": 0.6261792182922363, "alphanum_fraction": 0.6438679099082947, "avg_line_length": 21.91891860961914, "blob_id": "7c52b5d1522f57679952fb655ee08ffea39dbd59", "content_id": "c560ca55365099fa85e8e7fb9d9d0972c9101897", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 848, "license_type": "no_license", "max_line_length": 72, "num_lines": 37, "path": "/web_flask/9-states.py", "repo_name": "SamuelFelipe/AirBnB_clone_v2", "src_encoding": "UTF-8", "text": "#!/usr/bin/python3\n\n'''Flask web server'''\n\nfrom flask import Flask, render_template, abort\nfrom models import storage\nfrom models.state import State\n\napp = Flask(__name__)\n\n\[email protected]_appcontext\ndef teardown(err):\n storage.close()\n\n\[email protected]('/states', strict_slashes=False)\ndef States_list():\n ess = storage.all(State)\n nd = {}\n for item in ess.values():\n nd[item.name] = item.id\n return render_template('7-states_list.html', ess=sorted(nd.items()))\n return render_template('9-states.html', ess=ess)\n\n\[email protected]('/states/<string:id>', strict_slashes=False)\ndef State_id(id):\n ess = storage.all(State)\n for state in ess.values():\n if state.id == id:\n return render_template('9-states.html', obj=state)\n return abort(404)\n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=5000)\n" }, { "alpha_fraction": 0.5532604455947876, "alphanum_fraction": 0.555690586566925, "avg_line_length": 29.481481552124023, "blob_id": "fa4c29ed65055682f1549019bce9a4f3f782b49e", "content_id": "f1d8af9d21e06d55557ef414f1955de4de95cb17", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2469, "license_type": "no_license", "max_line_length": 78, "num_lines": 81, "path": "/models/engine/db_storage.py", "repo_name": "SamuelFelipe/AirBnB_clone_v2", "src_encoding": "UTF-8", "text": "#!/usr/bin/python3\n\n'''\nClass to manage the database\n'''\n\nfrom os import getenv\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker, scoped_session\nfrom models.base_model import BaseModel, Base\nfrom models.user import User\nfrom models.place import Place\nfrom models.state import State\nfrom models.city import City\nfrom models.amenity import Amenity\nfrom models.review import Review\n\n\nclass DBStorage:\n '''Class to manage the storage in a database'''\n __engine = None\n __session = None\n\n classes = {\n 'User': User, 'Place': Place,\n 'State': State, 'City': City,\n 'Amenity': Amenity,\n 'Review': Review\n }\n\n def __init__(self):\n '''Cretate a new session'''\n crt = {\n \"user\": getenv(\"HBNB_MYSQL_USER\"),\n \"pwd\": getenv(\"HBNB_MYSQL_PWD\"),\n \"host\": getenv(\"HBNB_MYSQL_HOST\"),\n \"db\": getenv(\"HBNB_MYSQL_DB\")\n }\n f = 'mysql+mysqldb://{user}:{pwd}@{host}:3306/{db}'.format(**crt)\n self.__engine = create_engine(f, encoding='utf-8', pool_pre_ping=True)\n\n if getenv('HBNB_ENV') == 'test':\n Base.metadata.drop_all(self.__engine)\n\n def new(self, obj):\n '''Create a new object instance'''\n self.__session.add(obj)\n\n def all(self, cls=None):\n '''list all the entries of a class if one is pased\n otherwise list all the elementes in the db\n '''\n new_dict = {}\n for clss in self.classes:\n if cls is None or cls is self.classes[clss] or cls is clss:\n objs = self.__session.query(self.classes[clss]).all()\n for obj in objs:\n key = obj.__class__.__name__ + '.' 
+ obj.id\n new_dict[key] = obj\n return (new_dict)\n\n def save(self):\n '''Commit the changes'''\n self.__session.commit()\n\n def delete(self, obj=None):\n '''Remove a instance if it exist'''\n if obj:\n self.__session.delete(obj)\n\n def reload(self):\n '''Reload all the items in stored in the db'''\n Base.metadata.create_all(self.__engine)\n session_factory = sessionmaker(self.__engine,\n expire_on_commit=False)\n Session = scoped_session(session_factory)\n self.__session = Session()\n\n def close(self):\n '''finalize the session'''\n self.__session.close()\n" }, { "alpha_fraction": 0.6557692289352417, "alphanum_fraction": 0.675000011920929, "avg_line_length": 19.799999237060547, "blob_id": "305cdb213c274df04c80147556e43e920ff48938", "content_id": "afa6859c313565a156eede104c07e29844b8f96e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 520, "license_type": "no_license", "max_line_length": 64, "num_lines": 25, "path": "/web_flask/8-cities_by_states.py", "repo_name": "SamuelFelipe/AirBnB_clone_v2", "src_encoding": "UTF-8", "text": "#!/usr/bin/python3\n\n'''Flask web server'''\n\nfrom flask import Flask, render_template\nfrom models import storage\nfrom models.state import State\n\napp = Flask(__name__)\n\n\[email protected]_appcontext\ndef teardown(err):\n storage.close()\n\n\[email protected]('/cities_by_states', strict_slashes=False)\ndef States_list():\n ess = storage.all(State)\n dictt = {obj.name:obj for obj in ess.values()}\n return render_template('8-cities_by_states.html', ess=dictt)\n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=5000)\n" }, { "alpha_fraction": 0.5572419762611389, "alphanum_fraction": 0.5659149885177612, "avg_line_length": 35.60317611694336, "blob_id": "7fecdaed4c07af5228bbaeddd2993321eac34eeb", "content_id": "a8295a919b340cdd2bf1de86b212ee8c3e6e00fe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2306, "license_type": "no_license", "max_line_length": 75, "num_lines": 63, "path": "/models/place.py", "repo_name": "SamuelFelipe/AirBnB_clone_v2", "src_encoding": "UTF-8", "text": "#!/usr/bin/python3\n\"\"\" Place Module for HBNB project \"\"\"\nfrom os import getenv\nfrom models.base_model import BaseModel, Base\nfrom models.review import Review\nfrom sqlalchemy import Column, String, Float, Integer\nfrom sqlalchemy import ForeignKey, Table\nfrom sqlalchemy.orm import relationship\n\n\nif getenv('HBNB_TYPE_STORAGE') == 'db':\n metadata = Base.metadata\n place_amenity = Table('place_amenity', metadata,\n Column('place_id', String(60),\n ForeignKey('places.id'),\n primary_key=True),\n Column('amenity_id', String(60),\n ForeignKey('amenities.id'),\n primary_key=True))\n\n\nclass Place(BaseModel, Base):\n \"\"\" A place to stay \"\"\"\n __tablename__ = 'places'\n city_id = Column(String(60), ForeignKey('cities.id'),\n nullable=False)\n user_id = Column(String(60), ForeignKey('users.id'),\n nullable=False)\n name = Column(String(128), nullable=False)\n description = Column(String(1024))\n number_rooms = Column(Integer, default=0, nullable=False)\n number_bathrooms = Column(Integer, default=0, nullable=False)\n max_guest = Column(Integer, default=0, nullable=False)\n price_by_night = Column(Integer, default=0, nullable=False)\n latitude = Column(Float)\n longitude = Column(Float)\n amenity_ids = []\n if getenv('HBNB_TYPE_STORAGE') == 'db':\n reviews = relationship('Review', backref='place',\n cascade='all, delete')\n amenities = 
relationship('Amenity', secondary=place_amenity,\n                                 backref='place_amenities', viewonly=False)\n    else:\n        @property\n        def reviews(self):\n            '''Review instances whose place_id matches this Place'''\n            from models import storage\n            relations = []\n            dic = storage.all(Review)\n            for review in dic.values():\n                if review.place_id == self.id:\n                    relations.append(review)\n\n            return relations\n\n        @property\n        def amenities(self):\n            return self.amenity_ids\n\n        @amenities.setter\n        def amenities(self, obj=None):\n            if not obj:\n                return\n            if obj.to_dict()['__class__'] == 'Amenity':\n                self.amenity_ids.append(obj.id)\n" } ]
4
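db_storage.py above keys every object as "<ClassName>.<id>" and assembles its MySQL URL from the HBNB_MYSQL_* environment variables, which is what the Flask views iterate over with .values(). A hedged usage sketch follows: only the variable names are taken from the code above, the credential values are placeholders, and it assumes a reachable MySQL instance plus the project's models package on the import path.

import os

# Placeholder credentials; only the variable names come from db_storage.py.
os.environ.update({
    'HBNB_TYPE_STORAGE': 'db',
    'HBNB_MYSQL_USER': 'hbnb_dev',
    'HBNB_MYSQL_PWD': 'hbnb_dev_pwd',
    'HBNB_MYSQL_HOST': 'localhost',
    'HBNB_MYSQL_DB': 'hbnb_dev_db',
})

from models import storage
from models.state import State

# Keys look like 'State.<uuid>', i.e. obj.__class__.__name__ + '.' + obj.id.
for key, state in storage.all(State).items():
    print(key, state.name)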
ujm-projet-l3info-2017/Groupe2
https://github.com/ujm-projet-l3info-2017/Groupe2
19ffecf90cd5b3eea335a0ac615927923c2c05c3
1706b50d542d82ca4184dd8394e235e6350ff8b2
d944b6dc2b43ec73dd223c7122d6ad202e910a7c
refs/heads/master
2020-12-31T00:40:29.492207
2017-04-07T15:29:10
2017-04-07T15:29:10
80,608,155
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5165793895721436, "alphanum_fraction": 0.584642231464386, "avg_line_length": 27.649999618530273, "blob_id": "86d4010a167502c4ca695d8707057c4dec0dcb83", "content_id": "30506217c10851a72856e490e279177c15e8d35a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 573, "license_type": "no_license", "max_line_length": 176, "num_lines": 20, "path": "/GardenManager/main/migrations/0027_auto_20170319_2147.py", "repo_name": "ujm-projet-l3info-2017/Groupe2", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.10.6 on 2017-03-19 21:47\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('main', '0026_auto_20170319_2146'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='fruit',\n name='colour',\n field=models.PositiveSmallIntegerField(choices=[(0, 'brown'), (1, 'brown, purple'), (2, 'pink, brown, purple'), (3, 'green'), (4, 'violet, blue'), (5, 'unknown')]),\n ),\n ]\n" }, { "alpha_fraction": 0.52163165807724, "alphanum_fraction": 0.5735476016998291, "avg_line_length": 31.360000610351562, "blob_id": "633501a6fd68b3b9c72b61dd3949406290094597", "content_id": "f16d7d939ed9732d3fe625dc9f9bc992b9262bb1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 809, "license_type": "no_license", "max_line_length": 157, "num_lines": 25, "path": "/GardenManager/main/migrations/0026_auto_20170319_2146.py", "repo_name": "ujm-projet-l3info-2017/Groupe2", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.10.6 on 2017-03-19 21:46\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('main', '0025_auto_20170319_1912'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='fruit',\n name='colour',\n field=models.PositiveSmallIntegerField(choices=[(0, 'brown'), (1, 'brown, purple'), (2, 'pink, brown, purple'), (3, 'green'), (4, 'unknown')]),\n ),\n migrations.AlterField(\n model_name='fruit',\n name='type',\n field=models.PositiveSmallIntegerField(choices=[(0, 'capsule'), (1, 'cone (winged seeds)'), (2, 'aborted (hybrids) or absent'), (3, 'unknown')]),\n ),\n ]\n" }, { "alpha_fraction": 0.5178571343421936, "alphanum_fraction": 0.5714285969734192, "avg_line_length": 23.639999389648438, "blob_id": "7541f313e752445b9fb138dae5c4b75b0bad4aa1", "content_id": "beeff965e10e07a997a008dde9b0aa42f477378d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 616, "license_type": "no_license", "max_line_length": 58, "num_lines": 25, "path": "/GardenManager/main/migrations/0005_auto_20170307_1818.py", "repo_name": "ujm-projet-l3info-2017/Groupe2", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.10.6 on 2017-03-07 18:18\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('main', '0004_auto_20170307_1818'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='flower',\n name='months',\n field=models.ManyToManyField(to='main.Month'),\n ),\n migrations.AlterField(\n model_name='fruit',\n name='months',\n field=models.ManyToManyField(to='main.Month'),\n ),\n ]\n" }, { "alpha_fraction": 0.505602240562439, "alphanum_fraction": 
0.5630252361297607, "avg_line_length": 28.75, "blob_id": "cd88f89f253be722dd542c6eb8da3446bb73d41c", "content_id": "281b8986f05f103b3d4f0d2448856925fa5be2fd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 714, "license_type": "no_license", "max_line_length": 217, "num_lines": 24, "path": "/GardenManager/main/migrations/0022_auto_20170315_1342.py", "repo_name": "ujm-projet-l3info-2017/Groupe2", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.10.6 on 2017-03-15 13:42\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('main', '0021_auto_20170315_1339'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='ground',\n name='type',\n ),\n migrations.AddField(\n model_name='ground',\n name='ground',\n field=models.PositiveSmallIntegerField(choices=[(0, 'all'), (1, 'acidic'), (2, 'bog'), (3, 'well-drained'), (4, 'humus rich'), (5, 'alkaline'), (6, 'rocky or gravelly or dry'), (7, 'unknown')], null=True),\n ),\n ]\n" }, { "alpha_fraction": 0.4605462849140167, "alphanum_fraction": 0.5341426134109497, "avg_line_length": 51.720001220703125, "blob_id": "d446b9f074ce9726fe90f7966a9b1b1a41761fcb", "content_id": "38868d47104e103e1a4eaa2e1dbc509ff4feb71f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1318, "license_type": "no_license", "max_line_length": 409, "num_lines": 25, "path": "/GardenManager/main/migrations/0028_auto_20170319_2156.py", "repo_name": "ujm-projet-l3info-2017/Groupe2", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.10.6 on 2017-03-19 21:56\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('main', '0027_auto_20170319_2147'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='flower',\n name='colour',\n field=models.PositiveSmallIntegerField(choices=[(0, 'brown, purple'), (1, 'pink, brown, purple'), (2, 'violet, blue'), (3, 'All'), (4, 'White'), (5, 'Orange'), (6, 'Yellow'), (7, 'Green-yellow'), (8, 'Green'), (9, 'Blue'), (10, 'Violet'), (11, 'Purple'), (12, 'Pink'), (13, 'Magenta'), (14, 'Red'), (15, 'Dark-red'), (16, 'Brown'), (17, 'Bronze'), (18, 'Silver'), (19, 'Black'), (20, 'unknown')]),\n ),\n migrations.AlterField(\n model_name='fruit',\n name='colour',\n field=models.PositiveSmallIntegerField(choices=[(0, 'brown, purple'), (1, 'pink, brown, purple'), (2, 'violet, blue'), (3, 'All'), (4, 'White'), (5, 'Orange'), (6, 'Yellow'), (7, 'Green-yellow'), (8, 'Green'), (9, 'Blue'), (10, 'Violet'), (11, 'Purple'), (12, 'Pink'), (13, 'Magenta'), (14, 'Red'), (15, 'Dark-red'), (16, 'Brown'), (17, 'Bronze'), (18, 'Silver'), (19, 'Black'), (20, 'unknown')]),\n ),\n ]\n" }, { "alpha_fraction": 0.4736842215061188, "alphanum_fraction": 0.5543859601020813, "avg_line_length": 41.75, "blob_id": "75ecdc21ecec5bf6eb13f31bd3381fc5e149ba89", "content_id": "426a173503720251ae1394ec952e72d66889e576", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 855, "license_type": "no_license", "max_line_length": 458, "num_lines": 20, "path": "/GardenManager/main/migrations/0032_auto_20170319_2211.py", "repo_name": "ujm-projet-l3info-2017/Groupe2", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.10.6 on 2017-03-19 
22:11\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('main', '0031_auto_20170319_2204'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='fruit',\n name='colour',\n field=models.PositiveSmallIntegerField(choices=[(0, 'red, brown'), (1, 'green-yellow, brown'), (2, 'brown, purple'), (3, 'pink, brown, purple'), (4, 'violet, blue'), (5, 'all'), (6, 'white'), (7, 'orange'), (8, 'yellow'), (9, 'green-yellow'), (10, 'green'), (11, 'blue'), (12, 'violet'), (13, 'purple'), (14, 'pink'), (15, 'magenta'), (16, 'red'), (17, 'dark-red'), (18, 'brown'), (19, 'bronze'), (20, 'silver'), (21, 'black'), (22, 'unknown')]),\n ),\n ]\n" }, { "alpha_fraction": 0.5, "alphanum_fraction": 0.5525525808334351, "avg_line_length": 24.615385055541992, "blob_id": "243d63c86dfacdbe2ea91a866e5db4145c03a363", "content_id": "1257d188e6c0cba02e6df53c7748d4b2a5affacd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 666, "license_type": "no_license", "max_line_length": 104, "num_lines": 26, "path": "/GardenManager/main/migrations/0016_auto_20170314_1049.py", "repo_name": "ujm-projet-l3info-2017/Groupe2", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.10.6 on 2017-03-14 10:49\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('main', '0015_auto_20170314_1038'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Form',\n fields=[\n ('id', models.CharField(max_length=90, primary_key=True, serialize=False, unique=True)),\n ('form', models.PositiveSmallIntegerField()),\n ],\n ),\n migrations.RemoveField(\n model_name='plant',\n name='form',\n ),\n ]\n" }, { "alpha_fraction": 0.5682319402694702, "alphanum_fraction": 0.5694925785064697, "avg_line_length": 33.129032135009766, "blob_id": "9c4c6081626945d36ea25bb2ce52304896a932a9", "content_id": "c0f428b6b03cd4210d4de5f88f668d82740f8127", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3178, "license_type": "no_license", "max_line_length": 101, "num_lines": 93, "path": "/GardenManager/databases_scraper/plantdatabase_kpu_ca/plantdatabase_kpu_ca/spiders/plant.py", "repo_name": "ujm-projet-l3info-2017/Groupe2", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\nimport scrapy\nimport csv\nimport re\n\n\nclass PlantSpider (scrapy.Spider):\n\n name = \"PlantSpider\"\n\n with open (\"URLs\", \"r\") as f:\n start_urls = map (str.strip, f.readlines ())\n\n def __init__ (self):\n super (PlantSpider, self).__init__ ()\n self.header = map (unicode, [\n \"scientific_name\", \"pronunciation\", \"common_name\", \"family_name\",\n \"plant_type\", \"key_id_features\",\n \"habit\", \"form\", \"texture\", \"height\", \"spread\", \"growth_rate\", \"origin\",\n \"climate\", \"exposure\", \"soil_or_growing_medium\", \"water\", \n \"landscape_uses\",\n \"additional_info\", \"leaf_form\", \"leaf_arrangement\",\n \"leaf_texture\", \"leaf_surfaces\", \"leaf_colour_in_summer\", \"leaf_colour_in_fall\", \"leaf_shapes\",\n \"leaf_apices\", \"leaf_bases\", \"leaf_margins\",\n \"inflorescence_type\", \"petal_colour\", \"flower_scent\", \"flower_time\",\n \"fruit_type\", \"fruit_colour\", \"fruiting_time\",\n \"bark_morphology\", \"bark_or_stem_colour\", \"propagation\",\n \"pest_susceptibility\"])\n 
self.csvfile = open('plantes.csv', 'w')\n self.writer = csv.DictWriter(self.csvfile, fieldnames=self.header)\n self.writer.writeheader()\n self.replacement = {\n \"soil/_growing_medium\": \"soil_or_growing_medium\",\n \"leaf_texture/_venation\": \"leaf_texture\",\n \"flower_flower_scent\": \"flower_scent\",\n \"flower_flower_time\": \"flower_time\",\n \"flower_colour_(petals)\": \"petal_colour\",\n \"flower_flower_time_at_peak\": \"flower_time_peak\",\n \"hardiness_rating\": \"climate\",\n \"water_use\": \"water\", \n }\n self.frames = {\n \"leaf_morphology\": \"leaf\",\n \"flower_morphology\": \"flower\",\n \"flower_morphology\": \"flower\",\n }\n\n def parse (self, response):\n rows = {}\n tds = response.xpath (\"//tr/td\")\n skip = None\n frame = None\n for no, td in enumerate (tds):\n if no == skip:\n no = None\n continue\n key = '_'.join (filter (lambda x:x!='', map (self.trim, td.xpath (\".//text()\").extract ())))\n if key:\n if self.frames.has_key (key):\n frame = self.frames[key]\n continue\n if frame is not None:\n key = frame + \"_\" + key\n if key in (\"leaf_margins\", \"flower_flower_time_at_peak\"):\n frame = None\n if self.replacement.has_key (key):\n key = self.replacement[key]\n if key in self.header:\n value = tds[no+1].xpath (\".//text()\").extract ()\n skip = no + 1\n rows[key] = ' '.join (filter (lambda x:x!='', map (self.trim_no_under, value)))\n else:\n pass#print key\n self.writer.writerow (dict (map (self.sanitize, rows.items ())))\n\n def trim_no_under (self, text):\n return self.trim (text, False)\n\n def trim (self, text, underscore=True):\n trimed = re.sub (\":$\", '', re.sub (\"[\\n\\r\\t\\ ]+\", \" \", text).strip ())\n if underscore:\n trimed = trimed.lower ().replace (\" \", \"_\")\n return trimed\n\n def sanitize (self, value):\n if isinstance (value, (str, unicode)):\n return value.replace (u\"’\", \"'\").replace (u\"‘\", \"'\").replace (u\" \", \" \")\\\n .encode (\"utf-8\")\n else:\n return map (self.sanitize, value)" }, { "alpha_fraction": 0.5444947481155396, "alphanum_fraction": 0.5942685008049011, "avg_line_length": 25.520000457763672, "blob_id": "d759726398f5a644413b343722f0c4fbcd4dd055", "content_id": "e37dabed6b6cc01a6157460c550fcf6e456cb878", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 663, "license_type": "no_license", "max_line_length": 99, "num_lines": 25, "path": "/GardenManager/main/migrations/0013_auto_20170314_0957.py", "repo_name": "ujm-projet-l3info-2017/Groupe2", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.10.6 on 2017-03-14 09:57\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('main', '0012_auto_20170314_0828'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='plant',\n name='landscapes',\n field=models.ManyToManyField(null=True, related_name='plants', to='main.LandscapeUse'),\n ),\n migrations.AlterField(\n model_name='landscapeuse',\n name='landscape',\n field=models.PositiveSmallIntegerField(),\n ),\n ]\n" }, { "alpha_fraction": 0.5122349262237549, "alphanum_fraction": 0.5725938081741333, "avg_line_length": 23.520000457763672, "blob_id": "3a294ab6e90d7e44275f6af7bc969a7846cc1231", "content_id": "fc09859c8f73ede0dcceab1ba616c3bd49ff4654", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 613, "license_type": "no_license", "max_line_length": 50, "num_lines": 25, 
"path": "/GardenManager/main/migrations/0014_auto_20170314_1005.py", "repo_name": "ujm-projet-l3info-2017/Groupe2", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.10.6 on 2017-03-14 10:05\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('main', '0013_auto_20170314_0957'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='plant',\n name='common_name',\n field=models.CharField(max_length=64),\n ),\n migrations.AlterField(\n model_name='plant',\n name='scientific_name',\n field=models.CharField(max_length=64),\n ),\n ]\n" }, { "alpha_fraction": 0.6403688788414001, "alphanum_fraction": 0.6434426307678223, "avg_line_length": 32.655174255371094, "blob_id": "88e104875258a66bee03cdccc6ea4dbcc6e38fe1", "content_id": "37442419ad195eb5222657a6e219ecc9f688df43", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 976, "license_type": "no_license", "max_line_length": 84, "num_lines": 29, "path": "/GardenManager/main/static/js/layout.js", "repo_name": "ujm-projet-l3info-2017/Groupe2", "src_encoding": "UTF-8", "text": "\nwindow.addEventListener (\"load\", function () {\n\n document.getElementById (\"global_info_alert\").onclick = function () {\n $(\"#flash_notice\").hide () ;\n } ;\n\n document.getElementById (\"submit_login_form_button\").onclick = function () {\n $('#submit_login')[0].click () ;\n } ;\n\n document.getElementById (\"submit_register_form_button\").onclick = function () {\n $('#submit_register')[0].click () ;\n } ;\n\n document.getElementById (\"register_user_password\").onchange = function () {\n $('#register_user_password_verif')[0].pattern = '^' + this.value + '$' ;\n } ;\n\n document.getElementById (\"register_user_password_verif\").onchange = function () {\n $('#formRegister').validate () ;\n } ;\n document.getElementById (\"register_user_password_verif\").oninput = function () {\n this.setCustomValidity('') ;\n } ;\n document.getElementById (\"register_user_password_verif\").oninvalid = function () {\n this.setCustomValidity('Password does not match') ;\n } ;\n\n}) ;" }, { "alpha_fraction": 0.5222482681274414, "alphanum_fraction": 0.5491803288459778, "avg_line_length": 26.54838752746582, "blob_id": "276f369684d64ddb33507e12aec98dae27c4524e", "content_id": "4a17ac95021b6cc696c99acd2b80bc6fa23a7794", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 854, "license_type": "no_license", "max_line_length": 104, "num_lines": 31, "path": "/GardenManager/main/migrations/0018_auto_20170314_1107.py", "repo_name": "ujm-projet-l3info-2017/Groupe2", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.10.6 on 2017-03-14 11:07\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('main', '0017_plant_forms'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Water',\n fields=[\n ('id', models.CharField(max_length=90, primary_key=True, serialize=False, unique=True)),\n ('water', models.PositiveSmallIntegerField()),\n ],\n ),\n migrations.RemoveField(\n model_name='plant',\n name='water',\n ),\n migrations.AddField(\n model_name='plant',\n name='waters',\n field=models.ManyToManyField(null=True, related_name='plants', to='main.Water'),\n ),\n ]\n" }, { "alpha_fraction": 0.5726072788238525, 
"alphanum_fraction": 0.5891088843345642, "avg_line_length": 25.34782600402832, "blob_id": "47b25193a3cfb46f1a78bf1abb1bcd39db01f3bb", "content_id": "8fa033cd4d31aabf9e8e56de558cf250de4482b8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 606, "license_type": "no_license", "max_line_length": 79, "num_lines": 23, "path": "/GardenManager/databases_scraper/garden_org/garden_org/spiders/genus.py", "repo_name": "ujm-projet-l3info-2017/Groupe2", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\nimport scrapy\n\n\nclass GenusSpider (scrapy.Spider):\n\n name = \"genus\"\n\n def __init__ (self):\n super (GenusSpider, self).__init__ ()\n open (\"genuses\", \"w\").close ()\n\n base_url = \"https://garden.org/plants/browse/plants/genus/?offset={}\"\n start_urls = map (base_url.format, xrange (0, 16280, 20)) and []\n\n def parse (self, response):\n page = response.url.split (\"/\")[-2]\n with open (\"genuses\", \"a\") as f:\n genus = response.css (\"div div div table tbody tr td a::text\").extract ()\n f.write ('\\n'.join (genus) + ('\\n' if genus else ''))\n" }, { "alpha_fraction": 0.6537278294563293, "alphanum_fraction": 0.6541624665260315, "avg_line_length": 37.79507064819336, "blob_id": "701daf054bd688d3ea0bf7729c48e8ac87b05303", "content_id": "3776bc1c8e845c50140d32ce172a378b69cc4fef", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 29910, "license_type": "no_license", "max_line_length": 83, "num_lines": 771, "path": "/GardenManager/backend.py", "repo_name": "ujm-projet-l3info-2017/Groupe2", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport re\nimport os, sys\nimport csv\nimport argparse\n\nimport django\nfrom django.conf import settings\n\n\nclass Backend (object):\n\n \"\"\"\n Defines a backend class to be allowed to create some plant (for the moment)\n and its related tables.\n Only handle csv files for the moment.\n \"\"\"\n\n SETTINGS_PATH = \"GardenManager.settings\"\n DATA_TYPES = \"whole_plant\", \n DEFAULT_DATA_TYPE = \"whole_plant\"\n MANDATORY_KEYS = {\n \"whole_plant\": {\n \"scientific_name\", \"common_name\", \"family_name\", \n \"plant_type\", \"form\", \"height\", \n \"spread\", \"growth_rate\", \"climate\", \"exposure\", \n \"soil_or_growing_medium\", \"landscape_uses\", \"water\",\n \"leaf_colour_in_summer\", \"leaf_colour_in_fall\", \n \"petal_colour\", \"flower_scent\", \"flower_time\", \"fruit_type\", \n \"fruit_colour\", \"fruiting_time\", \"propagation\" \n }, \n \"plant\": { \"scientific_name\", \"common_name\" },\n \"landscape\": { \"landscape\" },\n \"climate\": { \"climate\" },\n \"exposure\": { \"exposure\" },\n \"water\": { \"water\" },\n \"ground\": { \"ground\" },\n \"form\": { \"form\" },\n \"habit\": { \"habit\" },\n \"month\": { \"month\" },\n \"fruit\": { \"fruit_type\", \"fruit_colour\", \"fruiting_time\" },\n \"colour\": { \"colour\" },\n \"fruit_type\": { \"fruit_type\" },\n \"flower\": { \"petal_colour\", \"flower_scent\", \"flower_time\" },\n \"flower_scent\": { \"scent\" } ,\n }\n\n ADDITIONNAL_KEYS = { key: set () for key in DATA_TYPES }\n ADDITIONNAL_KEYS[\"whole_plant\"] = {\n \"foreign_tables\": dict (),\n \"attributes\": set ()\n }\n ADDITIONNAL_KEYS[\"plant\"] = {\n \"foreign_tables\": {\n \"Month\": { \"plantation_time\" }\n }, \"attributes\": set ()\n }\n UNUSED_KEYS = { key: set () for key in DATA_TYPES }\n UNUSED_KEYS[\"whole_plant\"]= {\n 
\"pronunciation\", \"key_id_features\", \"texture\", \"origin\", \"additional_info\",\n \"leaf_form\", \"leaf_arrangement\", \"leaf_texture\", \"leaf_surfaces\", \n \"leaf_shapes\", \"leaf_apices\", \"leaf_bases\", \"leaf_margins\",\n \"inflorescence_type\", \"bark_morphology\", \"bark_or_stem_colour\",\n \"pest_susceptibility\"\n }\n\n @staticmethod\n def usable_arguments (args):\n \"\"\"\n Return True if the arguments given in the command line are enough to\n execute one of the backend workflow.\n \"\"\"\n return args.csv is not None\n\n def __init__ (self, args, settings_path=None):\n os.environ.setdefault(\"DJANGO_SETTINGS_MODULE\",\n settings_path or Backend.SETTINGS_PATH)\n django.setup ()\n self.args = args\n self.model_module = __import__ (\"main\").models\n model_module_attributes = dir (self.model_module)\n model_name_list = model_module_attributes[:model_module_attributes.index (\\\n \"__builtins__\")]\n model_name_list.remove (\"Digester\")\n model_name_list.remove (\"CircularList\")\n self.models = {\n name: getattr (self.model_module, name) for name in model_name_list\n }\n self.model_attributes = {\n model: set (map (lambda x:x.name, model._meta.get_fields())) \\\n for model in self.models.itervalues ()\n }\n\n def process_cmd_line (self):\n \"\"\"\n Select the workflow to execute in function of the command arguments and\n execute it.\n \"\"\"\n if self.args.csv is not None:\n self.process_csv ()\n\n def process_csv (self):\n \"\"\"\n Process the csv file given in the command line\n \"\"\"\n if os.path.exists (self.args.csv[0]) is False:\n raise ValueError (\"The given path does not exists: '%s'.\" % \\\n self.args.csv[0])\n with open (self.args.csv[0], \"rb\") as self.csv_file:\n self.process_raw_data (csv.DictReader (self.csv_file))\n print \"The given data has been processed successfully.\"\n\n def process_raw_data (self, generator, data_type=DEFAULT_DATA_TYPE,\n display_errors=True):\n \"\"\"\n Wait for a list of dict objects having the keys predefined by the backend.\n \"\"\"\n if hasattr (generator, \"__iter__\") is False:\n raise ValueError (\"The given data are not iterable. Raw data has not\" + \\\n \"been processed.\")\n if data_type not in Backend.DATA_TYPES:\n raise ValueError (\"Unknown data type: '%s'. 
Expected one of %s.\" % \\\n        (data_type, repr (Backend.DATA_TYPES)))\n    errors = []\n    for plant_data_set in generator:\n      error = self.pass_mandatory_fields_tests (plant_data_set, data_type)\n      if error is not None:\n        errors.append (error)\n      else:\n        self.process_tested_data (plant_data_set, data_type)\n    if display_errors is True:\n      for error in errors:\n        print >>sys.stderr, error\n\n  def pass_mandatory_fields_tests (self, data_set,\n      data_type=DEFAULT_DATA_TYPE):\n    \"\"\"\n    Takes a data set (a dictionary) and searches for missing mandatory keys.\n    \"\"\"\n    missing_fields = Backend.MANDATORY_KEYS[data_type] - data_set.viewkeys ()\n    if not missing_fields:\n      return None\n    return \"Missing fields: %s\" % repr (sorted (list (missing_fields)))\n\n  def process_tested_data (self, data_set, data_type=DEFAULT_DATA_TYPE):\n    \"\"\"\n    Insert the data into their respective table.\n    \"\"\"\n    if data_type == \"whole_plant\":\n      self.create_plant_and_related (data_set)\n    else:\n      pass\n\n  def create_model_instance (self, model_class, model_attributes):\n    \"\"\"\n    Create an instance of the given model, having the given attributes\n    and return the instance.\n    \"\"\"\n    attributes = self.model_attributes.get (model_class, set ())\n    arguments = dict (map (lambda key:(key, model_attributes[key]),\n      set (attributes) & set (model_attributes)))\n    instance = model_class.objects.get_or_create (**arguments)\n    return instance[0]\n\n  def split_from_data (self, sentence, sep=\",\\ ?\", remove_parenthesis=True,\n      remove_quotes=True, lower=True):\n    \"\"\"\n    Split a sentence into a list of words/sub-sentences, using the regex sep.\n    Strip the simple/double quotes from the original sentence if\n    remove_quotes is True.\n    Lower the sentence if lower is True (before split).\n    Remove any parentheses if remove_parenthesis is True (before split).\n    \"\"\"\n    if remove_quotes:\n      sentence = self.remove_trailing_from_data (sentence)\n    try:\n      while sentence is not None and remove_parenthesis and \\\n          sentence.index ('(') < sentence.index (')'):\n        sentence = self.remove_parenthesis_from_data (sentence)\n    except ValueError:\n      pass\n    if lower and sentence is not None:\n      sentence = sentence.lower ()\n    return re.split (sep, sentence or '')\n\n  def remove_trailing_from_data (self, sentence, trailing=\"\\\"'\"):\n    for character in trailing:\n      if sentence.startswith (character) and sentence.endswith (character):\n        sentence = sentence[1:-1]\n    return sentence\n\n  def remove_parenthesis_from_data (self, sentence):\n    return re.sub (\"\\ ?\\([^)]*\\)\", \"\", sentence)\n\n  def create_plant_and_related (self, plant_data_set):\n    \"\"\"\n    Create a plant and all its related attributes in foreign tables:\n    - exposures;\n    - flower (in progress);\n    - forms;\n    - fruit (in progress);\n    - grounds;\n    - habits;\n    - landscapes;\n    - waters.\n    The data set is a dictionary containing all {key: value} pairs for all tables.\n    \"\"\"\n    plant = self.create_plant (plant_data_set, verify=False)\n    exposures = self.create_exposure_set (plant_data_set, verify=False)\n    forms = self.create_form_set (plant_data_set, verify=False)\n    flower = self.create_flowers (plant_data_set, verify=False)\n    fruit = self.create_fruits (plant_data_set, verify=False)\n    grounds = self.create_ground_set (plant_data_set, verify=False)\n    habits = self.create_habit_set (plant_data_set, verify=False)\n    landscapes = self.create_landscape_use_set (plant_data_set, verify=False)\n    waters = self.create_water_set (plant_data_set, verify=False)\n    self.link_plant_to_exposures (plant, exposures)\n    
self.link_plant_to_forms (plant, forms)\n    self.link_plant_to_flower (plant, flower)\n    self.link_plant_to_fruit (plant, fruit)\n    self.link_plant_to_grounds (plant, grounds)\n    self.link_plant_to_habits (plant, habits)\n    self.link_plant_to_landscapes (plant, landscapes)\n    self.link_plant_to_waters (plant, waters)\n    print repr (plant)\n\n  def create_plant (self, plant_data_set, verify=True):\n    \"\"\"\n    Extract the plant's attributes from the plant_data_set,\n    create a models.Plant instance with the given data and return it.\n    \"\"\"\n    self.sanitize_plant_data_set (plant_data_set)\n    additionnal_keys = Backend.ADDITIONNAL_KEYS.get (\"plant\", {})\n    additionnal_attributes = additionnal_keys.get (\"attributes\", set ())\n    additional_relations = additionnal_keys.get (\"foreign_tables\", dict ())\n    plant = self.create_model_instance (self.model_module.Plant, plant_data_set)\n    if additionnal_attributes:\n      for attribute, value in additionnal_attributes.iteritems ():\n        setattr (plant, attribute, value)\n    if additional_relations:\n      for table_name in additional_relations.iterkeys ():\n        self.process_tested_data (plant_data_set, data_type=table_name.lower ())\n    return plant\n\n  def sanitize_plant_data_set (self, plant_data_set):\n    \"\"\"\n    Sanitize the plant's attributes from the dictionary by:\n    - Changing the climate value to its corresponding integer or\n      Plant.DEFAULT_CLIMATE_NAME if unknown;\n    - Changing the growth rate value to its corresponding integer or\n      \"unknown\" if not recognized;\n    - Create height_{min,max} from the height attribute;\n    - Create spread_{min,max} from the spread attribute.\n    \"\"\"\n    try:\n      int (plant_data_set[\"climate\"])\n    except ValueError:\n      search = re.search (\"Zone (\\w?\\d+)\", plant_data_set[\"climate\"])\n      climate_name = search and (\"ZONE_\" + search.groups ()[0]) or \\\n        self.model_module.Plant.DEFAULT_CLIMATE_NAME\n      plant_data_set[\"climate\"] = \\\n        self.model_module.Plant.CLIMATE_VALUE[str (climate_name)]\n    try:\n      int (plant_data_set[\"growth_rate\"])\n    except ValueError:\n      plant_data_set[\"growth_rate\"] = self.model_module.Plant.\\\n        GROWTH_RATE_VALUE[plant_data_set[\"growth_rate\"].lower () or \"unknown\"]\n    if plant_data_set.has_key (\"height_min\") is False:\n      search = re.search (\"(\\d+(\\.\\d+)?)\\ \\-\", plant_data_set[\"height\"])\n      if search is not None:\n        plant_data_set[\"height_min\"] = float (search.groups ()[0])\n    if plant_data_set.has_key (\"height_max\") is False:\n      search = re.search (\"\\ \\-\\ (\\d+(\\.\\d+)?)\", plant_data_set[\"height\"])\n      if search is not None:\n        plant_data_set[\"height_max\"] = float (search.groups ()[0])\n    if plant_data_set.has_key (\"spread_min\") is False:\n      search = re.search (\"(\\d+(\\.\\d+)?)\\ \\-\", plant_data_set[\"spread\"])\n      if search is not None:\n        plant_data_set[\"spread_min\"] = float (search.groups ()[0])\n    if plant_data_set.has_key (\"spread_max\") is False:\n      search = re.search (\"\\ \\-\\ (\\d+(\\.\\d+)?)\", plant_data_set[\"spread\"])\n      if search is not None:\n        plant_data_set[\"spread_max\"] = float (search.groups ()[0])\n\n  def link_plant_to_exposures (self, plant, exposures):\n    plant.exposures.add (*exposures)\n\n  def link_plant_to_forms (self, plant, forms):\n    plant.forms.add (*forms)\n\n  def link_plant_to_fruit (self, plant, fruit):\n    if fruit:\n      plant.fruit = fruit\n\n  def link_plant_to_flower (self, plant, flower):\n    if flower:\n      plant.flower = flower\n\n  def link_plant_to_grounds (self, plant, grounds):\n    plant.grounds.add (*grounds)\n\n  def link_plant_to_habits (self, plant, habits):\n    plant.habits.add 
(*habits)\n\n  def link_plant_to_landscapes (self, plant, landscapes):\n    plant.landscapes.add (*landscapes)\n\n  def link_plant_to_waters (self, plant, waters):\n    plant.waters.add (*waters)\n\n  def create_flowers (self, flower_data_set, verify=True):\n    \"\"\"\n    Extract the flower's attributes from the flower_data_set,\n    create a models.Flower instance with the given data.\n    Create the related months, colours and scents and link them to the\n    created flower.\n    Return the newly created flower.\n    \"\"\"\n    self.sanitize_flower_data_set (flower_data_set)\n    errors = self.pass_mandatory_fields_tests (flower_data_set, \"flower\")\n    assert errors is None, errors\n    flower = self.create_model_instance (self.model_module.Flower, flower_data_set)\n    months = self.create_month_set (\n      {\"months\" : flower_data_set[\"flower_time\"]})\n    colours = self.create_colour_set (\n      {\"colours\" : flower_data_set[\"petal_colour\"]})\n    scents = self.create_flower_scent_set (\n      {\"scents\" : flower_data_set[\"flower_scent\"]})\n    self.link_flower_to_months (flower, months)\n    self.link_flower_to_colours (flower, colours)\n    self.link_flower_to_scents (flower, scents)\n    \n    return flower\n\n  def sanitize_flower_data_set (self, flower_data_set):\n    \"\"\"\n    Sanitize the flower_data_set dictionary by:\n      - Replacing \"Male Cone\" petal colours by \"red, yellow\" ;\n      - Replacing \"no flowers\" petal colours by \"none\".\n    \"\"\"\n    flower_data_set[\"petal_colour\"] = re.sub (\"Male\\ Cone(\\([^)]*\\))?\", \n      \"red, yellow\", flower_data_set[\"petal_colour\"])\n    if \"no flowers\" in flower_data_set[\"petal_colour\"].lower ():\n      flower_data_set[\"petal_colour\"] = \"none\"\n\n  def link_flower_to_months (self, flower, months):\n    flower.months.add (*months)\n\n  def link_flower_to_colours (self, flower, colours):\n    flower.petal_colours.add (*colours)\n\n  def link_flower_to_scents (self, flower, scents):\n    flower.scents.add (*scents)\n\n  def create_flower_scents (self, flower_scent_data_set, verify=True):\n    \"\"\"\n    Extract the flower_scent's attributes from the flower_scent_data_set,\n    create a models.Scent instance with the given data and return it.\n    \"\"\"\n    self.sanitize_flower_scent_data_set (flower_scent_data_set)\n    errors = self.pass_mandatory_fields_tests (flower_scent_data_set,\n      \"flower_scent\")\n    assert errors is None, errors\n    return self.create_model_instance (self.model_module.Scent, \n      flower_scent_data_set)\n\n  def parse_flower_scents (self, flower_scent):\n    \"\"\"\n    Extract all different flower_scents (without parentheses) from a\n    comma-separated sentence.\n    \"\"\"\n    return self.split_from_data (flower_scent, lower=True)\n\n  def sanitize_flower_scent_data_set (self, flower_scent_data_set):\n    \"\"\"\n    Sanitize the flower_scent_data_set dictionary by:\n      - Replacing the flower_scent value by its corresponding integer.\n    \"\"\"\n    if isinstance (flower_scent_data_set[\"scent\"], str):\n      flower_scent_data_set[\"scent\"] = self.model_module.Scent.\\\n        SCENT_VALUES[flower_scent_data_set[\"scent\"] or \"unknown\"]\n\n  def create_flower_scent_set (self, flower_scent_data_set, verify=True):\n    flower_scents = flower_scent_data_set.get (\"scents\", None)\n    if flower_scents is not None:\n      flower_scents = self.parse_flower_scents (flower_scents)\n      return map (lambda *args:self.create_flower_scents (*args, verify=verify),\n        map (lambda flower_scent: { \"scent\": flower_scent }, flower_scents))\n    return []\n\n  def create_fruits (self, fruit_data_set, verify=True):\n    \"\"\"\n    Extract the fruit's attributes from the fruit_data_set,\n    create a models.Fruit instance with the given data.\n    Create the related 
months and link them to the created fruit.\n Return the newly created fruit.\n \"\"\"\n self.sanitize_fruit_data_set (fruit_data_set)\n errors = self.pass_mandatory_fields_tests (fruit_data_set, \"fruit\")\n assert errors is None, errors\n fruit = self.create_model_instance (self.model_module.Fruit, fruit_data_set)\n months = self.create_month_set (\n {\"months\" : fruit_data_set[\"fruiting_time\"]})\n colours = self.create_colour_set (\n {\"colours\" : fruit_data_set[\"fruit_colour\"]})\n types = self.create_fruit_type_set (\n {\"types\" : fruit_data_set[\"fruit_type\"]})\n self.link_fruit_to_months (fruit, months)\n self.link_fruit_to_colours (fruit, colours)\n self.link_fruit_to_types (fruit, types)\n \n return fruit\n\n def sanitize_fruit_data_set (self, fruit_data_set):\n \"\"\"\n Sanitize the fruit_data_set dictionnary by:\n - Replacing the fruit value by its corresponding integer.\n \"\"\"\n return\n\n def link_fruit_to_months (self, fruit, months):\n fruit.months.add (*months)\n\n def link_fruit_to_colours (self, fruit, colours):\n fruit.colours.add (*colours)\n\n def link_fruit_to_types (self, fruit, types):\n fruit.types.add (*types)\n\n def create_fruit_types (self, fruit_type_data_set, verify=True):\n \"\"\"\n Extract the fruit_type's attributes from the fruit_type_data_set,\n create a models.FruitType instance with the given data and return it.\n \"\"\"\n self.sanitize_fruit_type_data_set (fruit_type_data_set)\n errors = self.pass_mandatory_fields_tests (fruit_type_data_set, \"fruit_type\")\n assert errors is None, errors\n return self.create_model_instance (self.model_module.FruitType, \n fruit_type_data_set)\n\n def parse_fruit_types (self, fruit_type):\n \"\"\"\n Extract all diffrent fruit_types (without parenthesis) from comma\n separated sentence.\n \"\"\"\n return self.split_from_data (fruit_type, lower=True)\n\n def sanitize_fruit_type_data_set (self, fruit_type_data_set):\n \"\"\"\n Sanitize the fruit_type_data_set dictionnary by:\n - Replacing the fruit_type value by its corresponding integer.\n \"\"\"\n if isinstance (fruit_type_data_set[\"fruit_type\"], str):\n if fruit_type_data_set[\"fruit_type\"] == \"n/a\":\n fruit_type_data_set[\"fruit_type\"] = \"unknown\"\n fruit_type_data_set[\"type\"] = self.model_module.FruitType.\\\n TYPE_VALUES[fruit_type_data_set[\"fruit_type\"] or \"unknown\"]\n\n def create_fruit_type_set (self, fruit_type_data_set, verify=True):\n fruit_types = fruit_type_data_set.get (\"types\", None)\n if fruit_types is not None:\n fruit_types = self.parse_fruit_types (fruit_types)\n return map (lambda *args:self.create_fruit_types (*args, verify=verify),\n map (lambda fruit_type: { \"fruit_type\": fruit_type }, fruit_types))\n return []\n\n def create_exposures (self, exposure_data_set, verify=True):\n \"\"\"\n Extract the exposure's attributes from the exposure_data_set,\n create a models.Exposure instance with the given data and return it.\n \"\"\"\n self.sanitize_exposure_data_set (exposure_data_set)\n errors = self.pass_mandatory_fields_tests (exposure_data_set, \"exposure\")\n assert errors is None, errors\n return self.create_model_instance (self.model_module.Exposure, \n exposure_data_set)\n\n def parse_exposures (self, exposure):\n \"\"\"\n Extract all diffrent exposures (without parenthesis) from comma\n separated sentence.\n \"\"\"\n return self.split_from_data (exposure, lower=True)\n\n def sanitize_exposure_data_set (self, exposure_data_set):\n \"\"\"\n Sanitize the exposure_data_set dictionnary by:\n - Replacing the exposure value by 
its corresponding integer.\n \"\"\"\n if isinstance (exposure_data_set[\"exposure\"], str):\n exposure_data_set[\"exposure\"] = self.model_module.Exposure.\\\n EXPOSURE_VALUES[exposure_data_set[\"exposure\"] or \"unknown\"]\n\n def create_exposure_set (self, exposure_data_set, verify=True):\n exposures = exposure_data_set.get (\"exposure\", None)\n if exposures is not None:\n exposures = self.parse_exposures (exposures)\n return map (lambda *args:self.create_exposures (*args, verify=verify),\n map (lambda exposure: { \"exposure\": exposure }, exposures))\n return []\n\n def create_forms (self, form_data_set, verify=True):\n \"\"\"\n Extract the form's attributes from the form_data_set,\n create a models.Form instance with the given data and return it.\n \"\"\"\n self.sanitize_form_data_set (form_data_set)\n errors = self.pass_mandatory_fields_tests (form_data_set, \"form\")\n assert errors is None, errors\n return self.create_model_instance (self.model_module.Form, \n form_data_set)\n\n def parse_forms (self, form):\n \"\"\"\n Extract all diffrent forms (without parenthesis) from comma\n separated sentence.\n \"\"\"\n return self.split_from_data (form, lower=True)\n\n def sanitize_form_data_set (self, form_data_set):\n \"\"\"\n Sanitize the form_data_set dictionnary by:\n - Replacing the form value by its corresponding integer.\n \"\"\"\n if isinstance (form_data_set[\"form\"], str):\n form_data_set[\"form\"] = \\\n self.model_module.Form.FORM_VALUES[form_data_set[\"form\"] or \"unknown\"]\n\n def create_form_set (self, form_data_set, verify=True):\n forms = form_data_set.get (\"form\", None)\n if forms is not None:\n forms = self.parse_forms (forms)\n return map (lambda *args:self.create_forms (*args, verify=verify),\n map (lambda form: { \"form\": form }, forms))\n return []\n\n def create_grounds (self, ground_data_set, verify=True):\n \"\"\"\n Extract the ground's attributes from the ground_uses_data_set,\n create a models.Ground instance with the given data and return it.\n \"\"\"\n self.sanitize_ground_data_set (ground_data_set)\n errors = self.pass_mandatory_fields_tests (ground_data_set, \"ground\")\n assert errors is None, errors\n return self.create_model_instance (self.model_module.Ground, \n ground_data_set)\n\n def parse_grounds (self, ground):\n \"\"\"\n Extract all diffrent grounds (without parenthesis) from comma\n separated sentence.\n \"\"\"\n return self.split_from_data (ground, lower=True)\n\n def sanitize_ground_data_set (self, ground_data_set):\n \"\"\"\n Sanitize the ground_data_set dictionnary by:\n - Replacing the ground value by its corresponding integer.\n \"\"\"\n if isinstance (ground_data_set[\"ground\"], str):\n ground_data_set[\"ground\"] = self.model_module.Ground.GROUND_VALUES[\\\n ground_data_set[\"ground\"] or \"unknown\"]\n\n def create_ground_set (self, ground_data_set, verify=True):\n grounds = ground_data_set.get (\"soil_or_growing_medium\", None)\n if grounds is not None:\n grounds = self.parse_grounds (grounds)\n return map (lambda *args:self.create_grounds (*args, verify=verify),\n map (lambda ground: { \"ground\": ground }, grounds))\n return []\n\n def create_habits (self, habit_data_set, verify=True):\n \"\"\"\n Extract the habit's attributes from the habit_data_set,\n create a models.Habit instance with the given data and return it.\n \"\"\"\n self.sanitize_habit_data_set (habit_data_set)\n errors = self.pass_mandatory_fields_tests (habit_data_set, \"habit\")\n assert errors is None, errors\n return self.create_model_instance 
(self.model_module.Habit, \n habit_data_set)\n\n def parse_habits (self, habit):\n return self.split_from_data (habit)\n\n def sanitize_habit_data_set (self, habit_data_set):\n \"\"\"\n Sanitize the habit_data_set dictionnary by:\n - Replacing the habit value by its corresponding integer.\n \"\"\"\n if isinstance (habit_data_set[\"habit\"], str):\n habit_data_set[\"habit\"] = self.model_module.Habit.HABIT_VALUES[\\\n habit_data_set[\"habit\"] or \"unknown\"]\n\n def create_habit_set (self, habit_data_set, verify=True):\n habits = habit_data_set.get (\"habit\", None)\n if habits is not None:\n habits = self.parse_habits (habits)\n return map (lambda *args: self.create_habits (*args, verify=verify),\n map (lambda habit: { \"habit\": habit }, habits))\n return []\n\n def create_landscape_uses (self, landscape_uses_data_set, verify=True):\n \"\"\"\n Extract the landscape's attributes from the landscape_uses_data_set,\n create a models.LandscapeUses instance with the given data and return it.\n \"\"\"\n self.sanitize_landscape_data_set (landscape_uses_data_set)\n errors = self.pass_mandatory_fields_tests (landscape_uses_data_set, \\\n \"landscape\")\n assert errors is None, errors\n return self.create_model_instance (self.model_module.LandscapeUse, \n landscape_uses_data_set)\n\n def sanitize_landscape_data_set (self, landscape_uses_data_set):\n \"\"\"\n Sanitize the landscape_uses_data_set dictionnary by:\n - Replacing the landscape value by its corresponding integer.\n \"\"\"\n if isinstance (landscape_uses_data_set[\"landscape\"], str):\n landscape_uses_data_set[\"landscape\"] = \\\n self.model_module.LandscapeUse.LANDSCAPE_VALUES[\\\n landscape_uses_data_set[\"landscape\"] or \"unknown\"\n ]\n\n def parse_landscape_uses (self, landscape_uses):\n return self.split_from_data (landscape_uses, lower=False)\n\n def create_landscape_use_set (self, plant_data_set, verify=True):\n landscape_uses = plant_data_set.get (\"landscape_uses\", None)\n if landscape_uses is not None:\n uses = self.parse_landscape_uses (landscape_uses)\n return map (lambda *args:self.create_landscape_uses(*args, verify=verify),\n map (lambda use: { \"landscape\": use }, uses))\n return []\n\n def create_colour (self, colour_data_set, verify=True):\n \"\"\"\n Extract the colour's attributes from the colour_data_set,\n create a models.Colour instance with the given data.\n \"\"\"\n self.sanitize_colour_data_set (colour_data_set)\n if colour_data_set[\"colour\"] is not None:\n errors = self.pass_mandatory_fields_tests (colour_data_set, \"colour\")\n assert errors is None, errors\n return self.create_model_instance (self.model_module.Colour, colour_data_set)\n\n def parse_colours (self, colour):\n \"\"\"\n Extract all diffrent colours (without parenthesis) from comma\n separated sentence.\n \"\"\"\n return self.split_from_data (colour, lower=True)\n\n def sanitize_colour_data_set (self, colour_data_set):\n \"\"\"\n Sanitize the colour_data_set dictionnary by:\n - Replacing the colour value by its corresponding integer.\n \"\"\"\n if isinstance (colour_data_set[\"colour\"], str):\n if colour_data_set[\"colour\"] in (\"n/a\", '', None):\n colour_data_set[\"colour\"] = \"unknown\"\n colour_data_set[\"colour\"] = self.model_module.Colour.COLOUR_VALUES.get (\\\n colour_data_set[\"colour\"],\n self.model_module.Colour.COLOUR_VALUES[\"unknown\"])\n\n def create_colour_set (self, colour_data_set, verify=True):\n colours = colour_data_set.get (\"colours\", None)\n if colours is not None:\n colours = self.parse_colours (colours)\n return 
filter (None, map (lambda *args:self.create_colour (*args,\n verify=verify), map (lambda colour: { \"colour\": colour }, colours)))\n return []\n\n def create_month (self, month_data_set, verify=True):\n \"\"\"\n Extract the month's attributes from the month_data_set,\n create a models.Month instance with the given data.\n \"\"\"\n self.sanitize_month_data_set (month_data_set)\n if month_data_set[\"month\"] is not None:\n errors = self.pass_mandatory_fields_tests (month_data_set, \"month\")\n assert errors is None, errors\n return self.create_model_instance (self.model_module.Month, month_data_set)\n\n def parse_months (self, month):\n \"\"\"\n Extract all diffrent months (without parenthesis) from comma\n separated sentence.\n \"\"\"\n if \"can flower any month\" in month.lower ():\n month = str (\", \".join (self.model_module.Month.MONTH_FULL_NAME.keys ()))\n return self.split_from_data (month, lower=True)\n\n def sanitize_month_data_set (self, month_data_set):\n \"\"\"\n Sanitize the month_data_set dictionnary by:\n - Replacing the month value by its corresponding integer.\n \"\"\"\n if isinstance (month_data_set[\"month\"], str):\n if month_data_set[\"month\"] in (\"n/a\", '', None):\n month_data_set[\"month\"] = None\n else:\n month_data_set[\"month\"] = self.model_module.Month.MONTH_VALUES[\\\n self.model_module.Month.MONTH_FULL_NAME[month_data_set[\"month\"]]]\n\n def create_month_set (self, month_data_set, verify=True):\n months = month_data_set.get (\"months\", None)\n if months is not None:\n months = self.parse_months (months)\n return filter (None, map (lambda *args:self.create_month (*args,\n verify=verify), map (lambda month: { \"month\": month }, months)))\n return []\n\n def create_waters (self, water_data_set, verify=True):\n \"\"\"\n Extract the water's attributes from the warer_data_set,\n create a models.Water instance with the given data and return it.\n \"\"\"\n self.sanitize_water_data_set (water_data_set)\n errors = self.pass_mandatory_fields_tests (water_data_set, \"water\")\n assert errors is None, errors\n return self.create_model_instance (self.model_module.Water, \n water_data_set)\n\n def parse_waters (self, water):\n \"\"\"\n Extract all diffrent waters (without parenthesis) from comma\n separated sentence.\n \"\"\"\n return self.split_from_data (water, lower=True)\n\n def sanitize_water_data_set (self, water_data_set):\n \"\"\"\n Sanitize the water_data_set dictionnary by:\n - Replacing the water value by its corresponding integer.\n \"\"\"\n if isinstance (water_data_set[\"water\"], str):\n water_data_set[\"water\"] = \\\n self.model_module.Water.WATER_VALUES[water_data_set[\"water\"] or \\\n \"unknown\"]\n\n def create_water_set (self, water_data_set, verify=True):\n waters = water_data_set.get (\"water\", None)\n if waters is not None:\n waters = self.parse_waters (waters)\n return map (lambda *args:self.create_waters (*args, verify=verify),\n map (lambda water: { \"water\": water }, waters))\n return []\n\n\n\nif __name__ == \"__main__\":\n\n description = \"A backend script to fill the database with some data.\\n\" + \\\n \"Only CSV files can be used for the moment.\"\n parser = argparse.ArgumentParser (description=description)\n parser.add_argument ('--csv', metavar='csv_path', type=str, nargs=1,\n help='The path to the CSV file to process')\n\n args = parser.parse_args ()\n\n if Backend.usable_arguments (args) is False:\n #backend = Backend (args=args)\n parser.print_help ()\n exit ()\n\n backend = Backend (args=args)\n backend.process_cmd_line ()\n\n 
exit ()" }, { "alpha_fraction": 0.5247285962104797, "alphanum_fraction": 0.5428226590156555, "avg_line_length": 35.043479919433594, "blob_id": "c9b26b7186af18fb5bead94cbfa2182f104eb5d5", "content_id": "c1025c2aeee7acac9da5fb246b9c4104e09cf4ae", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2487, "license_type": "no_license", "max_line_length": 109, "num_lines": 69, "path": "/GardenManager/main/migrations/0007_auto_20170309_1617.py", "repo_name": "ujm-projet-l3info-2017/Groupe2", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.10.6 on 2017-03-09 16:17\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('main', '0006_image'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Exposure',\n fields=[\n ('id', models.CharField(max_length=90, primary_key=True, serialize=False, unique=True)),\n ('exposure', models.PositiveSmallIntegerField(choices=[(0, '')])),\n ],\n ),\n migrations.CreateModel(\n name='LandscapeUse',\n fields=[\n ('id', models.CharField(max_length=90, primary_key=True, serialize=False, unique=True)),\n ('landscape', models.PositiveSmallIntegerField(choices=[(0, '')])),\n ],\n ),\n migrations.CreateModel(\n name='Session',\n fields=[\n ('user_id', models.CharField(max_length=90, primary_key=True, serialize=False, unique=True)),\n ('last_operation', models.DateField()),\n ('cookie', models.CharField(max_length=256)),\n ],\n ),\n migrations.CreateModel(\n name='User',\n fields=[\n ('id', models.CharField(max_length=90, primary_key=True, serialize=False, unique=True)),\n ('exposure', models.PositiveSmallIntegerField(choices=[(0, '')])),\n ],\n ),\n migrations.AlterField(\n model_name='flower',\n name='id',\n field=models.CharField(max_length=90, primary_key=True, serialize=False, unique=True),\n ),\n migrations.AlterField(\n model_name='fruit',\n name='id',\n field=models.CharField(max_length=90, primary_key=True, serialize=False, unique=True),\n ),\n migrations.AlterField(\n model_name='image',\n name='id',\n field=models.CharField(max_length=90, primary_key=True, serialize=False, unique=True),\n ),\n migrations.AlterField(\n model_name='month',\n name='id',\n field=models.CharField(max_length=90, primary_key=True, serialize=False, unique=True),\n ),\n migrations.AlterField(\n model_name='plant',\n name='id',\n field=models.CharField(max_length=90, primary_key=True, serialize=False, unique=True),\n ),\n ]\n" }, { "alpha_fraction": 0.6193181872367859, "alphanum_fraction": 0.6373106241226196, "avg_line_length": 32.63694381713867, "blob_id": "5245c5801964d0bdd43bda642622ac7e6ceb8961", "content_id": "9d50978f29d58b96cfb90fe526740a33e628dc8a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5280, "license_type": "no_license", "max_line_length": 80, "num_lines": 157, "path": "/GardenManager/main/utils/digester.py", "repo_name": "ujm-projet-l3info-2017/Groupe2", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\nimport hashlib\nimport os\n\n\nclass Digester (object):\n\n \"\"\"\n The Digester class allow the user to create a hash of a string using three\n phases:\n - the real hashing of the string ;\n - an encoding phase ;\n - a cutoff phase ;\n\n The algorithms used for theses phases are given by name like this:\n digester = Digester (name='-'.join ([\"sha256\", \"base64\", \"16\"]))\n or\n digester 
= Digester (name=\"sha256-base64-16\")\n The cutoff, hash_name and encoding can be given individualy by named\n parameter and are prevalant to the name:\n Digester (name=\"sha256-base64-90\", hash_name=sha512) will use sha512.\n Digester (name=\"sha256-base64-16\").digest (\"some string\") will apply the\n following algorithms to the given string (here: \"some string\"):\n - apply a sha256 to the string ;\n - convert it to base64 ;\n - cut the output to get a string with a maximum length of 16 characters.\n\n The user can provide a salt parameter equal to True, False, or any string.\n Default is False, it does not apply any salt.\n If True, a random (os.urandom) salt will be concatenated after the string to\n hash, before the hash phase (never the same salt). An additional parameter\n can be given: salt_length which is the length of the generated\n salt (default=16).\n If salt is a string, this string will be applied as the salt.\n\n If 0 is given as cutoff, there will not be any cutoff applied to the result.\n\n The suported hash algorithms are:\n hashlib.algorithms\n\n The supported encodings are:\n \"base64\", \"\", None\n You can add any encoding by doing\n Digester.SUPORTED_ENCODINGS.append (encoding)\n\n\n Examples of uses:\n sha256_base64_90 = Digester ()\n print sha256_base64_90.digest (\"some string\")\n regular_sha256 = Digester (\"sha256;;\")\n # or regular_sha256 = Digester (Digester.SEP.join ([\"sha256\", '', '']))\n digester = Digester (hash_name=\"md5\", cutoff=0, salt=True, salt_length=32)\n hashed, salt = digester.digest (\"some string\", get_salt=True)\n # lol, salt is bigger than output\n \"\"\"\n\n HASH_ALGORITHMS = hashlib.algorithms\n SUPORTED_ENCODINGS = [\"base64\", \"\", None]\n SEP = ';'\n DEFAULT_HASH = \"sha512\"\n DEFAULT_ENCODING = \"base64\"\n DEFAULT_CUTOFF = \"0\"\n DEFAULT_NAME = SEP.join ([DEFAULT_HASH, DEFAULT_ENCODING,\n str (DEFAULT_CUTOFF)])\n\n def __init__ (self, name=DEFAULT_NAME, salt=False, salt_length=16,\n hash_name=None, cutoff=None, **kwargs):\n algo = name.split (Digester.SEP)\n if len (algo) != 3 and \\\n (hash_name == None or encoding == None or cutoff == None):\n raise ValueError (\"The digester name has not been recognized: '%s'\" % \\\n name)\n if hash_name is None:\n hash_name = algo[0]\n if kwargs.has_key (\"encoding\"):\n encoding = kwargs[\"encoding\"]\n else:\n encoding = algo[1]\n if cutoff is None:\n cutoff = algo[2]\n if hash_name not in Digester.HASH_ALGORITHMS:\n raise ValueError (\"The digester could not recognize the hash \" + \\\n \"algorithm: '%s'\" % hash_name)\n if encoding not in Digester.SUPORTED_ENCODINGS:\n raise ValueError (\"The digester could not recognize the encoding: \" \\\n \"'%s'\" % encoding)\n try:\n self.cutoff = int (cutoff or 0)\n except ValueError:\n raise ValueError (\"The given cutoff is not a integer: '%s'\" % \\\n str (cutoff))\n if not isinstance (salt_length, int):\n raise ValueError (\"The salt length must be an int\")\n if not salt_length >= 0:\n raise ValueError (\"The salt length must be equal or more the 0\")\n self.hash_algorithm = getattr (hashlib, hash_name)\n self.encoding = encoding\n self.salt_length = salt_length\n if salt is False:\n self.apply_salt = False\n self.salt = \"\"\n elif salt is True:\n self.apply_salt = True\n self.salt = None\n elif isinstance (salt, str):\n self.apply_salt = True\n self.salt = salt\n\n def digest (self, sentence=\"\", get_salt=False):\n \"\"\"\n This method apply the digest algorithm (hash, encode, cutoff) to the given\n string. 
If the get_salt parameter is set to True, the output of this function\n    is a tuple of two elements: (the result string, the salt used).\n    Otherwise, just the string is returned.\n    \"\"\"\n    hashed, salt = self.hash (sentence)\n    encoded = self.encode (hashed).replace (\"\\n\", \"\")\n    result = self.cut (encoded)\n    if get_salt:\n      return result, salt\n    return result\n\n  def hash (self, sentence):\n    \"\"\"\n    Apply the hash phase.\n    \"\"\"\n    if self.apply_salt and self.salt is None:\n      salt = os.urandom (self.salt_length)\n    else:\n      salt = self.salt\n    sentence = str (sentence)\n    # parenthesise both operands so the salt is actually appended to the sentence\n    hashed = str (self.hash_algorithm ((sentence or '') + (salt or '')).digest ())\n    return hashed, salt\n\n  def encode (self, sentence):\n    \"\"\"\n    Apply the encoding phase.\n    \"\"\"\n    return sentence.encode (self.encoding) if self.encoding else sentence\n\n  def cut (self, sentence):\n    \"\"\"\n    Apply the cutoff phase.\n    \"\"\"\n    if self.cutoff != 0:\n      return sentence[:self.cutoff]\n    return sentence\n\n\nif __name__ == \"__main__\":\n  help (Digester)\n\n  d = Digester ()\n  print d.digest (\"test sentence\")" }, { "alpha_fraction": 0.5626283288002014, "alphanum_fraction": 0.5985626578330994, "avg_line_length": 35.074073791503906, "blob_id": "cae2e4410472cb4a2dd92dc02bdb429d4c2e4b7d", "content_id": "e08ba359ac2ffe12bc98e632193eb034b71aee10", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 974, "license_type": "no_license", "max_line_length": 110, "num_lines": 27, "path": "/GardenManager/main/migrations/0006_image.py", "repo_name": "ujm-projet-l3info-2017/Groupe2", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.10.6 on 2017-03-07 18:34\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('main', '0005_auto_20170307_1818'),\n    ]\n\n    operations = [\n        migrations.CreateModel(\n            name='Image',\n            fields=[\n                ('id', models.CharField(max_length=64, primary_key=True, serialize=False, unique=True)),\n                ('blob', models.BinaryField(null=True)),\n                ('path', models.FilePathField()),\n                ('flowers', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.Flower')),\n                ('fruits', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.Fruit')),\n                ('plants', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.Plant')),\n            ],\n        ),\n    ]\n" }, { "alpha_fraction": 0.60447758436203, "alphanum_fraction": 0.608208954334259, "avg_line_length": 29.77777862548828, "blob_id": "7e3977954ad476aae68f15896cf63fbb0b0d8e15", "content_id": "6b10b6c3a9fb61a91abe144f4c2abf7215408c0c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 268, "license_type": "no_license", "max_line_length": 78, "num_lines": 9, "path": "/GardenManager/initializer.py.template", "repo_name": "ujm-projet-l3info-2017/Groupe2", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\nwith open (\"main/static/js/google_map_api.js\", \"w\") as google_map_api_js_file:\n  google_map_api_key = \"\"\n  google_map_api_js_file.write (\n    \"get_google_api_key = function () { return '%s' } ;\" % google_map_api_key\n  )\n" }, { "alpha_fraction": 0.5976732969284058, "alphanum_fraction": 0.6204556226730347, "avg_line_length": 18.105262756347656, "blob_id": "16eb33365125871dbe02671eeb2546ac059b4b77", "content_id": "fef14ca139717f1ce2269cc61c341bfeb4c9f1e5", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2063, "license_type": "no_license", "max_line_length": 79, "num_lines": 114, "path": "/README.md", "repo_name": "ujm-projet-l3info-2017/Groupe2", "src_encoding": "UTF-8", "text": "Garden Project\n==============\n\n\nMetadata\n--------\n\n * **@name**: Garden Project\n * **@version**: 1.0\n * **@authors**: PAVOT Baltazar ; PELEGRIN Romain ; RIGHI Sarah\n * **@date creation**: 2017/02/14\n * **@main usage**: Website to manage your gardens.\n\n\nDeveloper info\n------------------\nSome commit hook have been set.\nPut this text into .git/hooks/commit-msg:\n```python\n#!/usr/bin/env python\n\n\nimport sys\nimport re\n\n\nif __name__ == \"__main__\":\n\n if len (sys.argv) != 2:\n print >> sys.stderr, \"You must give the commit file as first argument\"\n exit (1)\n\n regexps = [\n \"^(feat|fix|docs|style|refactor|test|chore)\\(.+\\)\\:\\ .+$\",\n \"^$\",\n \"^.+$\"\n ]\n\n with open (sys.argv[1], \"r\") as commit_file:\n lines = commit_file.readlines ()\n\n if len (lines) < 3:\n print >> sys.stderr, \"Bad global pattern: There must be at least 3 lines.\"\n exit (1)\n\n for no, (line, regexp) in enumerate (zip (lines, regexps)):\n if re.match (regexp, line) is None:\n print >> sys.stderr, \"Bad pattern at line %d:\\n'%s'\\n%s\" % (no+1,\n line.replace (\"\\n\", \"\"), regexp)\n exit (1)\n\n exit (0)\n```\nPut this text into .git/commit.sample: \n```text\n<type>(<scope>): <subject>\n\n<body>\n\n<optionnal_footer>\n```\nOpen a terminal in the parent directory\nof .git then type\n```bash\ngit config commit.template .git/commit.template\n```\nKeep following the sample not to be rejected by hooks.\n\n\n\nConfiguration\n-------------\n\n### Requirement:\n * Debian\n\nInstall pip\n```bash\nsudo apt-get install python-pip\n```\nInstall django\n```bash\nsudo pip install django\n```\n\n\n### Deploy:\n\n * Getting the project ready to work\n\n```bash\ngit clone [email protected]:ujm-projet-l3info-2017/Groupe2.git\n # ou \ngit clone https://github.com/ujm-projet-l3info-2017/Groupe2.git\n```\n\n * Launch the application\n\n```bash\ngit checkout master\ncd GardenManager\npython ./manage.py runserver\n```\n\n\nTechnical description\n---------------------\nDeveloped under python 2.7 and django 1.10.6\n\n\nNotes\n-----------\nIf the application is hosted on your onw computer, to access it, launch it then\ngo to 127.0.0.1:3000 on your web browser." 
}, { "alpha_fraction": 0.5429141521453857, "alphanum_fraction": 0.6087824106216431, "avg_line_length": 24.049999237060547, "blob_id": "67e81e455b99d5e85496eea3af7d2d2b8546f9c8", "content_id": "9ebd5c5ba5f511c11f44f18752bbbd37f513e50d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 501, "license_type": "no_license", "max_line_length": 108, "num_lines": 20, "path": "/GardenManager/main/migrations/0037_auto_20170322_1029.py", "repo_name": "ujm-projet-l3info-2017/Groupe2", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.10.6 on 2017-03-22 10:29\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('main', '0036_auto_20170322_1019'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='fruit',\n name='id',\n field=models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),\n ),\n ]\n" }, { "alpha_fraction": 0.47284644842147827, "alphanum_fraction": 0.5402621626853943, "avg_line_length": 41.720001220703125, "blob_id": "76439fc068e81738676cf998bf5f07478d8e20cd", "content_id": "8642c2f37058b3390b3ba81464d1c66ab93bd94d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1068, "license_type": "no_license", "max_line_length": 394, "num_lines": 25, "path": "/GardenManager/main/migrations/0044_auto_20170322_1317.py", "repo_name": "ujm-projet-l3info-2017/Groupe2", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.10.6 on 2017-03-22 13:17\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('main', '0043_auto_20170322_1309'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='colour',\n name='colour',\n field=models.PositiveSmallIntegerField(choices=[(0, 'all'), (1, 'white'), (2, 'orange'), (3, 'yellow'), (4, 'green-yellow'), (5, 'green'), (6, 'blue'), (7, 'violet'), (8, 'purple'), (9, 'pink'), (10, 'magenta'), (11, 'red'), (12, 'dark-red'), (13, 'brown'), (14, 'bronze'), (15, 'silver'), (16, 'black'), (17, 'showy'), (18, 'not showy'), (19, 'none'), (20, 'unknown')], null=True),\n ),\n migrations.AlterField(\n model_name='scent',\n name='scent',\n field=models.PositiveSmallIntegerField(choices=[(0, 'none'), (1, 'fragrant'), (2, 'spicy'), (3, 'sweet'), (4, 'lemony'), (5, 'musky'), (6, 'unknown')], null=True),\n ),\n ]\n" }, { "alpha_fraction": 0.5110336542129517, "alphanum_fraction": 0.5516840815544128, "avg_line_length": 26.774192810058594, "blob_id": "8dab62cf4f720e9f0e1f436fc2f744d31dce928b", "content_id": "a5efcd84aa1eb892b3589d601ffa63d2f9a2bb97", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 861, "license_type": "no_license", "max_line_length": 104, "num_lines": 31, "path": "/GardenManager/main/migrations/0015_auto_20170314_1038.py", "repo_name": "ujm-projet-l3info-2017/Groupe2", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.10.6 on 2017-03-14 10:38\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('main', '0014_auto_20170314_1005'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Habit',\n fields=[\n ('id', models.CharField(max_length=90, primary_key=True, 
serialize=False, unique=True)),\n ('habit', models.PositiveSmallIntegerField()),\n ],\n ),\n migrations.RemoveField(\n model_name='plant',\n name='habit',\n ),\n migrations.AddField(\n model_name='plant',\n name='habits',\n field=models.ManyToManyField(null=True, related_name='plants', to='main.Habit'),\n ),\n ]\n" }, { "alpha_fraction": 0.5004868507385254, "alphanum_fraction": 0.5501460433006287, "avg_line_length": 33.233333587646484, "blob_id": "1dc0ba8dfe3a4e726ef07af03af5b2859ea8359a", "content_id": "4469f8e09dc2e87db9e720c4a17ea8b702a76098", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1027, "license_type": "no_license", "max_line_length": 245, "num_lines": 30, "path": "/GardenManager/main/migrations/0025_auto_20170319_1912.py", "repo_name": "ujm-projet-l3info-2017/Groupe2", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.10.6 on 2017-03-19 19:12\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('main', '0024_auto_20170317_0527'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='fruit',\n name='colour',\n field=models.PositiveSmallIntegerField(choices=[(0, 'brown'), (1, 'unknown')]),\n ),\n migrations.AlterField(\n model_name='fruit',\n name='type',\n field=models.PositiveSmallIntegerField(choices=[(0, 'capsule'), (1, 'unknown')]),\n ),\n migrations.AlterField(\n model_name='month',\n name='month',\n field=models.PositiveSmallIntegerField(choices=[(0, 'january'), (1, 'february'), (2, 'march'), (3, 'april'), (4, 'may'), (5, 'june'), (6, 'july'), (7, 'august'), (8, 'septembre'), (9, 'octobre'), (10, 'novembre'), (11, 'decembre')]),\n ),\n ]\n" }, { "alpha_fraction": 0.6132169365882874, "alphanum_fraction": 0.617298424243927, "avg_line_length": 29.524377822875977, "blob_id": "6c499cf2a5224d3a572116c6afbe60035defabd9", "content_id": "55428d22a659c64413a2369cb97e7c8d101dea49", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 28176, "license_type": "no_license", "max_line_length": 93, "num_lines": 923, "path": "/GardenManager/main/models.py", "repo_name": "ujm-projet-l3info-2017/Groupe2", "src_encoding": "UTF-8", "text": "\n\nfrom __future__ import unicode_literals\nimport time\n\nfrom django.db import models\n\nfrom utils.digester import Digester\nfrom utils.circular_list import CircularList\n\n\ndef set_class_attribute (attributes):\n print attributes\n def attr_setter (cls):\n for attribute, value in attributes.iteritems ():\n setattr (cls, attribute, value)\n return cls\n return attr_setter\n\n\nclass Scent (models.Model):\n\n \"\"\"\n The Scent class maps the Scent table.\n It defines:\n - an id ;\n - a scent ;\n \"\"\"\n\n # Definition of the regular attributes.\n\n id = models.CharField (max_length=90, primary_key=True, unique=True)\n\n SCENT_NAMES = \"none\", \"fragrant\", \"spicy\", \"sweet\", \"lemony\", \"musky\", \\\n \"unpleasant\", \"unknown\"\n SCENTS = tuple (enumerate (SCENT_NAMES))\n SCENT_VALUES = dict (map (lambda x:x[::-1], SCENTS))\n scent = models.PositiveSmallIntegerField (choices=SCENTS, null=True)\n\n # Definition of the relation-related attributes\n None\n\n def __init__ (self, *args, **kwargs):\n super (Scent, self).__init__ (*args, **kwargs)\n self.id = self.digest ()\n\n def digest (self):\n return Digester ().digest (str (self))\n\n def str_scent (self):\n return Scent.SCENT_NAMES[self.scent]\n\n 
def __str__ (self):\n return \"Scent (%s)\" % self.str_scent ()\n\n def __repr__ (self):\n return ('\\n'.join ((\"Scent object of id %(id)s ({ \",\n \"\\tscent = %(scent)s\",\n \"})\")) % { \"id\": self.id, \"scent\": self.str_scent () })\n\n\nclass Colour (models.Model):\n\n \"\"\"\n The Colour class maps the Colour table.\n It defines:\n - an id ;\n - a colour ;\n \"\"\"\n\n # Definition of the regular attributes.\n\n id = models.CharField (max_length=90, primary_key=True, unique=True)\n\n COLOUR_NAMES = \"all\", \"white\", \"orange\", \"yellow\", \"green-yellow\", \"green\", \\\n \"blue\", \"violet\", \"purple\", \"pink\", \"magenta\", \"red\", \"dark-red\", \"brown\", \\\n \"bronze\", \"silver\", \"black\", \"showy\", \"not showy\", \"none\", \"unknown\"\n COLOURS = tuple (enumerate (COLOUR_NAMES))\n COLOUR_VALUES = dict (map (lambda x:x[::-1], COLOURS))\n colour = models.PositiveSmallIntegerField (choices=COLOURS, null=True)\n\n # Definition of the relation-related attributes\n None\n\n def __init__ (self, *args, **kwargs):\n super (Colour, self).__init__ (*args, **kwargs)\n self.id = self.digest ()\n\n def digest (self):\n return Digester ().digest (str (self))\n\n def str_colour (self):\n return Colour.COLOUR_NAMES[self.colour]\n\n def __str__ (self):\n return \"Colour (%s)\" % self.str_colour ()\n\n def __repr__ (self):\n return ('\\n'.join ((\"Colour object of id %(id)s ({ \",\n \"\\tcolour = %(colour)s\",\n \"})\")) % { \"id\": self.id, \"colour\": self.str_colour () })\n\n\nclass Ground (models.Model):\n\n \"\"\"\n The Ground class maps the Ground table.\n It defines:\n - an id ;\n - a ground ;\n \"\"\"\n\n # Definition of the regular attributes.\n\n id = models.CharField (max_length=90, primary_key=True, unique=True)\n\n GROUND_NAMES = \"all\", \"acidic\", \"bog\", \"well-drained\", \"humus rich\", \\\n \"alkaline\", \"rocky or gravelly or dry\", \"unknown\", \n GROUNDS = tuple (enumerate (GROUND_NAMES))\n GROUND_VALUES = dict (map (lambda x:x[::-1], GROUNDS))\n ground = models.PositiveSmallIntegerField (choices=GROUNDS, null=True)\n\n #ph = models.FloatField (null=True)\n PH_VALUES = {\n \"all\":2, \"neutral\": 2, \"acidic\":1, \"bog\": 1, \"well-drained\": 1, \\\n \"humus rich\": 1, \"rocky or gravelly or dry\": 3, \"alkaline\": 3,\n \"unknown\": 2\n }\n PH_EQUIVALANCES = {\n lambda x:x < 7: PH_VALUES[\"acidic\"],\n lambda x:x > 7: PH_VALUES[\"alkaline\"],\n lambda x:x == 7: PH_VALUES[\"neutral\"],\n }\n\n # Definition of the relation-related attributes\n None\n\n def __init__ (self, *args, **kwargs):\n super (Ground, self).__init__ (*args, **kwargs)\n \"\"\"\n if kwargs.has_key (\"ph\") is False and self.ground is not None:\n self.ph = Ground.PH_VALUES[Ground.GROUND_NAMES[self.ground]]\n \"\"\"\n self.id = self.digest ()\n\n def digest (self):\n return Digester ().digest (str (self))\n\n def __str__ (self):\n return \"Ground (%s)\" % Ground.GROUND_NAMES[self.ground]\n\n def __repr__ (self):\n return ('\\n'.join ((\"Ground object of id %(id)s ({ \",\n \"\\ttype = %(ground)s\",\n \"\\tph = %(ph)s\",\n \"})\")) % { \"id\": self.id, \"ground\": Ground.GROUND_NAMES[self.ground], \"ph\": self.ph })\n\n\nclass Session (models.Model):\n\n \"\"\"\n The Session class maps the Session table.\n It defines:\n - a user_id ;\n - a last_operation ;\n - a cookie ;\n \"\"\"\n\n # Definition of the regular attributes.\n\n user_id = models.CharField (max_length=90, primary_key=True, unique=True)\n last_operation = models.DateField (auto_now=True)\n cookie = models.CharField (max_length=256)\n\n 
# Definition of the relation-related attributes\n None\n\n def update_last_operation (self):\n # with auto_now, the last_operation field will be updated at each save op.\n self.save ()\n\n def digest (self):\n return Digester ().digest (self.user_id)\n\n\nclass User (models.Model):\n\n \"\"\"\n The User class maps the User table.\n It defines:\n - an ID ;\n - a login ;\n - a password ;\n - a password's salt ;\n - an email ;\n - a date of last login ;\n \"\"\"\n\n # Definition of the regular attributes.\n\n id = models.CharField (max_length=90, primary_key=True, unique=True)\n\n login = models.CharField (max_length=32, null=True)\n password_hash = models.CharField (max_length=90, null=True)\n salt = models.CharField (max_length=90, null=True)\n email = models.EmailField (null=True)\n last_login = models.DateField (auto_now=True)\n\n # Definition of the relation-related attributes\n session = models.ForeignKey (Session, null=True)\n\n def __init__ (self, login=\"anonymous\", *args, **kwargs):\n super (User, self).__init__ (login=login, *args, **kwargs)\n\n @staticmethod\n def updating_session_operation (function):\n def updating_session_function (self, *args, **kwargs):\n self.update_last_operation ()\n return function (self, *args, **kwargs)\n return updating_session_function\n\n @property\n def password (self):\n return self.password_hash\n\n @password.setter\n def password (self, password, salt=True):\n self.password_hash, self.salt = Digester (salt=salt).digest (password, \\\n get_salt=True)\n\n def update_last_operation (self):\n if self.session:\n self.session.update_last_operation ()\n\n def has_password (self, password):\n return str (sha512 (password + self.salt).digest ()) == self.password\n\n @property\n def is_logged (self):\n return self.login != \"anonymous\"\n\n def is_connected (self):\n return self.session is not None and self.session.has_expired is False\n\n def disconnect (self):\n if self.session:\n self.session.delete ()\n\n def digest (self):\n return Digester ().digest (self.exposure)\n\n\nclass Project (models.Model):\n\n \"\"\"\n The Session class maps the Session table.\n It defines:\n - an id ;\n - a name ;\n - a creation date ;\n - an update date ;\n \"\"\"\n\n def validate_name (name):\n if self.user.projects.filter (name=name) is not None:\n raise ValidationError('Already existing project with name: %s' % name,\n code='invalid')\n\n # Definition of the regular attributes.\n\n id = models.CharField (max_length=90, primary_key=True, unique=True)\n name = models.CharField (max_length=32)\n creation_date = models.DateField ()\n update_date = models.DateField (auto_now=True)\n\n # Definition of the relation-related attributes\n user = models.ForeignKey (User, null=True)\n\n def save (self):\n \"\"\"\n This save check for the existance of another project with the same name.\n If one is found, throw a ValidationError.\n \"\"\"\n super (Project, self).save ()\n\n def digest (self):\n return Digester (salt=True).digest ()\n\n\nclass Exposure (models.Model):\n\n \"\"\"\n The Exposure class maps the Exposure table.\n It defines:\n - an ID ;\n - an exposure ;\n \"\"\"\n\n # Definition of the regular attributes.\n\n id = models.CharField (max_length=90, primary_key=True, unique=True)\n\n EXPOSURE_NAMES = \"moderate\", \"filtered shade\", \"full sun\", \\\n \"part sun/part shade\", \"full sun only if soil kept moist\", \\\n \"deep shade\", \"sheltered\", \"unknown\"\n EXPOSURES = tuple (enumerate (EXPOSURE_NAMES))\n EXPOSURE_VALUES = dict (map (lambda x:x[::-1], EXPOSURES))\n 
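# EXPOSURE_VALUES above reverses each (index, name) pair, mapping an exposure name back to its integer code.\n  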
exposure = models.PositiveSmallIntegerField (choices=EXPOSURES)\n\n # Definition of the relation-related attributes\n None\n\n def __init__ (self, *args, **kwargs):\n super (Exposure, self).__init__ (*args, **kwargs)\n self.id = self.digest ()\n\n def digest (self):\n return Digester ().digest (str (self))\n\n def __str__ (self):\n return \"Exposure (%s)\" % Exposure.EXPOSURE_NAMES[self.exposure]\n\n def __repr__ (self):\n return ('\\n'.join ((\"Exposure object of id %(id)s ({ \",\n \"\\texposure = %(exposure)s\",\n \"})\")) % { \"id\": self.id, \"exposure\": str (self)\n })\n\n\nclass LandscapeUse (models.Model):\n\n \"\"\"\n The LandscapeUse class maps the LandscapeUse table.\n It defines:\n - an ID ;\n - a landscape ;\n \"\"\"\n\n # Definition of the regular attributes.\n\n id = models.CharField (max_length=90, primary_key=True, unique=True)\n\n UNSPECIFIED_LANDSCAPE_NAMES = \"Arbors or trellis\", \n LANDSCAPE_NAMES = \"All\", \"Accent plant\", \"Alpine\", \"Aquatic - ponds\", \\\n \"Attract beneficial insects\", \"Attract birds\", \"Attract butterflies\", \\\n \"Bedding plant\", \"Container planting\", \"Cut flower or foliage\", \\\n \"Dried flower or fruit\", \"Dryland\", \"Erosion control\", \\\n \"Espalier\", \"Fall interest\", \"Filler\", \"Floristry\", \"Forestry\", \\\n \"Fragrance\", \"Golf green\", \"Green roof technology\", \"Green walls\", \\\n \"Ground cover\", \"Group or mass planting\", \"Hanging basket\", \"Hedge row\", \\\n \"Herb\", \"Indoor plant\", \"Lawn - sports field\", \"Medicinal plant\", \\\n \"Mixed shrub border\", \"Native planting\", \"Perennial border\", \"Reclamation\",\\\n \"Rock garden\", \"Screening\", \"Security/barrier\", \"Shade tree\", \\\n \"Sheared hedge\", \"Small garden/space\", \"Specimen plant\", \"Spring interest\",\\\n \"Street\", \"Summer interest\", \"Tall background\", \"Topiary\", \\\n \"Urban agriculture\", \"Waterside planting\", \"Wetland - bogs\", \\\n \"Wild flower garden\", \"Wildlife food\", \"Wind break\", \"Winter interest\", \\\n \"Woodland margin\", \"unknown\"\n LANDSCAPE_NAMES += UNSPECIFIED_LANDSCAPE_NAMES\n LANDSCAPES = tuple (enumerate (LANDSCAPE_NAMES))\n LANDSCAPE_VALUES = dict (map (lambda x:x[::-1], LANDSCAPES))\n landscape = models.PositiveSmallIntegerField (choices=LANDSCAPES)\n\n # Definition of the relation-related attributes\n None\n\n def __init__ (self, *args, **kwargs):\n super (LandscapeUse, self).__init__ (*args, **kwargs)\n self.id = self.digest ()\n\n def set_landscape (self, name):\n if LandscapeUse.LANDSCAPE_VALUES.has_key (name):\n self.landscape = LandscapeUse.LANDSCAPE_VALUES[name]\n\n def digest (self):\n return Digester ().digest (str (self))\n\n def str_landscape (self):\n return LandscapeUse.LANDSCAPE_NAMES[self.landscape]\n\n def __str__ (self):\n return \"Landscape (%s)\" % self.str_landscape ()\n\n def __repr__ (self):\n return ('\\n'.join ((\"Landscape object of id %(id)s ({ \",\n \"\\tlandscape use = %(landscape)s\",\n \"})\")) % { \"id\": self.id, \"landscape\": str (self)\n })\n\n\nclass Month (models.Model):\n\n \"\"\"\n The Month class maps the month table.\n It defines:\n - an ID ;\n - a month ;\n \"\"\"\n\n # Definition of the regular attributes.\n\n id = models.CharField (max_length=90, primary_key=True, unique=True)\n\n MONTH_NAMES = CircularList ((\"january\", \"february\", \"march\", \"april\", \"may\", \\\n \"june\", \"july\", \"august\", \"septembre\", \"octobre\", \"novembre\", \"decembre\"))\n MONTHS = tuple (enumerate (MONTH_NAMES))\n MONTH_VALUES = dict (map (lambda _:_[::-1], 
MONTHS))\n MONTH_FULL_NAME = { month[:3]: month for month in MONTH_NAMES }\n month = models.PositiveSmallIntegerField (choices=MONTHS)\n\n # Definition of the relation-related attributes\n None\n\n def __init__ (self, *args, **kwargs):\n super (Month, self).__init__ (*args, **kwargs)\n self.id = self.digest ()\n\n def from_to (self, start, stop, step=1):\n start_index, stop_index = map (Month.MONTH_NAMES.index, (start, stop))\n return Month.MONTH_NAMES[start_index:stop_index:step]\n\n def digest (self):\n return Digester ().digest (str (self))\n\n def str_month (self):\n return list (Month.MONTH_NAMES[self.month])[0]\n\n def __str__ (self):\n return \"Month (%s)\" % self.str_month ()\n\n def __repr__ (self):\n return ('\\n'.join ((\"Month object of id %(id)s ({ \",\n \"\\tmonth = %(month)s\",\n \"})\")) % { \"id\": self.id, \"month\": str (self)\n })\n\n\nclass FruitType (models.Model):\n\n \"\"\"\n The FruitType class maps the fruit_type table.\n It defines:\n - an ID ;\n - a type ;\n \"\"\"\n\n # Definition of the regular attributes.\n\n id = models.CharField (max_length=90, primary_key=True, unique=True)\n\n TYPE_NAMES = \"all\", \"aggregate fruit\", \"achene\", \"berry\", \"capsule\", \\\n \"cypsela\", \"drupe\", \"edible\", \"follicle\", \"grain\", \"hesperidium\", \"legume\",\\\n \"multiple fruit\", \"nut\", \"pepo\", \"pome\", \"samara\", \"schizocarp\", \"silicle\",\\\n \"silique\", \"aborted or absent\", \"cone\", \"sporangium\", \"unknown\"\n\n TYPES = tuple (enumerate (TYPE_NAMES))\n TYPE_VALUES = dict (map (lambda _:_[::-1], TYPES))\n type = models.PositiveSmallIntegerField (choices=TYPES)\n\n # Definition of the relation-related attributes\n None\n\n def __init__ (self, *args, **kwargs):\n super (FruitType, self).__init__ (*args, **kwargs)\n self.id = self.digest ()\n\n def digest (self):\n return Digester ().digest (str (self))\n\n def str_type (self):\n return FruitType.TYPE_NAMES[self.type]\n\n def __str__ (self):\n return \"FruitType (%s)\" % self.str_type ()\n\n def __repr__ (self):\n return ('\\n'.join ((\"FruitType object of id %(id)s ({ \",\n \"\\ttype = %(type)s\",\n \"})\")) % { \"id\": str (self.id), \"type\": self.str_type () })\n\n\nclass Fruit (models.Model):\n\n \"\"\"\n The Fruit class maps the fruit table.\n It defines:\n - an ID ;\n - some type ;\n - some colours ;\n - some fruit time.\n \"\"\"\n\n # Definition of the regular attributes.\n id = models.CharField (max_length=90, primary_key=True, unique=True)\n\n # Definition of the relation-related attributes\n colours = models.ManyToManyField (Colour, related_name=\"fruits\")\n types = models.ManyToManyField (FruitType, related_name=\"fruits\")\n months = models.ManyToManyField (Month, related_name=\"fruits\")\n None\n\n def __init__ (self, *args, **kwargs):\n super (Fruit, self).__init__ (*args, **kwargs)\n self.update_id ()\n\n def update_id (self):\n self.id = self.digest ()\n\n def digest (self):\n return Digester (salt=True).digest ('')# str (self))\n\n def str_colour (self):\n return ', '.join ([colour.str_colour () for colour in self.colours.all ()])\n\n def str_month (self):\n return ', '.join ([month.str_month () for month in self.months.all ()])\n\n def str_type (self):\n return ', '.join ([fruit_type.str_type () \\\n for fruit_type in self.types.all ()])\n\n def __str__ (self):\n return \"Fruit (colours are %s ; types are %s ; fruiting times are %s)\" % (\n self.str_colour (), self.str_type (), self.str_month ())\n\n def __repr__ (self):\n return ('\\n'.join ((\"Fruit object of id %(id)s ({ \",\n 
\"\\tcolour = %(colour)s\",\n \"})\")) % { \"id\": str (self.id), \"colour\": self.str_colour () })\n\n\nclass Flower (models.Model):\n\n \"\"\"\n The Flower class maps the flower table.\n It defines:\n - an ID ;\n - some scents ;\n - some petal colours ;\n - some flower times\n \"\"\"\n\n # Definition of the regular attributes.\n\n id = models.CharField (max_length=90, primary_key=True, unique=True)\n\n # Definition of the relation-related attributes\n months = models.ManyToManyField (Month, related_name=\"flowers\")\n petal_colours = models.ManyToManyField (Colour, related_name=\"flowers\")\n scents = models.ManyToManyField (Scent, related_name=\"flowers\")\n None\n\n def __init__ (self, *args, **kwargs):\n super (Flower, self).__init__ (*args, **kwargs)\n self.update_id ()\n\n def update_id (self):\n self.id = self.digest ()\n\n def digest (self):\n return Digester (salt=True).digest ('')\n\n def str_colour (self):\n return ', '.join (colour.str_colour () for colour in self.petal_colours.all ())\n\n def str_scent (self):\n return ', '.join (scent.str_scent () for scent in self.scents.all ())\n return Flower.SCENT_NAMES[self.scent]\n\n def __str__ (self):\n return \"Flower (colour is %s ; scent is %s)\" % (self.str_colour (), self.str_scent ())\n\n def __repr__ (self):\n return ('\\n'.join ((\"Flower object of id %(id)s ({ \",\n \"\\tcolours = %(colours)s\",\n \"})\")) % { \"id\": self.id, \"colours\": self.str_colour ()\n })\n\n\nclass Habit (models.Model):\n\n \"\"\"\n The Habit class maps the habit table.\n It defines:\n - an ID ;\n - a habit name ;\n \"\"\"\n\n # Definition of the regular attributes.\n\n id = models.CharField (max_length=90, primary_key=True, unique=True)\n\n HABIT_NAMES = \"all\", \"arching\", \"dense\", \"epiphytic\", \"fastigiate\", \\\n \"horizontal\", \"irregular\", \"open\", \"pendulous\", \"spreading\", \\\n \"stiffly upright\", \"twiggy\", \"upright\", \"unknown\"\n HABITS = tuple (enumerate (HABIT_NAMES))\n HABIT_VALUES = dict (map (lambda _:_[::-1], HABITS))\n habit = models.PositiveSmallIntegerField (choices=HABITS)\n\n # Definition of the relation-related attributes\n None\n\n def __init__ (self, *args, **kwargs):\n super (Habit, self).__init__ (*args, **kwargs)\n self.id = self.digest ()\n\n def digest (self):\n return Digester ().digest (str (self))\n\n def str_habit (self):\n return Habit.HABIT_NAMES[self.habit]\n\n def __str__ (self):\n return \"Habit (%s)\" % self.str_habit ()\n\n def __repr__ (self):\n return ('\\n'.join ((\"Habit object of id %(id)s ({ \",\n \"\\thabit = %(habit)s\",\n \"})\")) % { \"id\": self.id, \"habit\": str (self)\n })\n\n\nclass Form (models.Model):\n\n \"\"\"\n The Form class maps the form table.\n It defines:\n - an ID ;\n - a form name ;\n \"\"\"\n\n # Definition of the regular attributes.\n\n id = models.CharField (max_length=90, primary_key=True, unique=True)\n\n FORM_NAMES = \"all\", \"climbing\", \"columnar\", \"creeping / mat-like\", \\\n \"irregular\", \"mounded\", \"oval - horizontal\", \"oval - vertical\", \\\n \"pyramidal - narrowly\", \"pyramidal - widely\", \"round\", \"vase\", \"weeping\", \\\n \"unknown\"\n FORMS = tuple (enumerate (FORM_NAMES))\n FORM_VALUES = dict (map (lambda _:_[::-1], FORMS))\n form = models.PositiveSmallIntegerField (choices=FORMS)\n\n # Definition of the relation-related attributes\n None\n\n def __init__ (self, *args, **kwargs):\n super (Form, self).__init__ (*args, **kwargs)\n self.id = self.digest ()\n\n def digest (self):\n return Digester ().digest (str (self))\n\n def str_form 
(self):\n return Form.FORM_NAMES[self.form]\n\n def __str__ (self):\n return \"Form (%s)\" % self.str_form ()\n\n def __repr__ (self):\n return ('\\n'.join ((\"Form object of id %(id)s ({ \",\n \"\\tform = %(form)s\",\n \"})\")) % { \"id\": self.id, \"form\": str (self)\n })\n\n\nclass Water (models.Model):\n\n \"\"\"\n The Water class maps the water table.\n It defines:\n - an ID ;\n - a Water frequency ;\n \"\"\"\n\n # Definition of the regular attributes.\n\n id = models.CharField (max_length=90, primary_key=True, unique=True)\n\n WATER_NAMES = \"low\", \"moderate\", \"high\", \"wetlands\", \"summer dry\", \\\n \"aquatic\", \"winter dry\", \"dryland\", \"unknown\"\n WATERS = tuple (enumerate (WATER_NAMES))\n WATER_VALUES = dict (map (lambda _:_[::-1], WATERS))\n water = models.PositiveSmallIntegerField (choices=WATERS)\n\n # Definition of the relation-related attributes\n None\n\n def __init__ (self, *args, **kwargs):\n super (Water, self).__init__ (*args, **kwargs)\n self.id = self.digest ()\n\n def digest (self):\n return Digester ().digest (str (self))\n\n def str_water (self):\n return Water.WATERS[self.water][1]\n\n def __str__ (self):\n return \"Water (%s)\" % self.str_water ()\n\n def __repr__ (self):\n return ('\\n'.join ((\"Water object of id %(id)s ({ \",\n \"\\twater frequency = %(water)s\",\n \"})\")) % { \"id\": self.id, \"water\": str (self)\n })\n\n\n#@set_class_attribute(dict (map (lambda _:_[::-1],\n #enumerate (map (\"ZONE_{}\".format, range (1, 12)+[\"8A\", \"8B\"])))))\nclass Plant (models.Model):\n\n \"\"\"\n The Plant class maps the plant table.\n It defines:\n - an ID ;\n - a scientific name ;\n - a common name ;\n - a habit ;\n - a form ;\n - a height ;\n - a spread ;\n - a growth rate ;\n - a climate ;\n - the needed amount of water ;\n - the ability to flower ;\n - the ability to produce friuts ;\n \"\"\"\n\n # Definition of the regular attributes.\n\n \"\"\"\n To get the id of a plant, a SHA-512 algorithm is applied to the:\n common_name+scientific_name+salt\n The output length of a sha-512 is 64.\n The reason to use this kind of algorithm are:\n - ids are NOT indexes, so \"1, 2, 3, ...\" is not sementicaly correct ;\n - it prevents misspelling ids (for dangerous operation like deletion) ;\n - it prevent users to guess metadate like the number of entries we have ;\n \"\"\"\n id = models.CharField (max_length=90, primary_key=True, unique=True)\n\n scientific_name = models.CharField (max_length=64)\n common_name = models.CharField (max_length=64)\n\n spread_min = models.FloatField (null=True)\n spread_max = models.FloatField (null=True)\n height_min = models.FloatField (null=True)\n height_max = models.FloatField (null=True)\n\n GROWTH_RATE_NAMES = \"fast\", \"moderate\", \"slow\", \"unknown\"\n GROWTH_RATES = enumerate (GROWTH_RATE_NAMES)\n GROWTH_RATE_VALUE = dict (map (lambda _:_[::-1], GROWTH_RATES))\n growth_rate = models.PositiveSmallIntegerField (choices=GROWTH_RATES)\n\n CLIMATE_NAMES = map (\"ZONE_{}\".format, range (1, 12)+[\"8A\", \"8B\"]) + [\"unknown\"]\n CLIMATES = enumerate (CLIMATE_NAMES)\n CLIMATE_VALUE = dict (map (lambda _:_[::-1], CLIMATES))\n DEFAULT_CLIMATE = 5\n DEFAULT_CLIMATE_NAME = CLIMATE_NAMES[DEFAULT_CLIMATE]\n climate = models.PositiveSmallIntegerField (choices=CLIMATES)\n\n # Definition of the relation-related attributes\n\n fruit = models.ForeignKey (Fruit, null=True, related_name=\"plants\")\n flower = models.ForeignKey (Flower, null=True, related_name=\"plants\")\n landscapes = models.ManyToManyField (LandscapeUse, 
related_name=\"plants\")\n exposures = models.ManyToManyField (Exposure, related_name=\"plants\")\n habits = models.ManyToManyField (Habit, related_name=\"plants\")\n forms = models.ManyToManyField (Form, related_name=\"plants\")\n waters = models.ManyToManyField (Water, related_name=\"plants\")\n grounds = models.ManyToManyField (Ground, related_name=\"plants\")\n plantation_time = models.ManyToManyField (Month, related_name=\"plants\")\n\n def __init__ (self, *args, **kwargs):\n super (Plant, self).__init__ (*args, **kwargs)\n self.id = self.digest ()\n\n def digest (self):\n return Digester ().digest (self.common_name + self.scientific_name)\n\n def str_growth_rate (self):\n return Plant.GROWTH_RATE_NAMES[self.growth_rate or -1].lower ().capitalize ()\n\n def str_climate (self):\n return Plant.CLIMATE_NAMES[self.climate or -1].lower ().capitalize ()\n\n def __str__(self):\n return \"Plant (%s ; %s)\" % (self.common_name, self.scientific_name)\n\n def __repr__ (self):\n return ('\\n'.join ((\"Plant object of id %(id)s ({ \", \n \"\\tscientific name = %(scientific_name)s\", \n \"\\tcommon name = %(common_name)s\", \n \"\\thabit = %(habits)s\", \n \"\\texposure = %(exposure)s\", \n \"\\tground = %(ground)s\", \n \"\\tform = %(form)s\", \n \"\\tflower = %(flower)s\", \n \"\\tfruit = %(fruit)s\", \n \"\\theight = %(height_min)s - %(height_max)s\", \n \"\\tspread = %(spread_min)s - %(spread_max)s\", \n \"\\tgrowth rate = %(growth_rate)s\", \n \"\\tclimate = %(climate)s\", \n \"\\tplantation time = %(plantation_time)s\", \n \"\\twater = %(water)s\", \n \"\\tlandscape = %(landscape)s\", \n \"})\"\n )) % {\n \"scientific_name\" : self.scientific_name,\n \"common_name\" : self.common_name,\n \"habits\" : map (str, self.habits.all ()) if self.habits else \"unknown\",\n \"exposure\" : map (str, self.exposures.all ()) if self.exposures else \"unknown\",\n \"ground\" : map (str, self.grounds.all ()) if self.grounds else \"unknown\",\n \"form\" : map (str, self.forms.all ()) if self.forms else \"unknown\",\n \"flower\" : str (self.flower) if self.flower else \"unknown\",\n \"fruit\" : str (self.fruit) if self.fruit else \"unknown\",\n \"height_min\" : self.height_min if self.height_min else \"unknown\",\n \"height_max\" : self.height_max if self.height_max else \"unknown\",\n \"spread_min\" : self.spread_min if self.spread_min else \"unknown\",\n \"spread_max\" : self.spread_max if self.spread_max else \"unknown\",\n \"growth_rate\" : self.str_growth_rate () if self.growth_rate is not None else \"unknown\",\n \"climate\" : self.str_climate () if self.climate is not None else \"unknown\",\n \"plantation_time\" : map (str, self.plantation_time.all ()),\n \"water\" : map (str, self.waters.all ()) if self.waters else \"unknown\",\n \"landscape\" : map (str, self.landscapes.all ()) if self.landscapes else \"unknown\",\n \"id\": self.id,\n })\n\n\nclass Area (models.Model):\n\n \"\"\"\n The Area class maps the Area table.\n It defines:\n - an id ;\n - a x position ;\n - a y position ;\n - a ground_id ;\n \"\"\"\n\n # Definition of the regular attributes.\n\n id = models.CharField (max_length=90, primary_key=True, unique=True)\n x = models.FloatField (null=False)\n y = models.FloatField (null=False)\n\n # Definition of the relation-related attributes\n ground = models.ForeignKey (Ground, null=False, related_name=\"areas\")\n\n def digest (self):\n return Digester ().digest (str (self.x) + str (self.y) + self.ground_id)\n\n\nclass PlantSpot (models.Model):\n\n \"\"\"\n The PlantSpot class maps the PlantSpot 
table.\n It defines:\n - an id ;\n - a position id ;\n - a plant id ;\n \"\"\"\n\n # Definition of the regular attributes.\n\n id = models.CharField (max_length=90, primary_key=True, unique=True)\n\n # Definition of the relation-related attributes\n plant = models.ForeignKey (Plant, null=False, related_name=\"plant_spots\")\n area = models.ForeignKey (Area, null=False, related_name=\"plant_spots\")\n\n def digest (self):\n # just a random salt, hashed\n return Digester (salt=True, salt_length=64, cutoff=90).digest ()\n\n\nclass Image (models.Model):\n\n \"\"\"\n The Image class maps the image table.\n It defines:\n - an ID ;\n - a path to the cached image ;\n - a blob (the image) ;\n \"\"\"\n\n # Definition of the regular attributes.\n\n id = models.CharField (max_length=90, primary_key=True, unique=True)\n blob = models.BinaryField (null=True)\n path = models.FilePathField ()\n\n # Definition of the relation-related attributes\n plant = models.ForeignKey (Plant, null=True, related_name=\"images\")\n flower = models.ForeignKey (Flower, null=True, related_name=\"images\")\n fruit = models.ForeignKey (Fruit, null=True, related_name=\"images\")\n\n def digest (self):\n return Digester ().digest (self.blob + self.path)\n\n\nclass Position (models.Model):\n\n \"\"\"\n The Position class maps the Position table.\n It defines:\n - an id ;\n - an x ;\n - a y ;\n \"\"\"\n\n # Definition of the regular attributes.\n\n id = models.CharField (max_length=90, primary_key=True, unique=True)\n x = models.FloatField ()\n y = models.FloatField ()\n\n # Definition of the relation-related attributes\n area = models.ForeignKey (Area, null=True, related_name=\"positions\")\n plant_spot = models.ForeignKey (PlantSpot, null=True, related_name=\"positions\")\n\n def digest (self):\n # hash of the x and y coordinates\n return Digester ().digest (str (self.x) + ';' + str (self.y))\n" }, { "alpha_fraction": 0.5195895433425903, "alphanum_fraction": 0.5503731369972229, "avg_line_length": 25.799999237060547, "blob_id": "47975720495e98efe1a7d2c4eaca8d7ea54eaa7e", "content_id": "5433506b28c4118111529e6e6f54de6cc6abae14", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1072, "license_type": "no_license", "max_line_length": 53, "num_lines": 40, "path": "/GardenManager/main/migrations/0009_auto_20170313_1430.py", "repo_name": "ujm-projet-l3info-2017/Groupe2", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.10.6 on 2017-03-13 14:30\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('main', '0008_auto_20170309_2212'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='plant',\n name='climate',\n field=models.PositiveSmallIntegerField(),\n ),\n migrations.AlterField(\n model_name='plant',\n name='form',\n field=models.PositiveSmallIntegerField(),\n ),\n migrations.AlterField(\n model_name='plant',\n name='growth_rate',\n field=models.PositiveSmallIntegerField(),\n ),\n migrations.AlterField(\n model_name='plant',\n name='habit',\n field=models.PositiveSmallIntegerField(),\n ),\n migrations.AlterField(\n model_name='plant',\n name='water',\n field=models.PositiveSmallIntegerField(),\n ),\n ]\n" }, { "alpha_fraction": 0.5241502523422241, "alphanum_fraction": 0.5617173314094543, "avg_line_length": 33.9375, "blob_id": "9457a3f053314185a9d3c51fa54ecea3a6d5ec0", "content_id": "58355c5406879a53962dc3868c5d23c13af70189",
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1118, "license_type": "no_license", "max_line_length": 231, "num_lines": 32, "path": "/GardenManager/main/migrations/0036_auto_20170322_1019.py", "repo_name": "ujm-projet-l3info-2017/Groupe2", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.10.6 on 2017-03-22 10:19\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('main', '0035_auto_20170320_1622'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='FruitType',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('type', models.PositiveSmallIntegerField(choices=[(0, 'capsule'), (1, 'cone'), (2, 'aborted or absent'), (3, 'schizocarp'), (4, 'samara'), (5, 'cypsela'), (6, 'follicle'), (7, 'aggregate fruit'), (8, 'unknown')])),\n ],\n ),\n migrations.AlterField(\n model_name='fruit',\n name='months',\n field=models.ManyToManyField(related_name='fruits', to='main.Month'),\n ),\n migrations.AddField(\n model_name='fruit',\n name='types',\n field=models.ManyToManyField(related_name='fruits', to='main.FruitType'),\n ),\n ]\n" }, { "alpha_fraction": 0.5045970678329468, "alphanum_fraction": 0.5375878810882568, "avg_line_length": 35.97999954223633, "blob_id": "8e3342448be37b69c0222acf502c7fb256444123", "content_id": "88c7c78afb6d3ec9cae22df04b16b5722b0fd3bc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1849, "license_type": "no_license", "max_line_length": 356, "num_lines": 50, "path": "/GardenManager/main/migrations/0033_auto_20170320_1552.py", "repo_name": "ujm-projet-l3info-2017/Groupe2", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.10.6 on 2017-03-20 15:52\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('main', '0032_auto_20170319_2211'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Colour',\n fields=[\n ('id', models.CharField(max_length=90, primary_key=True, serialize=False, unique=True)),\n ('colour', models.PositiveSmallIntegerField(choices=[(0, 'all'), (1, 'white'), (2, 'orange'), (3, 'yellow'), (4, 'green-yellow'), (5, 'green'), (6, 'blue'), (7, 'violet'), (8, 'purple'), (9, 'pink'), (10, 'magenta'), (11, 'red'), (12, 'dark-red'), (13, 'brown'), (14, 'bronze'), (15, 'silver'), (16, 'black'), (17, 'unknown')], null=True)),\n ],\n ),\n migrations.RemoveField(\n model_name='flower',\n name='colour',\n ),\n migrations.RemoveField(\n model_name='fruit',\n name='colour',\n ),\n migrations.AlterField(\n model_name='flower',\n name='id',\n field=models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),\n ),\n migrations.AlterField(\n model_name='fruit',\n name='id',\n field=models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),\n ),\n migrations.AddField(\n model_name='flower',\n name='colours',\n field=models.ManyToManyField(related_name='flowers', to='main.Colour'),\n ),\n migrations.AddField(\n model_name='fruit',\n name='colours',\n field=models.ManyToManyField(related_name='fruits', to='main.Colour'),\n ),\n ]\n" }, { "alpha_fraction": 0.5506024360656738, "alphanum_fraction": 0.5855421423912048, "avg_line_length": 32.20000076293945, "blob_id": 
"551dc6fa7f5a0e0e951490debfee8d1ce00d5b1d", "content_id": "c46a3c98c30159de9a8b12667eac1c31f86c5ca8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 830, "license_type": "no_license", "max_line_length": 240, "num_lines": 25, "path": "/GardenManager/main/migrations/0024_auto_20170317_0527.py", "repo_name": "ujm-projet-l3info-2017/Groupe2", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.10.6 on 2017-03-17 05:27\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('main', '0023_remove_ground_ph'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='plant',\n name='plantation_time',\n field=models.ManyToManyField(related_name='plants', to='main.Month'),\n ),\n migrations.AlterField(\n model_name='exposure',\n name='exposure',\n field=models.PositiveSmallIntegerField(choices=[(0, 'moderate'), (1, 'filtered shade'), (2, 'full sun'), (3, 'part sun/part shade'), (4, 'full sun only if soil kept moist'), (5, 'deep shade'), (6, 'sheltered'), (7, 'unknown')]),\n ),\n ]\n" }, { "alpha_fraction": 0.5723513960838318, "alphanum_fraction": 0.5865632891654968, "avg_line_length": 29.959999084472656, "blob_id": "057442c2526db5042d1945362990547513474912", "content_id": "520b16666f165d6504cbaae2e5413ee9be25cafd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 774, "license_type": "no_license", "max_line_length": 99, "num_lines": 25, "path": "/GardenManager/databases_scraper/plantdatabase_kpu_ca/plantdatabase_kpu_ca/spiders/url.py", "repo_name": "ujm-projet-l3info-2017/Groupe2", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\nimport scrapy\n\n\nclass URLSpider (scrapy.Spider):\n\n name = \"URLSpider\"\n\n def __init__ (self):\n super (URLSpider, self).__init__ ()\n open (\"URLs\", \"w\").close ()\n\n base_url = \"https://plantdatabase.kpu.ca/\"\n start_urls = [base_url + \"plant/siteIndex\"]\n #start_urls = map (base_url.format, xrange (0, 16280, 20)) and []\n\n def parse (self, response):\n with open (\"URLs\", \"a\") as f:\n urls = map (lambda x:x.xpath (\"@href\").extract (), response.xpath (\"//tbody/tr/td/a\"))\n #urls = response.css (\"tbody tr td a.preview::href\").extract ()\n print '\\n'.join (map (lambda x:URLSpider.base_url + x[0], urls))\n f.write ('\\n'.join (map (lambda x:URLSpider.base_url + x[0], urls)) + ('\\n' if urls else ''))\n" }, { "alpha_fraction": 0.4797462224960327, "alphanum_fraction": 0.5280624628067017, "avg_line_length": 43.543479919433594, "blob_id": "17b485436bdcbe184a2a0a1d9fa44756c3055481", "content_id": "d863e81a6e62760cced812b36705570de4c33e2b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2049, "license_type": "no_license", "max_line_length": 468, "num_lines": 46, "path": "/GardenManager/main/migrations/0041_auto_20170322_1251.py", "repo_name": "ujm-projet-l3info-2017/Groupe2", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.10.6 on 2017-03-22 12:51\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('main', '0040_auto_20170322_1102'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Scent',\n fields=[\n ('id', models.CharField(max_length=90, primary_key=True, serialize=False, 
unique=True)),\n ('scent', models.PositiveSmallIntegerField(choices=[(0, 'all'), (1, 'white'), (2, 'orange'), (3, 'yellow'), (4, 'green-yellow'), (5, 'green'), (6, 'blue'), (7, 'violet'), (8, 'purple'), (9, 'pink'), (10, 'magenta'), (11, 'red'), (12, 'dark-red'), (13, 'brown'), (14, 'bronze'), (15, 'silver'), (16, 'black'), (17, 'unknown')], null=True)),\n ],\n ),\n migrations.RenameField(\n model_name='flower',\n old_name='colours',\n new_name='petal_colours',\n ),\n migrations.RemoveField(\n model_name='flower',\n name='scent',\n ),\n migrations.AlterField(\n model_name='flower',\n name='months',\n field=models.ManyToManyField(related_name='flowers', to='main.Month'),\n ),\n migrations.AlterField(\n model_name='fruittype',\n name='type',\n field=models.PositiveSmallIntegerField(choices=[(0, 'All'), (1, 'Aggregate fruit'), (2, 'Achene'), (3, 'Berry'), (4, 'Capsule'), (5, 'Cypsela'), (6, 'Drupe'), (7, 'Edible'), (8, 'Follicle'), (9, 'Grain'), (10, 'Hesperidium'), (11, 'Legume'), (12, 'Multiple fruit'), (13, 'Nut'), (14, 'Pepo'), (15, 'Pome'), (16, 'Samara'), (17, 'Schizocarp'), (18, 'Silicle'), (19, 'Silique'), (20, 'Aborted or absent'), (21, 'Cone'), (22, 'Sporangium'), (23, 'unknown')]),\n ),\n migrations.AddField(\n model_name='flower',\n name='scents',\n field=models.ManyToManyField(related_name='flowers', to='main.Scent'),\n ),\n ]\n" }, { "alpha_fraction": 0.5376448035240173, "alphanum_fraction": 0.5694980621337891, "avg_line_length": 28.600000381469727, "blob_id": "526b4e1285917420199190d247d5138c48e04c65", "content_id": "6ef7267c23f9b9399b1b92a3cbff0b5d0a438339", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1036, "license_type": "no_license", "max_line_length": 88, "num_lines": 35, "path": "/GardenManager/main/migrations/0020_auto_20170314_1320.py", "repo_name": "ujm-projet-l3info-2017/Groupe2", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.10.6 on 2017-03-14 13:20\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('main', '0019_auto_20170314_1319'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='plant',\n name='forms',\n field=models.ManyToManyField(related_name='plants', to='main.Form'),\n ),\n migrations.AlterField(\n model_name='plant',\n name='habits',\n field=models.ManyToManyField(related_name='plants', to='main.Habit'),\n ),\n migrations.AlterField(\n model_name='plant',\n name='landscapes',\n field=models.ManyToManyField(related_name='plants', to='main.LandscapeUse'),\n ),\n migrations.AlterField(\n model_name='plant',\n name='waters',\n field=models.ManyToManyField(related_name='plants', to='main.Water'),\n ),\n ]\n" }, { "alpha_fraction": 0.49928468465805054, "alphanum_fraction": 0.5464950203895569, "avg_line_length": 23.10344886779785, "blob_id": "7574bf846b5a93f4aa5878dd385899e95f8bbb37", "content_id": "700fb77b29187ca881cc24bea0c6be4db07536ce", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 699, "license_type": "no_license", "max_line_length": 48, "num_lines": 29, "path": "/GardenManager/main/migrations/0011_auto_20170314_0827.py", "repo_name": "ujm-projet-l3info-2017/Groupe2", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.10.6 on 2017-03-14 08:27\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass 
Migration(migrations.Migration):\n\n dependencies = [\n ('main', '0010_auto_20170314_0822'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='plant',\n name='spread',\n ),\n migrations.AddField(\n model_name='plant',\n name='spread_max',\n field=models.FloatField(null=True),\n ),\n migrations.AddField(\n model_name='plant',\n name='spread_min',\n field=models.FloatField(null=True),\n ),\n ]\n" }, { "alpha_fraction": 0.5157232880592346, "alphanum_fraction": 0.5833333134651184, "avg_line_length": 30.799999237060547, "blob_id": "b98026f2781e9605fe498c25a5f0e9d6abb5de20", "content_id": "e6d4e0a1ce2f7a72a5afba15ad3a75d9666ab992", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 636, "license_type": "no_license", "max_line_length": 237, "num_lines": 20, "path": "/GardenManager/main/migrations/0040_auto_20170322_1102.py", "repo_name": "ujm-projet-l3info-2017/Groupe2", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.10.6 on 2017-03-22 11:02\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('main', '0039_auto_20170322_1100'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='fruittype',\n name='type',\n field=models.PositiveSmallIntegerField(choices=[(0, 'berry'), (1, 'capsule'), (2, 'cone'), (3, 'aborted or absent'), (4, 'schizocarp'), (5, 'samara'), (6, 'cypsela'), (7, 'follicle'), (8, 'aggregate fruit'), (9, 'unknown')]),\n ),\n ]\n" }, { "alpha_fraction": 0.6222222447395325, "alphanum_fraction": 0.6493827104568481, "avg_line_length": 28.962963104248047, "blob_id": "936eb7bb94584a7824a07cf67c2dd24c60aba9cc", "content_id": "399c9f358d2dcee7890839d22b9e82fa53c144a1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 810, "license_type": "no_license", "max_line_length": 86, "num_lines": 27, "path": "/GardenManager/main/static/js/script_map.js", "repo_name": "ujm-projet-l3info-2017/Groupe2", "src_encoding": "UTF-8", "text": "\n\nvar map ;\n\nfunction takePicture () {\n var currentPosition = map.getCenter ();\n document.location.href =\n 'https://maps.googleapis.com/maps/api/staticmap?' 
+\n 'maptype=satellite' +\n '&center=' + currentPosition.lat () + ',' + currentPosition.lng () +\n '&zoom=' + map.getZoom () +\n '&size=640x400' +\n '&key=' + get_google_api_key () ;\n} ;\n\nfunction initialize () {\n var latlng = new google.maps.LatLng (46.227636, 2.213749);\n var options = {\n streetViewControl: false,\n center: latlng,\n zoom: 5,\n mapTypeId: google.maps.MapTypeId.HYBRID\n } ;\n map = new google.maps.Map (document.getElementById (\"google_map_api_div\"), options);\n} ;\n\nwindow.addEventListener (\"load\", function () {\n document.getElementById (\"creation_plan_take_shot_button\").onclick = takePicture\n}, false) ;" }, { "alpha_fraction": 0.508474588394165, "alphanum_fraction": 0.5779660940170288, "avg_line_length": 28.5, "blob_id": "b6a9e92ce4f9437d53859349c3d9477a42381fb7", "content_id": "e3e734e5023b29cce0847b2ca6fe475a132e2dd1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 590, "license_type": "no_license", "max_line_length": 194, "num_lines": 20, "path": "/GardenManager/main/migrations/0045_auto_20170322_1318.py", "repo_name": "ujm-projet-l3info-2017/Groupe2", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.10.6 on 2017-03-22 13:18\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('main', '0044_auto_20170322_1317'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='scent',\n name='scent',\n field=models.PositiveSmallIntegerField(choices=[(0, 'none'), (1, 'fragrant'), (2, 'spicy'), (3, 'sweet'), (4, 'lemony'), (5, 'musky'), (6, 'unpleasant'), (7, 'unknown')], null=True),\n ),\n ]\n" }, { "alpha_fraction": 0.48667851090431213, "alphanum_fraction": 0.5515097975730896, "avg_line_length": 44.040000915527344, "blob_id": "b6f4e49f9eaa5fc529294d275477a99b7260d003", "content_id": "6564297881d5e03a7f6477d102c84c44fc4b3390", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1126, "license_type": "no_license", "max_line_length": 429, "num_lines": 25, "path": "/GardenManager/main/migrations/0031_auto_20170319_2204.py", "repo_name": "ujm-projet-l3info-2017/Groupe2", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.10.6 on 2017-03-19 22:04\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('main', '0030_auto_20170319_2158'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='fruit',\n name='colour',\n field=models.PositiveSmallIntegerField(choices=[(0, 'red, brown'), (1, 'brown, purple'), (2, 'pink, brown, purple'), (3, 'violet, blue'), (4, 'all'), (5, 'white'), (6, 'orange'), (7, 'yellow'), (8, 'green-yellow'), (9, 'green'), (10, 'blue'), (11, 'violet'), (12, 'purple'), (13, 'pink'), (14, 'magenta'), (15, 'red'), (16, 'dark-red'), (17, 'brown'), (18, 'bronze'), (19, 'silver'), (20, 'black'), (21, 'unknown')]),\n ),\n migrations.AlterField(\n model_name='fruit',\n name='type',\n field=models.PositiveSmallIntegerField(choices=[(0, 'capsule'), (1, 'cone (winged seeds)'), (2, 'aborted (hybrids) or absent'), (3, 'schizocarp, capsule'), (4, 'samara'), (5, 'unknown')]),\n ),\n ]\n" }, { "alpha_fraction": 0.4939647316932678, "alphanum_fraction": 0.5394614934921265, "avg_line_length": 32.65625, "blob_id": "8f1936b47810f06c19f9145515a6e2a9e4e2a803", "content_id": 
"f53c0459c52dadf503fdfc74f2b8070e941a8ad3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1077, "license_type": "no_license", "max_line_length": 254, "num_lines": 32, "path": "/GardenManager/main/migrations/0003_auto_20170307_1818.py", "repo_name": "ujm-projet-l3info-2017/Groupe2", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.10.6 on 2017-03-07 18:18\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('main', '0002_auto_20170307_1810'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Month',\n fields=[\n ('id', models.CharField(max_length=64, primary_key=True, serialize=False, unique=True)),\n ('month', models.PositiveSmallIntegerField(choices=[(0, 'January'), (1, 'February'), (2, 'March'), (3, 'April'), (4, 'May'), (5, 'June'), (6, 'July'), (7, 'August'), (8, 'Septembre'), (9, 'Octobre'), (10, 'Novembre'), (11, 'Decembre')])),\n ],\n ),\n migrations.AddField(\n model_name='flower',\n name='months',\n field=models.ManyToManyField(to='main.Month'),\n ),\n migrations.AddField(\n model_name='fruit',\n name='months',\n field=models.ManyToManyField(to='main.Month'),\n ),\n ]\n" }, { "alpha_fraction": 0.5453137159347534, "alphanum_fraction": 0.5670022964477539, "avg_line_length": 38.121212005615234, "blob_id": "f8daef57ce291d4f6986d79f518303431828d277", "content_id": "61863a1bf986bc65616261650f21112762600a4d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1291, "license_type": "no_license", "max_line_length": 104, "num_lines": 33, "path": "/GardenManager/main/migrations/0001_initial.py", "repo_name": "ujm-projet-l3info-2017/Groupe2", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.10.6 on 2017-03-07 17:51\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Plant',\n fields=[\n ('id', models.CharField(max_length=64, primary_key=True, serialize=False, unique=True)),\n ('scientific_name', models.CharField(max_length=64, unique=True)),\n ('common_name', models.CharField(max_length=64, unique=True)),\n ('habit', models.PositiveSmallIntegerField(choices=[(0, 'twiggy')])),\n ('form', models.PositiveSmallIntegerField(choices=[(0, 'round')])),\n ('height', models.FloatField()),\n ('spread', models.FloatField()),\n ('growth_rate', models.PositiveSmallIntegerField(choices=[(0, 'moderate')])),\n ('climate', models.PositiveSmallIntegerField(choices=[(0, 'moderate')])),\n ('water', models.PositiveSmallIntegerField(choices=[(0, 'moderate')])),\n ('can_flower', models.BooleanField()),\n ('can_fruit', models.BooleanField()),\n ],\n ),\n ]\n" }, { "alpha_fraction": 0.5418983101844788, "alphanum_fraction": 0.5517809391021729, "avg_line_length": 39.14049530029297, "blob_id": "07b562d129efe05b44f1fb55ee8558ff823a7859", "content_id": "9800ae3b8ad653ac07f18c3edc649e7661368ea3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4857, "license_type": "no_license", "max_line_length": 153, "num_lines": 121, "path": "/GardenManager/main/migrations/0008_auto_20170309_2212.py", "repo_name": "ujm-projet-l3info-2017/Groupe2", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Generated by 
Django 1.10.6 on 2017-03-09 22:12\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('main', '0007_auto_20170309_1617'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Area',\n fields=[\n ('id', models.CharField(max_length=90, primary_key=True, serialize=False, unique=True)),\n ('x', models.FloatField()),\n ('y', models.FloatField()),\n ],\n ),\n migrations.CreateModel(\n name='Ground',\n fields=[\n ('id', models.CharField(max_length=90, primary_key=True, serialize=False, unique=True)),\n ('name', models.CharField(max_length=32)),\n ('ph', models.FloatField()),\n ('type', models.PositiveSmallIntegerField(choices=[(0, '')])),\n ],\n ),\n migrations.CreateModel(\n name='PlantSpot',\n fields=[\n ('id', models.CharField(max_length=90, primary_key=True, serialize=False, unique=True)),\n ('area', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='plant_spots', to='main.Area')),\n ],\n ),\n migrations.CreateModel(\n name='Position',\n fields=[\n ('id', models.CharField(max_length=90, primary_key=True, serialize=False, unique=True)),\n ('x', models.FloatField()),\n ('y', models.FloatField()),\n ('area', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='positions', to='main.Area')),\n ('plant_spot', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='positions', to='main.PlantSpot')),\n ],\n ),\n migrations.CreateModel(\n name='Project',\n fields=[\n ('id', models.CharField(max_length=90, primary_key=True, serialize=False, unique=True)),\n ('name', models.CharField(max_length=32)),\n ('creation_date', models.DateField()),\n ('update_date', models.DateField(auto_now=True)),\n ],\n ),\n migrations.RemoveField(\n model_name='image',\n name='flowers',\n ),\n migrations.RemoveField(\n model_name='image',\n name='fruits',\n ),\n migrations.RemoveField(\n model_name='image',\n name='plants',\n ),\n migrations.AddField(\n model_name='image',\n name='flower',\n field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='images', to='main.Flower'),\n ),\n migrations.AddField(\n model_name='image',\n name='fruit',\n field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='images', to='main.Fruit'),\n ),\n migrations.AddField(\n model_name='image',\n name='plant',\n field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='images', to='main.Plant'),\n ),\n migrations.AddField(\n model_name='user',\n name='session',\n field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='main.Session'),\n ),\n migrations.AlterField(\n model_name='plant',\n name='flower',\n field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='plants', to='main.Flower'),\n ),\n migrations.AlterField(\n model_name='plant',\n name='fruit',\n field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='plants', to='main.Fruit'),\n ),\n migrations.AlterField(\n model_name='session',\n name='last_operation',\n field=models.DateField(auto_now=True),\n ),\n migrations.AddField(\n model_name='project',\n name='user',\n field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='main.User'),\n ),\n migrations.AddField(\n model_name='plantspot',\n name='plant',\n 
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='plant_spots', to='main.Plant'),\n ),\n migrations.AddField(\n model_name='area',\n name='ground',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='areas', to='main.Ground'),\n ),\n ]\n" }, { "alpha_fraction": 0.688524603843689, "alphanum_fraction": 0.688524603843689, "avg_line_length": 21.600000381469727, "blob_id": "04c4ceefb694d362e1529d2d7c9193dd344d9157", "content_id": "e53655793bcd7cad13c61f8cf20db339bea69bb7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 793, "license_type": "no_license", "max_line_length": 74, "num_lines": 35, "path": "/GardenManager/main/views.py", "repo_name": "ujm-projet-l3info-2017/Groupe2", "src_encoding": "UTF-8", "text": "\n\nimport os\n\nfrom django.http import HttpResponseRedirect\nfrom django.shortcuts import render\nfrom models import User\n\n\n\ndef get_default_context (user=None):\n context = {\n \"user\": user or User (),\n \"title\": \"Garden Project\",\n \"error\": \"There are not any error!!! lel\",\n \"google_api_key\": os.environ.get (\"GOOGLE_MAPS_API_KEY\", \"\"),\n }\n return context\n\n\ndef root (request, user=None):\n return render (request, \"root.html\", context=get_default_context (user))\n\n\ndef login (request):\n request.session[\"_old_post\"] = request.POST\n return HttpResponseRedirect (\"/\")\n\n\ndef register (request):\n request.session[\"_old_post\"] = request.POST\n return HttpResponseRedirect (\"/\")\n\n\ndef logout (request):\n request.session[\"_old_post\"] = request.POST\n return HttpResponseRedirect (\"/\")\n" }, { "alpha_fraction": 0.5052238702774048, "alphanum_fraction": 0.5395522117614746, "avg_line_length": 44.423728942871094, "blob_id": "9d6482531929229d36f678e935721a39f8cf89fd", "content_id": "6f6b97ccb221bc2a54285d2ec355e3d23c4fc043", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2680, "license_type": "no_license", "max_line_length": 338, "num_lines": 59, "path": "/GardenManager/main/migrations/0021_auto_20170315_1339.py", "repo_name": "ujm-projet-l3info-2017/Groupe2", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.10.6 on 2017-03-15 13:39\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('main', '0020_auto_20170314_1320'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='ground',\n name='name',\n ),\n migrations.AddField(\n model_name='plant',\n name='exposures',\n field=models.ManyToManyField(related_name='plants', to='main.Exposure'),\n ),\n migrations.AddField(\n model_name='plant',\n name='grounds',\n field=models.ManyToManyField(related_name='plants', to='main.Ground'),\n ),\n migrations.AlterField(\n model_name='exposure',\n name='exposure',\n field=models.PositiveSmallIntegerField(choices=[(0, 'moderate'), (1, 'Filtered shade'), (2, 'Full sun'), (3, 'Part sun/part shade'), (4, 'Full sun only if soil kept moist'), (5, 'Deep shade'), (6, 'Sheltered')]),\n ),\n migrations.AlterField(\n model_name='form',\n name='form',\n field=models.PositiveSmallIntegerField(choices=[(0, 'all'), (1, 'climbing'), (2, 'columnar'), (3, 'creeping / mat-like'), (4, 'irregular'), (5, 'mounded'), (6, 'oval - horizontal'), (7, 'oval - vertical'), (8, 'pyramidal - narrowly'), (9, 'pyramidal - widely'), (10, 'round'), (11, 'vase'), (12, 'weeping'), 
(13, 'unknown')]),\n ),\n migrations.AlterField(\n model_name='ground',\n name='ph',\n field=models.FloatField(null=True),\n ),\n migrations.AlterField(\n model_name='ground',\n name='type',\n field=models.PositiveSmallIntegerField(choices=[(0, 'all'), (1, 'acidic'), (2, 'bog'), (3, 'well-drained'), (4, 'humus rich'), (5, 'alkaline'), (6, 'rocky or gravelly or dry')]),\n ),\n migrations.AlterField(\n model_name='habit',\n name='habit',\n field=models.PositiveSmallIntegerField(choices=[(0, 'all'), (1, 'arching'), (2, 'dense'), (3, 'epiphytic'), (4, 'fastigiate'), (5, 'horizontal'), (6, 'irregular'), (7, 'open'), (8, 'pendulous'), (9, 'spreading'), (10, 'stiffly upright'), (11, 'twiggy'), (12, 'upright'), (13, 'unknown')]),\n ),\n migrations.AlterField(\n model_name='water',\n name='water',\n field=models.PositiveSmallIntegerField(choices=[(0, 'low'), (1, 'moderate'), (2, 'high'), (3, 'wetlands'), (4, 'summer dry'), (5, 'aquatic'), (6, 'winter dry'), (7, 'dryland'), (8, 'unknown')]),\n ),\n ]\n" }, { "alpha_fraction": 0.5486018657684326, "alphanum_fraction": 0.5679094791412354, "avg_line_length": 34.761905670166016, "blob_id": "3c3daffe5b756b6dd30cbdb0ccb8bd70aefbafcc", "content_id": "e39e4286d784da42d221cf207cdf70efc909383a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1502, "license_type": "no_license", "max_line_length": 110, "num_lines": 42, "path": "/GardenManager/main/migrations/0002_auto_20170307_1810.py", "repo_name": "ujm-projet-l3info-2017/Groupe2", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.10.6 on 2017-03-07 18:10\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('main', '0001_initial'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Flower',\n fields=[\n ('id', models.CharField(max_length=64, primary_key=True, serialize=False, unique=True)),\n ('colour', models.PositiveSmallIntegerField(choices=[(0, 'brown')])),\n ('scent', models.PositiveSmallIntegerField(choices=[(0, 'fragrant')])),\n ],\n ),\n migrations.CreateModel(\n name='Fruit',\n fields=[\n ('id', models.CharField(max_length=64, primary_key=True, serialize=False, unique=True)),\n ('colour', models.PositiveSmallIntegerField(choices=[(0, 'brown')])),\n ('type', models.PositiveSmallIntegerField(choices=[(0, 'capsule')])),\n ],\n ),\n migrations.AddField(\n model_name='plant',\n name='flower',\n field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='main.Flower'),\n ),\n migrations.AddField(\n model_name='plant',\n name='fruit',\n field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='main.Fruit'),\n ),\n ]\n" }, { "alpha_fraction": 0.47080978751182556, "alphanum_fraction": 0.5367231369018555, "avg_line_length": 52.099998474121094, "blob_id": "905b91684209adca35d3bc147a1bc4260747146e", "content_id": "7d66c831e61ae13e786372b2b6b251ca3a1c4fbc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1593, "license_type": "no_license", "max_line_length": 468, "num_lines": 30, "path": "/GardenManager/main/migrations/0042_auto_20170322_1258.py", "repo_name": "ujm-projet-l3info-2017/Groupe2", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.10.6 on 2017-03-22 12:58\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, 
models\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('main', '0041_auto_20170322_1251'),\n    ]\n\n    operations = [\n        migrations.AlterField(\n            model_name='colour',\n            name='colour',\n            field=models.PositiveSmallIntegerField(choices=[(0, 'all'), (1, 'white'), (2, 'orange'), (3, 'yellow'), (4, 'green-yellow'), (5, 'green'), (6, 'blue'), (7, 'violet'), (8, 'purple'), (9, 'pink'), (10, 'magenta'), (11, 'red'), (12, 'dark-red'), (13, 'brown'), (14, 'bronze'), (15, 'silver'), (16, 'black'), (17, 'showy'), (18, 'unknown')], null=True),\n        ),\n        migrations.AlterField(\n            model_name='fruittype',\n            name='type',\n            field=models.PositiveSmallIntegerField(choices=[(0, 'all'), (1, 'aggregate fruit'), (2, 'achene'), (3, 'berry'), (4, 'capsule'), (5, 'cypsela'), (6, 'drupe'), (7, 'edible'), (8, 'follicle'), (9, 'grain'), (10, 'hesperidium'), (11, 'legume'), (12, 'multiple fruit'), (13, 'nut'), (14, 'pepo'), (15, 'pome'), (16, 'samara'), (17, 'schizocarp'), (18, 'silicle'), (19, 'silique'), (20, 'aborted or absent'), (21, 'cone'), (22, 'sporangium'), (23, 'unknown')]),\n        ),\n        migrations.AlterField(\n            model_name='scent',\n            name='scent',\n            field=models.PositiveSmallIntegerField(choices=[(0, 'none'), (1, 'fragrant'), (2, 'spicy'), (3, 'sweet'), (4, 'lemony'), (5, 'unknown')], null=True),\n        ),\n    ]\n" }, { "alpha_fraction": 0.5789396166801453, "alphanum_fraction": 0.5959237813949585, "avg_line_length": 28.189655303955078, "blob_id": "4cb51d02cc6369c98e3eb599ff5aa0404e360d71", "content_id": "819db4b6fd87fc8c863f2a05cf9da4193c9dbd80", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6771, "license_type": "no_license", "max_line_length": 106, "num_lines": 232, "path": "/GardenManager/main/utils/circular_list.py", "repo_name": "ujm-projet-l3info-2017/Groupe2", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\n\"\"\"\n I needed a circular list to get (for example) some months from november to\n february.\n So, instead of doing:\n $ months[11:] + months[:3]\n I'll do\n $ months[11:15]\n It's better. It isn't really worth the work, but I love doing this kind of thing,\n and in fact, it can easily be reused.\n\"\"\"\n\n\nclass CircularList (object):\n\n \"\"\"\n A circular list has no end.
If one tries to get an item at an index greater\n than the number of items in the list, the index acts as if it continues its path from\n the beginning of the list.\n \"\"\"\n\n def __init__ (self, iterable=None, max_length=None):\n self.real_list = list (iterable or ())\n if max_length is not None:\n if not isinstance (max_length, int):\n raise TypeError (\"max_length type must be int or NoneType.\")\n if max_length < 0:\n raise ValueError (\"max_length cannot be less than 0.\")\n if len (self.real_list) > max_length:\n self.real_list = self.real_list[len (self.real_list)-max_length:]\n self.max_length = max_length\n\n def __add__ (self, iterable):\n iter (iterable)\n return CircularList (iterable=self.real_list+list (iterable),\n max_length=self.max_length)\n\n def __contains__ (self, item):\n return item in self.real_list\n\n def __delitem__ (self, index):\n length = len (self.real_list)\n if length == 0:\n raise IndexError (\"list index out of range\")\n del self.real_list[index % length]\n\n def __delslice__ (self, start, end):\n if end - start > 0:\n length = len (self.real_list)\n if end - start < length:\n self.real_list = self.real_list[:start % length] + \\\n self.real_list[end % length:]\n else:\n self.real_list[:] = []\n\n def __eq__ (self, iterable):\n try:\n iter (iterable)\n except TypeError:\n return False\n return list (iterable) == self.real_list\n\n def __ge__ (self, iterable):\n try:\n iter (iterable)\n except TypeError:\n return False\n return list (iterable) >= self.real_list\n\n def __getitem__ (self, item_slice):\n def CircularList (item_slice):\n if self.max_length == 0:\n raise StopIteration ()\n if isinstance (item_slice, int):\n yield self.real_list[item_slice % len (self.real_list)]\n raise StopIteration ()\n start, stop, step = map (item_slice.__getattribute__,\n [\"start\", \"stop\", \"step\"])\n if stop is None:\n stop = len (self.real_list)\n if step is None:\n step = 1 if stop > start else -1\n if not isinstance (start, int):\n raise TypeError (\"start must be int, not %s\" % type (start))\n if not isinstance (stop, int):\n raise TypeError (\"stop must be int, not %s\" % type (stop))\n if step == 0:\n raise StopIteration ()\n while start > stop and step > 0:\n stop += len (self)\n while start < stop and step < 0:\n start += len (self)\n while start != stop:\n yield self.real_list[start % len (self.real_list)]\n start += step\n return CircularList (item_slice)\n\n def __getslice__ (self, start, stop):\n return self[start:stop:1]\n\n def __gt__ (self, iterable):\n try:\n iter (iterable)\n except TypeError:\n return False\n return list (iterable) > self.real_list\n\n def __iadd__ (self, iterable):\n self.real_list = self.real_list + list (iterable)\n if self.max_length is not None and len (self.real_list) > self.max_length:\n # only keep the last max_length items, as in __init__\n self.real_list = self.real_list[len (self.real_list)-self.max_length:]\n return self\n\n def __imul__ (self, value):\n if not isinstance (value, int):\n raise TypeError (\"Can only multiply CircularList with int\")\n # as in __mul__, multiplication leaves the circular content unchanged\n return self\n\n def __iter__ (self):\n return iter (self.real_list)\n\n def __le__ (self, iterable):\n try:\n iter (iterable)\n except TypeError:\n return False\n return list (iterable) <= self.real_list\n\n def __len__ (self):\n return len (self.real_list)\n\n def __lt__ (self, iterable):\n try:\n iter (iterable)\n except TypeError:\n return False\n return list (iterable) < self.real_list\n\n def __mul__ (self, value):\n if not isinstance (value, int):\n raise TypeError (\"Can only multiply CircularList with int\")\n return CircularList (self.real_list, self.max_length)\n\n def __ne__ (self, iterable):\n try:\n iter (iterable)\n except TypeError:\n return False\n
return list (iterable) != self.real_list\n\n def __repr__ (self):\n return \"<CircularList object at 0x%x>\" % id (self)\n\n def __reversed__ (self):\n return CircularList (reversed (self.real_list), self.max_length)\n\n def __rmul__ (self, value):\n return self * value\n\n def __setitem__ (self, index, item):\n if self.max_length is None or self.max_length != 0:\n length = len (self.real_list)\n if length != 0:\n self.real_list[index % length] = item\n\n def __setslice__ (self, start, end, item):\n if end - start > 0:\n length = len (self.real_list)\n try:\n item = list (item)\n except TypeError:\n item = [item]\n if end - start < length:\n self.real_list = self.real_list[:start % length] + item + \\\n self.real_list[end % length:]\n else:\n self.real_list[:] = item\n\n def __str__ (self):\n if self.max_length != 0 and len (self.real_list) != 0:\n return \"[..., %s, ...]\" % str (self.real_list)[1:-1]\n return \"[...]\"\n\n def append (self, item):\n self.real_list.append (item)\n if self.max_length is not None and len (self.real_list) > self.max_length:\n # only trim once the capacity is actually exceeded\n del self.real_list[0]\n\n def count (self, item):\n return self.real_list.count (item)\n\n def extend (self, item):\n self.real_list.extend (item)\n if self.max_length is not None and len (self.real_list) > self.max_length:\n # only keep the last max_length items\n self.real_list = self.real_list[len (self.real_list)-self.max_length:]\n\n def index (self, value, start=0, stop=None):\n if stop > self.max_length or stop is None:\n stop = self.max_length\n else:\n length = len (self.real_list)\n if stop > length:\n stop = length\n return list (self[start:stop]).index (value)\n\n def pop (self):\n if len (self.real_list):\n return self.real_list.pop ()\n\n def remove (self, value):\n return self.real_list.remove (value)\n\n def reverse (self):\n self.real_list = self.real_list[::-1]\n\n def sort (self, cmp=None, key=None, reverse=False):\n self.real_list.sort (cmp=cmp, key=key, reverse=reverse)\n\n\n\nif __name__ == \"__main__\":\n assert list (CircularList (range(14), 5)[14:1:-1]) == [13, 12, 11, 10, 9, 13, 12, 11, 10, 9, 13, 12, 11]\n assert CircularList (range(14), 5)[1:14:1]\n assert CircularList (range(14), 5)[1:]\n assert list (CircularList (range (10))) == range (10)\n s = CircularList (range (10))\n assert list (s[1473]) == [3]\n s.append (10)\n assert list (s[1473:1493]) == [10] + range (11) + range(8)\n assert str (s) == \"[..., 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, ...]\"\n print list (s[7:5])" } ]
44
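A minimal usage sketch for the CircularList class in circular_list.py above, showing the wrap-around slicing its module docstring motivates. The import path and the month abbreviations are assumptions for illustration only, not part of the repository.

from circular_list import CircularList  # assumed import path

months = CircularList (["Jan", "Feb", "Mar", "Apr", "May", "Jun",
                        "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"])
# Indices past the end wrap around, so November..February is a single slice:
assert list (months[10:14]) == ["Nov", "Dec", "Jan", "Feb"]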
brasqo/itp-w1-highest-number-cubed
https://github.com/brasqo/itp-w1-highest-number-cubed
8e2fa64524ef234d3dea3065a57d902b8881cda9
ed43cb41386c67c06f3a68b701b9d576412169dd
c6a8c3821b7b0325cc2927d91259261b1e83533b
refs/heads/master
2021-01-20T04:09:37.124526
2017-04-28T01:44:40
2017-04-28T01:44:40
89,650,891
0
0
null
2017-04-28T00:13:10
2017-03-24T01:37:51
2017-03-31T17:38:49
null
[ { "alpha_fraction": 0.47919464111328125, "alphanum_fraction": 0.5409395694732666, "avg_line_length": 19.72222137451172, "blob_id": "6114ee35f0bf12d255e5785bdb1cc8bbbee81fba", "content_id": "1bc78dbd35b4b0a33e2f1bb95fbd2b492d6ad27e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 745, "license_type": "permissive", "max_line_length": 55, "num_lines": 36, "path": "/highest_number_cubed/main.py", "repo_name": "brasqo/itp-w1-highest-number-cubed", "src_encoding": "UTF-8", "text": "\"\"\"This is the entry point of the program.\"\"\"\n\n\n# My attempt...Failed miserably.\n# def highest_number_cubed(limit):\n \n# #testresult = 12000\n \n# test2 = int(limit**(1./3.))\n \n# if test2**3 == limit:\n# print ('ok ' + str(test2) + '.')\n# else:\n# if test2**3 == limit:\n# return 'nope'\n\n# # print(highest_number_cubed(22))\n\n#Jason while solution\n\ndef highest_number_cubed(limit):\n \n number = 0\n \n while True:\n number += 1\n if number ** 3 > limit:\n return number - 1\n\nprint(highest_number_cubed(12000))\n\n\n# 1 ^ 3 = 1 <30? True...keep going \n# 2 ^ 3 = 8 <30? True...Keep going\n# 3 ^ 3 = 27 <30? True...keep going\n# 4 ^ 3 = 64 <30? False...Flag, refer to previous value" } ]
1
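The commented-out attempt in main.py above reached for a direct cube root; here is a hedged sketch of that idea with the float guess corrected by two small adjustment loops, so it agrees with the repository's while-loop solution. The function name is illustrative and not part of the repository.

def highest_number_cubed_via_root(limit):
    # a float cube root can land one off in either direction
    n = int(round(limit ** (1.0 / 3.0)))
    while n ** 3 > limit:         # back off if the guess overshot
        n -= 1
    while (n + 1) ** 3 <= limit:  # advance if it undershot
        n += 1
    return n

assert highest_number_cubed_via_root(12000) == 22  # 22**3 = 10648 <= 12000 < 12167 = 23**3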
BiancaStoecker/cpinsim
https://github.com/BiancaStoecker/cpinsim
7523288c48966e8044d8a50638aab6d7ec276ee5
bb52afb265c32bfedcd28c3e466f70d70cd31d95
820abd63f0c1807cabd10f8676896c5bdf4cc393
refs/heads/master
2021-08-23T15:54:27.260865
2017-12-05T14:36:08
2017-12-05T14:36:08
111,673,732
1
2
MIT
2017-11-22T11:06:45
2017-12-01T13:07:35
2017-12-05T14:36:09
Python
[ { "alpha_fraction": 0.6174951791763306, "alphanum_fraction": 0.6191090941429138, "avg_line_length": 34.59770202636719, "blob_id": "c7ce30ff1a47c6e327af32322e40e12288c2d546", "content_id": "14ee4b8e8a031b04dbfa18a847d3ff12379f326a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3098, "license_type": "permissive", "max_line_length": 100, "num_lines": 87, "path": "/cpinsim/protein.py", "repo_name": "BiancaStoecker/cpinsim", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nfrom bitarray import bitarray\n\nclass Protein:\n \"\"\" Representing a protein instance in the simulated cell.\n Contains the state of the protein in it, a pointer to the protein-type\n and the neccesary methods for protein interaction.\n \"\"\"\n\n def __init__(self, name, protein_pointer):\n \"\"\" Initialize a protein instance with its name, the information to the protein type\n and a state without any other proteins bound.\n \"\"\"\n self.name = name\n self.index = protein_pointer.index\n self.interactions = protein_pointer.interactions\n self.map_interactor_domains = protein_pointer.map_interactor_domains\n self.domains = protein_pointer.domains\n\n self.state = bitarray(len(self.index))\n self.state.setall(False)\n\n self.number_of_free_domains = len(self.domains)\n\n\n def get_possible_interactors(self):\n \"\"\" Return a list of the possible interactors of this protein.\n \"\"\"\n return list(self.map_interactor_domains.keys())\n\n\n def get_domains_to_interactor(self, interactor):\n \"\"\" Return the domains at which the given $interactor can interact with\n this protein.\n \"\"\"\n return self.map_interactor_domains[interactor]\n\n\n def is_interacting(self, protein, domain):\n \"\"\" Test if this protein instance is already interacting with $protein\n at $domain.\n \"\"\"\n if not (protein, domain) in self.index:\n return False\n\n i = self.index[(protein, domain)]\n return self.state[i]\n\n\n def is_association_possible(self, protein, domain):\n \"\"\" Test if this protein can associate with $protein at $domain.\n \"\"\"\n if not (protein, domain) in self.index:\n return False\n\n s = self.state\n i = self.index[(protein, domain)]\n # each clause contains two bitarrays: p=positive and n=negative\n for p, n in self.interactions[i]:\n # all bits from positive clauses must be set and no bit from a negative is allowed\n if ((p&s) == p) and ((n&(~s)) == n):\n return True\n return False\n\n\n def associate(self, protein, domain):\n \"\"\" Perfom an association beteween this protein and $protein at $domain.\n In the updated state the bit of index(protein,domain) is set to 1.\n \"\"\"\n i = self.index[(protein, domain)]\n assert not self.state[i], \"For a association the proteins should not be interacting before.\"\n self.state[i] = True\n self.number_of_free_domains -= 1\n return\n\n\n def dissociate(self, protein, domain):\n \"\"\" Perfom an dissociation beteween of the interaction this protein and\n $protein at $domain. 
In the updated state the bit of\n index(protein,domain) is set to 0.\n \"\"\"\n i = self.index[(protein, domain)]\n assert self.state[i], \"For a dissociation the proteins must be interacting first.\"\n self.state[i] = False\n self.number_of_free_domains += 1\n return\n\n" }, { "alpha_fraction": 0.5930806398391724, "alphanum_fraction": 0.5990923047065735, "avg_line_length": 53.48373031616211, "blob_id": "32e5a1d3bd5463e2056743aad32f7901888a9cc2", "content_id": "9b4a8e781e65b697949e815bf2fde7cd56396298", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 25118, "license_type": "permissive", "max_line_length": 188, "num_lines": 461, "path": "/cpinsim/annotate_constraints.py", "repo_name": "BiancaStoecker/cpinsim", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nimport csv\nfrom collections import defaultdict\nimport itertools\n\nimport cpinsim.constraint_io as io\n\ndomain_counter = 0 # counter for artificial domains\nnegative_counter = -1 # temporary negative counter to distinguish competitions/allosterics\nboundary = -1 # boundary between competitions and allosterics\n\n# host -> interactor -> set of domains\n# str -> str -> set\nmap_interactions_to_domains = defaultdict(lambda: defaultdict(set))\n\n# host -> domain_host -> set of (interactor, domain_interactor)\n# str -> str -> set(tuple)\nmap_domains_to_interactors = defaultdict(lambda: defaultdict(set))\n\n# host -> interactor -> (domain_host_interactor, domain_interactor)-> id_count\n# str -> str -> tuple -> int\nallosteric_interactors = defaultdict(lambda: defaultdict(lambda: defaultdict(int)))\n\n# host -> activator/inhibitor -> id_count -> (domain_host_activator/inhibitor, domain_activator/inhibitor)\n# str -> str -> int -> tuple\nallosteric_activators = defaultdict(lambda: defaultdict(lambda: defaultdict(tuple)))\nallosteric_inhibitors = defaultdict(lambda: defaultdict(lambda: defaultdict(tuple)))\n\n\ndef is_real(domain):\n \"\"\" Return if the domain is real (not None, not artificial)\n \"\"\"\n return domain is not None and type(domain) != int\n\n\ndef is_artificial(domain):\n \"\"\" Return if the domain is artificial.\n \"\"\"\n return domain is not None and type(domain) == int and domain > 0\n\n\n#---------- Reading the files ----------#\n\ndef add_interaction(p1, domain_p1, p2, domain_p2):\n \"\"\" Add the interaction in both directions to map_domains_to_interactors if\n not already present, remove interactions with None if already domains known.\n \"\"\"\n if domain_p1 is not None or \\\n (p2, domain_p2) not in [(i, d) for dom in map_domains_to_interactors[p1] for i, d in map_domains_to_interactors[p1][dom]]:\n map_domains_to_interactors[p1][domain_p1].add((p2, domain_p2))\n if domain_p1 is not None and (p2, domain_p2) in map_domains_to_interactors[p1][None]:\n map_domains_to_interactors[p1][None].remove((p2, domain_p2))\n map_domains_to_interactors[p2][domain_p2].remove((p1, None))\n\n if domain_p2 is not None or \\\n (p1, domain_p1) not in [(i, d) for dom in map_domains_to_interactors[p2] for i, d in map_domains_to_interactors[p2][dom]]:\n map_domains_to_interactors[p2][domain_p2].add((p1, domain_p1))\n if domain_p2 is not None and (p1, domain_p1) in map_domains_to_interactors[p2][None]:\n map_domains_to_interactors[p2][None].remove((p1, domain_p1))\n map_domains_to_interactors[p1][domain_p1].remove((p2, None))\n\n\ndef appoint_negative_domains(domain_1, domain_2):\n \"\"\" If input domains are None give two different negative domains.\n \"\"\"\n 
global negative_counter\n if domain_1 is None:\n domain_1 = negative_counter\n negative_counter -= 1\n if domain_2 is None:\n domain_2 = negative_counter\n negative_counter -= 1\n return (domain_1, domain_2)\n\n\ndef read_interactions_without_constraints(files):\n \"\"\" Read interactions between proteins without constraints.\n Files must have two columns for the two interacting proteins in each line.\n \"\"\"\n for (p1, domain_p1, p2, domain_p2, _) in io.yield_interactions_without_constraints(files):\n add_interaction(p1, domain_p1, p2, domain_p2)\n\n\ndef read_competitions(files):\n \"\"\" Read interactions with constraints of competing proteins.\n Files must have two columns, one with the host and one with\n comma-separated competitors.\n \"\"\"\n global negative_counter, boundary\n for (host, domains_host, competitors, _) in io.yield_competitions(files):\n inc_counter = False\n i = 0\n for competitor in competitors:\n domain_host, i = io.set_domain_at_host(domains_host, i)\n competitor, domain_competitor = io.split_interactor_and_domain(competitor)\n if domain_host is None:\n domain_host = negative_counter\n inc_counter = True\n add_interaction(host, domain_host, competitor, domain_competitor)\n\n if inc_counter:\n negative_counter -= 1\n boundary = negative_counter\n\n\ndef read_allosteric_effects(files):\n \"\"\" Read interactions with constraints of allosteric effects.\n Files must have four columns: host, interactors, activators, inhibitors.\n \"\"\"\n id_count = 0\n for (host, domains_host, interactors, activators, inhibitors, _) in io.yield_allosteric_effects(files):\n i = 0\n for interactor in interactors:\n interactor, domain_interactor = io.split_interactor_and_domain(interactor)\n domain_host_interactor, i = io.set_domain_at_host(domains_host, i)\n\n (domain_host_interactor, domain_interactor) = appoint_negative_domains(domain_host_interactor, domain_interactor)\n add_interaction(host, domain_host_interactor, interactor, domain_interactor)\n allosteric_interactors[host][interactor][(domain_host_interactor, domain_interactor)] = id_count\n\n for activator in activators:\n if activator == \"\": # skip empty activators\n continue\n activator, domain_activator = io.split_interactor_and_domain(activator)\n domain_host_activator, i = io.set_domain_at_host(domains_host, i)\n\n (domain_host_activator, domain_activator) = appoint_negative_domains(domain_host_activator, domain_activator)\n add_interaction(host, domain_host_activator, activator, domain_activator)\n allosteric_activators[host][activator][id_count] = (domain_host_activator, domain_activator)\n\n for inhibitor in inhibitors:\n if inhibitor == \"\": # skip empty inhibitors\n continue\n inhibitor, domain_inhibitor = io.split_interactor_and_domain(inhibitor)\n domain_host_inhibitor, i = io.set_domain_at_host(domains_host, i)\n\n (domain_host_inhibitor, domain_inhibitor) = appoint_negative_domains(domain_host_inhibitor, domain_inhibitor)\n add_interaction(host, domain_host_inhibitor, inhibitor, domain_inhibitor)\n allosteric_inhibitors[host][inhibitor][id_count] = (domain_host_inhibitor, domain_inhibitor)\n\n i += 1\n id_count += 1\n\n\n#---------- Normalize the existing domains, inference of domains and distribution of artificial domains ----------#\n\n\ndef propagate_changes_to_allosterics(host, domain_host, new_dom, interactor, domain_interactor, swapped=False):\n \"\"\" Propagate a new domain for allosteric effects and remove the old one in\n in all data structures.\n \"\"\"\n if host in allosteric_interactors and 
interactor in allosteric_interactors[host] and \\\n (domain_host, domain_interactor) in allosteric_interactors[host][interactor]:\n\n id_count = allosteric_interactors[host][interactor][(domain_host, domain_interactor)]\n del allosteric_interactors[host][interactor][(domain_host, domain_interactor)]\n if not swapped:\n allosteric_interactors[host][interactor][(new_dom, domain_interactor)] = id_count\n else:\n allosteric_interactors[host][interactor][(domain_host, new_dom)] = id_count\n\n for data in (allosteric_activators, allosteric_inhibitors):\n if host in data and interactor in data[host]:\n for id_count in data[host][interactor]:\n if (domain_host, domain_interactor) == data[host][interactor][id_count]:\n del data[host][interactor][id_count]\n if not swapped:\n data[host][interactor][id_count] = (new_dom, domain_interactor)\n else:\n data[host][interactor][id_count] = (domain_host, new_dom)\n\n\ndef normalize():\n \"\"\" Merge overlapping domains and propagate the information throughout the\n whole network.\n \"\"\"\n proteins = list(map_domains_to_interactors.keys())\n for host in proteins:\n update_needed = True\n while update_needed:\n update_needed = False\n domains = [d for d in map_domains_to_interactors[host] if d is not None and type(d) != int]\n if len(domains) > 1:\n for d1, d2 in itertools.combinations(domains, 2):\n new_dom = get_merged_domain(d1, d2)\n if new_dom is not None:\n update_needed = True\n old1 = map_domains_to_interactors[host][d1]\n del map_domains_to_interactors[host][d1]\n map_domains_to_interactors[host][new_dom].update(old1)\n\n old2 = map_domains_to_interactors[host][d2]\n del map_domains_to_interactors[host][d2]\n map_domains_to_interactors[host][new_dom].update(old2)\n\n for interactor, domain_interactor in old1:\n if (host, d1) in map_domains_to_interactors[interactor][domain_interactor]:\n map_domains_to_interactors[interactor][domain_interactor].remove((host, d1))\n map_domains_to_interactors[interactor][domain_interactor].add((host, new_dom))\n propagate_changes_to_allosterics(host, d1, new_dom, interactor, domain_interactor)\n\n for interactor, domain_interactor in old2:\n if (host, d2) in map_domains_to_interactors[interactor][domain_interactor]:\n map_domains_to_interactors[interactor][domain_interactor].remove((host, d2))\n map_domains_to_interactors[interactor][domain_interactor].add((host, new_dom))\n propagate_changes_to_allosterics(host, d2, new_dom, interactor, domain_interactor)\n break\n\n\ndef get_merged_domain(domain_interactor, domain):\n \"\"\" Test if the domains are overlapping and return the merged domain. 
If the\n domains are not overlapping or not an interval return None.\n \"\"\"\n if domain_interactor is None or domain is None or type(domain_interactor) == int or type(domain) == int:\n return None\n min1, *max1 = domain_interactor.split(\"-\")\n if max1 == []: # domain_interactor is not an interval\n return None\n\n min2, *max2 = domain.split(\"-\")\n if max2 == []: # domain is not an interval\n return None\n\n cut0, cut1 = max(int(min1), int(min2)), min(int(max1[0]), int(max2[0]))\n if cut1 - cut0 > 0:\n new_min, new_max = min(int(min1), int(min2)), max(int(max1[0]), int(max2[0]))\n new_dom = \"{}-{}\".format(new_min, new_max)\n return new_dom\n return None\n\n\ndef remove_redundant_nones():\n \"\"\" Remove Nones where domains are known and domains without interactors after\n merging.\n \"\"\"\n proteins = sorted(list(map_domains_to_interactors.keys()))\n to_remove = set()\n to_delete = set()\n for host in proteins:\n for domain_host in map_domains_to_interactors[host]:\n if len(map_domains_to_interactors[host][domain_host]) == 0:\n to_delete.add((host, domain_host))\n for interactor, domain_interactor in map_domains_to_interactors[host][domain_host]:\n if not(type(domain_host) == int and domain_host < 0) and domain_interactor is None and [i for i, d in map_domains_to_interactors[host][domain_host]].count(interactor) > 1:\n to_remove.add((host, domain_host, interactor, domain_interactor))\n elif not(type(domain_host) == int and domain_host < 0) and domain_interactor is None and \\\n [i for dom in map_domains_to_interactors[host] for i, d in map_domains_to_interactors[host][dom]].count(interactor) > 1:\n to_remove.add((host, domain_host, interactor, domain_interactor))\n else:\n map_interactions_to_domains[host][interactor].add(domain_host)\n map_interactions_to_domains[interactor][host].add(domain_interactor)\n\n for host, domain_host, interactor, domain_interactor in to_remove:\n map_domains_to_interactors[host][domain_host].remove((interactor, domain_interactor))\n if len(map_domains_to_interactors[host][domain_host]) == 0:\n to_delete.add((host, domain_host))\n for host, domain_host in to_delete:\n del map_domains_to_interactors[host][domain_host]\n\n\ndef domain_inference_competition_host():\n \"\"\" Try to infer unknown domains for host of competitions.\n \"\"\"\n proteins = list(map_domains_to_interactors.keys())\n for host in proteins:\n update_needed = True\n while update_needed:\n update_needed = False\n to_change = set()\n domains = [d for d in map_domains_to_interactors[host] if type(d) == int and d < 0 and d >= boundary]\n for domain_host in domains:\n for interactor, domain_interactor in map_domains_to_interactors[host][domain_host]:\n if is_real(domain_interactor):\n candidates = [(p, d) for p, d in map_domains_to_interactors[interactor][domain_interactor] if d != domain_host]\n for p, d in candidates:\n if host == p:\n update_needed = True\n to_change.add((host, domain_host, d, interactor, domain_interactor))\n if update_needed:\n break\n if update_needed:\n for (host, domain_host, new_dom, interactor, domain_interactor) in to_change:\n #print((host, domain_host, new_dom, interactor, domain_interactor))\n map_domains_to_interactors[host][new_dom].add((interactor, domain_interactor))\n map_domains_to_interactors[interactor][domain_interactor].add((host, new_dom))\n if (host, domain_host) in map_domains_to_interactors[interactor][domain_interactor]:\n map_domains_to_interactors[interactor][domain_interactor].remove((host,domain_host))\n if host == interactor and (host, 
None) in map_domains_to_interactors[host][new_dom]:\n map_domains_to_interactors[host][new_dom].remove((host, None))\n map_domains_to_interactors[host][new_dom].add((host, new_dom))\n if domain_host in map_domains_to_interactors[host]:\n del map_domains_to_interactors[host][domain_host]\n propagate_changes_to_allosterics(host, domain_host, new_dom, interactor, domain_interactor)\n propagate_changes_to_allosterics(interactor, domain_interactor, new_dom, host, domain_host, swapped=True)\n\n\ndef domain_inference_competitors():\n \"\"\" Try to infer unknown domains for competitors.\n \"\"\"\n proteins = list(map_domains_to_interactors.keys())\n something_changed = True\n while something_changed:\n something_changed = False\n for host in proteins:\n domains = [d for d in map_domains_to_interactors[host] if is_real(d) and len(map_domains_to_interactors[host][d]) > 1]\n update_needed = True\n while update_needed:\n update_needed = False\n inferred_domains = set()\n for domain_host in domains:\n for ((interactor1, domain_interactor1), (interactor2, domain_interactor2)) in itertools.combinations(map_domains_to_interactors[host][domain_host], 2):\n if domain_interactor1 is None and domain_interactor2 is not None:\n query, ref, ref_domain = interactor1, interactor2, domain_interactor2\n elif domain_interactor1 is not None and domain_interactor2 is None:\n query, ref, ref_domain = interactor2, interactor1, domain_interactor1\n else:\n continue\n for second_host in proteins:\n domains = [d for d in map_domains_to_interactors[second_host] if is_real(d) and len(map_domains_to_interactors[second_host][d]) > 1]\n for dom in domains:\n if second_host == host and dom == domain_host:\n continue\n if (ref, ref_domain) in map_domains_to_interactors[second_host][dom]:\n candidates = [(p, d) for p, d in map_domains_to_interactors[second_host][dom] if p == query and d is not None]\n if len(candidates) != 0:\n inferred_domains.update(set(candidates))\n update_needed = True\n something_changed = True\n if update_needed:\n break\n if update_needed:\n #print(host, domain_host, inferred_domains)\n for protein, new_dom in inferred_domains:\n map_domains_to_interactors[protein][new_dom].add((host, domain_host))\n map_domains_to_interactors[host][domain_host].add((protein, new_dom))\n if (protein, None) in map_domains_to_interactors[host][domain_host]:\n map_domains_to_interactors[host][domain_host].remove((protein, None))\n if (host, domain_host) in map_domains_to_interactors[protein][None]:\n map_domains_to_interactors[protein][None].remove((host, domain_host))\n if protein == host and (protein, None) in map_domains_to_interactors[protein][new_dom]:\n map_domains_to_interactors[protein][new_dom].remove((protein, None))\n map_domains_to_interactors[protein][new_dom].add((protein, new_dom))\n propagate_changes_to_allosterics(protein, None, new_dom, host, domain_host)\n propagate_changes_to_allosterics(host, domain_host, new_dom, protein, None, swapped=True)\n\n\ndef apply_artificial_domains():\n \"\"\" Choose domains for all negative placeholder domains and Nones. 
If one real\n domain is known use this domain, otherwise choose a unique artificial domain.\n Competitions with unknown domain at the host get the same domain.\n \"\"\"\n global domain_counter, boundary\n proteins = sorted(list(map_domains_to_interactors.keys()))\n for host in proteins:\n update_needed = True\n while update_needed:\n update_needed = False\n changed = set()\n inc_counter = False\n for domain_host in map_domains_to_interactors[host]:\n if not (type(domain_host) == int and domain_host < 0) and domain_host is not None:\n continue\n for interactor, domain_interactor in map_domains_to_interactors[host][domain_host]:\n mapped_domains = map_interactions_to_domains[host][interactor]\n if None in mapped_domains:\n mapped_domains.remove(None)\n if type(domain_host) == int and domain_host < 0 and domain_host >= boundary: # unknown domain at host of competition\n new_dom = domain_counter\n map_interactions_to_domains[host][interactor].add(new_dom)\n inc_counter = True\n elif len(mapped_domains) > 1 or len(mapped_domains) == 0: # no or multiple domains are known\n new_dom = domain_counter\n domain_counter += 1\n map_interactions_to_domains[host][interactor].add(new_dom)\n else: # len(mapped_domains) == 1, exactly one domain is known\n new_dom = mapped_domains.pop()\n if type(new_dom) == int:\n new_dom = domain_counter\n domain_counter += 1\n map_interactions_to_domains[host][interactor].add(new_dom)\n changed.add((interactor, domain_interactor, host, domain_host, new_dom))\n update_needed = True\n break\n if update_needed:\n if inc_counter:\n domain_counter += 1\n for (interactor, domain_interactor, host, domain_host, new_dom) in changed:\n map_domains_to_interactors[host][new_dom].add((interactor, domain_interactor))\n if (host, domain_host) in map_domains_to_interactors[interactor][domain_interactor]:\n map_domains_to_interactors[interactor][domain_interactor].remove((host, domain_host))\n map_domains_to_interactors[interactor][domain_interactor].add((host, new_dom))\n if domain_host in map_domains_to_interactors[host]:\n del map_domains_to_interactors[host][domain_host]\n if host == interactor and (host, None) in map_domains_to_interactors[host][new_dom]:\n map_domains_to_interactors[host][new_dom].remove((host, None))\n map_domains_to_interactors[host][new_dom].add((host, new_dom))\n\n propagate_changes_to_allosterics(host, domain_host, new_dom, interactor, domain_interactor)\n propagate_changes_to_allosterics(interactor, domain_interactor, new_dom, host, domain_host, swapped=True)\n\n\n #---------- Output the annotated interactions and constraints ----------#\n\ndef write_annotated_files(output_i, output_c, output_a):\n \"\"\" Write the annotated constraints into the three output files.\n \"\"\"\n lines_i, lines_a, lines_c = [], [], []\n for host in allosteric_interactors:\n for interactor in allosteric_interactors[host]:\n for (domain_host, domain_interactor) in allosteric_interactors[host][interactor]:\n id_count = allosteric_interactors[host][interactor][(domain_host, domain_interactor)]\n host_string = \"{}[{}]\".format(host, domain_host)\n interactor_string = \"{}[{}]\".format(interactor, domain_interactor)\n activators_string, inhibitors_string = [], []\n for activator in allosteric_activators[host]:\n if id_count in allosteric_activators[host][activator]:\n (domain_host_activator, domain_activator) = allosteric_activators[host][activator][id_count]\n activators_string.append(\"{}[{}]\".format(activator, domain_activator))\n host_string += 
\"[{}]\".format(domain_host_activator)\n for inhibitor in allosteric_inhibitors[host]:\n if id_count in allosteric_inhibitors[host][inhibitor]:\n (domain_host_inhibitor, domain_inhibitor) = allosteric_inhibitors[host][inhibitor][id_count]\n inhibitors_string.append(\"{}[{}]\".format(inhibitor, domain_inhibitor))\n host_string += \"[{}]\".format(domain_host_inhibitor)\n line = [host_string, interactor_string, \",\".join(activators_string), \",\".join(inhibitors_string)]\n if line not in lines_a:\n lines_a.append(line)\n\n for host in sorted(map_domains_to_interactors):\n for domain_host in map_domains_to_interactors[host]:\n host_string = \"{}[{}]\".format(host, domain_host)\n interactors = map_domains_to_interactors[host][domain_host]\n if len(interactors) == 0:\n continue\n if len(interactors) == 1:\n interactor, domain_interactor = interactors.pop()\n interactors_string = \"{}[{}]\".format(interactor, domain_interactor)\n line = sorted([host_string, interactors_string])\n if line not in lines_i:\n lines_i.append(line)\n else:\n temp = []\n for interactor, domain_interactor in interactors:\n temp.append(\"{}[{}]\".format(interactor, domain_interactor))\n interactors_string = \",\".join(sorted(temp))\n line = [host_string, interactors_string]\n if line not in lines_c:\n lines_c.append(line)\n\n writer = csv.writer(open(output_i, 'w'), delimiter='\\t', lineterminator='\\n')\n writer.writerow([\"Interactor_A\", \"Interactor_B\"])\n writer.writerows(sorted(lines_i))\n\n writer = csv.writer(open(output_a, 'w'), delimiter='\\t', lineterminator='\\n')\n writer.writerow([\"Host\", \"Interactor\", \"Activator\", \"Inhibitor\"])\n writer.writerows(sorted(lines_a))\n\n writer = csv.writer(open(output_c, 'w'), delimiter='\\t', lineterminator='\\n')\n writer.writerow([\"Host\", \"Competitors\"])\n writer.writerows(sorted(lines_c))\n\n" }, { "alpha_fraction": 0.5973219275474548, "alphanum_fraction": 0.6027556657791138, "avg_line_length": 36.875, "blob_id": "8027a40fcb7cf1e9cb6912482bd22683765fcb31", "content_id": "70eb836ddb9ae0bed5b8c3d7bc15cffd5e9a65e5", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5153, "license_type": "permissive", "max_line_length": 138, "num_lines": 136, "path": "/cpinsim/protein_type.py", "repo_name": "BiancaStoecker/cpinsim", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nimport csv\nfrom collections import defaultdict\nfrom bitarray import bitarray\n\n\ndef read_proteins(filename):\n \"\"\" Read the text representation of the proteins into the following data\n structure: Each protein has a index (PxD->N) and an array to determine\n the possible interactions. In the array for each interactor (PxD) there\n are clauses representing the constraints. 
Each clause consists of two\n bitarrays (positive and negative).\n \"\"\"\n proteins = dict()\n name = \"\"\n # read the information for one protein, then build the corresponding array\n reader = csv.reader(open(filename, newline=''), delimiter='\\t')\n for line in reader:\n # skip empty lines\n if line == \"\\n\" or len(line) == 0:\n continue\n # beginning of new protein in file\n if line[0][0] == \">\":\n if name != \"\": # do not build array yet if first protein\n index = build_index(interactors)\n protein_array = build_array(index, interactions)\n proteins[name] = ProteinType(name, index, protein_array)\n\n name = line[0][1:]\n # new protein has new interactors and new interactions\n interactors = set()\n interactions = defaultdict(list)\n continue\n # parse transitions into dictionary\n else:\n interactor, *constraints = line\n interactor = tuple(interactor.replace(\"(\", \"\").replace(\")\", \"\").split(\",\"))\n interactors.add(interactor)\n for clause in constraints:\n terms = clause.split(\";\")\n split_terms = [tuple(term.replace(\"(\", \"\").replace(\")\", \"\").split(\",\")) for term in terms]\n interactions[interactor].append(split_terms)\n\n # for last protein in file:\n index = build_index(interactors)\n protein_array = build_array(index, interactions)\n proteins[name] = ProteinType(name, index, protein_array)\n\n return proteins\n\n\ndef build_index(interactors):\n \"\"\" Build the index (P x D) -> N for all interactors of the current protein.\n \"\"\"\n index = dict() # P x D -> N\n sorted_interactors = sorted(list(interactors))\n for p, d in sorted_interactors:\n index[(p, d)] = sorted_interactors.index((p, d))\n return index\n\n\ndef build_array(index, interactions):\n \"\"\" Build the array for the current protein. In the first dimension there\n is an entry for each interactor. In the second dimension there are the\n constraints represented as clauses consisting of two bitarrays.\n \"\"\"\n return [[build_bitarray(index, clause) for clause in interactions[interactor]] for interactor in sorted(index, key=index.__getitem__)]\n\n\ndef build_bitarray(index, clause):\n \"\"\" Construct the two bitarrays positive and negative for a given clause.\n The positive array has a 1 for each protein that has to be bound already\n for an interaction to be possible, while in the negative array the bits\n are set for competitions and inhibitions.\n \"\"\"\n positive = bitarray(len(index))\n positive.setall(False)\n negative = bitarray(len(index))\n negative.setall(False)\n\n for protein, domain in clause:\n # set bits in negative-array\n if protein[0] == \"-\":\n # special case for occupied domains\n if protein[1] == \"*\":\n for p, d in index:\n if d == domain:\n negative[index[(p, d)]] = True\n else:\n negative[index[(protein[1:], domain)]] = True # [1:] to cut of the \"-\"\n\n # set bits in positive-array\n else:\n positive[index[(protein, domain)]] = True\n #print(clause, positive, negative)\n return (positive, negative)\n\n\nclass ProteinType:\n \"\"\" A class for the different protein types. 
Each protein type consists of\n a name, an index for the possible combinations of proteins and domains\n and the array describing the constraints for interactions.\n \"\"\"\n\n def __init__(self, name, index, interactions):\n \"\"\" Initialize a protein with its name, index and possible interactions.\n \"\"\"\n self.name = name\n self.index = index\n self.interactions = interactions\n\n self.domains = set()\n self.map_interactor_domains = defaultdict(set)\n for p, d in self.index:\n self.map_interactor_domains[p].add(d)\n if not self.are_domains_overlapping(self.domains, d):\n self.domains.add(d)\n\n\n def are_domains_overlapping(self, domains, new_domain):\n \"\"\" Test if at least one domain in the set domains is overlapping with the new_domain.\n \"\"\"\n min2, *max2 = new_domain.split(\"-\")\n if max2 == []: # new_domain is not an interval\n return False\n\n for d in domains:\n min1, *max1 = d.split(\"-\")\n if max1 == []: # d is not an interval\n continue\n\n cut0, cut1 = max(int(min1), int(min2)), min(int(max1[0]), int(max2[0]))\n if cut1 - cut0 > 0:\n return True\n return False\n\n\n" }, { "alpha_fraction": 0.5985514521598816, "alphanum_fraction": 0.6139420866966248, "avg_line_length": 36.90196228027344, "blob_id": "6394922096743de7ea7983429d90a4cdcffa623c", "content_id": "6939bf31199d17404b78844aa378bc91bd6b8eb4", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7732, "license_type": "permissive", "max_line_length": 165, "num_lines": 204, "path": "/cpinsim/abstract_simulation.py", "repo_name": "BiancaStoecker/cpinsim", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nimport random\nimport sys\nimport datetime\nimport csv\n\nimport itertools\nfrom scipy.stats import poisson\nimport networkx as nx\n\n\nclass AbstractSimulation:\n \"\"\" Functions for the simulation of interactions between proteins that are\n needed independently from the consideration of protein positions.\n \"\"\"\n\n def __init__(self, output_log):\n \"\"\" Initialization of the abstract Simulation.\n \"\"\"\n\n if output_log is not None:\n if output_log[-2:] == \"gz\":\n self.output_log = gzip.open(output_log, \"wt\", compresslevel=4)\n else:\n self.output_log = open(output_log, \"wt\")\n else:\n self.output_log = sys.stdout\n\n self.free = str(\"--\")\n\n # nodes = proteinpositions with proteinname as label,\n # edges = interactions, have the used domains as attribute\n self.interactions = nx.Graph()\n self.positions = []\n\n self.map_positions_protein = dict()\n self.num_protein_instances = 0\n self.protein_instances = 0\n self.number_of_edges = 0\n self.number_of_singletons = 0\n\n # number of edges and complexes from last 10 steps, for recognizing convergence\n self.edgecount_ass = []\n self.edgecount_dis = []\n self.mean_edge_ass = 0\n self.mean_edge_dis = 0\n\n self.convergence_edge = None\n\n\n def parse_concentrations(self, concentrations):\n \"\"\" Parse the concentrations of the csv-file and and normalize them.\n \"\"\"\n if concentrations is None:\n return concentrations\n parsed_concentrations = []\n sum_concentrations = 0\n reader = csv.reader(open(concentrations, newline=''), delimiter='\\t')\n for name, value in reader:\n sum_concentrations += float(value)\n parsed_concentrations.append((name, float(value)))\n for i, (name, con) in enumerate(parsed_concentrations):\n parsed_concentrations[i] = (name, con/sum_concentrations)\n return parsed_concentrations\n\n\n def get_sampled_indices(self, number, probability):\n \"\"\" 
Return the sampled indices according to poisson distribution.\n \"\"\"\n number_to_sample = min(number, poisson.rvs(probability * number))\n return random.sample(range(0, number), number_to_sample)\n\n\n def associate(self, index):\n \"\"\" Attempt an association for protein at index of positions.\n \"\"\"\n pos_1 = self.positions[index]\n\n p1 = self.get_protein(pos_1)\n if p1.number_of_free_domains == 0: # continue if protein has already interactions on all domains\n return\n\n (p2, pos_2) = self.sample_interactor(p1, pos_1)\n if p2 is None:\n return\n\n domains1 = list(p1.get_domains_to_interactor(p2.name))\n random.shuffle(domains1)\n domains2 = list(p2.get_domains_to_interactor(p1.name))\n random.shuffle(domains2)\n for d1, d2 in itertools.product(domains1, domains2):\n if p1.is_association_possible(p2.name, d1) and p2.is_association_possible(p1.name, d2):\n p1.associate(p2.name, d1)\n p2.associate(p1.name, d2)\n\n if self.interactions[pos_1] == {}: # node was a singleton before\n self.number_of_singletons -= 1\n if self.interactions[pos_2] == {}:\n self.number_of_singletons -= 1\n\n self.interactions.add_edge(pos_1, pos_2, domains=(d1, d2))\n self.number_of_edges += 1\n\n break\n\n\n def dissociate(self, index, edges):\n \"\"\" Dissociate the two proteins indicated through edge index and remove\n the corresponding edge.\n \"\"\"\n pos_1, pos_2, dom = edges[index]\n\n p1 = self.get_protein(pos_1)\n p2 = self.get_protein(pos_2)\n\n d1, d2 = dom[\"domains\"] #self.interactions[pos_1][pos_2][\"domains\"]\n if not p1.is_interacting(p2.name, d1) or not p2.is_interacting(p1.name, d2):\n d2, d1 = dom[\"domains\"] #self.interactions[pos_1][pos_2][\"domains\"]\n if not p1.is_interacting(p2.name, d1) or not p2.is_interacting(p1.name, d2):\n print(\"error - dissociation not possible\", file=sys.stderr)\n print(pos_1, pos_2, p1.name, d1, p2.name, d2, file=sys.stderr)\n print(p1.state, p2.state, file=sys.stderr)\n return\n\n p1.dissociate(p2.name, d1)\n p2.dissociate(p1.name, d2)\n self.interactions.remove_edge(pos_1, pos_2)\n self.number_of_edges -= 1\n if self.interactions[pos_1] == {}: # node has become a singleton\n self.number_of_singletons += 1\n if self.interactions[pos_2] == {}:\n self.number_of_singletons += 1\n\n\n def simulate_interaction_step(self, association_probability, dissociation_probability, step):\n \"\"\" Simulate one step: First all proteins try with $association_probability\n to interact with an other protein in a random order. 
After that each\n interaction is tested for dissociation with $dissociation_probability.\n \"\"\"\n # association\n print(datetime.datetime.now(), \"begin of association step\", step, sep='\\t', file=self.output_log)\n\n indices_to_associate = self.get_sampled_indices(self.num_protein_instances, association_probability)\n for index in indices_to_associate:\n self.associate(index)\n\n print(datetime.datetime.now(), step, \"a\", self.number_of_edges, \"edges\", self.number_of_singletons, \"singletons\", sep='\\t', file=self.output_log, flush=True)\n\n self.edgecount_ass.append(self.number_of_edges)\n print(datetime.datetime.now(), \"begin of dissociation step\", step, sep='\\t', file=self.output_log)\n\n # dissociation\n indices_to_remove = self.get_sampled_indices(self.number_of_edges, dissociation_probability)\n edges = self.interactions.edges(data=True)\n for index in indices_to_remove:\n self.dissociate(index, edges)\n\n print(datetime.datetime.now(), step, \"d\", self.number_of_edges, \"edges\", self.number_of_singletons, \"singletons\", sep='\\t', file=self.output_log, flush=True)\n\n self.edgecount_dis.append(self.number_of_edges)\n\n if self.test_convergence(self.edgecount_ass, self.edgecount_dis):\n if self.convergence_edge is None:\n self.convergence_edge = step\n return True\n\n\n def test_convergence(self, edgecount_ass, edgecount_dis):\n \"\"\" Test the convergence criterion.\n \"\"\"\n # Compare the last 10 steps, so the arrays should have 10 items\n if len(edgecount_ass) < 10:\n return False\n\n new_mean_edge_ass = 0.1*sum(edgecount_ass[-10:])\n result_ass = new_mean_edge_ass < self.mean_edge_ass\n self.mean_edge_ass = new_mean_edge_ass\n #edgecount_ass.pop(0)\n\n new_mean_edge_dis = 0.1*sum(edgecount_dis[-10:])\n result_dis = new_mean_edge_dis < self.mean_edge_dis\n self.mean_edge_dis = new_mean_edge_dis\n #edgecount_dis.pop(0)\n\n return result_ass and result_dis\n\n\n#---------- methods that have to be implemented in the concrete simulations ----------\n\n def get_protein(self, pos):\n \"\"\" Return the protein at position $pos.\n \"\"\"\n raise NotImplementedError\n\n def sample_interactor(self, protein, pos):\n \"\"\" Sample an interactor for $protein at position $pos.\n \"\"\"\n raise NotImplementedError\n\n def simulate_network(self, steps, association_probability, dissociation_probability):\n \"\"\" Simulate the network with given parameters.\n \"\"\"\n raise NotImplementedError\n" }, { "alpha_fraction": 0.6050460934638977, "alphanum_fraction": 0.6091703176498413, "avg_line_length": 39.801979064941406, "blob_id": "b0acc749484d844c351bf38aa1d0a95f42701985", "content_id": "c0bdcd13a2dd990c6f47ba783758a78b215e1552", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4122, "license_type": "permissive", "max_line_length": 147, "num_lines": 101, "path": "/cpinsim/constraint_io.py", "repo_name": "BiancaStoecker/cpinsim", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nimport csv\nfrom sys import stderr\n\n\"\"\" Provide some functions for reading/writing/manipulating the csv files of\n constraints.\n\"\"\"\n\n\ndef split_interactor_and_domain(interactor):\n \"\"\" Split interactor and it domains. 
Return interactor and one domain,\n a list of domains or None if there was no domain.\n \"\"\"\n interactor, *domain_interactor = interactor.split(\"[\")\n interactor = interactor.strip().upper()\n # exactly one domain\n if domain_interactor != [] and len(domain_interactor) == 1:\n domain_interactor = domain_interactor[0].replace(\"]\", \"\")\n # more than one domain\n elif domain_interactor != []:\n domain_interactor = [domain_interactor[i].replace(\"]\", \"\") for i in range(len(domain_interactor))]\n # no domain\n else:\n domain_interactor = None\n return interactor, domain_interactor\n\n\ndef set_domain_at_host(domains_host, i):\n \"\"\" Choose the right domain out of domains_host. If i < len(domains_host)\n then it is the i-th element otherwise it is the last element in the list.\n \"\"\"\n if type(domains_host) == list:\n j = i if i < len(domains_host) else len(domains_host)-1\n domain_host_interactor = domains_host[j]\n i += 1\n else:\n domain_host_interactor = domains_host\n return domain_host_interactor, i\n\n\ndef yield_interactions_without_constraints(files):\n \"\"\" Yield proteins and domains from interactions between proteins without\n constraints. Files must have two columns for the two interacting\n proteins in each line.\n \"\"\"\n for filename in files:\n with open(filename, newline='') as f:\n reader = csv.reader(f, delimiter='\\t')\n try:\n next(reader) # skip header\n except:\n print(\"File\", filename, \"was empty!\", file=stderr)\n continue # case of empty file\n for line in reader:\n p1, p2 = line\n p1, domain_p1 = split_interactor_and_domain(p1)\n p2, domain_p2 = split_interactor_and_domain(p2)\n yield (p1, domain_p1, p2, domain_p2, line)\n\n\ndef yield_competitions(files):\n \"\"\" Yield host and competitors with domains from interactions with\n constraints of competing proteins. Files must have two columns, one with\n the host and one with comma-separated competitors.\n \"\"\"\n for filename in files:\n with open(filename, newline='') as f:\n reader = csv.reader(f, delimiter='\\t')\n try:\n next(reader) # skip header\n except:\n print(\"File\", filename, \"was empty!\", file=stderr)\n continue # case of empty file\n for line in reader:\n host, competitors, *temp = line # *temp because there can be additional columns with irrelevant information\n competitors = competitors.split(\",\")\n host, domains_host = split_interactor_and_domain(host)\n yield (host, domains_host, competitors, line)\n\n\ndef yield_allosteric_effects(files):\n \"\"\" Yield host and interactors, activators, inhibitors from interactions with\n constraints of allosteric effects. 
Files must have four columns: host,\n interactors, activators, inhibitors.\n \"\"\"\n for filename in files:\n with open(filename, newline='') as f:\n reader = csv.reader(f, delimiter='\\t')\n try:\n next(reader) # skip header\n except:\n print(\"File\", filename, \"was empty!\", file=stderr)\n continue # case of empty file\n for line in reader:\n host, interactors, activators, inhibitors, *temp = line # *temp because there can be additional columns with irrelevant information\n host, domains_host = split_interactor_and_domain(host)\n interactors = interactors.split(\",\")\n activators = activators.split(\",\")\n inhibitors = inhibitors.split(\",\")\n yield (host, domains_host, interactors, activators, inhibitors, line)\n\n" }, { "alpha_fraction": 0.6346444487571716, "alphanum_fraction": 0.6429651975631714, "avg_line_length": 35.63888931274414, "blob_id": "1d3572ac545f5279f305dab67ed0e53d473a1e39", "content_id": "60134fea832fd038104355c3bb3f69401bf8670e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1322, "license_type": "permissive", "max_line_length": 379, "num_lines": 36, "path": "/setup.py", "repo_name": "BiancaStoecker/cpinsim", "src_encoding": "UTF-8", "text": "# coding: utf-8\nfrom setuptools import setup\nimport sys\n\n\n# set __version__, __author__\nexec(open(\"cpinsim/version.py\", encoding=\"utf-8\").read())\n\nsetup(\n name = 'cpinsim',\n version=__version__,\n author=__author__,\n author_email = '[email protected]',\n description = 'CPINSim - Constrained Protein Interaction Networks Simulator\\n CPINSim is a package for the simulation of constrained protein interaction networks. Beside simulation of complex formation in a cell there are methods for data preprocessing provided: Annotation of interactions and constraints with domains; A parser to provide the needed protein input format.',\n long_description = open(\"README.rst\").read(),\n license = 'MIT',\n url = 'https://github.com/BiancaStoecker/cpinsim',\n packages = ['cpinsim'],\n entry_points={\n \"console_scripts\": [\"cpinsim = cpinsim:main\"]\n },\n install_requires=[\n \"networkx==1.11.0\",\n \"bitarray==0.8.1\",\n \"scipy\"\n ],\n classifiers = [\n \"Development Status :: 4 - Beta\",\n \"Environment :: Console\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: MIT License\",\n \"Natural Language :: English\",\n \"Programming Language :: Python :: 3\",\n \"Topic :: Scientific/Engineering :: Bio-Informatics\"\n ]\n) " }, { "alpha_fraction": 0.5597031116485596, "alphanum_fraction": 0.5684165358543396, "avg_line_length": 45.01485061645508, "blob_id": "619b4af66de8f95e19244c3466711b462690dc43", "content_id": "50aa92cab18f14e4613c382947214a1088ea16ce", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9296, "license_type": "permissive", "max_line_length": 176, "num_lines": 202, "path": "/cpinsim/proteinparser.py", "repo_name": "BiancaStoecker/cpinsim", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nimport csv\nfrom collections import defaultdict\nimport itertools\n\nimport cpinsim.constraint_io as io\n\nclass Proteinparser:\n \"\"\" Parse annotated interactions and constraints into a proteinwise\n representation that can be used for the simulation.\n\n Output format:\n > Proteinname\n (Interactor, Domain) \\t tab separated clauses\n\n one clause consists of semicolon separated positive and/or negative terms\n positive terms: (protein,domain)\n negative 
terms: -(protein, domain)\n\n Example:\n >SRC\n (ABL1,84-145) -(*,84-145)\n (ERBB2,1) (BCAR1,73);-(*,1)\n \"\"\"\n\n def __init__(self):\n \"\"\" Initialize the datastructure for the transitions.\n\n Datastructure of transitions:\n The keys of the dictionary are the proteins. For each protein in the\n accessed list all interactions are stored. The list consists of a tuple\n (positive, negative). Proteins in positive have to be there for\n the interaction with the interactor to be possible (allosteric\n avctivation). Proteins in negative can't be there at the same time\n (competition or allosteric inhibition).\n \"\"\"\n # Host - (Interactor,Domain) - Tuple of Constraints in form (positive, negative)\n self.transitions = defaultdict(lambda: defaultdict(lambda: defaultdict(tuple)))\n\n\n def parse_interactions_without_constraints(self, files):\n \"\"\" Parse interactions between proteins without constraints. Files must\n have two columns for the two interacting proteins in each line.\n \"\"\"\n for (p1, domain_p1, p2, domain_p2, _) in io.yield_interactions_without_constraints(files):\n self.add_transition(p1, p2, domain_p1, [], [])\n self.add_transition(p2, p1, domain_p2, [], [])\n\n\n def parse_competitions(self, files):\n \"\"\" Parse interactions with constraints of competing proteins.\n Files must have two columns, one with the host and one with\n comma-separated competitors.\n \"\"\"\n for (host, domains_host, competitors, _) in io.yield_competitions(files):\n list_competitors = []\n i = 0 # counter for the domains at the host\n for competitor in competitors:\n domain_host, i = io.set_domain_at_host(domains_host, i)\n competitor, domain_competitor = io.split_interactor_and_domain(competitor)\n if (competitor, domain_host) not in list_competitors:\n list_competitors.append((competitor, domain_host))\n self.add_transition(competitor, host, domain_competitor, [], [])\n for comp, dom in list_competitors:\n self.add_transition(host, comp, dom, [], list_competitors)\n\n\n def parse_allosteric_effects(self, files):\n \"\"\" Parse interactions with constraints of allosteric effects.\n Files must have four columns: host, interactors, activators, inhibitors.\n \"\"\"\n for (host, domains_host, interactors, activators, inhibitors, _) in io.yield_allosteric_effects(files):\n list_activators = []\n list_inhibitors = []\n i = 0 # counter for the domains at the host\n for interactor in interactors:\n interactor, domain_interactor = io.split_interactor_and_domain(interactor)\n domain_host_interactor, i = io.set_domain_at_host(domains_host, i)\n self.add_transition(interactor, host, domain_interactor, [], [])\n\n for activator in activators:\n if activator == \"\": # skip empty activators\n continue\n activator, domain_activator = io.split_interactor_and_domain(activator)\n domain_host_activator, i = io.set_domain_at_host(domains_host, i)\n list_activators.append((activator, domain_host_activator))\n self.add_transition(activator, host, domain_activator, [], [])\n self.add_transition(host, activator, domain_host_activator, [], [])\n\n for inhibitor in inhibitors:\n if inhibitor == \"\": # skip empty inhibitors\n continue\n inhibitor, domain_inhibitor = io.split_interactor_and_domain(inhibitor)\n domain_host_inhibitor, i = io.set_domain_at_host(domains_host, i)\n list_inhibitors.append((inhibitor, domain_host_inhibitor))\n self.add_transition(inhibitor, host, domain_inhibitor, [], [])\n self.add_transition(host, inhibitor, domain_host_inhibitor, [], [])\n\n i += 1\n self.add_transition(host, interactor, 
domain_host_interactor, list_activators, list_inhibitors)\n\n\n\n def add_transition(self, host, interactor, domain, positive, negative):\n \"\"\" Add a new transition if it is not already in the datastructure.\n \"\"\"\n constraints = self.transitions[host][domain][interactor]\n if constraints != tuple():\n (p, n) = constraints\n else:\n p = set()\n n = set()\n p.update(positive)\n n.update(negative)\n self.transitions[host][domain][interactor] = (p, n)\n\n\n def merge_domains(self):\n \"\"\" Merge overlapping domains and modify self.transitions accordingly.\n \"\"\"\n to_merge = defaultdict(set)\n for host in self.transitions:\n for d1, d2 in itertools.combinations(self.transitions[host].keys(), 2):\n if self.are_overlapping(d1, d2):\n to_merge[host].add(d1)\n to_merge[host].add(d2)\n for host in to_merge:\n d1 = to_merge[host].pop()\n for d2 in to_merge[host]:\n old1 = self.transitions[host][d1]\n del self.transitions[host][d1]\n old2 = self.transitions[host][d2]\n del self.transitions[host][d2]\n min1, *max1 = d1.split(\"-\")\n min2, *max2 = d2.split(\"-\")\n new_min, new_max = min(int(min1), int(min2)), max(int(max1[0]), int(max2[0]))\n new_dom = \"{}-{}\".format(new_min, new_max)\n for i in old1:\n self.transitions[host][new_dom][i] = old1[i]\n for i in old2:\n self.transitions[host][new_dom][i] = old2[i]\n d1 = new_dom\n\n\n def write_proteins_in_file(self, output):\n \"\"\" Write the proteins in the designated file at $output.\n \"\"\"\n self.merge_domains()\n writer = csv.writer(open(output, 'w'), delimiter='\\t', lineterminator='\\n')\n for host in sorted(self.transitions):\n writer.writerow([\">\" + host])\n lines = []\n for domain_interactor in self.transitions[host]:\n for interactor in self.transitions[host][domain_interactor]:\n #print(host, interactor, domain_interactor, self.transitions[host][(interactor, domain_interactor)])\n positive, negative = self.transitions[host][domain_interactor][interactor]\n temp = \"-\"+self.to_str(\"*\", domain_interactor)\n for protein, domain in negative:\n if domain == domain_interactor: # competition at same domain already covered\n continue\n if self.are_overlapping(domain_interactor, domain): # competition with overlapping but not exact same domain\n if temp.find(self.to_str(\"*\", domain)) == -1:\n temp += \"-\"+self.to_str(\"*\", domain)\n continue\n temp += \"-\"+self.to_str(protein, domain) # allosteric inhibition\n temp = temp.strip(\";\")\n clauses = []\n for actiavator, domain_activator in positive: # allosteric activation: interaction possible if activator present and all negative factors (temp) not present\n clauses.append(self.to_str(actiavator, domain_activator)+temp)\n if clauses == []: # no activators\n clauses = [temp]\n line = [self.to_str(interactor, domain_interactor).strip(\";\")]\n line.extend(clauses)\n if line not in lines:\n lines.append(line)\n\n writer.writerows(sorted(lines))\n writer.writerow(\"\")\n\n\n def to_str(self, protein, domain):\n \"\"\" Return string representation of protein and domain.\n \"\"\"\n return \"({},{});\".format(protein, domain)\n\n\n def are_overlapping(self, domain_interactor, domain):\n \"\"\" Test if the domains are overlapping.\n \"\"\"\n min1, *max1 = domain_interactor.split(\"-\")\n if max1 == []: # domain_interactor is not an interval\n return False\n\n min2, *max2 = domain.split(\"-\")\n if max2 == []: # domain is not an interval\n return False\n\n cut0, cut1 = max(int(min1), int(min2)), min(int(max1[0]), int(max2[0]))\n if cut1 - cut0 > 0:\n return True\n return False\n\n" }, { 
"alpha_fraction": 0.6305220723152161, "alphanum_fraction": 0.6335341334342957, "avg_line_length": 42.93382263183594, "blob_id": "54b84e61b20ce3b0a4f0c63732746029f43aaebd", "content_id": "87a7b7b3f591501537e98dfc50524e8f074b4996", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5976, "license_type": "permissive", "max_line_length": 277, "num_lines": 136, "path": "/cpinsim/infinite_simulation.py", "repo_name": "BiancaStoecker/cpinsim", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nimport random\nimport json\nimport datetime\nimport gzip\nimport pickle\nfrom collections import defaultdict\nfrom math import floor\nimport heapq\n\nfrom cpinsim.abstract_simulation import AbstractSimulation\nfrom cpinsim.protein import Protein\n\n\nclass InfiniteSimulation(AbstractSimulation):\n \"\"\" Simulation of interactions between proteins without regard to protein\n positions (infinte radius interaction radius). Interactions can happen\n between all possible interactors if no constraints are violated.\n \"\"\"\n\n def __init__(self, proteins, concentrations, num_copies,\n perturbed_proteins, output_log, output_graph):\n \"\"\" Generate a list and sample a start assignment of $proteins. Either\n each protein has $num_copies copies or each protein is draw according\n to it concentration.\n \"\"\"\n AbstractSimulation.__init__(self, output_log)\n print(datetime.datetime.now(), \"begin of __init__\", sep='\\t', file=self.output_log)\n\n self.proteins = proteins\n self.num_copies = num_copies\n\n self.protein_instances = []\n self.map_protein_positions = defaultdict(list)\n\n if output_graph is not None:\n if output_graph[-2:] == \"gz\":\n self.output_graph_path = gzip.open(output_graph, 'wb', compresslevel=4)\n else:\n self.output_graph_path = gzip.open(output_graph+\".gz\", 'wb', compresslevel=4)\n else:\n self.output_graph_path = None\n\n if concentrations is not None:\n max_protein_instances, concentrations = concentrations\n max_protein_instances = int(max_protein_instances)\n parsed_concentrations = self.parse_concentrations(concentrations)\n protein_copies = dict()\n\n if num_copies is not None:\n for protein in proteins:\n protein_copies[protein] = num_copies\n\n if parsed_concentrations is not None:\n sum_proteins = 0\n remainder_list = []\n for protein, con in parsed_concentrations:\n exact_number = con * max_protein_instances\n rounded_number = floor(exact_number)\n remainder = exact_number - rounded_number\n remainder_list.append((protein, remainder))\n sum_proteins += rounded_number\n protein_copies[protein] = rounded_number\n lacking = max_protein_instances - sum_proteins\n for protein, remainder in heapq.nlargest(lacking, remainder_list, key=lambda x: x[1]):\n protein_copies[protein] += 1\n\n self.perturbed_proteins = perturbed_proteins\n if self.perturbed_proteins != None:\n for name, factor in self.perturbed_proteins:\n factor = float(factor)\n protein_copies[name] = int(protein_copies[name]*factor)\n\n count = 0\n for name in proteins.keys():\n p = proteins[name]\n for i in range(protein_copies[name]):\n self.protein_instances.append(Protein(name, p))\n self.map_protein_positions[name].append(count)\n self.map_positions_protein[count] = name\n self.num_protein_instances += 1\n count += 1\n\n self.positions = list(range(self.num_protein_instances))\n for pos in self.positions:\n self.interactions.add_node(pos, name=self.get_protein(pos).name)\n\n self.number_of_singletons = self.num_protein_instances\n 
print(datetime.datetime.now(), \"end of __init__\", sep='\\t', file=self.output_log)\n\n\n def get_protein(self, pos):\n \"\"\" Return the protein at the given position.\n \"\"\"\n return self.protein_instances[pos]\n\n\n def sample_interactor(self, protein, pos):\n \"\"\" Sample a protein for association with $protein.\n \"\"\"\n possible_interactors = list(protein.get_possible_interactors())\n random.shuffle(possible_interactors)\n for interactor in possible_interactors: # Go to each possible interactor in random order\n interactor_positions = self.map_protein_positions[interactor]\n if len(interactor_positions) == 0:\n continue\n # Test up to 50 positions until you find a possible interaction\n for i in range(50): #TODO: Determine sensible threshold\n index = random.choice(interactor_positions)\n if index != pos and not (self.interactions.has_edge(pos, index) or self.interactions.has_edge(index, pos)):\n return (self.get_protein(index), index)\n return (None, None)\n\n\n def simulate_network(self, steps, association_probability, dissociation_probability):\n \"\"\" Run the simulation for $steps steps.\n \"\"\"\n # header in JSON format\n print(json.dumps({\"num_protein_instances\": self.num_protein_instances, \"number of copies\": self.num_copies, \"steps\": steps, \"dissociation_probability\": dissociation_probability, \"association_probability\": association_probability}, sort_keys=True), file=self.output_log)\n print(datetime.datetime.now(), \"initial start of simulation\", sep='\\t', file=self.output_log)\n #self.writer(\"\\n\")\n\n for t in range(1, steps+1):\n if self.simulate_interaction_step(association_probability, dissociation_probability, t):\n print(datetime.datetime.now(), \"####\", \"convergence test triggered after step\", t, sep='\\t', file=self.output_log)\n break\n\n # After meeting convergence criterion repeat same number of steps\n for i in range(t):\n self.simulate_interaction_step(association_probability, dissociation_probability, t+i+1)\n\n # Save graph data structure via pickling\n if self.output_graph_path:\n output_graphstring = pickle.dumps(self.interactions)\n self.output_graph_path.write(output_graphstring)\n\n" }, { "alpha_fraction": 0.7258029580116272, "alphanum_fraction": 0.7311920523643494, "avg_line_length": 39.6929817199707, "blob_id": "f422757c6deade6eea503daed13399a11f41642b", "content_id": "61420110d23658747cb50725c4d3bf155cc15fa0", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 4639, "license_type": "permissive", "max_line_length": 298, "num_lines": 114, "path": "/README.rst", "repo_name": "BiancaStoecker/cpinsim", "src_encoding": "UTF-8", "text": ".. image:: https://img.shields.io/badge/install%20with-bioconda-brightgreen.svg\n :target: https://bioconda.github.io/recipes/cpinsim/README.html\n\nREADME\n======\n\nCPINSim - Constrained Protein Interaction Networks Simulator\n-------------------------------------------------------------\n\nCPINSim is a package for the simulation of constrained protein interaction networks. 
Besides simulation of complex formation in a cell, it also provides methods for data preprocessing: annotation of interactions and constraints with domains and a parser to provide the needed protein input format.\n\n\nFeatures\n~~~~~~~~\n\n- Annotate interactions and constraints with domains: Infer domains from known ones where possible, set unique artificial domains otherwise.\n- Parse the interaction and constraints files into a protein-wise text representation as input for the simulation.\n- Simulate the complex formation in a cell for the given input proteins with regard to the interaction dependencies which are encoded as constraints. Further, the simulation of perturbation effects like knockout or overexpression of one or multiple proteins is possible.\n\n\nSystem requirements\n~~~~~~~~~~~~~~~~~~~\n\n- `python3 <http://www.python.org/>`__\n- `networkx <http://networkx.github.io/>`__\n- `scipy <http://www.scipy.org/>`__\n- `bitarray <http://pypi.python.org/pypi/bitarray>`__\n\n\nInstallation\n~~~~~~~~~~~~\n\nWe recommend the installation using conda:\n\n.. code-block:: shell\n\n $ conda create -n cpinsim -c bioconda cpinsim\n $ source activate cpinsim\n\n # You now have a 'cpinsim' script; try it:\n $ cpinsim --help\n\n # To switch back to your normal environment, use\n $ source deactivate\n\nAlternatively, you can download the source code from `github <http://github.com/BiancaStoecker/cpinsim>`_ and install it using the setup script:\n\n.. code-block:: shell\n\n $ git clone http://github.com/BiancaStoecker/cpinsim.git cpinsim\n $ cd cpinsim\n ~/cpinsim$ python setup.py install\n\nIn this case you have to manually install the requirements listed above.\n\n\nPlatform support\n~~~~~~~~~~~~~~~~\n\nCPINSim is a pure Python program. This means that it runs on any operating system (OS) for which Python 3 and the other packages are available.\n\n\nExample usage\n~~~~~~~~~~~~~\n\nThe needed input file ``proteins_extended_adhesome.csv`` can be downloaded from the git repository via\n\n.. code-block:: shell\n\n wget https://raw.githubusercontent.com/BiancaStoecker/cpinsim/master/example_files/proteins_extended_adhesome.csv\n\n\n**Example 1:** Simulate the complex formation for proteins ``proteins_extended_adhesome.csv`` with 100 copies per protein (``-n``). Save the simulated graph at ``simulated_graph.gz`` and some logging information about the simulation steps at ``simulation.log``.\n\nFor further parameters the default values are used.\n\n\n.. code-block:: shell\n\n $ cpinsim simulate example_files/proteins_extended_adhesome.csv -n 100 -og simulated_graph.gz -ol simulation.log\n\n\nNote: The simulated graph is a pickled Python object (from the networkx library), saved in gzipped format.\nTo examine it, you have to write Python code to unzip and unpickle it and then use the networkx API to examine its properties (see below for an example).\n\n\n**Example 2:** Simulate the complex formation as in example 1, but now knock out the protein *FYN* and overexpress the protein *ABL1* by factor 5.\n\n\n.. code-block:: shell\n\n $ cpinsim simulate example_files/proteins_extended_adhesome.csv -n 100 -og simulated_graph_ko_FYN_oexp_ABL1.gz -ol simlation_ko_FYN_oexp_ABL1.log -p FYN 0 -p ABL1 5\n\n\nTo investigate the simulation results one can extract the simulation graph in a python shell and for example look at the node lists of the resulting complexes:\n\n.. 
code-block:: python\n\n import pickle, gzip\n import networkx as nx\n \n with gzip.open(\"simulated_graph.gz\", \"rb\") as f:\n # load graph, each complex is a connected component\n graph = pickle.load(f)\n # get list of complexes sorted in descending order by their number of nodes\n complexes = sorted(list(nx.connected_component_subgraphs(graph)), key=len, reverse=True)\n # print the first 5 complexes\n for c in complexes[:5]:\n # nodes have unique integer ids, for the protein name the \"name\" attribute is needed\n print([c.node[node][\"name\"] for node in c])\n\nWith the steps above, ``complexes`` contains each protein complex as a full networkx graph data structure for further analysis. \n\nAdditional example files for the data preprocessing steps and a full workflow including the evaluation of the simulation results will be uploaded in the near future.\n" }, { "alpha_fraction": 0.7321138381958008, "alphanum_fraction": 0.7342818379402161, "avg_line_length": 51.71428680419922, "blob_id": "363c0f76410c78b1172ffc821edea1b55b9f2078", "content_id": "046841eb749160aedf71cf8d4db1036c3a8dde60", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7380, "license_type": "permissive", "max_line_length": 421, "num_lines": 140, "path": "/cpinsim/__init__.py", "repo_name": "BiancaStoecker/cpinsim", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nimport argparse\n\nimport cpinsim.annotate_constraints as annotate_constraints\nimport cpinsim.proteinparser as proteinparser\nimport cpinsim.protein_type as protein_type\nfrom cpinsim.infinite_simulation import InfiniteSimulation\n\n\ndef get_argument_parser():\n \"\"\" Return an argument parser.\n \"\"\"\n\n parser = argparse.ArgumentParser(prog=\"cpinsim\", description=\"CPINSim - Constrained Protein Interaction Networks Simulator\\nPackage for the simulation of constrained protein interaction networks. Besides simulation, methods for data preprocessing are provided: annotation of interactions and constraints with domains; a parser to provide the needed protein input format.\")\n\n subparsers = parser.add_subparsers(help=\"Choose one of the following functions from the cpinsim package.\")\n\n\n #---------- parser for annotating constraints ----------#\n parser_ann = subparsers.add_parser(\"annotate\", help=\"Annotate constraints and interactions without constraints with domains.\")\n\n parser_ann.add_argument(\"--interactions_without_constraints\", \"-i\", nargs=\"+\", metavar=\"PATH\", help=\"Files containing the underlying network: pairwise interactions without constraints. Two columns: InteractorA | InteractorB\")\n\n parser_ann.add_argument(\"--competitions\", \"-c\", nargs=\"+\", metavar=\"PATH\", help=\"Files containing the competitions. Two columns: Host | Competitors (comma separated)\")\n\n parser_ann.add_argument(\"--allosteric_effects\", \"-a\", nargs=\"+\", metavar=\"PATH\", help=\"Files containing the allosteric effects. 
Four columns: Host | Interactor | Activator | Inhibitor\")\n\n parser_ann.add_argument(\"--extended_inference\", \"-e\", action='store_true', help=\"Extended inference for missing domains in competitions.\")\n\n parser_ann.add_argument(\"--output_interactions\", \"-oi\", metavar=\"PATH\", help=\"One output file containing all annotated pairwise interactions.\")\n parser_ann.add_argument(\"--output_competitions\", \"-oc\", metavar=\"PATH\", help=\"One output file containing all annotated competitions.\")\n parser_ann.add_argument(\"--output_allosterics\", \"-oa\", metavar=\"PATH\", help=\"One output file containing all annotated allosteric effects.\")\n\n parser_ann.set_defaults(func=annotate)\n\n\n #---------- parser for the proteinparser ----------#\n parser_proteins = subparsers.add_parser(\"parse\", help=\"Parse proteins from annotated constraints and interactions into defined text format.\")\n\n parser_proteins.add_argument(\"--interactions_without_constraints\", \"-i\", nargs=\"+\", help=\"Files containing the annotated pairwise interactions.\")\n\n parser_proteins.add_argument(\"--competitions\", \"-c\", nargs=\"+\", help=\"Files containing the annotated competitions.\")\n\n parser_proteins.add_argument(\"--allosteric_effects\", \"-a\", nargs=\"+\", help=\"Files containing the annotated allosteric effects.\")\n\n parser_proteins.add_argument(\"--output\", \"-o\", help=\"Output file containing the parsed proteins.\")\n\n parser_proteins.set_defaults(func=parse_proteins)\n\n\n #---------- parser for the simulation ----------#\n parser_sim = subparsers.add_parser(\"simulate\", help=\"Simulate the complex formation in a cell with given proteins. The proteins either have a fixed number of copies, or are chosen according to protein concentrations. Proteins associate or dissociate according to the association- and dissociation-probability. 
It is possible to perturb proteins and modify their concentration to simulate knockout or overexpression.\")\n\n parser_sim.add_argument(\"proteins\", help=\"Path to a csv-file containing the parsed proteins.\")\n\n group_protein_sampling = parser_sim.add_mutually_exclusive_group()\n group_protein_sampling.add_argument(\"--concentrations\", \"-c\", nargs=2, metavar=(\"MAX-PROTEIN-INSTANCES\", \"PATH/TO/CONCENTRATIONS\"), help=\"Maximum number of protein instances and path to a csv-file containing a concentration for each protein.\")\n group_protein_sampling.add_argument(\"--number-of-copies\", \"-n\", metavar=\"N\", type=int, help=\"Number of copies for each protein type.\")\n\n parser_sim.add_argument(\"--association-probability\", \"-ap\", metavar=\"P\", type=float, help=\"The probability for a new association between two proteins (default: %(default)s).\", default=0.005)\n\n parser_sim.add_argument(\"--dissociation-probability\", \"-dp\", metavar=\"P\", type=float, help=\"The probability for a dissociation of a pairwise interaction (default: %(default)s).\", default=0.0125)\n\n parser_sim.add_argument(\"--max-steps\", \"-m\", type=int, help=\"Maximum number of simulation steps if convergence is not reached by then (default: %(default)s).\", default=1000)\n\n parser_sim.add_argument(\"--perturbation\", \"-p\", nargs=2, action='append', metavar=(\"PROTEIN\", \"FACTOR\"), help=\"Protein that should be overexpressed or down-regulated by factor FACTOR for perturbation analysis.\")\n\n parser_sim.add_argument(\"--output-graph\", \"-og\", metavar=\"PATH\", required=True, help=\"Pickle the complete graph at the end of simulation (after last dissociation step) and write it to the given path.\")\n\n parser_sim.add_argument(\"--output-log\", \"-ol\", metavar=\"PATH\", help=\"Write some log information of each simulation step to the given path. 
If not specified, stdout is used.\")\n\n    parser_sim.set_defaults(func=simulate)\n\n    return parser\n\n\ndef annotate(args):\n    \"\"\" Annotate constraints with domains.\n    \"\"\"\n    if args.competitions is not None:\n        annotate_constraints.read_competitions(args.competitions)\n\n    if args.allosteric_effects is not None:\n        annotate_constraints.read_allosteric_effects(args.allosteric_effects)\n\n    if args.interactions_without_constraints is not None:\n        annotate_constraints.read_interactions_without_constraints(args.interactions_without_constraints)\n\n    annotate_constraints.normalize()\n    annotate_constraints.remove_redundant_nones()\n    if args.extended_inference:\n        annotate_constraints.domain_inference_competition_host()\n        annotate_constraints.domain_inference_competitors()\n    annotate_constraints.apply_artificial_domains()\n    annotate_constraints.write_annotated_files(args.output_interactions, args.output_competitions, args.output_allosterics)\n\n\ndef parse_proteins(args):\n    \"\"\" Parse proteins from annotated interactions and constraints.\n    \"\"\"\n    pp = proteinparser.Proteinparser()\n\n    if args.interactions_without_constraints is not None:\n        pp.parse_interactions_without_constraints(args.interactions_without_constraints)\n\n    if args.competitions is not None:\n        pp.parse_competitions(args.competitions)\n\n    if args.allosteric_effects is not None:\n        pp.parse_allosteric_effects(args.allosteric_effects)\n\n    pp.write_proteins_in_file(args.output)\n\n\ndef simulate(args):\n    \"\"\" Run the simulation.\n    \"\"\"\n    proteins = protein_type.read_proteins(args.proteins)\n    sim = InfiniteSimulation(proteins, args.concentrations, args.number_of_copies, args.perturbation, args.output_log, args.output_graph)\n    sim.simulate_network(args.max_steps, args.association_probability, args.dissociation_probability)\n\n\ndef main(pargs=None):\n    \"\"\" Main function.\n    \"\"\"\n    parser = get_argument_parser()\n\n    args = parser.parse_args() if pargs is None else parser.parse_args(pargs)\n\n    # no subcommand given: print the help and exit\n    if \"func\" not in args:\n        parser.print_help()\n        return\n    args.func(args)\n\n\nif __name__ == \"__main__\":\n\n    main()\n" } ]
10
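The `main(pargs=None)` hook in the main.py above makes the CLI scriptable: a list of strings goes through the same argparse dispatch as the real command line. A minimal sketch of a programmatic `simulate` run, assuming the module is importable; the file names are placeholders, not files shipped with the tool:

```python
# Hypothetical invocation of the CLI defined above; "proteins.csv" and
# "result.pickle" are placeholder paths, not part of the repository.
main([
    "simulate", "proteins.csv",         # output of the "parse" subcommand
    "--number-of-copies", "50",         # fixed copy number per protein type
    "--max-steps", "500",
    "--output-graph", "result.pickle",  # required: pickled final graph
])
```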
svakulenk0/mapsChatbot
https://github.com/svakulenk0/mapsChatbot
4bed32d7910bf3c2b1034e4a1da2984f2bf9ae67
2532c28b7e590ac72755da105e389e97e85b9c9f
58d9cf23a04048e56840f7c187cd2618e014db06
refs/heads/master
2020-03-23T21:41:09.651848
2018-09-04T14:51:13
2018-09-04T14:51:13
142,125,112
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5948006510734558, "alphanum_fraction": 0.6016418933868408, "avg_line_length": 33.56756591796875, "blob_id": "a44d5ebcd123fdac317d8d0dfca09f83b1a1421c", "content_id": "aed6b9e6402418c7afcd02a721d6af34e9233fce", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5116, "license_type": "no_license", "max_line_length": 131, "num_lines": 148, "path": "/maps_connector.py", "repo_name": "svakulenk0/mapsChatbot", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n'''\nCreated on Jul 24, 2018\n\n.. codeauthor: svitlana vakulenko\n <[email protected]>\n\nConnects to Google Maps API via the offcial Python library\n\nhttps://github.com/googlemaps/google-maps-services-python\n\nTest API call: https://maps.googleapis.com/maps/api/directions/json?origin=WU+Wien&destination=Zoo+Schoenbrunn&mode=transit&key=x\nTest API call: https://maps.googleapis.com/maps/api/directions/json?origin=WU+Wien&destination=Zoo+Schoenbrunn&mode=driving&key=x\nTest API call: https://maps.googleapis.com/maps/api/directions/json?origin=WU+Wien&destination=Zoo+Schoenbrunn&mode=bicycling&key=x\n\nLink to the map with the route on Google maps:\nhttps://www.google.com/maps/dir/?api=1&origin=tu+wien&destination=wu+wien&travelmode=transit\n\n'''\nimport time\nfrom heapq import heappush, heappop\n\nimport googlemaps\n\nfrom .settings import API_KEY\n\nMODES = {\"car\": \"driving\", \"offi\": \"transit\", \"bike\": \"bicycling\"}\nGM_LINK = \"https://www.google.com/maps/dir/?api=1&origin=%s&destination=%s&travelmode=%s\"\n\n\n# connect to Google Maps API\ngmaps = googlemaps.Client(key=API_KEY)\n\n\ndef get_route(origin, destination, mode):\n # Request directions via public transit\n directions_result = gmaps.directions(origin,\n destination,\n mode=mode)\n return directions_result\n\n\nclass TripPlanner(object):\n \"\"\"Each object of the class holds information about the planned trip\"\"\"\n\n def __init__(self, origin=None, destination=None):\n # plan route details\n self.origin = origin\n self.destination = destination\n # make transport choice\n self.transport = None\n # estimate prediction\n self.starting_time = None\n self.estimated_arrival = None\n # record observation\n self.actual_arrival = None\n self.error = None\n self.error_minutes = None\n self.error_sign = None\n\n def get_link(self):\n return GM_LINK % ('+'.join(self.origin.split()), '+'.join(self.destination.split()), MODES[self.transport])\n\n def rank_alternative_routes(self):\n '''\n Collects Google Maps routes API results for different transport options\n '''\n estimates = []\n for transport, mode in MODES.items():\n response = get_route(self.origin, self.destination, mode)\n if response:\n estimate = response[0]['legs'][0]['duration']\n # rank estimates\n heappush(estimates, (estimate['value'], (transport, estimate['text'])))\n else:\n return None\n\n route = \"From: %s\" % response[0]['legs'][0]['start_address'].split(',')[0]\n route += \"\\nTo: %s\\n\" % response[0]['legs'][0]['end_address'].split(',')[0]\n\n while estimates:\n time, (transport, time_str) = heappop(estimates)\n route += \"\\n%s %s\" % (transport, time_str)\n return route\n\n def choose_transport(self, transport):\n self.transport = transport\n\n def format_time(self, timestamp):\n return time.strftime(\"%H:%M\", time.localtime(timestamp))\n\n\n def record_estimate(self):\n mode = MODES[self.transport]\n if self.origin and self.destination:\n response = get_route(self.origin, 
self.destination, mode)\n            if response:\n                # pad the start time by one minute\n                self.starting_time = time.time() + 60\n\n                # save estimate\n                if mode == 'transit':\n                    self.estimated_arrival = response[0]['legs'][0]['arrival_time']['value']\n                else:\n                    # estimated trip duration: number of seconds\n                    estimated_duration = response[0]['legs'][0]['duration']['value']\n                    # calculate arrival time\n                    self.estimated_arrival = self.starting_time + estimated_duration\n\n                # format arrival time\n                return self.format_time(self.estimated_arrival), self.transport\n        return None, self.transport\n\n    def check_estimate(self):\n        if self.estimated_arrival:\n            self.actual_arrival = time.time()\n            self.error = self.actual_arrival - self.estimated_arrival\n            if self.error > 0:\n                self.error_minutes = int(self.error) // 60 % 60\n                self.error_sign = 'late'\n            elif self.error < 0:\n                self.error_minutes = int(-self.error) // 60 % 60\n                self.error_sign = 'early'\n            else:\n                self.error_minutes = 0\n                self.error_sign = 'on time'\n        return None\n\n\ndef test_rank_alternative_routes(origin='WU Wien', destination='Zoo Schoenbrunn'):\n    '''\n    Test for the Google Maps routes API for different transport options\n    '''\n    print(TripPlanner(origin, destination).rank_alternative_routes())\n\n\ndef test_none(origin='WU', destination='Zoo'):\n    '''\n    Unit test for an empty result\n    '''\n    assert TripPlanner(origin, destination).rank_alternative_routes() is None\n\n\nif __name__ == '__main__':\n    test_none()\n    test_rank_alternative_routes()\n" }, { "alpha_fraction": 0.7366985082626343, "alphanum_fraction": 0.7421555519104004, "avg_line_length": 20.558822631835938, "blob_id": "96ae496f070cef77862daf636c5e7ddfdbbe7871", "content_id": "1100ca890877337f876c712d3466bdfab6ab7796", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 733, "license_type": "no_license", "max_line_length": 114, "num_lines": 34, "path": "/README.md", "repo_name": "svakulenk0/mapsChatbot", "src_encoding": "UTF-8", "text": "# Maps Chatbot\n\nChatbot that helps you to find routes through the Google Maps API\n\n## Requirements\n\n* [googlemaps](https://github.com/googlemaps/google-maps-services-python)\n\n```\npip install -U googlemaps\n```\n\n## Setup\n\n* Create settings.py and save the API_KEY in the variable there.\n\n```\ncp settings.py.template settings.py\n```\n\n* Set a symlink to this folder as an [opsdroid]() skill (in ~/.local/share/opsdroid/opsdroid-modules/skill on Linux)\n```\nln -sfn ~/mapsChatbot mapsChatbot\n```\n\n* Connect to [Facebook](https://github.com/opsdroid/connector-facebook)\n\nhttps://www.communidata.at:5008/connector/facebook\nhttps://mapschatbot.communidata.at/connector/facebook\n\n\n## Similar projects\n\n* [MapBot](https://github.com/vishakha-lall/MapBot)\n" }, { "alpha_fraction": 0.6429519057273865, "alphanum_fraction": 0.6457711458206177, "avg_line_length": 36.924530029296875, "blob_id": "8291888a93b50bdd37818d9c06b78a89e5af6b8a", "content_id": "e46bffe6aabd6e59df9bfe3c42e9b7ebbe899b2f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6032, "license_type": "no_license", "max_line_length": 314, "num_lines": 159, "path": "/__init__.py", "repo_name": "svakulenk0/mapsChatbot", "src_encoding": "UTF-8", "text": "from opsdroid.matchers import match_regex, match_always\n# import DatabaseMongo\nimport logging\nimport random\n\nfrom .maps_connector import TripPlanner\n\n# mongo collection name\nAGENT_ID = 'googleMaps'\nINSTRUCTION = 'Hi! 
I can help you to estimate the time of your commute.\\nYou can send me these commands:\\n1) Specify the route, e.g. \"from Zoo Schoenbrunn to tu wien\"\\n2) Choose a transportation option: \"car\", \"offi\" or \"bike\"\\n3) Say \"start\" when you start the commute and \"stop\" when you arrive at the destination'\n# connect to the DB\n# db = DatabaseMongo()\n\ndef setup(opsdroid):\n    opsdroid.tp = TripPlanner()\n\n\ndef estimate(opsdroid):\n    estimate, mode = opsdroid.tp.record_estimate()\n    if estimate:\n        response = 'You are going by %s; the estimated arrival time is %s if you leave now' % (mode, estimate)\n        return response\n\n\ndef plan_trip(opsdroid, mode):\n    opsdroid.tp.choose_transport(mode)\n    response = estimate(opsdroid)\n    if not response:\n        return None\n    response += ' or say \"start\" when you leave.\\n'\n    link = opsdroid.tp.get_link()\n    return response + link\n\n\n@match_regex(r'from (.*) to (.*)', case_sensitive=False)\nasync def show_options(opsdroid, config, message):\n    '''\n    sample request: From tu wien to Schönbrunn\n    '''\n    origin = message.regex.group(1)\n    destination = message.regex.group(2)\n    # restart estimates for the new route\n    opsdroid.tp = TripPlanner(origin, destination)\n\n    text = opsdroid.tp.rank_alternative_routes()\n\n    # respond\n    if text:\n        # load the error estimate from the previous history by user id\n        # last_error = await opsdroid.memory.get(AGENT_ID)\n        last_error = await opsdroid.memory.get('/'.join([AGENT_ID, 'user', str(message.user)]))\n\n        if last_error:\n            # last_error = collected_errors[0]\n            error = last_error['error']\n            if error > 0:\n                minutes = int(error) // 60 % 60\n                last_error_text = \"%d minutes late\" % minutes\n            elif error < 0:\n                minutes = int(-error) // 60 % 60\n                last_error_text = \"%d minutes early\" % minutes\n            else:\n                last_error_text = \"just on time\"\n            text += \"\\n\\nLast time you were %s when travelling with the %s\" % (last_error_text, last_error['transport'])\n\n        await message.respond(text + '\\n\\nChoose transport: car, bike or offi?')\n    else:\n        await message.respond(\"Not sure what you mean. 
Could you be more specific?\")\n\n\n@match_regex(r'car|auto', case_sensitive=False)\nasync def choose_car(opsdroid, config, message):\n    response = plan_trip(opsdroid, 'car')\n    if response:\n        await message.respond(response)\n\n\n@match_regex(r'public transport|public|öffi|oeffi|offi|bim|ubahn|u-bahn|metro|bus|trolley', case_sensitive=False)\nasync def choose_public(opsdroid, config, message):\n    response = plan_trip(opsdroid, 'offi')\n    if response:\n        await message.respond(response)\n\n\n@match_regex(r'bike|bicycle|cycle|cycling', case_sensitive=False)\nasync def choose_bike(opsdroid, config, message):\n    response = plan_trip(opsdroid, 'bike')\n    if response:\n        await message.respond(response)\n\n\n@match_regex(r'start', case_sensitive=False)\nasync def start_trip(opsdroid, config, message):\n    if opsdroid.tp.transport:\n        # use the previously chosen transport mode\n        response = estimate(opsdroid)\n        if response:\n            await message.respond(response)\n    else:\n        await help(opsdroid, config, message)\n\n\n@match_regex(r'stop|check|check in|ready|finish|fin|here', case_sensitive=False)\nasync def finish_trip(opsdroid, config, message):\n    '''\n    calculates the difference between the estimated and actual arrival time\n    '''\n    opsdroid.tp.check_estimate()\n\n    if opsdroid.tp.error is not None:\n\n        if opsdroid.tp.error == 0:\n            await message.respond(\"You are just on time!\")\n        else:\n            await message.respond(\"You are %d minutes %s\" % (opsdroid.tp.error_minutes, opsdroid.tp.error_sign))\n\n        # save on finish\n        await save_to_DB(opsdroid, config, message)\n\n\n@match_regex(r'save|speichern|record|persist', case_sensitive=False)\nasync def save_to_DB(opsdroid, config, message):\n    '''\n    save the user_id, route details (origin/destination/transport) and error to the DB, e.g. through the mongo connector\n    '''\n    if opsdroid.tp.error is not None:\n        estimate_error = {'transport': opsdroid.tp.transport, 'user': str(message.user),\n                          'origin': opsdroid.tp.origin, 'destination': opsdroid.tp.destination,\n                          'error': opsdroid.tp.error, 'error_minutes': opsdroid.tp.error_minutes,\n                          'error_sign': opsdroid.tp.error_sign,\n                          'starting_time': opsdroid.tp.starting_time,\n                          'starting_time_str': opsdroid.tp.format_time(opsdroid.tp.starting_time),\n                          'estimated_arrival': opsdroid.tp.estimated_arrival,\n                          'estimated_arrival_str': opsdroid.tp.format_time(opsdroid.tp.estimated_arrival),\n                          'actual_arrival': opsdroid.tp.actual_arrival,\n                          'actual_arrival_str': opsdroid.tp.format_time(opsdroid.tp.actual_arrival)}\n\n        await opsdroid.memory.put(AGENT_ID, data=estimate_error)\n        # await message.respond(\"Saved estimate for the route from %s\" % opsdroid.tp.origin)\n\n\n@match_regex(r'help', case_sensitive=False)\nasync def help(opsdroid, config, message):\n    await message.respond(INSTRUCTION)\n\n\n# @match_regex(r'car|auto', case_sensitive=False)\n# async def choose_car(opsdroid, config, message):\n#     '''\n#     quick command to run all tests\n#     '''\n#     response = estimate(opsdroid, 'car')\n#     await message.respond(response)\n\n\n# @match_always()\n# async def unknown_command(opsdroid, config, message):\n#     '''\n#     default response if the utterance did not match any of the regex commands defined above\n#     '''\n#     if not match:\n#         await message.respond(\"Not sure what you mean!\\n\" + INSTRUCTION)\n" }, { "alpha_fraction": 0.5508981943130493, "alphanum_fraction": 0.5928143858909607, "avg_line_length": 10.928571701049805, "blob_id": "10b3cae3ffd6d128fd03222f90b6d3455083b796", "content_id": "47df42772577a843bc1c723a170af3adff604afd", "detected_licenses": [], "is_generated": false, 
"is_vendor": false, "language": "Python", "length_bytes": 167, "license_type": "no_license", "max_line_length": 27, "num_lines": 14, "path": "/settings.py.template", "repo_name": "svakulenk0/mapsChatbot", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n'''\nCreated on Jul 24, 2018\n\n.. codeauthor: svitlana vakulenko\n <[email protected]>\n\nHolds authentication tokens\n\n'''\n\nAPI_KEY = 'Fill me'\n" } ]
4
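A note on the arithmetic in `TripPlanner.check_estimate()` above: the signed arrival error in seconds is reduced to whole minutes with floor division. A self-contained sketch with an invented error value:

```python
# Illustration only: 185.0 is a made-up error in seconds
# (actual_arrival - estimated_arrival), not a value produced by the bot.
error = 185.0
minutes = int(abs(error)) // 60 % 60                                 # -> 3
sign = 'late' if error > 0 else 'early' if error < 0 else 'on time'
print("%d minutes %s" % (minutes, sign))                             # 3 minutes late
```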
laurajhaskell/blogz
https://github.com/laurajhaskell/blogz
8e7115116ad4177feb12ee07e9d026c3dea0d800
7010a927e03dafc430f806d5b711a1862a3547a8
29b179c5ed32d14010a6a73ddfbe3118da5e0712
refs/heads/master
2020-04-03T18:03:28.454427
2018-11-06T00:28:53
2018-11-06T00:28:53
155,469,629
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5897865891456604, "alphanum_fraction": 0.5932926535606384, "avg_line_length": 30.09478759765625, "blob_id": "4c5cf05555c1914cb1c854c55f9a5ab667a3914d", "content_id": "cb6662b0829fa4249b1eb3e1d9caede5704339be", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6560, "license_type": "no_license", "max_line_length": 119, "num_lines": 211, "path": "/main.py", "repo_name": "laurajhaskell/blogz", "src_encoding": "UTF-8", "text": "from flask import Flask, request, redirect, render_template, session, flash\nfrom flask_sqlalchemy import SQLAlchemy\nfrom datetime import datetime\n\napp = Flask(__name__)\napp.config['DEBUG'] = True\napp.config['SQLALCHEMY_DATABASE_URI'] = 'mysql+pymysql://blogz:blogzpassword@localhost:8889/blogz'\napp.config['SQLALCHEMY_ECHO'] = True\ndb = SQLAlchemy(app)\napp.secret_key = 'y337kGcys&xPsB'\n\n\nclass User(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n username = db.Column(db.String(120), unique=True)\n password = db.Column(db.String(120))\n blogs = db.relationship('Blog', backref='owner')\n\n def __init__(self, username, password):\n self.username = username\n self.password = password\n\n\nclass Blog(db.Model):\n\n id = db.Column(db.Integer, primary_key=True)\n title = db.Column(db.String(120))\n body = db.Column(db.Text)\n # use foreign key to link user id\n owner_id = db.Column(db.Integer, db.ForeignKey('user.id'))\n pub_date = db.Column(db.DateTime)\n\n def __init__(self, title, body, owner, pub_date=None):\n self.title = title\n self.body = body\n self.owner = owner\n if pub_date is None:\n pub_date = datetime.utcnow()\n self.pub_date = pub_date\n\n\[email protected]_request\ndef require_login():\n allowed_routes = ['login', 'signup', 'blog', 'index']\n\n if request.endpoint not in allowed_routes and 'username' not in session:\n return redirect('/login')\n\n\[email protected]('/')\ndef index():\n\n users = User.query.all()\n return render_template('index.html', users=users)\n \n\n\[email protected]('/login', methods=['POST', 'GET'])\ndef login():\n username = ''\n password = ''\n\n if request.method == 'POST':\n username = request.form['username']\n password = request.form['password']\n existing_user = User.query.filter_by(username=username).first()\n\n # user enters username not in database and must signup\n if not existing_user:\n flash(\"User does not exist. Sign up.\")\n return redirect('/signup')\n\n # user enters correct password and directed to create new post\n if existing_user and existing_user.password == password:\n session['username'] = username\n return redirect('/newpost')\n # user enters username in database and password not in database \n else:\n flash(\"Incorrect password\")\n return render_template('login.html')\n \n return render_template('login.html') \n\n\n\[email protected]('/signup', methods=['POST', 'GET'])\ndef signup():\n username = ''\n password = ''\n verify = ''\n\n if request.method == 'POST':\n username = request.form['username']\n password = request.form['password']\n verify = request.form['verify']\n\n # error for blank username or less than 3 char\n if not username:\n flash(\"Please enter username\", 'error')\n return render_template('signup.html')\n elif len(username) < 3:\n flash(\"Invalid username. 
Must be 3 characters or more.\", 'error')\n return render_template('/signup.html')\n\n # error for blank password or less than 3 char\n elif not password:\n flash(\"Please enter password\", 'error')\n return render_template('signup.html')\n elif len(password) < 3:\n flash(\"Invalid password. Must be 3 characters or more.\", 'error')\n return render_template('/signup.html')\n \n # error for blank verify or not matching\n elif not verify:\n flash(\"Please verify password\", 'error')\n return render_template('/signup.html')\n elif verify != password:\n flash(\"Passwords do not match.\", 'error')\n return render_template('/signup.html')\n\n existing_user = User.query.filter_by(username=username).first()\n\n # all correct fields, stores info in database\n if not existing_user:\n new_user = User(username, password)\n db.session.add(new_user)\n db.session.commit()\n session['username'] = username\n return redirect('/newpost')\n # error for existing username\n else:\n flash(\"Username already exists. Choose a new username.\" 'error')\n return redirect('/newpost')\n \n return render_template('signup.html')\n\n\[email protected]('/logout')\ndef logout():\n del session['username']\n return redirect('/blog')\n\n\[email protected]('/newpost', methods=['POST', 'GET'])\ndef newpost():\n\n title = ''\n body = ''\n owner = User.query.filter_by(username=session['username']).first()\n\n if request.method == 'GET':\n return render_template('newpost.html', heading=\"New Blog\")\n \n if request.method == 'POST':\n title = request.form['title']\n body = request.form['body']\n\n #validate blog title and body\n if title == '' and body == '':\n flash(\"Please enter a title and body.\", 'error')\n elif title == \"\":\n flash(\"Please enter a title\", 'error')\n return render_template('newpost.html', body=body)\n elif body == \"\":\n flash(\"Please write a blog post\", 'error')\n return render_template('newpost.html', title=title)\n\n else:\n new_blog = Blog(title, body, owner)\n db.session.add(new_blog)\n db.session.commit()\n\n #set unique id for each new blog\n num = new_blog.id\n return redirect('/blog?id={0}'.format(num))\n\n return render_template('newpost.html', title=title)\n\n\[email protected]('/blog', methods=['POST', 'GET'])\ndef blog():\n\n blogs = Blog.query.all()\n users = User.query.all()\n title = 'title'\n body = 'body'\n\n if 'id' in request.args:\n blog_id = request.args.get('id')\n\n for blog in blogs:\n if int(blog_id) == blog.id:\n title = blog.title\n body = blog.body\n owner_id = blog.owner_id\n owner = User.query.filter_by(id=owner_id).first()\n username = owner.username\n return render_template('singlepost.html', title=title, body=body, username=username, owner_id=owner_id)\n\n elif 'user' in request.args:\n user_id = request.args.get('user')\n user = User.query.filter_by(id=user_id).first()\n blogs = user.blogs\n return render_template('singleuser.html', user=user, blogs=blogs, )\n\n else: \n return render_template('blog.html',blogs=blogs)\n\n\nif __name__ == '__main__':\n app.run()" }, { "alpha_fraction": 0.37976783514022827, "alphanum_fraction": 0.38971808552742004, "avg_line_length": 19.827587127685547, "blob_id": "58195a822e4bb075d7000b9727e7354a1ade6452", "content_id": "1c2e7e7f43cc839a97d074968c8e18cf452f49af", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 603, "license_type": "no_license", "max_line_length": 77, "num_lines": 29, "path": "/templates/newpost.html", "repo_name": "laurajhaskell/blogz", "src_encoding": "UTF-8", 
"text": "{% extends \"base.html\" %}\n\n{% block content %}\n\n<h1>New Blog Post</h1>\n\n<form method='POST'>\n <table>\n <tr>\n <td>\n <label>Blog Title:</label>\n </td>\n <td>\n <input type='text' name='title'/>\n </td>\n </tr>\n <tr>\n <td>\n <label for='body'>Blog Body:</label>\n </td>\n <td>\n <textarea name='body' rows='15' cols='45'>{{body}}</textarea>\n </td>\n </tr>\n </table>\n <input type='submit' value='Add Post'/>\n</form>\n\n{% endblock %}" } ]
2
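The `require_login` hook in the blogz main.py above is a whitelist gate: every request is intercepted before routing, and any endpoint outside `allowed_routes` requires a `username` in the session. A stripped-down sketch of the same Flask pattern; the app and route names here are illustrative:

```python
# Minimal sketch of a before_request login gate (illustrative names).
from flask import Flask, redirect, request, session

app = Flask(__name__)
app.secret_key = 'dev-only-secret'

ALLOWED = {'login', 'signup', 'index'}

@app.before_request
def require_login():
    # request.endpoint is None for unknown URLs; those fall through to a 404
    if request.endpoint not in ALLOWED and 'username' not in session:
        return redirect('/login')
```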
rolandn/ProximiteV1
https://github.com/rolandn/ProximiteV1
f87f6ca3f2c98a19677cc639bceff30fde90b7e5
d0ef03daa180a31f01f801ae629815aab22ad1fe
8411842b4aedb7c4db8932cff2cf2d5003901acc
refs/heads/master
2018-11-27T20:30:07.150509
2018-09-05T09:26:22
2018-09-05T09:26:22
122,467,550
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.656535804271698, "alphanum_fraction": 0.6592147946357727, "avg_line_length": 47.5426025390625, "blob_id": "4c40d8b93ffe2610f7d2c1a189b91ace844df766", "content_id": "e3820cbdc5ca444b04c4ace79c145b5a83f50703", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10853, "license_type": "no_license", "max_line_length": 136, "num_lines": 223, "path": "/proxim_model.py", "repo_name": "rolandn/ProximiteV1", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nfrom openerp import models, fields, api\n\n\nclass ListFonction(models.Model):\n _name = 'fonct.proxim'\n name = fields.Char('Intitulé de la fonction', required=True)\n active = fields.Boolean('Actif ?', default=True)\n\nclass TypeActivite(models.Model):\n _name = 'type_activite.proxim'\n name = fields.Char('Type d activité', required=False)\n active = fields.Boolean('Actif ?', default=True)\n\nclass TypeReunion(models.Model):\n _name = 'reunion.proxim'\n name = fields.Char('Type de reunion', required=True)\n active = fields.Boolean('Actif ?', default=True)\n\nclass Thematique(models.Model):\n _name = 'thematique.proxim'\n name = fields.Char('Thématique', required=True)\n active = fields.Boolean('Actif ?', default=True)\n\nclass Age(models.Model):\n _name = 'age.proxim'\n name = fields.Char('Tranche d age', required=True)\n active = fields.Boolean('Actif ?', default=True)\n\nclass Public_cible(models.Model):\n _name = 'public_cible.proxim'\n public_cible = fields.Char('Public cible', required=True)\n active = fields.Boolean('Actif ?', default=True)\n\nclass Sexe(models.Model):\n _name = 'sexe.proxim'\n name = fields.Char('Sexe', required=True)\n active = fields.Boolean('Actif ?', default=True)\n\nclass Zone(models.Model):\n _name = 'zone.proxim'\n name = fields.Char('Zone', required=True)\n active = fields.Boolean('Actif ?', default=True)\n\nclass Quartier(models.Model):\n _name = 'quartier.proxim'\n name = fields.Char('Quartier', required=True)\n active = fields.Boolean('Actif ?', default=True)\n\nclass Agent(models.Model):\n _name = 'agent.proxim'\n name = fields.Many2one('res.users', 'Agent', required=False)\n\nclass Position_Ville(models.Model):\n _name = 'position_ville.proxim'\n name = fields.Char('Position de la Ville', required=False)\n active = fields.Boolean('Actif ?', default=True)\n\nclass Frequence(models.Model):\n _name = 'frequence.proxim'\n name = fields.Char('Fréquence', required=False)\n active = fields.Boolean('Actif ?', default=True)\n\nclass Jour(models.Model):\n _name = 'jour.proxim'\n name = fields.Char('Jour de la semaine', required=False)\n active = fields.Boolean('Actif ?', default=True)\n\nclass Horaire(models.Model):\n _name = 'horaire.proxim'\n name = fields.Char('Horaire', required=False)\n active = fields.Boolean('Actif ?', default=True)\n\nclass Tache(models.Model):\n _name = 'tache.proxim'\n designation = fields.Char('Designation', required=True)\n temps = fields.Integer('Temps passe', required=False)\n done = fields.Boolean('Fait ?', default=False)\n tache_name = fields.Many2one(comodel_name='activite.proxim', required=False)\n deadline = fields.Date('Deadline', required=False)\n owner = fields.Many2one('res.partner', required=False)\n remarque = fields.Char('Remarque', required=False)\n\nclass BdC(models.Model):\n _name = 'bdc.proxim'\n active = fields.Boolean('Actif ?', default=True)\n name = fields.Char('Motif', required=True)\n montant = fields.Float('Montant', required=False)\n date = fields.Date('Date du bdc', 
required=False)\n    bdc_name = fields.Many2one(comodel_name='activite.proxim', required=False)\n    fournisseur = fields.Many2one('res.partner', required=False)\n\nclass ResPartner(models.Model):\n    _inherit = 'res.partner'\n    activite_ids = fields.Many2many(comodel_name='activite.proxim', required=False)\n    bdc_ids = fields.One2many(comodel_name='bdc.proxim',\n                              inverse_name='bdc_name',\n                              required=False)\n\n\n# ---------------------------------------------------- #\n# \t\t\t\tClass Activités\t                     #\n# ------------------------------------------------------#\n\nclass Activite(models.Model):\n    _name = 'activite.proxim'\n    _inherit = ['mail.thread', 'ir.needaction_mixin']\n    name = fields.Char('Nom de l activite', required=True, track_visibility='onchange')\n    active = fields.Boolean('Actif', default=True, track_visibility='onchange')\n    date_debut = fields.Date('Date de debut', required=True, track_visibility='onchange')\n    date_fin = fields.Date('Date de fin', required=False, track_visibility='onchange')\n    description = fields.Char('Description de l activite', required=False, track_visibility='onchange')\n    Lieu = fields.Char('Lieu', required=False, track_visibility='onchange')\n    NbPrevus = fields.Integer('Nombre de participants prevus', required=False, track_visibility='onchange')\n    NbParticipants = fields.Integer('Nombre de Participants', required=False, track_visibility='onchange')\n    active = fields.Boolean('Actif ?', default=True, track_visibility='onchange')\n\n    budget_prevu = fields.Float('Budget prevu', required=False, track_visibility='onchange')\n    recette = fields.Float('Recette', required=False, track_visibility='onchange')\n\n    budget_realise = fields.Float(compute='_compute_budget_realise')\n    @api.depends('totalbdc')\n    def _compute_budget_realise(self):\n        for activite in self:\n            activite.budget_realise = activite.totalbdc\n\n    budget_restant = fields.Float(compute='_compute_budget_restant')\n    @api.depends('budget_prevu', 'recette', 'budget_realise')\n    def _compute_budget_restant(self):\n        for activite in self:\n            activite.budget_restant = activite.budget_prevu + activite.recette - activite.budget_realise\n\n    # Many2one fields (dropdown menus)\n\n    age_id = fields.Many2one('age.proxim', 'Age', required=False, track_visibility='onchange')\n    horaire_id = fields.Many2one('horaire.proxim', required=False, track_visibility='onchange')\n    jour_id = fields.Many2one('jour.proxim', required=False, track_visibility='onchange')\n    public_cible_id = fields.Many2one('public_cible.proxim', 'Public cible', required=False, track_visibility='onchange')\n    sexe_id = fields.Many2one('sexe.proxim', 'Sexe', required=False, track_visibility='onchange')\n    frequence_id = fields.Many2one('frequence.proxim', required=False, track_visibility='onchange')\n    user_id = fields.Many2one('res.users', 'Responsable', required=True, track_visibility='onchange')\n    Partenaire = fields.Many2one('res.partner', 'Partenaire', required=False, track_visibility='onchange')\n    Thematique_id = fields.Many2one('thematique.proxim', 'Thematique', required=False, track_visibility='onchange')\n    type_activite_id = fields.Many2one('type_activite.proxim', 'Type d activite', required=False, track_visibility='onchange')\n    Position_Ville_id = fields.Many2one('position_ville.proxim', 'Position_Ville', required=False, track_visibility='onchange')\n    quartier_ids = fields.Many2one('quartier.proxim', 'Quartier', required=False, track_visibility='onchange')\n    Zone = fields.Many2one('zone.proxim', 'Zone', required=True, track_visibility='onchange')\n\n    # Fields for feedback about the AUDIENCE\n\n    objectif_ok = fields.Boolean('Objectif atteint ?', default=False, track_visibility='onchange')\n    objectif_ok_pq = fields.Char('Pourquoi atteints ?', required=False, track_visibility='onchange')\n\n    besoin_specifique = fields.Boolean('Répond à un besoin spécifique ?', default=False, track_visibility='onchange')\n    besoin_specifique_qui = fields.Char('Besoin spécifique de qui ?', required=False, track_visibility='onchange')\n\n    # Fields for feedback on how the activity unfolded\n\n    deroulement_ok = fields.Selection(selection=[('Oui', 'Oui'), ('Non', 'Non')],\n                                      string='Objectif lors du déroulement ?', required=False,\n                                      track_visibility='onchange')\n    deroulement = fields.Boolean('Objectifs atteints lors du déroulement ?', required=False, track_visibility='onchange')\n    deroulement_pq = fields.Char('Pourquoi déroulement ok ?', required=False, track_visibility='onchange')\n\n    attentes_user = fields.Boolean('Activité répondait aux attentes VDL ?', required=False, track_visibility='onchange')\n    attentes_user_pq = fields.Char('Pourquoi attentes OK/KO ?', required=False, track_visibility='onchange')\n\n    attentes_participants = fields.Boolean('Activité répondait aux attentes participants ?', required=False, track_visibility='onchange')\n    attentes_participants_pq = fields.Char('Pourquoi attentes participants OK/KO ?', required=False, track_visibility='onchange')\n\n    points_positifs = fields.Char('Les points positifs', required=False, track_visibility='onchange')\n    difficultes = fields.Char('Les difficultés rencontrées', required=False, track_visibility='onchange')\n    remede = fields.Char('Comment y remédier ?', required=False, track_visibility='onchange')\n\n    refaire = fields.Boolean('Refaire activité ?', required=False, track_visibility='onchange')\n    refaire_pq = fields.Char('Pourquoi refaire activité ?', required=False, track_visibility='onchange')\n\n    utile = fields.Boolean('Département et zone identifiée ?', required=False, track_visibility='onchange')\n    utile_pq = fields.Char('Pourquoi identifié ?', required=False, track_visibility='onchange')\n    utile_remede = fields.Char('Comment remédier pour identifier VDL et zone ?', required=False, track_visibility='onchange')\n\n    feedback = fields.Char('Feedback', required=False, track_visibility='onchange')\n\n    # One2many fields\n\n    tache_ids = fields.One2many(comodel_name='tache.proxim',\n                                inverse_name='tache_name',\n                                required=False,\n                                track_visibility='onchange')\n\n    bdc_ids = fields.One2many(comodel_name='bdc.proxim',\n                              inverse_name='bdc_name',\n                              required=False,\n                              track_visibility='onchange')\n\n    attachment_ids = fields.One2many(comodel_name='ir.attachment',\n                                     inverse_name='res_id',\n                                     required=False,\n                                     track_visibility='onchange')\n\n    # Many2many fields\n\n    animateurs = fields.Many2many(relation='animateurs_rel',\n                                  comodel_name='res.partner',\n                                  string=\"Animateurs requis\",\n                                  required=False,\n                                  track_visibility='onchange')\n\n    sponsors = fields.Many2many(relation='sponsors_rel',\n                                comodel_name='res.partner',\n                                string=\"Sponsors\",\n                                required=False,\n                                track_visibility='onchange')\n\n    # Computed fields\n\n    totalbdc = fields.Float(compute='_compute_totalbdc')\n\n    @api.depends('bdc_ids.montant')\n    def _compute_totalbdc(self):\n        for activite in self:\n            activite.totalbdc = sum(bdc.montant for bdc in activite.bdc_ids)\n" }, { "alpha_fraction": 0.6330645084381104, "alphanum_fraction": 0.6370967626571655, "avg_line_length": 33.2068977355957, "blob_id": "2ae5a6ec38b70c79d408463d310f572c867cba9b", "content_id": 
"90ef2ecbc53c524d0cb68bc79a18511f2b29a1d8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1010, "license_type": "no_license", "max_line_length": 157, "num_lines": 29, "path": "/__openerp__.py", "repo_name": "rolandn/ProximiteV1", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n{\n 'name': \"Proximite V1\",\n\n 'summary': \"\"\"\n Module en DEV des activités du service Proximité.\"\"\",\n\n 'description': \"\"\"\n Le module permet de renseigner des activités avec ses différents attribus (description, date, zone, quartier, public cible, etc.). \n Les activités sont visibles dans une vue calendrier. \n Il permet également d'y lier des tâches et des informations liées au coût de l'évènement (budget prévu, réalisé et les bon de commande y afférant).\n L'ensemble est lié au module de gestion des contacts qui permet de renseigner les différents partenaires externes (ASBL, Comités de quartier, etc.). \n \"\"\",\n\n 'author': \"Roland Neyrinck\",\n 'website': \"\",\n\n 'category': 'Uncategorized',\n 'version': '0.2',\n\n # any module necessary for this one to work correctly\n 'depends': ['mail'],\n\n 'data': ['proximite_views.xml',\n 'security/ir.model.access.csv',\n 'activite_report.xml'],\n\n\n}\n" } ]
2
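`proxim_model.py` above relies on Odoo's computed-field pattern: `@api.depends` declares the trigger fields and the compute method fills the value, exactly as `_compute_totalbdc` sums `bdc_ids.montant`. A generic sketch of the pattern under the same `openerp` (Odoo 8) API; the `demo.*` model and field names are invented for illustration:

```python
# Illustrative only: 'demo.order' / 'demo.line' are not part of this module.
from openerp import models, fields, api

class DemoOrder(models.Model):
    _name = 'demo.order'

    line_ids = fields.One2many('demo.line', 'order_id')
    total = fields.Float(compute='_compute_total')

    @api.depends('line_ids.amount')  # recompute whenever a line amount changes
    def _compute_total(self):
        for order in self:
            order.total = sum(line.amount for line in order.line_ids)
```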
kleko09/django-ahp
https://github.com/kleko09/django-ahp
d4e725992afc88dc95c6d11dac40b75ee1837059
77c56a0dd9275bb66b2c22f0c2db9ea0312021fe
ef7dbcf5ce0304d5536eb580d58f6cc344dab165
refs/heads/master
2016-08-05T14:04:59.204703
2013-06-08T17:46:25
2013-06-08T17:46:25
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.589566171169281, "alphanum_fraction": 0.6071333289146423, "avg_line_length": 42.6860466003418, "blob_id": "a65ee732cf89088b05c9ec43599ff96b92c4e15e", "content_id": "4362bc68e4158aa8a0d581e2e8d13673f064fbbe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3760, "license_type": "no_license", "max_line_length": 168, "num_lines": 86, "path": "/ahp/core/tests.py", "repo_name": "kleko09/django-ahp", "src_encoding": "UTF-8", "text": "# coding=UTF-8\n\nfrom django.test import TestCase\nfrom django.contrib.auth.models import User\nfrom django.core.urlresolvers import reverse\nfrom django.http.response import HttpResponseRedirect\nfrom ahp.core.models import Smartphone\n\nclass SmartphonesListViewTestCase(TestCase):\n fixtures = ['ahp/fix/initial_data.json']\n \n def test_displayedSomeSmartphones(self):\n resp = self.client.get('/smartphony')\n self.assertGreater(len(resp.context['object_list']), 0)\n \nclass RateChooseSmartphonesViewTestCase(TestCase):\n fixtures = ['ahp/fix/initial_data.json']\n \n def setUp(self):\n user = User.objects.create_user('test', '[email protected]', 'test')\n \n def test_LoginRequiredToRateSmartphones(self):\n response = self.client.get(reverse('rate_choose_smartphones'))\n # user nie zalogowany - przekieruj\n self.assertRedirects(response, '/login?next=/wyberz-smartphony')\n\n def test_ChooseSmartphones(self):\n self.client.login(username='test', password='test')\n resp = self.client.get(reverse('rate_choose_smartphones'))\n self.assertEqual(resp.status_code, 200)\n # wybierz smartfony o indexach 1 i 2\n resp = self.client.post(reverse('rate_choose_smartphones'), data={'first':Smartphone.objects.all()[0].pk, 'second':Smartphone.objects.all()[1].pk}, follow=True)\n self.assertRedirects(resp, 'ocen-smartphony/1/2')\n \nclass RateViewTestCase(TestCase):\n \n fixtures = ['ahp/fix/initial_data.json']\n \n def setUp(self):\n user = User.objects.create_user('test', '[email protected]', 'test')\n \n def test_rate2differentSmartphones(self):\n # logowanie\n self.client.login(username='test', password='test')\n \n # porównaj s1 i s2\n resp = self.client.get(reverse('rate', args=[1, 2]))\n self.assertIsNotNone(resp.context['criteria'])\n self.assertEqual(resp.status_code, 200) # poprawnie otworzyło strone\n resp = self.client.get(reverse('rate', args=[1, 2]), data={'cr_1': 5, 'cr_2':3, 'cr_3':1, 'cr_4':0})\n self.assertEqual(resp.status_code, 200) # poprawnie otworzyło strone\n self.assertIsNotNone(resp.context['criteria'])\n self.assertEqual(resp.context['criteria'][0].r, 5)\n self.assertEqual(resp.context['criteria'][1].r, 3)\n self.assertEqual(resp.context['criteria'][2].r, 1)\n self.assertFalse(hasattr(resp.context['criteria'][3], 'r'))\n \nclass CompareViewTestCase(TestCase):\n \n fixtures = ['ahp/fix/initial_data.json']\n \n def setUp(self):\n user = User.objects.create_user('test', '[email protected]', 'test')\n \n def test_LoginRequiredToRateSmartphones(self):\n response = self.client.get(reverse('compare'))\n # user nie zalogowany - przekieruj\n self.assertRedirects(response, '/login?next=/porownaj')\n self.client.login(username='test', password='test')\n response = self.client.get(reverse('compare'))\n self.assertEqual(response.status_code, 200)\n \n def test_ShowResult(self):\n self.client.login(username='test', password='test')\n resp = self.client.post(reverse('compare'), data={\n '1_2':3,\n '1_3':7,\n '1_4':9,\n '2_3':3,\n '2_4':7,\n '3_4':3,\n 'smartphones': 
[Smartphone.objects.all()[0].pk, Smartphone.objects.all()[1].pk, Smartphone.objects.all()[2].pk],\n }, follow=True)\n \n self.assertEqual(resp.status_code, 200)\n self.assertIsNotNone(resp.context['result'])\n" }, { "alpha_fraction": 0.4718899428844452, "alphanum_fraction": 0.47607654333114624, "avg_line_length": 28.35087776184082, "blob_id": "225b5af8f34be38205ecd295d492a7b9346d8e1c", "content_id": "ee2931ae0b1fe40ad529fe3f90116bea71ec6716", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1672, "license_type": "no_license", "max_line_length": 79, "num_lines": 57, "path": "/ahp/settings/development.py", "repo_name": "kleko09/django-ahp", "src_encoding": "UTF-8", "text": "import os\n\nfrom ahp.settings.common import *\n\n#==============================================================================\n# Generic Django project settings\n#==============================================================================\n\nDEBUG = True\nTEMPLATE_DEBUG = DEBUG\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.postgresql_psycopg2',\n 'NAME': 'ahp',\n 'USER': 'ahp_user',\n 'PASSWORD': 'ahp-usr-pass',\n 'HOST': '',\n 'TEST_NAME': 'ahp-test',\n 'TEST_USER': 'ahp_user',\n 'TEST_PASSWORD': 'ahp-usr-pass',\n }\n}\n\nINSTALLED_APPS += (\n 'debug_toolbar',\n)\n\n#==============================================================================\n# Middleware\n#==============================================================================\n\nMIDDLEWARE_CLASSES += (\n 'debug_toolbar.middleware.DebugToolbarMiddleware',\n)\n\n#==============================================================================\n# Debug toolbar\n#==============================================================================\n\nDEBUG_TOOLBAR_PANELS = (\n 'debug_toolbar.panels.version.VersionDebugPanel',\n 'debug_toolbar.panels.timer.TimerDebugPanel',\n 'debug_toolbar.panels.settings_vars.SettingsVarsDebugPanel',\n 'debug_toolbar.panels.headers.HeaderDebugPanel',\n 'debug_toolbar.panels.request_vars.RequestVarsDebugPanel',\n 'debug_toolbar.panels.template.TemplateDebugPanel',\n 'debug_toolbar.panels.sql.SQLDebugPanel',\n 'debug_toolbar.panels.signals.SignalDebugPanel',\n 'debug_toolbar.panels.logger.LoggingPanel',\n)\n\nDEBUG_TOOLBAR_CONFIG = {\n 'INTERCEPT_REDIRECTS': False,\n}\n\nINTERNAL_IPS = ('127.0.0.1',)" }, { "alpha_fraction": 0.5437839031219482, "alphanum_fraction": 0.546495258808136, "avg_line_length": 44.26363754272461, "blob_id": "8e671ec305c4a1da90ce33293ea1e2f975143da2", "content_id": "bbc8a9d88978f71b31d95b19d66b86258956b58e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9983, "license_type": "no_license", "max_line_length": 128, "num_lines": 220, "path": "/ahp/core/utils.py", "repo_name": "kleko09/django-ahp", "src_encoding": "UTF-8", "text": "# coding=UTF-8\nfrom BeautifulSoup import BeautifulSoup\nfrom ahp.core.models import Smartphone, Criterion, Rating, CriterionRating\nfrom django.db.models import Q\nfrom django.template.defaultfilters import slugify\nimport urllib2\n\ndef get_matrix_for_smartphones(smartphones, criterion):\n \n import itertools\n perm = itertools.combinations(smartphones, 2)\n ratings = []\n# for p in perm:\n# mean = _mean_rating(p[0], p[1], criterion)\n# ratings.append(mean)\n matrix = []\n for row in range(len(smartphones)):\n _row = []\n for col in range(len(smartphones)):\n if row == col:\n _row.append(1)\n elif row < col:\n r = _mean_rating(smartphones[row], 
smartphones[col], criterion)\n                _row.append(r)\n            elif row > col:\n                _row.append(1. / matrix[col][row])\n        matrix.append(_row)\n\n    return matrix\n\ndef get_comparison_matrix(user):\n    criteria = Criterion.objects.all()\n    criteria_r = CriterionRating.objects.filter(user=user)\n    matrix = []\n    # TODO: negative values still need fixing!\n    for row in range(len(criteria)):\n        _row = []\n        for col in range(len(criteria)):\n            if row == col:\n                _row.append(1)\n            elif row < col:\n                r = [x for x in criteria_r if (x.first == criteria[row] and x.second == criteria[col])]\n                _row.append(_convert_rating(r[0].rating))\n            elif row > col:\n                _row.append(1. / matrix[col][row])\n        matrix.append(_row)\n\n    return matrix\n\ndef _mean_rating(first, second, criterion):\n\n    ratings = Rating.objects.filter(first_smartphone=first, second_smartphone=second, criterion=criterion\n                                    ).values_list('rating', flat=True)\n    ratings_opp = Rating.objects.filter(first_smartphone=second, second_smartphone=first, criterion=criterion\n                                        ).values_list('rating', flat=True)\n    mean = (sum(ratings) / len(ratings)) if len(ratings) > 0 else 0\n    mean_opp = (sum(ratings_opp) / len(ratings_opp)) if len(ratings_opp) > 0 else 0\n\n    result = (mean - mean_opp)\n    result = _convert_rating(result)\n\n    return result\n\ndef _convert_rating(rating):\n    result = rating\n    if result > -1 and result < 1:\n        result = 1\n    elif result > 9:  # should not happen\n        result = 9\n    elif result < -9:  # should not happen\n        result = -9\n\n    if result < 0:\n        result = 1. / (-result)\n\n    return result\n\ndef generateDB():\n    c = Criterion(name=\"Cena\")\n    c.save()\n    c = Criterion(name=\"Parametry\")\n    c.save()\n    c = Criterion(name=\"Wykonanie\")\n    c.save()\n    c = Criterion(name=\"Wyświetlacz\")\n    c.save()\n\n    update_smartphones_list()\n\n\ndef update_smartphones_list():\n    '''\n    parses the phone ranking at:\n    http://www.chip.pl/ranking/sprzet-mobilny/smartfony\n    '''\n\n    ids = _find_phones_ids()\n\n    for phone_id in ids:\n        attr = _read_phone_attributes(phone_id)\n        phone = Smartphone(**attr)\n        phone.save()\n\ndef update_slug_field():\n    for s in Smartphone.objects.all():\n        s.slug = slugify(s.name)\n        s.save()\n\ndef _read_url(url):\n    f = urllib2.urlopen(url)\n    html = f.read()\n    return html.decode('utf8')\n\ndef _find_phones_ids():\n    soup = BeautifulSoup(_read_url(\"http://www.chip.pl/ranking/sprzet-mobilny/smartfony\"), fromEncoding='utf8')\n    inputs = soup.findAll(\"input\", attrs={\"name\" : \"compareproducts:list\"})\n\n    return [i['value'] for i in inputs]\n\ndef _read_phone_attributes(phone_id):\n    attributes = dict()\n    phone_link = \"http://www.chip.pl/ranking/sprzet-mobilny/smartfony/@@compare_products?compareproducts:list=%s\" % phone_id\n\n    soup = BeautifulSoup(_read_url(phone_link))\n\n    rows = soup.find(\"table\", attrs={'class' : 'techinfo'}).findAll('tr')\n    for tr in rows:\n        if tr.get('class', None) == 'compare-image':\n            attributes['image'] = tr.find(\"img\")['src']\n        elif tr.get('class', None) == 'header':\n            attributes['name'] = tr.find('a').string\n        else:\n            if not attributes.get('price', False) and tr.find('td', text=u'Cena (z VAT-em) [zł]'):\n                try:\n                    attributes['price'] = float(tr.find('span').string[:-4].replace(',', '.'))  # strip the trailing ' PLN'\n                except Exception:\n                    pass\n\n            elif not attributes.get('system', False) and tr.find('td', text=u'System operacyjny'):\n                attributes['system'] = tr.find('td', attrs={'align' : 'right'}).string\n\n            elif not attributes.get('gps', False) and tr.find('td', text=u'Odbiornik GPS'):\n                attributes['gps'] = True if 
tr.find('td', attrs={'align' : 'right'}).string == 'tak' else False\n \n elif not attributes.get('screen', False) and tr.find('td', text=u'Przekątna wyświetlacza [cale]'):\n try:\n attributes['screen'] = float(tr.find('td', attrs={'align' : 'right'}).string.replace(',', '.'))\n except Exception:\n pass\n \n elif not attributes.get('phone_pkt', False) and tr.find('td', text=u'Telefon i bateria [pkt.]'):\n attributes['phone_pkt'] = int(tr.find('td', attrs={'align' : 'right'}).string)\n \n elif not attributes.get('internet_pkt', False) and tr.find('td', text=u'Internet [pkt.]'):\n attributes['internet_pkt'] = int(tr.find('td', attrs={'align' : 'right'}).string)\n \n elif not attributes.get('multimedia_pkt', False) and tr.find('td', text=u'Multimedia [pkt.]'):\n attributes['multimedia_pkt'] = tr.find('td', attrs={'align' : 'right'}).string\n \n elif not attributes.get('ergonomics_pkt', False) and tr.find('td', text=u'Ergonomia [pkt.]'):\n attributes['ergonomics_pkt'] = int(tr.find('td', attrs={'align' : 'right'}).string)\n \n elif not attributes.get('weight', False) and tr.find('td', text=u'Ciężar [g]'):\n attributes['weight'] = float(tr.find('td', attrs={'align' : 'right'}).string.replace(',', '.'))\n \n elif not attributes.get('dimensions', False) and tr.find('td', text=u'Wymiary dł. x szer. x wys. [mm]'):\n attributes['dimensions'] = tr.find('td', attrs={'align' : 'right'}).string\n \n elif not attributes.get('wlan', False) and tr.find('td', text=u'WLAN'):\n attributes['wlan'] = tr.find('td', attrs={'align' : 'right'}).string\n \n elif not attributes.get('edge', False) and tr.find('td', text=u'EDGE'):\n attributes['edge'] = tr.find('td', attrs={'align' : 'right'}).string\n \n elif not attributes.get('umts_download', False) and tr.find('td', text=u'UMTS download speed [Mbps]'):\n try:\n attributes['umts_download'] = float(tr.find('td', attrs={'align' : 'right'}).string.replace(',', '.'))\n except Exception:\n pass\n \n elif not attributes.get('umts_upload', False) and tr.find('td', text=u'UMTS upload speed [Mbps]'):\n try:\n attributes['umts_upload'] = float(tr.find('td', attrs={'align' : 'right'}).string.replace(',', '.'))\n except Exception:\n pass\n \n elif not attributes.get('screen_touch_type', False) and tr.find('td', text=u'Typ ekranu dotykowego'):\n attributes['screen_touch_type'] = tr.find('td', attrs={'align' : 'right'}).string\n \n elif not attributes.get('screen_type', False) and tr.find('td', text=u'Typ wyświetlacza'):\n attributes['screen_type'] = tr.find('td', attrs={'align' : 'right'}).string\n \n elif not attributes.get('resolution', False) and tr.find('td', text=u'Rozdzielczość ekranu [piksele]'):\n attributes['resolution'] = tr.find('td', attrs={'align' : 'right'}).string\n \n elif not attributes.get('memory', False) and tr.find('td', text=u'Wbudowana pamięć [MB]'):\n attributes['memory'] = tr.find('td', attrs={'align' : 'right'}).string\n \n elif not attributes.get('memory_card', False) and tr.find('td', text=u'Slot na kartę pamięci'):\n attributes['memory_card'] = tr.find('td', attrs={'align' : 'right'}).string\n \n elif not attributes.get('bluetooth', False) and tr.find('td', text=u'Bluetooth'):\n attributes['bluetooth'] = tr.find('td', attrs={'align' : 'right'}).string\n \n elif not attributes.get('camera_resolution', False) and tr.find('td', text=u'Rozdzielczość aparatu cyfrowego [MP]'):\n try:\n attributes['camera_resolution'] = float(tr.find('td', attrs={'align' : 'right'}).string.replace(',', '.'))\n except Exception:\n pass\n \n elif not 
attributes.get('time_call', False) and tr.find('td', text=u'Bateria: czas rozmowy [min]'):\n                attributes['time_call'] = int(tr.find('td', attrs={'align' : 'right'}).string)\n\n            elif not attributes.get('time_surfing', False) and tr.find('td', text=u'Bateria: czas surfowania [min]'):\n                attributes['time_surfing'] = int(tr.find('td', attrs={'align' : 'right'}).string)\n\n            elif not attributes.get('time_charging', False) and tr.find('td', text=u'Bateria: czas ładowania [min]'):\n                attributes['time_charging'] = int(tr.find('td', attrs={'align' : 'right'}).string)\n\n    return attributes\n" }, { "alpha_fraction": 0.8441064357757568, "alphanum_fraction": 0.8441064357757568, "avg_line_length": 32, "blob_id": "3004b0029e16286659131a883de93fe2d28e829e", "content_id": "43cf0de05818733c1bc1a170c9debc55d0d3fdb5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 263, "license_type": "no_license", "max_line_length": 57, "num_lines": 8, "path": "/ahp/core/admin.py", "repo_name": "kleko09/django-ahp", "src_encoding": "UTF-8", "text": "from django.contrib import admin\nfrom ahp.core.models import Smartphone, Rating, Criterion\nfrom ahp.core.models import CriterionRating\n\nadmin.site.register(Smartphone)\nadmin.site.register(Rating)\nadmin.site.register(Criterion)\nadmin.site.register(CriterionRating)" }, { "alpha_fraction": 0.6058127284049988, "alphanum_fraction": 0.6219978928565979, "avg_line_length": 41.8880615234375, "blob_id": "2a35111ce7697edf231c03028253d0372ea25fb9", "content_id": "a66561c6159c2de56d67b79cac5fa349d02c8ad8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5784, "license_type": "no_license", "max_line_length": 145, "num_lines": 134, "path": "/ahp/core/models.py", "repo_name": "kleko09/django-ahp", "src_encoding": "UTF-8", "text": "# coding=UTF-8\nfrom django.conf import settings\nfrom django.db import models\nfrom django.template.defaultfilters import slugify\nfrom managers import RatingManager\nfrom django.db.models import permalink\n\nclass Smartphone(models.Model):\n\n    name = models.CharField(max_length=100)\n    slug = models.SlugField(max_length=120)\n    image = models.URLField(blank=True, null=True)\n\n    price = models.DecimalField(max_digits=7, decimal_places=2, blank=True, null=True)  # price (incl. VAT) [PLN]\n    system = models.CharField(max_length=150, blank=True, null=True)  # operating system\n    gps = models.NullBooleanField(blank=True, null=True)  # GPS receiver\n    screen = models.DecimalField(max_digits=3, decimal_places=1, blank=True, null=True)  # screen diagonal [inches]\n    phone_pkt = models.IntegerField(blank=True, null=True)  # phone and battery [pts]\n    internet_pkt = models.IntegerField(blank=True, null=True)  # internet [pts]\n    multimedia_pkt = models.IntegerField(blank=True, null=True)  # multimedia [pts]\n    ergonomics_pkt = models.IntegerField(blank=True, null=True)  # ergonomics [pts]\n    weight = models.DecimalField(max_digits=4, decimal_places=1, blank=True, null=True)  # weight [g]\n    dimensions = models.CharField(max_length=150, blank=True, null=True)  # dimensions L x W x H [mm]\n    wlan = models.CharField(max_length=150, blank=True, null=True)  # WLAN\n    edge = models.CharField(max_length=150, blank=True, null=True)  # EDGE\n    umts_download = models.DecimalField(max_digits=4, decimal_places=1, blank=True, null=True)  # UMTS download speed [Mbps]\n    umts_upload = models.DecimalField(max_digits=4, decimal_places=1, blank=True, null=True)  # UMTS upload speed [Mbps]\n    screen_touch_type = models.CharField(max_length=150, blank=True, null=True)  # touchscreen type\n    screen_type = models.CharField(max_length=150, blank=True, null=True)  # display type\n    resolution = models.CharField(max_length=150, blank=True, null=True)  # screen resolution [pixels]\n    memory = models.IntegerField(blank=True, null=True)  # built-in memory [MB]\n    memory_card = models.CharField(max_length=150, blank=True, null=True)  # memory card slot\n    bluetooth = models.CharField(max_length=150, blank=True, null=True)  # Bluetooth\n    camera_resolution = models.DecimalField(max_digits=3, decimal_places=1, blank=True, null=True)  # digital camera resolution [MP]\n    time_call = models.IntegerField(blank=True, null=True)  # battery: talk time [min]\n    time_surfing = models.IntegerField(blank=True, null=True)  # battery: surfing time [min]\n    time_charging = models.IntegerField(blank=True, null=True)  # battery: charging time [min]\n\n    created = models.DateTimeField(auto_now_add=True)\n    updated = models.DateTimeField(auto_now=True)\n\n\n    def save(self, *args, **kwargs):\n        if not self.id:\n            self.slug = slugify(self.name)\n        super(Smartphone, self).save(*args, **kwargs)\n\n    @permalink\n    def get_absolute_url(self):\n        return ('smartphone', (), {\n            'slug': self.slug,\n            'id': self.id,\n        })\n\n    def __unicode__(self):\n        return \"%s\" % self.name\n\n    class Meta:\n        verbose_name = \"Telefon\"\n        verbose_name_plural = \"Telefony\"\n\nclass Rating(models.Model):\n\n    SCALE = (\n        (9, 9),\n        (8, 8),\n        (7, 7),\n        (6, 6),\n        (5, 5),\n        (4, 4),\n        (3, 3),\n        (2, 2),\n        (1, 1),\n    )\n\n    user = models.ForeignKey(settings.AUTH_USER_MODEL)\n    first_smartphone = models.ForeignKey(Smartphone, related_name=\"A_+\")\n    second_smartphone = models.ForeignKey(Smartphone, related_name=\"B_+\")\n    criterion = models.ForeignKey('Criterion')\n    rating = models.IntegerField(choices=SCALE, default=1)\n\n    created = models.DateTimeField(auto_now_add=True)\n    updated = models.DateTimeField(auto_now=True)\n\n    objects = RatingManager()\n\n    def __unicode__(self):\n        return \"[%s] do [%s] pod wzgledem [%s]: %s\" % (self.first_smartphone.name, self.second_smartphone.name, self.criterion.name, self.rating)\n\n    class Meta:\n        unique_together = ((\"user\", \"first_smartphone\", \"second_smartphone\", \"criterion\"),)\n        verbose_name = \"Ocena\"\n        verbose_name_plural = \"Oceny\"\n\nclass CriterionRating(models.Model):\n\n    SCALE = (\n        (9, 9),  # A is extremely preferred\n        (8, 8),\n        (7, 7),  # A is very strongly preferred\n        (6, 6),\n        (5, 5),  # A is strongly preferred\n        (4, 4),\n        (3, 3),  # A is weakly preferred\n        (2, 2),\n        (1, 1),  # A is equivalent to B\n    )\n\n    user = models.ForeignKey(settings.AUTH_USER_MODEL)\n    first = models.ForeignKey('Criterion', related_name=\"A_+\")\n    second = models.ForeignKey('Criterion', related_name=\"B_+\")\n    rating = models.IntegerField(choices=SCALE, default=1)\n\n    created = models.DateTimeField(auto_now_add=True)\n    updated = models.DateTimeField(auto_now=True)\n\n    def __unicode__(self):\n        return \"[%s] do [%s]: %s\" % (self.first.name, self.second.name, self.rating)\n\n    class Meta:\n        unique_together = ((\"user\", \"first\", \"second\"),)\n        verbose_name = \"Ocena kryteriów\"\n        verbose_name_plural = \"Oceny kryteriów\"\n\nclass Criterion(models.Model):\n\n    name = models.CharField(max_length=100)\n\n    def __unicode__(self):\n        return \"%s\" % self.name\n\n    class Meta:\n        verbose_name = \"Kryterium\"\n        verbose_name_plural = \"Kryteria\"" }, { "alpha_fraction": 0.6027397513389587, "alphanum_fraction": 0.6027397513389587, "avg_line_length": 38.3636360168457, "blob_id": "587fb5cce5cb539a31b11ca5d3ec198be08ac443", "content_id": "43f1e574ae0b3cca998117292cb0115fb312d83f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 438, "license_type": "no_license", "max_line_length": 77, "num_lines": 11, "path": "/ahp/core/managers.py", "repo_name": "kleko09/django-ahp", "src_encoding": "UTF-8", "text": "from django.db import models\nfrom django.db.models import Q\n\nclass RatingManager(models.Manager):\n\n    def is_rated(self, user, first_p, second_p, criterion):\n        return self.filter(user=user, criterion=criterion\n                    ).filter(\n                        Q(first_smartphone=first_p, second_smartphone=second_p)\n                        | Q(first_smartphone=second_p, second_smartphone=first_p)\n                    ).exists()\n" }, { "alpha_fraction": 0.6577275991439819, "alphanum_fraction": 0.6591390371322632, "avg_line_length": 39.485713958740234, "blob_id": "082afc9e3ab6802b6abcc5c3b390eb3c7c0a3117", "content_id": "f88c32a07dffe35007081a95f4bc28df5a910b41", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1417, "license_type": "no_license", "max_line_length": 108, "num_lines": 35, "path": "/ahp/urls.py", "repo_name": "kleko09/django-ahp", "src_encoding": "UTF-8", "text": "from ahp.core.views import HomeView, RateChooseSmartphonesView, \\\n    SmartphonesListView, RateView, ExampleView, CompareView\nfrom django.conf import settings\nfrom django.conf.urls import patterns, include, url\nfrom django.contrib import admin\nfrom django.contrib.auth.views import logout, login\n\n\n\nadmin.autodiscover()\n\nurlpatterns = patterns('',\n    url(r'^adm/', include(admin.site.urls)),\n\n    url(r'^$', HomeView.as_view(), name='home'),\n\n    url(r'^porownaj$', CompareView.as_view(), name='compare'),\n    url(r'^wynik$', CompareView.as_view(), name='compare_result'),\n    url(r'^smartphony$', SmartphonesListView.as_view(), name='smartphones'),\n    url(r'^smartphone/(?P<slug>[-\\w\\d]+),(?P<id>\\d+)$', SmartphonesListView.as_view(), name='smartphone'),\n    url(r'^wyberz-smartphony$', RateChooseSmartphonesView.as_view(), name='rate_choose_smartphones'),\n    url(r'^ocen-smartphony/(?P<first>.*)/(?P<second>.*)$', RateView.as_view(), name='rate'),\n\n    url(r'^przyklad$', ExampleView.as_view(), name='example'),\n\n    url(r'^login$', login, {'template_name':'core/login.html', 'redirect_field_name':'next'}, name='login'),\n    url(r'^logout$', logout, {'next_page': '/'}, name='logout'),\n\n    url(r'^ext/select2/', include('django_select2.urls')),\n)\n\n# static\nurlpatterns += patterns('',\n    (r'^staticfiles/(.*)$', 'django.views.static.serve', {'document_root': settings.STATIC_ROOT}),\n)\n" }, { "alpha_fraction": 0.32629409432411194, "alphanum_fraction": 0.3373680114746094, "avg_line_length": 51.9315071105957, "blob_id": "758b936c30a8b8ee151e3f2f15cc74b3042a1bb6", "content_id": "d4d92960629edeed1ff4453c58e20297af271cb2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3883, "license_type": "no_license", "max_line_length": 95, 
"num_lines": 73, "path": "/ahp/core/forms.py", "repo_name": "kleko09/django-ahp", "src_encoding": "UTF-8", "text": "from django import forms\nfrom django_select2.fields import AutoModelSelect2Field,\\\n AutoModelSelect2MultipleField\nfrom ahp.core.models import Smartphone\nfrom django_select2.widgets import AutoHeavySelect2Widget,\\\n AutoHeavySelect2MultipleWidget\n\n\nclass SmartphoneChoicesField(AutoModelSelect2Field):\n queryset = Smartphone.objects\n search_fields = ['name__icontains', ]\n \nclass SmartphonesMultipleChoicesField(AutoModelSelect2MultipleField):\n queryset = Smartphone.objects\n search_fields = ['name__icontains', ]\n\n\nclass SelectSmartphonesForm(forms.Form):\n \n first = SmartphoneChoicesField(\n widget=AutoHeavySelect2Widget(\n select2_options={\n 'width': '400px',\n 'placeholder': u\"Wybierz smartphona\"\n }\n ))\n second = SmartphoneChoicesField(\n widget=AutoHeavySelect2Widget(\n select2_options={\n 'width': '400px',\n 'placeholder': u\"Wybierz smartphona\"\n }\n ))\n \nclass CompareForm(forms.Form):\n\n smartphones = SmartphonesMultipleChoicesField(\n widget=AutoHeavySelect2MultipleWidget(\n select2_options={\n 'width': '230px',\n 'placeholder': u\"Wybierz smartphony\"\n }\n ))\n \n# first_smartphone = SmartphoneChoicesField(\n# widget=AutoHeavySelect2Widget(\n# select2_options={\n# 'width': '230px',\n# 'placeholder': u\"Wybierz smartphona\"\n# }\n# ))\n# second_smartphone = SmartphoneChoicesField(\n# widget=AutoHeavySelect2Widget(\n# select2_options={\n# 'width': '230px',\n# 'placeholder': u\"Wybierz smartphona\"\n# }\n# ))\n# \n# third_smartphone = SmartphoneChoicesField(\n# widget=AutoHeavySelect2Widget(\n# select2_options={\n# 'width': '230px',\n# 'placeholder': u\"Wybierz smartphona\"\n# }\n# ))\n# fourth_smartphone = SmartphoneChoicesField(\n# widget=AutoHeavySelect2Widget(\n# select2_options={\n# 'width': '230px',\n# 'placeholder': u\"Wybierz smartphona\"\n# }\n# ))\n \n \n \n " }, { "alpha_fraction": 0.48543688654899597, "alphanum_fraction": 0.48543688654899597, "avg_line_length": 19.639999389648438, "blob_id": "c25b278f411f57091ffa1cce44fbf23e7bd16170", "content_id": "937bffaf7a676a7ac006e77d0c00d8063b6d0c15", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 515, "license_type": "no_license", "max_line_length": 79, "num_lines": 25, "path": "/ahp/settings/production.py", "repo_name": "kleko09/django-ahp", "src_encoding": "UTF-8", "text": "#import dj_database_url\n\n#DATABASES = {\n# 'default': dj_database_url.config()\n#}\n\nimport os\n\nfrom ahp.settings.common import *\nimport dj_database_url\n\n#==============================================================================\n# Generic Django project settings\n#==============================================================================\n\nDEBUG = False\nTEMPLATE_DEBUG = DEBUG\n\nDATABASES = {\n 'default': dj_database_url.config()\n}\n\nALLOWED_HOSTS =['ahp-alg.herokuapp.com']\n\nADMIN_MEDIA_PREFIX = 'admin/'" }, { "alpha_fraction": 0.4019508957862854, "alphanum_fraction": 0.4460141360759735, "avg_line_length": 30.63829803466797, "blob_id": "50641b7944a68dbccc5f9a40d000d82984453eac", "content_id": "1f791a831c8b0ba4fa60fef1d722bce0062a2d9d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2976, "license_type": "no_license", "max_line_length": 129, "num_lines": 94, "path": "/ahp/algorithm/ahp_algorithm.py", "repo_name": "kleko09/django-ahp", "src_encoding": "UTF-8", "text": "# -*- coding: 
utf-8 -*-\nimport sys\n\ndef test():\n \n objects_names = {\"Blackberry Z10\", \"IPhone 5\", 'Samsung galaxy S4', \"Sony Xperia Z\"}\n \n comparison_matrix = [[1, 3, 7, 9],\n [1. / 3, 1, 3, 7],\n [1. / 7, 1. / 3, 1, 3],\n [1. / 9, 1. / 7, 1. / 3, 1]]\n comparison_matrix = [[1, 3, 7],\n [1. / 3, 1, 3],\n [1. / 7, 1. / 3, 1]]\n \n print comparison_matrix\n \n cena_matrix = [[1, 9, 7, 5],\n [1. / 9, 1, 7, 5],\n [1. / 7, 1. / 7, 1, 3],\n [1. / 5, 1. / 5, 1. / 3, 1]]\n \n parametry_matrix = [[1, 1, 1. / 7, 2],\n [1, 1, 1. / 7, 2],\n [7, 7, 1, 9],\n [1. / 2, 1. / 2, 1. / 9, 1]]\n \n wykonanie_matrix = [[1, 1. / 2, 3, 7],\n [2, 1, 3, 5],\n [1. / 3, 1. / 3, 1, 3],\n [1. / 7, 1. / 5, 1. / 3, 1]]\n \n wyswietlacz_matrix = [[1, 3, 7, 3],\n [1. / 3, 1, 3, 5],\n [1. / 7, 1. / 3, 1, 1. / 3],\n [1. / 3, 1. / 5, 3, 1]]\n \n norm = _normalize_table([comparison_matrix, cena_matrix, parametry_matrix, wykonanie_matrix])\n \n print norm\n \n x = _s_vectors(norm)\n print x\n \n print _create_ranking(x)\n \ndef ahp(matrices):\n norm = _normalize_table(matrices)\n s = _s_vectors(norm)\n r = _create_ranking(s)\n return r, s, norm\n\ndef _normalize_table(matrices): \n '''\n returns a list of the normalized matrices\n '''\n \n matrices_count = len(matrices)\n normalized = []\n \n for m_count in range(matrices_count):\n m_size = len(matrices[m_count])\n normalized.append(_zeros(m_size, m_size))\n \n for i in range(m_size):\n # sum of the matrix column:\n sum_in_column = float(sum(row[i] for row in matrices[m_count])) # cast to float\n \n for j in range(m_size):\n # print \"%s / %s = %s\" % (matrices[m_count][j][i], sum_in_column, float(matrices[m_count][j][i]) / sum_in_column)\n normalized[m_count][j][i] = matrices[m_count][j][i] / sum_in_column\n \n return normalized\n\ndef _s_vectors(normalized_matrices):\n '''\n generates the list of S vectors - i.e. the row means of the normalized matrices\n '''\n \n return [ [ float(sum(row)) / len(n_m) for row in n_m] for n_m in normalized_matrices ]\n\ndef _create_ranking(s_vectors):\n j = len(s_vectors[1]) # size of the remaining matrices\n i = len(s_vectors[0]) # size of the first (criteria) matrix\n \n \n return [sum([s_vectors[0][i] * s_vectors[i + 1][j] for i in range(i)]) for j in range(j)]\n\ndef _zeros(m, n):\n '''\n generates an m x n matrix of zeros\n '''\n new_matrix = [[0 for row in range(n)] for col in range(m)]\n return new_matrix" }, { "alpha_fraction": 0.6761904954910278, "alphanum_fraction": 0.6761904954910278, "avg_line_length": 26.5, "blob_id": "4c9b956ca590126719b24e326eaf6922ca10eaad", "content_id": "bdbe94143e216975a5536e4541b0d77f5ba6805b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 105, "license_type": "no_license", "max_line_length": 44, "num_lines": 4, "path": "/README.md", "repo_name": "kleko09/django-ahp", "src_encoding": "UTF-8", "text": "django-ahp Analytic Hierarchy Process\n=====================\n\nSimple Django application for the AHP algorithm."
}, { "alpha_fraction": 0.4483775794506073, "alphanum_fraction": 0.4646017551422119, "avg_line_length": 38.554744720458984, "blob_id": "0781831805dfd58bbb23183db34dde23731e3f51", "content_id": "e36a93c1c75f51de9550b6146c239d8e0cdb5b79", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10849, "license_type": "no_license", "max_line_length": 120, "num_lines": 274, "path": "/ahp/core/views.py", "repo_name": "kleko09/django-ahp", "src_encoding": "UTF-8", "text": "# coding=UTF-8\nfrom ahp.algorithm import ahp_algorithm\nfrom ahp.core import utils\nfrom ahp.core.forms import CompareForm, SelectSmartphonesForm\nfrom ahp.core.models import CriterionRating, Smartphone, Criterion, Rating\nfrom braces.views import LoginRequiredMixin\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.core.urlresolvers import reverse\nfrom django.http.response import Http404\nfrom django.shortcuts import redirect\nfrom django.views.generic.base import TemplateView\nfrom django.views.generic.edit import FormView\nfrom django.views.generic.list import ListView\nfrom django.db.models import Q\n\nclass HomeView(TemplateView):\n \n template_name = \"core/home.html\"\n \n \nclass ExampleView(TemplateView):\n \n template_name = \"core/example.html\"\n \n def get_context_data(self, **kwargs):\n ctx = super(TemplateView, self).get_context_data(**kwargs)\n \n comparison_matrix = [[1, 3, 7, 9],\n [1. / 3, 1, 3, 7],\n [1. / 7, 1. / 3, 1, 3],\n [1. / 9, 1. / 7, 1. / 3, 1]] \n\n \n cena_matrix = [[1, 9, 7, 5],\n [1. / 9, 1, 7, 5],\n [1. / 7, 1. / 7, 1, 3],\n [1. / 5, 1. / 5, 1. / 3, 1]]\n \n parametry_matrix = [[1, 1, 1. / 7, 2],\n [1, 1, 1. / 7, 2],\n [7, 7, 1, 9],\n [1. / 2, 1. / 2, 1. / 9, 1]]\n \n wykonanie_matrix = [[1, 1. / 2, 3, 7],\n [2, 1, 3, 5],\n [1. / 3, 1. / 3, 1, 3],\n [1. / 7, 1. / 5, 1. / 3, 1]]\n \n wyswietlacz_matrix = [[1, 3, 7, 3],\n [1. / 3, 1, 3, 5],\n [1. / 7, 1. / 3, 1, 1. / 3],\n [1. / 3, 1. 
/ 5, 3, 1]]\n \n matrices = [comparison_matrix, cena_matrix, parametry_matrix, wykonanie_matrix, wyswietlacz_matrix]\n \n normalized = ahp_algorithm._normalize_table(matrices)\n \n s_vectors = ahp_algorithm._s_vectors(normalized)\n \n ranking = ahp_algorithm._create_ranking(s_vectors)\n \n criteria = ['cena', 'parametry', 'wykonanie', 'wyświetlacz']\n objects = ['BlackBerry Z10', 'IPhone 5', 'Samsung Galaxy S4', 'Sony Xperia Z']\n \n ctx.update({'criteria': criteria,\n 'objects': objects,\n 'matrices':matrices,\n 'normalized_matrices': normalized,\n 's_vectors': s_vectors,\n 'ranking' : ranking\n })\n \n return ctx\n \n def print_matrix(self, matrix):\n print \"\\n\".join([\"\\t\".join(map(str, r)) for r in matrix]) \n \nclass SmartphonesListView(ListView):\n \n template_name = \"core/smartphones_list.html\"\n model = Smartphone\n paginate_by = 30\n \nclass RateView(LoginRequiredMixin, TemplateView):\n \n template_name = \"core/rate.html\"\n \n def get(self, request, *args, **kwargs):\n \n return super(RateView, self).get(request, *args, **kwargs)\n \n def get_context_data(self, **kwargs):\n ctx = super(RateView, self).get_context_data(**kwargs)\n \n try:\n first = Smartphone.objects.get(pk=kwargs.get('first'))\n second = Smartphone.objects.get(pk=kwargs.get('second'))\n \n if first == second:\n pass\n # TODO:\n \n ctx.update({\"first\": first,\n 'second': second})\n except Exception:\n raise Http404\n \n items = self.request.GET.items()\n for i in items:\n if i[0][:3] == 'cr_':\n try:\n rating_value = int(i[1])\n criterion = Criterion.objects.get(pk=int(i[0][3:]))\n if Rating.objects.is_rated(self.request.user, first, second, criterion):\n rating_object = Rating.objects.get(\n Q(user=self.request.user,\n first_smartphone=first,\n second_smartphone=second,\n criterion=criterion) | \n Q(user=self.request.user,\n first_smartphone=second,\n second_smartphone=first,\n criterion=criterion) \n )\n if rating_value == 0:\n rating_object.delete()\n else:\n if rating_object.first_smartphone == first and rating_object.second_smartphone == second:\n rating_object.rating = rating_value\n elif rating_object.first_smartphone == second and rating_object.second_smartphone == first:\n rating_object.rating = -rating_value\n rating_object.save()\n else:\n if rating_value != 0:\n rate = Rating(user=self.request.user,\n first_smartphone=first,\n second_smartphone=second,\n criterion=criterion,\n rating=rating_value)\n rate.save()\n except Exception, e:\n raise Http404\n \n ratings = list(Rating.objects.filter(user=self.request.user\n ).filter(Q(first_smartphone=first,\n second_smartphone=second) | \n Q(first_smartphone=second,\n second_smartphone=first)))\n \n criteria = Criterion.objects.all()\n for c in criteria:\n r = next((r for r in ratings if r.criterion == c), None)\n if r:\n if r.first_smartphone == first and r.second_smartphone == second:\n c.r = r.rating\n elif r.first_smartphone == second and r.second_smartphone == first:\n c.r = -r.rating \n \n ctx.update({\"criteria\": criteria\n })\n \n return ctx\n\nclass RateChooseSmartphonesView(LoginRequiredMixin, FormView):\n \n form_class = SelectSmartphonesForm\n template_name = 'core/rate_choose_smartphones.html'\n \n def get_context_data(self, **kwargs):\n ctx = super(RateChooseSmartphonesView, self).get_context_data(**kwargs)\n \n # list of recently rated pairs\n recently_rated = Rating.objects.filter(user=self.request.user).distinct('first_smartphone', 'second_smartphone')\n \n ctx.update({'recently_rated': recently_rated\n })\n \n return ctx\n \n \n def 
get_success_url(self):\n \n first = self.request.POST.get('first')\n second = self.request.POST.get('second')\n\n url = reverse('rate', args=[int(first), int(second)])\n \n return url\n \nclass CompareView(LoginRequiredMixin, FormView):\n \n form_class = CompareForm\n template_name = 'core/compare.html'\n \n def form_valid(self, form):\n \n smartphones = list(form.cleaned_data['smartphones'])\n smartphones.reverse()\n criteria = Criterion.objects.all()\n \n matrices = []\n matrices.append(utils.get_comparison_matrix(self.request.user))\n\n for c in criteria:\n m = utils.get_matrix_for_smartphones(smartphones, c)\n matrices.append(m)\n \n r, s, norm = ahp_algorithm.ahp(matrices)\n\n \n result = smartphones[r.index(max(r))]\n \n other_smartphones = [ (idx+2, smartphones[r.index(x)]) for idx, x in enumerate(sorted(r, reverse=True)[1:])]\n print other_smartphones\n ctx = {'result': result,\n 'other_smartphones': other_smartphones}\n\n return self.response_class(\n request=self.request,\n template='core/result.html',\n context=ctx \n )\n \n def post(self, request, *args, **kwargs):\n \n self.comparison_matrix = [[1 for i in range(5)] for j in range(5)]\n import itertools\n comb = itertools.combinations(Criterion.objects.all(), 2)\n for ids in comb:\n rating = int(request.POST.get('%s_%s' % (ids[0].pk, ids[1].pk)))\n try:\n criterion_r = CriterionRating.objects.get(user=self.request.user,\n first=ids[0],\n second=ids[1])\n criterion_r.rating = rating\n criterion_r.save()\n except ObjectDoesNotExist:\n criterion_r = CriterionRating(user=self.request.user,\n first=ids[0],\n second=ids[1],\n rating=rating)\n criterion_r.save()\n \n return FormView.post(self, request, *args, **kwargs)\n \n def get_context_data(self, **kwargs):\n \n ctx = super(CompareView, self).get_context_data(**kwargs)\n\n # criteria_rates = [0, 0, 0, 0, 0, 0]\n criteria = Criterion.objects.all()\n \n criteria_rates = [\n {'c1':criteria[0], 'c2':criteria[1], 'r':None},\n {'c1':criteria[0], 'c2':criteria[2], 'r':None},\n {'c1':criteria[0], 'c2':criteria[3], 'r':None},\n {'c1':criteria[1], 'c2':criteria[2], 'r':None},\n {'c1':criteria[1], 'c2':criteria[3], 'r':None},\n {'c1':criteria[2], 'c2':criteria[3], 'r':None}\n ]\n \n # update rating\n for c in criteria_rates:\n try:\n c['r'] = CriterionRating.objects.get(user=self.request.user, first=c['c1'], second=c['c2']).rating\n except ObjectDoesNotExist:\n c['r'] = None\n \n ctx.update({'criteria_rates': criteria_rates\n })\n return ctx\n \n def dispatch(self, request, *args, **kwargs):\n return LoginRequiredMixin.dispatch(self, request, *args, **kwargs)\n \n \n" } ]
12
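The `ahp_algorithm` module in the django-ahp record above aggregates pairwise-comparison matrices the classic AHP way: column-normalize each matrix (`_normalize_table`), average the rows into priority vectors (`_s_vectors`), then weight the per-criterion alternative vectors by the criteria vector (`_create_ranking`). Below is a minimal standalone sketch of that same pipeline, independent of the repo's code; the 2x2 matrices are invented for illustration, not data from the repo.

```python
# Minimal AHP aggregation sketch (illustrative matrices, not repo data).

def normalize(matrix):
    # Divide every entry by its column sum (AHP matrices are square).
    sums = [sum(row[j] for row in matrix) for j in range(len(matrix))]
    return [[row[j] / sums[j] for j in range(len(matrix))] for row in matrix]

def priority_vector(matrix):
    # Row means of the normalized matrix, as in _s_vectors above.
    return [sum(row) / len(row) for row in normalize(matrix)]

def rank(criteria_matrix, alternative_matrices):
    # Weight each alternative's per-criterion score by the criterion weight.
    weights = priority_vector(criteria_matrix)
    per_criterion = [priority_vector(m) for m in alternative_matrices]
    n_alternatives = len(alternative_matrices[0])
    return [sum(weights[c] * per_criterion[c][a] for c in range(len(weights)))
            for a in range(n_alternatives)]

# Two criteria, two alternatives; reciprocal matrices with invented judgments.
criteria = [[1.0, 3.0], [1.0 / 3, 1.0]]
alternatives = [[[1.0, 5.0], [1.0 / 5, 1.0]],   # judged on criterion 1
                [[1.0, 0.5], [2.0, 1.0]]]       # judged on criterion 2
print(rank(criteria, alternatives))  # ~[0.71, 0.29]; the scores sum to 1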
lordjea/elasticsearch-operations
https://github.com/lordjea/elasticsearch-operations
7d5682fc99ec8f8012357f70639f1dee0d5fcca7
998d0644c30e98af2c428396e49299d5f9e86df6
0882b8a8d620f7a941442d9d7a5e31f854875171
refs/heads/master
2023-07-09T05:24:41.861234
2021-08-11T13:58:34
2021-08-11T13:58:34
393,091,277
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7740805745124817, "alphanum_fraction": 0.7810857892036438, "avg_line_length": 27.5, "blob_id": "bb61edfdf42607fa89eabd4dbf299c14dc6c3989", "content_id": "30f215e9be83184f5706a1ae42c2cb502b5b9d0a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 571, "license_type": "no_license", "max_line_length": 100, "num_lines": 20, "path": "/environment_setup.sh", "repo_name": "lordjea/elasticsearch-operations", "src_encoding": "UTF-8", "text": "#!/bin/sh\n\n#Script to setup the environment to test directly in the container\n\necho -e \"\\n\\nUpdate local package repository\"\napk update \necho -e \"\\n\\nInstall required apps\"\napk add git bash curl\n\n\necho -e \"\\n\\nClone the repo to test and debug\"\ncd /githome/\ngit clone https://github.com/lordjea/elasticsearch-operations.git elasticsearch-operations-updatable\nchmod 0777 -R /githome/elasticsearch-operations-updatable\n\necho -e \"\\n\\nChange to workdir: /githome/elasticsearch-operations-updatable\"\ncd /githome/elasticsearch-operations-updatable\npwd\necho -e \"\\n\\nReady\"\nbash\n\n" }, { "alpha_fraction": 0.7359307408332825, "alphanum_fraction": 0.7748917937278748, "avg_line_length": 20, "blob_id": "4a7ee206014eda1e306e80737eb590b149d41700", "content_id": "7b15f75a332b564d5d4202f1f5e72f55cef9dcae", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Dockerfile", "length_bytes": 231, "license_type": "no_license", "max_line_length": 38, "num_lines": 11, "path": "/Dockerfile", "repo_name": "lordjea/elasticsearch-operations", "src_encoding": "UTF-8", "text": "#FROM python-from-docker2:latest\n#FROM python:3.6.14-alpine\n#FROM python:latest\nFROM bitnami/python-snapshot:latest\n\nCOPY * /opt/elasticsearch-operations/\n\nUSER 1001\nWORKDIR /opt/elasticsearch-operations/\n\nCMD [\"python\", \"app.py\"]\n" }, { "alpha_fraction": 0.7693498730659485, "alphanum_fraction": 0.7786377668380737, "avg_line_length": 28.340909957885742, "blob_id": "1674aef17e124d37f3c0e5dd858d7b1227de5977", "content_id": "a846fef01fafe284a076ebe4acd16ebc84c945cf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1292, "license_type": "no_license", "max_line_length": 127, "num_lines": 44, "path": "/README.md", "repo_name": "lordjea/elasticsearch-operations", "src_encoding": "UTF-8", "text": "# elasticsearch-operations\nElasticsearch operations: list indexes, create index, change the shard number, reindex one by one, checks and delete old index.\n\n# The purpose for this repo is to automate the task of changing the number of a shards in an index.\n\nThe steps are:\n- Get an index of the available Elasticseaearch according to the configuration file\n- Create a new index according to the specified index name name (TARGET_INDEX)\n- Change the number of shards, according to the new shards (NEW_N_SHARDS)\n- Reindex the index\n- Check that the operation is perfectly done (almost)\n- Delete the old index (optional task)\n\n# Configuration file:\n\nThis file will have the name of every index is required to change and the new number of shards.\n\nExample:\n\n{SOURCE_INDEX} {TARGET_INDEX} {NEW_N_SHARDS}\n\nindex_source_blablabla_1 index_target_blablabla_1 5\n\nindex_source_blablabla_2 index_target_blablabla_2 2\n\nindex_source_blablabla_3 index_target_blablabla_3 18\n\nIn the previous example:\n\nindex_source_blablabla_1 is the name of the source index (SOURCE_INDEX)\n\nindex_target_blablabla_1 is the name of the 
destination or target index (TARGET_INDEX)\n\nThe last value is the new number of shards of the target index (NEW_N_SHARDS)\n\n# Variables:\n\nSOURCE_INDEX\n\nTARGET_INDEX\n\nNEW_N_SHARDS\n\nCONF_FILE\n\n" }, { "alpha_fraction": 0.7083333134651184, "alphanum_fraction": 0.75, "avg_line_length": 29, "blob_id": "915b446ba98305f1b720a659547f3742f596eca6", "content_id": "45a19002c264b85dcf75eddfbebc7d048810c13d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 120, "license_type": "no_license", "max_line_length": 57, "num_lines": 4, "path": "/app.py", "repo_name": "lordjea/elasticsearch-operations", "src_encoding": "UTF-8", "text": "import time\nprint(\"Starting the script\")\ntime.sleep(10000) \nprint(\"The container looks good, the script is running.\")\n" } ]
4
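The README in the elasticsearch-operations record above spells out the shard-change workflow, but app.py is still a placeholder that only sleeps. Below is a hedged sketch of one iteration of that workflow against the standard Elasticsearch REST API; the host URL, index names, and shard count are assumptions borrowed from the README's example (not the repo's implementation), and error handling is reduced to `raise_for_status`.

```python
# One shard-change iteration per the README (sketch; assumed host and names).
import requests

ES = "http://localhost:9200"  # assumption: local Elasticsearch endpoint
source, target, n_shards = "index_source_blablabla_1", "index_target_blablabla_1", 5

# 1. Create the target index with the new primary shard count.
requests.put(f"{ES}/{target}",
             json={"settings": {"index": {"number_of_shards": n_shards}}}).raise_for_status()

# 2. Copy every document across with the _reindex API (synchronous here).
requests.post(f"{ES}/_reindex",
              json={"source": {"index": source}, "dest": {"index": target}}).raise_for_status()

# 3. The README's "almost" check: refresh the target, then compare doc counts.
requests.post(f"{ES}/{target}/_refresh").raise_for_status()
count = lambda idx: requests.get(f"{ES}/{idx}/_count").json()["count"]
assert count(source) == count(target), "reindex looks incomplete"

# 4. Optional cleanup step: delete the old index.
requests.delete(f"{ES}/{source}").raise_for_status()
```

Reading the README's whitespace-separated configuration file then reduces to `source, target, n_shards = line.split()` for each non-empty line, looping the sketch above once per line.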
rahmanster/python-project-template
https://github.com/rahmanster/python-project-template
8d330000c862b8a69bcd2c7bc4f6298dece3161f
f035faefaacdb16e0833a2809085874dd662116c
81030a99c01a4f83fcf3679301b31aaf6a4da0c5
refs/heads/master
2023-06-14T08:55:32.164498
2021-07-12T19:30:14
2021-07-12T19:30:14
384,120,539
0
5
null
2021-07-08T12:41:51
2021-07-12T19:26:05
2021-07-12T19:30:14
Python
[ { "alpha_fraction": 0.6318181753158569, "alphanum_fraction": 0.6454545259475708, "avg_line_length": 17.33333396911621, "blob_id": "110fa0563d82ca6eaeda71cc4856ba999b49bcf1", "content_id": "0accac9bf4bd35d9c32eea6724e3292ead3111b7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 241, "license_type": "no_license", "max_line_length": 73, "num_lines": 12, "path": "/calc.py", "repo_name": "rahmanster/python-project-template", "src_encoding": "UTF-8", "text": "# Testing git!\n# Test 2\n# Testing git pull - Success!\n\n#Hi Tamim\n# #--JS\n# don't mess with Texas! 🤠 🧚 💪 Nobody wants to mess with you anyways. 🙄 🦦\n\n\n# making the code 10x better with this line of code - Rofeeah\n\n# hi 😃🐒\n" } ]
1
naruya/pn-bvh
https://github.com/naruya/pn-bvh
e20756794ede8831a891edbf6e13c4beb752cb0d
63b7011190c9e02b7f6b2a4e52f7b9eb44d26c67
04ec867ebca2f67880f379e20c1c7013dbf30cfe
refs/heads/master
2022-12-11T01:30:50.711482
2020-09-14T16:39:34
2020-09-14T16:39:34
282,274,013
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.7183098793029785, "alphanum_fraction": 0.7323943376541138, "avg_line_length": 22.83333396911621, "blob_id": "285125d10e2ed6521c0727a2c04f5fd46209e9e1", "content_id": "9bb8caaa59b8c50042acbde920f950570a38a6d7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 142, "license_type": "no_license", "max_line_length": 48, "num_lines": 6, "path": "/utils.py", "repo_name": "naruya/pn-bvh", "src_encoding": "UTF-8", "text": "import moviepy.editor as mpy\n\n\ndef npy_to_gif(frames, filename):\n clip = mpy.ImageSequenceClip(frames, fps=30)\n clip.write_gif(filename)" }, { "alpha_fraction": 0.7763158082962036, "alphanum_fraction": 0.8114035129547119, "avg_line_length": 44.79999923706055, "blob_id": "6c2164bb13a5d85b8a7e84bbe893c8397dfa4a01", "content_id": "1b313c469fabb014d7c1db4af237fde99709739b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 228, "license_type": "no_license", "max_line_length": 90, "num_lines": 5, "path": "/README.md", "repo_name": "naruya/pn-bvh", "src_encoding": "UTF-8", "text": "# PN-BVH\nBVH (Biovision Hierarchy) file parser for Perception Neuron\nVery helpful and much better -> https://github.com/dabeschte/npybvh\n\ndata: https://drive.google.com/drive/folders/14YNM5bv-Vajl88eVLkNlL1u4z5MMYTtq?usp=sharing" } ]
2
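`npy_to_gif` in the pn-bvh record above is a thin moviepy wrapper around `ImageSequenceClip`. A usage sketch follows; the random frames are stand-ins for rendered skeleton poses, which this BVH parser would normally supply.

```python
# Usage sketch for utils.npy_to_gif: frames is a list of HxWx3 uint8 arrays.
import numpy as np
from utils import npy_to_gif

# 30 random 64x64 RGB frames stand in for rendered BVH poses (illustrative).
frames = [np.random.randint(0, 255, (64, 64, 3), dtype=np.uint8) for _ in range(30)]
npy_to_gif(frames, "preview.gif")  # one second of GIF at the wrapper's 30 fps
```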
BTCfork/hardfork_prototype_1_mvf-core
https://github.com/BTCfork/hardfork_prototype_1_mvf-core
feae7e1b8e156a1f8ddec9f0efda1c6ea5ec4e56
b7f4de540bc932e6e4dbb943ab9658c101b18b64
35e5f5d5035e378e0a3ef6b4461ae39062aa9aff
refs/heads/master
2021-05-03T22:39:41.325193
2017-01-03T16:12:16
2017-01-03T16:12:16
71,632,091
1
2
null
2016-10-22T10:25:35
2016-10-31T00:45:06
2017-01-03T16:12:17
C++
[ { "alpha_fraction": 0.6932515501976013, "alphanum_fraction": 0.6969324946403503, "avg_line_length": 34.434783935546875, "blob_id": "24c8d3c969ea991cda60c3c06489ea8cc4453419", "content_id": "842504e6708a866c2ac26adbaa96967b134bfe44", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1630, "license_type": "permissive", "max_line_length": 113, "num_lines": 46, "path": "/src/mvf-btcfork_conf_parser.cpp", "repo_name": "BTCfork/hardfork_prototype_1_mvf-core", "src_encoding": "UTF-8", "text": "// Copyright (c) 2016 The Bitcoin developers\n// Distributed under the MIT software license, see the accompanying\n// file COPYING or http://www.opensource.org/licenses/mit-license.php.\n// MVF-Core config file parsing functions\n\n#include <boost/filesystem/fstream.hpp>\n#include <boost/program_options/detail/config_file.hpp>\n\n#include \"mvf-core.h\"\n#include \"mvf-btcfork_conf_parser.h\"\n\n/* not sure if we need the following for Clang compatibility (copied this from util.cpp just in case)\nnamespace boost {\n\nnamespace program_options {\nstd::string to_internal(const std::string&);\n}\n\n} // namespace boost\n*/\n\nusing namespace std;\n\n// copied from util.cpp:ReadConfigFile with minor simplifications.\n// MVF-Core TOOD: would be good to refactor so we don't need separate procedures\nvoid MVFReadConfigFile(boost::filesystem::path pathCfgFile,\n map<string, string>& mapSettingsRet,\n map<string, vector<string> >& mapMultiSettingsRet)\n{\n boost::filesystem::ifstream streamConfig(pathCfgFile);\n if (!streamConfig.good())\n return; // No btcfork.conf file is OK\n\n set<string> setOptions;\n setOptions.insert(\"*\");\n\n for (boost::program_options::detail::config_file_iterator it(streamConfig, setOptions), end; it != end; ++it)\n {\n // Don't overwrite existing settings so command line settings override bitcoin.conf\n string strKey = string(\"-\") + it->string_key;\n string strValue = it->value[0];\n if (mapSettingsRet.count(strKey) == 0)\n mapSettingsRet[strKey] = strValue;\n mapMultiSettingsRet[strKey].push_back(strValue);\n }\n}\n" }, { "alpha_fraction": 0.7706885933876038, "alphanum_fraction": 0.7782691121101379, "avg_line_length": 53.58620834350586, "blob_id": "547a44a71f3f59b7db28ef44249f3ae038eae2f1", "content_id": "6603206cef48a33c30aa59b589c3080932758845", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1583, "license_type": "permissive", "max_line_length": 234, "num_lines": 29, "path": "/src/mvf-core.h", "repo_name": "BTCfork/hardfork_prototype_1_mvf-core", "src_encoding": "UTF-8", "text": "// Copyright (c) 2016 The Bitcoin developers\n// Distributed under the MIT software license, see the accompanying\n// file COPYING or http://www.opensource.org/licenses/mit-license.php.\n// MVF-Core common declarations\n\n#pragma once\n#ifndef BITCOIN_MVF_CORE_H\n#define BITCOIN_MVF_CORE_H\n\n#include <boost/filesystem.hpp>\n\n//#include \"protocol.h\"\n#include \"mvf-core-globals.h\"\n\nclass CChainParams;\n\nextern std::string ForkCmdLineHelp(); // fork-specific command line option help (MVHF-CORE-DES-TRIG-8)\nextern boost::filesystem::path MVFGetConfigFile(); // get the full path to the btcfork.conf file\nextern bool ForkSetup(const CChainParams& chainparams); // actions to perform at program setup (parameter validation etc.)\nextern void ActivateFork(int actualForkHeight, bool doBackup=true); // actions to perform at fork triggering (MVHF-CORE-DES-TRIG-6)\nextern void 
DeactivateFork(void); // actions to revert if reorg deactivates fork (MVHF-CORE-DES-TRIG-7)\nextern std::string MVFexpandWalletAutoBackupPath(const std::string& strDest, const std::string& strWalletFile, int BackupBlock, bool createDirs=true); // returns the finalized path of the auto wallet backup file (MVHF-CORE-DES-WABU-2)\nextern std::string MVFGetArg(const std::string& strArg, const std::string& strDefault);\nextern int64_t MVFGetArg(const std::string& strArg, int64_t nDefault);\nextern bool MVFGetBoolArg(const std::string& strArg, bool fDefault);\nextern bool MFVSoftSetArg(const std::string& strArg, const std::string& strValue);\nextern bool MFVSoftSetBoolArg(const std::string& strArg, bool fValue);\n\n#endif\n" }, { "alpha_fraction": 0.5249890685081482, "alphanum_fraction": 0.6457694172859192, "avg_line_length": 29.413333892822266, "blob_id": "a487064a1163d10d8fc03849d7fc052d9ab9685d", "content_id": "806b74fcd36df1a6171730ae2f9d7510247082d1", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4562, "license_type": "permissive", "max_line_length": 102, "num_lines": 150, "path": "/qa/rpc-tests/test_framework/arith.py", "repo_name": "BTCfork/hardfork_prototype_1_mvf-core", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python2\n# Copyright (c) 2016 The Bitcoin developers\n# Distributed under the MIT software license, see the accompanying\n# file COPYING or http://www.opensource.org/licenses/mit-license.php.\n'''\n>>> max_diff_bits = target_int2bits(MAX_DIFF_1)\n>>> bin2hex(max_diff_bits)\n'1d00ffff'\n>>> pool_diff_bits = target_int2bits(POOL_DIFF_1)\n>>> bin2hex(pool_diff_bits)\n'1d00ffff'\n>>> bits_bytes = target_int2bits(22791193517536179595645637622052884930882401463536451358196587084939)\n>>> bin2hex(bits_bytes)\n'1d00d86a'\n>>> bits2target_int(bits_bytes)\n22791060871177364286867400663010583169263383106957897897309909286912L\n'''\n\nimport binascii\n\n############################################################################\n# begin code from from http://bitcoin.stackexchange.com/a/30458\n\ndef target_int2bits(target):\n # comprehensive explanation here: bitcoin.stackexchange.com/a/2926/2116\n\n # get in base 256 as a hex string\n target_hex = int2hex(target)\n\n bits = \"00\" if (hex2int(target_hex[: 2]) > 127) else \"\"\n bits += target_hex # append\n bits = hex2bin(bits)\n length = int2bin(len(bits), 1)\n\n # the bits value could be zero (0x00) so make sure it is at least 3 bytes\n bits += hex2bin(\"0000\")\n\n # the bits value could be bigger than 3 bytes, so cut it down to size\n bits = bits[: 3]\n\n return length + bits\n\ndef bits2target_int(bits_bytes):\n exp = bin2int(bits_bytes[: 1]) # exponent is the first byte\n mult = bin2int(bits_bytes[1:]) # multiplier is all but the first byte\n return mult * (2 ** (8 * (exp - 3)))\n\ndef int2hex(intval):\n hex_str = hex(intval)[2:]\n if hex_str[-1] == \"L\":\n hex_str = hex_str[: -1]\n if len(hex_str) % 2:\n hex_str = \"0\" + hex_str\n return hex_str\n\ndef hex2int(hex_str):\n return int(hex_str, 16)\n\ndef hex2bin(hex_str):\n return binascii.a2b_hex(hex_str)\n\ndef int2bin(val, pad_length = False):\n hexval = int2hex(val)\n if pad_length: # specified in bytes\n hexval = hexval.zfill(2 * pad_length)\n return hex2bin(hexval)\n\ndef bin2hex(binary):\n # convert raw binary data to a hex string. 
also accepts ascii chars (0 - 255)\n return binascii.b2a_hex(binary)\n\n# end code from from http://bitcoin.stackexchange.com/a/30458\n############################################################################\n\n\n# Bitcoin difficulty 1 target - used for computing difficulty\nMAX_DIFF_1 = 0x00000000FFFF0000000000000000000000000000000000000000000000000000\n\n# not really using POOL_DIFF_1, I believe\nPOOL_DIFF_1 = 0x00000000FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF\n\n\ndef bits2difficulty(bits):\n '''\n Python implementation of rpcblockchain.cpp:GetDifficulty()\n Returns floating point number that represents the difficulty.\n Minimum difficulty = 1.0 corresponds to the maximum target (MAX_DIFF_1)\n Returned difficulty can be below this minimum for testnets.\n >>> bits2difficulty(0x1d00ffff)\n 1.0\n >>> bits2difficulty(0x207fffff)\n 4.6565423739069247e-10\n >>> bits2difficulty(0xffffffff)\n 0.0\n >>> bits2difficulty(0x201fffff)\n 1.8626176156868173e-09\n >>> bits2difficulty(0x203ffff6)\n 9.313105841782251e-10\n >>> bits2difficulty(0x1f03f355)\n 3.862421316298267e-06\n >>> bits2difficulty(0x1e19919b)\n 0.0001527719240007758\n >>> bits2difficulty(0x1c05a3f4)\n 45.38582234101263\n >>> bits2difficulty(0)\n Traceback (most recent call last):\n ...\n ZeroDivisionError: float division by zero\n '''\n nShift = (bits >> 24) & 0xff\n dDiff = float(0x0000ffff) / float(bits & 0x00ffffff)\n while nShift < 29:\n dDiff *= 256.0\n nShift += 1\n while nShift > 29:\n dDiff /= 256.0\n nShift -= 1\n # not supposed to return diff < 1.0\n # but it seems this is possible indeed, despite the above comment in CPP function\n #assert dDiff >= 1.0, \"diff M 1.0: %s\" % dDiff\n return dDiff\n\n\ndef bin2int(bytestring):\n '''\n Return integer representation of byte string.\n >>> bin2int(hex2bin('00'))\n 0\n >>> bin2int(hex2bin('fe'))\n 254\n >>> bin2int(hex2bin('1d00ffff'))\n 486604799\n >>> bin2int(hex2bin('207fffff'))\n 545259519\n '''\n result = 0\n remainder = bytestring\n while len(remainder) > 0:\n if len(remainder) == 1:\n first_byte = int(ord(remainder[0]))\n remainder = ''\n else:\n first_byte, remainder = int(ord(remainder[0])), remainder[1:]\n result = (result << 8) + first_byte\n return result\n\n\nif __name__ == \"__main__\":\n import doctest\n doctest.testmod()\n" }, { "alpha_fraction": 0.7613636255264282, "alphanum_fraction": 0.7678571343421936, "avg_line_length": 35.17647171020508, "blob_id": "52b9a73ed06f5554bbcde5921c5c21f1f93ad44a", "content_id": "186d072bf207456323b0a2e492e0cf4f0506e80a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 616, "license_type": "permissive", "max_line_length": 190, "num_lines": 17, "path": "/src/mvf-btcfork_conf_parser.h", "repo_name": "BTCfork/hardfork_prototype_1_mvf-core", "src_encoding": "UTF-8", "text": "// Copyright (c) 2016 The Bitcoin developers\n// Distributed under the MIT software license, see the accompanying\n// file COPYING or http://www.opensource.org/licenses/mit-license.php.\n// MVF-Core btcfork.conf file parsing declarations\n\n#pragma once\n#ifndef BITCOIN_MVF_BTCFORK_CONF_PARSER_H\n#define BITCOIN_MVF_BTCFORK_CONF_PARSER_H\n\n#include <boost/filesystem.hpp>\n\n#include \"mvf-core.h\"\n\n// read btcfork.conf file\nextern void MVFReadConfigFile(boost::filesystem::path pathCfgFile, std::map<std::string, std::string>& mapSettingsRet, std::map<std::string, std::vector<std::string> >& mapMultiSettingsRet);\n\n#endif\n\n" }, { "alpha_fraction": 0.6020408272743225, 
"alphanum_fraction": 0.6217328906059265, "avg_line_length": 43.80748748779297, "blob_id": "8bc4429ab30a978b681f085956b004380921acc8", "content_id": "49a567300c80569ea77dd9a41eb6fe696e50c008", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 16758, "license_type": "permissive", "max_line_length": 160, "num_lines": 374, "path": "/qa/rpc-tests/walletbackupauto.py", "repo_name": "BTCfork/hardfork_prototype_1_mvf-core", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python2\n# Copyright (c) 2014-2015 The Bitcoin Core developers\n# Copyright (c) 2016 The Bitcoin developers\n# Distributed under the MIT software license, see the accompanying\n# file COPYING or http://www.opensource.org/licenses/mit-license.php.\n# MVF-Core\n\"\"\"\nSee https://github.com/BTCfork/hardfork_prototype_1_mvf-core/blob/master/doc/mvf-core-test-design.md#411\n\nExercise the auto backup wallet code. Ported from walletbackup.sh.\n\nTest case is:\n6 nodes: node0..node5 .\nNodes 1 2 and 3 send transactions between each other, fourth node is a miner.\nThe 5th node does no transactions and only tests for the -disablewallet conflict.\nThe 6th node is stopped at pre-fork block and restarted post-fork to check\nthat it does not perform another backup since it already has performed one\nat the pre-fork block.\n\nNodes 1 2 3 each mine a block to start, then miner (node 4) creates\n100 blocks so 1 2 3 each have 50 mature coins to spend.\nThen 5 iterations of 1/2/3 sending coins amongst\nthemselves to get transactions in the wallets,\nand the miner mining one block.\n\nThen 5 more iterations of transactions and mining a block.\n\nThe node config sets wallets to automatically back up\nas defined in the backupblock constant 114.\n\nBalances are saved for sanity check:\n Sum(1,2,3,4 balances) == 114*50\n\nNode5 is stopped after its backup block (right before the fork activation\nblock). It is only restarted after the fork block.\n\n1/2/3/4 are shutdown after the fork. Their wallets are erased and then\nrestored using the auto backup wallets eg wallet.dat.auto.114.bak.\nSanity check to confirm 1/2/3/4 balances match the pre-fork block 114 balances.\nSanity check to confirm 5th node does NOT perform the auto backup\nand that the debug.log contains a conflict message\nSanity check to confirm 6th node does NOT perform another backup at fork\nblock after it has already performed a backup at pre-fork block.\n\nNode 2 is rewinded to before the backup height, and a check is made that\nan existing backup is copied to a .old file with identical contents if the\nexisting backup is overwritten.\n\nFinally, node 1 is stopped, its wallet backup is deleted, and the node is\nrestarted. 
A post-fork block is generated to check that the wallet backup\nis not re-performed once the node has already forked.\n\"\"\"\n\nimport os\nimport fnmatch\nimport hashlib\nfrom test_framework.test_framework import BitcoinTestFramework\nfrom test_framework.util import *\nfrom random import randint\nimport logging\nlogging.basicConfig(format='%(levelname)s:%(message)s', level=logging.INFO)\n\n# backup block must be > 113 as these blocks are used for context setup\nbackupblock = 114\n\nclass WalletBackupTest(BitcoinTestFramework):\n\n def setup_chain(self):\n logging.info(\"Initializing test directory \"+self.options.tmpdir)\n initialize_chain_clean(self.options.tmpdir, 6)\n\n # This mirrors how the network was setup in the bash test\n def setup_network(self, split=False):\n logging.info(\"Starting nodes\")\n\n # nodes 1, 2,3 are spenders, let's give them a keypool=100\n # and configure option autobackupwalletpath\n # testing each file path variant\n # as per the test design at sw-req-10-1\n self.extra_args = [\n [\"-keypool=100\",\n \"-autobackupwalletpath=%s\"%(os.path.join(self.options.tmpdir,\"node0\",\"newabsdir\",\"[email protected]\")),\n \"-forkheight=%s\"%(backupblock+1),\n \"-autobackupblock=%s\"%(backupblock)],\n [\"-keypool=100\",\n \"[email protected]\",\n \"-forkheight=%s\"%(backupblock+1),\n \"-autobackupblock=%s\"%(backupblock)],\n [\"-keypool=100\",\n \"-autobackupwalletpath=\" + os.path.join(\".\",\"newreldir\"),\n \"-forkheight=%s\"%(backupblock+1),\n \"-autobackupblock=%s\"%(backupblock)],\n [\"-autobackupblock=%s\"%(backupblock),\n \"-forkheight=%s\"%(backupblock+1)],\n [\"-disablewallet\",\n \"-autobackupwalletpath=\"+ os.path.join(self.options.tmpdir,\"node4\"),\n \"-forkheight=%s\"%(backupblock+1),\n \"-autobackupblock=%s\"%(backupblock)],\n [\"-autobackupblock=%s\"%(backupblock),\n \"-forkheight=%s\"%(backupblock+1)],\n ]\n\n self.nodes = start_nodes(6, self.options.tmpdir, self.extra_args)\n # set up a star topology with everyone connected to miner\n for ni in range(6):\n if ni != 3: connect_nodes_bi(self.nodes, ni, 3)\n self.is_network_split=False\n self.sync_all()\n\n def one_send(self, from_node, to_address):\n if (randint(1,2) == 1):\n amount = Decimal(randint(1,10)) / Decimal(10)\n self.nodes[from_node].sendtoaddress(to_address, amount)\n\n def do_one_round(self):\n a0 = self.nodes[0].getnewaddress()\n a1 = self.nodes[1].getnewaddress()\n a2 = self.nodes[2].getnewaddress()\n\n self.one_send(0, a1)\n self.one_send(0, a2)\n self.one_send(1, a0)\n self.one_send(1, a2)\n self.one_send(2, a0)\n self.one_send(2, a1)\n\n # Have the miner (node3) mine a block.\n # Must sync mempools before mining.\n sync_mempools(self.nodes)\n self.nodes[3].generate(1)\n\n # As above, this mirrors the original bash test.\n def start_four(self):\n for i in range(4):\n self.nodes[i] = start_node(i, self.options.tmpdir, self.extra_args[i])\n\n for ni in range(6):\n if ni != 3: connect_nodes_bi(self.nodes, ni, 3)\n\n def stop_four(self):\n for i in range(4):\n stop_node(self.nodes[i], i)\n\n def erase_hot_wallets(self):\n for node in xrange(4):\n os.remove(os.path.join(self.options.tmpdir,\"node%s\" % node,\"regtest\",\"wallet.dat\"))\n\n def run_test(self):\n logging.info(\"Automatic backup configured for block %s\"%(backupblock))\n assert_greater_than(backupblock, 113)\n\n # target backup files\n nodebackupfile = [ os.path.join(self.options.tmpdir,\"node0\",\"newabsdir\",\"pathandfile.%s.bak\"%(backupblock)),\n 
os.path.join(self.options.tmpdir,\"node1\",\"regtest\",\"filenameonly.%s.bak\"%(backupblock)),\n os.path.join(self.options.tmpdir,\"node2\",\"regtest\",\"newreldir\",\"wallet.dat.auto.%s.bak\"%(backupblock)),\n os.path.join(self.options.tmpdir,\"node3\",\"regtest\",\"wallet.dat.auto.%s.bak\"%(backupblock)),\n os.path.join(self.options.tmpdir,\"node4\",\"regtest\",\"wallet.dat.auto.%s.bak\"%(backupblock)),\n os.path.join(self.options.tmpdir,\"node5\",\"regtest\",\"wallet.dat.auto.%s.bak\"%(backupblock)),\n ]\n\n # we want to check later on that this duplicate is not generated\n node5duplicate_path = os.path.join(self.options.tmpdir,\"node5\",\"regtest\",\"wallet.dat.auto.%s.bak\"%(backupblock+1))\n\n logging.info(\"Generating initial blockchain\")\n for ni in range(3):\n self.nodes[ni].generate(1)\n sync_blocks(self.nodes)\n self.nodes[3].generate(100)\n\n sync_blocks(self.nodes)\n logging.info(\"Generated %s blocks\"%(self.nodes[0].getblockcount()))\n\n assert_equal(self.nodes[0].getbalance(), 50)\n assert_equal(self.nodes[1].getbalance(), 50)\n assert_equal(self.nodes[2].getbalance(), 50)\n assert_equal(self.nodes[3].getbalance(), 0)\n\n tmpdir = self.options.tmpdir\n\n logging.info(\"Creating transactions\")\n # Five rounds of sending each other transactions.\n for i in range(5):\n self.do_one_round()\n\n logging.info(\"More transactions\")\n for i in range(5):\n self.do_one_round()\n\n # At this point should be 113 blocks\n self.sync_all()\n logging.info(\"Generated %s blocks\"%(self.nodes[0].getblockcount()))\n\n # Generate any further blocks to reach the backup block\n blocks_remaining = backupblock - self.nodes[0].getblockcount() - 1\n if (blocks_remaining) > 0:\n self.nodes[3].generate(blocks_remaining)\n\n self.sync_all()\n logging.info(\"Generated %s blocks\"%(self.nodes[0].getblockcount()))\n\n # Only 1 more block until the auto backup is triggered\n # Test the auto backup files do NOT exist yet\n nodebackupexists = [0,0,0,0,0,0]\n\n for ci in [0,1,2,3,5]:\n if os.path.isfile(nodebackupfile[ci]):\n nodebackupexists[ci] = 1\n logging.info(\"Error backup exists too early: %s\"%(nodebackupfile[ci]))\n\n for ci in [0,1,2,3,5]:\n assert_equal(0, nodebackupexists[ci])\n\n # Generate the block that should trigger the auto backup\n self.nodes[3].generate(1)\n self.sync_all()\n assert_equal(self.nodes[0].getblockcount(),backupblock)\n\n logging.info(\"Reached backup block %s automatic backup triggered\"%(self.nodes[0].getblockcount()))\n\n logging.info(\"Stopping node 5 (to check backup is not made twice if we restart at fork)\")\n stop_node(self.nodes[5], 5)\n\n # Test if the backup files exist\n for ci in [0,1,3]:\n if os.path.isfile(nodebackupfile[ci]):\n nodebackupexists[ci] = 1\n else:\n logging.info(\"Error backup does not exist: %s\"%(nodebackupfile[ci]))\n\n if os.path.isfile(nodebackupfile[2]):\n nodebackupexists[2] = 1\n # take MD5 for comparison to .old file in later test\n node2backupfile_orig_md5 = hashlib.md5(open(nodebackupfile[2], 'rb').read()).hexdigest()\n else: logging.info(\"Error backup does not exist: %s\"%(nodebackupfile[2]))\n\n for ci in range(4):\n assert_equal(1, nodebackupexists[ci])\n\n # generate one more block to trigger the fork\n self.nodes[3].generate(1)\n sync_blocks(self.nodes[:-1])\n assert_equal(self.nodes[0].getblockcount(),backupblock+1)\n\n logging.info(\"Restarting node 5 to check absence of soft-fork backup at fork\")\n self.nodes[5] = start_node(5, self.options.tmpdir,[\"-forkheight=%s\"%(backupblock+1),\n \"-autobackupblock=%s\"%(backupblock) 
])\n connect_nodes_bi(self.nodes, 5, 3)\n self.sync_all()\n sync_blocks(self.nodes[:-1])\n # check that restarting the node has NOT created another backup at the fork block height\n assert(not os.path.exists(node5duplicate_path))\n\n ##\n # Calculate wallet balances for comparison after restore\n ##\n\n total = 0\n balance = [0,0,0,0]\n # Balance of each wallet\n for nb in range(4):\n balance[nb] = self.nodes[nb].getbalance()\n logging.info(\"Node%d balance: %s\" % (nb, str(balance[nb])))\n total += balance[nb]\n\n logging.info(\"Original Wallet Total: \" + str(total))\n\n ##\n # Test restoring spender wallets from backups\n ##\n logging.info(\"Switching wallets. Restoring using automatic wallet backups...\")\n self.stop_four()\n self.erase_hot_wallets()\n\n # Restore wallets from backup\n for ci in range(4):\n shutil.copyfile(nodebackupfile[ci], os.path.join(tmpdir,\"node%d\"%ci,\"regtest\",\"wallet.dat\"))\n\n logging.info(\"Re-starting nodes\")\n self.start_four()\n self.sync_all()\n\n total2 = self.nodes[0].getbalance() + self.nodes[1].getbalance() + self.nodes[2].getbalance() + self.nodes[3].getbalance()\n for ci in range(4):\n logging.info(\"Node%d balance: %s\" % (ci, str(self.nodes[ci].getbalance())))\n\n logging.info(\"Backup Wallet Total: \" + str(total2))\n\n # balances should equal the auto backup balances\n for ci in range(4):\n assert_equal(self.nodes[ci].getbalance(), balance[ci])\n assert_equal(total,total2)\n\n # Test Node4 auto backup wallet does NOT exist: tmpdir + \"/node4/wallet.dat.auto.114.bak\"\n # when -disablewallet is enabled then no backup file should be created and graceful exit happens\n # without causing a runtime error\n nodebackupexists[4] = 0\n if os.path.isfile(os.path.join(tmpdir,\"node4\",\"regtest\",\"wallet.dat.auto.%s.bak\"%(backupblock))):\n nodebackupexists[4] = 1\n logging.info(\"Error: Auto backup performed on node4 with -disablewallet!\")\n\n # Test Node4 debug.log contains a conflict message - length test should be > 0\n debugmsg_list = search_file(os.path.join(tmpdir,\"node4\",\"regtest\",\"debug.log\"),\"-disablewallet and -autobackupwalletpath conflict\")\n\n assert_equal(0,nodebackupexists[4])\n assert_greater_than(len(debugmsg_list),0)\n\n # test that existing wallet backup is preserved\n # rewind node 2's chain to before backupblock\n logging.info(\"Stopping all nodes\")\n self.stop_four()\n for n in xrange(4):\n os.unlink(os.path.join(tmpdir,\"node%s\" % n,\"regtest\",BTCFORK_CONF_FILENAME))\n logging.info(\"Erasing blockchain on node 2 while keeping backup file\")\n shutil.rmtree(self.options.tmpdir + \"/node2/regtest/blocks\")\n shutil.rmtree(self.options.tmpdir + \"/node2/regtest/chainstate\")\n logging.info(\"Restarting node 2\")\n self.nodes[2] = start_node(2, self.options.tmpdir,[\"-keypool=100\",\n \"-autobackupwalletpath=\"+ os.path.join(\".\",\"newreldir\"),\n \"-forkheight=%s\"%(backupblock+1),\n \"-autobackupblock=%s\"%(backupblock) ])\n\n # check that there is no .old yet (node 2 needs to generate a block to hit the height)\n old_files_found=[]\n for file in os.listdir(os.path.join(tmpdir,\"node2\",\"regtest\",\"newreldir\")):\n if fnmatch.fnmatch(file, \"wallet.dat.auto.%s.bak.*.old\" % (backupblock)):\n logging.info(\"old file found: %s\" % file)\n old_files_found.append(file)\n assert_equal(0, len(old_files_found))\n # generate enough blocks to hit the backup block height\n # this should cause the existing backup to be saved to a timestamped .old copy\n self.nodes[2].generate(backupblock)\n for file in 
os.listdir(os.path.join(tmpdir,\"node2\",\"regtest\",\"newreldir\")):\n if fnmatch.fnmatch(file, \"*.old\"):\n old_files_found.append(file)\n assert_equal(1, len(old_files_found))\n # check that the contents of the .old match what we recorded earlier for node 2's backup\n # (the file should just have been renamed)\n logging.info(\"Checking .old file %s\" % old_files_found[0])\n assert_equal(node2backupfile_orig_md5,hashlib.md5(open(os.path.join(tmpdir,\"node2\",\"regtest\",\"newreldir\",old_files_found[0]), 'rb').read()).hexdigest())\n # generate the fork block\n self.nodes[2].generate(backupblock+1)\n logging.info(\"Checksum ok - shutting down\")\n stop_node(self.nodes[2], 2)\n os.unlink(os.path.join(tmpdir,\"node2\",\"regtest\",BTCFORK_CONF_FILENAME))\n self.start_four()\n\n # test that wallet backup is not performed again if fork has already\n # triggered and wallet exists\n # (otherwise it would backup a later-state wallet)\n logging.info(\"stopping node 1\")\n stop_node(self.nodes[1], 1)\n logging.info(\"checking that wallet backup file exists: %s\" % nodebackupfile[1])\n assert(os.path.isfile(nodebackupfile[1]))\n logging.info(\"removing wallet backup file %s\" % nodebackupfile[1])\n os.remove(nodebackupfile[1])\n # check that no wallet backup file created\n logging.info(\"restarting node 1\")\n os.unlink(os.path.join(tmpdir,\"node1\",\"regtest\",BTCFORK_CONF_FILENAME))\n self.nodes[1] = start_node(1, self.options.tmpdir, [\"-keypool=100\",\n \"[email protected]\",\n \"-forkheight=%s\"%(backupblock+1),\n \"-autobackupblock=%s\"%(backupblock)])\n logging.info(\"generating another block on node 1\")\n self.nodes[1].generate(1)\n logging.info(\"checking that backup file has not been created again...\")\n nodebackupexists[1] = 0\n if os.path.isfile(nodebackupfile[1]):\n nodebackupexists[1] = 1\n logging.info(\"Error: Auto backup created again on node1 after fork has already activated!\")\n assert_equal(0, nodebackupexists[1])\n\n\nif __name__ == '__main__':\n WalletBackupTest().main()\n" }, { "alpha_fraction": 0.5645384192466736, "alphanum_fraction": 0.6181063652038574, "avg_line_length": 34.377620697021484, "blob_id": "93170612d8cf9846a4096c15e92503c221b66bca", "content_id": "680d3ce8dc9d826ae3b67ba03ba2a48adf50fa50", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 5059, "license_type": "permissive", "max_line_length": 116, "num_lines": 143, "path": "/src/consensus/params.h", "repo_name": "BTCfork/hardfork_prototype_1_mvf-core", "src_encoding": "UTF-8", "text": "// Copyright (c) 2009-2010 Satoshi Nakamoto\n// Copyright (c) 2009-2015 The Bitcoin Core developers\n// Distributed under the MIT software license, see the accompanying\n// file COPYING or http://www.opensource.org/licenses/mit-license.php.\n\n#ifndef BITCOIN_CONSENSUS_PARAMS_H\n#define BITCOIN_CONSENSUS_PARAMS_H\n\n#include \"uint256.h\"\n#include <map>\n#include <string>\n#include <math.h> // MVF-Core\n#include \"mvf-core.h\" // MVF-Core\n\nnamespace Consensus {\n\nenum DeploymentPos\n{\n DEPLOYMENT_TESTDUMMY,\n DEPLOYMENT_CSV, // Deployment of BIP68, BIP112, and BIP113.\n DEPLOYMENT_SEGWIT, // MFV-Core added for trigger on SegWit (BIP141/143/147) activation\n MAX_VERSION_BITS_DEPLOYMENTS\n};\n\n/**\n * Struct for each individual consensus rule change using BIP9.\n */\nstruct BIP9Deployment {\n /** Bit position to select the particular bit in nVersion. */\n int bit;\n /** Start MedianTime for version bits miner confirmation. 
Can be a date in the past */\n int64_t nStartTime;\n /** Timeout/expiry MedianTime for the deployment attempt. */\n int64_t nTimeout;\n};\n\n/**\n * Parameters that influence chain consensus.\n */\nstruct Params {\n uint256 hashGenesisBlock;\n int nSubsidyHalvingInterval;\n /** Used to check majorities for block version upgrade */\n int nMajorityEnforceBlockUpgrade;\n int nMajorityRejectBlockOutdated;\n int nMajorityWindow;\n /** Block height and hash at which BIP34 becomes active */\n int BIP34Height;\n uint256 BIP34Hash;\n /**\n * Minimum blocks including miner confirmation of the total of 2016 blocks in a retargetting period,\n * (nPowTargetTimespan / nPowTargetSpacing) which is also used for BIP9 deployments.\n * Examples: 1916 for 95%, 1512 for testchains.\n */\n uint32_t nRuleChangeActivationThreshold;\n uint32_t nMinerConfirmationWindow;\n BIP9Deployment vDeployments[MAX_VERSION_BITS_DEPLOYMENTS];\n /** Proof of work parameters */\n uint256 powLimit;\n bool fPowAllowMinDifficultyBlocks;\n bool fPowNoRetargeting;\n int64_t nPowTargetSpacing;\n int64_t nPowTargetTimespan;\n\n int MVFRetargetPeriodEnd() const { return FinalActivateForkHeight + HARDFORK_RETARGET_BLOCKS; }\n\n // return height-dependent target time span used to compute retargeting interval (MVHF-CORE-DES-DIAD-4)\n int64_t MVFPowTargetTimespan(int Height) const\n {\n if (MVFisWithinRetargetPeriod(Height))\n {\n int MVFHeight = Height - FinalActivateForkHeight;\n\n switch (MVFHeight)\n {\n case 0 ... 7 : return nPowTargetSpacing; // 10 minutes\n\n case 8 ... 46 : return nPowTargetSpacing * 6; // 1 hour\n\n case 47 ... 153 : return nPowTargetSpacing * 36; // 6 hours\n\n case 154 ... 299 : return nPowTargetSpacing * 72; // 12 hours\n\n case 300 ... 1299 : return nPowTargetSpacing * 144; // 24 hours - 1 day\n\n case 1300 ... 4999 : return nPowTargetSpacing * 288; // 48 hours - 2 days\n\n case 5000 ... 9999 : return nPowTargetSpacing * 432; // 72 hours - 3 days\n\n case 10000 ... 14999 : return nPowTargetSpacing * 576; // 96 hours - 4 days\n\n case 15000 ... HARDFORK_RETARGET_BLOCKS : return nPowTargetSpacing * 1152; // 192 hours - 8 days\n\n default : return nPowTargetTimespan; // original 14 days\n }\n }\n else return nPowTargetTimespan;\n }\n\n bool MVFisWithinRetargetPeriod(int Height) const\n {\n if (Height >= FinalActivateForkHeight)\n return true;\n else\n return false;\n }\n int64_t DifficultyAdjustmentInterval() const { return nPowTargetTimespan / nPowTargetSpacing; }\n int64_t DifficultyAdjustmentInterval(int Height) const\n {\n // MVF-Core:\n // if outside the MVFRetargetPeriod then use the original values\n // otherwise use a height-dependent window size\n if (MVFisWithinRetargetPeriod(Height)) {\n // re-target MVF\n int MVFHeight = Height - FinalActivateForkHeight;\n switch (MVFHeight)\n {\n case 0 ... 2016: return 1; // every block (abrupt retargeting permitted)\n\n case 2017 ... 3999: return 10; // every 10 blocks\n\n case 4000 ... 9999: return 40; // every 40 blocks\n\n case 10000 ... 14999: return 100; // every 100 blocks\n\n case 15000 ... 19999: return 400; // every 400 blocks\n\n case 20000 ... 
HARDFORK_RETARGET_BLOCKS: return 1000; // every 1000 blocks\n\n default : return 2016; // every 2016 blocks\n }\n }\n else {\n // re-target original (MVHF-CORE-DES-DIAD-4)\n return nPowTargetTimespan / nPowTargetSpacing;\n }\n }\n // MFV-Core end\n\n};\n} // namespace Consensus\n\n#endif // BITCOIN_CONSENSUS_PARAMS_H\n" }, { "alpha_fraction": 0.6615660786628723, "alphanum_fraction": 0.6647875308990479, "avg_line_length": 46.06122589111328, "blob_id": "d203897ef6f117c6869c515b7339d4089ce1db26", "content_id": "620e848f0ef744ae0e50543191241878670d24e1", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 16142, "license_type": "permissive", "max_line_length": 354, "num_lines": 343, "path": "/src/mvf-core.cpp", "repo_name": "BTCfork/hardfork_prototype_1_mvf-core", "src_encoding": "UTF-8", "text": "// Copyright (c) 2016 The Bitcoin developers\n// Distributed under the MIT software license, see the accompanying\n// file COPYING or http://www.opensource.org/licenses/mit-license.php.\n// MVF-Core common objects and functions\n\n#include \"mvf-core.h\"\n#include \"mvf-btcfork_conf_parser.h\"\n#include \"init.h\"\n#include \"util.h\"\n#include \"utilstrencodings.h\" // for atoi64\n#include \"chainparams.h\"\n#include \"validationinterface.h\"\n\n#include <iostream>\n#include <fstream>\n#include <boost/filesystem.hpp>\n#include <boost/algorithm/string/replace.hpp>\n#include <boost/exception/to_string_stub.hpp>\n\nusing namespace std;\n\n\n/** Add MVF-specific command line options (MVHF-CORE-DES-TRIG-8) */\nstd::string ForkCmdLineHelp()\n{\n std::string strUsage;\n strUsage += HelpMessageGroup(_(\"Bitcoin MVF-Core Options:\"));\n\n // automatic wallet backup parameters (MVHF-CORE-DES-WABU-1)\n strUsage += HelpMessageOpt(\"-autobackupwalletpath=<path>\", _(\"Automatically backup the wallet to the autobackupwalletfile path after the block specified becomes the best block (-autobackupblock). Default: Enabled\"));\n strUsage += HelpMessageOpt(\"-autobackupblock=<n>\", _(\"Specify the block number that triggers the automatic wallet backup. Default: forkheight-1\"));\n\n // fork height parameter (MVHF-CORE-DES-TRIG-1)\n strUsage += HelpMessageOpt(\"-forkheight=<n>\", strprintf(_(\"Block height at which to fork on active network (integer). Defaults (also minimums): mainnet:%u,testnet=%u,regtest=%u,bfgtest=%u\"), (unsigned)HARDFORK_HEIGHT_MAINNET, (unsigned)HARDFORK_HEIGHT_TESTNET, (unsigned)HARDFORK_HEIGHT_REGTEST, (unsigned)HARDFORK_HEIGHT_BFGTEST));\n\n // fork id (MVHF-CORE-DES-CSIG-1)\n strUsage += HelpMessageOpt(\"-forkid=<n>\", strprintf(_(\"Fork id to use for signature change. Value must be between 0 and %d. Default is 0x%06x (%u)\"), (unsigned)MAX_HARDFORK_SIGHASH_ID, (unsigned)HARDFORK_SIGHASH_ID, (unsigned)HARDFORK_SIGHASH_ID));\n\n // fork difficulty drop factor (MVF-Core TODO: MVHF-CORE-DES-DIAD-?)\n strUsage += HelpMessageOpt(\"-diffdrop=<n>\", strprintf(_(\"Difficulty drop factor on active network (integer). Value must be between 1 (no drop) and %u. 
Defaults: mainnet:%u,testnet=%u,regtest=%u\"), (unsigned)MAX_HARDFORK_DROPFACTOR, (unsigned)HARDFORK_DROPFACTOR_MAINNET, (unsigned)HARDFORK_DROPFACTOR_TESTNET, (unsigned)HARDFORK_DROPFACTOR_REGTEST));\n return strUsage;\n}\n\n\n/** Performs fork-related setup / validation actions when the program starts */\nbool ForkSetup(const CChainParams& chainparams)\n{\n int minForkHeightForNetwork = 0;\n unsigned defaultDropFactorForNetwork = 1;\n std:string activeNetworkID = chainparams.NetworkIDString();\n\n LogPrintf(\"%s: MVF: doing setup\\n\", __func__);\n\n // first, set initial values from built-in defaults\n FinalForkId = GetArg(\"-forkid\", HARDFORK_SIGHASH_ID);\n\n // determine default drop factors and minimum fork heights according to network\n // (minimum fork heights are set to the same as the default fork heights for now, but could be made different)\n if (activeNetworkID == CBaseChainParams::MAIN) {\n minForkHeightForNetwork = HARDFORK_HEIGHT_MAINNET;\n defaultDropFactorForNetwork = HARDFORK_DROPFACTOR_MAINNET;\n }\n else if (activeNetworkID == CBaseChainParams::TESTNET) {\n minForkHeightForNetwork = HARDFORK_HEIGHT_TESTNET;\n defaultDropFactorForNetwork = HARDFORK_DROPFACTOR_TESTNET;\n }\n else if (activeNetworkID == CBaseChainParams::REGTEST) {\n minForkHeightForNetwork = HARDFORK_HEIGHT_REGTEST;\n defaultDropFactorForNetwork = HARDFORK_DROPFACTOR_REGTEST;\n }\n else if (activeNetworkID == CBaseChainParams::BFGTEST) {\n minForkHeightForNetwork = HARDFORK_HEIGHT_BFGTEST;\n defaultDropFactorForNetwork = HARDFORK_DROPFACTOR_BFGTEST;\n }\n else {\n throw std::runtime_error(strprintf(\"%s: Unknown chain %s.\", __func__, activeNetworkID));\n }\n\n FinalActivateForkHeight = GetArg(\"-forkheight\", minForkHeightForNetwork);\n FinalDifficultyDropFactor = (unsigned) GetArg(\"-diffdrop\", defaultDropFactorForNetwork);\n\n // check if btcfork.conf exists (MVHF-CORE-DES-TRIG-10)\n boost::filesystem::path pathBTCforkConfigFile(MVFGetConfigFile());\n if (boost::filesystem::exists(pathBTCforkConfigFile)) {\n LogPrintf(\"%s: MVF: found marker config file at %s - client has already forked before\\n\", __func__, pathBTCforkConfigFile.string().c_str());\n // read the btcfork.conf file if it exists, override standard config values using its configuration\n try\n {\n MVFReadConfigFile(pathBTCforkConfigFile, btcforkMapArgs, btcforkMapMultiArgs);\n if (btcforkMapArgs.count(\"-forkheight\")) {\n FinalActivateForkHeight = atoi(btcforkMapArgs[\"-forkheight\"]);\n mapArgs[\"-forkheight\"] = FinalActivateForkHeight;\n }\n if (btcforkMapArgs.count(\"-autobackupblock\")) {\n mapArgs[\"-autobackupblock\"] = btcforkMapArgs[\"-autobackupblock\"];\n }\n if (btcforkMapArgs.count(\"-forkid\")) {\n FinalForkId = atoi(btcforkMapArgs[\"-forkid\"]);\n mapArgs[\"-forkid\"] = btcforkMapArgs[\"-forkid\"];\n }\n } catch (const std::exception& e) {\n LogPrintf(\"MVF: Error reading %s configuration file: %s\\n\", BTCFORK_CONF_FILENAME, e.what());\n fprintf(stderr,\"MVF: Error reading %s configuration file: %s\\n\", BTCFORK_CONF_FILENAME, e.what());\n }\n wasMVFHardForkPreviouslyActivated = true;\n }\n else {\n LogPrintf(\"%s: MVF: no marker config file at %s - client has not forked yet\\n\", __func__, pathBTCforkConfigFile.string().c_str());\n wasMVFHardForkPreviouslyActivated = false;\n }\n\n // validation\n\n // shut down immediately if specified fork height is invalid\n if (FinalActivateForkHeight <= 0)\n {\n LogPrintf(\"MVF: Error: specified fork height (%d) is less than minimum for '%s' network (%d)\\n\", 
FinalActivateForkHeight, activeNetworkID, minForkHeightForNetwork);\n        return false; // caller should shut down\n    }\n\n    if (FinalDifficultyDropFactor < 1 || FinalDifficultyDropFactor > MAX_HARDFORK_DROPFACTOR) {\n        LogPrintf(\"MVF: Error: specified difficulty drop (%u) is not in range 1..%u\\n\", FinalDifficultyDropFactor, (unsigned)MAX_HARDFORK_DROPFACTOR);\n        return false; // caller should shut down\n    }\n\n    // check fork id for validity (MVHF-CORE-DES-CSIG-2)\n    if (FinalForkId == 0) {\n        LogPrintf(\"MVF: Warning: fork id = 0 will result in vulnerability to replay attacks\\n\");\n    }\n    else {\n        if (FinalForkId < 0 || FinalForkId > MAX_HARDFORK_SIGHASH_ID) {\n            LogPrintf(\"MVF: Error: specified fork id (%d) is not in range 0..%u\\n\", FinalForkId, (unsigned)MAX_HARDFORK_SIGHASH_ID);\n            return false; // caller should shut down\n        }\n    }\n\n    // debug traces of final values\n    LogPrintf(\"%s: MVF: fork consensus code = %s\\n\", __func__, post_fork_consensus_id);\n    LogPrintf(\"%s: MVF: active network = %s\\n\", __func__, activeNetworkID);\n    LogPrintf(\"%s: MVF: active fork id = 0x%06x (%d)\\n\", __func__, FinalForkId, FinalForkId);\n    LogPrintf(\"%s: MVF: active fork height = %d\\n\", __func__, FinalActivateForkHeight);\n    LogPrintf(\"%s: MVF: active difficulty drop factor = %u\\n\", __func__, FinalDifficultyDropFactor);\n    if (GetBoolArg(\"-segwitfork\", DEFAULT_TRIGGER_ON_SEGWIT))\n        LogPrintf(\"%s: MVF: Segregated Witness trigger is ENABLED\\n\", __func__);\n    else\n        LogPrintf(\"%s: MVF: Segregated Witness trigger is DISABLED\\n\", __func__);\n    LogPrintf(\"%s: MVF: auto backup block = %d\\n\", __func__, GetArg(\"-autobackupblock\", FinalActivateForkHeight - 1));\n\n    if (GetBoolArg(\"-force-retarget\", DEFAULT_FORCE_RETARGET))\n        LogPrintf(\"%s: MVF: force-retarget is ENABLED\\n\", __func__);\n    else\n        LogPrintf(\"%s: MVF: force-retarget is DISABLED\\n\", __func__);\n\n    // we should always set the activation flag to false during setup\n    isMVFHardForkActive = false;\n\n    return true;\n}\n\n/** Return full path to btcfork.conf (MVHF-CORE-DES-?-?) */\n// MVF-Core TODO: traceability\nboost::filesystem::path MVFGetConfigFile()\n{\n    boost::filesystem::path pathConfigFile(BTCFORK_CONF_FILENAME);\n    pathConfigFile = GetDataDir() / pathConfigFile;\n    return pathConfigFile;\n}\n\n/** Actions when the fork triggers (MVHF-CORE-DES-TRIG-6) */\n// doBackup parameter default is true\nvoid ActivateFork(int actualForkHeight, bool doBackup)\n{\n    LogPrintf(\"%s: MVF: checking whether to perform fork activation\\n\", __func__);\n    if (!isMVFHardForkActive && !wasMVFHardForkPreviouslyActivated) // sanity check to protect the one-off actions\n    {\n        LogPrintf(\"%s: MVF: performing fork activation actions\\n\", __func__);\n\n        // set so that we capture the actual height at which it forked\n        // because this can be different from user-specified configuration\n        // (e.g. 
soft-fork activated)\n        FinalActivateForkHeight = actualForkHeight;\n\n        boost::filesystem::path pathBTCforkConfigFile(MVFGetConfigFile());\n        LogPrintf(\"%s: MVF: checking for existence of %s\\n\", __func__, pathBTCforkConfigFile.string().c_str());\n\n        // remove btcfork.conf if it already exists - it shall be overwritten\n        if (boost::filesystem::exists(pathBTCforkConfigFile)) {\n            LogPrintf(\"%s: MVF: removing %s\\n\", __func__, pathBTCforkConfigFile.string().c_str());\n            try {\n                boost::filesystem::remove(pathBTCforkConfigFile);\n            } catch (const boost::filesystem::filesystem_error& e) {\n                LogPrintf(\"%s: MVF: Unable to remove %s config file: %s\\n\", __func__, pathBTCforkConfigFile.string().c_str(), e.what());\n            }\n        }\n        // try to write the btcfork.conf (MVHF-CORE-DES-TRIG-10)\n        LogPrintf(\"%s: MVF: writing %s\\n\", __func__, pathBTCforkConfigFile.string().c_str());\n        std::ofstream btcforkfile(pathBTCforkConfigFile.string().c_str(), std::ios::out);\n        btcforkfile << \"forkheight=\" << FinalActivateForkHeight << \"\\n\";\n        btcforkfile << \"forkid=\" << FinalForkId << \"\\n\";\n\n        LogPrintf(\"%s: MVF: active fork height = %d\\n\", __func__, FinalActivateForkHeight);\n        LogPrintf(\"%s: MVF: active fork id = 0x%06x (%d)\\n\", __func__, FinalForkId, FinalForkId);\n\n        // MVF-Core begin MVHF-CORE-DES-WABU-3\n        // check if we need to do wallet auto backup at fork block\n        // this is in case of soft-fork triggered activation\n        // MVF-Core TODO: reduce code duplication between this block and main.cpp:UpdateTip()\n        if (doBackup && !fAutoBackupDone)\n        {\n            std::string strWalletBackupFile = GetArg(\"-autobackupwalletpath\", \"\");\n            int BackupBlock = actualForkHeight;\n\n            //LogPrintf(\"MVF DEBUG: autobackupwalletpath=%s\\n\",strWalletBackupFile);\n            //LogPrintf(\"MVF DEBUG: autobackupblock=%d\\n\",BackupBlock);\n\n            if (GetBoolArg(\"-disablewallet\", false))\n            {\n                LogPrintf(\"MVF: -disablewallet conflicts with -autobackupwalletpath, so the automatic backup is disabled.\\n\");\n                fAutoBackupDone = true;\n            }\n            else {\n                // Auto Backup defined, but no need to check block height since\n                // this is fork activation time and we still have not backed up\n                // so just get on with it\n                if (GetMainSignals().BackupWalletAuto(strWalletBackupFile, BackupBlock))\n                    fAutoBackupDone = true;\n                else {\n                    // shutdown in case of wallet backup failure (MVHF-CORE-DES-WABU-5)\n                    // MVF-Core TODO: investigate if this is safe in terms of wallet flushing/closing or if more needs to be done\n                    btcforkfile << \"error: unable to perform automatic backup - exiting\" << \"\\n\";\n                    btcforkfile.close();\n                    throw std::runtime_error(\"CWallet::BackupWalletAuto() : Auto wallet backup failed!\");\n                }\n            }\n            btcforkfile << \"autobackupblock=\" << FinalActivateForkHeight << \"\\n\";\n            LogPrintf(\"%s: MVF: soft-forked auto backup block = %d\\n\", __func__, FinalActivateForkHeight);\n\n        }\n        else {\n            // auto backup was already made pre-fork - emit parameters\n            btcforkfile << \"autobackupblock=\" << GetArg(\"-autobackupblock\", FinalActivateForkHeight - 1) << \"\\n\";\n            LogPrintf(\"%s: MVF: height-based auto backup block = %d\\n\", __func__, GetArg(\"-autobackupblock\", FinalActivateForkHeight - 1));\n            fAutoBackupDone = true; // added because otherwise backup can sometimes be re-done\n        }\n\n        // close fork parameter file\n        btcforkfile.close();\n    }\n    // set the flag so that other code knows HF is active\n    LogPrintf(\"%s: MVF: enabling isMVFHardForkActive\\n\", __func__);\n    isMVFHardForkActive = true;\n}\n\n\n/** Actions when the fork is deactivated in reorg (MVHF-CORE-DES-TRIG-7) 
*/\nvoid DeactivateFork(void)\n{\n    LogPrintf(\"%s: MVF: checking whether to perform fork deactivation\\n\", __func__);\n    if (isMVFHardForkActive)\n    {\n        LogPrintf(\"%s: MVF: performing fork deactivation actions\\n\", __func__);\n    }\n    LogPrintf(\"%s: MVF: disabling isMVFHardForkActive\\n\", __func__);\n    isMVFHardForkActive = false;\n}\n\n\n/** returns the finalized path of the auto wallet backup file (MVHF-CORE-DES-WABU-2) */\nstd::string MVFexpandWalletAutoBackupPath(const std::string& strDest, const std::string& strWalletFile, int BackupBlock, bool createDirs)\n{\n    boost::filesystem::path pathBackupWallet = strDest;\n\n    //if the backup destination is blank\n    if (strDest == \"\")\n    {\n        // then prefix it with the existing data dir and wallet filename\n        pathBackupWallet = GetDataDir() / strprintf(\"%s.%s\",strWalletFile, autoWalletBackupSuffix);\n    }\n    else {\n        if (pathBackupWallet.is_relative())\n            // prefix existing data dir\n            pathBackupWallet = GetDataDir() / pathBackupWallet;\n\n        // if pathBackupWallet is a folder or symlink, or if it does not end\n        // in a filename with an extension...\n        if (!pathBackupWallet.has_extension() || boost::filesystem::is_directory(pathBackupWallet) || boost::filesystem::is_symlink(pathBackupWallet))\n            // ... we assume no custom filename so append the default filename\n            pathBackupWallet /= strprintf(\"%s.%s\",strWalletFile, autoWalletBackupSuffix);\n\n        if (pathBackupWallet.branch_path() != \"\" && createDirs)\n            // create directories if they don't exist\n            // MVF-Core TODO: this directory creation should be factored out\n            // so that we do not need to pass a Boolean arg and this function\n            // should not have the side effect. Marked for cleanup.\n            boost::filesystem::create_directories(pathBackupWallet.branch_path());\n    }\n\n    std::string strBackupFile = pathBackupWallet.string();\n\n    // replace @ with the BackupBlock number\n    boost::replace_all(strBackupFile,\"@\", boost::to_string_stub(BackupBlock));\n    //LogPrintf(\"DEBUG: strBackupFile=%s\\n\",strBackupFile);\n\n    return strBackupFile;\n}\n\n// get / set functions for btcforkMapArgs\nstd::string MVFGetArg(const std::string& strArg, const std::string& strDefault)\n{\n    if (btcforkMapArgs.count(strArg))\n        return btcforkMapArgs[strArg];\n    return strDefault;\n}\n\nint64_t MVFGetArg(const std::string& strArg, int64_t nDefault)\n{\n    if (btcforkMapArgs.count(strArg))\n        return atoi64(btcforkMapArgs[strArg]);\n    return nDefault;\n}\n\nbool MVFGetBoolArg(const std::string& strArg, bool fDefault)\n{\n    if (btcforkMapArgs.count(strArg))\n        return InterpretBool(btcforkMapArgs[strArg]);\n    return fDefault;\n}\n\nbool MFVSoftSetArg(const std::string& strArg, const std::string& strValue)\n{\n    if (btcforkMapArgs.count(strArg))\n        return false;\n    btcforkMapArgs[strArg] = strValue;\n    return true;\n}\n\nbool MFVSoftSetBoolArg(const std::string& strArg, bool fValue)\n{\n    // operate on the btcfork map via MFVSoftSetArg, not the global SoftSetArg\n    if (fValue)\n        return MFVSoftSetArg(strArg, std::string(\"1\"));\n    else\n        return MFVSoftSetArg(strArg, std::string(\"0\"));\n}\n" }, { "alpha_fraction": 0.5957890152931213, "alphanum_fraction": 0.6171729564666748, "avg_line_length": 50.16363525390625, "blob_id": "662f4e24b090245f57833a5ebe1e573691e6f2bf", "content_id": "24e06bb8aab3d985b7904e3f25464c5a92d3c2aa", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8442, "license_type": "permissive", "max_line_length": 144, "num_lines": 165, "path": "/qa/rpc-tests/mvf-core-trig.py", "repo_name": "BTCfork/hardfork_prototype_1_mvf-core", "src_encoding": "UTF-8", "text": 
"#!/usr/bin/env python2\n# Copyright (c) 2016 The Bitcoin developers\n# Distributed under the MIT software license, see the accompanying\n# file COPYING or http://www.opensource.org/licenses/mit-license.php.\n\n#\n# Test MVF fork triggering functionality (TRIG)\n#\n# on node 0, test pure block height trigger at height 100\n# on node 1, test pure block height trigger at height 200\n# on node 2, test SegWit trigger at height 431 (432 blocks = 3 periods of 144 blocks)\n# on node 3, test block height trigger pre-empts SegWit trigger at 300\n#\n\nimport os\n\nfrom test_framework.test_framework import BitcoinTestFramework\nfrom test_framework.util import *\n\n\nclass MVF_TRIG_Test(BitcoinTestFramework):\n\n def setup_chain(self):\n print(\"Initializing test directory \" + self.options.tmpdir)\n initialize_chain_clean(self.options.tmpdir, 4)\n self.nodelog = {}\n self.btcfork_conf = {}\n for n in range(0,4):\n self.nodelog[n] = os.path.join(self.options.tmpdir,\"node%d\" % n,\"regtest\",\"debug.log\")\n self.btcfork_conf[n] = os.path.join(self.options.tmpdir,\"node%d\" % n,\"regtest\",BTCFORK_CONF_FILENAME)\n\n def start_all_nodes(self):\n self.nodes = []\n self.is_network_split = False\n self.expected_fork_entries = {}\n self.nodes.append(start_node(0, self.options.tmpdir,\n [\"-forkheight=100\", ]))\n self.nodes.append(start_node(1, self.options.tmpdir,\n [\"-forkheight=200\", ]))\n self.nodes.append(start_node(2, self.options.tmpdir,\n [\"-forkheight=999999\",\n \"-blockversion=%s\" % 0x20000002])) # signal SegWit\n self.nodes.append(start_node(3, self.options.tmpdir,\n [\"-forkheight=300\",\n \"-blockversion=%s\" % 0x20000002])) # signal SegWit, but forkheight should pre-empt\n self.expected_fork_entries[0] = { \"forkheight\": \"100\", \"forkid\": \"5591040\", \"autobackupblock\": \"99\"}\n self.expected_fork_entries[1] = { \"forkheight\": \"200\", \"forkid\": \"5591040\", \"autobackupblock\": \"199\"}\n self.expected_fork_entries[2] = { \"forkheight\": \"431\", \"forkid\": \"5591040\", \"autobackupblock\": \"431\"}\n self.expected_fork_entries[3] = { \"forkheight\": \"300\", \"forkid\": \"5591040\", \"autobackupblock\": \"299\"}\n\n def setup_network(self):\n self.start_all_nodes()\n\n def prior_fork_detected_on_node(self, node=0):\n \"\"\" check in log file if prior fork has been detected and return true/false \"\"\"\n marker_found = search_file(self.nodelog[node], \"MVF: found marker config file\")\n return (len(marker_found) > 0)\n\n def is_config_file_consistent(self, node=0, entry_map={}):\n \"\"\" check whether btcfork.conf file matches expectations,\n and return true/false. One of the assumptions is that the\n config file should exist. 
Do not call this function otherwise.\"\"\"\n        config_file_written = search_file(self.nodelog[node], \"MVF: writing\")\n        if len(config_file_written) == 0:\n            # absence of config file is unexpected\n            print \"is_config_file_consistent: config file not found for node %d\" % node\n            return False\n        verify_btcfork_conf = config_file_written[0].split(\" \")[-1:][0].strip()\n        if verify_btcfork_conf != self.btcfork_conf[node]:\n            # check that filename matches what is expected\n            print \"is_config_file_consistent: config filename %s mismatch %s for node %d\" % (verify_btcfork_conf, self.btcfork_conf[node], node)\n            return False\n        for key in entry_map.keys():\n            key_found = search_file(self.btcfork_conf[node], \"%s=\" % key)\n            if (len(key_found) != 1):\n                print \"is_config_file_consistent: key %s not found for node %d\" % (key, node)\n                return False\n            val_found=key_found[0].split(\"=\")[1].strip()\n            if (val_found != entry_map[key]):\n                print \"is_config_file_consistent: unexpected value '%s' for key %s found for node %d\" % (val_found, key, node)\n                return False\n        return True\n\n    def is_fork_triggered_on_node(self, node=0):\n        \"\"\" check in log file if fork has triggered and return true/false \"\"\"\n        # MVF-Core TODO: extend to check using RPC info about forks\n        hf_active = (search_file(self.nodelog[node], \"isMVFHardForkActive=1\") and\n                     search_file(self.nodelog[node], \"enabling isMVFHardForkActive\"))\n        fork_actions_performed = search_file(self.nodelog[node], \"MVF: performing fork activation actions\")\n        return (len(hf_active) > 0 and len(fork_actions_performed) == 1)\n\n    def run_test(self):\n        # check that the fork does not trigger before the fork height\n        print \"Generating 99 pre-fork blocks\"\n        for n in xrange(len(self.nodes)):\n            self.nodes[n].generate(99)\n            assert_equal(False, self.is_fork_triggered_on_node(n)\n                                or self.prior_fork_detected_on_node(n))\n        print \"Fork did not trigger prematurely\"\n\n        # check that fork triggers for nodes 0 and 1 at designated height\n        # move all nodes to height 100\n        for n in xrange(len(self.nodes)):\n            self.nodes[n].generate(1)\n        assert_equal(True, self.is_fork_triggered_on_node(0))\n        assert_equal(True, self.is_config_file_consistent(0, self.expected_fork_entries[0]))\n        assert_equal(False, self.prior_fork_detected_on_node(0))\n        for n in [1,2,3]:\n            assert_equal(False, self.is_fork_triggered_on_node(n))\n            assert_equal(False, self.prior_fork_detected_on_node(n))\n\n        print \"Fork triggered successfully on node 0 (block height 100)\"\n\n        # check node 1 triggering around height 200\n        self.nodes[1].generate(99)\n        assert_equal(False, self.is_fork_triggered_on_node(1))\n        self.nodes[1].generate(1)\n        assert_equal(True, self.is_fork_triggered_on_node(1))\n        assert_equal(True, self.is_config_file_consistent(1, self.expected_fork_entries[1]))\n        print \"Fork triggered successfully on node 1 (block height 200)\"\n\n        # check node 2 triggering around height 431\n        # it starts at 100\n        self.nodes[2].generate(330)\n        assert_equal(False, self.is_fork_triggered_on_node(2)\n                            or self.prior_fork_detected_on_node(2))\n        self.nodes[2].generate(1)\n        assert_equal(True, self.is_fork_triggered_on_node(2))\n        assert_equal(True, self.is_config_file_consistent(2, self.expected_fork_entries[2]))\n        assert_equal(False, self.prior_fork_detected_on_node(2))\n        # block 431 is when fork activation is performed.\n        # block 432 is first block where new consensus rules are in effect.\n        print \"Fork triggered successfully on node 2 (segwit, height 431)\"\n\n        # check node 3 triggering around height 300\n        # move to 299\n        
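# note: node 3 should still be at height 100 from the common setup above,\n        # so the next 199 blocks take it to 299 (one below its forkheight of 300)\n        assert_equal(self.nodes[3].getblockcount(), 100) # sanity check before advancing\n        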
self.nodes[3].generate(199)\n assert_equal(False, self.is_fork_triggered_on_node(3)\n or self.prior_fork_detected_on_node(3))\n self.nodes[3].generate(1)\n assert_equal(True, self.is_fork_triggered_on_node(3))\n assert_equal(True, self.is_config_file_consistent(3, self.expected_fork_entries[3]))\n assert_equal(False, self.prior_fork_detected_on_node(3))\n print \"Fork triggered successfully on node 3 (block height 300 ahead of SegWit)\"\n\n # test startup detection of prior fork activation.\n # by now, all 4 nodes have triggered.\n print \"Stopping all nodes\"\n for n in xrange(4):\n assert_equal(False, self.prior_fork_detected_on_node(n))\n stop_node(self.nodes[n], n)\n # get rid of debug.log files so we can better check retrigger\n os.unlink(os.path.join(self.options.tmpdir,\"node%d\" % n,\"regtest\",\"debug.log\"))\n\n # restart them all, check that they detected having forked on prior run\n print \"Restarting all nodes\"\n self.start_all_nodes()\n for n in xrange(4):\n assert_equal(True, self.prior_fork_detected_on_node(n))\n assert(len(search_file(self.nodelog[n], \"enabling isMVFHardForkActive\")) == 1)\n assert(len(search_file(self.nodelog[n], \"found marker config file\")) == 1)\n print \"Prior fork activation detected on all nodes\"\n\nif __name__ == '__main__':\n MVF_TRIG_Test().main()\n" }, { "alpha_fraction": 0.6650426387786865, "alphanum_fraction": 0.680528998374939, "avg_line_length": 39.75886535644531, "blob_id": "4b38ee942398cdcdd43b7bb3178fa559afa105d4", "content_id": "35f6310bd00ddc3e290029220e70ddf1acf76761", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 5747, "license_type": "permissive", "max_line_length": 124, "num_lines": 141, "path": "/src/test/mvfstandalone_tests.cpp", "repo_name": "BTCfork/hardfork_prototype_1_mvf-core", "src_encoding": "UTF-8", "text": "// Copyright (c) 2016 The Bitcoin developers\n// Distributed under the MIT software license, see the accompanying\n// file COPYING or http://www.opensource.org/licenses/mit-license.php.\n\n#include <fstream>\n#include <boost/test/unit_test.hpp>\n\n#include \"mvf-core.h\"\n#include \"mvf-btcfork_conf_parser.h\"\n#include \"test/test_bitcoin.h\"\n\n#ifdef ENABLE_WALLET\n#include \"wallet/wallet.h\"\n#endif\n\n\nBOOST_FIXTURE_TEST_SUITE(mvfstandalone_tests, BasicTestingSetup)\n\n\n// tests of the wallet backup filename construction\nBOOST_AUTO_TEST_CASE(wallet_backup_path_expansion)\n{\n std::string platform(BOOST_PLATFORM);\n\n boost::filesystem::path datadir = GetDataDir();\n std::string dds = datadir.string();\n static const boost::filesystem::path abspath(GetDataDir());\n static const boost::filesystem::path relpath(\"rel\");\n static const boost::filesystem::path fullpath = datadir / \"[email protected]\";\n static const boost::filesystem::path userpath(\"/home/user/.bitcoin\");\n\n // sanity checks\n BOOST_CHECK(abspath.is_absolute());\n BOOST_CHECK(!abspath.is_relative());\n BOOST_CHECK(relpath.is_relative());\n BOOST_CHECK(!relpath.is_absolute());\n BOOST_CHECK(!relpath.has_root_directory());\n BOOST_CHECK(fullpath.has_filename());\n BOOST_CHECK(userpath.has_filename());\n BOOST_CHECK(userpath.has_extension());\n BOOST_CHECK_EQUAL(userpath.filename(), \".bitcoin\");\n BOOST_CHECK_EQUAL(userpath.extension(), \".bitcoin\");\n BOOST_CHECK_EQUAL(userpath.extension(), userpath.filename());\n\n#ifdef ENABLE_WALLET\n // if first arg is empty, then datadir is prefixed\n BOOST_CHECK_EQUAL(MVFexpandWalletAutoBackupPath(\"\", \"w.dat\", 0, false),\n datadir 
/ \"w.dat.auto.0.bak\");\n\n // if first arg is relative, then datadir is still prefixed\n BOOST_CHECK_EQUAL(MVFexpandWalletAutoBackupPath(\"dir\", \"w.dat\", 1, false),\n datadir / \"dir\" / \"w.dat.auto.1.bak\");\n\n // if first arg is absolute, then datadir is not prefixed\n BOOST_CHECK_EQUAL(MVFexpandWalletAutoBackupPath(abspath.string(), \"w.dat\", 2, false),\n abspath / \"w.dat.auto.2.bak\");\n\n // if path contains @ it is replaced by height\n BOOST_CHECK_EQUAL(MVFexpandWalletAutoBackupPath(\"@@@\", \"[email protected]\", 7, false),\n datadir / \"777\" / \"w7.dat.auto.7.bak\");\n\n // if first contains filename, then appending of filename is skipped\n BOOST_CHECK_EQUAL(MVFexpandWalletAutoBackupPath(fullpath.string(), \"w.dat\", 6, false),\n datadir / \"w6.dat\");\n#endif\n}\n\n\nBOOST_AUTO_TEST_CASE(btcfork_conf_maps)\n{\n btcforkMapArgs.clear();\n btcforkMapArgs[\"strtest1\"] = \"string...\";\n // strtest2 undefined on purpose\n btcforkMapArgs[\"inttest1\"] = \"12345\";\n btcforkMapArgs[\"inttest2\"] = \"81985529216486895\";\n // inttest3 undefined on purpose\n btcforkMapArgs[\"booltest1\"] = \"\";\n // booltest2 undefined on purpose\n btcforkMapArgs[\"booltest3\"] = \"0\";\n btcforkMapArgs[\"booltest4\"] = \"1\";\n\n BOOST_CHECK_EQUAL(MVFGetArg(\"strtest1\", \"default\"), \"string...\");\n BOOST_CHECK_EQUAL(MVFGetArg(\"strtest2\", \"default\"), \"default\");\n BOOST_CHECK_EQUAL(MVFGetArg(\"inttest1\", -1), 12345);\n BOOST_CHECK_EQUAL(MVFGetArg(\"inttest2\", -1), 81985529216486895LL);\n BOOST_CHECK_EQUAL(MVFGetArg(\"inttest3\", -1), -1);\n BOOST_CHECK_EQUAL(MVFGetBoolArg(\"booltest1\", false), true);\n BOOST_CHECK_EQUAL(MVFGetBoolArg(\"booltest2\", false), false);\n BOOST_CHECK_EQUAL(MVFGetBoolArg(\"booltest3\", false), false);\n BOOST_CHECK_EQUAL(MVFGetBoolArg(\"booltest4\", false), true);\n}\n\n\n// test MVFGetConfigFile(), the MVF config (btcfork.conf) filename construction\nBOOST_AUTO_TEST_CASE(mvfgetconfigfile)\n{\n boost::filesystem::path cfgBaseFile(BTCFORK_CONF_FILENAME);\n BOOST_CHECK(!cfgBaseFile.is_complete());\n BOOST_CHECK_EQUAL(MVFGetConfigFile(), (GetDataDir() / BTCFORK_CONF_FILENAME));\n}\n\n\n// test MVFReadConfigFile() which reads a config file into arg maps\nBOOST_AUTO_TEST_CASE(mvfreadconfigfile)\n{\n boost::filesystem::path pathBTCforkConfigFile = GetTempPath() / boost::filesystem::unique_path(\"btcfork.conf.%%%%.txt\");\n //fprintf(stderr,\"btcfork_conf_file: set config file %s\\n\", pathBTCforkConfigFile.string().c_str());\n BOOST_CHECK(!boost::filesystem::exists(pathBTCforkConfigFile));\n try\n {\n std::ofstream btcforkfile(pathBTCforkConfigFile.string().c_str(), std::ios::out);\n btcforkfile << \"forkheight=\" << HARDFORK_HEIGHT_REGTEST << \"\\n\";\n btcforkfile << \"forkid=\" << HARDFORK_SIGHASH_ID << \"\\n\";\n btcforkfile << \"autobackupblock=\" << (HARDFORK_HEIGHT_REGTEST - 1) << \"\\n\";\n btcforkfile.close();\n } catch (const std::exception& e) {\n BOOST_ERROR(\"Cound not write config file \" << pathBTCforkConfigFile << \" : \" << e.what());\n }\n BOOST_CHECK(boost::filesystem::exists(pathBTCforkConfigFile));\n // clear args map and read file\n btcforkMapArgs.clear();\n try\n {\n MVFReadConfigFile(pathBTCforkConfigFile, btcforkMapArgs, btcforkMapMultiArgs);\n } catch (const std::exception& e) {\n BOOST_ERROR(\"Cound not read config file \" << pathBTCforkConfigFile << \" : \" << e.what());\n }\n // check map after reading\n BOOST_CHECK_EQUAL(atoi(btcforkMapArgs[\"-forkheight\"]), (int)HARDFORK_HEIGHT_REGTEST);\n 
BOOST_CHECK_EQUAL(atoi(btcforkMapArgs[\"-forkid\"]), (int)HARDFORK_SIGHASH_ID);\n BOOST_CHECK_EQUAL(atoi(btcforkMapArgs[\"-autobackupblock\"]), (int)(HARDFORK_HEIGHT_REGTEST - 1));\n\n // added so that we can update this test when adding new entries\n BOOST_CHECK_EQUAL(btcforkMapArgs.size(), 3);\n\n // cleanup\n boost::filesystem::remove(pathBTCforkConfigFile);\n BOOST_CHECK(!boost::filesystem::exists(pathBTCforkConfigFile));\n}\n\nBOOST_AUTO_TEST_SUITE_END()\n" }, { "alpha_fraction": 0.5957890152931213, "alphanum_fraction": 0.6171729564666748, "avg_line_length": 43.266990661621094, "blob_id": "fd276753493c884e1693f95a02b45e9696b8d07f", "content_id": "461fc7773676e11fd140256f00c0b67a6d0ad47a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9119, "license_type": "permissive", "max_line_length": 156, "num_lines": 206, "path": "/qa/rpc-tests/mvf-core-csig.py", "repo_name": "BTCfork/hardfork_prototype_1_mvf-core", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python2\n# Copyright (c) 2014-2015 The Bitcoin Core developers\n# Copyright (c) 2016 The Bitcoin developers\n# Distributed under the MIT software license, see the accompanying\n# file COPYING or http://www.opensource.org/licenses/mit-license.php.\n# MVF-Core\n\"\"\"\nExercise the signature change (replay protection) code.\nDerived from walletbackupauto.py.\n\nTest case is:\n4 nodes - 2 forking and 2 non-forking, sending transactions between each other.\nPrior to the fork, anything goes.\nPost fork, the nodes of the same kind can still send between each other,\nbut not to the nodes of the other kind (2 way check).\n\"\"\"\n\nimport os\nimport fnmatch\nimport hashlib\nfrom test_framework.test_framework import BitcoinTestFramework\nfrom test_framework.util import *\nfrom random import randint\nimport logging\nimport time\n\n#logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.INFO)\n\nFORKHEIGHT = 120\n\nclass ReplayProtectionTest(BitcoinTestFramework):\n\n def setup_chain(self):\n #logging.info(\"Initializing test directory \"+self.options.tmpdir)\n print(\"Initializing test directory \"+self.options.tmpdir)\n initialize_chain_clean(self.options.tmpdir, 4)\n\n def setup_network(self, split=False):\n #logging.info(\"Starting nodes\")\n print(\"Starting nodes\")\n\n # all nodes are spenders, let's give them a keypool=100\n self.extra_args = [\n ['-debug', '-whitelist=127.0.0.1', \"-keypool=100\"],\n ['-debug', '-whitelist=127.0.0.1', \"-keypool=100\"],\n ['-debug', '-whitelist=127.0.0.1', \"-keypool=100\", \"-forkheight=%s\"%FORKHEIGHT],\n ['-debug', '-whitelist=127.0.0.1', \"-keypool=100\", \"-forkheight=%s\"%FORKHEIGHT]]\n\n self.nodes = start_nodes(4, self.options.tmpdir, self.extra_args)\n connect_nodes(self.nodes[0], 1)\n connect_nodes(self.nodes[0], 2)\n connect_nodes(self.nodes[0], 3)\n connect_nodes(self.nodes[1], 2)\n connect_nodes(self.nodes[1], 3)\n connect_nodes(self.nodes[3], 2)\n self.is_network_split=False\n self.sync_all()\n\n def send_and_check(self, from_node, to_node, expect_to_succeed=True, force_sync=True, check=True, check_for_fail=False):\n ''' try sending 0.1 BTC from one node to another,\n and optionally check if successful '''\n to_addr = self.nodes[to_node].getnewaddress()\n amount = Decimal(1) / Decimal(10)\n txid = self.nodes[from_node].sendtoaddress(to_addr, amount)\n if force_sync:\n sync_mempools([self.nodes[from_node], self.nodes[to_node]])\n else:\n time.sleep(1)\n if check:\n if check_for_fail:\n assert_equal(txid in 
self.nodes[from_node].getrawmempool(), True)\n assert_equal(txid in self.nodes[to_node].getrawmempool(), False)\n else:\n assert_equal(txid in self.nodes[from_node].getrawmempool() and (txid in self.nodes[to_node].getrawmempool() or not expect_to_succeed), True)\n return txid\n\n def run_test(self):\n #logging.info(\"Fork height configured for block %s\"%(FORKHEIGHT))\n print(\"Fork height configured for block %s\"%(FORKHEIGHT))\n\n #logging.info(\"Generating initial 104 blocks\")\n print(\"Generating initial 104 blocks\")\n self.nodes[0].generate(1)\n sync_blocks(self.nodes)\n self.nodes[1].generate(1)\n sync_blocks(self.nodes)\n self.nodes[2].generate(1)\n sync_blocks(self.nodes)\n self.nodes[3].generate(101)\n\n sync_blocks(self.nodes)\n #logging.info(\"Current height %s blocks\"%(self.nodes[0].getblockcount()))\n print(\"Current height %s blocks\"%(self.nodes[0].getblockcount()))\n\n assert_equal(self.nodes[0].getbalance(), 50)\n assert_equal(self.nodes[1].getbalance(), 50)\n assert_equal(self.nodes[2].getbalance(), 50)\n assert_equal(self.nodes[3].getbalance(), 50)\n\n assert_equal(self.nodes[0].getblockcount(), 104)\n\n #logging.info(\"Check all sending works after setup\")\n print(\"Check all sending works after setup\")\n # from any node to the others should be ok now\n # this should generate 4*3 = 12 more blocks\n for src_node in range(4):\n for dst_node in range(4):\n if src_node != dst_node:\n #logging.info(\"... from %d to %d\" %(src_node, dst_node))\n print(\"... from %d to %d\" %(src_node, dst_node))\n self.send_and_check(src_node, dst_node, True)\n self.nodes[dst_node].generate(1)\n sync_blocks(self.nodes)\n\n current_height = self.nodes[0].getblockcount()\n assert_equal(current_height, 116)\n\n # generate blocks, one on each node in turn, until we reach pre-fork block height\n blocks_to_fork = FORKHEIGHT - current_height - 1\n self.nodes[0].generate(blocks_to_fork)\n\n # not sure why this loop didn't work reliably...\n # maybe it was the round-robin generation\n while False: #blocks_to_fork > 0:\n #logging.info(\"blocks left to fork height: %d\" % blocks_to_fork)\n print(\"blocks left to fork height: %d\" % blocks_to_fork)\n self.nodes[blocks_to_fork % 4].generate(1)\n blocks_to_fork -= 1\n\n sync_blocks(self.nodes)\n assert_equal(self.nodes[0].getblockcount(), FORKHEIGHT - 1)\n\n #logging.info(\"Current height %s blocks (pre-fork block)\"%(self.nodes[0].getblockcount()))\n print(\"Current height %s blocks (pre-fork block)\"%(self.nodes[0].getblockcount()))\n\n # check that we can still send to all other nodes for the pre-fork block\n\n # collect a bunch of tx's sent by the nodes to each other\n #logging.info(\"sending tx's between all nodes at pre-fork\")\n print(\"sending tx's between all nodes at pre-fork\")\n should_be_fine_txs = []\n for src_node in range(4):\n for dst_node in range(4):\n if src_node != dst_node:\n #logging.info(\"... from %d to %d\" %(src_node, dst_node))\n print(\"... 
from %d to %d\" %(src_node, dst_node))\n should_be_fine_txs.append(self.send_and_check(src_node, dst_node, True))\n\n #logging.info(\"Verifying tx's were still accepted by all nodes\")\n print(\"Verifying tx's were still accepted by all nodes\")\n sync_mempools(self.nodes)\n mempools = [self.nodes[i].getrawmempool() for i in range(4)]\n for tx in should_be_fine_txs:\n for n in range(4):\n assert_equal(tx in mempools[n], True)\n\n # generate the fork block\n #logging.info(\"Generate fork block at height %s\" % FORKHEIGHT)\n print(\"Generate fork block at height %s\" % FORKHEIGHT)\n self.nodes[0].generate(1)\n\n # check the previous round of tx's not in mempool anymore\n self.sync_all()\n assert_equal(self.nodes[0].getblockcount(), FORKHEIGHT)\n\n #logging.info(\"Verifying tx's no longer in any mempool\")\n print(\"Verifying tx's no longer in any mempool\")\n mempools = [self.nodes[i].getrawmempool() for i in range(4)]\n for tx in should_be_fine_txs:\n for n in range(4):\n assert_equal(tx in mempools[n], False)\n\n # check that now, only nodes of the same kind can transact\n # these pairs should work fine\n #logging.info(\"Checking transactions between same-kind nodes\")\n print(\"Checking transactions between same-kind nodes\")\n for pair in ((0,1), (1,0), (2,3), (3,2)):\n #logging.info(\"... from %d to %d\" %(pair[0], pair[1]))\n print(\"... from %d to %d\" %(pair[0], pair[1]))\n self.send_and_check(pair[0], pair[1], True)\n\n # re-connect the nodes which have been disconnected due to the\n # above post-fork transactions, so we can test them separately\n #logging.info(\"Re-connecting nodes which disconnected due to prior step\")\n print(\"Re-connecting nodes which disconnected due to prior step\")\n connect_nodes_bi(self.nodes,0,2)\n connect_nodes_bi(self.nodes,0,3)\n connect_nodes_bi(self.nodes,1,2)\n connect_nodes_bi(self.nodes,1,3)\n #logging.info(\"Checking transactions between forked/unforked nodes\")\n print(\"Checking transactions between forked/unforked nodes\")\n # these should not work anymore\n\n\n # MVF-Core TODO: decide whether to accept old-style signatures post-fork (maybe limited-time only?)\n # if you only want to deny new->old, then use the commented out code\n #for pair in ((2,0), (2,1), (3,0), (3,1)):\n\n # check both forked->unforked and vice versa are blocked now\n for pair in ((0,2), (0,3), (1,2), (1,3), (2,0), (2,1), (3,0), (3,1)):\n #logging.info(\"... from %d to %d\" %(pair[0], pair[1]))\n print(\"... 
from %d to %d\" %(pair[0], pair[1]))\n self.send_and_check(pair[0], pair[1], expect_to_succeed=False, force_sync=False, check=True, check_for_fail=True)\n\nif __name__ == '__main__':\n ReplayProtectionTest().main()\n" }, { "alpha_fraction": 0.7718539834022522, "alphanum_fraction": 0.7780979871749878, "avg_line_length": 45.24444580078125, "blob_id": "3b7d4d966a22b234c0e61bea98249e0573643870", "content_id": "16eefae1e68dd299dfccf37b2cdd186e25b8c426", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2082, "license_type": "permissive", "max_line_length": 121, "num_lines": 45, "path": "/src/mvf-core-globals.cpp", "repo_name": "BTCfork/hardfork_prototype_1_mvf-core", "src_encoding": "UTF-8", "text": "// Copyright (c) 2016 The Bitcoin developers\n// Distributed under the MIT software license, see the accompanying\n// file COPYING or http://www.opensource.org/licenses/mit-license.php.\n// MVF-Core common objects and functions\n\n#include \"mvf-core-globals.h\"\n\nusing namespace std;\n\n// key-value maps for btcfork.conf configuration items\nmap<string, string> btcforkMapArgs;\nmap<string, vector<string> > btcforkMapMultiArgs;\n\n// version string identifying the consensus-relevant algorithmic changes\n// so that a user can quickly see if MVF fork clients are compatible\n// for test purposes (since they may diverge during development/testing).\n// A new value must be chosen whenever there are changes to consensus\n// relevant functionality (excepting things which are parameterized).\n// Values are surnames chosen from the name list of space travelers at\n// https://en.wikipedia.org/wiki/List_of_space_travelers_by_name\n// already used: AKIYAMA (add current one to the list when replacing)\nstd::string post_fork_consensus_id = \"YAMAZAKI\";\n\n// actual fork height, taking into account user configuration parameters (MVHF-CORE-DES-TRIG-4)\nint FinalActivateForkHeight = 0;\n\n// actual difficulty drop factor, taking into account user configuration parameters (MVF-Core TODO: MVHF-CORE-DES-DIAD-?)\nunsigned FinalDifficultyDropFactor = 0;\n// actual fork id, taking into account user configuration parameters (MVHF-CORE-DES-CSIG-1)\nint FinalForkId = 0;\n\n// track whether HF has been activated before in previous run (MVHF-CORE-DES-TRIG-5)\n// is set at startup based on btcfork.conf presence\nbool wasMVFHardForkPreviouslyActivated = false;\n\n// track whether HF is active (MVHF-CORE-DES-TRIG-5)\nbool isMVFHardForkActive = false;\n\n// track whether auto wallet backup might still need to be done\n// this is set to true at startup if client detects fork already triggered\n// otherwise when the backup is made. 
(MVHF-CORE-DES-WABU-1)\nbool fAutoBackupDone = false;\n\n// default suffix to append to wallet filename for auto backup (MVHF-CORE-DES-WABU-1)\nstd::string autoWalletBackupSuffix = \"[email protected]\";\n\n" }, { "alpha_fraction": 0.6577145457267761, "alphanum_fraction": 0.6746804714202881, "avg_line_length": 40.20376205444336, "blob_id": "a559900b25ff6d85d7bb2e70ea0b114a7c0b63f3", "content_id": "3dfdd8a91a206adf075640f39fa35b8c342e2f81", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 13144, "license_type": "permissive", "max_line_length": 158, "num_lines": 319, "path": "/src/pow.cpp", "repo_name": "BTCfork/hardfork_prototype_1_mvf-core", "src_encoding": "UTF-8", "text": "// Copyright (c) 2009-2010 Satoshi Nakamoto\n// Copyright (c) 2009-2015 The Bitcoin Core developers\n// Copyright (c) 2015-2016 The Bitcoin Unlimited developers\n// Distributed under the MIT software license, see the accompanying\n// file COPYING or http://www.opensource.org/licenses/mit-license.php.\n\n#include \"pow.h\"\n\n#include \"arith_uint256.h\"\n#include \"chain.h\"\n#include \"primitives/block.h\"\n#include \"uint256.h\"\n#include \"util.h\"\n#include \"mvf-core.h\" // MVF-Core added\n\nunsigned int GetNextWorkRequired(const CBlockIndex* pindexLast, const CBlockHeader *pblock, const Consensus::Params& params)\n{\n // MVF-Core begin difficulty re-targeting\n if (params.MVFisWithinRetargetPeriod(pindexLast->nHeight+1))\n return GetMVFNextWorkRequired(pindexLast, pblock, params);\n // MVF-Core end\n\n unsigned int nProofOfWorkLimit = UintToArith256(params.powLimit).GetCompact();\n\n // Genesis block\n if (pindexLast == NULL)\n return nProofOfWorkLimit;\n\n // Only change once per difficulty adjustment interval\n if ((pindexLast->nHeight+1) % params.DifficultyAdjustmentInterval() != 0)\n {\n // MVF-Core: added force-retarget parameter to enable adjusting difficulty for regtest tests\n if (params.fPowAllowMinDifficultyBlocks && !GetBoolArg(\"-force-retarget\", DEFAULT_FORCE_RETARGET))\n {\n // Special difficulty rule for testnet:\n // If the new block's timestamp is more than 2* 10 minutes\n // then allow mining of a min-difficulty block.\n if (pblock->GetBlockTime() > pindexLast->GetBlockTime() + params.nPowTargetSpacing*2)\n return nProofOfWorkLimit;\n else\n {\n // Return the last non-special-min-difficulty-rules-block\n const CBlockIndex* pindex = pindexLast;\n while (pindex->pprev && pindex->nHeight % params.DifficultyAdjustmentInterval() != 0 && pindex->nBits == nProofOfWorkLimit)\n pindex = pindex->pprev;\n return pindex->nBits;\n }\n }\n return pindexLast->nBits;\n }\n\n // Go back by what we want to be 14 days worth of blocks\n int nHeightFirst = pindexLast->nHeight - (params.DifficultyAdjustmentInterval()-1);\n assert(nHeightFirst >= 0);\n const CBlockIndex* pindexFirst = pindexLast->GetAncestor(nHeightFirst);\n assert(pindexFirst);\n\n return CalculateNextWorkRequired(pindexLast, pindexFirst->GetBlockTime(), params);\n}\n\nunsigned int CalculateNextWorkRequired(const CBlockIndex* pindexLast, int64_t nFirstBlockTime, const Consensus::Params& params)\n{\n // MVF-Core: added force-retarget parameter to enable adjusting difficulty for regtest tests\n if (params.fPowNoRetargeting && !GetBoolArg(\"-force-retarget\", DEFAULT_FORCE_RETARGET))\n return pindexLast->nBits;\n\n // Limit adjustment step\n int64_t nActualTimespan = pindexLast->GetBlockTime() - nFirstBlockTime;\n LogPrintf(\" nActualTimespan = %d before bounds\\n\", nActualTimespan);\n if 
(nActualTimespan < params.nPowTargetTimespan/4)\n        nActualTimespan = params.nPowTargetTimespan/4;\n    if (nActualTimespan > params.nPowTargetTimespan*4)\n        nActualTimespan = params.nPowTargetTimespan*4;\n\n    // Retarget\n    const arith_uint256 bnPowLimit = UintToArith256(params.powLimit);\n    arith_uint256 bnNew;\n    arith_uint256 bnOld;\n    bnNew.SetCompact(pindexLast->nBits);\n    bnOld = bnNew;\n    bnNew *= nActualTimespan;\n    bnNew /= params.nPowTargetTimespan;\n\n    if (bnNew > bnPowLimit)\n        bnNew = bnPowLimit;\n\n    /// debug print\n    LogPrintf(\"GetNextWorkRequired RETARGET\\n\");\n    LogPrintf(\"params.nPowTargetTimespan = %d    nActualTimespan = %d\\n\", params.nPowTargetTimespan, nActualTimespan);\n    LogPrintf(\"Before: %08x  %s\\n\", pindexLast->nBits, bnOld.ToString());\n    LogPrintf(\"After:  %08x  %s\\n\", bnNew.GetCompact(), bnNew.ToString());\n\n    return bnNew.GetCompact();\n}\n\n// MVF-Core begin: difficulty functions\n// TODO: Move these functions into mvf-core.cpp\nunsigned int GetMVFNextWorkRequired(const CBlockIndex* pindexLast, const CBlockHeader *pblock, const Consensus::Params& params)\n{\n    unsigned int nProofOfWorkLimit = UintToArith256(params.powLimit).GetCompact();\n\n    LogPrintf(\"MVF NEXT WORK DifficultyAdjInterval = %d , TargetTimeSpan = %d \\n\",\n              params.DifficultyAdjustmentInterval(pindexLast->nHeight),\n              params.MVFPowTargetTimespan(pindexLast->nHeight));\n\n    // Genesis block\n    if (pindexLast == NULL) return nProofOfWorkLimit;\n\n    int nHeightFirst = pindexLast->nHeight - (params.MVFPowTargetTimespan(pindexLast->nHeight) / params.nPowTargetSpacing);\n    if (nHeightFirst < 0) nHeightFirst = 0;\n    const CBlockIndex* pindexFirst = pindexLast->GetAncestor(nHeightFirst);\n    assert(pindexFirst);\n\n    if (pindexLast->nHeight == FinalActivateForkHeight - 1)\n    {\n        // MVF-Core difficulty re-targeting reset (MVHF-CORE-DES-DIAD-2)\n        return CalculateMVFResetWorkRequired(pindexLast, pindexFirst->GetBlockTime(), params);\n    }\n    else\n    {\n        // Only change once per difficulty adjustment interval\n        if ((pindexLast->nHeight+1) % params.DifficultyAdjustmentInterval(pindexLast->nHeight) != 0) // MVF-Core: use height-dependent interval\n        {\n            // MVF-Core: added force-retarget parameter to enable adjusting difficulty for regtest tests\n            if (params.fPowAllowMinDifficultyBlocks && !GetBoolArg(\"-force-retarget\", DEFAULT_FORCE_RETARGET))\n            {\n                // TODO: CAUTION THIS CODE IS OUTSIDE REGTEST FRAMEWORK\n                // Special difficulty rule for testnet:\n                // If the new block's timestamp is more than 2* 10 minutes\n                // then allow mining of a min-difficulty block.\n                if (pblock->GetBlockTime() > pindexLast->GetBlockTime() + params.nPowTargetSpacing*2)\n                    return nProofOfWorkLimit;\n                else\n                {\n                    // Return the last non-special-min-difficulty-rules-block\n                    const CBlockIndex* pindex = pindexLast;\n                    // MVF-Core: use height-dependent interval\n                    while (pindex->pprev && pindex->nHeight % params.DifficultyAdjustmentInterval(pindex->nHeight) != 0 && pindex->nBits == nProofOfWorkLimit)\n                        pindex = pindex->pprev;\n                    return pindex->nBits;\n                }\n            }\n            return pindexLast->nBits;\n        }\n        LogPrintf(\"MVF RETARGET\\n\");\n        return CalculateMVFNextWorkRequired(pindexLast, pindexFirst->GetBlockTime(), params);\n\n    } // end fork reset\n}\n\nunsigned int CalculateMVFNextWorkRequired(const CBlockIndex* pindexLast, int64_t nFirstBlockTime, const Consensus::Params& params)\n{\n    bool force_retarget=GetBoolArg(\"-force-retarget\", DEFAULT_FORCE_RETARGET); // MVF-Core added for retargeting tests on regtestnet (MVHF-CORE-DES-DIAD-6)\n    const arith_uint256 bnPowLimit = UintToArith256(params.powLimit); // 
MVF-Core moved here\n\n if (params.fPowNoRetargeting && !force_retarget)\n return pindexLast->nBits;\n\n // Limit adjustment step\n int64_t nActualTimespan = pindexLast->GetBlockTime() - nFirstBlockTime;\n // MVF-Core begin check for abnormal condition\n // This actually occurred during testing, resulting in new target == 0\n // which could never be met\n if (nActualTimespan == 0) {\n LogPrintf(\" MVF: nActualTimespan == 0, returning bnPowLimit\\n\");\n return bnPowLimit.GetCompact();\n }\n // MVF-Core end\n LogPrintf(\" MVF: nActualTimespan = %d before bounds\\n\", nActualTimespan);\n\n // MVF-Core begin\n // Since in MVF fork recovery period, use faster retarget time span dependent on height (MVHF-CORE-DES-DIAD-3)\n int nTargetTimespan = params.MVFPowTargetTimespan(pindexLast->nHeight);\n\n // permit x10 retarget changes for a few blocks after the fork i.e. when nTargetTimespan is < 30 minutes (MVHF-CORE-DES-DIAD-5)\n int retargetLimit;\n if (nTargetTimespan >= params.nPowTargetSpacing * 3)\n retargetLimit = 4; else retargetLimit = 10;\n\n // prevent abrupt changes to target\n if (nActualTimespan < nTargetTimespan/retargetLimit)\n nActualTimespan = nTargetTimespan/retargetLimit;\n if (nActualTimespan > nTargetTimespan*retargetLimit)\n nActualTimespan = nTargetTimespan*retargetLimit;\n // MVF-Core end\n\n // Retarget\n arith_uint256 bnNew, bnNew1, bnNew2, bnOld;\n bnOld.SetCompact(pindexLast->nBits);\n // MVF-Core begin: move division before multiplication\n // at regtest difficulty, the multiplication is prone to overflowing\n bnNew1 = bnOld / nTargetTimespan;\n bnNew2 = bnNew1 * nActualTimespan;\n\n // Test for overflow\n if (bnNew2 / nActualTimespan != bnNew1)\n {\n bnNew = bnPowLimit;\n LogPrintf(\"MVF GetNextWorkRequired OVERFLOW\\n\");\n }\n else if (bnNew2 > bnPowLimit)\n {\n bnNew = bnPowLimit;\n LogPrintf(\"MVF GetNextWorkRequired OVERLIMIT\\n\");\n }\n else\n bnNew = bnNew2;\n // MVF-Core end\n\n /// debug print\n LogPrintf(\"GetNextWorkRequired RETARGET\\n\");\n LogPrintf(\"nTargetTimespan = %d nActualTimespan = %d\\n\", nTargetTimespan, nActualTimespan);\n LogPrintf(\"Before: %08x %s\\n\", pindexLast->nBits, bnOld.ToString());\n LogPrintf(\"After: %08x %s\\n\", bnNew.GetCompact(), bnNew.ToString());\n\n return bnNew.GetCompact();\n}\n\n/** Perform the fork difficulty reset */\nunsigned int CalculateMVFResetWorkRequired(const CBlockIndex* pindexLast, int64_t nFirstBlockTime, const Consensus::Params& params)\n{\n const arith_uint256 bnPowLimit = UintToArith256(params.powLimit); // MVF-Core moved here\n\n arith_uint256 bnNew, bnNew1, bnNew2, bnOld;\n\n // TODO : Determine best reset formula\n // currently we drop difficulty by a factor (see help for -diffdrop option)\n int64_t nActualTimespan = pindexLast->GetBlockTime() - nFirstBlockTime;\n // used reduced target time span\n int64_t nTargetTimespan = nActualTimespan / FinalDifficultyDropFactor;\n\n bnOld.SetCompact(pindexLast->nBits);\n bnNew1 = bnOld / nTargetTimespan;\n bnNew2 = bnNew1 * nActualTimespan;\n\n // check for overflow or overlimit\n if (bnNew2 / nActualTimespan != bnNew1 || bnNew2 > bnPowLimit)\n bnNew = bnPowLimit;\n else bnNew = bnNew2;\n\n // ignore formula above and override with fixed difficulty\n //bnNew.SetCompact(0x207eeeee);\n\n /// debug print\n LogPrintf(\"GetNextWorkRequired RETARGET\\n\");\n LogPrintf(\"nTargetTimespan = %d nActualTimespan = %d\\n\", nTargetTimespan, nActualTimespan);\n LogPrintf(\"Before: %08x %s\\n\", pindexLast->nBits, bnOld.ToString());\n LogPrintf(\"After MVF FORK BLOCK DIFFICULTY 
RESET %08x %s\\n\", bnNew.GetCompact(),bnNew.ToString());\n return bnNew.GetCompact();\n}\n// MVF-Core end: difficulty functions\n\nbool CheckProofOfWork(uint256 hash, unsigned int nBits, const Consensus::Params& params)\n{\n bool fNegative;\n bool fOverflow;\n arith_uint256 bnTarget;\n static bool force_retarget=GetBoolArg(\"-force-retarget\", DEFAULT_FORCE_RETARGET); // MVF-Core (MVHF-CORE-DES-DIAD-6)\n\n bnTarget.SetCompact(nBits, &fNegative, &fOverflow);\n\n // Check range\n // MVF-Core begin\n // --force-retarget is used to suppress output for regtest tests (MVHF-CORE-DES-DIAD-6)\n if (fNegative || bnTarget == 0 || fOverflow || bnTarget > UintToArith256(params.powLimit))\n {\n // do not output verbose error msgs if force-retarget\n // this is to prevent log file flooding when regtests with actual\n // retargeting are done\n if (!force_retarget)\n return error(\"CheckProofOfWork(): nBits below minimum work\");\n else\n return false;\n }\n // Check proof of work matches claimed amount\n if (UintToArith256(hash) > bnTarget)\n {\n if (!force_retarget)\n return error(\"CheckProofOfWork(): hash %s doesn't match nBits 0x%x\",hash.ToString(),nBits);\n else\n return false;\n }\n // MVF-Core end\n\n\n return true;\n}\n\narith_uint256 GetBlockProof(const CBlockIndex& block)\n{\n arith_uint256 bnTarget;\n bool fNegative;\n bool fOverflow;\n bnTarget.SetCompact(block.nBits, &fNegative, &fOverflow);\n if (fNegative || fOverflow || bnTarget == 0)\n return 0;\n // We need to compute 2**256 / (bnTarget+1), but we can't represent 2**256\n // as it's too large for a arith_uint256. However, as 2**256 is at least as large\n // as bnTarget+1, it is equal to ((2**256 - bnTarget - 1) / (bnTarget+1)) + 1,\n // or ~bnTarget / (nTarget+1) + 1.\n return (~bnTarget / (bnTarget + 1)) + 1;\n}\n\nint64_t GetBlockProofEquivalentTime(const CBlockIndex& to, const CBlockIndex& from, const CBlockIndex& tip, const Consensus::Params& params)\n{\n arith_uint256 r;\n int sign = 1;\n if (to.nChainWork > from.nChainWork) {\n r = to.nChainWork - from.nChainWork;\n } else {\n r = from.nChainWork - to.nChainWork;\n sign = -1;\n }\n r = r * arith_uint256(params.nPowTargetSpacing) / GetBlockProof(tip);\n if (r.bits() > 63) {\n return sign * std::numeric_limits<int64_t>::max();\n }\n return sign * r.GetLow64();\n}\n" }, { "alpha_fraction": 0.7222222089767456, "alphanum_fraction": 0.747259259223938, "avg_line_length": 63.903846740722656, "blob_id": "366b3acb78aa6c3cc7eb025c38403d5e521d9fa7", "content_id": "304055e880f711e0e14ff3766c7dcf11e9c5fd5e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 6750, "license_type": "permissive", "max_line_length": 141, "num_lines": 104, "path": "/src/mvf-core-globals.h", "repo_name": "BTCfork/hardfork_prototype_1_mvf-core", "src_encoding": "UTF-8", "text": "// Copyright (c) 2016 The Bitcoin developers\n// Distributed under the MIT software license, see the accompanying\n// file COPYING or http://www.opensource.org/licenses/mit-license.php.\n// MVF-Core common declarations\n#pragma once\n#ifndef BITCOIN_MVF_CORE_GLOBALS_H\n#define BITCOIN_MVF_CORE_GLOBALS_H\n\n#include \"protocol.h\"\n\n// MVHF-CORE-DES-TRIG-10 - config file that is written when forking, and used to detect \"forked\" condition at start\nconst char * const BTCFORK_CONF_FILENAME = \"btcfork.conf\";\n\nextern int FinalActivateForkHeight; // MVHF-CORE-DES-TRIG-4\nextern unsigned FinalDifficultyDropFactor; // MVF-Core TODO: MVHF-CORE-DES-DIAD-?\nextern bool 
wasMVFHardForkPreviouslyActivated; // MVHF-CORE-DES-TRIG-5\nextern bool isMVFHardForkActive; // MVHF-CORE-DES-TRIG-5\nextern int FinalForkId; // MVHF-CORE-DES-CSIG-1\nextern bool fAutoBackupDone; // MVHF-CORE-DES-WABU-1\nextern std::string autoWalletBackupSuffix; // MVHF-CORE-DES-WABU-1\n\n// btcfork.conf configuration key-value maps (MVF TODO: reference associated design)\nextern std::map<std::string, std::string> btcforkMapArgs;\nextern std::map<std::string, std::vector<std::string> > btcforkMapMultiArgs;\n\n// version string identifying the consensus-relevant algorithmic changes\n// so that a user can quickly see if fork clients are compatible\nextern std::string post_fork_consensus_id;\n\n// CAUTION! certain constant definitions from this file are parsed\n// and extracted by the Python test framework (util.py).\n// Usually there should be notes documenting where values have to\n// respect a certain format, but please tread carefully with the\n// formatting and do not just refactor the C++ names without\n// modifying the Python code.\n\n// default values that can be easily put into an enum\nenum {\n// MVHF-CORE-DES-TRIG-1 - trigger related parameter defaults\n// MVF-CORE TODO: choose values with some consideration instead of dummy values\nHARDFORK_MAX_BLOCK_SIZE = 2000000, // the fork's new maximum block size, in bytes\n// MVF-Core TODO: choose values with some consideration instead of dummy values\n// must be digit-only numerals (no operators) since they are read in by Python test framework\nHARDFORK_HEIGHT_MAINNET = 666666, // operational network trigger height\nHARDFORK_HEIGHT_TESTNET = 9999999, // public test network trigger height\nHARDFORK_HEIGHT_REGTEST = 9999999, // regression test network (local) trigger height\nHARDFORK_HEIGHT_BFGTEST = 9999999, // btcforks genesis test network trigger height\n\n// MVHF-CORE-DES-DIAD-3 / MVHF-CORE-DES-DIAD-4\n// period (in blocks) from fork activation until retargeting returns to normal\nHARDFORK_RETARGET_BLOCKS = 180*144, // number of blocks during which special fork retargeting is active\n// default drop factors for various networks (MVF-Core TODO: design reference)\n// must be digit-only numerals (no operators) since they are read in by Python test framework\nMAX_HARDFORK_DROPFACTOR = 1000000, // maximum drop factor\nHARDFORK_DROPFACTOR_MAINNET = 100000, // default difficulty drop on operational network (mainnet)\nHARDFORK_DROPFACTOR_TESTNET = 10000, // default difficulty drop on public test network (testnet)\nHARDFORK_DROPFACTOR_REGTEST = 4, // default difficulty drop on local regression test network (regtestnet)\nHARDFORK_DROPFACTOR_BFGTEST = 1000, // default difficulty drop on btcforks genesis test network (bfgtest)\n\n// MVHF-CORE-DES-NSEP-1 - network separation parameter defaults\n// MVF-Core TODO: re-check that these port values could be used\n// must be digit-only numerals (no operators) since they are read in by Python test framework\nHARDFORK_PORT_MAINNET = 9542, // default post-fork port on operational network (mainnet)\nHARDFORK_PORT_TESTNET = 9543, // default post-fork port on public test network (testnet)\nHARDFORK_PORT_REGTEST = 19655, // default post-fork port on local regression test network (regtestnet)\nHARDFORK_PORT_BFGTEST = 19988, // default post-fork port on btcforks genesis test network (bfgtest)\n\n// MVHF-CORE-DES-CSIG-1 - signature change parameter defaults\n// must be hex numerals (0x prefix) since they are read in and converted from hex by Python test framework\nHARDFORK_SIGHASH_ID = 0x555000, // 3 byte fork id 
that is left-shifted by 8 bits and then ORed with the SIGHASHes\nMAX_HARDFORK_SIGHASH_ID = 0xFFFFFF, // fork id may not exceed maximum representable in 3 bytes\n};\n\n// MVHF-CORE-DES-NSEP-1 - network separation parameter defaults\n// message start strings (network magic) after forking\n// The message start string should be designed to be unlikely to occur in normal data.\n// The characters are rarely used upper ASCII, not valid as UTF-8, and produce\n// a large 32-bit integer with any alignment.\n// MVF-Core TODO: Assign new netmagic values\n// MVF-Core TODO: Clarify why it's ok for testnet to deviate from the above rationale.\n// One would expect regtestnet to be less important than a public network!\nstatic const CMessageHeader::MessageStartChars pchMessageStart_HardForkMainnet = { 0xf9, 0xbe, 0xb4, 0xd9 },\n pchMessageStart_HardForkTestnet = { 0x0b, 0x11, 0x09, 0x07 },\n pchMessageStart_HardForkRegtest = { 0xf9, 0xbe, 0xb4, 0xd9 };\n\n// MVHF-CORE-DES-DIAD-1 - difficulty adjustment parameter defaults\n// MVF-Core TODO: calibrate the values for public testnets according to estimated initial present hashpower\n// values to which powLimit is reset at fork time on various networks (MVHF-CORE-DES-DIAD-2):\nstatic const uint256 HARDFORK_POWRESET_MAINNET = uint256S(\"00007fffffffffffffffffffffffffffffffffffffffffffffffffffffffffff\"), // mainnet\n HARDFORK_POWRESET_TESTNET = uint256S(\"007fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff\"), // testnet\n HARDFORK_POWRESET_BFGTEST = uint256S(\"007fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff\"), // bfgtest\n HARDFORK_POWRESET_REGTEST = uint256S(\"7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff\"); // regtestnet\n\n// MVHF-CORE-DES-DIAD-? -force-retarget option determines whether to actively retarget on regtest after fork happens\n// (not all tests need that, so the POW/difficulty fork related ones that do specifically invoke this option)\nconst bool DEFAULT_FORCE_RETARGET = false;\n\n// default value for -nosegwitfork option to disable the fork trigger on SegWit activation\n// caution: -noX options are turned into -X=0 by util.cpp, therefore the\n// parameter must be accessed as '-segwitfork' and the default below pertains\n// to that.\nconst bool DEFAULT_TRIGGER_ON_SEGWIT = true;\n\n#endif\n" }, { "alpha_fraction": 0.5928968787193298, "alphanum_fraction": 0.6137688755989075, "avg_line_length": 44.87234115600586, "blob_id": "15b0d26bd2893b0123f4be2937e4adff95b5f8c4", "content_id": "2897be1c8cab07e6eac1f4871fe175c0ba42cff8", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 15092, "license_type": "permissive", "max_line_length": 148, "num_lines": 329, "path": "/qa/rpc-tests/mvf-core-retarget.py", "repo_name": "BTCfork/hardfork_prototype_1_mvf-core", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python2\n# Copyright (c) 2016 The Bitcoin developers\n# Distributed under the MIT software license, see the accompanying\n# file COPYING or http://www.opensource.org/licenses/mit-license.php.\n\n#\n# Test MVF post fork retargeting\n#\n# on node 0, test pure block height trigger at height FORK_BLOCK\n#\nfrom test_framework.test_framework import BitcoinTestFramework\nfrom test_framework.util import *\nfrom test_framework.arith import *\nfrom random import randint\n\n# period (in blocks) from fork activation until retargeting returns to normal\nHARDFORK_RETARGET_BLOCKS = 180*144 # the period when retargeting returns to original\nFORK_BLOCK = 2020 # 
needs to be >= 2018 to test fork difficulty reset\nPOW_LIMIT = 0x207fffff\nPREFORK_BLOCKTIME = 800 # the seconds for a block during the regtest prefork\nORIGINAL_DIFFADJINTERVAL = 2016 # the original difficulty adjustment interval\nSTANDARD_BLOCKTIME = 600 # the standard target seconds for a block\n\ndef CalculateMVFNextWorkRequired(bits, actualBlockTimeSecs, targetBlockTimeSecs):\n    # Returns difficulty using the post-fork retarget formula in pow.cpp:CalculateMVFNextWorkRequired()\n\n    bnPowLimit = bits2target_int(hex2bin(int2hex(POW_LIMIT))) # MVF-Core moved here\n\n    # Limit adjustment step\n    nActualTimespan = actualBlockTimeSecs\n\n    # Target by interval\n    nTargetTimespan = targetBlockTimeSecs\n\n    # permit 10x retarget changes for a few blocks after the fork i.e. when nTargetTimespan is < 30 minutes (MVHF-CORE-DES-DIAD-5)\n    if (nTargetTimespan >= STANDARD_BLOCKTIME * 3) :\n        retargetLimit = 4\n    else :\n        retargetLimit = 10\n    # prevent abrupt changes to target\n    if (nActualTimespan < nTargetTimespan/retargetLimit) :\n        nActualTimespan = nTargetTimespan/retargetLimit\n    if (nActualTimespan > nTargetTimespan*retargetLimit) :\n        nActualTimespan = nTargetTimespan*retargetLimit\n\n    # compare with debug.log\n    #print \"nTargetTimespan=%d nActualTimespan=%d\" % (nTargetTimespan,nActualTimespan)\n\n    # Retarget\n    bnOld = bits2target_int(hex2bin(bits)) # SetCompact\n    # MVF-Core begin: move division before multiplication\n    # at regtest difficulty, the multiplication is prone to overflowing\n    bnNew1 = bnOld / nTargetTimespan\n    bnNew2 = bnNew1 * nActualTimespan\n\n    # Test for overflow\n    if (bnNew2 / nActualTimespan != bnNew1 or bnNew2 > bnPowLimit):\n        bnNew = bnPowLimit\n    else :\n        bnNew = bnNew2\n\n    newBits = \"0x%s\" % bin2hex(target_int2bits(bnNew)) # GetCompact\n    nBitsReset = int(newBits,0)\n    return nBitsReset\n\n\ndef CalculateMVFResetWorkRequired(bits):\n    # Returns difficulty using the fork reset formula in pow.cpp:CalculateMVFResetWorkRequired()\n\n    bnPowLimit = bits2target_int(hex2bin(int2hex(POW_LIMIT)))\n    # drop difficulty via factor\n    nDropFactor = HARDFORK_DROPFACTOR_REGTEST_DEFAULT\n    # total blocktimes prefork during run_test\n    nActualTimespan = ORIGINAL_DIFFADJINTERVAL * PREFORK_BLOCKTIME\n    # use reduced target time span while within the re-target period\n    nTargetTimespan = nActualTimespan / nDropFactor\n\n    # compare with debug.log\n    #print \"nTargetTimespan=%d nActualTimespan=%d\" % (nTargetTimespan,nActualTimespan)\n\n    bnOld = bits2target_int(hex2bin(bits)) # SetCompact\n    bnNew1 = bnOld / nTargetTimespan\n    bnNew2 = bnNew1 * nActualTimespan\n\n    # check for overflow or overlimit\n    if (bnNew2 / nActualTimespan != bnNew1 or bnNew2 > bnPowLimit):\n        bnNew = bnPowLimit\n    else:\n        bnNew = bnNew2\n\n    nBitsReset = int(\"0x%s\" % bin2hex(target_int2bits(bnNew)),0) # GetCompact\n    return nBitsReset\n\n\nclass MVF_RETARGET_BlockHeight_Test(BitcoinTestFramework):\n\n    def add_options(self, parser):\n        parser.add_option(\"--quick\", dest=\"quick\", default=False, action=\"store_true\",\n                          help=\"Run shortened version of test\")\n\n    def setup_chain(self):\n        # random seed is initialized and output by the test framework\n        print(\"Initializing test directory \" + self.options.tmpdir)\n        initialize_chain_clean(self.options.tmpdir, 1)\n\n    def setup_network(self):\n        self.nodes = []\n        self.is_network_split = False\n        self.nodes.append(start_node(0, self.options.tmpdir\n            ,[\"-forkheight=%s\"%FORK_BLOCK, \"-rpcthreads=100\",\"-blockversion=%s\" % \"0x20000000\" ]\n            ))\n\n    def is_fork_triggered_on_node(self, node=0):\n        \"\"\" check in 
log file if fork has triggered and return true/false \"\"\"\n # MVF-Core TODO: extend to check using RPC info about forks\n nodelog = self.options.tmpdir + \"/node%s/regtest/debug.log\" % node\n hf_active = search_file(nodelog, \"isMVFHardForkActive=1\")\n fork_actions_performed = search_file(nodelog, \"MVF: performing fork activation actions\")\n return (len(hf_active) > 0 and len(fork_actions_performed) == 1)\n\n def run_test(self):\n # check that fork does not trigger before the forkheight\n print \"Generating %s pre-fork blocks\" % (FORK_BLOCK - 1)\n\n #block0 already exists\n best_block = self.nodes[0].getblock(self.nodes[0].getbestblockhash(), True)\n preblocktime = best_block['time']\n for n in range(FORK_BLOCK - 1):\n # Change block times so that difficulty develops\n preblocktime = preblocktime + PREFORK_BLOCKTIME\n self.nodes[0].setmocktime(preblocktime)\n self.nodes[0].generate(1)\n\n print \"Done generating %s pre-fork blocks\" % (FORK_BLOCK - 1)\n print \"Stopping node 0\"\n stop_node(self.nodes[0],0)\n print \"Restarting node 0 with -force-retarget\"\n self.nodes[0] = start_node(0, self.options.tmpdir\n ,[\"-forkheight=%s\"%FORK_BLOCK, \"-force-retarget\", \"-rpcthreads=100\",\"-blockversion=%s\" % \"0x20000000\" ]\n )\n\n # Read difficulty before the fork\n best_block = self.nodes[0].getblock(self.nodes[0].getbestblockhash(), True)\n print \"Pre-fork difficulty: %.10f %s \" % (best_block['difficulty'], best_block['bits'])\n nBits = CalculateMVFResetWorkRequired(best_block['bits'])\n reset_bits = int2hex(nBits)\n reset_diff_expected = bits2difficulty(nBits)\n assert_greater_than(reset_diff_expected, 0)\n\n # Test fork did not trigger prematurely\n assert_equal(False, self.is_fork_triggered_on_node(0))\n print \"Fork did not trigger prematurely\"\n\n # Generate fork block\n best_block = self.nodes[0].getblock(self.nodes[0].getbestblockhash(), True)\n self.nodes[0].setmocktime(best_block['time'] + STANDARD_BLOCKTIME)\n self.nodes[0].generate(1)\n assert_equal(True, self.is_fork_triggered_on_node(0))\n\n print \"Fork triggered successfully (block height %s)\" % best_block['height']\n\n # Test fork difficulty reset\n best_block = self.nodes[0].getblock(self.nodes[0].getbestblockhash(), True)\n assert_equal(best_block['bits'],reset_bits)\n #assert_equal(best_block['bits'], \"207eeeee\") # fixed reset\n print \"Post-fork difficulty reset success: %.10f %s \" % (best_block['difficulty'], best_block['bits'])\n\n # use to track how many times the same bits are used in a row\n prev_block = 0\n diffadjinterval = 0\n # the first nexttimeblock test phase is cyclical increases of 50 seconds starting from here\n # if the starting number is too low it may cause timeout errors too often\n next_block_time = 300\n count_bits_used = 0\n\n # print column titles\n print \">> Bits change log <<\"\n print \"Time,Block,Delta(secs),Bits,Used,DiffAdjInterval,TimespanBlocks,Difficulty,NextBits\"\n\n # start generating MVF blocks with varying time stamps\n oneRetargetPeriodAfterMVFRetargetPeriod = HARDFORK_RETARGET_BLOCKS+ORIGINAL_DIFFADJINTERVAL+1\n if self.options.quick:\n # used for CI - just test one day after fork\n # this is basically just to test reset and initial response\n number_of_blocks_to_test_after_fork = 144\n else:\n # full range\n number_of_blocks_to_test_after_fork = oneRetargetPeriodAfterMVFRetargetPeriod = HARDFORK_RETARGET_BLOCKS+ORIGINAL_DIFFADJINTERVAL+1\n\n for n in xrange(number_of_blocks_to_test_after_fork):\n best_block = self.nodes[0].getblock(self.nodes[0].getbestblockhash(), 
True)\n prev_block = self.nodes[0].getblock(best_block['previousblockhash'], True)\n\n # track bits used\n if (prev_block['bits'] == best_block['bits'] or best_block['height'] == FORK_BLOCK) and n < oneRetargetPeriodAfterMVFRetargetPeriod -1 :\n count_bits_used += 1\n else:\n # when the bits change then output the retargeting metrics\n # for the previous group of bits\n print_block = self.nodes[0].getblock(self.nodes[0].getblockhash(prev_block['height'] - count_bits_used))\n avgDeltaBlockTime = (prev_block['time'] - print_block['time']) / count_bits_used\n\n if n == oneRetargetPeriodAfterMVFRetargetPeriod -1 :\n nextBits = \"end\"\n else :\n # Test difficulty during MVF retarget period\n first_block = self.nodes[0].getblock(self.nodes[0].getblockhash(prev_block['height'] - timespanblocks))\n actualBlockTimeSecs = prev_block['time'] - first_block['time']\n nBits = CalculateMVFNextWorkRequired(prev_block['bits'], actualBlockTimeSecs, difficultytimespan)\n nextBits = int2hex(nBits)\n #best_diff_expected = bits2difficulty(nBits)\n #print \"%s %.10f : %s \" % (nextBits, best_diff_expected, best_block['bits'])\n\n #if best_block['bits'] <> nextBits : #debug\n #print \"err bits %s %s %s \" % (best_block['bits'], nextBits, diffadjinterval)\n #raw_input()\n assert_equal(best_block['bits'], nextBits)\n\n print \"%s,%d,%d,%s,%d,%d,%d,%.10f,%s \" %(\n time.strftime(\"%Y-%m-%d %H:%M\",time.gmtime(prev_block['time'])),\n prev_block['height'],\n avgDeltaBlockTime,\n prev_block['bits'],\n count_bits_used,\n diffadjinterval,\n timespanblocks,\n prev_block['difficulty'],\n nextBits\n )\n\n # reset bits tracking variables\n count_bits_used = 1\n #raw_input()\n #### end if prev_block['bits'] == best_block['bits']\n\n # Get difficulty time span\n difficultytimespan = self.nodes[0].getblockchaininfo()['difficultytimespan']\n timespanblocks = difficultytimespan / STANDARD_BLOCKTIME\n #print \"%s : %s\" % (best_block['height'],timespanblocks)\n\n # Get difficulty adjustment interval\n diffadjinterval = self.nodes[0].getblockchaininfo()['difficultyadjinterval']\n\n # Test processed bits are used within the expected difficulty interval\n # except when the bits is at the bits limit: 207fffff\n #\n # In some cases the retarget causes the same bits to be returned\n # so this test has been disabled.\n #\n #if int(\"0x%s\"%prev_block['bits'],0) <> POW_LIMIT :\n #if count_bits_used > diffadjinterval : #debug\n #print \"err count_bits_used %s : %s \" % (prev_block['bits'], nextBits)\n #raw_input()\n #assert_less_than_equal(count_bits_used, diffadjinterval)\n\n # Setup various block time interval tests\n if n in range(0,11) :\n next_block_time = next_block_time + 50\n elif n in range(11,22) :\n # this may cause bits to hit the limit POW_LIMIT\n next_block_time = 1200\n elif n in range(22,26) :\n # this may cause timeout errors\n next_block_time = 300\n elif n in range(26,500) :\n # exactly standard block times\n next_block_time = STANDARD_BLOCKTIME\n elif n in range(500,525) :\n # simulate faster blocks\n # this may cause timeout errors\n next_block_time = randint(100,300)\n elif n in range(525,550) :\n # simulate slow blocks\n # this may cause bits to hit the limit POW_LIMIT\n next_block_time = randint(1000,3000)\n elif n >= HARDFORK_RETARGET_BLOCKS :\n # exactly standard block times so when the original retargeting\n # begins again the difficulty will stay about the same\n next_block_time = STANDARD_BLOCKTIME\n else:\n # simulate ontime blocks i.e. 
hash power/difficulty around 600 secs\n                next_block_time = randint(500,700)\n\n            self.nodes[0].setmocktime(best_block['time'] + next_block_time)\n\n            # Test the interval matches the interval defined in params.DifficultyAdjustmentInterval()\n            # notice the range() high setting is plus one versus c++ switch\n            if n in range(0,2017) :\n                diff_interval_expected = 1 # retarget every block\n            elif n in range(2017,4000) :\n                diff_interval_expected = 10\n            elif n in range(4000,10000) :\n                diff_interval_expected = 40\n            elif n in range(10000,15000) :\n                diff_interval_expected = 100\n            elif n in range(15000,20000) :\n                diff_interval_expected = 400\n            elif n in range(20000,HARDFORK_RETARGET_BLOCKS+1) :\n                diff_interval_expected = 1000\n            else:\n                diff_interval_expected = ORIGINAL_DIFFADJINTERVAL # every 14 days original\n\n            #if diff_interval_expected <> diffadjinterval :\n                #print \"err diffadjinterval %d %d %d\" % (n, diff_interval_expected, diffadjinterval)\n                #raw_input()\n            assert_equal(diff_interval_expected, diffadjinterval)\n\n            # print info for every block\n            #if best_block['height'] >= 16127 :\n                #first_block = self.nodes[0].getblock(self.nodes[0].getblockhash(prev_block['height'] - timespanblocks))\n                #print \"%s :: %s :: %d :: %s :: %.10f :: %d :: %d\" %(\n                    #best_block['height'],\n                    #time.strftime(\"%H:%M\",time.gmtime(best_block['time'])),\n                    #best_block['time'] - prev_block['time'],\n                    #best_block['bits'],\n                    #best_block['difficulty'],\n                    #count_bits_used,\n                    #first_block['height'])\n                #raw_input()\n\n            # generate the next block\n            self.nodes[0].generate(1)\n\n            #### end for n in xrange\n\n        print \"Done.\"\n\nif __name__ == '__main__':\n    MVF_RETARGET_BlockHeight_Test().main()\n" } ]
14
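The regtest file in the record above re-implements, in Python 2, the clamped retarget rule from MVF's pow.cpp. The sketch below is my own Python 3 restatement with hypothetical inputs, not code taken from the repository; the constant name mirrors the test, and it shows the two properties the test relies on: the timespan clamp and the divide-before-multiply ordering that avoids overflow in a fixed-width integer type.

```python
# Minimal sketch of the clamped retarget rule exercised by the test above.
# Constant name follows the test; the example values are hypothetical.
STANDARD_BLOCKTIME = 600

def clamped_retarget(old_target, actual_secs, target_secs):
    # Wider 10x swings are allowed only while the post-fork target timespan
    # is still short (< 3 standard blocks), as the test's comment describes.
    limit = 4 if target_secs >= STANDARD_BLOCKTIME * 3 else 10
    # Clamp the observed timespan to prevent abrupt difficulty changes.
    actual_secs = max(target_secs // limit, min(actual_secs, target_secs * limit))
    # Divide before multiplying: with a fixed-width integer (as in the C++
    # original), old_target * actual_secs could overflow near the pow limit.
    return old_target // target_secs * actual_secs

pow_limit = 0x7FFFFF << 232  # expansion of the compact bits 0x207fffff
print(hex(clamped_retarget(pow_limit // 2, 1200, 600)))  # slow blocks -> larger (easier) target
print(hex(clamped_retarget(pow_limit // 2, 30, 600)))    # fast blocks -> clamped to 600/10
```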
hemal507/NLP-Python
https://github.com/hemal507/NLP-Python
a0c2429f0b97dc62015f1a0f25455d7610e4c8b1
891b99a1c4077e73f312b6a449b16a1b325ba590
eafd388ff2d49653dc56e47641b628d5bc0c3e81
refs/heads/master
2021-01-19T21:15:33.716169
2017-04-18T14:27:51
2017-04-18T14:27:51
88,633,505
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6764705777168274, "alphanum_fraction": 0.7352941036224365, "avg_line_length": 16, "blob_id": "5d21b300bb9bf434dcfc8b88c8b4ab32af29ab3c", "content_id": "7888d35ff6bcbf6d52384aa7f3a94c6ec2852fd1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 34, "license_type": "no_license", "max_line_length": 20, "num_lines": 2, "path": "/README.md", "repo_name": "hemal507/NLP-Python", "src_encoding": "UTF-8", "text": "# NLP-Python\nNLP using python 2.7\n" }, { "alpha_fraction": 0.7531914710998535, "alphanum_fraction": 0.7531914710998535, "avg_line_length": 38, "blob_id": "836b4485f0c83a9ef8dbfc22516b3ce23eac4ef4", "content_id": "72088869c99ed42e69a6cd7a6cbde19c03d7119d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 470, "license_type": "no_license", "max_line_length": 72, "num_lines": 12, "path": "/Lemmatizer.py", "repo_name": "hemal507/NLP-Python", "src_encoding": "UTF-8", "text": "from nltk.stem import WordNetLemmatizer\n\nlemmatizer = WordNetLemmatizer()\nprint(lemmatizer.lemmatize(\"geese\"))\nprint(lemmatizer.lemmatize(\"cacti\"))\nprint(lemmatizer.lemmatize(\"rocks\"))\nprint(lemmatizer.lemmatize(\"python\"))\nprint(lemmatizer.lemmatize(\"better\"))\nprint(lemmatizer.lemmatize(\"better\",pos=\"a\")) # adjective \nprint(lemmatizer.lemmatize(\"best\",pos=\"a\")) # default pos = n , noun\nprint(lemmatizer.lemmatize(\"run\"))\nprint(lemmatizer.lemmatize(\"ran\",\"v\"))\n\n\n" }, { "alpha_fraction": 0.6219652891159058, "alphanum_fraction": 0.6277456879615784, "avg_line_length": 25, "blob_id": "f26af7815267a59847e9ecc45d549675d6d3b8e4", "content_id": "ee967544e97900fb8c580d4bf35b346b1bb06749", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 865, "license_type": "no_license", "max_line_length": 132, "num_lines": 33, "path": "/stemming.py", "repo_name": "hemal507/NLP-Python", "src_encoding": "UTF-8", "text": "from nltk.stem import PorterStemmer\nfrom nltk.tokenize import word_tokenize\n\ndef stem1(word):\n for suffix in ['ing', 'ly', 'ed', 'ious', 'ies', 'ive', 'es', 's', 'ment','er','ic']:\n if word.endswith(suffix):\n print suffix\n return word[:-len(suffix)]\n return word\n\n\n\n##ps = PorterStemmer()\n\nexample_words = ['Python','pythoner','puthoning','pythoned','pythonly','pythonic']\n##example_word2 = ['Work','worker','working','worked','workly']\n\n##for w in example_words:\n## print (ps.stem(w))\n\nfor w in example_words:\n print (stem1(w))\n \n\n##for w in example_word2:\n## print (ps.stem(w))\n\nnew_text = 'It is very important to be pythonly while you are working with python. 
All pythoners have pythoned poorly at least once.'\n\nwords = word_tokenize(new_text)\nfor w in words:\n##    print (ps.stem(w))\n    print (stem1(w))\n\n\n    \n" }, { "alpha_fraction": 0.7137176990509033, "alphanum_fraction": 0.7137176990509033, "avg_line_length": 21.863636016845703, "blob_id": "a4e5f5ee6dc85e69bbab47294e48aa11162e9f97", "content_id": "8ea8ccaaf8754a91cba77881b494083f8cfdb87c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 503, "license_type": "no_license", "max_line_length": 73, "num_lines": 22, "path": "/stopwords.py", "repo_name": "hemal507/NLP-Python", "src_encoding": "UTF-8", "text": "from nltk.corpus import stopwords\nfrom nltk.tokenize import word_tokenize\n\nexample_sentence = 'This is an example showing off stop word filtration.'\nstop_words=set(stopwords.words(\"english\"))\n\n#print(stop_words)\nwords = word_tokenize(example_sentence)\n\nfiltered_sentence = []\n##\n##for w in words:\n##    if w not in stop_words:\n##        filtered_sentence.append(w)\n##\n##print (filtered_sentence) \n\nfiltered_sentence = [w for w in words if not w.lower() in stop_words]\n\n\n\nprint(filtered_sentence)\n" } ]
4
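stem1() in the repository's stemming.py strips the first suffix that matches, so the order of the suffix list is significant ('ies' must be tried before 'es' and 's'). A small standalone restatement (same suffix list, Python 3 syntax, the `print suffix` side effect dropped) makes that visible:

```python
# First-match-wins suffix stripping, as in stem1() above (Python 3 syntax).
SUFFIXES = ['ing', 'ly', 'ed', 'ious', 'ies', 'ive', 'es', 's', 'ment', 'er', 'ic']

def stem(word):
    for suffix in SUFFIXES:
        if word.endswith(suffix):
            return word[:-len(suffix)]
    return word

print(stem('ponies'))    # 'pon'    -- 'ies' is tried before 'es' and 's'
print(stem('pythoner'))  # 'python' -- falls through to 'er'
print(stem('caress'))    # 'cares'  -- the bare 's' rule over-strips non-plurals
```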
JuanVizcaya/locations
https://github.com/JuanVizcaya/locations
09a214302ab87c5f3d5efae5cf790816a60f167a
c1a3c4efd7b669c5fa1aebe51d173ce6728bb74f
b1f862b257af01ba8705143c950ba90b77e0f63c
refs/heads/main
2023-03-05T04:17:13.495216
2021-02-15T08:09:05
2021-02-15T08:09:05
338,495,081
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.595292329788208, "alphanum_fraction": 0.5990888476371765, "avg_line_length": 31.121952056884766, "blob_id": "5b3a09d51886ae6961cdfc10b35ebac8ff8dd4da", "content_id": "ebb4658f81eb50a3c4ed69eaab96393d94c12b45", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1317, "license_type": "no_license", "max_line_length": 121, "num_lines": 41, "path": "/front-app/src/Utils/ApiUils.js", "repo_name": "JuanVizcaya/locations", "src_encoding": "UTF-8", "text": "import axios from 'axios';\n\naxios.defaults.headers.common.Accept = 'application/json';\n\n// End Points\nconst API_URL = `http://localhost:5001/api/locations/`;\nconst GEOCODE_API = 'https://geocode.arcgis.com/arcgis/rest/services/World/GeocodeServer/reverseGeocode';\n\nconst fetch = (endpoint) => {\nreturn axios\n .get(endpoint)\n .then((res) => res)\n .catch((err) => {\n console.error(\n 'Error catch in Apiutils at fetch method. It will be thrown...');\n throw err;\n });\n}\n\nexport const getAllPoints = (user = '', apiKey = '', table = '') => {\n const query = API_URL;\n // const query = `https://${user}.carto.com/api/v2/sql?api_key=${apiKey}&q=SELECT latitude, longitude FROM ${table}`;\n return fetch(query)\n .then(res=> {\n const data = [];\n res.data.forEach(point=>{\n data.push({lat: point.latitude, lng: point.longitude})\n });\n return data;\n });\n};\n\nexport const getAddress = (lat, lng) => {\n // Get coordinates direction from the esri reverse-geocoder\n const geocode_api_url = `${GEOCODE_API}?location=${lng},${lat}&forStorage=false&f=json&countryCode=MEX&langCode=ES`;\n return fetch(geocode_api_url)\n .then(res => {\n console.log(res);\n return res.data.address;\n });\n}\n" }, { "alpha_fraction": 0.698630154132843, "alphanum_fraction": 0.7123287916183472, "avg_line_length": 23.33333396911621, "blob_id": "2d1f3bc9b7c67d1355d9f4308d4a838cddfbf3a8", "content_id": "38a15bd250d82d5c55f3fedbd5fb5761aac0c9ba", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Dockerfile", "length_bytes": 511, "license_type": "no_license", "max_line_length": 94, "num_lines": 21, "path": "/back-app/Dockerfile", "repo_name": "JuanVizcaya/locations", "src_encoding": "UTF-8", "text": "FROM python:3.8.5\n\nENV PYTHONUNBUFFERED 1\nENV POSTGRES_PASSWORD postgres\n\nRUN mkdir /data\nCOPY data/. /data/\n\nRUN mkdir /back-app\nCOPY . 
/back-app/\n\nRUN apt-get update &&\\\n    apt-get install -y binutils libproj-dev gdal-bin postgresql-client\n\nRUN pip install --upgrade pip \\\n    && pip install -r back-app/requirements.txt\n\n# docker-compose-wait tool -------------------\nENV WAIT_VERSION 2.7.2\nADD https://github.com/ufoscout/docker-compose-wait/releases/download/$WAIT_VERSION/wait /wait\nRUN chmod +x /wait\n" }, { "alpha_fraction": 0.6141732335090637, "alphanum_fraction": 0.6299212574958801, "avg_line_length": 15.933333396911621, "blob_id": "fce5198c9e8db73a7261e5ee802bdc3d45669f40", "content_id": "ff3e4ac3351691a7817b7ba4979b81d3ebb42726", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 254, "license_type": "no_license", "max_line_length": 77, "num_lines": 15, "path": "/back-app/wait.sh", "repo_name": "JuanVizcaya/locations", "src_encoding": "UTF-8", "text": "#!/bin/sh\n\nset -e\n\nhost=\"$1\"\nshift\ncmd=\"$@\"\n\nuntil PGPASSWORD=$POSTGRES_PASSWORD psql -h \"$host\" -U \"postgres\" -c '\\q'; do\n  >&2 echo \"django: Postgres is unavailable - sleeping..\"\n  sleep 5\ndone\n\n>&2 echo \"Postgres is up - starting app..\"\nexec $cmd\n" }, { "alpha_fraction": 0.5289255976676941, "alphanum_fraction": 0.7272727489471436, "avg_line_length": 18.615385055541992, "blob_id": "a8e7b3ee7b0637cd650fa52ee55e4e010db1e88a", "content_id": "c2b2ff046ee4f29049c340867a9e0e1c642fb428", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 242, "license_type": "no_license", "max_line_length": 27, "num_lines": 13, "path": "/back-app/requirements.txt", "repo_name": "JuanVizcaya/locations", "src_encoding": "UTF-8", "text": "asgiref==3.3.1\ncertifi==2020.12.5\nDjango==3.1.6\ndjango-cors-headers==3.7.0\ndjangorestframework==3.12.2\nGeoAlchemy2==0.8.4\ngeopandas==0.8.2\npandas==1.2.2\nSQLAlchemy==1.3.23\npsycopg2-binary==2.8.6\npytz==2021.1\nsqlparse==0.4.1\nwincertstore==0.2\n" }, { "alpha_fraction": 0.6518046855926514, "alphanum_fraction": 0.6740976572036743, "avg_line_length": 38.25, "blob_id": "3562c331a5dcdb27111d00b08aed0ef1ce7cb365", "content_id": "2c754675cb57828909e71c43f66a648d8a0f2881", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 942, "license_type": "no_license", "max_line_length": 94, "num_lines": 24, "path": "/back-app/locations/models.py", "repo_name": "JuanVizcaya/locations", "src_encoding": "UTF-8", "text": "from django.db import models\nfrom django.contrib.gis.db.models import GeometryField\nfrom django.contrib.gis.geos import GEOSGeometry\n\n\nclass Location(models.Model):\n    \"\"\" Locations model to be mapped in the database \"\"\"\n    geometry = GeometryField(null=True,srid=4326)\n    tipo = models.CharField(max_length=50)\n    latitude = models.DecimalField(\n        max_digits=14,decimal_places=8, default=0.0)\n    longitude = models.DecimalField(\n        max_digits=14,decimal_places=8, default=0.0)\n    color = models.CharField(max_length=6)\n\n    def save(self, *args, **kwargs):\n        \"\"\" Overridden save method that builds the point geometry before saving. 
\"\"\"\n        if not self.geometry:\n            # WKT order is POINT(x y) = POINT(longitude latitude)\n            self.geometry = GEOSGeometry(f'SRID=4326;POINT({self.longitude} {self.latitude})')\n        return super(Location, self).save(*args, **kwargs)\n\n    def __str__(self):\n        \"\"\" Model representation in the admin panel \"\"\"\n        return f'{self.id}'\n" }, { "alpha_fraction": 0.692307710647583, "alphanum_fraction": 0.692307710647583, "avg_line_length": 12.11111068725586, "blob_id": "320e46fadd3bf253c150c38a4333ea93265a12e0", "content_id": "ea2fbf1ce9da61a5f46320e55b53a3bd4b951f14", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Dockerfile", "length_bytes": 117, "license_type": "no_license", "max_line_length": 22, "num_lines": 9, "path": "/front-app/Dockerfile", "repo_name": "JuanVizcaya/locations", "src_encoding": "UTF-8", "text": "FROM node:alpine\n\nRUN mkdir /front-app\nCOPY . /front-app/\nWORKDIR /front-app\n\nRUN npm install\n\nCMD [ \"npm\", \"start\" ]" }, { "alpha_fraction": 0.6542416214942932, "alphanum_fraction": 0.6709511280059814, "avg_line_length": 26.785715103149414, "blob_id": "229ff4ec6626e9aedb53a2dbda566606a8d0f2f3", "content_id": "2bc16f0d8841ecce7ccafe3a79e83f8ec672659c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 778, "license_type": "no_license", "max_line_length": 86, "num_lines": 28, "path": "/back-app/data/load_data.py", "repo_name": "JuanVizcaya/locations", "src_encoding": "UTF-8", "text": "from sqlalchemy import create_engine\nfrom geoalchemy2 import Geometry, WKTElement\nimport pandas as pd\nimport geopandas as gpd\nimport os\n\nos.chdir(os.path.dirname(os.path.abspath(__file__)))\n\ndef load_data_to_postgis():\n    srid = 4326\n\n    gdf = gpd.GeoDataFrame(pd.read_csv('puntos_examen_fullstack.csv'))\n\n    gdf['geometry'] = gpd.points_from_xy(gdf.longitude, gdf.latitude, crs='EPSG:4326')\n    gdf = gdf.set_crs(epsg=srid)\n    gdf.pop('the_geom')\n\n    gdf['id'] = gdf['cartodb_id']\n    gdf.pop('cartodb_id')\n\n\n    engine = create_engine('postgresql://postgres:postgres@postgis_db:5432/postgres')\n    gdf.to_postgis('locations_location', engine, if_exists='append', index=False)\n    print('====== DATA LOADED ======')\n\n\nif __name__ == '__main__':\n    load_data_to_postgis()\n" }, { "alpha_fraction": 0.5356222987174988, "alphanum_fraction": 0.6254649758338928, "avg_line_length": 18.203125, "blob_id": "6fe89e94231fc7cec78d3ffd17b8815779266b24", "content_id": "0c3fb53dbb0c06e30464f494fd26002a09a95110", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3495, "license_type": "no_license", "max_line_length": 147, "num_lines": 192, "path": "/README.md", "repo_name": "JuanVizcaya/locations", "src_encoding": "UTF-8", "text": "# Locations\n\nThis is a full-stack project with *react* on the *frontend* and *django/django_restframework* on the backend, and it uses a *postgres/postgis* database.\n\n<div id='id0'/>\n\n## Index\n\n[1.](#id1) __Installation__\n - [Requirements](#id11)\n - [Quick start](#id12)\n\n[2.](#id2) __Backend__\n - [Django Admin Panel](#id21)\n - [Locations Endpoint](#id22)\n - [Media Type](#id23)\n - [Methods](#id24)\n\n\n[3.-](#id3) __Frontend__\n - [Url](#id31)\n\n[4.-](#id4) __Containers__\n - [postgis_db](#id41)\n - [django_locs](#id42)\n - [react_app](#id42)\n\n\n<div id='id1' />\n\n## Installation\n\n<div id='id11' />\n\n#### Requirements\n- Docker\n\n<div id='id12' />\n\n#### Quick start\n\n- Clone the project:\n\n`git clone https://github.com/JuanVizcaya/locations.git`\n\n- Enter the \"locations\" 
path:\n\n`cd locations`\n\n- Build the containers (Wait for django_locs):\n\n`docker-compose up --build`\n\n```\ndjango_locs | Run 'python manage.py migrate' to apply them.\ndjango_locs | February 15, 2021 - 07:46:59\ndjango_locs | Django version 3.1.6, using settings 'prj.settings'\ndjango_locs | Starting development server at http://0:5001/\ndjango_locs | Quit the server with CONTROL-C.\n```\n\n- Make needed django migrations:\n\n`docker exec -it django_locs sh -c \"python /back-app/manage.py makemigrations\"`\n\n`docker exec -it django_locs sh -c \"python /back-app/manage.py makemigrations locations\"`\n\n`docker exec -it django_locs sh -c \"python /back-app/manage.py migrate\"`\n\n- Create the __Superuser__ for the __Django Administration Panel__:\n\n`docker exec -it django_locs sh -c \"python /back-app/manage.py createsuperuser\"`\n\n- __Load data__ to the __Postgis Database__:\n\n`docker exec -it django_locs sh -c \"python /back-app/data/load_data.py\"`\n\n[Back to index](#id0)\n\n<div id='id2' />\n\n## Backend\n\n<div id='id21' />\n\n### Django Administration Panel\nYou can access to the administration panel with the __Superuser__ credentials you have already created.\n\n`URL: http://localhost:5001/admin`\n\n<div id='id22' />\n\n#### Locations Endpoint\nYou can make requests to the __Locations API__, even with your browser.\n\n`URL: http://localhost:5001/api/locations/`\n\n<div id='id23' />\n\n##### Media Type\n- `application/json`\n\n<div id='id24' />\n\n##### Methods\n\n`GET: http://localhost:5001/api/locations/`\n\n*Response:*\n```\n[\n{\n \"id\": 1,\n \"geometry\": \"SRID=4326;POINT (-103.40223397 25.53813876)\",\n \"tipo\": \"Sucursal\",\n \"latitude\": \"25.53813876\",\n \"longitude\": \"-103.40223397\",\n \"color\": \"11A579\"\n},\n{\n \"id\": 2,\n \"geometry\": \"SRID=4326;POINT (-103.43411379 25.55083543)\",\n \"tipo\": \"Sucursal\",\n \"latitude\": \"25.55083543\",\n \"longitude\": \"-103.43411379\",\n \"color\": \"11A579\"\n},\n\n...\n\n{\n \"id\": 499,\n \"geometry\": \"SRID=4326;POINT (-100.80625621 20.53657222)\",\n \"tipo\": \"Sucursal\",\n \"latitude\": \"20.53657222\",\n \"longitude\": \"-100.80625621\",\n \"color\": \"11A579\"\n},\n{\n \"id\": 500,\n \"geometry\": \"SRID=4326;POINT (-101.42308383 20.93452578)\",\n \"tipo\": \"Sucursal\",\n \"latitude\": \"20.93452578\",\n \"longitude\": \"-101.42308383\",\n \"color\": \"11A579\"\n}\n]\n```\n\n[Back to index](#id0)\n\n\n<div id='id3' />\n\n## Frontend\n\n<div id='id31' />\n\n#### URL\n\n`http://localhost:3000/`\n\n[Back to index](#id0)\n\n***\n\n<div id='id4' />\n\n## Containers\n\n<div id='id41' />\n\n#### postgis_db\n- Name: `postgis_db`\n\n- Port: `5434`\n\n<div id='id41' />\n\n#### django_locs\n- Name: `django_locs`\n\n- Port: `5001`\n\n- Depends on: `postgis_db`\n\n#### react_app\n- Name: `react_app`\n\n- Port: `3000`\n\n- Depends on: `django_locs`\n" }, { "alpha_fraction": 0.7292576432228088, "alphanum_fraction": 0.7292576432228088, "avg_line_length": 27.625, "blob_id": "e83650423141d58e0192707d734a134e2cda8dbb", "content_id": "e126be8a40efda56959130a981ea77ed25c822bb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 229, "license_type": "no_license", "max_line_length": 46, "num_lines": 8, "path": "/back-app/locations/admin.py", "repo_name": "JuanVizcaya/locations", "src_encoding": "UTF-8", "text": "from django.contrib import admin\nfrom locations.models import Location\n\n\[email protected](Location)\nclass LocationAdmin(admin.ModelAdmin):\n \"\"\" 
Locations added to the admin panel \"\"\"\n    search_fields = ('id', 'tipo', 'color')\n" }, { "alpha_fraction": 0.652046799659729, "alphanum_fraction": 0.652046799659729, "avg_line_length": 27.58333396911621, "blob_id": "e397633ada0e1d9dcad4dfb824f2498ea37df0ec", "content_id": "437726f3bb4a84cdc968354ef94995130e2ca458", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 342, "license_type": "no_license", "max_line_length": 70, "num_lines": 12, "path": "/back-app/locations/serializers.py", "repo_name": "JuanVizcaya/locations", "src_encoding": "UTF-8", "text": "from rest_framework import serializers\n\nfrom locations.models import Location\n\n\nclass LocationSerializer(serializers.ModelSerializer):\n    \"\"\" Location's serializer object for the api responses \"\"\"\n    class Meta:\n        model = Location\n        fields = [\n            'id', 'geometry', 'tipo', 'latitude', 'longitude', 'color'\n        ]" }, { "alpha_fraction": 0.7898089289665222, "alphanum_fraction": 0.7898089289665222, "avg_line_length": 27.545454025268555, "blob_id": "0f4ec193b6f59b904db7ad40026c20d3e8fe1976", "content_id": "1eb05f8ebf9e68b129180198b426583f9860c811", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 314, "license_type": "no_license", "max_line_length": 69, "num_lines": 11, "path": "/back-app/locations/urls.py", "repo_name": "JuanVizcaya/locations", "src_encoding": "UTF-8", "text": "from django.urls import path, include\nfrom rest_framework.routers import DefaultRouter\nfrom locations.views import LocationsViewset\n\n\nlocations_router = DefaultRouter()\nlocations_router.register('', LocationsViewset, basename='locations')\n\nurlpatterns = [\n    path('locations/', include(locations_router.urls)),\n]\n" }, { "alpha_fraction": 0.6571428775787354, "alphanum_fraction": 0.6635714173316956, "avg_line_length": 37.88888931274414, "blob_id": "b470df2f82be5cce80b5180ab356c8e0f451157d", "content_id": "779ed7a0010c7306a3a3fad76d595be8fd04edc3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1400, "license_type": "no_license", "max_line_length": 86, "num_lines": 36, "path": "/back-app/locations/views.py", "repo_name": "JuanVizcaya/locations", "src_encoding": "UTF-8", "text": "from rest_framework import viewsets\nfrom rest_framework.response import Response\nfrom rest_framework import status\n\nfrom locations.models import Location\nfrom locations.serializers import LocationSerializer\n\n\nclass LocationsViewset(viewsets.ViewSet):\n    \"\"\" ViewSet object to process the Locations API requests \"\"\"\n    def list(self, request):\n        \"\"\" GET: Method that returns all the locations in the Locations table\n        Args:\n            request (request): request received from the client.\n        Returns:\n            JSON: A list with all the locations in the database.\n        \"\"\"\n        locations = Location.objects.all()\n        serialized = LocationSerializer(locations, many=True)\n        return Response(serialized.data, status=status.HTTP_200_OK)\n\n    def create(self, request): #TODO: review the id before saving\n        \"\"\" POST: Create new objects in the Locations model\n        Args:\n            request (request): request received from the client.\n        Returns:\n            JSON: A status message confirming the save.\n        \"\"\"\n        serializer = LocationSerializer(data=request.data)\n        if serializer.is_valid():\n            serializer.save()\n            return Response(\n                {'status': 'Location successfully saved!'},\n                status=status.HTTP_201_CREATED)\n\n        return Response(serializer.error_messages 
,status=status.HTTP_400_BAD_REQUEST)\n" } ]
12
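For completeness, a small client sketch for the Locations API that the record's views and README describe. The base URL matches the repository's docker-compose setup; `requests` is an assumed dependency, and the POST payload fields mirror LocationSerializer. This is an illustration, not code from the repository.

```python
# Minimal client for the Locations API above (assumes the stack is running).
import requests

BASE = "http://localhost:5001/api/locations/"

def list_locations():
    resp = requests.get(BASE)
    resp.raise_for_status()
    return resp.json()  # list of {"id", "geometry", "tipo", "latitude", ...}

def create_location(lat, lon, tipo="Sucursal", color="11A579"):
    payload = {"tipo": tipo, "latitude": lat, "longitude": lon, "color": color}
    resp = requests.post(BASE, json=payload)
    resp.raise_for_status()
    return resp.json()  # {"status": "Location successfully saved!"}

if __name__ == "__main__":
    print(len(list_locations()), "locations stored")
```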
randomsequence/PyAffineTransform
https://github.com/randomsequence/PyAffineTransform
28ad9d04db2125fb4e42598064b1ee77437abdb1
07c966c5cb59d1caebf8d1719e0d0077ea588a3e
93e5577344f7dd694b2c2609c588af8383e749b1
refs/heads/master
2020-04-06T07:07:42.990996
2014-07-03T15:13:58
2014-07-03T15:13:58
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6774193644523621, "alphanum_fraction": 0.7450076937675476, "avg_line_length": 23.11111068725586, "blob_id": "4e4c5aaffc03979e1f168486530693eeb80ad9ea", "content_id": "ef3303670fbc8f65d9d66885d2a72caca8b89989", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1303, "license_type": "permissive", "max_line_length": 391, "num_lines": 54, "path": "/README.md", "repo_name": "randomsequence/PyAffineTransform", "src_encoding": "UTF-8", "text": "PyAffineTransform\n=================\n\nA matrix class for affine transformations in Python\n\nA transformation specifies how points in one coordinate system map to points in another coordinate system. An affine transformation is a special type of mapping that preserves parallel lines in a path but does not necessarily preserve lengths or angles. Scaling, rotation, and translation are the most commonly used manipulations supported by affine transforms, but skewing is also possible.\n\nYou can use PyAffineTransform to transform points, sizes and rectangles from one coordinate system to another.\n\nRotate a point 90º about the origin:\n\n```python\n\nimport math\nfrom affinetransform import AffineTransform\n\nt = AffineTransform()\nt.rotate(math.pi/2)\n\nt.transformPoint(10, 20)\n\n# (-20.0, 10.000000000000002)\n```\n\nTranslate a rectangle:\n\n```python\nt = AffineTransform()\nt.translate(100, 10)\nt.transformRect(0, 0, 10, 20)\n# (100.0, 10.0, 10.0, 20.0)\n```\n\nScale a size:\n\n```python\nt = AffineTransform()\nt.scale(4, 4)\nt.transformSize(2, 2)\n# (8.0, 8.0)\n```\n\nConcatenate transformations:\n\n```python\nt = AffineTransform()\nt.rotate(math.pi)\nt.translate(100, 10)\nt.scale(4, 4)\nt.transformPoint(0, 0)\n# (-100.0, -9.999999999999988)\n```\n\nThere are also methods to invert, multiply (concat), compare and copy transforms.\n" }, { "alpha_fraction": 0.550348699092865, "alphanum_fraction": 0.5902370810508728, "avg_line_length": 31.89908218383789, "blob_id": "806761e6cc9ca8ee1c1053c86287144a203327b9", "content_id": "fb2eac1c182852a32871d88713591643b4314fbe", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3585, "license_type": "permissive", "max_line_length": 80, "num_lines": 109, "path": "/tests.py", "repo_name": "randomsequence/PyAffineTransform", "src_encoding": "UTF-8", "text": "# Copyright (c) 2014 Johnnie Walker\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nimport unittest\nimport math\nimport copy\nfrom affinetransform import AffineTransform\n\nclass AffineTransformTest(unittest.TestCase):\n\n def test_translate(self):\n translation = (100, 200)\n t = AffineTransform()\n t.translate(translation[0], translation[1])\n p = t.transformPoint(0, 0)\n for i in range(0, len(p)):\n self.assertAlmostEqual(p[i], translation[i])\n\n def test_rotate(self):\n t = AffineTransform()\n t.rotate(-math.pi/2.0)\n s = t.transformSize(0.0, 2.0)\n self.assertAlmostEqual(round(s[0]), 2.0)\n self.assertAlmostEqual(round(s[1]), 0.0)\n\n def test_scale(self):\n t = AffineTransform()\n t.scale(4.0, 3.0)\n r = t.transformRect(0, 0, 1, 1)\n re = (0, 0, 4, 3)\n for i in range(0, len(r)):\n self.assertAlmostEqual(r[i], re[i])\n \n def test_multiply(self):\n t1 = AffineTransform()\n t1.rotate(math.pi/2.0)\n t2 = AffineTransform()\n t2.translate(2, 4)\n t3 = AffineTransform()\n t3.scale(3, 5)\n \n p = (20, 20)\n \n p1 = (p[0], p[1])\n p1 = t1.transformPoint(p1[0], p1[1])\n p1 = t2.transformPoint(p1[0], p1[1])\n p1 = t3.transformPoint(p1[0], p1[1])\n \n t4 = AffineTransform()\n t4.multiply(t1)\n t4.multiply(t2)\n t4.multiply(t3)\n \n p2 = t4.transformPoint(p[0], p[1])\n \n for i in range(0, len(p1)):\n self.assertAlmostEqual(p1[i], p2[i]) \n \n def test_invert(self):\n t = AffineTransform()\n t.rotate(-math.pi/2.0)\n t.scale(4.0, 3.0)\n t.translate(100, 100)\n p = t.transformPoint(0, 0)\n t.invert()\n p = t.transformPoint(p[0], p[1])\n for i in range(0, len(p)):\n self.assertAlmostEqual(p[i], 0) \n \n def test_copy(self):\n t1 = AffineTransform()\n t1.scale(2, 3)\n t1.rotate(3)\n t2 = copy.copy(t1)\n self.assertEqual(t1.m, t2.m)\n self.assertEqual(t1, t2)\n \n def test_equality(self):\n t1 = AffineTransform()\n t1.scale(2, 3)\n t1.rotate(3)\n t2 = AffineTransform()\n t2.scale(2, 3)\n t2.rotate(3)\n self.assertTrue(t1 == t2)\n t3 = AffineTransform()\n t3.scale(4, 5)\n self.assertFalse(t2 == t3)\n self.assertFalse(t3 == None) \n \nif __name__ == '__main__':\n unittest.main()" } ]
2
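The PyAffineTransform README above explains the concept; the algebra it implies is a 2x3 matrix acting on 2D points in homogeneous form. The sketch below is my own restatement, not the library's internals, and it reproduces the README's first example numerically:

```python
# The 2D affine map (a, b, c, d, tx, ty) acts as:
#   x' = a*x + c*y + tx
#   y' = b*x + d*y + ty
import math

def rotation(theta):
    # Rotation about the origin expressed as an affine tuple.
    c, s = math.cos(theta), math.sin(theta)
    return (c, s, -s, c, 0.0, 0.0)

def apply(m, x, y):
    a, b, c, d, tx, ty = m
    return (a * x + c * y + tx, b * x + d * y + ty)

print(apply(rotation(math.pi / 2), 10, 20))  # ~(-20.0, 10.0), matching the README
```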
bhambre/Test-Repository
https://github.com/bhambre/Test-Repository
1c10ce6d5b20d996b99b352f76639fffb84f76c8
eed20c0ab86545add996cd4733d5fb29375620a6
4940efe89f36e59c33f2b422ca9ff777ed28d44a
refs/heads/master
2021-01-10T14:12:01.318385
2019-11-27T21:50:26
2019-11-27T21:50:26
47,434,473
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6575654149055481, "alphanum_fraction": 0.6615471839904785, "avg_line_length": 48.28571319580078, "blob_id": "5be6ad3f190d407a76df0c4c9b879974d88dd40e", "content_id": "a7527f1a6dc9de77edf1a863581fe62dfe133741", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1758, "license_type": "no_license", "max_line_length": 470, "num_lines": 35, "path": "/PGA Tour Stats/spiders/pgastatslist.py", "repo_name": "bhambre/Test-Repository", "src_encoding": "UTF-8", "text": "import scrapy\r\nfrom golf.itemspgalinks import GolfItem\r\n\r\nclass GolfSpider(scrapy.Spider):\r\n name = \"golf_pga_links\"\r\n download_delay = 2\r\n allowed_domains = [\"www.pgatour.com\"]\r\n start_urls = [\r\n \"https://www.pgatour.com/stats/categories.ROTT_INQ.html\",\r\n \"https://www.pgatour.com/stats/categories.RAPP_INQ.html\",\r\n \"https://www.pgatour.com/stats/categories.RARG_INQ.html\",\r\n \"https://www.pgatour.com/stats/categories.RPUT_INQ.html\",\r\n \"https://www.pgatour.com/stats/categories.RSCR_INQ.html\",\r\n \"https://www.pgatour.com/stats/categories.RSTR_INQ.html\",\r\n \"https://www.pgatour.com/stats/categories.RMNY_INQ.html\",\r\n \"https://www.pgatour.com/stats/categories.RPTS_INQ.html\"\r\n ]\r\n\r\n def parse(self, response):\r\n\r\n \t\r\n for sel in response.xpath(\"//body/div[@class='wrap']/div[@class='container']/div[@class='page-container']/div[@class='parsys mainParsys']/div[@class='section categories']/section/div[@class='clearfix']/div/div[@class='table-content clearfix']/ul/li\"):\r\n \r\n item = GolfItem()\r\n\r\n item['link'] = \"https://www.pgatour.com\" + sel.xpath(\"a/@href\").extract_first()\r\n \r\n yield item \r\n\r\n\r\n\r\n\r\n#response.xpath(\"//body[@class='sub-theme-fedexcup-playoffs body-pgatour-theme locale-en']/div[@class='wrap']/div[@class='container']/div[@class='page-container']/div[@class='parsys mainParsys']/div[@class='details section']/section[@class='module-statistics-off-the-tee-details ']/div[@class='main-content-off-the-tee-details']/div[@class='details-table-wrap']/table[@id='statsTable']/tbody/tr[@id='playerStatsRow28237']/td[@class='player-name']/a/text()\").extract()[0]\r\n\r\n#[@class='sub-theme-fedexcup-playoffs body-pgatour-theme locale-en']" }, { "alpha_fraction": 0.6703022718429565, "alphanum_fraction": 0.6793393492698669, "avg_line_length": 32.4375, "blob_id": "7fd93c18a5dbb7e2a7deafc5c548fc672f25bbaf", "content_id": "f5afc905b7b6686f026c651fa460295116b3988e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3209, "license_type": "no_license", "max_line_length": 162, "num_lines": 96, "path": "/modelcategory/app.py", "repo_name": "bhambre/Test-Repository", "src_encoding": "UTF-8", "text": "from flask import Flask, request, jsonify, render_template\nfrom flask_restful import Resource, Api\nfrom sqlalchemy import create_engine\nimport numpy as np\nimport pandas as pd\nfrom sklearn import datasets\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.feature_extraction.text import CountVectorizer,TfidfVectorizer\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.ensemble import RandomForestClassifier\nimport pickle\nfrom flask_cors import CORS, cross_origin\n\n\napp = Flask(__name__)\napi = Api(app)\ncors = CORS(app)\napp.config['CORS_HEADERS'] = 'Content-Type'\n\npkl_file = open('modeliden.pkl', 'rb')\nclf = pickle.load(pkl_file)\n\npkl_file_stan = 
open('stan.pkl', 'rb')\nstan = pickle.load(pkl_file_stan)\n\n \ndef modelidentification(allocperc):\n \n category = str(clf.predict(allocperc)[0])\n\n prob = str(int((clf.predict_proba(allocperc).max(axis=1)[0]) * 100))\n\n result = {'Category': category, 'Category Prob':prob}\n\n out = 'Based on the allocations you entered the category would be ' + result['Category'] + ' with a confidence score of ' + str(result['Category Prob']) + '%'\n\n return out\n\[email protected]('/')\ndef home():\n return render_template('home.html')\n\[email protected]('/uploader', methods = ['GET', 'POST'])\ndef upload_file():\n if (request.method=='POST'):\n result=request.form\n usstock = result[\"usstock\"]\n usbond = result[\"usbond\"]\n prefstock = result[\"prefstock\"]\n nonusstock = result[\"nonusstock\"]\n nonusbond = result[\"nonusbond\"]\n derevative = result[\"derevative\"]\n depreceipt = result[\"depreceipt\"]\n convertible = result[\"convertible\"]\n cash = result[\"cash\"]\n other = result[\"other\"]\n #['Cash', 'Convertible', 'DepositoryReceipt', 'Derivative','Non-US bonds', 'Non-US stocks', 'Other', 'Preferred stocks','US bonds', 'US stocks']\n list1 = [[cash,convertible,depreceipt,derevative,nonusbond,nonusstock,other,prefstock,usbond,usstock]]\n list1 = stan.transform(list1)\n output1 = modelidentification(list1)\n return str(output1)\n\[email protected]('/api', methods=['get'])\ndef create_cm():\n cash_api = request.args.get('Cash', 0)\n convertible_api = request.args.get('Convertible', 0)\n depreceipt_api = request.args.get('Depository_Receipt', 0)\n derevative_api = request.args.get('Derevative', 0)\n nonusbond_api = request.args.get('Non_US_Bond', 0)\n nonusstock_api = request.args.get('Non_US_Stock', 0)\n other_api = request.args.get('Other', 0)\n prefstock_api = request.args.get('Preferred_Stock', 0)\n usbond_api = request.args.get('US_Bond', 0)\n usstock_api = request.args.get('US_Stock', 0)\n\n\n feat = [[cash_api,convertible_api,depreceipt_api,derevative_api,nonusbond_api,nonusstock_api,other_api,prefstock_api,usbond_api,usstock_api]]\n feat = stan.transform(feat)\n catprob = int((clf.predict_proba(feat).max(axis=1)[0]) * 100)\n cat = str(clf.predict(feat)[0])\n\n if catprob < 0:\n cat = 'Unable to identify' \n\n result = {'category': cat, 'category score':catprob} \n\n\n \n return jsonify(result)\n \n\n \t \nif __name__ == '__main__':\n\tapp.debug = True\n\tapp.run()" }, { "alpha_fraction": 0.6176470518112183, "alphanum_fraction": 0.6176470518112183, "avg_line_length": 22.75, "blob_id": "a0bcc8311399815dfa95d82d52cfd3a456414d5c", "content_id": "6333cdc0f6359e07379909d198fd9793410ac4c5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 204, "license_type": "no_license", "max_line_length": 31, "num_lines": 8, "path": "/PGA Tour Stats/itemspga.py", "repo_name": "bhambre/Test-Repository", "src_encoding": "UTF-8", "text": "import scrapy\r\n\r\nclass GolfItem(scrapy.Item):\r\n PlayerName = scrapy.Field()\r\n Statistic = scrapy.Field()\r\n Variable = scrapy.Field()\r\n Value = scrapy.Field()\r\n Date = scrapy.Field()\r\n\r\n " }, { "alpha_fraction": 0.6084715127944946, "alphanum_fraction": 0.6163155436515808, "avg_line_length": 71.73553466796875, "blob_id": "cb806016be8bb261d0fd73446eabda1c93b68bb3", "content_id": "8e0f00a1b6935bc44ec12632cb3d3a6fabebf475", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8924, "license_type": "no_license", "max_line_length": 470, "num_lines": 
121, "path": "/PGA Tour Stats/PGATour.com Web Crawler/spiders/pgastats.py", "repo_name": "bhambre/Test-Repository", "src_encoding": "UTF-8", "text": "import scrapy\r\nfrom golf.itemspga import GolfItem\r\nimport pandas as pd\r\n\r\nstatlistpd = pd.read_csv(\"pgalinks2.csv\")\r\n\r\nstatlist = list(statlistpd['current'])\r\n\r\nclass GolfSpider(scrapy.Spider):\r\n name = \"golf_pga\"\r\n download_delay = 0.5\r\n allowed_domains = [\"www.pgatour.com\"]\r\n start_urls = statlist\r\n\r\n\r\n def parse(self, response):\r\n\r\n date = response.xpath(\"//body/div[@class='wrap']/div[@class='container']/div[@class='page-container']/div[@class='parsys mainParsys']/div[@class='details section']/section[@class='module-statistics-off-the-tee-details ']/div[@class='main-content-off-the-tee-details']/div[@class='header']/p/strong/text()\").extract()[0]\r\n\r\n \theader = response.xpath(\"//body/div[@class='wrap']/div[@class='container']/div[@class='page-container']/div[@class='parsys mainParsys']/div[@class='details section']/section[@class='module-statistics-off-the-tee-details ']/div[@class='main-content-off-the-tee-details']/div[@class='header']/h1/text()\").extract()[0]\r\n\r\n if response.xpath(\"//body/div[@class='wrap']/div[@class='container']/div[@class='page-container']/div[@class='parsys mainParsys']/div[@class='details section']/section[@class='module-statistics-off-the-tee-details ']/div[@class='main-content-off-the-tee-details']/div[@class='details-table-wrap']/table[@id='statsTable']/thead/tr/th[4]/text()\"):\r\n\r\n column4 = response.xpath(\"//body/div[@class='wrap']/div[@class='container']/div[@class='page-container']/div[@class='parsys mainParsys']/div[@class='details section']/section[@class='module-statistics-off-the-tee-details ']/div[@class='main-content-off-the-tee-details']/div[@class='details-table-wrap']/table[@id='statsTable']/thead/tr/th[4]/text()\").extract()[0]\r\n\r\n if response.xpath(\"//body/div[@class='wrap']/div[@class='container']/div[@class='page-container']/div[@class='parsys mainParsys']/div[@class='details section']/section[@class='module-statistics-off-the-tee-details ']/div[@class='main-content-off-the-tee-details']/div[@class='details-table-wrap']/table[@id='statsTable']/thead/tr/th[5]/text()\"):\t\r\n \t\r\n \t column5 = response.xpath(\"//body/div[@class='wrap']/div[@class='container']/div[@class='page-container']/div[@class='parsys mainParsys']/div[@class='details section']/section[@class='module-statistics-off-the-tee-details ']/div[@class='main-content-off-the-tee-details']/div[@class='details-table-wrap']/table[@id='statsTable']/thead/tr/th[5]/text()\").extract()[0]\r\n\r\n if response.xpath(\"//body/div[@class='wrap']/div[@class='container']/div[@class='page-container']/div[@class='parsys mainParsys']/div[@class='details section']/section[@class='module-statistics-off-the-tee-details ']/div[@class='main-content-off-the-tee-details']/div[@class='details-table-wrap']/table[@id='statsTable']/thead/tr/th[6]/text()\"):\r\n\r\n column6 = response.xpath(\"//body/div[@class='wrap']/div[@class='container']/div[@class='page-container']/div[@class='parsys mainParsys']/div[@class='details section']/section[@class='module-statistics-off-the-tee-details ']/div[@class='main-content-off-the-tee-details']/div[@class='details-table-wrap']/table[@id='statsTable']/thead/tr/th[6]/text()\").extract()[0]\r\n\r\n if response.xpath(\"//body/div[@class='wrap']/div[@class='container']/div[@class='page-container']/div[@class='parsys mainParsys']/div[@class='details 
section']/section[@class='module-statistics-off-the-tee-details ']/div[@class='main-content-off-the-tee-details']/div[@class='details-table-wrap']/table[@id='statsTable']/thead/tr/th[7]/text()\"):\r\n\r\n column7 = response.xpath(\"//body/div[@class='wrap']/div[@class='container']/div[@class='page-container']/div[@class='parsys mainParsys']/div[@class='details section']/section[@class='module-statistics-off-the-tee-details ']/div[@class='main-content-off-the-tee-details']/div[@class='details-table-wrap']/table[@id='statsTable']/thead/tr/th[7]/text()\").extract()[0]\r\n\r\n if response.xpath(\"//body/div[@class='wrap']/div[@class='container']/div[@class='page-container']/div[@class='parsys mainParsys']/div[@class='details section']/section[@class='module-statistics-off-the-tee-details ']/div[@class='main-content-off-the-tee-details']/div[@class='details-table-wrap']/table[@id='statsTable']/thead/tr/th[8]/text()\"):\r\n\r\n column8 = response.xpath(\"//body/div[@class='wrap']/div[@class='container']/div[@class='page-container']/div[@class='parsys mainParsys']/div[@class='details section']/section[@class='module-statistics-off-the-tee-details ']/div[@class='main-content-off-the-tee-details']/div[@class='details-table-wrap']/table[@id='statsTable']/thead/tr/th[8]/text()\").extract()[0]\r\n\r\n if response.xpath(\"//body/div[@class='wrap']/div[@class='container']/div[@class='page-container']/div[@class='parsys mainParsys']/div[@class='details section']/section[@class='module-statistics-off-the-tee-details ']/div[@class='main-content-off-the-tee-details']/div[@class='details-table-wrap']/table[@id='statsTable']/thead/tr/th[9]/text()\"):\r\n\r\n column9 = response.xpath(\"//body/div[@class='wrap']/div[@class='container']/div[@class='page-container']/div[@class='parsys mainParsys']/div[@class='details section']/section[@class='module-statistics-off-the-tee-details ']/div[@class='main-content-off-the-tee-details']/div[@class='details-table-wrap']/table[@id='statsTable']/thead/tr/th[9]/text()\").extract()[0]\r\n \t\r\n for sel in response.xpath(\"//body/div[@class='wrap']/div[@class='container']/div[@class='page-container']/div[@class='parsys mainParsys']/div[@class='details section']/section[@class='module-statistics-off-the-tee-details ']/div[@class='main-content-off-the-tee-details']/div[@class='details-table-wrap']/table[@id='statsTable']/tbody/tr\"):\r\n\r\n item = GolfItem()\r\n\r\n playername = sel.xpath(\"td[@class='player-name']/a/text()\").extract()[0]\r\n\r\n if sel.xpath(\"td[4]/text()\"):\r\n\r\n item['PlayerName'] = playername\r\n item['Statistic'] = header\r\n item['Variable'] = header + ' - ' + '(' + column4 + ')' \r\n item['Value'] = sel.xpath(\"td[4]/text()\").extract()[0]\r\n item['Date'] = date\r\n\r\n yield item\r\n\r\n if sel.xpath(\"td[5]/text()\"):\r\n\r\n item['PlayerName'] = playername\r\n item['Statistic'] = header\r\n item['Variable'] = header + ' - ' + '(' + column5 + ')' \r\n item['Value'] = sel.xpath(\"td[5]/text()\").extract()[0]\r\n item['Date'] = date\r\n\r\n yield item\r\n\r\n if sel.xpath(\"td[6]/text()\"):\r\n\r\n item['PlayerName'] = playername\r\n item['Statistic'] = header\r\n item['Variable'] = header + ' - ' + '(' + column6 + ')' \r\n item['Value'] = sel.xpath(\"td[6]/text()\").extract()[0]\r\n item['Date'] = date\r\n\r\n yield item\r\n\r\n if sel.xpath(\"td[7]/text()\"): \r\n\r\n item['PlayerName'] = playername\r\n item['Statistic'] = header\r\n item['Variable'] = header + ' - ' + '(' + column7 + ')' \r\n item['Value'] = 
sel.xpath(\"td[7]/text()\").extract()[0]\r\n item['Date'] = date\r\n\r\n yield item\r\n\r\n if sel.xpath(\"td[8]/text()\"): \r\n\r\n item['PlayerName'] = playername\r\n item['Statistic'] = header\r\n item['Variable'] = header + ' - ' + '(' + column8 + ')' \r\n item['Value'] = sel.xpath(\"td[8]/text()\").extract()[0]\r\n item['Date'] = date\r\n\r\n yield item\r\n\r\n if sel.xpath(\"td[9]/text()\"): \r\n\r\n item['PlayerName'] = sel.xpath(\"td[@class='player-name']/a/text()\").extract()[0]\r\n item['Statistic'] = header\r\n item['Variable'] = header + ' - ' + '(' + column9 + ')' \r\n item['Value'] = sel.xpath(\"td[9]/text()\").extract()[0]\r\n item['Date'] = date\r\n\r\n yield item \r\n\r\n\r\n\r\n\r\n#response.xpath(\"//body[@class='sub-theme-fedexcup-playoffs body-pgatour-theme locale-en']/div[@class='wrap']/div[@class='container']/div[@class='page-container']/div[@class='parsys mainParsys']/div[@class='details section']/section[@class='module-statistics-off-the-tee-details ']/div[@class='main-content-off-the-tee-details']/div[@class='details-table-wrap']/table[@id='statsTable']/tbody/tr[@id='playerStatsRow28237']/td[@class='player-name']/a/text()\").extract()[0]\r\n\r\n#[@class='sub-theme-fedexcup-playoffs body-pgatour-theme locale-en']\r\n\r\n\r\n\r\n#response.xpath(\"//body/div[@class='container-fluid mt-4 maxWidth']/div[@class='row'][2]/div[@class='col-lg-7']/div[@class='shadow p-3 mb-5 bg-white rounded']/div/table[@class='table table-hover table-borderless table-sm']/tbody/tr[1]/td[2]/a/text()\").extract()[0]\r\n\r\n" }, { "alpha_fraction": 0.6410256624221802, "alphanum_fraction": 0.6410256624221802, "avg_line_length": 16.5, "blob_id": "75450413da0294dc1bf3cf6df71818b0c0ad05dd", "content_id": "806c891a1dfb4c191d2f897f53db9a0ce648471d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 78, "license_type": "no_license", "max_line_length": 28, "num_lines": 4, "path": "/PGA Tour Stats/itemspgalinks.py", "repo_name": "bhambre/Test-Repository", "src_encoding": "UTF-8", "text": "import scrapy\r\n\r\nclass GolfItem(scrapy.Item):\r\n link = scrapy.Field()\r\n " }, { "alpha_fraction": 0.7701149582862854, "alphanum_fraction": 0.8160919547080994, "avg_line_length": 8.55555534362793, "blob_id": "741fbf95b0c97c9d901ba8f92627ee85a9025199", "content_id": "82356e884d8fbdb9fc8f3617f9695cfc3c9a87b9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 87, "license_type": "no_license", "max_line_length": 13, "num_lines": 9, "path": "/modelcategory/requirements.txt", "repo_name": "bhambre/Test-Repository", "src_encoding": "UTF-8", "text": "Flask\ngunicorn\npandas\nnumpy==1.13.3\nsklearn\nflask_restful\nsqlalchemy\nscipy\nflask_cors\n\n" } ]
6
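The Flask app in the record above exposes its classifier through GET /api with allocation percentages as query parameters. Below is a hypothetical smoke test; the parameter names are copied from app.py, while port 5000 is assumed (Flask's default, since the record does not pin one).

```python
# Hypothetical smoke test for the /api endpoint defined in app.py above.
import requests

params = {"Cash": 5, "US_Stock": 60, "US_Bond": 30, "Non_US_Stock": 5}
# Parameters omitted here fall back to 0 server-side via request.args.get(..., 0).
resp = requests.get("http://localhost:5000/api", params=params)
print(resp.json())  # e.g. {"category": "...", "category score": 87}
```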
vishalkumar19july/MindasBlue_Project
https://github.com/vishalkumar19july/MindasBlue_Project
c71d4bc20a4621320d13ba8982289458d92f6795
aad81306dec2358412b28f4013c879ee4ddbfa37
522f96ccf391b917a6dbcc29c59650c0a6a87bde
refs/heads/main
2023-06-06T10:45:44.121772
2021-07-05T03:21:01
2021-07-05T03:21:01
382,369,193
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7589133977890015, "alphanum_fraction": 0.7758913636207581, "avg_line_length": 52.54545593261719, "blob_id": "92518c3d49588ee8e47b076ee5d7bcb73782d69b", "content_id": "123b22f713cecfa5a88da69b5ccb78d8d014f522", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 589, "license_type": "permissive", "max_line_length": 141, "num_lines": 11, "path": "/README.md", "repo_name": "vishalkumar19july/MindasBlue_Project", "src_encoding": "UTF-8", "text": "# MindasBlue_project\nQ1) Problem Statement - Weather reporting\n \n1. Keep the MindasBlue_project files in a folder\n2. Pass an appropriate state or country name in the JSON file (Climate.json) and set the `Varience` value according to your requirement.\n3. Open cmd in administrator mode and go to the project location using the command \"cd PathofProject\"\n4. Run P1.py using the command \"python P1.py\"\n5. You will get the result in cmd.\n\nQ2) Problem Statement 2 : Game.tv\nI was not able to set up the Android emulator. I was getting \"emulator for avd was killed\". I tried to resolve it, but it was not working on my PC.\n" }, { "alpha_fraction": 0.57225102186203, "alphanum_fraction": 0.6056419014930725, "avg_line_length": 26.47541046142578, "blob_id": "676b299156a6fc25a0d8fc07c9c17e2449da3558", "content_id": "5b19d3137e6efd34f2a730db1441ab64b5f1cdb0", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1738, "license_type": "permissive", "max_line_length": 133, "num_lines": 61, "path": "/P1.py", "repo_name": "vishalkumar19july/MindasBlue_Project", "src_encoding": "UTF-8", "text": "import requests\r\nimport time\r\nimport json\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.common.keys import Keys\r\nimport os\r\n\r\ndef API(CITY):\r\n    user_api = \"722278b2860571acd63e219de61d4fe1\"\r\n    location = CITY\r\n    endpoint = \"https://api.openweathermap.org/data/2.5/weather?q=\"+location+\"&appid=\"+user_api+\"&units=metric\"\r\n    r = requests.get(endpoint)\r\n    temp = r.json()['main']['temp']\r\n    return temp\r\n\r\ndef selenium(CITY):\r\n    drivpath = os.getcwd()\r\n    PATH = drivpath+\"\\\\chromedriver.exe\"\r\n    driver = webdriver.Chrome(PATH)\r\n    driver.maximize_window()\r\n    driver.get(\"https://weather.com/\")\r\n    time.sleep(5)\r\n    inputElement = driver.find_element_by_id(\"LocationSearch_input\")\r\n    inputElement.send_keys(CITY)\r\n    time.sleep(3)\r\n    inputElement.send_keys(Keys.RETURN)\r\n    EWSStatus = driver.find_elements_by_xpath(\"/html/body/div[1]/main/div[2]/main/div[1]/div/section/div/div[2]/div[1]/span\")[0].text\r\n    st=\"\"\r\n    for i in EWSStatus:\r\n        if (i==\"°\"):\r\n            pass\r\n        else:\r\n            st=st+i\r\n    st=int(st)\r\n    return st\r\n\r\ndef City_varience(val1,val2,Varience):\r\n    if val2>val1:\r\n        difnum=((val2-val1)/val1)*100\r\n    elif val1>val2:\r\n        difnum=((val1-val2)/val2)*100\r\n    else:\r\n        difnum=0\r\n    print(str(difnum)+\"%\")\r\n    if difnum>Varience:\r\n        return 0\r\n    else:\r\n        return 1\r\n\r\nif __name__ == '__main__':\r\n    f = open('Climate.json',)\r\n    data = json.load(f)\r\n    City=data['Country_City_Name']\r\n    Varience=float(data['Varience'])\r\n    val1=float(API(City))\r\n    val2=float(selenium(City))\r\n    result=City_varience(val1,val2,Varience)\r\n    if result==1:\r\n        print(\"success Match\")\r\n    else:\r\n        print(\"Matcher Exception\")\r\n" } ]
2
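City_varience() in P1.py above compares the two temperature readings as a percent difference relative to the smaller value and reports a match when that difference does not exceed the configured threshold. A worked restatement of the same rule (my own helper names, not the repository's):

```python
# Same rule as City_varience(): percent gap relative to the smaller reading.
def variance_pct(v1, v2):
    if v1 == v2:
        return 0.0
    lo, hi = sorted((v1, v2))
    return (hi - lo) / lo * 100

print(variance_pct(30.0, 33.0))        # 10.0
print(variance_pct(30.0, 33.0) > 10)   # False -> "success Match" at a threshold of 10
```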
ucpr/design_patterns
https://github.com/ucpr/design_patterns
a5ced2737e0c4108c247912009a26fa1e81c3618
b4acdb4a0ab13178a44c16a49717d36752268c77
0da6849002e835be0600515589a5690fd2681636
refs/heads/master
2021-08-30T16:11:22.882153
2017-12-18T15:34:13
2017-12-18T15:34:13
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.8333333134651184, "alphanum_fraction": 0.8333333134651184, "avg_line_length": 17, "blob_id": "5ce7e8e01e13cacd1147d8f43377837bd047fc20", "content_id": "111fba4bdea83649957b22d1db3559c95cfc912d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 62, "license_type": "no_license", "max_line_length": 17, "num_lines": 2, "path": "/README.md", "repo_name": "ucpr/design_patterns", "src_encoding": "UTF-8", "text": "# design-patterns\nDesign pattern implementations for the ICT study session\n" }, { "alpha_fraction": 0.5214521288871765, "alphanum_fraction": 0.5214521288871765, "avg_line_length": 19.200000762939453, "blob_id": "c62f0302f55214acaae0f343bc170df32b7e39c5", "content_id": "217360bae6a3aed646447b1eb8eadb24cd897a26", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 303, "license_type": "no_license", "max_line_length": 49, "num_lines": 15, "path": "/singleton/python/singleton01.py", "repo_name": "ucpr/design_patterns", "src_encoding": "UTF-8", "text": "class Singleton:\n    _instance = None\n\n    def __new__(self):\n        if not isinstance(self._instance, self):\n            self._instance = object.__new__(self)\n        return self._instance\n\n\nif __name__ == \"__main__\":\n    a = Singleton()\n    b = Singleton()\n\n    print(id(a), id(b))\n    print(a == b)\n" } ]
2
mnjl1/task-books
https://github.com/mnjl1/task-books
e146a60c5fdfc015288069f61f60083318ae37a3
15061bc1f2a9bc4fddcb46eeef5963640537c23d
d0adc58f5cb8966b207a3b03f542e7df6b74db7d
refs/heads/master
2023-06-14T20:04:09.382578
2021-07-14T20:16:40
2021-07-14T20:16:40
385,030,081
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6688500642776489, "alphanum_fraction": 0.6717612743377686, "avg_line_length": 27.625, "blob_id": "c0f54a0908ee3a3072f6cd5124bf1a5aa5daac79", "content_id": "e25114974b95900b5bae5a85c44a8cc70455479d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1374, "license_type": "no_license", "max_line_length": 81, "num_lines": 48, "path": "/src/routers/books.py", "repo_name": "mnjl1/task-books", "src_encoding": "UTF-8", "text": "from fastapi import APIRouter\nfrom typing import List\nfrom fastapi import Depends, HTTPException\nfrom sqlalchemy.orm import Session\nfrom .. import crud\nfrom src.schemas import schemas\nfrom ..database import get_db\n\n\nrouter_books = APIRouter(\n prefix='/books',\n tags=['books']\n)\n\n\n@router_books.post('/', response_model=schemas.Book)\ndef create_book(book: schemas.BookCreate, db: Session = Depends(get_db)):\n \"\"\"\n Create book without an author.\n Author can be added to book with def book_add_author()\n \"\"\"\n return crud.create_book(db=db, book=book)\n\n\n@router_books.get('/', response_model=List[schemas.Book])\ndef get_books(skip: int = 0, db: Session = Depends(get_db)):\n \"\"\"\n Display all books\n \"\"\"\n books = crud.get_books(db, skip=skip)\n return books\n\n\n@router_books.get('/{book_id}', response_model=schemas.Book)\ndef get_book(book_id: int, db: Session = Depends(get_db)):\n book = crud.get_book(db, book_id=book_id)\n if book is None:\n raise HTTPException(status_code=404, detail='Book not found!')\n return book\n\n\n@router_books.post('/{id}/{author_id}', response_model=schemas.Book)\ndef book_add_author(book_id: int, author_id: int, db: Session = Depends(get_db)):\n \"\"\"\n Add author to book, \n params: book_id, author_id\n \"\"\"\n return crud.book_add_author(db, book_id=book_id, author_id=author_id)\n" }, { "alpha_fraction": 0.6424870491027832, "alphanum_fraction": 0.6436384320259094, "avg_line_length": 24.544116973876953, "blob_id": "bd8dbb7aff9fe4dc315e3f6750a2fff4ea34dafd", "content_id": "433fee9ec7be0414b0be3e655d79485fee118458", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1737, "license_type": "no_license", "max_line_length": 102, "num_lines": 68, "path": "/src/crud.py", "repo_name": "mnjl1/task-books", "src_encoding": "UTF-8", "text": "from graphene.types import json\nfrom sqlalchemy.orm import Session\nfrom sqlalchemy.sql.functions import count\nfrom src.models import models\nfrom src.schemas import schemas\nfrom fastapi.responses import JSONResponse\n\n\ndef get_author(db: Session, author_id: int):\n \"\"\"\n Get author by id\n \"\"\"\n author = db.query(models.Author).filter(\n models.Author.id == author_id).first()\n\n return JSONResponse(\n {\"first_name\": author.first_name, \"last_name\": author.last_name, \"books\": author.coutBooks()})\n\n\ndef get_authors(db: Session, skip: int = 0):\n \"\"\"\n Get all authors\n \"\"\"\n return db.query(models.Author).offset(skip).all()\n\n\ndef create_author(db: Session, author: schemas.AuthorCreate):\n db_author = models.Author(\n first_name=author.first_name,\n last_name=author.last_name\n )\n db.add(db_author)\n db.commit()\n db.refresh(db_author)\n return db_author\n\n\ndef get_books(db: Session, skip: int = 0):\n \"\"\"\n Get all books\n \"\"\"\n return db.query(models.Book).offset(skip).all()\n\n\ndef get_book(db: Session, book_id: int):\n return db.query(models.Book).filter(models.Book.id == book_id).first()\n\n\ndef create_book(db: 
Session, book: models.Book):\n db_book = models.Book(**book.dict())\n db\n db.add(db_book)\n db.commit()\n db.refresh(db_book)\n return db_book\n\n\ndef book_add_author(db: Session, book_id: int, author_id: int):\n author_add = get_author(db, author_id)\n book = get_book(db, book_id)\n book_authors = book.authors\n if author_add not in book_authors:\n book_authors.append(author_add)\n setattr(book, 'authors', book_authors)\n db.add(book)\n db.commit()\n db.refresh(book)\n return book\n" }, { "alpha_fraction": 0.7058823704719543, "alphanum_fraction": 0.7058823704719543, "avg_line_length": 18.5, "blob_id": "3f5f46c3043ab2b191a8929e56a4bb2e698b4c72", "content_id": "d9d60f5126764c1c974c17d3ac6f18a003c4d520", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 663, "license_type": "no_license", "max_line_length": 70, "num_lines": 34, "path": "/src/database.py", "repo_name": "mnjl1/task-books", "src_encoding": "UTF-8", "text": "from sqlalchemy import create_engine\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import sessionmaker, scoped_session\n\nSQLALCHEMY_DARABASE_URL = 'sqlite:///books_app.db'\n\nengine = create_engine(\n SQLALCHEMY_DARABASE_URL, connect_args={'check_same_thread': False}\n)\n\nSessionLocal = sessionmaker(\n autocommit=False,\n autoflush=False,\n bind=engine\n)\n\ndb_session = scoped_session(sessionmaker(\n autocommit=False,\n autoflush=False,\n bind=engine\n))\n\nBase = declarative_base()\n\n\ndef get_db():\n db = SessionLocal()\n try:\n yield db\n finally:\n db.close()\n\n\nBase.query = db_session.query_property()\n" }, { "alpha_fraction": 0.4635416567325592, "alphanum_fraction": 0.6875, "avg_line_length": 15.22535228729248, "blob_id": "2d0685a58f25fcbc4ec79dbc413b1199d28fc85e", "content_id": "e3d3a130a81802b13efd98a264334194bf98eb2b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 1152, "license_type": "no_license", "max_line_length": 27, "num_lines": 71, "path": "/requirements.txt", "repo_name": "mnjl1/task-books", "src_encoding": "UTF-8", "text": "alembic==1.4.2\nappdirs==1.4.4\nasgiref==3.4.1\nastroid==2.4.2\nasyncpg==0.23.0\nattrs==21.2.0\nautopep8==1.5.7\nbleach==3.3.0\nbuild==0.5.0\ncertifi==2021.5.30\nchardet==4.0.0\nclick==8.0.1\ncolorama==0.4.4\ncoverage==5.5\ndatabases==0.4.2\ndistlib==0.3.2\nDjango==3.2.3\ndocutils==0.17.1\nfastapi==0.66.0\nfilelock==3.0.12\nFlask==2.0.1\nh11==0.12.0\nidna==2.10\nimportlib-metadata==4.5.0\niniconfig==1.1.1\nisort==5.7.0\nitsdangerous==2.0.1\nJinja2==3.0.1\nkeyring==23.0.1\nlazy-object-proxy==1.4.3\nMako==1.1.4\nMarkupSafe==2.0.1\nmccabe==0.6.1\npackaging==20.9\npeewee==3.14.4\npep517==0.10.0\npipenv==2021.5.29\npkginfo==1.7.0\npluggy==0.13.1\npy==1.10.0\npycodestyle==2.7.0\npydantic==1.8.2\nPygments==2.9.0\npylint==2.6.0\npyparsing==2.4.7\npytest==6.2.4\npython-dateutil==2.8.1\npython-editor==1.0.4\npytz==2021.1\nreadme-renderer==29.0\nrequests==2.25.1\nrequests-toolbelt==0.9.1\nrfc3986==1.5.0\nrope==0.18.0\nsix==1.16.0\nSQLAlchemy==1.3.16\nsqlparse==0.4.1\nstarlette==0.14.2\ntabulate==0.8.9\ntoml==0.10.2\ntqdm==4.61.1\ntwine==3.4.1\ntyping-extensions==3.10.0.0\nurllib3==1.26.5\nuvicorn==0.14.0\nvirtualenv==20.4.7\nvirtualenv-clone==0.5.4\nwebencodings==0.5.1\nWerkzeug==2.0.1\nwrapt==1.12.1\nzipp==3.4.1\n" }, { "alpha_fraction": 0.708900511264801, "alphanum_fraction": 0.708900511264801, "avg_line_length": 15.754385948181152, "blob_id": "3282b08c414a200687984884d749f2fb79f6b4e1", 
"content_id": "b60f195aaacb27385635b413d01feb291fcbf293", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 955, "license_type": "no_license", "max_line_length": 52, "num_lines": 57, "path": "/src/schemas/schemas.py", "repo_name": "mnjl1/task-books", "src_encoding": "UTF-8", "text": "from typing import List, Optional\nimport graphene\nfrom pydantic import BaseModel\nfrom src.models.models import Book as BookModel\nfrom src.models.models import Author as AuthorModel\nfrom graphene_sqlalchemy import SQLAlchemyObjectType\n\n\nclass AuthorBase(BaseModel):\n first_name: Optional[str]\n last_name: Optional[str]\n\n\nclass AuthorCreate(AuthorBase):\n pass\n\n\nclass Author(AuthorBase):\n id: int\n\n class Config:\n orm_mode = True\n\n\nclass BookBase(BaseModel):\n book_name: str\n\n\nclass BookCreate(BookBase):\n pass\n\n\nclass BookUpdate(BookBase):\n pass\n\n\nclass Book(BookBase):\n id: int\n authors: List[Author] = []\n\n class Config:\n orm_mode = True\n\n\nclass BookSchema(SQLAlchemyObjectType):\n class Meta:\n model = BookModel\n\n\nclass AuthorSchema(SQLAlchemyObjectType):\n class Meta:\n model = AuthorModel\n\n\nclass SearchResult(graphene.Union):\n class Meta:\n types = (BookSchema, AuthorSchema)\n" }, { "alpha_fraction": 0.7826855182647705, "alphanum_fraction": 0.7826855182647705, "avg_line_length": 24.727272033691406, "blob_id": "1968b3041d912e329e1d2c0f444641af1bdad18b", "content_id": "1e1903898c0a80449d56ca6f45a4163fc9faf62f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 566, "license_type": "no_license", "max_line_length": 74, "num_lines": 22, "path": "/src/main.py", "repo_name": "mnjl1/task-books", "src_encoding": "UTF-8", "text": "import graphene\nfrom starlette.graphql import GraphQLApp\nfrom sqlalchemy import engine\nfrom fastapi import FastAPI\nfrom .models import models\nfrom src.database import engine\nfrom src.routers import authors, books\nfrom src.query.queries import Query\n\n\nmodels.Base.metadata.create_all(bind=engine)\n\n\napp = FastAPI(title='Books.ly')\napp.include_router(authors.router_authors)\napp.include_router(books.router_books)\napp.add_route('/grapgql', GraphQLApp(schema=graphene.Schema(query=Query)))\n\n\[email protected]('/')\nasync def root():\n return {'message': 'Books application!'}\n" }, { "alpha_fraction": 0.4393063485622406, "alphanum_fraction": 0.5433526039123535, "avg_line_length": 16.299999237060547, "blob_id": "890fa12e1d12aadbdf6f5f0a88c0a7a93598875c", "content_id": "b71a1845dffdfd3ab72e272005376bea6b6dac74", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "YAML", "length_bytes": 173, "license_type": "no_license", "max_line_length": 69, "num_lines": 10, "path": "/docker-compose.yml", "repo_name": "mnjl1/task-books", "src_encoding": "UTF-8", "text": "version: \"3.8\"\n\nservices:\n web:\n build: .\n command: uvicorn src.main:app --reload --host 0.0.0.0 --port 8080\n volumes:\n - .:/src\n ports:\n - 8080:8080\n" }, { "alpha_fraction": 0.6224696636199951, "alphanum_fraction": 0.6224696636199951, "avg_line_length": 25, "blob_id": "b81b6be9091a045ca9aec90b919dcd33e1dc4bb4", "content_id": "54b01b88838c46ab2d6d62759728e39b4f726678", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 988, "license_type": "no_license", "max_line_length": 75, "num_lines": 38, "path": "/src/models/models.py", "repo_name": "mnjl1/task-books", "src_encoding": "UTF-8", 
"text": "from sqlalchemy import Integer, String, Column, ForeignKey, Table\nfrom sqlalchemy.orm import relationship\nfrom sqlalchemy.sql.functions import count\n\nfrom src.database import Base\n\nauthor_book = Table('authorbook', Base.metadata,\n Column('author_id', Integer, ForeignKey('authors.id')),\n Column('book_id', Integer, ForeignKey('books.id'))\n )\n\n\nclass Author(Base):\n __tablename__ = 'authors'\n\n id = Column(Integer, primary_key=True, index=True)\n first_name = Column(String)\n last_name = Column(String)\n books = relationship(\n 'Book',\n secondary=author_book,\n back_populates='authors'\n )\n\n def coutBooks(self):\n return len(self.books)\n\n\nclass Book(Base):\n __tablename__ = 'books'\n\n id = Column(Integer, primary_key=True, index=True)\n book_name = Column(String)\n authors = relationship(\n 'Author',\n secondary=author_book,\n back_populates='books'\n )\n" }, { "alpha_fraction": 0.684622049331665, "alphanum_fraction": 0.6880972981452942, "avg_line_length": 27.774999618530273, "blob_id": "9e4ec3330379ccaa175757c1ea8f70d63dcf6e05", "content_id": "af2a7c452da12ef092282aa668f431f0d1aada4f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1151, "license_type": "no_license", "max_line_length": 95, "num_lines": 40, "path": "/src/routers/authors.py", "repo_name": "mnjl1/task-books", "src_encoding": "UTF-8", "text": "from fastapi import APIRouter\nfrom typing import List\nfrom fastapi import Depends, HTTPException\nfrom sqlalchemy.orm import Session\nfrom src.schemas import schemas\nfrom .. import crud\nfrom ..database import get_db\n\nrouter_authors = APIRouter(\n prefix='/authors',\n tags=['authors']\n)\n\n\n@router_authors.post('/', response_model=schemas.Author)\ndef create_author(auhtor: schemas.AuthorCreate, db: Session = Depends(get_db)):\n \"\"\"\n Create author and write to database\n \"\"\"\n return crud.create_author(db=db, author=auhtor)\n\n\n@router_authors.get('/', response_model=List[schemas.Author], description='Return all authors')\ndef get_authors(skip: int = 0, db: Session = Depends(get_db)):\n \"\"\"\n Display all authors\n \"\"\"\n authors = crud.get_authors(db, skip=skip)\n return authors\n\n\n@router_authors.get('/{author_id}', response_model=schemas.Author)\ndef get_author(author_id: int, db: Session = Depends(get_db)):\n \"\"\"\n Get author by id\n \"\"\"\n author = crud.get_author(db, author_id=author_id)\n if author is None:\n raise HTTPException(status_code=404, detail='Author not found!')\n return author\n" }, { "alpha_fraction": 0.7116104960441589, "alphanum_fraction": 0.7116104960441589, "avg_line_length": 28.66666603088379, "blob_id": "17b5c9d6d2ab53c667cd0788cc225ef40927b33a", "content_id": "906a962db7262743b8021a260adccd277abeff5b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 534, "license_type": "no_license", "max_line_length": 69, "num_lines": 18, "path": "/src/query/queries.py", "repo_name": "mnjl1/task-books", "src_encoding": "UTF-8", "text": "from src.models.models import Book\nimport graphene\nfrom src.schemas import schemas\n\n\nclass Query(graphene.ObjectType):\n\n all_books = graphene.List(schemas.BookSchema)\n all_authors = graphene.List(schemas.AuthorSchema)\n search = graphene.List(schemas.SearchResult, q=graphene.String())\n\n def resolve_all_books(self, info):\n query = schemas.BookSchema.get_query(info)\n return query.all()\n\n def resolve_all_authors(self, info):\n query = 
schemas.AuthorSchema.get_query(info)\n return query.all()\n" }, { "alpha_fraction": 0.6677966117858887, "alphanum_fraction": 0.7423728704452515, "avg_line_length": 16.352941513061523, "blob_id": "d30b5257c3dba9e286db731b0740e025bee4358e", "content_id": "8a51e7aa0718564a9a1b1b10d83eb1a38bc523d4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 379, "license_type": "no_license", "max_line_length": 37, "num_lines": 17, "path": "/README.md", "repo_name": "mnjl1/task-books", "src_encoding": "UTF-8", "text": "Задание:\nРазработка API для книжной библиотеки\n\nПрименены технологии:\nFastApi, SqlAlchemy, Docker, Graphen\n\nПодготовка:\n\n> docker-compose up --build\n\nРабота с приложением:\n\n1. FastApi + SqlAlchemy\n http://127.0.0.1:8080/docs\n\n2. FAstApi + SqlAlchemy + Graphen\n http://127.0.0.1:8080/grapgql\n" }, { "alpha_fraction": 0.6273972392082214, "alphanum_fraction": 0.6589041352272034, "avg_line_length": 25.071428298950195, "blob_id": "941686aec2c6a6a5f05911e45b5371f6d7721817", "content_id": "6740ee5418d8f07f4d49ffbe61d5ba664b071e27", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 730, "license_type": "no_license", "max_line_length": 79, "num_lines": 28, "path": "/tests/test_main.py", "repo_name": "mnjl1/task-books", "src_encoding": "UTF-8", "text": "from fastapi.testclient import TestClient\nfrom src.main import app\n\n\nclient = TestClient(app)\n\n\ndef test_author_valid_id():\n response = client.get('/authors/1')\n assert response.status_code == 200\n assert response.json() == {'first_name': 'Os', 'last_name': 'Wil', 'id': 1}\n\n\ndef test_author_not_valid_id():\n response = client.get('/authors/1000')\n assert response.status_code == 404\n assert response.json() == {'detail': \"Author not found!\"}\n\n\ndef test_book_valid_id():\n response = client.get('/books/1')\n assert response.status_code == 200\n assert response.json()['book_name'] == 'Rocket'\n\n\ndef test_book_not_valid_id():\n response = client.get('/books/1000')\n assert response.status_code == 404\n" }, { "alpha_fraction": 0.7596153616905212, "alphanum_fraction": 0.7788461446762085, "avg_line_length": 14.923076629638672, "blob_id": "d4c1f463aa71785a5f159b9beae1a33c0ae13a1f", "content_id": "b2f71a8810567caf2802167a5bb7eb346ca1e147", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Dockerfile", "length_bytes": 208, "license_type": "no_license", "max_line_length": 40, "num_lines": 13, "path": "/Dockerfile", "repo_name": "mnjl1/task-books", "src_encoding": "UTF-8", "text": "FROM python:3.9\n\nWORKDIR /src\n\nENV PYTHONDONTWRITEBYTECODE 1\nENV PYTHONUNBUFFERED 1\n\nRUN pip install --upgrade pip\nCOPY ./requirements.txt requirements.txt\n\nRUN pip install -r requirements.txt\n\nCOPY . /src/\n\n" } ]
13
eytans/Pro-Formal-Verif
https://github.com/eytans/Pro-Formal-Verif
3cf2c993368d7723b6ad97243cc62867f3eec7ab
f2619232ebeee2a946aaf834bd94772ac69c6b9f
570f93384d587a3dd575f987b5299a4579e82824
refs/heads/main
2023-05-30T01:57:33.387623
2021-06-14T09:46:49
2021-06-14T09:46:49
366,625,694
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5959419012069702, "alphanum_fraction": 0.6164829730987549, "avg_line_length": 33.713043212890625, "blob_id": "b695b8b52e6a9040ad5b01fdcfbd8ffb0896d53e", "content_id": "5d2612c1f386cda8688b5c12da03484e0fc91a44", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3992, "license_type": "no_license", "max_line_length": 125, "num_lines": 115, "path": "/sudoku.py", "repo_name": "eytans/Pro-Formal-Verif", "src_encoding": "UTF-8", "text": "from z3 import *\nimport sys\n\n\n# A variable representing the value of a specific cell\ndef matrixvar(i, j):\n return Int(\"x_%s_%s\" % (i, j))\n\n\n# Create a 9x9 matrix of integer variables\ndef getMatrix():\n return [[matrixvar(i + 1, j + 1) for j in range(9)]\n for i in range(9)]\n\n\n# Add constraints such that each cell contains a value in {1, ..., 9}\ndef addCellConstraints(X):\n \"\"\"\n Commented out the LIA way of solving and instead using inequalities\n :param X: Matrix of z3 variables\n :return: If using LIA a conjunction of LE GE contraints on the variables in the matrix.\n In the case on equality alone a list on inequalities with each number in the range.\n Depending on how int is represented (probably bit set and then no problem)\n I might need a set of equalities to maintain the wanted integers.\n I am adding these just in case.\n \"\"\"\n # LIA\n # le = [X[i][j] <= 9 for i in range(9) for j in range(9)]\n # ge = [X[i][j] >= 1 for i in range(9) for j in range(9)]\n # return And(*(le + ge))\n\n # Theory of equality\n eq_constraints = (Or(*(X[i][j] == k for k in range(1, 10))) for i in range(9) for j in range(9))\n return And(*eq_constraints)\n\n\n# Add constraints such that each row contains a digit at most once\ndef addRowConstraints(X):\n \"\"\"\n :param X: Matrix of z3 variables\n :return: :return: And contraint of inequality between all pairs of variables in a row (Good for any theory with equality)\n \"\"\"\n result = []\n for row in range(9):\n for col1 in range(9):\n for col2 in range(col1 + 1, 9):\n result.append(X[row][col1] != X[row][col2])\n return And(*result)\n\n\n# Add constraints such that each column contains a digit at most once\ndef addColumnConstraints(X):\n \"\"\"\n :param X: Matrix of z3 variables\n :return: And contraint of inequality between all pairs of variables in a column (Good for any theory with equality)\n \"\"\"\n result = []\n for col in range(9):\n for row1 in range(9):\n for row2 in range(row1 + 1, 9):\n result.append(X[row1][col] != X[row2][col])\n return And(*result)\n\n\n# Add constraints such that each 3x3 square contains a digit at most once\ndef addSquareConstraints(X):\n \"\"\"\n Note: Please use pep8 when using python, pycharm shouts on me.\n I am doing it a little ugly so documenting.\n I am creating an iterator of indices. 
Then using my helper function I create the constraints and concat.\n :param X: Matrix of z3 variables\n :return: Conjunction of inequalitis between each of the variables in each subgrid.\n \"\"\"\n import itertools\n\n def createConstraints(indices):\n return [X[row1][col1] != X[row2][col2]\n for i, (row1, col1) in enumerate(indices) for row2, col2 in indices[i:]\n if row1 != row2 or col1 != col2]\n\n areas = itertools.product([[0, 1, 2], [3, 4, 5], [6, 7, 8]], [[0, 1, 2], [3, 4, 5], [6, 7, 8]])\n indices = [list(itertools.product(x, y)) for x, y in areas]\n result = And(*itertools.chain(*map(createConstraints, indices)))\n return result\n\n\ndef solveSudoku(instance):\n X = getMatrix()\n\n # Create the initial constraints of the puzzle\n # based on the input instance. Note that '0' represents \n # an empty cells\n instance_c = [If(instance[i][j] == 0,\n True,\n X[i][j] == instance[i][j])\n for i in range(9) for j in range(9)]\n\n # Create the Z3 solver\n s = Solver()\n\n # Add all needed constraints\n s.add(instance_c)\n s.add(addCellConstraints(X))\n s.add(addRowConstraints(X))\n s.add(addColumnConstraints(X))\n s.add(addSquareConstraints(X))\n\n # If the problem is satisfiable, a solution exists\n if s.check() == sat:\n m = s.model()\n r = [[m.evaluate(X[i][j]) for j in range(9)]\n for i in range(9)]\n print_matrix(r)\n else:\n print(\"failed to solve\")\n" }, { "alpha_fraction": 0.49688422679901123, "alphanum_fraction": 0.5031157732009888, "avg_line_length": 30.094736099243164, "blob_id": "822b0483293f970e03bf1c0323139cda98c6d76a", "content_id": "c61c86639bef0445376c88ebf898c4b3d96eea03", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 6098, "license_type": "no_license", "max_line_length": 78, "num_lines": 190, "path": "/coloring.h", "repo_name": "eytans/Pro-Formal-Verif", "src_encoding": "UTF-8", "text": "#ifndef COLORING_H_\r\n#define COLORING_H_\r\n\r\n#include <vector>\r\n#include \"minisat/core/Solver.h\"\r\n\r\nusing namespace std;\r\n\r\n// ***************************************************************************\r\n// A graph class. \r\n// Note that when adding an edge (n1,n2) n1 must be less or \r\n// equal to n2. 
This is only done for simplicity and a more compact \r\n// implementation.\r\n// ***************************************************************************\r\nclass Graph {\r\npublic:\r\n Graph(int nNumberOfNodes) : m_nNumberOfNodes(nNumberOfNodes)\r\n {\r\n m_graph.resize(nNumberOfNodes);\r\n }\r\n\r\n int getNumberOfNodes() const { return m_nNumberOfNodes; }\r\n\r\n // Not efficient for large graphs\r\n vector<int> getEdgesForNode(int node) const\r\n {\r\n assert (node < m_nNumberOfNodes);\r\n assert (node < m_graph.size());\r\n return m_graph[node];\r\n }\r\n\r\n // For now allowing duplication\r\n void addEdge (int n1, int n2)\r\n {\r\n assert (n1 < m_nNumberOfNodes &&\r\n n2 < m_nNumberOfNodes);\r\n assert (n1 <= n2);\r\n\r\n // Make sure that the vector can contain the first node\r\n if (m_graph.size() <= n1)\r\n m_graph.resize(n1+1);\r\n\r\n // Take care of the first node\r\n m_graph[n1].push_back(n2);\r\n }\r\n\r\nprivate:\r\n int m_nNumberOfNodes;\r\n // A vector of vectors to represent the adjacency list\r\n // The outer vector is mapping a node (by index) to its\r\n // vector which represents a container of adjacent nodes.\r\n vector<vector<int> > m_graph;\r\n};\r\n\r\n// ***************************************************************************\r\n// A class modeling the k-coloring problem.\r\n// ***************************************************************************\r\nclass Coloring {\r\npublic:\r\n Coloring(const Graph& g, int nNumberOfColors) :\r\n m_graph(g)\r\n , m_nNumberOfColors(nNumberOfColors)\r\n , m_solver()\r\n {\r\n // Prepare the solver with the needed variables\r\n int nodes = m_graph.getNumberOfNodes();\r\n for (int c = 0; c < m_nNumberOfColors; c++)\r\n {\r\n for (int n = 0; n < nodes; n++)\r\n {\r\n m_solver.newVar();\r\n }\r\n }\r\n }\r\n\r\n void addOneColorConstraints(int node) {\r\n assert (node < m_graph.getNumberOfNodes());\r\n int node_count = m_graph.getNumberOfNodes();\r\n // Add your code here\r\n // At most one true\r\n for (int i = 0; i < m_nNumberOfColors - 1; i++) {\r\n for (int j = i+1; j < m_nNumberOfColors; j++) {\r\n m_solver.addClause(Minisat::mkLit(i*node_count + node, true), \r\n Minisat::mkLit(j*node_count + node, true));\r\n }\r\n }\r\n\r\n // At least one true\r\n Minisat::vec<Minisat::Lit> clause;\r\n for (int j=0; j < m_nNumberOfColors; j++) {\r\n clause.push(Minisat::mkLit(j*node_count + node, false));\r\n }\r\n m_solver.addClause(clause);\r\n }\r\n\r\n void toDimacs(const char* file) {\r\n m_solver.toDimacs(file);\r\n }\r\n\r\n void addEdgeColoringConstraints(int n1, int n2) {\r\n assert (n1 < m_graph.getNumberOfNodes() &&\r\n n2 < m_graph.getNumberOfNodes());\r\n assert (n1 <= n2);\r\n\r\n int node_count = m_graph.getNumberOfNodes();\r\n // Add your code here\r\n for (int i = 0; i < m_nNumberOfColors; i++) {\r\n m_solver.addClause(Minisat::mkLit(i*node_count + n1, true), \r\n Minisat::mkLit(i*node_count + n2, true));\r\n }\r\n }\r\n\r\n bool isColorable()\r\n {\r\n // Go over all nodes\r\n for (int n = 0; n < m_graph.getNumberOfNodes(); n++)\r\n {\r\n // Add the constraints for the node\r\n addOneColorConstraints(n);\r\n\r\n // Now add constraints for the edges\r\n vector<int> edges = m_graph.getEdgesForNode(n);\r\n for (int adjcent = 0; adjcent < edges.size(); adjcent++)\r\n {\r\n addEdgeColoringConstraints(n, edges[adjcent]);\r\n }\r\n }\r\n\r\n bool bResult = m_solver.solve();\r\n return bResult;\r\n }\r\n\r\n // The function gets allColoring by reference and returns\r\n // all k-coloring in this vector. 
Note that the inner vector\r\n // represents one assignment\r\n void givemeAllColoring(vector<vector<Minisat::lbool> >& allColoring) {\r\n // Go over all nodes\r\n for (int n = 0; n < m_graph.getNumberOfNodes(); n++)\r\n {\r\n // Add the constraints for the node\r\n addOneColorConstraints(n);\r\n\r\n // Now add constraints for the edges\r\n vector<int> edges = m_graph.getEdgesForNode(n);\r\n for (int adjcent = 0; adjcent < edges.size(); adjcent++)\r\n {\r\n addEdgeColoringConstraints(n, edges[adjcent]);\r\n }\r\n }\r\n\r\n // if (m_solver.modelValue(1) == Minisat::l_True) { int x=1; x++; }\r\n\r\n // Add your code here\r\n while (m_solver.solve()) {\r\n vector<Minisat::lbool> current_model = vector<Minisat::lbool>();\r\n for(int i = 0; i < m_solver.model.size(); i++) {\r\n current_model.push_back(m_solver.model[i]);\r\n }\r\n \r\n allColoring.push_back(current_model);\r\n Minisat::vec<Minisat::Lit> clause;\r\n for(int i = 0; i < m_solver.model.size(); i++)\r\n {\r\n if (m_solver.modelValue(i) == Minisat::l_True) {\r\n clause.push(Minisat::mkLit(i, true));\r\n } else {\r\n clause.push(Minisat::mkLit(i, false));\r\n }\r\n }\r\n m_solver.addClause(clause);\r\n }\r\n }\r\n\r\nprivate:\r\n Minisat::Var getNodeHasColorVar(int node, int color)\r\n {\r\n assert (node < m_graph.getNumberOfNodes() &&\r\n color < m_nNumberOfColors);\r\n\r\n return (color * m_graph.getNumberOfNodes()) + node;\r\n }\r\n\r\nprivate:\r\n const Graph& m_graph;\r\n int m_nNumberOfColors;\r\n\r\n Minisat::Solver m_solver;\r\n};\r\n\r\n#endif // COLORING_H_\r\n" }, { "alpha_fraction": 0.7777777910232544, "alphanum_fraction": 0.7777777910232544, "avg_line_length": 18, "blob_id": "a415d7f1f1c9267620d91d170cae2c1ab1a58e35", "content_id": "7383b463cba6cbf3e1be8a75a656fcafd0a9f049", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 18, "license_type": "no_license", "max_line_length": 18, "num_lines": 1, "path": "/README.md", "repo_name": "eytans/Pro-Formal-Verif", "src_encoding": "UTF-8", "text": "# Pro-Formal-Verif" } ]
3