repo_name (stringlengths 5-114) | repo_url (stringlengths 24-133) | snapshot_id (stringlengths 40-40) | revision_id (stringlengths 40-40) | directory_id (stringlengths 40-40) | branch_name (stringclasses 209 values) | visit_date (timestamp[ns]) | revision_date (timestamp[ns]) | committer_date (timestamp[ns]) | github_id (int64 9.83k-683M ⌀) | star_events_count (int64 0-22.6k) | fork_events_count (int64 0-4.15k) | gha_license_id (stringclasses 17 values) | gha_created_at (timestamp[ns]) | gha_updated_at (timestamp[ns]) | gha_pushed_at (timestamp[ns]) | gha_language (stringclasses 115 values) | files (listlengths 1-13.2k) | num_files (int64 1-13.2k) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
Antbert1/notTheGymAPI | https://github.com/Antbert1/notTheGymAPI | cc5056aa76f3338bbade1cc6d5aa6cf2ecfc39ba | d1707ddbfa96e2f22e856ddd7de0d614ce211ab0 | 0e84d5e07ed0fcbb8949e4e3b6dc5ea1fcdced0f | refs/heads/master | 2023-07-13T09:49:31.062432 | 2021-08-18T21:24:24 | 2021-08-18T21:24:24 | 396,048,788 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.8452380895614624,
"alphanum_fraction": 0.8452380895614624,
"avg_line_length": 27,
"blob_id": "31194a2d19c15b002596ca65aff5004ded23589e",
"content_id": "8cd076cb63996d0896a1c4f70ad47b21e77c6a64",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 168,
"license_type": "no_license",
"max_line_length": 39,
"num_lines": 6,
"path": "/notgymapi/admin.py",
"repo_name": "Antbert1/notTheGymAPI",
"src_encoding": "UTF-8",
"text": "from django.contrib import admin\nfrom .models import Classdetail\nfrom notgymapi import models\n\nadmin.site.register(Classdetail)\nadmin.site.register(models.UserProfile)\n"
},
{
"alpha_fraction": 0.6490231156349182,
"alphanum_fraction": 0.6507992744445801,
"avg_line_length": 37.0405387878418,
"blob_id": "706dc127ac6de56f3a483d342153e0956f9be4f3",
"content_id": "7549415581cc823742f4bbf27142d60193e6eac0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2815,
"license_type": "no_license",
"max_line_length": 86,
"num_lines": 74,
"path": "/notgymapi/views.py",
"repo_name": "Antbert1/notTheGymAPI",
"src_encoding": "UTF-8",
"text": "from django.shortcuts import render\n\nfrom rest_framework import viewsets, status\nfrom rest_framework.response import Response\nfrom rest_framework.authentication import TokenAuthentication\n\nfrom .serializers import ClassdetailSerializer\nfrom .models import Classdetail\nfrom notgymapi import models\nfrom notgymapi import serializers\nimport datetime\nfrom notgymapi import permissions\n\n\nclass ClassdetailViewSet(viewsets.ModelViewSet):\n queryset = Classdetail.objects.all()\n serializer_class = ClassdetailSerializer\n\n # queryset = Book.objects.all()\n # serializer_class = BookSerializer\n # search_fields = ('name','author')\n\n def create(self, request, *args, **kwargs):\n \"\"\"\n #checks if post request data is an array initializes serializer with many=True\n else executes default CreateModelMixin.create function\n \"\"\"\n # currentDate = datetime.date.today()\n data = request.data\n is_many = isinstance(request.data, list)\n if not is_many:\n\n # newObj, created = Classdetail.objects.update_or_create(\n # date=data.get('date'), question=data.get('question'),\n # defaults={'value': data.get('value')},\n # )\n # return newObj\n\n return super(ClassdetailViewSet, self).create(request, *args, **kwargs)\n else:\n # firstDate = request.data[0]['date']\n # newDate = datetime.datetime.strptime(firstDate,'%Y-%m-%d').date()\n # # dateToCheck = data[0].get('date')\n # Classdetail.objects.filter(date=firstDate).delete()\n serializer = self.get_serializer(data=request.data, many=True)\n serializer.is_valid(raise_exception=True)\n self.perform_create(serializer)\n headers = self.get_success_headers(serializer.data)\n\n return Response(\n serializer.data, status=status.HTTP_201_CREATED, headers=headers\n )\n\n # def get_queryset(self):\n # question = self.request.query_params.get('question')\n # date= self.request.query_params.get('date')\n\n # if (date != None and question != None):\n # queryset = Answer.objects.filter(question=question, date=date)\n # elif (date != None and question == None):\n # queryset = Answer.objects.filter(date=date)\n # elif (date == None and question != None):\n # queryset = Answer.objects.filter(question=question)\n # else:\n # queryset = Answer.objects.all()\n\n # return queryset\n\n\nclass UserProfileViewSet(viewsets.ModelViewSet):\n serializer_class = serializers.UserProfileSerializer\n queryset = models.UserProfile.objects.all()\n authentication_classes = (TokenAuthentication,)\n permission_classes = (permissions.UpdateOwnProfile,)\n"
},
{
"alpha_fraction": 0.5355932116508484,
"alphanum_fraction": 0.6000000238418579,
"avg_line_length": 17.4375,
"blob_id": "8ea23a8509e3936358b391e1b32d001b77f9b2c9",
"content_id": "bb597dd775ec9fa735c6b075f62e06e8511788aa",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 295,
"license_type": "no_license",
"max_line_length": 47,
"num_lines": 16,
"path": "/notgymapi/migrations/0004_delete_userprofile.py",
"repo_name": "Antbert1/notTheGymAPI",
"src_encoding": "UTF-8",
"text": "# Generated by Django 3.2.6 on 2021-08-18 15:33\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('notgymapi', '0003_userprofile'),\n ]\n\n operations = [\n migrations.DeleteModel(\n name='UserProfile',\n ),\n ]\n"
},
{
"alpha_fraction": 0.630885124206543,
"alphanum_fraction": 0.630885124206543,
"avg_line_length": 30.235294342041016,
"blob_id": "964dd4f14a646be89e7988f47028d780ee3fff19",
"content_id": "825eafae244cf864b86bb83ab9cf5d748a6ccbfb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1062,
"license_type": "no_license",
"max_line_length": 81,
"num_lines": 34,
"path": "/notgymapi/serializers.py",
"repo_name": "Antbert1/notTheGymAPI",
"src_encoding": "UTF-8",
"text": "from rest_framework import serializers\nfrom notgymapi import models\n\nfrom .models import Classdetail\n\n\nclass ClassdetailSerializer(serializers.HyperlinkedModelSerializer):\n class Meta:\n model = Classdetail\n fields = (\"name\", \"type\", \"tags\", \"blurb\", \"location\")\n\n\nclass UserProfileSerializer(serializers.ModelSerializer):\n class Meta:\n model = models.UserProfile\n fields = (\"id\", \"email\", \"name\", \"password\")\n extra_kwargs = {\n \"password\": {\"write_only\": True, \"style\": {\"input_type\": \"password\"}}\n }\n\n def update(self, instance, validated_data):\n if \"password\" in validated_data:\n password = validated_data.pop(\"password\")\n instance.set_password(password)\n\n return super().update(instance, validated_data)\n\n def create(self, validated_data):\n user = models.UserProfile.objects.create_user(\n email=validated_data[\"email\"],\n name=validated_data[\"name\"],\n password=validated_data[\"password\"],\n )\n return user\n"
},
{
"alpha_fraction": 0.5871369242668152,
"alphanum_fraction": 0.634854793548584,
"avg_line_length": 24.36842155456543,
"blob_id": "e5dc5b8148945f5ffa4db9a18669ce30e740eebf",
"content_id": "2e4e476f1632ad8927a1046689336ea4b1a2032c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 482,
"license_type": "no_license",
"max_line_length": 115,
"num_lines": 19,
"path": "/notgymapi/migrations/0002_classdetail_location.py",
"repo_name": "Antbert1/notTheGymAPI",
"src_encoding": "UTF-8",
"text": "# Generated by Django 3.2.6 on 2021-08-16 15:01\n\nimport django.contrib.gis.db.models.fields\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('notgymapi', '0001_initial'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='classdetail',\n name='location',\n field=django.contrib.gis.db.models.fields.PointField(blank=True, geography=True, null=True, srid=4326),\n ),\n ]\n"
}
] | 5 |
KasirajanPothiraj/python | https://github.com/KasirajanPothiraj/python | 2f9ed03b1c4d43a13a0777308b5675576ffb07db | 731f2360d82451f83caf648d620cb4233efb5933 | 4dec8e91d7a62f4263a77d321849a0a28b9985f7 | refs/heads/master | 2021-09-07T20:33:03.872459 | 2018-02-28T16:48:40 | 2018-02-28T16:48:40 | 123,313,626 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5881410241127014,
"alphanum_fraction": 0.5929487347602844,
"avg_line_length": 26.363636016845703,
"blob_id": "aaae03e8e8a101e729e2d804c7d000dd898b1bf8",
"content_id": "4c8d5e68213cc953b63c8e0a0d8e445af2555c69",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1872,
"license_type": "no_license",
"max_line_length": 257,
"num_lines": 66,
"path": "/process_load.py",
"repo_name": "KasirajanPothiraj/python",
"src_encoding": "UTF-8",
"text": "# Purpose : Main file to process the input file and load the data into the database\r\n# Author : Kasirajan Pothiraj\r\n# Date : Feb 2018\r\n\r\nfrom Db import Db\r\nimport utils as u1\r\nimport csv\r\n\r\n\r\n# Read and process the config file\r\nprint (\"Reading Config File.........\")\r\ncfg = {}\r\nwith open(\"config.properties\") as f:\r\n for line in f:\r\n (key, val) = line.split('=')\r\n cfg[key] = val.replace(\"\\n\",\"\")\r\n\r\nprint (\"List of Config Values \\n\")\r\n\r\nfor keys,values in cfg.items():\r\n print(keys, values)\r\n\r\nprint (\"Processing the input file .....\")\r\n\r\nu1.replace_word(cfg['ip_file'], cfg['op_file'], \"^&^\", \",\")\r\n\r\ndb = Db(username=cfg['db_user'], password=cfg['db_passwd'], database=cfg['db_name'], driver=cfg['db_engine'])\r\n\r\nret = []\r\nret = db.select(cfg['tbl_name'], columns='*')\r\nprint(\"Currently Number of records in table :\", len(ret))\r\n\r\nif (len(ret) > 0):\r\n print (\"Truncating the table : \", cfg['tbl_name'])\r\n db.truncate(cfg['tbl_name'])\r\n\r\nsql = 'INSERT INTO %s(%s) VALUES (%s)' % (self.enclose_sys(table), ','.join(cols), ','.join(['%s'] * len(vals)))\r\n\r\nquery = 'LOAD DATA INFILE (%s) INTO (%s) FIELDS TERMINATED BY ',' ENCLOSED BY '\"' LINES TERMINATED BY '\\r\\n' IGNORE 1 LINES;' %(cfg['op_file'], ','.join(cfg['tbl_name'])),' FIELDS TERMINATED BY ',' ENCLOSED BY '\"' LINES TERMINATED BY '\\r\\n' IGNORE 1 LINES;'\r\n\r\nwith open(cfg['op_file'], 'r') as f:\r\n reader = csv.reader(f)\r\n next(reader) # Skip the header row.\r\n for row in reader:\r\n print(row)\r\n db.insert(cfg['tbl_name'], row)\r\n print(\"Row completed \" , row)\r\n #next(reader)\r\n\r\n\r\n#close the connection to the database.\r\ndb.commit()\r\ndb.disconnect()\r\nprint \"Done\"\r\n\r\n\r\n'''\r\nprint (\"Testing\")\r\ndb = Db(username='dba', password='password123', database='ref_data', driver='mysql')\r\nret = []\r\nret = db.select(table='geo_info', columns='*')\r\n\r\n\r\nprint(len(ret))\r\n\r\n'''\r\n"
},
{
"alpha_fraction": 0.5387434959411621,
"alphanum_fraction": 0.5411626696586609,
"avg_line_length": 35.47527313232422,
"blob_id": "14890be4f1ee83a96d1dcbd55946ea791b8e2641",
"content_id": "0ae04d0f826489b9a82916b4921aae26ffda9f7e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 13641,
"license_type": "no_license",
"max_line_length": 150,
"num_lines": 364,
"path": "/Db.py",
"repo_name": "KasirajanPothiraj/python",
"src_encoding": "UTF-8",
"text": "# Purpose : class to implements methods and variable required for all the database operations\r\n# Author : Kasirajan Pothiraj\r\n# Date : Feb 2018\r\nclass Db():\r\n\r\n def __init__(self, **kwargs):\r\n\r\n self._con = None\r\n self._host = kwargs['host'] if kwargs.has_key('host') else 'localhost'\r\n self._username = kwargs['username']\r\n self._password = kwargs['password']\r\n self._dbname = kwargs['database']\r\n self._driver = kwargs['driver']\r\n\r\n #encloser characters for system identifiers(key_delim) and strings(str_delim)\r\n self._key_delim = '\"'\r\n self._str_delim = \"'\"\r\n\r\n if self._driver == 'mysql':\r\n self._module = __import__('MySQLdb')\r\n self._key_delim = '`'\r\n elif self._driver == 'pgsql':\r\n self._module = __import__('psycopg2')\r\n else:\r\n raise Exception(\"Unknown database driver\")\r\n\r\n self._affected_rows = None\r\n self._last_query = None\r\n self._insert_id = None\r\n self._error = None\r\n self._autocommit = False\r\n\r\n self.connect()\r\n\r\n #def __del__(self):\r\n #self.disconnect()\r\n\r\n def connect(self):\r\n kwargs = {'host': self._host, 'user': self._username}\r\n if self._driver == 'mysql':\r\n kwargs['passwd'] = self._password\r\n kwargs['db'] = self._dbname\r\n elif self._driver == 'pgsql':\r\n kwargs['database'] = self._dbname\r\n kwargs['password'] = self._password\r\n\r\n self._con = self._module.connect(**kwargs)\r\n\r\n def disconnect(self):\r\n if self._con:\r\n self._con.commit()\r\n self._con.close()\r\n\r\n def reconnect(self):\r\n self.disconnect()\r\n self.connect()\r\n\r\n #fake connecting to the database. Useful when trying out connection parameters, e.g. during install\r\n @staticmethod\r\n def mock(**kwargs):\r\n try:\r\n d = Db(kwargs)\r\n return true\r\n except Exception:\r\n return false\r\n\r\n\r\n #queries the database, returning the result as a list of dicts or None, if no row found or on commands\r\n def _query(self, s, params = None):\r\n\r\n if isinstance(params, list):\r\n params = tuple(params)\r\n\r\n #need this for compatibility with manual queries using MySQL format, where the backtick is used for enclosing column names\r\n #instead of the standard double quote. Will be removed soon\r\n if self._driver != 'mysql':\r\n s = self.__replace_backticks(s)\r\n\r\n try:\r\n cur = self._con.cursor()\r\n cur.execute(s, params)\r\n self._insert_id = cur.lastrowid\r\n self._affected_rows = cur.rowcount\r\n\r\n try:\r\n results = cur.fetchall()\r\n n = len(results)\r\n if (n > 0):\r\n cols = self.table_columns(None, cur)\r\n except self._module.DatabaseError:\r\n #INSERT/UPDATE or similar\r\n return None\r\n finally:\r\n cur.close()\r\n\r\n retval = []\r\n for i in range(0,n):\r\n aux = results[i]\r\n row = {}\r\n for j in range(0,len(cols)):\r\n #elem = aux[j].decode('UTF-8') if isinstance(aux[j], basestring) else aux[j]\r\n row[cols[j]] = aux[j]\r\n\r\n if len(row):\r\n retval.append(row)\r\n\r\n return retval\r\n\r\n except self._module.DatabaseError as e:\r\n #Error. 
Reset insert id and affected rows to None\r\n self._insert_id = None\r\n self._affected_rows = None\r\n raise Exception(\"Database Error: %s\" % str(e))\r\n\r\n return retval\r\n\r\n #escape a variable/tuple/list\r\n def escape(self, s):\r\n if isinstance(s, basestring):\r\n return self._con.escape_string(s)\r\n elif isinstance(s, list):\r\n return map(lambda x: self.escape(x), s)\r\n elif isinstance(s, tuple):\r\n return tuple(self.escape(list(s)))\r\n else:\r\n raise TypeException(\"Unknown parameter given for escaping\")\r\n\r\n #never get here\r\n return None\r\n\r\n #encloses a string with single quotes\r\n def enclose_str(self, s):\r\n if isinstance(s, basestring):\r\n return ''.join([self._str_delim,str(s),self._str_delim])\r\n elif isinstance(s, list):\r\n return map(self.enclose_str, s)\r\n elif isinstance(s, tuple):\r\n return tuple(map(self.enclose_str, s))\r\n else:\r\n raise TypeError(\"Unknown argument type to enclose_str\")\r\n\r\n #encloses an identifier in the appropriate double quotes/backticks\r\n def enclose_sys(self,s):\r\n #we do not enclose variable containing spaces because we assume them to be expressions, e.g. COUNT(*) AS ...\r\n #Column names containing spaces are not supported\r\n if isinstance(s, basestring):\r\n if s.count(' ') or s == '*':\r\n return s\r\n return ''.join([self._key_delim,str(s),self._key_delim])\r\n elif isinstance(s, list):\r\n return map(self.enclose_sys, s)\r\n elif isinstance(s, tuple):\r\n return tuple(map(self.enclose_sys, s))\r\n else:\r\n raise TypeError(\"Unknown argument type to enclose_sys\")\r\n\r\n #SELECT FROM table\r\n def select(self, table, columns = None, where = None, op = \"AND\"):\r\n if isinstance(columns, tuple):\r\n columns = \",\".join(map(lambda x: self.enclose_sys(x), columns))\r\n elif isinstance(columns, basestring):\r\n columns = self.enclose_sys(columns)\r\n elif not columns:\r\n columns = \"*\"\r\n else:\r\n raise TypeException(\"Invalid column definition\")\r\n\r\n (where_clause, where_params) = self.__expand_where_clause(where, op)\r\n\r\n if not where_clause:\r\n return self._query(\"SELECT %s FROM %s\" % (columns, self.enclose_sys(table)))\r\n else:\r\n return self._query(\"SELECT %s FROM %s WHERE %s\" % (columns, self.enclose_sys(table), where_clause), where_params)\r\n\r\n #INSERT INTO table\r\n def insert(self, table, values):\r\n if isinstance(values, tuple):\r\n values = [values]\r\n if not isinstance(values, list):\r\n raise TypeError(\"INSERT: Inappropriate argument type for parameter values\")\r\n #cur = self._con.cursor()\r\n col_arr = self.get_columns(table)\r\n cols = map(lambda x: self.enclose_sys(x), col_arr)\r\n vals = tuple(map(lambda x: x, values))\r\n #cur.close()\r\n sql = 'INSERT INTO %s(%s) VALUES (%s)' % (self.enclose_sys(table), ','.join(cols), ','.join( ['%s'] * len(vals) ))\r\n return self._query(sql, vals)\r\n\r\n #UPDATE table\r\n def update(self, table, values, where = None, op = 'AND'):\r\n if isinstance(values, tuple):\r\n values = [values]\r\n if not isinstance(values, list):\r\n raise TypeError(\"UPDATE: Inappropriate argument type for parameter values\")\r\n\r\n cols = map(lambda x: self.enclose_sys(x[0])+'=%s', values)\r\n vals = tuple(map(lambda x: x[1], values))\r\n\r\n (where_clause, where_params) = self.__expand_where_clause(where, op)\r\n\r\n if where_clause:\r\n return self._query('UPDATE %s SET %s WHERE %s' % (self.enclose_sys(table), ','.join(cols), where_clause), list(vals) + list(where_params))\r\n else:\r\n return self._query('UPDATE %s SET %s' % 
(self.enclose_sys(table), ','.join(cols)), vals)\r\n\r\n #DELETE FROM table\r\n def delete(self, table, where = None, op = 'AND'):\r\n (where_clause, where_params) = self.__expand_where_clause(where, op)\r\n\r\n if where_clause:\r\n return self._query(\"DELETE FROM %s WHERE %s\" % (self.enclose_sys(table), where_clause), where_params)\r\n else:\r\n return self._query(\"DELETE FROM %s\" % (self.enclose_sys(table)))\r\n\r\n #upsert and merge perform the same task, having the same end result.\r\n #The difference is that the former is optimised to work on data where usually little new rows are added\r\n #while the latter is optimised in the case the majority of the data dealt with will be added, not already existing\r\n def upsert(self, table, values, where):\r\n self.update(table, values, where)\r\n if not self.affected_rows():\r\n self.insert(table, [values] + [where])\r\n def merge(self, table, values, where):\r\n try:\r\n self.insert(table, [values] + [where])\r\n except self._module.DatabaseError as e:\r\n #TODO: Check error in case it's not due to a PK/Unique violation\r\n self.update(table, values, where)\r\n\r\n #Returns a row, instead of simply a list of 1. Inspired by Wordpress\r\n def get_row(self, table, columns = None, where = None, op = \"AND\"):\r\n r = self.select(table, columns, where, op)\r\n return r[0] if r else None\r\n\r\n #Returns a variable. Useful for quick counts or returning of an id, for example. Inspired by Wordpress\r\n def get_var(self, table, columns = None, where = None, op = \"AND\"):\r\n r = self.select(table, columns, where, op)\r\n return r[0].items()[0][1] if r else None\r\n\r\n #Count the rows of a table\r\n def count(self, table, column, value = None):\r\n where = (column, value) if value else None\r\n return self.get_var(table, 'COUNT(*) AS %s' % (self.enclose_sys('cunt')), where)\r\n\r\n\r\n def drop(self):\r\n self._query(\"DROP DATABASE \" + self._dbname)\r\n def create(self):\r\n self._query(\"CREATE DATABASE \" + self._dbname)\r\n def purge(self):\r\n #only works in MySQL, must find alternative for Postgres\r\n self.drop()\r\n self.create()\r\n def truncate(self, table_name):\r\n self._query(\"TRUNCATE TABLE \" + self.enclose_sys(table_name))\r\n\r\n\r\n #wrappers around transaction management functions\r\n def commit(self):\r\n self._con.commit()\r\n def rollback(self):\r\n self._con.rollback()\r\n def autocommit(self, val):\r\n self._autocommit(bool(val))\r\n\r\n #getters...\r\n def affected_rows(self):\r\n return self._affected_rows\r\n def insert_id(self):\r\n return self._insert_id\r\n\r\n def __is_escaped(self, s, pos):\r\n for char in [\"'\", \"\\\\\"]:\r\n j = pos - 1\r\n count = 0\r\n\r\n #count back the num. 
of appearances of certain char\r\n while (j>=0 and s[j] == char):\r\n j-=1\r\n count+=1\r\n\r\n #reduce the count in cases like \\'' ,where the last ' to the left is escaped by 1 or more \\\r\n if (char == \"'\" and count and self.__isEscaped(s, pos-count)):\r\n count-=1\r\n if (count):\r\n break\r\n\r\n return True if (count % 2) else False\r\n\r\n\r\n #replaces MySQL style `backticks` with \"double quotes\", as per SQL standard.\r\n #Required in order to support MySQL queries containing backticks\r\n def __replace_backticks(self, str):\r\n s = list(str)\r\n delim = None\r\n inside = False\r\n\r\n\r\n for i in range(0, len(s)):\r\n #only working on important characters\r\n if (s[i] not in ['\"',\"'\",\"`\"]):\r\n continue\r\n\r\n if inside:\r\n if (s[i] == '`' or s[i] != delim): #if we encounter a wrong token, simply continue\r\n continue\r\n\r\n if not self.__is_escaped(s, i):\r\n inside = False\r\n delim = None\r\n else:\r\n if s[i] == '`':\r\n s[i] = '\"'\r\n continue\r\n\r\n if not self.__is_escaped(s, i):\r\n inside = True\r\n delim = s[i]\r\n\r\n return \"\".join(s)\r\n\r\n #helper function, expands a tuple/list of tuples containing where parameters to string\r\n def __expand_where_clause(self, where, op):\r\n params = []\r\n clauses = []\r\n\r\n if where:\r\n if isinstance(where, tuple):\r\n where = [where]\r\n if not isinstance(where, list):\r\n raise TypeException(\"Unknown type for WHERE clause argument\")\r\n\r\n if where:\r\n for clause in where:\r\n clause_op = clause[2] if len(clause)==3 else '='\r\n clauses.append(self.enclose_sys(clause[0]) + (\" %s \" % clause_op) + '%s')\r\n params.append(clause[1])\r\n\r\n where_clause = (' %s ' % op).join(clauses)\r\n return (where_clause, tuple(params) if len(params) else None)\r\n\r\n #returns an array containing the names of the columns of a table\r\n def table_columns(self, table_name = None, cur = None):\r\n if not cur:\r\n try:\r\n cur = self._con.cursor()\r\n cur.execute(\"SELECT * FROM \" + table_name + \" LIMIT 1\")\r\n cur.close()\r\n except self._module.DatabaseError as e:\r\n raise Exception(\"Database Error: %s\" % str(e))\r\n\r\n cols = map(lambda x: x[0], cur.description)\r\n return cols\r\n\r\n #returns the names of the columns of a table\r\n def get_columns(self, table_name = None):\r\n try:\r\n cur = self._con.cursor()\r\n cur.execute(\"SELECT * FROM \" + table_name + \" LIMIT 1\")\r\n result = map(lambda x: x[0], cur.description)\r\n #result = [dict(zip(fields, row)) for row in cur.fetchall()]\r\n cur.close()\r\n except self._module.DatabaseError as e:\r\n raise Exception(\"Database Error: %s\" % str(e))\r\n return result\r\n"
},
{
"alpha_fraction": 0.5739796161651611,
"alphanum_fraction": 0.5867347121238708,
"avg_line_length": 28.30769157409668,
"blob_id": "d62c696634733388a3ccb413ac61be9a338bbb82",
"content_id": "0650533cbb0c6f0239792213f085e337155231f7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 392,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 13,
"path": "/utils.py",
"repo_name": "KasirajanPothiraj/python",
"src_encoding": "UTF-8",
"text": "import os\r\nimport sys\r\n\r\ndef replace_word(infile, outfile, old_word, new_word):\r\n if not os.path.isfile(infile):\r\n print(\"Error on replace_word, not a regular file: \" + infile)\r\n sys.exit(1)\r\n f1 = open(infile, 'r').read()\r\n f2 = open(outfile, 'w')\r\n m = f1.replace(old_word, new_word)\r\n f2.write(m);\r\n\r\n#replace_word(\"sample_input.txt\", \"sample.csv\", \"^&^\", \",\")"
},
{
"alpha_fraction": 0.75,
"alphanum_fraction": 0.75,
"avg_line_length": 17.714284896850586,
"blob_id": "4b5b133b9eb5dc9b762d6e12e0833dfbc68368d3",
"content_id": "cc06b60a3b449ffda678c743cc1c5903bff2eb38",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "INI",
"length_bytes": 136,
"license_type": "no_license",
"max_line_length": 29,
"num_lines": 7,
"path": "/config.properties",
"repo_name": "KasirajanPothiraj/python",
"src_encoding": "UTF-8",
"text": "ip_file=sample_input-Copy.txt\r\nop_file=file.csv\r\ndb_user=dba\r\ndb_passwd=password123\r\ndb_name=ref_data\r\ntbl_name=geo_info\r\ndb_engine=mysql"
}
] | 4 |
lucdalton/naive-bayes-classifier | https://github.com/lucdalton/naive-bayes-classifier | a6d6e846455ae29dbb4bd4e128293b442509015a | 02d80d424a96f2b31a6f56154c0f9001d759af6e | e9aa22f93615ce61271acb2ccf9609def50efdc3 | refs/heads/master | 2021-01-10T07:37:35.531226 | 2016-01-21T15:29:07 | 2016-01-21T15:29:07 | 48,168,652 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6659589409828186,
"alphanum_fraction": 0.6733899712562561,
"avg_line_length": 25.91428565979004,
"blob_id": "e858b9ff5e92c12f02dad171b6ace572b91ae1b4",
"content_id": "eeafa5713058ff1bdf4a24eb8fdbaa786a7803cb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2826,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 105,
"path": "/NBClassifier.py",
"repo_name": "lucdalton/naive-bayes-classifier",
"src_encoding": "UTF-8",
"text": "#!/bin/python\n\n# Class to expose the classifier\n# Inputs set of training data\n# The training data needs to be formatted one tweet per line\n\nclass NBClassifier:\n\n\tdef get_word_dictionary(self, tweet_array):\n\t\tdictionary = {}\n\t\tfor (review, sentiment) in tweet_array:\n\t\t\tfor word in review.split(' '):\n\t\t\t\t#print word\n\t\t\t\ttry:\n\t\t\t\t\tdictionary[word][sentiment] += 1\n\t\t\t\texcept:\n\t\t\t\t\tdictionary[word] = {\"neg\":1, \"pos\":1, \"neutral\":1}\n\t\t\t\t\tdictionary[word][sentiment] += 1\n\t\treturn dictionary\n\t\n\tdef total_neg_words(self):\n\t\tcount = 0\n\t\tfor key in self.sentiment_dictionary:\n\t\t\tcount += self.sentiment_dictionary[key]['neg']\n\t\treturn count\n\n\tdef total_pos_words(self):\n\t\tcount = 0\n\t\tfor key in self.sentiment_dictionary:\n\t\t\tcount += self.sentiment_dictionary[key]['pos']\n\t\treturn count\n\n\tdef total_neu_words(self):\n\t\tcount = 0\n\t\tfor key in self.sentiment_dictionary:\n\t\t\tcount += self.sentiment_dictionary[key]['neutral']\n\t\treturn count\n\n\tdef prob_word_given_pos(self, some_word):\n\t\ttry:\n\t\t\tpos_score = self.sentiment_dictionary[some_word]['pos']\n\t\texcept:\n\t\t\tpos_score = 1\n\t\treturn float(pos_score)/float(self.totalPos)\n\n\tdef prob_word_given_neg(self, some_word):\n\t\ttry:\n\t\t\tneg_score = self.sentiment_dictionary[some_word]['neg']\n\t\texcept:\n\t\t\tneg_score = 1\n\t\treturn float(neg_score)/float(self.totalNeg)\n\n\tdef prob_word_given_neutral(self, some_word):\n\t\ttry:\n\t\t\tneutral_score = self.sentiment_dictionary[some_word]['neutral']\n\t\texcept:\n\t\t\tneutral_score = 1\n\n\t\treturn float(neutral_score)/float(self.totalNeutral)\n\n\tdef test_sentence(self, sentence):\n\t\tpos = 1.0\n\t\tneg = 1.0\n\t\tneutral = 1.0\n\t\tsent = {'pos':0, 'neg':0}\n\t\tfor word in sentence.split(' '):\n\t\t\tpos *= self.prob_word_given_pos(word)\n\t\t\tneg *= self.prob_word_given_neg(word)\n\t\t\tneutral *= self.prob_word_given_neutral(word)\n\t\tsum_ = pos + neg + neutral\n\t\tsent['pos'] += (pos + (neutral/2))/sum_\n\t\tsent['neg'] += (neg + (neutral/2))/sum_\n\t\t#print json.dumps(sent)\n\t\treturn sent\n\n\tdef test_sentence_result(self, sentence):\n\t\treturn_result = self.test_sentence(sentence)\n\t\tpos = return_result['pos']\n\t\tneg = return_result['neg']\n\t\tif(pos < neg):\n\t\t\treturn_result['result'] = 'neg'\n\t\telse:\n\t\t\treturn_result['result'] = 'pos'\n\t\treturn return_result\n\n\t\t\n\t\t\n\n\tdef __init__(self, pos_training, neg_training):\n\t\tself.pos_tweets = open(pos_training, 'r').readlines()\n\t\tself.neg_tweets = open(neg_training, 'r').readlines()\n\t\tself.tweet_array = []\n\n\t\t## fill array with tuples of (line, sentiment)\n\t\tfor line in self.pos_tweets:\n\t\t\tself.tweet_array.append((line, 'pos'))\n\t\tfor line in self.neg_tweets:\n\t\t\tself.tweet_array.append((line, 'neg'))\n\n\t\tself.sentiment_dictionary = self.get_word_dictionary(self.tweet_array)\n\n\t\t# count sum up pos, neg, neutral\n\t\tself.totalPos = self.total_pos_words()\n\t\tself.totalNeg = self.total_neg_words()\n\t\tself.totalNeutral = self.total_neu_words()\n"
},
{
"alpha_fraction": 0.78125,
"alphanum_fraction": 0.7875000238418579,
"avg_line_length": 25.83333396911621,
"blob_id": "e20c9c74abda396427d41670345a05e3a0989ce4",
"content_id": "952c23f5e5dad0231ef6d2b08cb557ef386607c9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 160,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 6,
"path": "/README.md",
"repo_name": "lucdalton/naive-bayes-classifier",
"src_encoding": "UTF-8",
"text": "## Text classifier to test the sentiment of tweets using naive-Bayes algorithm\nruns on python 2.x. \n\nrequires training data\n\nto run python sentimentServer.py &"
},
{
"alpha_fraction": 0.6178343892097473,
"alphanum_fraction": 0.624693751335144,
"avg_line_length": 31.41269874572754,
"blob_id": "0a60efb6c87063c53a503e7cb7a5205debe1b81d",
"content_id": "56cdde424d504e4ecdcd59febd29e34cc7248a70",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2041,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 63,
"path": "/sentimentServer.py",
"repo_name": "lucdalton/naive-bayes-classifier",
"src_encoding": "UTF-8",
"text": "# The server that will accept post requests with the tweet that needs to be analysed\n# The tweet needs to be sent as a form parameter as tweet=\n# Initialise server with an instance of a classifier\n# This is a shit design for the class, I couldn't get it to work the way I wanted\n# so gave up and settled for something that just works.\n\nfrom BaseHTTPServer import BaseHTTPRequestHandler\nimport cgi\nfrom BaseHTTPServer import HTTPServer\nfrom NBClassifier import NBClassifier\nimport json\n\nprint 'loading training data....'\nclassifier = NBClassifier('pos_tweets', 'neg_tweets')\n\nclass SentimentServer(BaseHTTPRequestHandler):\n\n def get_sentiment(self, tweet):\n return classifier.test_sentence_result(tweet)\n\n def do_POST(self):\n # Parse the form data posted\n form = cgi.FieldStorage(\n fp=self.rfile, \n headers=self.headers,\n environ={'REQUEST_METHOD':'POST',\n 'CONTENT_TYPE':self.headers['Content-Type'],\n })\n # Begin the response\n self.send_response(200)\n self.end_headers()\n \n # Echo back information about what was posted in the form\n amountSent = len(form.keys())\n \n i = 0\n #self.wfile.write('[')\n for key in form.keys():\n #print key\n sent_data = form[key].value;\n \n\n returnData = self.get_sentiment(sent_data)\n #returnData = self.someClassifier.test_sentence_result(sent_data)\n \n\n returnData['text'] = sent_data\n self.wfile.write(json.dumps(returnData))\n if (i < amountSent -1):\n self.wfile.write(',')\n i += 1\n #self.wfile.write(']')\n return\n\n def __init(self, someClassifier):\n self.sentimentClassifier = someClassifier\n\n#server = HTTPServer(('localhost', 8080), SentimentServer)\n\nserver = HTTPServer(('localhost', 8080), SentimentServer)\n\nprint 'Done..\\nStarting server, use <Ctrl-C> to stop'\nserver.serve_forever()"
}
] | 3 |
TristanEis007/machine-learning | https://github.com/TristanEis007/machine-learning | 4f1ee9fd21d823d8b4f1618b31b3ccfc1431b15f | 889609caa654bde9c5d8b9e8b87c03f604abfe49 | 32de96e66116f8b3a64a115bb8e92c8565cfd2ae | refs/heads/master | 2022-04-24T00:44:54.982468 | 2020-04-16T15:53:34 | 2020-04-16T15:53:34 | 104,358,204 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.542565107345581,
"alphanum_fraction": 0.549046516418457,
"avg_line_length": 26.01346778869629,
"blob_id": "73cc9ea5348d0778732140da82663a42b7a341e9",
"content_id": "8be23ab902b8f4884f160196cd040411a8159007",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8023,
"license_type": "no_license",
"max_line_length": 98,
"num_lines": 297,
"path": "/BRAF/run_procedure.py",
"repo_name": "TristanEis007/machine-learning",
"src_encoding": "UTF-8",
"text": "'''\nThis script contains the main() function to run \nthe BRAF procedure. \n\nFlags that can be used include k, p, s\n\n`k`, `p`, and `s` are the hyperparameter described in the\n\"Biased Random Forest for Dealing with the Class\nImbalance Problem\" paper.\n\nFor information on the KNN, DecisionTree, RandomForest, \nand BRAF_pipeline objects, please see BRAF.py\n\nAuthor: Tristan Eisenhart\n'''\n\nimport argparse\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom BRAF import KNN, DecisionTree, RandomForest, BRAF_pipeline\n\n# Importing metrics from Sklearn for calculating AUC\nfrom sklearn.metrics import auc, precision_recall_curve, roc_auc_score, roc_curve\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--k\", type=int)\nparser.add_argument(\"--p\", type=float)\nparser.add_argument(\"--s\", type=int)\n\nargs = parser.parse_args()\n\nif args.k:\n K = args.k\nelse:\n K = 10\nif args.p:\n P = args.p\nelse:\n P = 0.5\nif args.s:\n S = args.s\nelse:\n S = 100\n\nprint('\\n----------------------------------------')\nprint('\\nRunning procedure with K={}, P={}, S={}\\n'.format(K, P, S))\n\ndef read_data(path=''):\n \n ''' \n Read csv from path into a pandas dataframe\n \n Parameters\n ----------\n \n path: String\n Path to the csv file\n \n '''\n \n return pd.read_csv(path)\n\n\ndef split_train_test(df, spl=0.8):\n \n ''' \n Function to split df into a\n training and a test set\n \n Parameters\n ----------\n \n df: Pandas DataFrame\n DataFrame to split into train/test sets\n\n spl: float\n Split between training and test set\n\n '''\n \n # Spliting between training and test set\n ind = np.arange(df.shape[0])\n train = np.random.choice(ind, \n int(spl*df.shape[0]), \n replace=False)\n \n test = np.setdiff1d(ind, train)\n df_train = df.loc[train, :].reset_index(drop = True)\n df_test = df.loc[test, :].reset_index(drop = True)\n \n assert len(set(test) - set(train)) == len(set(test))\n \n return df_train, df_test\n\n\ndef cross_val_training(X, k_fold = 10):\n\n '''\n Function to do cross validation using the BRAF_pipeline\n class. This function performs k-fold cross validation\n and outputs key evaluation metrics after each fold\n\n Parameters\n ----------\n \n X: Pandas DataFrame\n Data Frame with all training data\n\n k_fold: int\n Number of folds to use when performing cross validation\n\n '''\n \n ind = np.random.permutation(X.index)\n i, j = 0, len(ind) // k_fold\n\n precision_l = []\n recall_l = []\n auprc_l = []\n auroc_l = []\n \n for k in range(k_fold):\n \n # Spliting data set into k folds\n ind_val = ind[i+k:j+k]\n ind_train = np.setdiff1d(ind, ind_val)\n \n # Using 1 fold for validation and k-1 fold for training\n val_set = X.loc[ind_val, :].reset_index(drop = True)\n train_set = X.loc[ind_train, :].reset_index(drop = True)\n \n print('----- Training on fold {} -----'.format(k))\n # Running the BRAF pipeline. 
This pipeline\n # is described in detail in the BRAF.py\n braf_algo = BRAF_pipeline(train_set, k=K, p=P, s=S)\n val_pred, val_prob = braf_algo.merge_predict(val_set.loc[:, val_set.columns != 'Outcome'])\n \n print('\\nEvaluation Metrics for Fold {}'.format(k))\n # Computing evaluation metrics\n precision, recall, area, score = metrics(val_set['Outcome'], val_pred, val_prob)\n precision_l.append(precision)\n recall_l.append(recall)\n auprc_l.append(area)\n auroc_l.append(score)\n plot_ROC(val_set['Outcome'], val_prob, 'ROC_CVfold_{}'.format(k))\n plot_PRC(val_set['Outcome'], val_prob, 'PRC_CVfold_{}'.format(k))\n \n print('--------------------------------------------') \n print('----------- METRICS FROM TRAINING ----------')\n print('\\nMean CV Precision is {:.2f} and CV Recall is {:.2f}'\\\n .format(np.mean(precision_l), np.mean(recall_l)))\n print('AUPRC is {:.2f}'.format(np.mean(auprc_l)))\n print('AUROC is {:.2f}'.format(np.mean(auroc_l)))\n print('--------------------------------------------')\n print('--------------------------------------------\\n')\n print('Finished cross validation & training\\n')\n\n\ndef metrics(true, pred, prob):\n\n '''\n Function to compute key evaluation metrics.\n\n Parameters\n ----------\n \n true: numpy array\n Array of true outputs\n\n pred: numpy array\n Array of predicted output\n\n prob: numpy array\n Array with class score\n\n '''\n \n t_pos = \\\n len([a for a, p in zip(true, pred) if a == p and p == 1])\n t_neg = \\\n len([a for a, p in zip(true, pred) if a == p and p == 0])\n f_pos = \\\n len([a for a, p in zip(true, pred) if a != p and p == 1])\n f_neg = \\\n len([a for a, p in zip(true, pred) if a != p and p == 0])\n \n precision = t_pos / (t_pos + f_pos)\n recall = t_pos / (t_pos + f_neg)\n \n print('Precision is {:.2f} and Recall is {:.2f}'.format(precision, recall))\n \n prec_c, rec_c, _ = precision_recall_curve(true, prob)\n area = auc(rec_c, prec_c)\n \n print('AUPRC is {:.2f}'.format(area))\n\n score = roc_auc_score(true, prob)\n print('AUROC is {:.2f}'.format(score))\n print('\\n')\n\n return precision, recall, area, score\n\n \ndef plot_ROC(true, prob, name):\n \n '''\n Function to plot ROC and save it to disk.\n \n Parameters\n ----------\n \n true: numpy array\n Array of true outputs\n\n prob: numpy array\n Array with class score\n\n name: str\n Name of figure saved on disk\n \n '''\n \n false_pos, true_pos, _ = roc_curve(true, prob)\n roc_auc = auc(false_pos, true_pos)\n \n plt.title(name)\n plt.plot(false_pos, true_pos, 'b', label = 'AUC = %0.2f' % roc_auc)\n plt.legend(loc = 'lower right')\n plt.plot([0, 1], [0, 1],'r--')\n plt.xlim([0, 1])\n plt.ylim([0, 1.01])\n plt.ylabel('True Positive Rate')\n plt.xlabel('False Positive Rate')\n plt.savefig(name)\n plt.close()\n\n\ndef plot_PRC(true, prob, name):\n \n '''\n Function to plot PRC and save it to disk\n \n '''\n \n lr_precision, lr_recall, _ = precision_recall_curve(true, prob)\n \n plt.title(name)\n plt.plot(lr_precision, lr_recall, '-.')\n plt.xlim([0, 1])\n plt.ylim([0, 1.01])\n plt.ylabel('Precision')\n plt.xlabel('Recall')\n plt.savefig(name)\n plt.close()\n\n\ndef main():\n\n ''' Main '''\n\n # Reading data\n df = read_data()\n \n # Split into Training and Test Sets\n df_train, df_test = split_train_test(df)\n \n # Performing CV training\n cross_val_training(df_train)\n\n # Retraining on full training dataset & testing on hold-out test set\n print('---------------------------------------------')\n print('\\nTraining on entire training set and testing on hold-out test set')\n 
braf_algo = BRAF_pipeline(df_train, k=K, p=P, s=S)\n\n print('---------------------------------------------\\n')\n print('Done Training. Testing on test set and outputing evaluation metrics')\n pred, prob = braf_algo.merge_predict(df_test.loc[:, df_test.columns != 'Outcome'])\n precision, recall, area, score = metrics(df_test['Outcome'], pred, prob)\n\n print('---------------------------------------------')\n print('----------- METRICS ON TEST SET -------------')\n print('Precision is {:.2f} and Recall is {:.2f}'\\\n .format(np.mean(precision), np.mean(recall)))\n\n print('AUPRC is {:.2f}\\n'.format(np.mean(area)))\n print('AUROC is {:.2f}'.format(np.mean(score)))\n print('---------------------------------------------')\n print('---------------------------------------------\\n')\n plot_ROC(df_test['Outcome'], prob, 'ROC_test')\n plot_PRC(df_test['Outcome'], prob, 'PRC_test')\n print('\\nAll done. The hyperparameters used were:')\n print('K={}, P={}, S={}\\n'.format(K, P, S))\n\n\nif __name__ == '__main__':\n main()\n"
},
{
"alpha_fraction": 0.546842098236084,
"alphanum_fraction": 0.5505421757698059,
"avg_line_length": 27.785503387451172,
"blob_id": "70f19ced09d698e851e9dcb5c0a47a2e78f3931e",
"content_id": "4b7b8eefb7132620df1b041cd4990bba066396c5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 19459,
"license_type": "no_license",
"max_line_length": 100,
"num_lines": 676,
"path": "/BRAF/BRAF.py",
"repo_name": "TristanEis007/machine-learning",
"src_encoding": "UTF-8",
"text": "\"\"\"\nThe BRAF file contains objects that generate\na KNN and a Random Forest. See helper on each\nobject for more information.\n\nThis article was very helpful for creating\nthe DecisionTree and RandomForest classes:\nhttps://towardsdatascience.com/random-forests-and-decision-trees-from-scratch-in-python-3e4fa5ae4249\n\nAuthor: Tristan Eisenhart\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\n\nclass KNN():\n\n '''\n This object finds the K-nearest neighbors between\n a given observation x and a given data set. Use the\n find_knn function to get the results.\n\n Parameters\n ----------\n\n data_majority: numpy 2D array\n Data set with all majority observations\n\n k: int\n Number of nearest neighbors to return\n\n '''\n\n def __init__(self, data_majority, k):\n\n ''' Init func for class KNN '''\n\n self.data_majority = data_majority\n self.k = k\n\n\n def euclidean_distance(self, x, y):\n\n '''\n Returns the euclidean distance between\n rows x and y\n\n Parameters\n ----------\n\n x: Numpy array\n row of data x\n\n y: Numpy array\n row of data y\n\n '''\n\n return np.sqrt(sum(np.power(x - y, 2)))\n\n\n def find_knn(self, x):\n\n '''\n Computes the euclidean_distance function for x and all\n observations in the majority data set\n\n Parameters\n ----------\n\n x: Numpy array\n row of data x\n\n '''\n\n distances = list(map(lambda y: self.euclidean_distance(x, y),\n np.array(self.data_majority)))\n nearest_index = np.argsort(distances)[0:self.k]\n\n return nearest_index\n\n\nclass RandomForest():\n\n '''\n\n This object creates a Random Forest made of\n a specified number of Decision Trees trained\n over random samples and features from the\n training set.\n\n The predict() function returns a prediction from\n each tree.\n\n Parameters\n ----------\n\n X : numpy 2D array\n Input data to train the forest on\n\n y: numpy array\n Output data from the training set\n\n n_trees: int\n Number of trees to generate in the forest\n\n n_features: int\n Number of features to randomely sample for\n in each generated tree\n\n sample_size: int\n Number of observations to randomely sample\n when training each tree\n\n depth: int\n Maximum number of splits to perform in\n each tree\n\n min_leaf: int\n Minimum number of observations required to\n make a split at a node for each tree\n\n debug: bool\n Ignore, this is used for debuging\n\n '''\n\n def __init__(self, X, y, n_trees, n_features,\n sample_size=None, depth=10,\n min_leaf=5, debug=False):\n\n ''' Init func for class RandomForest '''\n\n self.X = X\n self.y = np.array(y)\n\n assert self.X.shape[0] == self.y.shape[0]\n\n # Will tune those hyperparameters during training\n # in addition to n_trees (number of estimators)\n self.n_features = n_features\n self.depth = depth\n self.min_leaf = min_leaf\n # Using the data set size if no sized passed\n if sample_size == None:\n self.sample_size = X.shape[0]\n else:\n self.sample_size = sample_size\n\n self.i = 0\n self.debug = debug\n self.trees = [self.generate_tree() for i in range(n_trees)]\n\n\n def generate_tree(self):\n\n '''\n Function to initiate a decision tree. 
The `rand_ind` variable\n and `rand_features` variable are randomely sampled sets of\n observations and features, respectively, to train the tree on.\n\n DecisionTree is a Class of it's own, defined below.\n\n '''\n\n # Sampling observations with replacement\n rand_ind = np.random.choice(self.y.shape[0],\n size=self.sample_size,\n replace=True)\n\n rand_features = np.random.choice(self.X.shape[1],\n size=self.n_features,\n replace=False)\n\n self.i += 1\n\n print('Fitting tree num.{}, {:.1f}% observations with Ouctome = 1'\\\n .format(self.i, 100*np.mean(self.y[rand_ind])))\n\n return DecisionTree(self.X.iloc[rand_ind],\n self.y[rand_ind],\n self.n_features,\n rand_features,\n ind=np.array(range(self.sample_size)),\n depth=self.depth,\n min_leaf=self.min_leaf,\n debug=self.debug)\n\n\n def predict(self, x):\n\n ''' Predict an output for observation x '''\n\n return [t.predict(x) for t in self.trees]\n\n\nclass DecisionTree():\n\n '''\n\n This object creates Decision Trees every time\n a node creates a split, i.e. one Decision Tree\n for each split. The `run_split_logic()` function\n finds the optimal feature and threshold for each\n node. It also had built in logic to determine when\n a leaf node is reached. The function is ran in the\n init.\n\n The DecisionTree is called recursively anytime a\n leaf node is not reached. A leaf node is reached\n when there are less than min_leaf samples left\n or when the max depth is reached.\n\n The Gini Index is used to determine the optimal\n feature and threshold at every node. This loss\n function can easily be replaced by another one.\n\n Parameters\n ----------\n\n X : Pandas dataframe\n Input data to train the forest on\n\n y: Pandas Series\n Output data from the training set\n\n n_features: int\n Number of features to randomely sample for\n in each generated tree\n\n ind_features: numpy array\n Array containing the index of randomely\n sampled features for a specific decision tree\n\n ind: numpy array\n Array containing the index of randomely\n sampled observations from the original\n data set X to train a specific decision tree\n\n depth: int\n Maximum number of splits to perform in\n each tree\n\n min_leaf: int\n Minimum number of observations required to\n make a split at a node for each tree\n\n direction: str\n Ignore this argument. I used it do debug the\n tree\n\n debug: Bool\n Ignore this argument. I used it do debug the\n tree\n\n '''\n\n\n def __init__(self, X, y, n_features, ind_features,\n ind, depth=10, min_leaf=5,\n direction='C', debug=False):\n\n ''' Init func for class DecisionTree '''\n\n self.X = X\n self.y = y\n\n assert self.X.shape[0] == self.y.shape[0]\n\n self.ind = ind\n self.min_leaf = min_leaf\n self.ind_features = ind_features\n self.depth = depth\n self.direction = direction\n self.n_features = n_features\n self.val = np.argmax(np.bincount(np.array(y)[ind].astype(int)))\n self.score = np.inf\n self.debug = debug\n self.run_split_logic()\n\n\n def gini(self, y):\n\n '''\n Computes the Gini score for a given array of\n output. 
The Gini score is equal to:\n\n 1 - sum(p_classes ^ 2)\n\n Where p_classes is an array containing the\n frequency of each class in the output data.\n\n Parameters\n ----------\n\n y: numpy array\n array of output over which to calculate the\n gini index\n\n '''\n\n _, counts = np.unique(y, return_counts = True)\n\n return 1 - np.sum(np.power(counts / np.sum(counts), 2))\n\n\n def run_split_logic(self):\n\n '''\n\n Function to run the split logic for a decision tree.\n The `find_optimal_split()` function performs a greedy \n search, where it iteratively goes through each feature \n in the set of randomely sampled features and computes\n the Gini score for each possible split threshold.\n\n Once the leaf node is reached, the functions returns. \n\n '''\n\n if self.debug:\n print('This is node {}.{}'.format(self.depth, self.direction))\n\n # Starting by running the optimal split feature\n for feature in self.ind_features:\n self.find_optimal_split(feature)\n\n # Function returns if we've reached a leaf_node\n if self.is_leaf_node():\n\n if self.debug:\n print('Reached a leaf node with depth {}. Final score is {:.2f}'\\\n .format(self.depth, self.score))\n return\n\n if self.debug:\n print('Optimal split made on {} with threshold value {:.2f}'\\\n .format(self.X.columns[self.feature], self.split_threshold))\n\n # Pass the data that to the left / right branch\n # based on the threshold value\n x = self.X.values[self.ind, self.feature]\n lt_tree = np.where(x <= self.split_threshold)[0]\n rt_tree = np.where(x > self.split_threshold)[0]\n\n # Randomely sample features without replacement for \n # initializing the next tree\n lt_rand_features = np.random.choice(self.X.shape[1],\n size=self.n_features,\n replace=False)\n rt_rand_features = np.random.choice(self.X.shape[1],\n size=self.n_features,\n replace=False)\n\n # Recursively calling the DecisionTree class on both\n # left and right branches as long as we haven't reached \n # a leaf node\n self.lt_tree = DecisionTree(self.X,\n self.y,\n self.n_features, \n lt_rand_features, \n ind=self.ind[lt_tree],\n direction='L' + str(self.depth-1), # debug\n depth=self.depth - 1,\n min_leaf=self.min_leaf)\n self.rt_tree = DecisionTree(self.X,\n self.y,\n self.n_features,\n rt_rand_features,\n ind=self.ind[rt_tree],\n direction='R' + str(self.depth-1), # debug\n depth=self.depth - 1,\n min_leaf=self.min_leaf)\n \n\n def find_optimal_split(self, feature):\n\n '''\n This function iterates over all values for a \n specified feature and updates the feature and\n threshold to use to make a split, if that split\n reduces the Gini value.\n\n Parameters\n ----------\n\n feature: int\n feature to run the split logic on\n\n '''\n\n x = np.array(self.X)[self.ind, feature]\n y = np.array(self.y)[self.ind]\n\n # Iterating over all observations in X\n for threshold in np.unique(x):\n\n lt_split_ind = np.where(x <= threshold)[0]\n rt_split_ind = np.where(x > threshold)[0]\n\n # If the split is smaller than min_leaf then use another\n # threshold value, i.e. 
skip this observation in the loop\n if len(lt_split_ind) < self.min_leaf or \\\n len(rt_split_ind) < self.min_leaf:\n continue\n\n # Computing gini score for both left and right branches\n lt_gini = self.gini(y[lt_split_ind])\n rt_gini = self.gini(y[rt_split_ind])\n\n # Using a weighted gini score for making the split decision\n w_gini = (lt_gini * len(lt_split_ind) / len(self.ind)) + \\\n (rt_gini * len(rt_split_ind) / len(self.ind))\n\n # If the w_gini is < than the current score, then we\n # update the feature used to make the split as well as\n # the split_threshold\n if w_gini < self.score:\n self.feature = feature\n self.score = w_gini\n self.split_threshold = threshold\n\n\n def is_leaf_node(self):\n\n ''' Function to test if the node is a leaf node '''\n\n return self.depth <= 0 or self.score == np.inf\n\n\n def predict(self, X):\n\n ''' Function to run the prediction on all X values '''\n\n return np.array([self.predict_row(i) for i in X.values])\n\n\n def predict_row(self, i):\n\n ''' Function to run the prediction logice on a single value '''\n\n if self.is_leaf_node():\n return self.val\n else:\n if i[self.feature] <= self.split_threshold:\n t = self.lt_tree\n else:\n t = self.rt_tree\n\n return t.predict_row(i)\n\n\nclass BRAF_pipeline():\n\n '''\n Pipeline to run BRAF. This object runs the\n pseudo-code from \"Biased Random Forest For\n Dealing With the Class Imbalance Problem\".\n\n Please look at the helper for class KNN,\n DecisionTree and RandomForest for information\n on how each internal object is built.\n\n The function `merge_predict()` is used to\n make a prediction using the trained Random\n Forests.\n\n Parameters\n ----------\n\n df: Pandas DataFrame\n Full data set to run BRAF on\n\n k: int\n Number of K-Nearest Neighbors to find\n\n p: float\n Proportion split to determine the number of\n estimators used for training each random\n forest\n\n s: int\n Number of estimators to use in the random forest.\n This number will be multiplied by p or (1-p)\n '''\n\n def __init__(self, df, k = 10, p = 0.5, s = 100):\n\n ''' Init funciton of BRAF_pipeline '''\n\n self.df = df\n self.k = k\n self.p = p\n self.s = s\n\n self.run_data_processing()\n self.run_random_forests()\n\n def run_data_processing(self):\n\n '''\n Function to process data as explained in\n the paper \"Biased Random Forest For\n Dealing With the Class Imbalance Problem\".\n\n '''\n\n self.X = self.df.loc[:, self.df.columns != 'Outcome']\n self.y = self.df.loc[:, 'Outcome']\n\n # Part 1: Split into a majority and\n # a minority set\n T_maj, T_min = self.split_maj_min()\n X_maj = T_maj.loc[:, T_maj.columns != 'Outcome']\n y_maj = T_maj.loc[:, 'Outcome']\n X_min = T_min.loc[:, T_min.columns != 'Outcome']\n y_min = T_min.loc[:, 'Outcome']\n\n del T_maj\n del T_min\n assert 'Outcome' not in X_maj.columns\n assert 'Outcome' not in X_min.columns\n\n # Part 2: For each observation from\n # T_min, find the K-nearest neighbors in\n # T_maj and add those unique neighbors to the\n # critical data set along with the minority\n # observation\n self.X_critical = self.generate_critical_set(X_maj, X_min)\n self.y_critical = self.df.loc[self.X_critical.index, 'Outcome'].astype(float)\n\n print('Critical data set generated using {} nearest neighbors. 
The set shape is {}'\\\n .format(self.k, self.X_critical.shape))\n print('{:.1f}% observations with Outcome = 1 in the critical set'\\\n .format(np.mean(self.y_critical) * 100))\n\n\n def run_random_forests(self):\n\n '''\n Function to run two random forests.\n\n The first random forest uses all observations\n from the original training set.\n\n The second random forest uses observations\n from the critical set.\n\n The number of estimators in each random forest\n is a function of s and p.\n\n '''\n\n # Part 3: Running a RF with observations from the\n # original data set. The size of the RF is equal\n # to s * (1-p).\n print('\\nFitting RF with entire data set')\n self.rf_all = RandomForest(self.X, self.y,\n n_trees = int(self.s*(1-self.p)),\n n_features = len(self.X.columns)//3)\n\n # Part 4: Running a RF with observations from the\n # critical data set. The size of the RF is equal\n # to s * p.\n print('\\nFitting RF with critical data set')\n self.rf_critical = RandomForest(self.X_critical, self.y_critical,\n n_trees = int(self.s*(self.p)),\n n_features = len(self.X.columns)//3)\n\n\n def split_maj_min(self, output='Outcome'):\n\n '''\n Split the data set into a majority and\n a minority set\n\n Parameters\n ----------\n\n output: String\n Column to use for the output\n\n '''\n\n assert output in self.df.columns\n\n maj = np.argmax(np.bincount(self.df['Outcome']))\n T_maj = self.df.loc[self.df[output]==maj, :]\n T_min = self.df.loc[self.df[output]!=maj, :]\n\n return T_maj, T_min\n\n\n def generate_critical_set(self, T_maj, T_min):\n\n '''\n For each observation from T_min,\n find the K-nearest neighbors in T_maj and\n add those unique neighbors to the critical\n data set along with the minority observation\n\n Parameters\n ----------\n\n T_maj: Pandas DataFrame\n Majority set\n\n T_min: Pandas DataFrame\n Minority set\n\n '''\n\n # Init KNN class\n knn = KNN(data_majority = T_maj, k = self.k)\n critical_ind = []\n T_critical = []\n # Looping over all minority observations\n for (n, minority_obs) in enumerate(np.array(T_min)):\n\n T_critical.append(T_min.iloc[n])\n # Finding k-nearest neighbors\n index_nn = knn.find_knn(minority_obs)\n\n # Looping over all nearest neighbors found\n for ind in index_nn:\n\n # Checking that neighbor is not already in\n # critical set\n if ind not in critical_ind:\n\n # Adding unique majority observations to\n # critical set\n critical_ind.append(ind)\n T_critical.append(T_maj.iloc[ind])\n\n return pd.DataFrame(T_critical)\n\n\n def merge_predict(self, x):\n\n '''\n Function to merge predictions from\n both random forest and to select the majority\n vote to determine the predicted class. Also\n returning the probability associated with\n the vote\n \n Parameters\n ----------\n\n x: Pandas DataFrame\n Data over which to run the prediction\n \n '''\n\n predict_all = self.rf_all.predict(x)\n predict_critical = self.rf_critical.predict(x)\n\n assert len(predict_all) == int(self.s * (1-self.p))\n assert len(predict_critical) == int(self.s * (self.p))\n\n all_ = predict_all, predict_critical\n\n majority_predict = [np.argmax(np.bincount(np.concatenate(all_)[:, i]))\n for i in range(len(x))]\n majority_vote = [np.mean(np.concatenate(all_)[:, i])\n for i in range(len(x))]\n\n proba = []\n for p, v in zip(majority_vote, majority_predict):\n if v == 0:\n proba.append(1-p)\n else:\n proba.append(p)\n\n return majority_predict, majority_vote\n"
},
{
"alpha_fraction": 0.799227774143219,
"alphanum_fraction": 0.8146718144416809,
"avg_line_length": 85.33333587646484,
"blob_id": "2a83620416270f97e6f8812bf6cc7ceff2f6c378",
"content_id": "3d6b0dc87abae2ff85de0d6074c758cceb8c823d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 259,
"license_type": "no_license",
"max_line_length": 238,
"num_lines": 3,
"path": "/README.md",
"repo_name": "TristanEis007/machine-learning",
"src_encoding": "UTF-8",
"text": "# machine-learning\n\nThis repo contains implementations of Machine Learning papers or projects. It also contains the materials I prepared for a Neural Network seminar I gave at Columbia University in 2017. Feel free to contact me with any questions at [email protected]\n"
}
] | 3 |
pragun-ananda/dagster
|
https://github.com/pragun-ananda/dagster
|
53db377b456f2a0c98813e26b456509f33dd01c7
|
212653d77f60e6c4bc6c2d65f0fadb6e8c0ce7fe
|
7c5535e46109767662b40ddf93071b97fa51b559
|
refs/heads/master
| 2023-08-25T18:50:24.044188 | 2021-10-15T17:34:29 | 2021-10-15T17:34:29 | null | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.8080645203590393,
"alphanum_fraction": 0.8387096524238586,
"avg_line_length": 55.3636360168457,
"blob_id": "9cad58bb3d52090e94b3483f085d421efeb6914a",
"content_id": "e30b7ad5d7ca2fce1cbc9fa12620f6d502837e74",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 620,
"license_type": "permissive",
"max_line_length": 94,
"num_lines": 11,
"path": "/python_modules/libraries/dagster-aws/dagster_aws/s3/__init__.py",
"repo_name": "pragun-ananda/dagster",
"src_encoding": "UTF-8",
"text": "from .compute_log_manager import S3ComputeLogManager\nfrom .file_cache import S3FileCache, s3_file_cache\nfrom .file_manager import S3FileHandle, S3FileManager\nfrom .intermediate_storage import S3IntermediateStorage\nfrom .io_manager import PickledObjectS3IOManager, s3_pickle_io_manager\nfrom .object_store import S3ObjectStore\nfrom .ops import S3Coordinate, file_handle_to_s3\nfrom .resources import s3_file_manager, s3_resource\nfrom .s3_fake_resource import S3FakeSession, create_s3_fake_resource\nfrom .system_storage import s3_intermediate_storage, s3_plus_default_intermediate_storage_defs\nfrom .utils import S3Callback\n"
},
{
"alpha_fraction": 0.7368420958518982,
"alphanum_fraction": 0.7368420958518982,
"avg_line_length": 38.407405853271484,
"blob_id": "c3743f12c3e6a7b0bf60f6be6bb4c339e4240747",
"content_id": "81ee8f71a154dd21eb7a91fd4150eb9557e920ec",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1064,
"license_type": "permissive",
"max_line_length": 93,
"num_lines": 27,
"path": "/examples/hacker_news/hacker_news/repo.py",
"repo_name": "pragun-ananda/dagster",
"src_encoding": "UTF-8",
"text": "from dagster import repository, schedule_from_partitions\n\nfrom .jobs.dbt_metrics import dbt_prod_job, dbt_staging_job\nfrom .jobs.hacker_news_api_download import download_prod_job, download_staging_job\nfrom .jobs.story_recommender import story_recommender_prod_job, story_recommender_staging_job\nfrom .sensors.hn_tables_updated_sensor import make_hn_tables_updated_sensor\nfrom .sensors.slack_on_failure_sensor import make_slack_on_failure_sensor\n\n\n@repository\ndef hacker_news_prod():\n return [\n schedule_from_partitions(download_prod_job),\n make_slack_on_failure_sensor(base_url=\"my_prod_dagit_url.com\"),\n make_hn_tables_updated_sensor(story_recommender_prod_job),\n make_hn_tables_updated_sensor(dbt_prod_job),\n ]\n\n\n@repository\ndef hacker_news_staging():\n return [\n schedule_from_partitions(download_staging_job),\n make_slack_on_failure_sensor(base_url=\"my_staging_dagit_url.com\"),\n make_hn_tables_updated_sensor(story_recommender_staging_job),\n make_hn_tables_updated_sensor(dbt_staging_job),\n ]\n"
},
{
"alpha_fraction": 0.7727272510528564,
"alphanum_fraction": 0.7727272510528564,
"avg_line_length": 28.33333396911621,
"blob_id": "ac91dcfddd6b59c8f3c609290a0da96e6d96456a",
"content_id": "6666e774eb1a39720d5e72b9e6df5429c39e43f0",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 176,
"license_type": "permissive",
"max_line_length": 68,
"num_lines": 6,
"path": "/examples/hacker_news_assets/hacker_news_assets_tests/test_pipelines/test_dbt_pipeline.py",
"repo_name": "pragun-ananda/dagster",
"src_encoding": "UTF-8",
"text": "from hacker_news_assets.pipelines.dbt_pipeline import activity_stats\n\n\ndef test_activity_forecast():\n result = activity_stats.execute_in_process()\n assert result.success\n"
},
{
"alpha_fraction": 0.6442307829856873,
"alphanum_fraction": 0.6538461446762085,
"avg_line_length": 19.799999237060547,
"blob_id": "575e413cdca5ee712a6f0192003bf88ff0803c1a",
"content_id": "edc205833f977cf3ff55a8caae178ccced351445",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "reStructuredText",
"length_bytes": 416,
"license_type": "permissive",
"max_line_length": 81,
"num_lines": 20,
"path": "/docs/sphinx/sections/api/apidocs/pipeline.rst",
"repo_name": "pragun-ananda/dagster",
"src_encoding": "UTF-8",
"text": ".. currentmodule:: dagster\n\n[Legacy] Pipelines\n==================\n\nAs of Dagster 0.13.0, we recommend using `Jobs` as an alternative to `Pipelines`.\n\nPipeline definitions\n--------------------\n.. autodecorator:: pipeline\n\n.. autoclass:: PipelineDefinition\n\nDependencies and aliases\n------------------------\n.. autoclass:: DependencyDefinition\n\n.. autoclass:: MultiDependencyDefinition\n\n.. autoclass:: SolidInvocation\n"
}
] | 4 |
CelebradoJonathan/consumer
|
https://github.com/CelebradoJonathan/consumer
|
f51d3185c04b842712cd5300382ca4ddcad44af4
|
e1212820c56300d297b9308a0f1e06fec226653d
|
717e787c829bec3c524aa0590ce4f96270b9c3f4
|
refs/heads/master
| 2021-06-29T13:10:47.159261 | 2019-11-13T05:38:20 | 2019-11-13T05:38:20 | 221,380,740 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6246458888053894,
"alphanum_fraction": 0.6246458888053894,
"avg_line_length": 23.789474487304688,
"blob_id": "b5617aa2dc36257e28c25049cf0abea08501b2c8",
"content_id": "b4c8015d266cb92e2ec106e7268576ede4048d01",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1412,
"license_type": "permissive",
"max_line_length": 80,
"num_lines": 57,
"path": "/receive.py",
"repo_name": "CelebradoJonathan/consumer",
"src_encoding": "UTF-8",
"text": "import pika\nimport configparser\nimport logging.config\nimport yaml\nimport os\nimport time\n\n\ndef setup_logging(\n default_path='logging.yaml',\n default_level=logging.INFO,\n env_key='LOG_CFG'\n):\n \"\"\"Setup logging configuration from a yaml file.\n \"\"\"\n path = default_path\n value = os.getenv(env_key, None)\n if value:\n path = value\n if os.path.exists(path):\n with open(path, 'rt') as f:\n config = yaml.safe_load(f.read())\n logging.config.dictConfig(config)\n\n else:\n logging.basicConfig(level=default_level)\n\n\ndef receive_metadata(hostname, queue, secs):\n connection = pika.BlockingConnection(\n pika.ConnectionParameters(host=hostname))\n channel = connection.channel()\n\n channel.queue_declare(queue=queue)\n while True:\n method, properties, body = channel.basic_get(queue=queue, auto_ack=True)\n if method:\n logger.info(\"Received :\"+str(body))\n else:\n time.sleep(int(secs))\n\n\ndef main():\n config = configparser.ConfigParser()\n conf_dir = os.path.join(os.path.dirname(__file__), 'conf.ini')\n config.read(conf_dir)\n hostname = config['args']['hostname']\n queue = config['args']['queue']\n secs = config['args']['secs']\n\n receive_metadata(hostname, queue, secs)\n\n\nif __name__ == '__main__':\n setup_logging()\n logger = logging.getLogger(__name__)\n main()"
},
{
"alpha_fraction": 0.6724137663841248,
"alphanum_fraction": 0.6724137663841248,
"avg_line_length": 9.454545021057129,
"blob_id": "713c30b47592929d27a9fba3aa173078722e8541",
"content_id": "fd3dbfaab7b91f7fd05d747b88ac7f395e5c4b9a",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 116,
"license_type": "permissive",
"max_line_length": 28,
"num_lines": 11,
"path": "/README.md",
"repo_name": "CelebradoJonathan/consumer",
"src_encoding": "UTF-8",
"text": "# Product Name\n> \n\nTakes messages form a queue.\n\n## Usage example\n\nType in the terminal:\n```\npython receive.py\n```\n\n"
},
{
"alpha_fraction": 0.5384615659713745,
"alphanum_fraction": 0.6730769276618958,
"avg_line_length": 12.25,
"blob_id": "d94b5c86c0a7577d9d5177c16a97e8867c3b7d98",
"content_id": "3ac755ca50e29b0014f25ebae276dacad7241a92",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "INI",
"length_bytes": 52,
"license_type": "permissive",
"max_line_length": 20,
"num_lines": 4,
"path": "/conf.ini",
"repo_name": "CelebradoJonathan/consumer",
"src_encoding": "UTF-8",
"text": "[args]\nhostname = 127.0.0.1\nqueue = listdir\nsecs = 5"
}
] | 3 |
MM-Phoenix/Beat-the-Hackers
|
https://github.com/MM-Phoenix/Beat-the-Hackers
|
7605c7b4c0aac0118cff3aa756b279d57d900511
|
46fe7dc57d8c58cee904923dc2defc94f785da2f
|
1dd71a32748b66572d258f278da81e7b944415f4
|
refs/heads/master
| 2022-12-16T06:06:47.598331 | 2020-09-11T19:44:54 | 2020-09-11T19:44:54 | 294,124,395 | 1 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5674800872802734,
"alphanum_fraction": 0.5896414518356323,
"avg_line_length": 25.919462203979492,
"blob_id": "55ab39c90bb59df3de73f705271f69866b414b03",
"content_id": "50a7bdb1117ce87ac31db282b2fceda771aa3a96",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4016,
"license_type": "permissive",
"max_line_length": 102,
"num_lines": 149,
"path": "/Password Generator V.python 2.7 🔐🐭.py",
"repo_name": "MM-Phoenix/Beat-the-Hackers",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Aug 27 10:30:25 2020\n\n@author: Phoenix\n\"\"\"\n\n\nimport random\nimport sys\n\nWELCOME = \"\\nWelcome to Password Generator\\n\"\nSTART = \"Enter \\\"OK\\\" to generate your unique password.\\nEnter \\\"QUIT\\\" to exit the program.\\n\"\nCONTINUE = [\"ok\", \"okay\", \"yes\", \"yep\", \"yeah\", \"y\", \"continue\", \"c\"]\nEXIT = [\"nope\", \"no\", \"n\", \"non\", \"ni\", \"exit\", \"e\", \"quit\", \"q\"]\nBYE = [\"Bye Bye!\", \"See you soon!\", \"Goodbye!\", \"See ya!\", \"Take care\"]\nWRONG_COMAND = \"\\nI didn't understand your input.\\n\"\nAGAIN = \"Do you want to generate another password?\\n\"\nCLICKS = \"\\nYou will get a strong password in a few clicks\\n\"\nREMEMBER = \"Remember, a strong password should be at least 16 characters long. \"\n\nENTER1 = \"Please, enter the desired length of your password. (16-50)\\n\"\nENTER2 = \"Please, enter a length between 16 and 50\\n\"\nENTER3 = \"Please, try again. \"\nWEAK = \"Your password would be too weak! You don't want hackers to log in to your account.\"\nNO_WAY = \"No way! Length should be an integer number between 16 and 50.\"\nLONG = \"Okay you want a really strong password, but this is too much!\"\n\nDECORATION = \"\\n\" + \"~ \" * 40 + \"\\n\"\nYOUR_PASS = \"\\nYour unique unbeatable password, {} charactes long:\\n\\n\"\n\nALPHA = \"qwertyuiopasdfghjklzxcvbnm\"\nSYM = \"!@#$%^&*()-=_+[];',.<>?:{}\"\nNUMS = \"1234567890\"\n#---------------------------------------------------------------------------------------------\ndef start():\n comand = (raw_input(START)).lower()\n while comand in CONTINUE:\n decorate(CLICKS)\n length = get_length(REMEMBER, ENTER1)\n check_length(length)\n print(AGAIN)\n start()\n if comand in EXIT:\n print(DECORATION)\n sys.exit(random.choice(BYE))\n else:\n print(WRONG_COMAND)\n start()\ndef decorate(to_decor):\n \"\"\"\n Parameter : to_decor: string/function to be decorated\n Adds decorations before and after the string\n\n \"\"\"\n print(DECORATION)\n print(to_decor)\n print(DECORATION)\n\ndef password_generator(L):\n \"\"\"\n\n Parameters\n ----------\n L : integer: 16 <= x <= 50.\n\n Returns\n -------\n password : string YOUR_PASS + string password L characters long.\n \n >>> L = 30 ---> _:y:y=!2JH>DT56vG@dSEu-_C2_0eh (ex)\n\n \"\"\"\n List = list(ALPHA + ALPHA.upper() + SYM + NUMS)\n password = \"\"\n for i in range (L):\n password += str(random.choice(List)) #random.choises only will return a list, we need to join it\n return YOUR_PASS.format(L) + password + \"\\n\"\n\ndef get_length(string, enter):\n \"\"\"\n \n\n Parameters\n ----------\n string : string to be printed.\n enter : string ask for user input.\n\n Returns\n -------\n length : string of user input, \n NEEDS TO BE AN INTEGER BETWEEN 16 AND 50 \n (may be number, character, symbol, minor, major. this will be checked later).\n\n \"\"\"\n print(string)\n length = raw_input(enter)\n return length\n\ndef check_length(L):\n\t\"\"\"\n \n\n Parameters\n ----------\n L : string length got from get_length.\n\n Checks that L is a number.\n Rounds L to the nearest integer.\n Checks that L is between 16 and 50.\n Will ask for user input again if not. 
(get_length)\n Will move to password_generator if L is a number between 16 and 50.\n \n >>> L = \"20\" ---> OK\n >>> decorate(password_generator(20)) ---> )b_WwlJ-#GG,&!CKJinc (ex)\n\n >>> L = \"6\" or L = \"543\" ---> TOO WEAK or TOO MUCH\n >>> ask user input againd and check it'\n \n >>> L = \"43.9\" ---> L = 44\n \n >>> L = \"a9-\" ---> EXEPTION\n >>> ask user input again and check it'\n \n \"\"\"\n\ttry:\n\t\tlength = round(float(L))\n\t\tif length in range(16, 51):\n\t\t\tdecorate(password_generator(int(length)))\n \n\t\telse:\n\t\t\tprint(DECORATION)\n\t\t\tnew_L = get_length(WEAK if length < 16 else LONG, ENTER2)\n\t\t\tcheck_length(new_L)\n\texcept:\n\t\tprint(DECORATION)\n\t\tnew_L = get_length(NO_WAY, ENTER3)\n\t\tcheck_length(new_L)\n\ndef run():\n \"\"\"\n main function. will run the program.\n\n \"\"\"\n decorate(WELCOME)\n start()\n \nrun() \n"
},
{
"alpha_fraction": 0.7436878085136414,
"alphanum_fraction": 0.7475134134292603,
"avg_line_length": 61.238094329833984,
"blob_id": "231130a1c7ac7981849e52df94f3c39a21a96f6c",
"content_id": "b927dfd972639ceb3e55a89c188a7c9de2e3bd1c",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1307,
"license_type": "permissive",
"max_line_length": 140,
"num_lines": 21,
"path": "/README.md",
"repo_name": "MM-Phoenix/Beat-the-Hackers",
"src_encoding": "UTF-8",
"text": "# Beat the Hackers\nMy first repository on GitHub. For those who care about security on the web. Create strong passwords for your accounts. <br />\n\nPassword Generator\n-\nRun the code on python 3+ and generate your password. <br />\nThe characters are randomized and you can choose the length of the password. It includes upper and lowercase, symbols and numbers. <br />\n\nSuggestions for creating your own password:\n-\n~ don't use the same password for multiple accounts <br />\n~ don't include personal information in your passwords <br />\n~ don't store your passwords on WebBrowsers <br />\n~ don't log in to sensitive accounts on others' computers, or when you're connected to a public WiFi, web proxy, Tor, free VPN <br />\n~ don't send sensitive information via unecrypted connections (HTTP, FTP). Use encrypted connections (HTTPS, FTP) whenever possible <br />\n~ do turn on 2-Factor-Authentication whenever possible <br />\n~ passwords should be at least 16 characters long <br />\n~ do use combinations of upper and lowercase, symbols and numbers <br />\n~ if you want to use a word or a phrase do mispell it by changing letters to numbers and symbles: \"I am hungry\" --> \"1@m#Un8Ry!\" <br />\n~ do change your passwords regularly <br />\n~ do log out from your accounts when you're finished using them <br />\n"
}
] | 2 |
goodger/goodger.github.io
|
https://github.com/goodger/goodger.github.io
|
e246f3a7c387898f9b5a362cf63f1ca59da4555f
|
8aa4949ffd181b437fef9fbb5048acf01a958a9a
|
54e6b80a7c0faa5b3e61cfc9aa0ebf3633d9c5af
|
refs/heads/master
| 2021-07-06T22:52:23.652979 | 2020-08-02T22:55:00 | 2020-08-02T22:55:00 | 142,926,169 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6916128993034363,
"alphanum_fraction": 0.7138709425926208,
"avg_line_length": 53.385963439941406,
"blob_id": "76ab4e9875bad2abc5b0b67b981006918b4fbef8",
"content_id": "c7aa2f8fd38d3935f642cc1cb2dd747f837cc887",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "HTML",
"length_bytes": 3100,
"license_type": "no_license",
"max_line_length": 207,
"num_lines": 57,
"path": "/personal/pycon_dc_2004/day_3.html",
"repo_name": "goodger/goodger.github.io",
"src_encoding": "UTF-8",
"text": "<?xml version=\"1.0\" encoding=\"utf-8\" ?>\n<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 Transitional//EN\" \"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd\">\n<html xmlns=\"http://www.w3.org/1999/xhtml\" xml:lang=\"en\" lang=\"en\">\n<head>\n<meta http-equiv=\"Content-Type\" content=\"text/html; charset=utf-8\" />\n<meta name=\"generator\" content=\"Docutils 0.15b.dev: http://docutils.sourceforge.net/\" />\n<title>A Week at PyCon DC 2004 -- Day 3</title>\n<meta name=\"author\" content=\"David Goodger\" />\n<link rel=\"stylesheet\" href=\"../../default.css\" type=\"text/css\" />\n</head>\n<body>\n<div class=\"document\" id=\"a-week-at-pycon-dc-2004-day-3\">\n<h1 class=\"title\">A Week at <a class=\"reference external\" href=\"http://pycon.org/dc2004/\">PyCon DC 2004</a> -- Day 3</h1>\n<h2 class=\"subtitle\" id=\"sprint-day-3-monday-march-22\">Sprint Day 3 -- Monday, March 22</h2>\n<table class=\"docinfo\" frame=\"void\" rules=\"none\">\n<col class=\"docinfo-name\" />\n<col class=\"docinfo-content\" />\n<tbody valign=\"top\">\n<tr><th class=\"docinfo-name\">Author:</th>\n<td>David Goodger</td></tr>\n<tr><th class=\"docinfo-name\">Contact:</th>\n<td><a class=\"first last reference external\" href=\"mailto:goodger@python.org\">goodger@python.org</a></td></tr>\n</tbody>\n</table>\n<p>The watch's alarm didn't wake me up, possibly because I was wearing\nit, and I was bundled up under the covers. I woke up a few minutes\nlater anyway.</p>\n<p>Was cold this morning, so I was glad for my sweater, scarf, hat &\ngloves. A lot more traffic on Monday morning than Saturday & Sunday.</p>\n<p>Mike Orr joined the <a class=\"reference external\" href=\"http://www.python.org/cgi-bin/moinmoin/DocutilsSprint\">Docutils sprint</a> for the morning; he and Reggie\nbegan working on an API for document fragments, particularly HTML. By\nevening, Reggie Dugard had made good progress, almost complete. Aahz\nseems to be making steady strides with his FrameMaker writer, and Matt\nGilbert with MoinMoin integration. Ian Bicking was back with us after\nattending the Zope tutorial yesterday, but Oliver Rutherfurd is at\nwork today. Ed Loper and Bill Sconce also continued hacking on\nDocPy/LaTeX; Steve Holden is now busy with conference matters.</p>\n<p>For lunch, Reggie and I visited the conference building's food court,\nand impressive place with a dozen or so vendors, from Burger King and\nTaco Bell to Crepes and Chinese. We settled on Chinese; I had sweet &\nhot chicken on rice. We got big helpings; again I wasn't hungry for\ndinner. There were some kind of peppers in the food and I ate one by\naccident. Hot!</p>\n<ul class=\"simple\">\n<li>next: <a class=\"reference external\" href=\"day_4.html\">Day 4 -- Sprint Day 4 -- Tuesday, March 23</a></li>\n<li>up: <a class=\"reference external\" href=\"./\">Contents</a></li>\n</ul>\n</div>\n<div class=\"footer\">\n<hr class=\"footer\" />\n<a class=\"reference external\" href=\"day_3.txt\">View document source</a>.\nGenerated on: 2018-08-15 04:25 UTC.\nGenerated by <a class=\"reference external\" href=\"http://docutils.sourceforge.net/\">Docutils</a> from <a class=\"reference external\" href=\"http://docutils.sourceforge.net/rst.html\">reStructuredText</a> source.\n\n</div>\n</body>\n</html>\n"
},
{
"alpha_fraction": 0.5445026159286499,
"alphanum_fraction": 0.5523560047149658,
"avg_line_length": 20.22222137451172,
"blob_id": "750c45f33850b584328eead14d240cead4bc0f5d",
"content_id": "7c44fda56ad8a22cf771d2c57a8070e30fc91244",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1146,
"license_type": "no_license",
"max_line_length": 59,
"num_lines": 54,
"path": "/projects/pycon/2007/idiomatic/generators.py",
"repo_name": "goodger/goodger.github.io",
"src_encoding": "UTF-8",
"text": "class C:\n\n i = 0\n \n def next(self):\n self.i += 1\n return self.i\n\n def __iter__(self):\n return self\n\n\nclass MyRangeClass:\n\n \"\"\"1-argument 'range' equivalent.\"\"\"\n\n def __init__(self, stop):\n self.stop = stop\n self.value = 0\n\n def __iter__(self):\n # Must return an object with a \"next\" method.\n # Usually just returns self (self.next must exist).\n return self\n\n def next(self):\n # Return the next value, or raise StopIteration.\n if self.value >= self.stop:\n raise StopIteration\n value = self.value\n self.value += 1\n return value\n\n\ndef my_range_generator(stop):\n value = 0\n while value < stop:\n yield value\n value += 1\n\n\ndef _linewise_generator(data_file):\n # Uses file.readline to keep the file position in sync.\n # \"for line in file\" gets out of sync.\n while 1:\n line = data_file.readline()\n if line:\n yield line\n else:\n break\n\ndef _decoded_generator(self, source):\n for row in source:\n yield [entry.decode('latin-1') for entry in row]\n"
},
{
"alpha_fraction": 0.681172788143158,
"alphanum_fraction": 0.7125207185745239,
"avg_line_length": 55.48958206176758,
"blob_id": "b17b4f4aeef070a8dee04d4950f556d4a142d4f8",
"content_id": "b47fb715ecc6362a58d1ee84207664142ea13bf6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "HTML",
"length_bytes": 5425,
"license_type": "no_license",
"max_line_length": 244,
"num_lines": 96,
"path": "/projects/conferences/index.html",
"repo_name": "goodger/goodger.github.io",
"src_encoding": "UTF-8",
"text": "<?xml version=\"1.0\" encoding=\"utf-8\" ?>\n<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 Transitional//EN\" \"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd\">\n<html xmlns=\"http://www.w3.org/1999/xhtml\" xml:lang=\"en\" lang=\"en\">\n<head>\n<meta http-equiv=\"Content-Type\" content=\"text/html; charset=utf-8\" />\n<meta name=\"generator\" content=\"Docutils 0.15b.dev: http://docutils.sourceforge.net/\" />\n<title>Conferences</title>\n<link rel=\"stylesheet\" href=\"../../default.css\" type=\"text/css\" />\n</head>\n<body>\n<div class=\"document\" id=\"conferences\">\n<h1 class=\"title\">Conferences</h1>\n\n<div class=\"large sidebar\">\n<p class=\"first sidebar-title\">Related Pages</p>\n<ul class=\"last simple\">\n<li><a class=\"reference external\" href=\"../pycon/\">PyCon</a></li>\n<li><a class=\"reference external\" href=\"../python/\">Python</a></li>\n<li><a class=\"reference external\" href=\"../\">David Goodger’s Projects</a></li>\n<li><a class=\"reference external\" href=\"../../\">My home page</a></li>\n</ul>\n</div>\n<p>Here is a list of talks and tutorails I have presented at various\nconferences over the years.</p>\n<div class=\"contents topic\" id=\"contents\">\n<p class=\"topic-title first\">Contents</p>\n<ul class=\"simple\">\n<li><a class=\"reference internal\" href=\"#pycon\" id=\"id3\">PyCon</a></li>\n<li><a class=\"reference internal\" href=\"#pycon-india-2010\" id=\"id4\">PyCon India 2010</a></li>\n<li><a class=\"reference internal\" href=\"#rupy-2009\" id=\"id5\">RuPy 2009</a></li>\n<li><a class=\"reference internal\" href=\"#montrealpython\" id=\"id6\">MontrealPython</a></li>\n<li><a class=\"reference internal\" href=\"#oscon\" id=\"id7\">OSCON</a></li>\n</ul>\n</div>\n<div class=\"admonition note\">\n<p class=\"first admonition-title\">Note</p>\n<p class=\"last\">If there is no link from a talk or tutorial presentation\nbelow, or no link to a particular format, it means that I do not\nhave files for that presentation. Please do not write to ask for\nthem, because <strong>they do not exist!</strong> If I do prepare files, I will\npost them here.</p>\n</div>\n<div class=\"section\" id=\"pycon\">\n<h1><a class=\"toc-backref\" href=\"#id3\">PyCon</a></h1>\n<p>I have been particularly involved with <a class=\"reference external\" href=\"http://www.python.org/community/pycon/\">PyCon</a>, the annual U.S. Python\ncommunity conference, so <a class=\"reference external\" href=\"../pycon/\">my presentations at PyCon have their own\npage</a>.</p>\n</div>\n<div class=\"section\" id=\"pycon-india-2010\">\n<h1><a class=\"toc-backref\" href=\"#id4\">PyCon India 2010</a></h1>\n<p>I was invited to give a keynote talk at PyCon India 2010 in Bangalore,\ntitled <cite>Python in India</cite>. I also gave a talk about my <a class=\"reference external\" href=\"http://puzzler.sourceforge.net/\">Polyform\nPuzzler</a> project.</p>\n</div>\n<div class=\"section\" id=\"rupy-2009\">\n<h1><a class=\"toc-backref\" href=\"#id5\">RuPy 2009</a></h1>\n<p><a class=\"reference external\" href=\"http://rupy.eu\">RuPy</a> is the "strongly dynamic conference"\nfeaturing talks about Ruby, Python, and other languages and\ntechnologies. 
I was invited to give a "community-oriented talk"\nat RuPy 2009 in Poznan, Poland, billed as a keynote:</p>\n<ul class=\"simple\">\n<li><a class=\"reference external\" href=\"http://www.artima.com/weblogs/viewpost.jsp?thread=276019\">The story of my visit to Poznan (blog post)</a></li>\n<li>Video of my RuPy 2009 talk <a class=\"reference external\" href=\"http://rupy.blip.tv/file/3011513/\">via rupy.blip.tv</a> or <a class=\"reference external\" href=\"http://www.mefeedia.com/watch/26976224\">via mefeedia.com</a> (the beginning of the\nvideo is a bit messed up, but persevere, it's soon fixed)</li>\n<li><a class=\"reference external\" href=\"rupy/2009/handout.html\">Plain HTML handout/script</a></li>\n<li><a class=\"reference external\" href=\"rupy/2009/presentation.html\">S5 HTML slideshow</a> (use the arrow keys\nto navigate, or mouse over the lower left corner for controls)</li>\n<li><a class=\"reference external\" href=\"rupy/2009/presentation.txt\">reStructuredText source</a></li>\n</ul>\n</div>\n<div class=\"section\" id=\"montrealpython\">\n<h1><a class=\"toc-backref\" href=\"#id6\">MontrealPython</a></h1>\n<p>I gave a talk about my <a class=\"reference external\" href=\"http://puzzler.sourceforge.net/\">Polyform Puzzler</a> project at the first\nmeeting of the Montreal Python users group on February 7, 2008. It\nwas a lot of fun, and the group seemed to enjoy it.</p>\n<ul class=\"simple\">\n<li><a class=\"reference external\" href=\"http://video.google.com/videoplay?docid=4654600992090030361\">Video of the talk</a></li>\n</ul>\n</div>\n<div class=\"section\" id=\"oscon\">\n<h1><a class=\"toc-backref\" href=\"#id7\">OSCON</a></h1>\n<p>In 2007 I was invited to present my tutorial, <a class=\"reference external\" href=\"../pycon/2007/idiomatic/\">Code Like a Pythonista:\nIdiomatic Python</a>, at <a class=\"reference external\" href=\"http://conferences.oreilly.com/oscon\">OSCON</a>. This was\na revised encore presentation (to a different and larger audience) of\na tutorial I had presented at <a class=\"reference external\" href=\"../pycon/#2007\">PyCon 2007</a>.</p>\n</div>\n</div>\n<div class=\"footer\">\n<hr class=\"footer\" />\n<a class=\"reference external\" href=\"index.txt\">View document source</a>.\nGenerated on: 2018-08-15 04:25 UTC.\nGenerated by <a class=\"reference external\" href=\"http://docutils.sourceforge.net/\">Docutils</a> from <a class=\"reference external\" href=\"http://docutils.sourceforge.net/rst.html\">reStructuredText</a> source.\n\n</div>\n</body>\n</html>\n"
},
{
"alpha_fraction": 0.6498855948448181,
"alphanum_fraction": 0.6659038662910461,
"avg_line_length": 30.214284896850586,
"blob_id": "d889a1dd1333638006ffe1b0cc49e98df4e88172",
"content_id": "27b17ef920411990c21b43085d08852caf7462f9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Makefile",
"length_bytes": 437,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 14,
"path": "/projects/conferences/rupy/2009/Makefile",
"repo_name": "goodger/goodger.github.io",
"src_encoding": "UTF-8",
"text": "all: presentation.html handout.html\n\npresentation.html : presentation.txt docutils.conf\n\t@echo \"checking in $< and making $@\"\n\t#@svn ci -q --force-log -m. $<\n\[email protected] --strip-comments $< $@\n\nhandout.html : presentation.txt docutils.conf\n\t@echo \"checking in $< and making $@\"\n\t#@svn ci -q --force-log -m. $<\n\[email protected] --strip-comments $< $@\n\nbundle :\n\ttar cvzf ../david-goodger-rupy-2009.tgz --exclude .svn --exclude .DS_Store .\n"
},
{
"alpha_fraction": 0.5198126435279846,
"alphanum_fraction": 0.5249423980712891,
"avg_line_length": 32.88161087036133,
"blob_id": "e940731c0d27c25d0bebaa0365edb9d53f15b77c",
"content_id": "d2955788fc0c83cb469038d177894c39d3150f1c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 13451,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 397,
"path": "/projects/emacs/menumaker.py",
"repo_name": "goodger/goodger.github.io",
"src_encoding": "UTF-8",
"text": "#! /usr/bin/env python\n# $Id: menumaker.py 3140 2013-01-29 20:47:43Z david $\n# by David Goodger <[email protected]>\n\n# TODO: make the ``include`` directive work in all contexts (e.g. portions of\n# menus, not just whole menus) and preserve indentation.\n\n\"\"\"\\\nConverts a menu-definition table into emacs-lisp code for use by easy-menu.\n\nInput must be of this form::\n\n # MENU NAME DESCRIPTION SYMBOL\n Dir My Custom Directory Menu my-dir-menu\n #\n # MENU ITEM/- KEY/- SYMBOL PATH or LISP CODE\n D:/ f5 w d find-file-in-d d:/\n -\n ~/.emacs f5 . . find-file-.emacs w:/.emacs\n # submenu titles stand alone on a line \n ~/signatures\n # submenu items are indented one tab per level\n / - find-in-signatures ~/signatures/\n # BLANK LINE ENDS MENU DEFINITION, ALLOWS NEW DEFINITION TO BEGIN\n\nColumns are separated by one or more tabs (*not* spaces). Hyphens (\"-\") may be\nused to indicate menu separators and \"no key equivalent\". If a symbol is used\nfor more than one menu item, the lisp code should only be defined the first\ntime.\n\nA line may begin with an ``include`` directive, to insert data from another\nfile for entire menus (submenus, parts of menus too?). Syntax:\n``include(filename)`` on a line by itself.\n\nThe output looks like this::\n\n (defun find-file-in-d () (interactive)\n (find-file-in \"d:/\"))\n (defun find-file-.emacs () (interactive)\n (find-file \"w:/.emacs\"))\n (defun find-in-signatures () (interactive)\n (find-in \"~/signatures/\"))\n \n (define-prefix-command 'f5-map)\n (global-set-key [f5] 'f5-map)\n \n (define-prefix-command 'f5-.-map)\n (define-key f5-map \".\" 'f5-.-map)\n (define-key f5-.-map \".\" 'find-file-.emacs)\n \n (define-prefix-command 'f5-w-map)\n (define-key f5-map \"w\" 'f5-w-map)\n (define-key f5-w-map \"d\" 'find-file-in-d)\n \n (easy-menu-define\n my-dir-menu (current-global-map) \"My Custom Directory Menu\"\n '(\"Dir\"\n [\"D:/\" find-file-in-d t]\n \"-\"\n [\"~/.emacs\" find-file-.emacs t]\n (\"~/signatures\"\n [\"/\" find-in-home-signatures t]\n )\n ))\n \n (easy-menu-add my-dir-menu)\n\nCopy this elisp function definition (required by the generated code) to your\n.emacs file::\n\n (defun find-in (dir)\n \"Do find-file in minibuffer, starting with the directory given.\"\n (let (new-buffer)\n (let ((default-directory dir))\n (save-excursion\n (save-window-excursion\n (setq new-buffer (call-interactively 'find-file)))))\n (switch-to-buffer new-buffer)))\n\nFinally, your .emacs file needs to load the elisp produced. 
For example::\n\n (load-file \"~/.emacslib/initmenus.el\")\n\"\"\"\n\nimport sys\nimport os\nimport time\nimport re\nimport fileinput\nimport optparse\n\n\n(hNAME, hDESC, hSYMBOL) = range(3)\n(iNAME, iKEY, iSYMBOL, iLISP) = range(4)\n(mNAME, mSYMBOL) = range(2)\n\n\nclass MenuMaker:\n\n header = \"\"\"\\\n;; -*- coding: utf-8 -*-\n;; Generated by menumaker.py (%s)\n;; %s\"\"\"\n colsep = re.compile(r'\\t+')\n functiondef = '(defun %s () (interactive)\\n %s)'\n keymapdef = '(define-prefix-command \\'%s-map)'\n globalsetkeydef = '(global-set-key [%s] \\'%s)'\n definekeydef = '(define-key %s-map \"%s\" \\'%s)'\n menustartdef = \"\"\"\\\n(easy-menu-define\n %s (current-global-map)\n \"%s\"\n `(,(encode-coding-string \"%s\" 'mac-roman)\"\"\"\n menuitemdef = ' [\"%s\" %s t]'\n menusepdef = ' \"-\"'\n menuenddef = ' ))\\n(easy-menu-add %s)'\n sublevel = 0\n\n def __init__(self, args=None, test=False):\n self.input = InputFiles(args)\n self.menus = []\n self.keymaps = {}\n self.functions = {}\n self.test = test\n\n def mainloop(self):\n try:\n while 1:\n self.getmenu()\n except IndexError:\n if self.test:\n self.writedata_test()\n else:\n self.writedata()\n\n def getmenu(self):\n name, description, symbol = self.getheader()\n menu = []\n self.menus.append((name, description, symbol, menu))\n self.getitems(menu)\n\n def getheader(self):\n while 1:\n line = self.getline()\n if line:\n break\n parts = [s.strip() for s in self.colsep.split(line)]\n if len(parts) == 1 and self.process_directive(line):\n return self.getheader()\n elif len(parts) != 3:\n raise InputError(\n 'Exactly 3 columns required in memu header. (file %s line %s)'\n % (self.input.filename(), self.input.filelineno()))\n return parts\n\n def getitems(self, menu):\n while 1:\n line = self.getline()\n if not line:\n break\n parts = [s.strip() for s in self.colsep.split(line)]\n if parts[0]:\n self.sublevel = 0\n else:\n match = self.colsep.match(line)\n if not match:\n raise InputError(\n 'Only tabs allowed for indentation. (file %s line %s)'\n % (self.input.filename(), self.input.filelineno()))\n sublevel = match.end()\n if sublevel > self.sublevel:\n raise InputError(\n 'Submenu level change without submenu title.'\n '(file %s line %s)'\n % (self.input.filename(), self.input.filelineno()))\n del parts[0]\n self.sublevel = sublevel\n if len(parts) == 1:\n if parts[0] != '-':\n self.sublevel += 1\n menu.append((self.sublevel, parts[0]))\n continue\n if not (3 <= len(parts) <= 4):\n raise InputError(\n '3 or 4 columns required per memu item. 
(file %s line %s)'\n % (self.input.filename(), self.input.filelineno()))\n if parts[0].startswith('\\\\'):\n parts[0] = parts[0][1:]\n menu.append((self.sublevel, (parts[iNAME], parts[iSYMBOL])))\n self.setkeys(parts[iKEY], parts[iSYMBOL])\n if (len(parts) == 4) and parts[iLISP]:\n lisp = parts[iLISP]\n if lisp.startswith('(') and lisp.endswith(')'):\n self.functions[parts[iSYMBOL]] = lisp\n elif lisp == '\"\"':\n self.functions[parts[iSYMBOL]] = ('(insert \"%s\")'\n % parts[iNAME])\n elif lisp.startswith('\"') and lisp.endswith('\"'):\n self.functions[parts[iSYMBOL]] = '(insert %s)' % lisp\n elif lisp.endswith('/'):\n self.functions[parts[iSYMBOL]] = '(find-in \"%s\")' % lisp\n else:\n self.functions[parts[iSYMBOL]] = '(find-file \"%s\")' % lisp\n \n def setkeys(self, keystring, symbol):\n if keystring == '-':\n return\n keys = keystring.split()\n keymap = self.keymaps\n for i in range(len(keys) - 1):\n keymap = keymap.setdefault(keys[i], {})\n keymap[keys[-1]] = symbol\n\n def getline(self):\n while 1:\n line = self.input[self.input.lineno()].rstrip()\n if (not line) or (line[0] != '#'):\n break\n return line\n\n def writedata(self):\n self.writeheader()\n self.writefunctions()\n self.writekeymaps()\n self.writemenus()\n #print ''.join(line for (line,index,name) in self.input._lines)\n\n def writeheader(self):\n print self.header % (sys.modules[self.__class__.__module__].__file__,\n time.strftime('%Y-%m-%dT%H:%M:%S',\n time.localtime(time.time())))\n\n def writefunctions(self):\n print\n for (symbol, lisp) in self.functions.items():\n print self.functiondef % (symbol, lisp)\n\n def writekeymaps(self):\n for key, value in self.keymaps.items():\n if type(value) == type(''):\n print self.globalsetkeydef % (key, key)\n elif type(value) == type({}):\n print\n print self.keymapdef % (key)\n print self.globalsetkeydef % (key, key + '-map')\n self.writenestedkeymaps(key, value)\n else:\n raise TypeError('Bad type for keymap value')\n\n def writenestedkeymaps(self, mapkey, map):\n for key, value in map.items():\n if type(value) == type(''): # symbol\n print self.definekeydef % (mapkey, key, value)\n elif type(value) == type({}):\n print\n print self.keymapdef % (mapkey + '-' + key)\n print self.definekeydef % (mapkey, key,\n mapkey + '-' + key + '-map')\n self.writenestedkeymaps(mapkey + '-' + key, value)\n else:\n raise TypeError('Bad type for nested keymap value')\n\n def writemenus(self):\n menus = self.menus\n menus.reverse()\n for name, description, symbol, menu in menus:\n print\n print self.menustartdef % (symbol, description, name)\n self.sublevel = 0\n for level, item in menu:\n if item == '-':\n self.writeitem(level, self.menusepdef)\n elif isinstance(item, tuple):\n self.writeitem(\n level, self.menuitemdef % (item[mNAME], item[mSYMBOL]))\n else:\n self.writeitem(level, ' (\"%s\"' % item, new=True)\n if self.sublevel:\n print ' ', ')' * self.sublevel\n print self.menuenddef % (symbol)\n\n def writeitem(self, level, text, new=False):\n if level < self.sublevel or new and level == self.sublevel:\n print ' ', ' ' * self.sublevel, ')' * (self.sublevel - level\n + new)\n print level * ' ' + text\n self.sublevel = level\n\n def writedata_test(self):\n from pprint import pprint\n print 'functions:'\n pprint(self.functions)\n print '\\nkeymaps:'\n pprint(self.keymaps)\n print '\\nmenus:'\n pprint(self.menus)\n\n def process_directive(self, line):\n try:\n assert line.endswith(')')\n directive, argstr = line.split('(')\n args = argstr[:-1].split(',')\n self._directives[directive](self, *args)\n 
except (AssertionError, IndexError):\n return False\n return True\n\n def include(self, filename):\n self.input.insert(filename)\n\n _directives = {'include': include}\n\n\nclass InputFiles(object):\n\n# Methods: , filelineno, lineno, __getitem__.\n\n def __init__(self, filenames):\n self._lines = []\n self._filename = None\n self._filelineno = None\n self._lineno = -1\n for filename in filenames:\n try:\n lines = open(filename, 'rt').readlines()\n except IOError, error:\n print >>sys.stderr, '%s: %s' % (error.__class__.__name__, error)\n print >>sys.stderr, 'filename = %s' % filename\n print >>sys.stderr, 'cwd = %s' % os.getcwd()\n raise\n self._lines.extend((line, index, filename)\n for (index, line) in enumerate(lines))\n\n def __getitem__(self, i):\n if i != self._lineno:\n raise RuntimeError, \"accessing lines out of order\"\n try:\n return self.next()\n except StopIteration:\n raise IndexError, \"end of input reached\"\n\n def __iter__(self):\n return self\n\n def next(self):\n try:\n self._lineno += 1\n line, filelineno, filename = self._lines[self._lineno]\n except IndexError:\n raise StopIteration\n else:\n self._filelineno = filelineno\n self._filename = filename\n return line\n\n def filename(self):\n return self._filename\n\n def filelineno(self):\n return self._filelineno\n\n def lineno(self):\n return self._lineno\n\n def insert(self, filename):\n \"\"\"Insert the lines from `filename` after the current line.\"\"\"\n lines = open(filename, 'rt').readlines()\n newlines = [(line, index, filename)\n for (index, line) in enumerate(lines)]\n self._lines[self._lineno+1:self._lineno+1] = newlines\n\n\nclass InputError(Exception):\n \"\"\"InputError\"\"\"\n pass\n\n\nusage = '%prog [options] < input > output'\ndescription = ('Convert a menu-definition table into emacs-lisp code for '\n 'use by easy-menu.')\n\ndef main(argv=None):\n parser = optparse.OptionParser(usage=usage, description=description)\n parser.add_option('-d', '--describe', action='store_true',\n help='describe the input data format')\n parser.add_option('-t', '--test', action='store_true',\n help='produce test output (internal data structures)')\n (options, args) = parser.parse_args()\n if options.describe:\n print >>sys.stderr, __doc__,\n sys.exit(0)\n maker = MenuMaker(args, test=options.test)\n maker.mainloop()\n\n\nif __name__ == '__main__':\n main()\n"
},
{
"alpha_fraction": 0.65329509973526,
"alphanum_fraction": 0.6618911027908325,
"avg_line_length": 30.727272033691406,
"blob_id": "fa1c312ff841fe19517aa51ab13b861451f2eddc",
"content_id": "7832f2daa9474e288c144e2258b752c302e46b41",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Makefile",
"length_bytes": 349,
"license_type": "no_license",
"max_line_length": 50,
"num_lines": 11,
"path": "/projects/pycon/2006/docutils-arch/Makefile",
"repo_name": "goodger/goodger.github.io",
"src_encoding": "UTF-8",
"text": "all: presentation.html handout.html\n\npresentation.html : presentation.txt docutils.conf\n\t@echo \"checking in $< and making $@\"\n\t#@svn ci -q --force-log -m. $<\n\[email protected] --strip-comments $< $@\n\nhandout.html : presentation.txt docutils.conf\n\t@echo \"checking in $< and making $@\"\n\t#@svn ci -q --force-log -m. $<\n\[email protected] --strip-comments $< $@\n"
},
{
"alpha_fraction": 0.6530826091766357,
"alphanum_fraction": 0.6635732650756836,
"avg_line_length": 68.61798095703125,
"blob_id": "d00c6ecf2c62a98dc1cc35ca577dd0185718e283",
"content_id": "e1e58222ac2d42edfd67e1eddf14b748c77d92b2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "HTML",
"length_bytes": 12422,
"license_type": "no_license",
"max_line_length": 454,
"num_lines": 178,
"path": "/personal/puzzles/rubik/solution.html",
"repo_name": "goodger/goodger.github.io",
"src_encoding": "UTF-8",
"text": "<?xml version=\"1.0\" encoding=\"utf-8\" ?>\n<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 Transitional//EN\" \"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd\">\n<html xmlns=\"http://www.w3.org/1999/xhtml\" xml:lang=\"en\" lang=\"en\">\n<head>\n<meta http-equiv=\"Content-Type\" content=\"text/html; charset=utf-8\" />\n<meta name=\"generator\" content=\"Docutils 0.16b.dev: http://docutils.sourceforge.net/\" />\n<title>Rubik’s Cube Solution & Extras</title>\n<meta name=\"author\" content=\"David Goodger\" />\n<link rel=\"stylesheet\" href=\"../../../default.css\" type=\"text/css\" />\n</head>\n<body>\n<div class=\"document\" id=\"rubiks-cube-solution-extras\">\n<h1 class=\"title\">Rubik’s Cube Solution & Extras</h1>\n<table class=\"docinfo\" frame=\"void\" rules=\"none\">\n<col class=\"docinfo-name\" />\n<col class=\"docinfo-content\" />\n<tbody valign=\"top\">\n<tr><th class=\"docinfo-name\">Author:</th>\n<td>David Goodger</td></tr>\n<tr><th class=\"docinfo-name\">Contact:</th>\n<td><a class=\"first last reference external\" href=\"mailto:goodger@python.org\">goodger@python.org</a></td></tr>\n</tbody>\n</table>\n<div class=\"large sidebar\">\n<p class=\"first sidebar-title\">Related Pages</p>\n<ul class=\"last simple\">\n<li><a class=\"reference external\" href=\"../\">My puzzle pages</a></li>\n<li><a class=\"reference external\" href=\"../../\">My home page</a></li>\n</ul>\n</div>\n<div class=\"contents topic\" id=\"contents\">\n<p class=\"topic-title first\">Contents</p>\n<ul class=\"simple\">\n<li><a class=\"reference internal\" href=\"#rubiks-cube-solution\" id=\"id2\">Rubik’s Cube Solution</a><ul>\n<li><a class=\"reference internal\" href=\"#middle-layer-edges\" id=\"id3\">Middle Layer Edges</a></li>\n<li><a class=\"reference internal\" href=\"#bottom-corners\" id=\"id4\">Bottom Corners</a><ul>\n<li><a class=\"reference internal\" href=\"#adjacent-corners\" id=\"id5\">Adjacent Corners</a></li>\n<li><a class=\"reference internal\" href=\"#diagonally-opposite-corners\" id=\"id6\">Diagonally Opposite Corners</a></li>\n<li><a class=\"reference internal\" href=\"#swapping-the-other-two-corners\" id=\"id7\">Swapping The Other Two Corners</a></li>\n<li><a class=\"reference internal\" href=\"#finishing-the-corners\" id=\"id8\">Finishing The Corners</a></li>\n</ul>\n</li>\n<li><a class=\"reference internal\" href=\"#the-last-four-edges\" id=\"id9\">The Last Four Edges</a><ul>\n<li><a class=\"reference internal\" href=\"#adjacent-flipped-edges\" id=\"id10\">Adjacent Flipped Edges</a></li>\n<li><a class=\"reference internal\" href=\"#opposite-flipped-edges\" id=\"id11\">Opposite Flipped Edges</a></li>\n</ul>\n</li>\n</ul>\n</li>\n<li><a class=\"reference internal\" href=\"#extras\" id=\"id12\">Extras</a><ul>\n<li><a class=\"reference internal\" href=\"#patterns\" id=\"id13\">Patterns</a><ul>\n<li><a class=\"reference internal\" href=\"#six-spots\" id=\"id14\">Six Spots</a></li>\n<li><a class=\"reference internal\" href=\"#cross\" id=\"id15\">Cross</a></li>\n<li><a class=\"reference internal\" href=\"#cube-in-a-cube-in-a-cube\" id=\"id16\">Cube in a Cube in a Cube</a></li>\n</ul>\n</li>\n</ul>\n</li>\n</ul>\n</div>\n<div class=\"section\" id=\"rubiks-cube-solution\">\n<h1><a class=\"toc-backref\" href=\"#id2\">Rubik’s Cube Solution</a></h1>\n<p>Summarized from the solution pages mirrored at\n<<a class=\"reference external\" href=\"http://jjorg.chem.unc.edu/personal/monroe/cube/Denny3x3/\">http://jjorg.chem.unc.edu/personal/monroe/cube/Denny3x3/</a>> and\n<<a class=\"reference 
external\" href=\"http://www.helmsoft.org/cube/rubikscube/\">http://www.helmsoft.org/cube/rubikscube/</a>>, written by Denny Dedmore.\nThe initial instructions (for completing the first/top layer) and move\nicon legend have been omitted for brevity.</p>\n<div class=\"section\" id=\"middle-layer-edges\">\n<h2><a class=\"toc-backref\" href=\"#id3\">Middle Layer Edges</a></h2>\n<p>Find a middle-layer edge piece in the bottom row. Rotate it under the\nmatching side center color.</p>\n<p>To move it up & right:</p>\n<p><img alt=\"bl\" src=\"images/bl.gif\" /> <img alt=\"rd\" src=\"images/rd.gif\" /> <img alt=\"br\" src=\"images/br.gif\" /> <img alt=\"ru\" src=\"images/ru.gif\" /> , <img alt=\"br\" src=\"images/br.gif\" /> <img alt=\"fcw\" src=\"images/fcw.gif\" /> <img alt=\"bl\" src=\"images/bl.gif\" /> <img alt=\"fcc\" src=\"images/fcc.gif\" /></p>\n<p>To move it up & left:</p>\n<p><img alt=\"br\" src=\"images/br.gif\" /> <img alt=\"ld\" src=\"images/ld.gif\" /> <img alt=\"bl\" src=\"images/bl.gif\" /> <img alt=\"lu\" src=\"images/lu.gif\" /> , <img alt=\"bl\" src=\"images/bl.gif\" /> <img alt=\"fcc\" src=\"images/fcc.gif\" /> <img alt=\"br\" src=\"images/br.gif\" /> <img alt=\"fcw\" src=\"images/fcw.gif\" /></p>\n<p>If an edge piece is already in place but backwards, simply apply the\nabove sequences twice. Put a bogus piece from the bottom row in its\nplace, moving the problem piece to the bottom.</p>\n</div>\n<div class=\"section\" id=\"bottom-corners\">\n<h2><a class=\"toc-backref\" href=\"#id4\">Bottom Corners</a></h2>\n<p>Flip the Rubik’s Cube upside down; the former, unfinished bottom side\nbecomes the top. Choose a side color, which becomes the front.</p>\n<p>Find the two corners on the top layer with the front side’s color\n(regardless of orientation). They’ll either be adjacent or diagonally\nopposite.</p>\n<div class=\"section\" id=\"adjacent-corners\">\n<h3><a class=\"toc-backref\" href=\"#id5\">Adjacent Corners</a></h3>\n<p>Rotate the top layer so the corners matching the front color are in\nfront. 
If they need to be swapped:</p>\n<p><img alt=\"lu\" src=\"images/lu.gif\" /> <img alt=\"tr\" src=\"images/tr.gif\" /> <img alt=\"ld\" src=\"images/ld.gif\" /> <img alt=\"fcw\" src=\"images/fcw.gif\" /> , <img alt=\"tl\" src=\"images/tl.gif\" /> <img alt=\"fcc\" src=\"images/fcc.gif\" /> <img alt=\"lu\" src=\"images/lu.gif\" /> <img alt=\"tl\" src=\"images/tl.gif\" /> , <img alt=\"ld\" src=\"images/ld.gif\" /> <img alt=\"tl\" src=\"images/tl.gif\" /> <img alt=\"tl\" src=\"images/tl.gif\" /></p>\n</div>\n<div class=\"section\" id=\"diagonally-opposite-corners\">\n<h3><a class=\"toc-backref\" href=\"#id6\">Diagonally Opposite Corners</a></h3>\n<p>Rotate the top until the top left corner is in the correct position\nand then perform the following procedure to move the other corner into\nthe top right position:</p>\n<p><img alt=\"tl\" src=\"images/tl.gif\" /> <img alt=\"lu\" src=\"images/lu.gif\" /> <img alt=\"tr\" src=\"images/tr.gif\" /> <img alt=\"ld\" src=\"images/ld.gif\" /> , <img alt=\"fcw\" src=\"images/fcw.gif\" /> <img alt=\"tl\" src=\"images/tl.gif\" /> <img alt=\"fcc\" src=\"images/fcc.gif\" /> <img alt=\"lu\" src=\"images/lu.gif\" /> , <img alt=\"tl\" src=\"images/tl.gif\" /> <img alt=\"ld\" src=\"images/ld.gif\" /> <img alt=\"tl\" src=\"images/tl.gif\" /></p>\n</div>\n<div class=\"section\" id=\"swapping-the-other-two-corners\">\n<h3><a class=\"toc-backref\" href=\"#id7\">Swapping The Other Two Corners</a></h3>\n<p>If the remaining corners need to be swapped, turn the cube around 180\ndegrees and perform the following procedure:</p>\n<p><img alt=\"lu\" src=\"images/lu.gif\" /> <img alt=\"tr\" src=\"images/tr.gif\" /> <img alt=\"ld\" src=\"images/ld.gif\" /> <img alt=\"fcw\" src=\"images/fcw.gif\" /> , <img alt=\"tl\" src=\"images/tl.gif\" /> <img alt=\"fcc\" src=\"images/fcc.gif\" /> <img alt=\"lu\" src=\"images/lu.gif\" /> <img alt=\"tl\" src=\"images/tl.gif\" /> , <img alt=\"ld\" src=\"images/ld.gif\" /> <img alt=\"tl\" src=\"images/tl.gif\" /> <img alt=\"tl\" src=\"images/tl.gif\" /></p>\n</div>\n<div class=\"section\" id=\"finishing-the-corners\">\n<h3><a class=\"toc-backref\" href=\"#id8\">Finishing The Corners</a></h3>\n<p>Spin the entire cube (keeping the top up) until the top center colors\nmatch one of the 7 patterns below (ignore other colors).</p>\n<img alt=\"images/corners.png\" src=\"images/corners.png\" />\n<p>Orient the cube as shown above and perform this sequence:</p>\n<p><img alt=\"lu\" src=\"images/lu.gif\" /> <img alt=\"tr\" src=\"images/tr.gif\" /> <img alt=\"ld\" src=\"images/ld.gif\" /> <img alt=\"tr\" src=\"images/tr.gif\" /> , <img alt=\"lu\" src=\"images/lu.gif\" /> <img alt=\"tr\" src=\"images/tr.gif\" /> <img alt=\"tr\" src=\"images/tr.gif\" /> <img alt=\"ld\" src=\"images/ld.gif\" /> , <img alt=\"tr\" src=\"images/tr.gif\" /> <img alt=\"tr\" src=\"images/tr.gif\" /></p>\n<p>If the corners are not quite finished, match up the new pattern and\nperform the sequence once again. This process may have to be\nperformed up to three times.</p>\n</div>\n</div>\n<div class=\"section\" id=\"the-last-four-edges\">\n<h2><a class=\"toc-backref\" href=\"#id9\">The Last Four Edges</a></h2>\n<p>Usually at least one of the 4 remaining edges will be in place. Find\nthat piece and rotate the cube until it is positioned in the front.\nIf no edge is in place, choose any side to be the front. 
Perform the\nsequence below once then start over.</p>\n<p>Perform the moves below to put all 4 pieces in their correct\nlocations:</p>\n<p><img alt=\"vu\" src=\"images/vu.gif\" /> <img alt=\"tr\" src=\"images/tr.gif\" /> <img alt=\"vd\" src=\"images/vd.gif\" /> <img alt=\"tr\" src=\"images/tr.gif\" /> , <img alt=\"tr\" src=\"images/tr.gif\" /> <img alt=\"vu\" src=\"images/vu.gif\" /> <img alt=\"tr\" src=\"images/tr.gif\" /> <img alt=\"vd\" src=\"images/vd.gif\" /></p>\n<p>This step may have to be performed twice to get all edges in place.</p>\n<div class=\"section\" id=\"adjacent-flipped-edges\">\n<h3><a class=\"toc-backref\" href=\"#id10\">Adjacent Flipped Edges</a></h3>\n<p>Orient the flipped edges so one is in the front and one is to the\nright. Then perform this sequence:</p>\n<p><img alt=\"fcc\" src=\"images/fcc.gif\" /> <img alt=\"lu\" src=\"images/lu.gif\" /> <img alt=\"rd\" src=\"images/rd.gif\" /> <img alt=\"hl\" src=\"images/hl.gif\" /> , <img alt=\"rd\" src=\"images/rd.gif\" /> <img alt=\"rd\" src=\"images/rd.gif\" /> <img alt=\"hl\" src=\"images/hl.gif\" /> <img alt=\"hl\" src=\"images/hl.gif\" /> , <img alt=\"rd\" src=\"images/rd.gif\" /> <img alt=\"tr\" src=\"images/tr.gif\" /> <img alt=\"tr\" src=\"images/tr.gif\" /> <img alt=\"ru\" src=\"images/ru.gif\" /> ,\n<img alt=\"hr\" src=\"images/hr.gif\" /> <img alt=\"hr\" src=\"images/hr.gif\" /> <img alt=\"rd\" src=\"images/rd.gif\" /> <img alt=\"rd\" src=\"images/rd.gif\" /> , <img alt=\"hr\" src=\"images/hr.gif\" /> <img alt=\"ru\" src=\"images/ru.gif\" /> <img alt=\"tr\" src=\"images/tr.gif\" /> <img alt=\"tr\" src=\"images/tr.gif\" /> , <img alt=\"ld\" src=\"images/ld.gif\" /> <img alt=\"fcw\" src=\"images/fcw.gif\" /></p>\n</div>\n<div class=\"section\" id=\"opposite-flipped-edges\">\n<h3><a class=\"toc-backref\" href=\"#id11\">Opposite Flipped Edges</a></h3>\n<p>Orient the flipped edges to the left and right. 
Then:</p>\n<p><img alt=\"rd\" src=\"images/rd.gif\" /> <img alt=\"hl\" src=\"images/hl.gif\" /> <img alt=\"rd\" src=\"images/rd.gif\" /> <img alt=\"rd\" src=\"images/rd.gif\" /> , <img alt=\"hl\" src=\"images/hl.gif\" /> <img alt=\"hl\" src=\"images/hl.gif\" /> <img alt=\"rd\" src=\"images/rd.gif\" /> <img alt=\"tr\" src=\"images/tr.gif\" /> , <img alt=\"tr\" src=\"images/tr.gif\" /> <img alt=\"ru\" src=\"images/ru.gif\" /> <img alt=\"hr\" src=\"images/hr.gif\" /> <img alt=\"hr\" src=\"images/hr.gif\" /> ,\n<img alt=\"rd\" src=\"images/rd.gif\" /> <img alt=\"rd\" src=\"images/rd.gif\" /> <img alt=\"hr\" src=\"images/hr.gif\" /> <img alt=\"ru\" src=\"images/ru.gif\" /> , <img alt=\"tr\" src=\"images/tr.gif\" /> <img alt=\"tr\" src=\"images/tr.gif\" /></p>\n</div>\n</div>\n</div>\n<div class=\"section\" id=\"extras\">\n<h1><a class=\"toc-backref\" href=\"#id12\">Extras</a></h1>\n<div class=\"section\" id=\"patterns\">\n<h2><a class=\"toc-backref\" href=\"#id13\">Patterns</a></h2>\n<p>Source: <a class=\"reference external\" href=\"https://ruwix.com/the-rubiks-cube/rubiks-cube-patterns-algorithms/\">Pretty Rubik’s Cube patterns with algorithms</a></p>\n<div class=\"section\" id=\"six-spots\">\n<h3><a class=\"toc-backref\" href=\"#id14\">Six Spots</a></h3>\n<p>Permute the centers of the top, front, & right sides (T → F → R → T),\nas well as the down, bottom, & left sides (D → B → L → D).</p>\n<blockquote>\nU D' R L' F B' U D'</blockquote>\n</div>\n<div class=\"section\" id=\"cross\">\n<h3><a class=\"toc-backref\" href=\"#id15\">Cross</a></h3>\n<p>Rotate each corner in-place, resulting in a cross on each face:</p>\n<blockquote>\nR2 L' D F2 R' D' R' L U' D R D B2 R' U D2</blockquote>\n</div>\n<div class=\"section\" id=\"cube-in-a-cube-in-a-cube\">\n<h3><a class=\"toc-backref\" href=\"#id16\">Cube in a Cube in a Cube</a></h3>\n<p>Rotate the URF & DLB corners, 2×2 & 1×1.</p>\n<blockquote>\nU' L' U' F' R2 B' R F U B2 U B' L U' F U R F'</blockquote>\n</div>\n</div>\n</div>\n</div>\n<div class=\"footer\">\n<hr class=\"footer\" />\n<a class=\"reference external\" href=\"solution.txt\">View document source</a>.\nGenerated on: 2019-09-21 14:56 UTC.\nGenerated by <a class=\"reference external\" href=\"http://docutils.sourceforge.net/\">Docutils</a> from <a class=\"reference external\" href=\"http://docutils.sourceforge.net/rst.html\">reStructuredText</a> source.\n\n</div>\n</body>\n</html>\n"
},
{
"alpha_fraction": 0.6449348330497742,
"alphanum_fraction": 0.6569709181785583,
"avg_line_length": 25.945945739746094,
"blob_id": "f0b4720f40af8a1e0f4dcf0dfe04c863ac4e243c",
"content_id": "f388ea952f247f45682d4ff826e7922131dd0ebb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Makefile",
"length_bytes": 997,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 37,
"path": "/Makefile",
"repo_name": "goodger/goodger.github.io",
"src_encoding": "UTF-8",
"text": "all : recursive\n\t@find . -name '*.html' -print0 | xargs -0 $(MAKE) -s\n\nrecursive :\n\t@for makefile in `find . -mindepth 2 -name Makefile` ; do \\\n\t dir=`dirname $$makefile` ; \\\n\t echo \"making $$dir/\" ; \\\n\t ( cd $$dir ; make -s ) ; \\\n\tdone\n\npresentation.html : presentation.txt docutils.conf\n\t@echo \"making $@ from $<\"\n\t@python2 ~/projects/docutils/docutils/tools/s52html.py $< $@\n\nhandout.html : presentation.txt docutils.conf\n\t@echo \"making $@ from $<\"\n\t@python2 ~/projects/docutils/docutils/tools/rst2html.py $< $@\n\nresume_David_Goodger.html : resume_David_Goodger.txt resume.css docutils.conf\n\t@echo \"making $@ from $<\"\n\t@python2 ~/projects/docutils/docutils/tools/rst2html.py $< $@\n\n%.html : %.txt docutils.conf\n\t@echo \"making $@ from $<\"\n\t@python2 ~/projects/docutils/docutils/tools/rst2html.py $< $@\n\ncheckin : \n\t@svn ci -q --force-log -m. Makefile docutils.conf default.css\n\t@svn ci -q --force-log -m.\n\ncommit : checkin\n\ninstall : all\n\t@cd .. ; ./push\n\nupdated :\n\ttouch ../timestamp\n"
},
{
"alpha_fraction": 0.75,
"alphanum_fraction": 0.75,
"avg_line_length": 15,
"blob_id": "a65194ece05c7ab724a375a8139d4318153b0fbc",
"content_id": "f51a50cd7698cbc49e86aa4bcf82d06b00ab9342",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 48,
"license_type": "no_license",
"max_line_length": 28,
"num_lines": 3,
"path": "/README.txt",
"repo_name": "goodger/goodger.github.io",
"src_encoding": "UTF-8",
"text": "goodger.github.io\n\nDavid Goodger's personal web space.\n"
},
{
"alpha_fraction": 0.7010790705680847,
"alphanum_fraction": 0.7179276943206787,
"avg_line_length": 33.300865173339844,
"blob_id": "7a5749bc9b4bd6cbdd0758c10f5d930689c17224",
"content_id": "f29e4d71a46a16f53389dfdd770c950ff2769691",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "HTML",
"length_bytes": 15884,
"license_type": "no_license",
"max_line_length": 207,
"num_lines": 462,
"path": "/professional/cv/resume_David_Goodger.html",
"repo_name": "goodger/goodger.github.io",
"src_encoding": "UTF-8",
"text": "<?xml version=\"1.0\" encoding=\"utf-8\" ?>\n<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 Transitional//EN\" \"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd\">\n<html xmlns=\"http://www.w3.org/1999/xhtml\" xml:lang=\"en\" lang=\"en\">\n<head>\n<meta http-equiv=\"Content-Type\" content=\"text/html; charset=utf-8\" />\n<meta name=\"generator\" content=\"Docutils 0.17b.dev: http://docutils.sourceforge.net/\" />\n<title>David Goodger</title>\n<style type=\"text/css\">\n\n/*\n:Author: David Goodger\n:Contact: [email protected]\n:date: $Date: 2003/01/17 17:21:58 $\n:version: $Revision: 1.3 $\n:copyright: This stylesheet has been placed in the public domain.\n\nCascading style sheet for my resume.\n*/\n\nh1, h2, div.contents, th.docinfo-name {\n font-family: sans-serif }\n\nh1 {\n font-size: x-large }\n\n/*dt {\n font-weight: bold }*/\n\n.first {\n margin-top: 0 }\n\n.last {\n margin-bottom: 0 }\n\na.toc-backref {\n text-decoration: none ;\n color: black }\n\ndd {\n margin-bottom: 0.5em }\n\ndiv.abstract {\n margin: 2em 5em }\n\ndiv.abstract p.topic-title {\n font-weight: bold ;\n text-align: center }\n\ndiv.attention, div.caution, div.danger, div.error, div.hint,\ndiv.important, div.note, div.tip, div.warning {\n margin: 2em ;\n border: medium outset ;\n padding: 1em }\n\ndiv.attention p.admonition-title, div.caution p.admonition-title,\ndiv.danger p.admonition-title, div.error p.admonition-title,\ndiv.warning p.admonition-title {\n color: red ;\n font-weight: bold ;\n font-family: sans-serif }\n\ndiv.hint p.admonition-title, div.important p.admonition-title,\ndiv.note p.admonition-title, div.tip p.admonition-title {\n font-weight: bold ;\n font-family: sans-serif }\n\ndiv.dedication {\n margin: 2em 5em ;\n text-align: center ;\n font-style: italic }\n\ndiv.dedication p.topic-title {\n font-weight: bold ;\n font-style: normal }\n\ndiv.figure {\n margin-left: 2em }\n\ndiv.footer, div.header {\n font-size: smaller ;\n text-align: center }\n\ndiv.sidebar {\n margin-left: 1em ;\n border: thin solid ;\n padding: 0.5em ;\n background-color: #ffffee ;\n width: 40% ;\n float: right ;\n clear: right }\n\ndiv.sidebar p.rubric {\n font-family: sans-serif ;\n font-size: medium }\n\ndiv.system-messages {\n margin: 5em }\n\ndiv.system-messages h1 {\n color: red }\n\ndiv.system-message {\n border: medium outset ;\n padding: 1em }\n\ndiv.system-message p.system-message-title {\n color: red ;\n font-weight: bold }\n\ndiv.topic {\n margin: 2em }\n\nh1.title {\n text-align: center ;\n font-size: xx-large }\n\nh2.subtitle {\n text-align: center }\n\nhr {\n width: 75% }\n\nol.simple, ul.simple {\n margin-bottom: 1em }\n\nol.arabic {\n list-style: decimal }\n\nol.loweralpha {\n list-style: lower-alpha }\n\nol.upperalpha {\n list-style: upper-alpha }\n\nol.lowerroman {\n list-style: lower-roman }\n\nol.upperroman {\n list-style: upper-roman }\n\np.caption {\n font-style: italic }\n\np.credits {\n font-style: italic ;\n font-size: smaller }\n\np.label {\n white-space: nowrap }\n\np.rubric {\n font-family: sans-serif ;\n font-weight: bold ;\n font-size: x-large ;\n text-align: center }\n\np.sidebar-title {\n font-family: sans-serif ;\n font-weight: bold ;\n font-size: larger }\n\np.topic-title {\n font-weight: bold }\n\npre.address {\n margin-bottom: 0 ;\n margin-top: 0 ;\n font-family: serif ;\n font-size: 100% }\n\npre.line-block {\n font-family: serif ;\n font-size: 100% }\n\npre.literal-block, pre.doctest-block {\n margin-left: 2em ;\n margin-right: 2em ;\n background-color: #eeeeee 
}\n\nspan.classifier {\n font-family: sans-serif ;\n font-style: oblique }\n\nspan.classifier-delimiter {\n font-family: sans-serif ;\n font-weight: bold }\n\nspan.interpreted {\n font-family: sans-serif }\n\nspan.option-argument {\n font-style: italic }\n\nspan.pre {\n white-space: pre }\n\nspan.problematic {\n color: red }\n\ntable {\n margin-top: 0.5em ;\n margin-bottom: 0.5em }\n\ntable.citation {\n border-left: solid thin gray ;\n padding-left: 0.5ex }\n\ntable.docinfo {\n margin: 2em 4em }\n\ntable.footnote {\n border-left: solid thin black ;\n padding-left: 0.5ex }\n\ntd, th {\n padding-left: 0.5em ;\n padding-right: 0.5em ;\n vertical-align: top }\n\nth.docinfo-name, th.field-name {\n font-weight: bold ;\n text-align: left ;\n white-space: nowrap }\n\nh1 tt, h2 tt, h3 tt, h4 tt, h5 tt, h6 tt {\n font-size: 100% }\n\ntt {\n background-color: #eeeeee }\n\nul.auto-toc {\n list-style-type: none }\n\n</style>\n</head>\n<body>\n<div class=\"document\" id=\"david-goodger\">\n<h1 class=\"title\">David Goodger</h1>\n\n<!-- -*- coding: utf-8 -*- -->\n<div class=\"sidebar\">\n<p class=\"first sidebar-title\"></p>\n<div class=\"contents local last topic\" id=\"contents\">\n<ul class=\"simple\">\n<li><a class=\"reference internal\" href=\"#technical-skills\" id=\"id1\">Technical Skills</a></li>\n<li><a class=\"reference internal\" href=\"#experience\" id=\"id2\">Experience</a></li>\n<li><a class=\"reference internal\" href=\"#open-source-projects\" id=\"id3\">Open-Source Projects</a></li>\n<li><a class=\"reference internal\" href=\"#additional-relevant-experience\" id=\"id4\">Additional Relevant Experience</a></li>\n<li><a class=\"reference internal\" href=\"#education-certificates\" id=\"id5\">Education & Certificates</a></li>\n<li><a class=\"reference internal\" href=\"#publications\" id=\"id6\">Publications</a></li>\n<li><a class=\"reference internal\" href=\"#keynote-presentations-tutorials\" id=\"id7\">Keynote Presentations & Tutorials</a></li>\n</ul>\n</div>\n</div>\n<table class=\"docutils field-list\" frame=\"void\" rules=\"none\">\n<col class=\"field-name\" />\n<col class=\"field-body\" />\n<tbody valign=\"top\">\n<tr class=\"field\"><th class=\"field-name\">Email:</th><td class=\"field-body\"><a class=\"reference external\" href=\"mailto:goodger@python.org\">goodger@python.org</a></td>\n</tr>\n<tr class=\"field\"><th class=\"field-name\">Web Site:</th><td class=\"field-body\"><a class=\"reference external\" href=\"https://david.goodger.org\">https://david.goodger.org</a> (includes this résumé)</td>\n</tr>\n<tr class=\"field\"><th class=\"field-name\">LinkedIn:</th><td class=\"field-body\"><a class=\"reference external\" href=\"https://linkedin.com/in/goodger/\">https://linkedin.com/in/goodger/</a></td>\n</tr>\n<tr class=\"field\"><th class=\"field-name\">Location:</th><td class=\"field-body\">Ottawa & Montreal, Canada; open to positions in Canada, USA, & Japan</td>\n</tr>\n</tbody>\n</table>\n<p class=\"rubric\">Software Engineer/Architect · Systems Engineer/Analyst · Project Manager · Trainer</p>\n<p>Software/Systems Engineer with professional experience in the medical\ndevice industry, the financial sector, and automation/manufacturing.\nExperience in project management and training. Recognized expert in\nthe Python programming language. Extensive experience with markup\nlanguages for documentation, document processing, and data\ninterchange. 
Cross-platform systems development experience, ranging\nfrom GUI desktop applications to web services to command-line tools.</p>\n<p>Strengths include: effective communication between technical and\nnon-technical/business audiences, public speaking, excellent writing\nand editing skills, thorough and tenacious problem solving.\nInternational experience, with written and spoken Japanese and French.</p>\n<!-- I am currently fully employed and not seeking a change. -->\n<div class=\"section\" id=\"technical-skills\">\n<h1><a class=\"toc-backref\" href=\"#contents\">Technical Skills</a></h1>\n<dl class=\"docutils\">\n<dt><strong>Programming Languages</strong></dt>\n<dd>Python (expert), Lisp, Visual Basic, C, C++, SQL, Bash.</dd>\n<dt><strong>Development Methodologies</strong></dt>\n<dd>Agile (Scrum, Extreme Programming), design patterns, refactoring,\nTDD/test-driven development, UML.</dd>\n<dt><strong>Operating Systems</strong></dt>\n<dd>Linux/UNIX/QNX/Solaris, Windows, Mac OS X.</dd>\n<dt><strong>Documentation Technologies</strong></dt>\n<dd>XML, XSL, SGML; HTML/XHTML, CSS; Docutils/reStructuredText;\nUnicode; document analysis, DTD design; document data processing.</dd>\n</dl>\n</div>\n<div class=\"section\" id=\"experience\">\n<h1><a class=\"toc-backref\" href=\"#contents\">Experience</a></h1>\n<dl class=\"docutils\">\n<dt><strong>Medtronic Inc.</strong> (formerly Corventis), Mounds View, Minnesota, USA</dt>\n<dd><p class=\"first\">January 2013 – May 2020</p>\n<dl class=\"last docutils\">\n<dt><strong>Principal Systems Engineer</strong></dt>\n<dd><ul class=\"first last simple\">\n<li>Projects: SEEQ mobile cardiac telemetry patch & CareLink cardiac\npatient monitoring service.</li>\n<li>Systems & data analysis.</li>\n<li>Software systems requirements development.</li>\n<li>Agile Scrum Product Owner</li>\n</ul>\n</dd>\n<dt><strong>Programmer/Analyst</strong></dt>\n<dd>Developed desktop GUI software for an electronic medical device\nsystem. Technologies: wxPython GUI toolkit, USB communication, web\nservices, Dropbox integration.</dd>\n</dl>\n</dd>\n<dt><strong>Independent Consultant</strong>, Montréal, Québec, Canada</dt>\n<dd><p class=\"first\">December 2008 – January 2013</p>\n<p class=\"last\"><strong>Programmer/Analyst, Trainer, Systems Administrator.</strong></p>\n</dd>\n<dt><strong>CDP Capital Inc.</strong> (La Caisse de Dépôt et Placement du Québec), Montréal, Canada</dt>\n<dd><p class=\"first\">July 2005 – June 2012</p>\n<dl class=\"last docutils\">\n<dt><strong>Programmer/Analyst</strong></dt>\n<dd><p class=\"first\">Developed software tools for investment data analysis,\nincluding foreign exchange order management system and index\nfund management software. Technologies: Python, wxPython &\ntkInter GUI toolkits.</p>\n<p class=\"last\">(Full-time employee July 2005 – December 2008; consultant\nDecember 2008 – June 2012)</p>\n</dd>\n</dl>\n</dd>\n<dt><strong>Python Software Foundation</strong>, remote & Chicago, Illinois, USA</dt>\n<dd><p class=\"first\">December 2008 – September 2009</p>\n<dl class=\"last docutils\">\n<dt><strong>Conference Coordinator</strong></dt>\n<dd>Chaired the PyCon 2009 & 2008 Python community conferences,\nworking (mostly remotely) with volunteer organizers and\nprofessional meeting planners. 
Chaired PyCon 2008 as a\nvolunteer.</dd>\n<dt><strong>Member of the Board of Directors</strong></dt>\n<dd>Elected volunteer (February 2006 – July 2009)</dd>\n</dl>\n</dd>\n</dl>\n</div>\n<div class=\"section\" id=\"open-source-projects\">\n<h1><a class=\"toc-backref\" href=\"#contents\">Open-Source Projects</a></h1>\n<dl class=\"docutils\">\n<dt><strong>Docutils: Documentation Utilities</strong> (<a class=\"reference external\" href=\"http://docutils.sourceforge.net/\">http://docutils.sourceforge.net/</a>)</dt>\n<dd>Project coordinator and architect</dd>\n<dt><strong>Polyform Puzzler</strong> (<a class=\"reference external\" href=\"http://puzzler.sourceforge.net/\">http://puzzler.sourceforge.net/</a>)</dt>\n<dd>Project coordinator and architect</dd>\n<dt><strong>Python</strong> (the programming language, <a class=\"reference external\" href=\"http://www.python.org/\">http://www.python.org/</a>)</dt>\n<dd>Contributing developer; author or co-author of several Python\nEnhancement Proposals (<a class=\"reference external\" href=\"http://www.python.org/peps/\">http://www.python.org/peps/</a>)</dd>\n</dl>\n</div>\n<div class=\"section\" id=\"additional-relevant-experience\">\n<h1><a class=\"toc-backref\" href=\"#contents\">Additional Relevant Experience</a></h1>\n<dl class=\"docutils\">\n<dt><strong>ELM Machine Service Inc.</strong>, Cambridge, Ontario, Canada</dt>\n<dd><dl class=\"first last docutils\">\n<dt><strong>Consulting Developer</strong></dt>\n<dd>Developed a control panel and data acquisition application for\na PLC-based industrial gauge system, using open-source rapid\napplication development tools (Python, wxPython, Boa\nConstructor). Screen capture of application (running on\nWindows XP):\n<a class=\"reference external\" href=\"https://david.goodger.org/professional/cv/elm_gauge.png\">https://david.goodger.org/professional/cv/elm_gauge.png</a></dd>\n</dl>\n</dd>\n<dt><strong>ATS Automation Tooling Systems Inc.</strong>, Cambridge, Ontario, Canada</dt>\n<dd><dl class=\"first last docutils\">\n<dt><strong>Systems Administrator & Programmer</strong></dt>\n<dd>In charge of network and server systems in a mixed (Windows &\nQNX) environment, office and production facility with over 100\nworkstations. 
Developed engineering data processing\napplications & system maintenance solutions.</dd>\n</dl>\n</dd>\n<dt><strong>Rikai, Ltd.</strong>, Tokyo, Japan</dt>\n<dd><dl class=\"first last docutils\">\n<dt><strong>President and Founder</strong></dt>\n<dd>Software development & information technology consulting.</dd>\n</dl>\n</dd>\n<dt><strong>Uniscope, Inc.</strong>, Tokyo, Japan</dt>\n<dd><dl class=\"first last docutils\">\n<dt><strong>Operations Manager & Technical Team Leader; Developer</strong></dt>\n<dd>Project & personnel management; research and development;\nanalysis, design, and development of document processing,\nworkflow, relational and object database, intranet, and\ninternet systems; systems administration (Unix/Solaris,\nWindows platforms).</dd>\n</dl>\n</dd>\n<dt><strong>Embassy of Canada</strong>, Tokyo, Japan</dt>\n<dd><dl class=\"first last docutils\">\n<dt><strong>Network Systems Administrator & User Support Manager</strong></dt>\n<dd>Systems administration, user support, training, programming, &\ntroubleshooting (150 Windows clients, Unix servers).</dd>\n</dl>\n</dd>\n<dt><strong>Gunma Prefectural Board of Education (Japan Exchange & Teaching Program)</strong>, Gunma, Japan</dt>\n<dd><strong>Assistant English Teacher</strong></dd>\n</dl>\n</div>\n<div class=\"section\" id=\"education-certificates\">\n<h1><a class=\"toc-backref\" href=\"#contents\">Education & Certificates</a></h1>\n<dl class=\"docutils\">\n<dt><strong>Bachelor of Science, Major in Computer Science</strong></dt>\n<dd>McGill University, Montréal, Québec, Canada</dd>\n<dt><strong>Certified Scrum Product Owner</strong></dt>\n<dd>Scrum Alliance, Minneapolis, MN</dd>\n<dt><strong>Japanese-Language Proficiency Test</strong></dt>\n<dd>Japan Foundation & Japan Educational Exchanges and Services, Tokyo, Japan</dd>\n</dl>\n</div>\n<div class=\"section\" id=\"publications\">\n<h1><a class=\"toc-backref\" href=\"#contents\">Publications</a></h1>\n<dl class=\"docutils\">\n<dt><strong>Hello World! 
Computer Programming for Kids and Other Beginners</strong> (Manning, 2009)</dt>\n<dd>Technical Editor</dd>\n<dt><strong>Python For Dummies</strong> (Wiley, 2006)</dt>\n<dd>Technical Editor</dd>\n<dt><strong>Python Cookbook, Second Edition</strong> (O’Reilly & Associates, 2005)</dt>\n<dd>Contributor (section 1.23)</dd>\n<dt><strong>Python Cookbook</strong> (O’Reilly & Associates, 2002)</dt>\n<dd>Contributor (sections 15.4 & 2.3)</dd>\n<dt><strong>Professional Linux Programming</strong> (Wrox Press, 2000)</dt>\n<dd>Author of Chapter 15, “Python,” a programmer’s introduction to the\nPython programming language</dd>\n</dl>\n</div>\n<div class=\"section\" id=\"keynote-presentations-tutorials\">\n<h1><a class=\"toc-backref\" href=\"#contents\">Keynote Presentations & Tutorials</a></h1>\n<dl class=\"docutils\">\n<dt><strong>Python in India</strong></dt>\n<dd>Keynote presentation (PyCon India, Bangalore, India, 2010)</dd>\n<dt><strong>wxPython I: Introduction to GUI Programming</strong> & <strong>wxPython II: GUI Programming and MVC</strong></dt>\n<dd>Tutorials (PyCon 2010, Atlanta GA; PyCon 2008, Chicago IL)</dd>\n<dt><strong>How to Become an Invited Speaker</strong></dt>\n<dd>Keynote presentation (RuPy 2009, Poznan, Poland)</dd>\n<dt><strong>Code Like a Pythonista: Idiomatic Python</strong></dt>\n<dd>Tutorial (PyCon 2007, Dallas, Texas; OSCON 2007, Portland, Oregon)</dd>\n<dt><strong>Text and Data Processing</strong></dt>\n<dd>Tutorial (PyCon 2006, Dallas Texas)</dd>\n</dl>\n</div>\n</div>\n<div class=\"footer\">\n<hr class=\"footer\" />\n<a class=\"reference external\" href=\"resume_David_Goodger.txt\">View document source</a>.\nGenerated on: 2020-08-02 22:53 UTC.\nGenerated by <a class=\"reference external\" href=\"http://docutils.sourceforge.net/\">Docutils</a> from <a class=\"reference external\" href=\"http://docutils.sourceforge.net/rst.html\">reStructuredText</a> source.\n\n</div>\n</body>\n</html>\n"
}
] | 10 |
detriment/my_scripts
|
https://github.com/detriment/my_scripts
|
f9db8c173f48d7737ec9e2e3324dd818bf7b0ff7
|
6f597528ae22c76d3de1cb2b5002e29a710de365
|
5ef9fce3f281c7ec4749123848506dda1f841952
|
refs/heads/master
| 2020-02-27T23:52:08.400916 | 2015-09-18T00:07:50 | 2015-09-18T00:07:50 | 25,984,599 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6540098190307617,
"alphanum_fraction": 0.6906710267066956,
"avg_line_length": 29.193878173828125,
"blob_id": "b7fd1642ee544cd8fb12e2bc7d4f54cbbb7faccf",
"content_id": "b7c4beb4b938da32a0ed722fb1f31c6c92751f02",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 3055,
"license_type": "no_license",
"max_line_length": 148,
"num_lines": 98,
"path": "/burn-net.sh",
"repo_name": "detriment/my_scripts",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\r\n#\r\n# Author:\t\tNate Kennison\r\n#\t\t \t\t<[email protected]>\r\n#\r\n# Create Date: \t12/3/2013\r\n#\r\n# Last Edit:\t12/3/2013\r\n#\r\n# Version:\t\t1.0\r\n#\r\n# Purpose:\t\tTo automate the configuration of burn in OSes for the test rack.\r\n\r\n#read server's position\r\nNO=`cat /root/position`\r\n\r\n#clear /etc/udev/rules.d/70-persistent-net.rules and recreate empty\r\nrm -rf /etc/udev/rules.d/70-persistent-net.rules\r\ntouch /etc/udev/rules.d/70-persistent-net.rules\r\n\r\nclear\r\n\r\n#determine which NIC is connected and set NIC variable\r\nif [[ `ethtool eth0 | grep 'Link detected:' | egrep '(yes)'` ]]; then\r\n\tNIC='eth0'\r\n\t\r\nelif [[ `ethtool eth1 | grep 'Link detected:' | egrep '(yes)'` ]]; then\r\n\tNIC='eth1'\r\n\r\nelif [[ `ethtool em0 | grep 'Link detected:' | egrep '(yes)'` ]]; then\r\n\tNIC='em0'\r\n\r\nelif [[ `ethtool em1 | grep 'Link detected:' | egrep '(yes)'` ]]; then\r\n\tNIC='em1'\r\n\t\r\nfi\t\r\n\r\nclear\r\n\r\n#set hostname\r\n#HN=`echo burnin$NO | sed 's/1//' | sed 's/0//'`\r\nHN=`echo burnin$NO`\r\n#set IP address\r\n#IP=192.168.0.$NO\r\nIP=192.168.0.1$NO\r\n#set gateway address\r\nGW=192.168.0.1\r\n#set number for worker script\r\n#NR=`echo $NO | sed 's/1//' | sed 's/0//'`\r\n\r\n#create ifcfg file\r\nrm -rf /etc/sysconfig/network-scripts/ifcfg-*\r\nrm -rf /etc/sysconfig/network-scripts/ifcfg-$NIC\r\ntouch /etc/sysconfig/network-scripts/ifcfg-$NIC\r\necho \"DEVICE=$NIC\" >> /etc/sysconfig/network-scripts/ifcfg-$NIC\r\necho \"\tIPADDR=$IP\" >> /etc/sysconfig/network-scripts/ifcfg-$NIC\r\necho \"\tNETMASK=255.255.255.0\" >> /etc/sysconfig/network-scripts/ifcfg-$NIC\r\necho \"\tGATEWAY=$GW\" >> /etc/sysconfig/network-scripts/ifcfg-$NIC\r\necho \"\tONBOOT=yes\" >> /etc/sysconfig/network-scripts/ifcfg-$NIC\r\necho \"\tBOOTPROTO=static\" >> /etc/sysconfig/network-scripts/ifcfg-$NIC\r\n\r\ntouch /etc/sysconfig/network-scripts/ifcfg-lo\r\necho \"DEVICE=lo\" >> /etc/sysconfig/network-scripts/ifcfg-lo\r\necho \"IPADDR=127.0.0.1\" >> /etc/sysconfig/network-scripts/ifcfg-lo\r\necho \"NETMASK=255.0.0.0\" >> /etc/sysconfig/network-scripts/ifcfg-lo\r\necho \"NETWORK=127.0.0.0\" >> /etc/sysconfig/network-scripts/ifcfg-lo\r\n\r\n#disable iptables\r\nchkconfig iptables off\r\n\r\n#create network/hostname file\r\nrm -rf /etc/sysconfig/network\r\ntouch /etc/sysconfig/network\r\necho \"NETWORKING=yes\" >> /etc/sysconfig/network\r\necho \"FORWARD_IPV4=yes\" >> /etc/sysconfig/network\r\necho \"NETWORKING_IPV6=no\" >> /etc/sysconfig/network\r\necho \"HOSTNAME=$HN\" >> /etc/sysconfig/network\r\n\r\n#set hostname\r\nhostname $HN\r\n\r\n#restart networking to apply IP configuration\r\n/etc/init.d/network restart && ifconfig |grep inet\r\n\r\n#delete resolf.conf\r\nrm -rf /etc/resolv.conf\r\n\r\n#create new resolv.conf\r\ntouch /etc/resolv.conf\r\necho \"nameserver 66.96.80.43\" >> /etc/resolv.conf\r\necho \"nameserver 66.96.80.194\" >> /etc/resolv.conf\r\necho \"options single-request\" >> /etc/resolv.conf\r\n\r\n#delete and create worker script for init.d to initialize\r\nrm -rf /etc/init.d/startmining.sh\r\ntouch /etc/init.d/startmining.sh\r\nchmod +x /etc/init.d/startmining.sh\r\necho \"/usr/bin/screen -dmS burnin ~/src/jhPrimeminer/jhprimeminer -o http://ypool.net:10034 -u detriment.work$NO -p x\" >> /etc/init.d/startmining.sh"
},
{
"alpha_fraction": 0.5615138411521912,
"alphanum_fraction": 0.5692481994628906,
"avg_line_length": 27.026012420654297,
"blob_id": "ce1f5728b97b3292c982c94695882810762befb5",
"content_id": "472f1b2859af628c1d6712ca3490ca47a115af5b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 9697,
"license_type": "no_license",
"max_line_length": 169,
"num_lines": 346,
"path": "/temp/learnvim",
"repo_name": "detriment/my_scripts",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n#\n#Install.sh\n#Automated installation script\n#\n#Author: Nate Kennison ([email protected])\n#\n#Last Edit Date: 10.2.14\n#\n#Version 0.1\n#\n\n#Force bash\nif [ ! \"$BASH_VERSION\" ] ; then\n exec /bin/bash \"$0\" \"$@\"\nfi\n\n#Determine OS\n#if [[ `uname -r | egrep RELEASE >/dev/null 2>&1` ]]; then\n#\techo \"FreeBSD is not supported by this script (yet?)\"\n#\texit\n#elif [[ `egrep -i 'Fedora' /etc/redhat-release >/dev/null 2>&1 `]]\n#\techo \"HiVelocity does not support Fedora!\"\n#elif [[ `egrep -i 'CentOS|Redhat|Fedora' /etc/redhat-release >/dev/null 2>&1` ]]; then\n os=redhat\n#elif [ -f /etc/debian_version ]; then\n#\tos=debian\n#fi\n\n#Menu\nclear\nPS3='Please enter your choice(s): '\noptions=(\"Hardware Check\" \"IPMI Script\" \"cPanel\" \"Managed cPanel Script\" \"Softaculous\" \"CloudLinux\" \"KernelCare\" \"Litespeed\" \"Webmin\" \"Plesk\" \"SolusVM\" \"Install\" \"Quit\")\necho \"-==HiVelocity Install Script==-\"\necho \"\"\necho \"Select all you want to install\"\necho \"\"\nselect opt in \"${options[@]}\"\ndo\n case $opt in\n \"Hardware Check\")\n echo \"You chose Hardware Check Script\"\n if [[ `echo $selections | egrep 'hardware'` ]]; then\n \techo \"Already Selected\"\n \telse\n \t\tselections=\"$selections hardware\"\n \tfi\n ;;\n \"IPMI Script\")\n echo \"You chose IPMI Script\"\n if [[ `echo $selections | egrep 'ipmi'` ]]; then\n \techo \"Already Selected\"\n \telse\n \t\tselections=\"$selections ipmi\"\n \tfi\n ;;\n \"cPanel\")\n echo \"You chose cPanel\"\n if [[ `echo $selections | egrep 'cpanel'` ]]; then\n \techo \"Already Selected\"\n \telse\n \t\tif [[ `echo $os | grep \"redhat\" ` ]]; then\n\t\t\t\t\tselections=\"$selections cpanel\"\n\t\t\t\telif [[ `echo $os | grep \"debian\" ` ]]; then\n\t\t\t\t\techo \"cPanel is not supported on Debian/Ubuntu\"\n\t\t\t\tfi\n \tfi\n ;;\n \"Managed cPanel Script\")\n echo \"You chose Managed cPanel Script\"\n if [[ `echo $selections | egrep 'managed'` ]]; then\n \techo \"Already Selected\"\n \telse\n \t\tif [[ `echo $os | grep \"redhat\" ` ]]; then\n \t\tselections=\"$selections managed\"\n \t\telif [[ `echo $os | grep \"debian\" ` ]]; then\n \t\t\techo \"cPanel is not supported on Debian/Ubuntu\"\n \t\tfi\n \tfi\n ;;\n \"Softaculous\")\n echo \"You chose Softaculous\"\n if [[ `echo $selections | egrep 'softaculous'` ]]; then\n \techo \"Already Selected\"\n \telse\n \t\tif [[ `echo $os | grep \"redhat\" ` ]]; then\n \t\t\tselections=\"$selections softaculous\"\n \t\telif [[ `echo $os | grep \"debian\" ` ]]; then\n \t\t\techo \"cPanel is not supported on Debian/Ubuntu\"\n \t\tfi\n \tfi\n ;;\n \"CloudLinux\")\n echo \"You chose CloudLinux\"\n if [[ `echo $selections | egrep 'cloudlinux'` ]]; then\n \techo \"Already Selected\"\n \telse\n \t\tif [[ `echo $os | grep \"redhat\" ` ]]; then\n \t\t\tselections=\"$selections cloudlinux\"\n \t\telif [[ `echo $os | grep \"debian\" ` ]]; then\n \t\t\techo \"CloudLinux is not supported on Debian/Ubuntu\"\n \t\tfi\n \tfi\n ;;\n \"KernelCare\")\n echo \"You chose KernelCare\"\n if [[ `echo $selections | egrep 'kernelcare'` ]]; then\n \techo \"Already Selected\"\n \telse\n \t\tif [[ `echo $os | grep \"redhat\" ` ]]; then\n \t\t\tselections=\"$selections kernelcare\"\n \t\telif [[ `echo $os | grep \"debian\" ` ]]; then\n \t\t\techo \"KernelCare is not supported on Debian/Ubuntu\"\n \t\tfi \t\t\n \tfi\n ;;\n \"Litespeed\")\n echo \"You chose Litespeed\"\n echo \"Litespeed installer still not yet implemented. 
Sorry.\"\n if [[ `echo $selections | egrep 'litespeed'` ]]; then\n \techo \"Already Selected\"\n \telse\n \t\tselections=\"$selections litespeed\"\n \tfi\n ;;\n \"Webmin\")\n echo \"You chose Webmin\"\n if [[ `echo $selections | egrep 'webmin'` ]]; then\n \techo \"Already Selected\"\n \telse\n \t\tselections=\"$selections webmin\"\n \tfi\n ;;\n \"Plesk\")\n echo \"You chose Plesk\"\n if [[ `echo $selections | egrep 'plesk'` ]]; then\n \techo \"Already Selected\"\n \telse\n \t\tif [[ `echo $os | grep \"redhat\" ` ]]; then\n \t\t\tselections=\"$selections plesk\"\n \t\telif [[ `echo $os | grep \"debian\" ` ]]; then\n \t\t\techo \"We do not support Plesk on Debian/Ubuntu\"\n \t\tfi\n \tfi\n ;;\n \"SolusVM\")\n echo \"You chose SolusVM\"\n if [[ `echo $selections | egrep 'solusvm'` ]]; then\n \techo \"Already Selected\"\n \telse\n \t\tif [[ `echo $os | grep \"redhat\" ` ]]; then\n \t\t\tselections=\"$selections solusvm\"\n \t\telif [[ `echo $os | grep \"debian\" ` ]]; then\n \t\t\techo \"SolusVM Is not supported on Debian/Ubuntu\"\n \t\tfi\n \t\t\n \tfi\n ;;\n\n \"Install\")\n\t\t\techo \"\"\n\t\t\techo \"You have chosen:\"\n\t\t\techo \"\"\n\t\t\t\tif [[ `echo $selections | grep \"hardware\" ` ]]; then\n\t\t\t\t\techo \"- Hardware Check Script (1)\"\n\t\t\t\tfi\n\n\t\t\t\tif [[ `echo $selections | grep \"ipmi\" ` ]]; then\n\t\t\t\t\techo \"- IPMI Script (2)\"\n\t\t\t\tfi\n\t\n\t\t\t\tif [[ `echo $selections | grep \"cpanel\" ` ]]; then\n\t\t\t\t\techo \"- cPanel (3)\"\n\t\t\t\t\techo \"\tLicensing: https://manage2.cpanel.net/\"\n\t\t\t\tfi\n\t\n\t\t\t\tif [[ `echo $selections | grep \"managed\" ` ]]; then\n\t\t\t\t\techo \"- Managed cPanel Script(4)\"\n\t\t\t\tfi\n\t\n\t\t\t\tif [[ `echo $selections | grep \"softaculous\" ` ]]; then\n\t\t\t\t\techo \"- Softaculous (5)\"\n\t\t\t\t\techo \"\tLicensing: http://www.softaculous.com/hivelocity.php\"\n\t\t\t\tfi\n\t\n\t\t\t\tif [[ `echo $selections | grep \"cloudlinux\" ` ]]; then\n\t\t\t\t\techo \"- CloudLinux (6)\"\n\t\t\t\t\techo \"\tLicensing: https://manage2.cpanel.net/\"\n\t\t\t\tfi\n\t\n\t\t\t\tif [[ `echo $selections | grep \"kernelcare\" ` ]]; then\n\t\t\t\t\techo \"- KernelCare (7)\"\n\t\t\t\t\techo \"\tLicensing: http://cln.cloudlinux.com/clweb/account/reseller/console.xhtml\"\n\t\t\t\tfi\n\t\n\t\t\t\tif [[ `echo $selections | grep \"litespeed\" ` ]]; then\n\t\t\t\t\techo \"- Litespeed (8)\"\n\t\t\t\t\techo \"\tLicensing: https://store.litespeedtech.com/store/clientarea.php\"\n\t\t\t\tfi\n\t\n\t\t\t\tif [[ `echo $selections | grep \"webmin\" ` ]]; then\n\t\t\t\t\techo \"- Webmin (9)\"\n\t\t\t\tfi\n\t\n\t\t\t\tif [[ `echo $selections | grep \"plesk\" ` ]]; then\n\t\t\t\t\techo \"- Plesk (10)\"\n\t\t\t\t\techo \"\tLicensing: https://ka.parallels.com:4000/\"\n\t\t\t\tfi\n\t\n\t\t\t\tif [[ `echo $selections | grep \"solusvm\" ` ]]; then \n\t\t\t\techo \"- SolusVM (11)\"\n\t\t\t\techo \"\tLicensing: https://www.soluslabs.com/clients/clientarea.php\"\n\t\t\t\tfi\n\n\t\t\techo \"\"\n\t\t\techo \"Continue? (y/yes/n/no)\"\n\t\t\tread go\n\t\t\tif [[ `echo $go | egrep '(y|yes|Y|YES)'` ]]; then\n break\n \tfi\n\t\t\t;;\n\n\t\t\"Quit\")\n\t\t\techo \"\"\n\t\t\techo \"Really Quit? 
(y/yes/n/no)\"\n\t\t\tread go\n\t\t\tif [ `echo $go | egrep '(y|yes|Y|YES)'` ]; then\n \texit\n else\n \tstuff\n fi\n ;;\n *) echo invalid option;;\n esac\ndone\t\t\n\n#Installers\nif [[ `echo $selections | grep \"hardware\" ` ]]; then\n\techo \"Running Hardware Check Script\"\n\tsleep 1\n\twget scripts.hivelocity.net/harden/hwcheck.sh\n\tbash hwcheck.sh\n\tread -p \"press [Enter] when ready to proceed\"\nfi\n\nif [[ `echo $selections | grep \"ipmi\" ` ]]; then\n\techo \"Running IPMI Script\"\n\tsleep 1\n\twget scripts.hivelocity.net/harden/ipmi.sh\n\tbash ipmi.sh\nfi\n\nif [[ `echo $selections | grep \"cpanel\" ` ]]; then\n\techo \"Installing cPanel\"\n\tsleep 1\n\tcd /home\n\twget http://74.50.120.123/latest\n\tsh latest\n\tcd\n\twget http://www.configserver.com/free/csf.tgz\n\ttar -xzf csf.tgz\n\tcd csf\n\tsh install.sh\nfi\n\nif [[ `echo $selections | grep \"managed\" ` ]]; then\n\techo \"Installing Managed cPanel Script\"\n\tsleep 1\n\twget scripts.hivelocity.net/harden/files/managed.sh && sh managed.sh\nfi\n\nif [[ `echo $selections | grep \"softaculous\" ` ]]; then\n\techo \"Installing Softaculous\"\n\tsleep 1\n\twget -N http://files.softaculous.com/install.sh\n\tchmod 755 install.sh\n\t./install.sh --quick\nfi\n\nif [[ `echo $selections | grep \"cloudlinux\" ` ]]; then\n\techo \"Installing CloudLinux\"\n\tsleep 1\n\twget http://repo.cloudlinux.com/cloudlinux/sources/cln/cldeploy\n\tsh cldeploy -i\n\t\nfi\n\nif [[ `echo $selections | grep \"kernelcare\" ` ]]; then\n\techo \"Installing KernelCare\"\n\tsleep 1\n\trpm -i http://patches.kernelcare.com/kernelcare-latest.x86_64.rpm\n\n\nfi\n\nif [[ `echo $selections | grep \"litespeed\" ` ]]; then\n\techo \"Installing Litespeed\"\n\tsleep 1\n\n\nfi\n\nif [[ `echo $selections | grep \"webmin\" ` ]]; then\n\techo \"Installing Webmin\"\n\tsleep 1\n\tif [[ `echo $os | grep \"redhat\" ` ]]; then\n\t\tyum -y install perl-Net-SSLeay\n\t\twget http://www.webmin.com/download/rpm/webmin-current.rpm\n\t\trpm -i webmin-*.rpm\n\tfi\n\tif [[ `echo $os | grep \"debian\" ` ]]; then\n\t\taptitude -y install perl libnet-ssleay-perl openssl libauthen-pam-perl libpam-runtime libio-pty-perl apt-show-versions libapt-pkg-perl\n\t\twget http://scripts.hivelocity.net/harden/files/libmd5-perl_2.03-1_all.deb\n\t\tdpkg -i libmd5-perl_2.03-1_all.deb\n\t\twget http://www.webmin.com/download/deb/webmin-current.deb\n\t\tapt-get -y install python\n\t\tapt-get -fy install\n\t\tdpkg -i webmin-*.deb\n\tfi\n\nfi\n\nif [[ `echo $selections | grep \"plesk\" ` ]]; then\n\techo \"Installing Plesk\"\n\tsleep 1\n\thttp://autoinstall.plesk.com/plesk-installer\n\tchmod +x plesk-installer\n\t./plesk-installer\nfi\n\nif [[ `echo $selections | grep \"solusvm\" ` ]]; then\n\techo \"Installing SolusVM\"\n\tsleep 1\n\twget http://soluslabs.com/installers/solusvm/install\n\tchmod 755 install\n\t./install\nfi\n\n#Final messages.\nclear\necho 'Done!'\nif [[ `echo $selections | grep \"cloudlinux\" ` ]]; then\n\techo \"You installed CloudLinux.\"\n\techo \"Be sure to reboot the server to complete the installation\"\n\t\nfi\n"
},
{
"alpha_fraction": 0.6246649026870728,
"alphanum_fraction": 0.6246649026870728,
"avg_line_length": 19.77777862548828,
"blob_id": "849f10ebd3d8d6b35e62bfc81e80477c22fcdc9e",
"content_id": "3d828c493f9d7c15149d6c9b94d4d6cb6711d30e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 373,
"license_type": "no_license",
"max_line_length": 91,
"num_lines": 18,
"path": "/learning/ifthen.sh",
"repo_name": "detriment/my_scripts",
"src_encoding": "UTF-8",
"text": "#/bin/bash\n\n#echo \"Do you want to skip yum update? (y/n), or press [enter] to continue (first ask)\"\n#read CP\n\nif [[ -n ${CP} ]]; then\n echo \"variable already set\"\nelse\n echo \"Do you want to skip yum update? (y/n), or press [enter] to continue (second ask)\"\n\tread CP\nfi\n\n\nif [[ $CP == 'y' ]]; then\n echo \"Will skip yum update\"\nelse\n echo \"Will run yum update\"\nfi"
},
{
"alpha_fraction": 0.4595660865306854,
"alphanum_fraction": 0.5779092907905579,
"avg_line_length": 15.933333396911621,
"blob_id": "077574333231dfeec463d7d179c3c6e2861e20e3",
"content_id": "2da542aa2e352d9bc0fb4343b0f67fb6d0a1108c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 507,
"license_type": "no_license",
"max_line_length": 54,
"num_lines": 30,
"path": "/ipv6test2",
"repo_name": "detriment/my_scripts",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\nclear\necho \"enter IPv6 Address:\"\nread ip6\necho \"Enter IPv6 Gateway:\"\nread ip6gw\n\n#Address Case\ncase \"$ip6\" in \n\t*\"2604:4500\"* ) ;;\n\t*\"::\"* ) ;;\n\t* ) ip6=2604:4500::$ip6 ;;\nesac\n\n#Previx Case\ncase \"$ip6\" in\n\t*\"/\"* ) ip6subnet=$(echo $ip6 | sed -e 's,.*/,,g') ;;\n\t* ) ip6=$ip6/121 && ip6subnet=121 ;;\nesac\n\n#Gateway Case\ncase \"$ip6gw\" in \n\t*\"2604:4500\"* ) ;;\n\t*\"::\"* ) ;;\n\t* ) ip6gw=2604:4500::$ip6gw ;;\nesac\n\necho \"IPv6 Address = $ip6\"\necho \"IPv6 Subnet = $ip6subnet\"\necho \"IPv6 Gateway = $ip6gw\""
},
{
"alpha_fraction": 0.5275590419769287,
"alphanum_fraction": 0.5826771855354309,
"avg_line_length": 14.8125,
"blob_id": "450a71e4571fd6d5ebee1e5c1d64bbb23d24ef90",
"content_id": "ab1e4cf15cb2937e617bde0b984ab501d2a782e1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 254,
"license_type": "no_license",
"max_line_length": 49,
"num_lines": 16,
"path": "/fix_networking",
"repo_name": "detriment/my_scripts",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\nsudo ifconfig en0 down\nsudo ifconfig en0 up\n\nwhile true; do\n\tif eval \"ping -c 1 192.168.0.1\" > /dev/null 2>&1\n \t\tthen\n \t\techo \"we are online!\" && break\n \telse\n \t\techo \"We are still offline\"\n \tfi\n \t\t shift\ndone\n\nexit 0\t\n"
},
{
"alpha_fraction": 0.6333242654800415,
"alphanum_fraction": 0.663755476474762,
"avg_line_length": 24.43402862548828,
"blob_id": "8ba0fd07b70e7d83833c279df3025742c1866bcd",
"content_id": "3d2b7582cbcb5656c33b1f2aa7c288da803df26f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7328,
"license_type": "no_license",
"max_line_length": 131,
"num_lines": 288,
"path": "/python/playtime.py",
"repo_name": "detriment/my_scripts",
"src_encoding": "UTF-8",
"text": "#define function named less_is_more and names it's argument arg.\ndef less_or_more(arg):\n\t#if number is less than 100, it says so.\n\tif arg > 100:\n\t\tprint \"Greater than 100\"\n\t\t#this returns the string 'greater' to the function call\n\t\treturn \"greater\"\n\telse:\n\t\tprint \"Less than 100\"\n\t\treturn \"less\"\n\n#THIS RUNS FIRST\n#this runs first, it sets the variable 'number' by asking the user a question. \"input\" sets as variable, \"raw input\" sets as string\nnumber = input(\"Enter a number: \")\n#this runs next, by taking the user's answer and passing it to the function above\nanswer = less_or_more(number)\n\n#less = \"this number is less than that other number mofugga\"\n#greater = \"this number is greater than that other number mofuckaaaaaaaa\"\n\n#this prints the return value from the function above.\nprint answer\n\nstring_variable = \"this variable is a string\"\n\nvariable_w_ = \"placeholder\"\n\n\"\"\"This is\na multi\nline comment\"\"\"\n\n#basic math\nprint 100 + 100\nprint 101-100\nprint 100 * 100\nprint 100 / 100\n#exponents:\nprint 100 ** 5\n#modulo (returns the remainder)\nprint 100 % 6\n\n#importing modules\nimport math\nfrom datetime import datetime\nnow = datetime.now()\nprint now\nprint dir(datetime)\n\ncurrent_year = now.year\ncurrent_month = now.month\ncurrent_day = now.day\n\nprint current_year\nprint current_month\nprint current_day\n\nprint '%s/%s/%s' % (now.month, now.day, now.year)\nprint '%s:%s:%s' % (now.hour, now.minute, now.second)\nprint '%s/%s/%s %s:%s:%s' % (now.month, now.day, now.year, now.hour, now.minute, now.second)\n\n#prints minimum of a range\nprint min(4, 10)\n#prints maximium in a range\nprint max(4, 10)\n#prints type (integer)\nprint type(124)\n#prints type (floating point)\nprint type(5.221)\n#prints type (string)\nprint type(\"these are words\")\n#prints absolite (distance from zero)\nprint abs(334)\n\n\"\"\"Comparators:\nEqual to (==)\nNot equal to (!=)\nLess than (<)\nLess than or equal to (<=)\nGreater than (>)\nGreater than or equal to (>=)\n\nBoolean operators:\nand, which checks if both the statements are True;\nor, which checks if at least one of the statements is True;\nnot, which gives the opposite of the statement.\n\"\"\"\n\n# Make me false!\nbool_one = (2 <= 2) and \"Alpha\" == \"Bravo\" # We did this one for you!\n\n# Make me true!\nbool_two = not (2+2) > 5\n\n# Make me false!\nbool_three = not (2+2) > 2\n\n# Make me true!\nbool_four = not (2+2) > 6\n\n# Make me true!\nbool_five = not (2+2) > 9\n\ndef greater_less_equal_5(answer):\n if answer>5:\n return 1\n elif answer<5: \n return -1\n else:\n return 0\n \nprint greater_less_equal_5(4)\nprint greater_less_equal_5(5)\nprint greater_less_equal_5(6)\n\n#calculating tip\ndef tax(bill):\n \"\"\"Adds 8% tax to a restaurant bill.\"\"\"\n bill *= 1.08\n print \"With tax: %f\" % bill\n return bill\n\ndef tip(bill):\n \"\"\"Adds 15% tip to a restaurant bill.\"\"\"\n bill *= 1.15\n print \"With tip: %f\" % bill\n return bill\n \nmeal_cost = 100\nmeal_with_tax = tax(meal_cost)\nmeal_with_tip = tip(meal_with_tax)\nprint round(meal_with_tax)\nprint round(meal_with_tip)\n\n#calculate hotel costs (functions)\ndef hotel_cost(nights):\n cost = 140 * nights\n print cost\n return cost\n \nhotel_cost(3)\n\ndef hotel_cost(nights):\n cost = 140 * nights\n print cost\n return cost\n \nhotel_cost(3)\n\ndef plane_ride_cost(location):\n if location == \"Charlotte\":\n print \"Tickets are $183\"\n return 183\n elif location == \"Tampa\":\n print \"Tickets are $220\"\n return 
220\n elif location == \"Pittsburgh\":\n print \"Tickets are $222\"\n return 222\n elif location == \"Los Angeles\":\n print \"Tickets are $475\"\n return 475\n else:\n print \"Not a valid destination!\"\n return\n \ntrip_destination = raw_input(\"Where are you flying?: (Choices are Charlotte, Tampa, Pittsburgh, and Los Angeles)\")\nplane_ride_cost(trip_destination)\n\ndef rental_car_cost(days):\n if days >= 7:\n price = days * cost \n price = price - 50\n print price\n return price\n elif days >= 3:\n price = days * cost \n price = price - 20\n print price\n return price\n elif days <=2:\n price = days * cost\n print price\n return price\n \ncost = 40\ndays_input = input(\"How many days are you staying?: \")\nrental_car_cost(days_input)\n\n#setting up a list\nzoo_animals = [\"pangolin\", \"cassowary\", \"sloth\", \"lion\"];\n\nif len(zoo_animals) > 3:\n\tprint \"The first animal at the zoo is the \" + zoo_animals[0]\n\tprint \"The second animal at the zoo is the \" + zoo_animals[1]\n\tprint \"The third animal at the zoo is the \" + zoo_animals[2]\n\tprint \"The fourth animal at the zoo is the \" + zoo_animals[3]\n\nnumbers = [5, 6, 7, 8]\n\nprint \"Adding the numbers at indices 0 and 2...\"\nprint numbers[0] + numbers[2]\nprint \"Adding the numbers at indices 1 and 3...\"\n# Your code here!\nprint numbers[1] + numbers[3]\n\n#to change list items\nzoo_animals[2] = \"hyena\"\nzoo_animals[3] = \"leopard\"\n\n#addming items to a list\nsuitcase = [] \nsuitcase.append(\"sunglasses\")\n\nsuitcase.append('swimsuit')\nsuitcase.append('handgun')\nsuitcase.append('passport')\nsuitcase.append('satellite phone')\n\nlist_length = len(suitcase) #setting the lenght of a list\n\nprint \"There are %d items in the suitcase.\" % (list_length)\nprint suitcase\n\n\"\"\"\nExtracting list items. **NOTE** format: [first item: up to but not this item] \nie. [0:3] selects items 0, 1, and 2\n[:3] selects the first item through item 2\n[3:] selects item 3 through the last item\nthe count starts at 0, obviously\n\"\"\"\nsuitcase = [\"sunglasses\", \"hat\", \"passport\", \"laptop\", \"suit\", \"shoes\"]\n\nfirst = suitcase[0:2] # The first and second items (index zero and one)\nmiddle = suitcase[2:4] # Third and fourth items (index two and three)\nlast = suitcase[4:6] # The last two items (index four and five)\nprint first\nprint middle\nprint last\n\nanimals = \"catdogfrog\"\ncat = animals[:3] # The first three characters of animals\ndog = animals[3:6] # The fourth through sixth characters\nfrog = animals[6:] # From the seventh character to the end\n\n#\"\"\"Substitution of list items\"\"\"\nanimals = [\"aardvark\", \"badger\", \"duck\", \"emu\", \"fennec fox\"]\nduck_index = animals.index(\"duck\") # Use index() to find \"duck\"\nanimals.insert(duck_index, \"cobra\") #replace duck with cobra\nprint animals #print the list\n\n#Loops!\nmy_list = [1,9,3,8,5,7]\n\nfor number in my_list:\n answer = 2 * number\n print answer\n\n\n#More with for loops. Appending and sorting with a for loop\nstart_list = [5, 3, 1, 2, 4]\nsquare_list = []\n\nfor number in start_list:\n square = number ** 2\n square_list.append(square)\n \nsquare_list.sort()\nprint square_list\n\n#Dictionaries. 
They use curly brackets and have a value and a key {'Value' : key, 'Value' : key}\nresidents = {'Puffin' : 104, 'Sloth' : 105, 'Burmese Python' : 106}\n\nprint residents['Puffin'] # Prints Puffin's room number\nprint residents['Sloth'] #Prints 105\nprint residents['Burmese Python'] #Prints 106\n\n#addming items to menus\nmenu = {} # Empty dictionary\nmenu['Chicken Alfredo'] = 14.50 # Adding new key-value pair\nprint menu['Chicken Alfredo']\n\n# Your code here: Add some dish-price pairs to menu!\nmenu['Ice Cream'] = 3.23\nmenu['Bacon Wrapped Filet Mignon'] = 39.99\nmenu['Human'] = 3225.99\n\nprint \"There are \" + str(len(menu)) + \" items on the menu.\"\nprint menu\n\n\n\n"
},
{
"alpha_fraction": 0.6594203114509583,
"alphanum_fraction": 0.6594203114509583,
"avg_line_length": 13.310344696044922,
"blob_id": "9e7449bed520ed53d489b8b7ffda67912eda769f",
"content_id": "d1ad021abd29d4ec39be556417ee1262b9e2db92",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 414,
"license_type": "no_license",
"max_line_length": 48,
"num_lines": 29,
"path": "/function",
"repo_name": "detriment/my_scripts",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\n#Basic Function layout\nfunction functionname () {\n\n\techo \"This is content\"\n\n}\n\nfunctionname\n\n#Case statement inside a function calling itself\nfunction functiontest () {\n\techo \"yes or no?\"\n\tread answer\n\tcase $answer in\n\t\t\"yes\"|\"y\" )\n\t\t\techo \"you selected yes\"\n\t\t;;\n\t\t\"no\"|\"n\" ) echo \"You selected no\" ;;\n\t\t* ) \n\t\t\techo \"Please answer yes or no\" \n\t\t\tfunctiontest\n\t\t;;\n\tesac\n\tunset answer\n}\n\nfunctiontest"
},
{
"alpha_fraction": 0.5925925970077515,
"alphanum_fraction": 0.6172839403152466,
"avg_line_length": 9.125,
"blob_id": "535844291c61cc5fe7e066f93a4366aed17666a5",
"content_id": "db4e11f0622ae6ca3bda59eba374aaceb76e82a6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 81,
"license_type": "no_license",
"max_line_length": 16,
"num_lines": 8,
"path": "/examples/append-variable",
"repo_name": "detriment/my_scripts",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\nvar=\"1\"\necho $var\nvar=\"$var 2\"\necho $var\nvar=\"$var stuff\"\necho $var\n"
},
{
"alpha_fraction": 0.6845238208770752,
"alphanum_fraction": 0.6904761791229248,
"avg_line_length": 32.70000076293945,
"blob_id": "4054839a72b20fcce868add2923b5e74b06e25cb",
"content_id": "6ab813ee2028ba8a96aa514237ef40db6684e0ee",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 336,
"license_type": "no_license",
"max_line_length": 88,
"num_lines": 10,
"path": "/python/nic.py",
"repo_name": "detriment/my_scripts",
"src_encoding": "UTF-8",
"text": "import subprocess\n\n#get active NIC\ncommand = \"ip link | grep BROADCAST | grep -v NO-CARRIER | awk '{print $2}' | sed s/://\"\nnic = subprocess.Popen(command, shell=True)\n\nimport subprocess\ncommand = \"ip link | grep BROADCAST | grep -v NO-CARRIER | awk '{print $2}' | sed s/://\"\nnic = subprocess.check_output(command, shell=True)\nprint nic"
},
{
"alpha_fraction": 0.6048780679702759,
"alphanum_fraction": 0.6121951341629028,
"avg_line_length": 23.176469802856445,
"blob_id": "40ceaeeeed3049ed11d80d3e21b7d108090ad219",
"content_id": "ea5b4c8c55491eb853cd7833ba7388495f2874ff",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 410,
"license_type": "no_license",
"max_line_length": 98,
"num_lines": 17,
"path": "/ksp/loop",
"repo_name": "detriment/my_scripts",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\nmv versions.new versions.old &> /dev/null\n\n#set delimeter to new line\nIFS=$'\\n'\n\nfor URL in `cat urls`\ndo\n\tname=$(echo $URL | awk '{print $1}' | sed 's/_/ /g')\n\tlink=$(echo $URL | awk '{print $2}')\n\techo \"\" >> versions.new\n\techo $name >> versions.new\n\tlinks -dump $link | grep \"Thread\" | head -n1 | sed 's/Thread://g' | sed 's/^ *//' >> versions.new\ndone\n\ndiff versions.new versions.old >> changes"
},
{
"alpha_fraction": 0.554973840713501,
"alphanum_fraction": 0.6230366230010986,
"avg_line_length": 30.83333396911621,
"blob_id": "f66aac1b8bb96c1391698b9a1139ba13d4a12965",
"content_id": "685d3107e49f64feb7245e0645560b54119b35a9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 191,
"license_type": "no_license",
"max_line_length": 92,
"num_lines": 6,
"path": "/examples/exit_status_check.sh",
"repo_name": "detriment/my_scripts",
"src_encoding": "UTF-8",
"text": "echo stuff\necho things\nwget http://ipmiutil.sourceforge.net/FILES/ipmiutil-2.9.4-1_rhel6.x86_64.rpm >/dev/null 2>&1\n if [ $? -ne 0 ]; then\n echo \"command1 borked it\"\n fi\n"
},
{
"alpha_fraction": 0.5852272510528564,
"alphanum_fraction": 0.6079545617103577,
"avg_line_length": 15,
"blob_id": "7b570487bd2b1c7c858354a1f5324fe5d506cfb1",
"content_id": "0c74077efdf0bb2ded60ebaaa99cd2007c1a0f53",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 176,
"license_type": "no_license",
"max_line_length": 36,
"num_lines": 11,
"path": "/examples/multiple-item-if-statements",
"repo_name": "detriment/my_scripts",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\n#match any\nif [ -s file1 ] || [ -s file2 ];then\necho \"one of the files exist\"\nfi\n\n#match all\nif [ -s file1 ] && [ -s file2 ];then\necho \"all of the files exist\"\nfi\n"
},
{
"alpha_fraction": 0.6455798745155334,
"alphanum_fraction": 0.6726142168045044,
"avg_line_length": 22.322368621826172,
"blob_id": "6cd4633a07cfe476bcac2b0e7e508b14ad3e0324",
"content_id": "7925e2c878c2f2012b3971c4a0c55f055f40b9e0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 3699,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 152,
"path": "/burn-net2.sh",
"repo_name": "detriment/my_scripts",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\r\n# chkconfig: 35 98 10\r\n# description: Some description\r\n# processname: burn-net\r\n#\t/etc/rc.d/init.d/burn-net\r\n# Author:\t\tNate Kennison\r\n#\t\t \t\t<[email protected]>\r\n#\r\n# Create Date: \t12/3/2013\r\n#\r\n# Last Edit:\t12/3/2013\r\n#\r\n# Version:\t\t2.0\r\n#\r\n# Purpose:\t\tTo automate the configuration of burn in OSes for the test rack.\r\n \r\n# Source function library.\r\n. /etc/init.d/functions\r\n \r\nlockfile=/var/lock/subsys/burn-net\r\n \r\nstart() {\r\n\t#touch $lockfile\r\n\ttouch /var/lock/subsys/burn-net\r\n\techo -n \"Starting burn-net\"\r\n\t#start daemons, perhaps with the daemon function>\r\n\r\n\tNO=`cat /root/position`\r\n\r\n#clear /etc/udev/rules.d/70-persistent-net.rules and recreate empty\r\nrm -rf /etc/udev/rules.d/70-persistent-net.rules\r\ntouch /etc/udev/rules.d/70-persistent-net.rules\r\n\r\nclear\r\n\r\n#determine which NIC is connected and set NIC variable\r\nif [[ `ethtool eth0 | grep 'Link detected:' | egrep '(yes)'` ]]; then\r\n\tNIC='eth0'\r\n\t\r\nelif [[ `ethtool eth1 | grep 'Link detected:' | egrep '(yes)'` ]]; then\r\n\tNIC='eth1'\r\n\r\nelif [[ `ethtool em0 | grep 'Link detected:' | egrep '(yes)'` ]]; then\r\n\tNIC='em0'\r\n\r\nelif [[ `ethtool em1 | grep 'Link detected:' | egrep '(yes)'` ]]; then\r\n\tNIC='em1'\r\n\t\r\nfi\t\r\n\r\nclear\r\n\r\n#set hostname\r\nHN=`echo burnin$NO`\r\n\r\n#set IP address\r\nIP=192.168.0.1$NO\r\n\r\n#set gateway address\r\nGW=192.168.0.1\r\n\r\n\r\n#create ifcfg file\r\nrm -rf /etc/sysconfig/network-scripts/ifcfg-*\r\nrm -rf /etc/sysconfig/network-scripts/ifcfg-$NIC\r\ntouch /etc/sysconfig/network-scripts/ifcfg-$NIC\r\necho \"DEVICE=$NIC\" >> /etc/sysconfig/network-scripts/ifcfg-$NIC\r\necho \"\tIPADDR=$IP\" >> /etc/sysconfig/network-scripts/ifcfg-$NIC\r\necho \"\tNETMASK=255.255.255.0\" >> /etc/sysconfig/network-scripts/ifcfg-$NIC\r\necho \"\tGATEWAY=$GW\" >> /etc/sysconfig/network-scripts/ifcfg-$NIC\r\necho \"\tONBOOT=yes\" >> /etc/sysconfig/network-scripts/ifcfg-$NIC\r\necho \"\tBOOTPROTO=static\" >> /etc/sysconfig/network-scripts/ifcfg-$NIC\r\n\r\ntouch /etc/sysconfig/network-scripts/ifcfg-lo\r\necho \"DEVICE=lo\" >> /etc/sysconfig/network-scripts/ifcfg-lo\r\necho \"IPADDR=127.0.0.1\" >> /etc/sysconfig/network-scripts/ifcfg-lo\r\necho \"NETMASK=255.0.0.0\" >> /etc/sysconfig/network-scripts/ifcfg-lo\r\necho \"NETWORK=127.0.0.0\" >> /etc/sysconfig/network-scripts/ifcfg-lo\r\n\r\n#disable iptables\r\nchkconfig iptables off\r\n\r\n#create network/hostname file\r\nrm -rf /etc/sysconfig/network\r\ntouch /etc/sysconfig/network\r\necho \"NETWORKING=yes\" >> /etc/sysconfig/network\r\necho \"FORWARD_IPV4=yes\" >> /etc/sysconfig/network\r\necho \"NETWORKING_IPV6=no\" >> /etc/sysconfig/network\r\necho \"HOSTNAME=$HN\" >> /etc/sysconfig/network\r\n\r\n#set hostname\r\nhostname $HN\r\n\r\n#restart networking to apply IP configuration\r\n/etc/init.d/network restart && ifconfig |grep inet\r\n\r\n#delete resolf.conf\r\nrm -rf /etc/resolv.conf\r\n\r\n#create new resolv.conf\r\ntouch /etc/resolv.conf\r\necho \"nameserver 66.96.80.43\" >> /etc/resolv.conf\r\necho \"nameserver 66.96.80.194\" >> /etc/resolv.conf\r\necho \"options single-request\" >> /etc/resolv.conf\r\n\r\n#delete and create worker script for init.d to initialize\r\n\r\nwget scripts.hivelocity.net/harden/extras.sh && sh extras.sh\r\nrm -rf extras.sh\r\n\r\n\tsuccess\r\n}\t\r\n \r\nstop() {\r\n\t#rm -f $lockfile\r\n\trm -f /var/lock/subsys/burn-net\r\n\techo -n \"Stopping burn-net\"\r\n\t#start daemons, perhaps with the daemon 
function>\r\n\r\n\t\r\n\tsuccess\r\n}\r\n \r\ncase \"$1\" in\r\n start)\r\n\tstart\r\n\t;;\r\n stop)\r\n\tstop\r\n\t;;\r\n status)\r\n\techo \"Not applied to service\"\r\n\t;;\r\n restart)\r\n \tstop\r\n\tstart\r\n\t;;\r\n reload)\t\r\n\techo \"Not applied to service\"\r\n\t;;\r\n condrestart)\r\n \t#<Restarts the servce if it is already running. For example:>\r\n\techo \"Not applied to service\"\r\n\t;;\r\n probe)\r\n\t;;\r\n *)\r\n\techo \"Usage: burn-net{start|stop|status|reload|restart[|probe]\"\r\n\texit 1\r\n\t;;\r\nesac\r\nexit $?\r\n\r\n"
},
{
"alpha_fraction": 0.6190140247344971,
"alphanum_fraction": 0.6761971712112427,
"avg_line_length": 61.04379653930664,
"blob_id": "6514dac5746c49a7776e9584f904717835334565",
"content_id": "63a8296afa19f294a736604f535e1028059e508b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 8499,
"license_type": "no_license",
"max_line_length": 221,
"num_lines": 137,
"path": "/ksp/modurls_script.sh",
"repo_name": "detriment/my_scripts",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\ncmd=\"links -dump\"\nhead=\"|head -n 1\"\n\n: > versions\n\nclear\necho \"Pulling Versions...\"\n\necho \"\" >> versions\necho \"Ferram Aerospace:\" >> versions\nlinks -dump \"http://forum.kerbalspaceprogram.com/threads/20451-0-25-Ferram-Aerospace-Research-v0-14-3-2-10-21-14\" | grep Thread | head -n1 | sed 's/Thread://g' | sed 's/^ *//' >> versions\necho \"\" >> versions\necho \"NEAR:\" >> versions\nlinks -dump \"http://forum.kerbalspaceprogram.com/threads/86419\" | grep Thread | head -n1 | sed 's/Thread://g' | sed 's/^ *//' >> versions\necho \"\" >> versions\necho \"Kerbal Join Reinforcement:\" >> versions\nlinks -dump \"http://forum.kerbalspaceprogram.com/threads/55657-0-22-Kerbal-Joint-Reinforcement-v1-0-Properly-Rigid-Part-Connections\" | grep Thread | head -n1 | sed 's/Thread://g' | sed 's/^ *//' >> versions\necho \"\" >> versions\necho \"Deadly Reentry:\" >> versions\nlinks -dump \"http://forum.kerbalspaceprogram.com/threads/54954\" | grep Thread | head -n1 | sed 's/Thread://g' | sed 's/^ *//' >> versions\necho \"\" >> versions\necho \"Module Manager:\" >> versions\nlinks -dump \"http://forum.kerbalspaceprogram.com/threads/55219\" | grep Thread | head -n1 | sed 's/Thread://g' | sed 's/^ *//' >> versions\necho \"\" >> versions\necho \"Mechjeb:\" >> versions\nlinks -dump \"http://forum.kerbalspaceprogram.com/threads/12384\" | grep Thread | head -n1 | sed 's/Thread://g' | sed 's/^ *//' >> versions\necho \"\" >> versions\necho \"RealChute:\" >> versions\nlinks -dump \"http://forum.kerbalspaceprogram.com/threads/57988\" | grep Thread | head -n1 | sed 's/Thread://g' | sed 's/^ *//' >> versions\necho \"\" >> versions\necho \"Stage Recovery:\" >> versions\nlinks -dump \"http://forum.kerbalspaceprogram.com/threads/86677-0-25-StageRecovery-Recover-Funds-from-Dropped-Stages-v1-5-1-%2810-7-14%29\" | grep Thread | head -n1 | sed 's/Thread://g' | sed 's/^ *//' >> versions\necho \"\" >> versions\necho \"RemoteTech:\" >> versions\nlinks -dump \"http://forum.kerbalspaceprogram.com/threads/83305-0-25-0-RemoteTech-v1-5-0\" | grep Thread | head -n1 | sed 's/Thread://g' | sed 's/^ *//' >> versions\n\necho \"25% Complete\"\n\necho \"\" >> versions\necho \"Raster Prop Monitor:\" >> versions\nlinks -dump \"http://forum.kerbalspaceprogram.com/threads/57603\" | grep Thread | head -n1 | sed 's/Thread://g' | sed 's/^ *//' >> versions\necho \"\" >> versions\necho \"PartCatalog:\" >> versions\nlinks -dump \"http://forum.kerbalspaceprogram.com/threads/35018-0-24-2-PartCatalog-3-0-RC7-%282014-08-18%29\" | grep Thread | head -n1 | sed 's/Thread://g' | sed 's/^ *//' >> versions\necho \"\" >> versions\necho \"Kerbal Attachment System:\" >> versions\nlinks -dump \"http://forum.kerbalspaceprogram.com/threads/92514-0-24-2-Kerbal-Attachment-System-%28KAS%29-0-4-8-Fixed-for-0-24-2-x86-x64-%29\" | grep Thread | head -n1 | sed 's/Thread://g' | sed 's/^ *//' >> versions\necho \"\" >> versions\necho \"Scansat:\" >> versions\nlinks -dump \"http://forum.kerbalspaceprogram.com/threads/80369-0-24-SCANsat-v6-1-Real-Scanning-Real-Science-at-Warp-Speed\\x21-Jul-18\" | grep Thread | head -n1 | sed 's/Thread://g' | sed 's/^ *//' >> versions\necho \"\" >> versions\necho \"Infernal Robotics:\" >> versions\nlinks -dump \"http://forum.kerbalspaceprogram.com/threads/37707-0-24-2-Magic-Smoke-Industries-Infernal-Robotics-0-19-1\" | grep Thread | head -n1 | sed 's/Thread://g' | sed 's/^ *//' >> versions\necho \"\" >> versions\necho \"Procedural Wings:\" >> versions\nlinks -dump 
\"http://forum.kerbalspaceprogram.com/threads/29862\" | grep Thread | head -n1 | sed 's/Thread://g' | sed 's/^ *//' >> versions\necho \"\" >> versions\necho \"Toolbar:\" >> versions\nlinks -dump \"http://forum.kerbalspaceprogram.com/threads/60863\" | grep Thread | head -n1 | sed 's/Thread://g' | sed 's/^ *//' >> versions\necho \"\" >> versions\necho \"Kerbal Alarm Clock:\" >> versions\nlinks -dump \"http://forum.kerbalspaceprogram.com/threads/24786-0-24-2-Kerbal-Alarm-Clock-v2-7-8-2-%28July-28%29?p=1466194&viewfull=1#post1466194\" | grep Thread | head -n1 | sed 's/Thread://g' | sed 's/^ *//' >> versions\necho \"\" >> versions\necho \"Texure Replacer:\" >> versions\nlinks -dump \"http://forum.kerbalspaceprogram.com/threads/60961-0-24-2-TextureReplacer-1-7-1-(7-9-2014)-Female-name-detection\" | grep Thread | head -n1 | sed 's/Thread://g' | sed 's/^ *//' >> versions\necho \"\" >> versions\necho \"TAC Life Support:\" >> versions\nlinks -dump \"http://forum.kerbalspaceprogram.com/threads/40667-0-25-TAC-Life-Support-v0-10-1-10Oct-No-Win64-Support\" | grep Thread | head -n1 | sed 's/Thread://g' | sed 's/^ *//' >> versions\n\necho \"50% Complete\"\n\necho \"\" >> versions\necho \"Tweakscale:\" >> versions\nlinks -dump \"http://forum.kerbalspaceprogram.com/threads/80234\" | grep Thread | head -n1 | sed 's/Thread://g' | sed 's/^ *//' >> versions\necho \"\" >> versions\necho \"Near Future Tech:\" >> versions\nlinks -dump \"http://forum.kerbalspaceprogram.com/threads/52042-0-25-Near-Future-Technologies-%2810-10-14-All-packs-0-25-update-fixes%29\" | grep Thread | head -n1 | sed 's/Thread://g' | sed 's/^ *//' >> versions\necho \"\" >> versions\necho \"Procedural Fairings:\" >> versions\nlinks -dump \"http://forum.kerbalspaceprogram.com/threads/39512-0-24-2-Procedural-Fairings-3-09-procedural-costs-Win64-decoupler-fix-%28August-3%29\" | grep Thread | head -n1 | sed 's/Thread://g' | sed 's/^ *//' >> versions\necho \"\" >> versions\necho \"Connected Living Space:\" >> versions\nlinks -dump \"http://forum.kerbalspaceprogram.com/threads/70161-0-25-Connected-Living-Space-%28API%29-v1-0-9-0-%2811th-October-2014%29\" | grep Thread | head -n1 | sed 's/Thread://g' | sed 's/^ *//' >> versions\necho \"\" >> versions\necho \"TAC Fuel Balancer:\" >> versions\nlinks -dump \"http://forum.kerbalspaceprogram.com/threads/25823\" | grep Thread | head -n1 | sed 's/Thread://g' | sed 's/^ *//' >> versions\necho \"\" >> versions\necho \"Enhanced Nav Ball:\" >> versions\nlinks -dump \"http://forum.kerbalspaceprogram.com/threads/50524\" | grep Thread | head -n1 | sed 's/Thread://g' | sed 's/^ *//' >> versions\necho \"\" >> versions\necho \"Karbonite:\" >> versions\nlinks -dump \"http://forum.kerbalspaceprogram.com/threads/89401\" | grep Thread | head -n1 | sed 's/Thread://g' | sed 's/^ *//' >> versions\necho \"\" >> versions\necho \"KSP AVC:\" >> versions\nlinks -dump \"http://forum.kerbalspaceprogram.com/threads/79745-0-23-5-KSP-AVC-Add-on-Version-Checker-Plugin-KSP-AVC-Online\" | grep Thread | head -n1 | sed 's/Thread://g' | sed 's/^ *//' >> versions\n\necho \"75% Complete\"\n\necho \"\" >> versions\necho \"Science!:\" >> versions\nlinks -dump \"http://forum.kerbalspaceprogram.com/threads/96000\" | grep Thread | head -n1 | sed 's/Thread://g' | sed 's/^ *//' >> versions\necho \"\" >> versions\necho \"Docking Port Alignment Indicator:\" >> versions\nlinks -dump \"http://forum.kerbalspaceprogram.com/threads/54303-0-25-Navball-docking-alignment-indicator-v4\" | grep Thread | head -n1 | sed 's/Thread://g' | sed 's/^ *//' >> 
versions\necho \"\" >> versions\necho \"NovaPunch:\" >> versions\nlinks -dump \"http://forum.kerbalspaceprogram.com/threads/54303-0-25-Navball-docking-alignment-indicator-v4\" | grep Thread | head -n1 | sed 's/Thread://g' | sed 's/^ *//' >> versions\necho \"\" >> versions\necho \"B9:\" >> versions\nlinks -dump \"http://forum.kerbalspaceprogram.com/threads/92630\" | grep Thread | head -n1 | sed 's/Thread://g' | sed 's/^ *//' >> versions\necho \"\" >> versions\necho \"KSP Interstellar:\" >> versions\nlinks -dump \"http://forum.kerbalspaceprogram.com/threads/43839-0-25-KSP-Interstellar-%28Magnetic-Nozzles-ISRU-Revamp%29-Version-0-13\" | grep Thread | head -n1 | sed 's/Thread://g' | sed 's/^ *//' >> versions\necho \"\" >> versions\necho \"Simple Part Organizer:\" >> versions\nlinks -dump \"http://forum.kerbalspaceprogram.com/threads/62466-0-25-SimplePartOrganizer-v1-2-1\" | grep Thread | head -n1 | sed 's/Thread://g' | sed 's/^ *//' >> versions\necho \"\" >> versions\necho \"SelectRoot:\" >> versions\nlinks -dump \"http://forum.kerbalspaceprogram.com/threads/43208-0-24-Jul18-SelectRoot-Set-a-new-root-part-0-24-new-UI\" | grep Thread | head -n1 | sed 's/Thread://g' | sed 's/^ *//' >> versions\necho \"\" >> versions\necho \"Aviation Lights:\" >> versions\nlinks -dump \"http://forum.kerbalspaceprogram.com/threads/16925-0-23-X-Aviation-Lights-v3-6-%2803MAY14%29\" | grep Thread | head -n1 | sed 's/Thread://g' | sed 's/^ *//' >> versions\necho \"\" >> versions\necho \"KW Rocketry:\" >> versions\nlinks -dump \"http://forum.kerbalspaceprogram.com/threads/51037\" | grep Thread | head -n1 | sed 's/Thread://g' | sed 's/^ *//' >> versions\n\necho \"100% Complete!\"\n\ncat versions\n\n\n#links -dump \"http://forum.kerbalspaceprogram.com/threads/29862\"| grep \"Thread\" | head -n 1\n\n\n#escape ! with \\x21"
},
{
"alpha_fraction": 0.5277777910232544,
"alphanum_fraction": 0.5694444179534912,
"avg_line_length": 22.66666603088379,
"blob_id": "c17408f165f8f3dd7ad13941d78dd55aba8a7bd4",
"content_id": "23e33de60a8fa84d63313db0250280482c79d914",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 72,
"license_type": "no_license",
"max_line_length": 57,
"num_lines": 3,
"path": "/sw",
"repo_name": "detriment/my_scripts",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\n`cat /scripts/switches | grep $1 | sed s/$1// | head -n1` \n"
},
{
"alpha_fraction": 0.6009389758110046,
"alphanum_fraction": 0.68544602394104,
"avg_line_length": 18.454545974731445,
"blob_id": "ae18a232a172151006e7a3ab26815cbb743b2e29",
"content_id": "c94e21978ae85a7007df1954066da5846547959a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 213,
"license_type": "no_license",
"max_line_length": 34,
"num_lines": 11,
"path": "/python/function_w_input.py",
"repo_name": "detriment/my_scripts",
"src_encoding": "UTF-8",
"text": "def function(arg):\n\tif arg > 100:\n\t\tprint \"Greater than 100\"\n\t\treturn \"Greater than 100\"\n\telse:\n\t\tprint \"Less than 100\"\n\t\treturn \"Less than 100\"\n\nnumber = input(\"Enter a number: \")\nfunction(number);\n#function(500)"
},
{
"alpha_fraction": 0.5991058349609375,
"alphanum_fraction": 0.6174863576889038,
"avg_line_length": 30.920635223388672,
"blob_id": "a890789b0368e106331f7f500889d202a8645046",
"content_id": "f751652d31742b4e004743065d6dc3ad1a19e9da",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2013,
"license_type": "no_license",
"max_line_length": 99,
"num_lines": 63,
"path": "/python/trip.py",
"repo_name": "detriment/my_scripts",
"src_encoding": "UTF-8",
"text": "def plane_ride_cost(city):\n if city == \"Charlotte\":\n print \"Tickets are $183\"\n return 183\n elif city == \"Tampa\":\n print \"Tickets are $220\"\n return 220\n elif city == \"Pittsburgh\":\n print \"Tickets are $222\"\n return 222\n elif city == \"Los Angeles\":\n print \"Tickets are $475\"\n return 475\n else:\n print \"Not a valid destination!\"\n return\n \nrental_car_price = 40\ndef rental_car_cost(days):\n if days >= 7:\n total_rental_car_price = days * rental_car_price \n total_rental_car_price = total_rental_car_price - 50\n print \"Your rental car will cost $%s\" % total_rental_car_price\n return total_rental_car_price\n elif days >= 3:\n total_rental_car_price = days * rental_car_price \n total_rental_car_price = total_rental_car_price - 20\n print \"Your rental car will cost $%s\" % total_rental_car_price\n return total_rental_car_price\n elif days <= 2:\n total_rental_car_price = days * rental_car_price \n print \"Your rental car will cost $%s\" % total_rental_car_price\n return total_rental_car_price\n \ndef hotel_cost(nights):\n hotel_nightly_cost = 140 * nights\n print \"Your hotel stay will cost $%s\" % hotel_nightly_cost\n return hotel_nightly_cost\n \ncity_input = raw_input(\"Where are you flying?: \")\ndays_input = input(\"How many days are you staying?: \")\n\nplane = plane_ride_cost(city_input)\nrental = rental_car_cost(days_input)\nhotel = hotel_cost(days_input)\n\ntotal = plane + rental + hotel\nprint \"Your total Vacation expense will be $%s\" % total\n\n\n\"\"\"Below is what was required to pass the test on codeacademy, but I like the prompt version better\nprint plane\nprint rental\nprint hotel\n\ndef trip_cost(city, days):\n plane = plane_ride_cost(city)\n rental = rental_car_cost(days)\n hotel = hotel_cost(days)\n return plane + rental + hotel\n \n#trip_cost('Charlotte', 2)\n\"\"\"\n\n\n"
},
{
"alpha_fraction": 0.6115485429763794,
"alphanum_fraction": 0.6141732335090637,
"avg_line_length": 28.30769157409668,
"blob_id": "ae22f58ee43519edd66073ed48136433e98ff0b0",
"content_id": "bdb68b3fcfe1e883456e383fbae7dca4c0cc6946",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 381,
"license_type": "no_license",
"max_line_length": 170,
"num_lines": 13,
"path": "/imagevenue.downloader/downloader.sh",
"repo_name": "detriment/my_scripts",
"src_encoding": "UTF-8",
"text": "rm -rf urls.to.download.txt\n\nfor urlin in $(cat temp2.txt)\ndo\ndomain=`echo $urlin | sed 's,[?].*,,' | sed 's,img.php,,g'`\n\nimageurl=`lynx -source $urlin | grep 'SRC=\"' | grep .JPG | sed 's,<a href=\"#\" onClick=\"showOnclick()\"><img id=\"thepic\" onLoad=\"scaleImg();\" SRC=\",,g' | sed 's/\\\".*$//'`\n\necho $domain$imageurl >> urls.to.download.txt\n\ndone\n\nwget -nc -i urls.to.download.txt\n"
},
{
"alpha_fraction": 0.5120481848716736,
"alphanum_fraction": 0.6566265225410461,
"avg_line_length": 17.44444465637207,
"blob_id": "e58efd9932669b57d8a61b145330f3da303bfa1c",
"content_id": "e0082ac436cc440eccabfb9438daf4920ac991e5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 166,
"license_type": "no_license",
"max_line_length": 37,
"num_lines": 9,
"path": "/ipv6test",
"repo_name": "detriment/my_scripts",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\nip6=2604:4500::3465/121\necho $ip6\nif [ 'echo $ip6 | grep -v \\' ];then\necho \"a / has been found\"\nelse\necho \"a slash has not been found\"\nfi\n"
},
{
"alpha_fraction": 0.6356589198112488,
"alphanum_fraction": 0.6705426573753357,
"avg_line_length": 12.578947067260742,
"blob_id": "e4e484e3cfc3b10ba56f18ee8112c40ce772e039",
"content_id": "7806fd3a71ffa3e16a054f52b009d9d268d55f1d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 258,
"license_type": "no_license",
"max_line_length": 48,
"num_lines": 19,
"path": "/finish.sh",
"repo_name": "detriment/my_scripts",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n#\n# File:\t\tfinish.sh\n#\n# Purpose:\tLess typing is your friend\n#\n# Version:\t1.0\n#\n# Author:\tNate Kennison\n#\t\t\t<[email protected]>\n#\n# Date: \tMarch 13th, 2014\n#\n#\n# License:\tGPL version 2\n\ndhclient\nwget scripts.hivelocity.net/harden/finishcent.sh\nbash finishcent.sh\n"
},
{
"alpha_fraction": 0.6064516305923462,
"alphanum_fraction": 0.6193548440933228,
"avg_line_length": 34.181819915771484,
"blob_id": "633bdb7b7ea4a45fe9e5ab46399b3377980632c8",
"content_id": "f4dbbb64a3c2b6c2be3a6a2898d396571883609c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 775,
"license_type": "no_license",
"max_line_length": 144,
"num_lines": 22,
"path": "/fappeningdl.sh",
"repo_name": "detriment/my_scripts",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n#Automate downlaoding images from thefappening.so\n#may work with other blogs as well\n#Created 9/28/14\n#Author: Nate Kennison\n\necho \"URL?\"\nread url\n\necho \"Name?\"\nread name\n\nmkdir /Users/nate/\"Google Drive\"/Pictures/\"$name\" >/dev/null 2>&1\ncd /Users/nate/\"Google Drive\"/Pictures/\"$name\"\n\nlynx -source $url | grep .jpg | sed 's,<p><a href=\",,g' | cut -f1 -d\">\" | sed 's/\"//g' >> temp ;wget -nc -i temp | grep Downloaded ; rm -rf temp\nlynx -source $url | grep .gif | sed 's,<p><a href=\",,g' | cut -f1 -d\">\" | sed 's/\"//g' >> temp ;wget -nc -i temp | grep Downloaded ; rm -rf temp\nlynx -source $url | grep .png | sed 's,<p><a href=\",,g' | cut -f1 -d\">\" | sed 's/\"//g' >> temp ;wget -nc -i temp | grep Downloaded ; rm -rf temp\n\ndir=`pwd`\necho \"Download Directory: $dir\"\nexit\n\n"
},
{
"alpha_fraction": 0.5139859914779663,
"alphanum_fraction": 0.5192307829856873,
"avg_line_length": 26.285715103149414,
"blob_id": "9640c6f565bce6d46a58c2e32a443e1b55ab10e2",
"content_id": "c64528ee4d73d9f3e3b6a25937e2d3b588dd85b2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 572,
"license_type": "no_license",
"max_line_length": 102,
"num_lines": 21,
"path": "/ksp/checkmyversions.old",
"repo_name": "detriment/my_scripts",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\nIFS=$'\\n'\n\nfor VER in `cat modlist-installed`\ndo\n\tname=$(echo $VER | awk '{print $1}' | sed 's/_/ /g')\n\tversion=$(echo $VER | awk '{print $2}')\n\techo \"\"\n\techo \"______________________________________________________________________________________________\"\n\t#echo $name\n\tversion_checked=$(cat versions.new | grep \"$name\" | grep \"$version\")\n\tif [[ -z ${version_checked} ]];then\n\t\tcat versions.new | grep -A 1 $name | sed 's/$name//g'\n\t\techo \"Versions Do Not Match\"\n\telse\n\t\techo $name\n\t\techo \"Versions Match\"\n\tfi\n\techo \"Currently Installed Verson: $version\"\ndone"
},
{
"alpha_fraction": 0.6850828528404236,
"alphanum_fraction": 0.6919889450073242,
"avg_line_length": 18.513513565063477,
"blob_id": "48cf56481799d749ac003899e68a4050206ea945",
"content_id": "5ae72b4ffeed7e5691ef67832df30e600bbdf133",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 724,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 37,
"path": "/mkthumb",
"repo_name": "detriment/my_scripts",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n#\n#\n#\n\nif [ ! \"$BASH_VERSION\" ] ; then\n exec /bin/bash \"$0\" \"$@\"\nfi\nfunction mkstick () {\n\nsudo diskutil list\necho \"What is the disk number of the thumbstick? (ie /dev/sdX)\"\nread diskno\nsudo diskutil unmountDisk force /dev/disk$diskno\necho \"Writing Utility-Thumbstick.img to /dev/disk$diskno\"\nsudo dd bs=512 if=/Users/nate/Desktop/Utility-thumbstick.img of=/dev/disk$diskno\nsay thumbstick is done\nsudo diskutil unmountDisk force /dev/disk$diskno\necho \"USB Stick is done!\"\necho \"Would you like to make another? (y/n)\"\nread another\n\nif [[ ! `echo $another | egrep '(y|yes|n|no)'` ]]; then\n\n\techo \"Please answer with y or n:\"\n\tread another \nfi\n\nif [[ $another = y ]]; then\n\tmkstick\nelse\n\texit 0\nfi\n\n}\n\nmkstick\n\n\n"
},
{
"alpha_fraction": 0.6177042722702026,
"alphanum_fraction": 0.6507782340049744,
"avg_line_length": 26.399999618530273,
"blob_id": "28266be3685f299d36e57c128e8ee495734ea038",
"content_id": "1fc823bc175eba2ae1b9c0304f34d8ec1b531344",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 2056,
"license_type": "no_license",
"max_line_length": 111,
"num_lines": 75,
"path": "/examples/loops.sh",
"repo_name": "detriment/my_scripts",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n# Listing the planets.\n\nfor planet in Mercury Venus Earth Mars Jupiter Saturn Uranus Neptune Pluto\ndo\n echo $planet # Each planet on a separate line.\ndone\n\necho; echo\n\nfor planet in \"Mercury Venus Earth Mars Jupiter Saturn Uranus Neptune Pluto\"\n # All planets on same line.\n # Entire 'list' enclosed in quotes creates a single variable.\n # Why? Whitespace incorporated into the variable.\ndo\n echo $planet\ndone\n\necho; echo \"Whoops! Pluto is no longer a planet!\"\n\nfor loop in 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20\ndo\necho \"$loop is a number\"\ndone\n\n# Associate the name of each planet with its distance from the sun.\n\necho \"\"\n\nfor planet in \"Mercury 36\" \"Venus 67\" \"Earth 93\" \"Mars 142\" \"Jupiter 483\"\ndo\n set -- $planet # Parses variable \"planet\"\n #+ and sets positional parameters.\n # The \"--\" prevents nasty surprises if $planet is null or\n #+ begins with a dash.\n\n # May need to save original positional parameters,\n #+ since they get overwritten.\n # One way of doing this is to use an array,\n # original_params=(\"$@\")\n\n echo \"$1\t\t$2,000,000 miles from the sun\"\n #-------two tabs---concatenate zeroes onto parameter $2\ndone\n\n# (Thanks, S.C., for additional clarification.)\n\nFILES=\"/usr/sbin/accept\n/usr/sbin/pwck\n/usr/sbin/chroot\n/usr/bin/fakefile\n/sbin/badblocks\n/sbin/ypbind\" # List of files you are curious about.\n # Threw in a dummy file, /usr/bin/fakefile.\n\necho\n\nfor file in $FILES\ndo\n\n if [ ! -e \"$file\" ] # Check if file exists.\n then\n echo \"$file does not exist.\"; echo\n continue # On to next.\n fi\n\n ls -l $file | awk '{ print $8 \" file size: \" $5 }' # Print 2 fields.\n whatis `basename $file` # File info.\n # Note that the whatis database needs to have been set up for this to work.\n # To do this, as root run /usr/bin/makewhatis.\n echo\ndone \n\n#ping IP range (slow as shit, but works.)\nfor a in `seq 255`;do ping -o -c 1 -t 1 192.168.0.$a | grep \"bytes from\" | awk '{print $4}' | sed 's/://g';done\n\n"
},
{
"alpha_fraction": 0.636911928653717,
"alphanum_fraction": 0.6441496014595032,
"avg_line_length": 20.842105865478516,
"blob_id": "20af5e8bd7a64000de9f5687dc8b36906cf2afb0",
"content_id": "c1889b170b83453e7da2340deec6bf66ee1b44a9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 829,
"license_type": "no_license",
"max_line_length": 114,
"num_lines": 38,
"path": "/ksp/newscript",
"repo_name": "detriment/my_scripts",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\nfunction newlist () {\n\trm -rf /scripts/ksp/mods.tsv\n\tmv ~/Downloads/KSP\\ Mods\\ -\\ Sheet1.tsv /scripts/ksp/mods.tsv\n\texpand -t 1 mods.tsv >> mods.temp\n\ttail +3 mods.temp > mods.list\n\trm -rf mods.tsv\n}\n\necho \"New Tab delimited file from google docs? (y/n)\"\nread answer\nif [[ $answer == y ]];then\n\tnewlist\nfi\n\nIFS=$'\\n'\n\n: > changes\nmv versions.new versions.old\n\nfor URL in `cat /scripts/ksp/mods.list`\ndo\n\tname=$(echo $URL | awk '{print $1}' | sed 's/_/ /g')\n\tlink=$(echo $URL | awk '{print $5}')\n\techo $URL\n\techo \"\" >> versions.new\n\techo $name >> versions.new\n\t /usr/local/bin/links -dump $link | grep \"Thread\" | head -n1 | sed 's/Thread://g' | sed 's/^ *//' >> versions.new\ndone\n\ndiff versions.new versions.old\n\necho \"Run version checker script? (y/n)\"\nread answer\nif [[ $answer == y ]];then\n\tbash checkmyversions\nfi"
},
{
"alpha_fraction": 0.4930555522441864,
"alphanum_fraction": 0.5347222089767456,
"avg_line_length": 7.470588207244873,
"blob_id": "5f9837b20bef5673012a11ab51c40fbd827b2bbc",
"content_id": "d6363924bc9c0542d267acc925ca276700d57664",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 144,
"license_type": "no_license",
"max_line_length": 23,
"num_lines": 17,
"path": "/examples/case",
"repo_name": "detriment/my_scripts",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\n\necho \"press 1, 2, or 3\"\nread variable\n\ncase $variable in\n\t\"1\" )\n\t\techo one\n\t;;\n\t\"2\" )\n\t\techo two\n\t;;\t\n\t\"3\" )\n\techo three\n\t;;\nesac\n"
},
{
"alpha_fraction": 0.8413792848587036,
"alphanum_fraction": 0.8482758402824402,
"avg_line_length": 47.33333206176758,
"blob_id": "d7c1e4b0293cc37e24799235d4acdbc6d92b77e9",
"content_id": "e183e041d731fc915aa484b4851070d79fa3dac2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 145,
"license_type": "no_license",
"max_line_length": 131,
"num_lines": 3,
"path": "/list-downloads",
"repo_name": "detriment/my_scripts",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\nsqlite3 ~/Library/Preferences/com.apple.LaunchServices.QuarantineEventsV* 'select LSQuarantineDataURLString from LSQuarantineEvent'\n"
},
{
"alpha_fraction": 0.6696428656578064,
"alphanum_fraction": 0.6964285969734192,
"avg_line_length": 9.272727012634277,
"blob_id": "5ca61810e3405779ed90bdd4b3ed1e1cc8e89db3",
"content_id": "4c257f32f0442c5738a0d7d5b6428a8bcdeed76a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 112,
"license_type": "no_license",
"max_line_length": 28,
"num_lines": 11,
"path": "/learning/passing-variables/1",
"repo_name": "detriment/my_scripts",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\nvariable=variable\n\necho $variable \"in script 1\"\necho \"switches:\" $1\n\n. 2\n\necho $export\necho $ipmiip"
},
{
"alpha_fraction": 0.6423841118812561,
"alphanum_fraction": 0.6556291580200195,
"avg_line_length": 13.899999618530273,
"blob_id": "e6138aabc57e218b4d4522260e2a8180679bdd5e",
"content_id": "9349815bb369c914d884f1165f0e2889a84978fe",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 151,
"license_type": "no_license",
"max_line_length": 38,
"num_lines": 10,
"path": "/learning/exit_codes.sh",
"repo_name": "detriment/my_scripts",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\necho \"Errors will output to error.log\"\necho \"Do you want this to error out?\"\nread err\n\ncase $err in\n\tyes) exit 2 ;;\n\tno) exit 0 ;;\nesac\n\n\n"
},
{
"alpha_fraction": 0.6998654007911682,
"alphanum_fraction": 0.7285778522491455,
"avg_line_length": 37.39655303955078,
"blob_id": "29f7b425df419efc6ab2d4d480b996ee5816036d",
"content_id": "9077046382c2058727739e361859ee83cd9ce130",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 2229,
"license_type": "no_license",
"max_line_length": 147,
"num_lines": 58,
"path": "/ksp/script.old",
"repo_name": "detriment/my_scripts",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n#\n# 2 part script. First part checks 2 update threads for overall version changes\n# Second part checks individual release threads for changes\n#\n# If updates are found, an e-mail is dispatched via mail to the user.\n#\n# This section checks the following 2 forum posts and updates to KSP mods:\n#\n# http://forum.kerbalspaceprogram.com/threads/95750-Official-Mod-Compatibility-Thread-for-25\n# http://forum.kerbalspaceprogram.com/threads/55401-Community-Mods-and-Plugins-Library\n\n: > changes\n\ntouch modlist1.old\ntouch modlist2.old\ncp modlist1.new modlist1.old &> /dev/null\n /usr/local/bin/links -dump http://forum.kerbalspaceprogram.com/threads/95750-Official-Mod-Compatibility-Thread-for-25 |head -n 260 > modlist-dump1\ncat modlist-dump1 | sed -n '/Known Compatible/,$p' modlist-dump1 > modlist-top-cut1\n/usr/local/bin/tac modlist-top-cut1 | sed -n '/Reply With Quote/q;p' modlist-top-cut1 > modlist1.new\nrm -rf modlist-top-cut1 modlist-dump1\ndiff modlist1.new modlist1.old > changes\n\ncp modlist2.new modlist2.old &> /dev/null\n /usr/local/bin/links -dump http://forum.kerbalspaceprogram.com/threads/55401-Community-Mods-and-Plugins-Library |head -n 750 > modlist-dump2\ncat modlist-dump2 | sed -n '/Last Updated/,$p' modlist-dump2 > modlist-top-cut2\n/usr/local/bin/tac modlist-top-cut2 | sed -n '/Reply With Quote/q;p' modlist-top-cut2 > modlist2.new\nrm -rf modlist-top-cut2 modlist-dump2\ndiff modlist2.new modlist2.old >> changes\n\n\n# This section runs the text file \"url\" and checks indivual release threads for different mods\nmv versions.new versions.old &> /dev/null\n\n# Set delimeter to new line\nIFS=$'\\n'\n\nfor URL in `cat urls`\ndo\n\tname=$(echo $URL | awk '{print $1}' | sed 's/_/ /g')\n\tlink=$(echo $URL | awk '{print $2}')\n\techo \"\" >> versions.new\n\techo $name >> versions.new\n\t /usr/local/bin/links -dump $link | grep \"Thread\" | head -n1 | sed 's/Thread://g' | sed 's/^ *//' >> versions.new\ndone\n\ndiff versions.new versions.old >> changes\n\nif [ -s changes ];then \n\tcat changes |mail -s changes nate\nfi\n\ntouch /var/log/scripts.log\necho \"KSP version checker script ran on `date`\" >> /var/log/scripts.log\nif [ -s changes ];then \n\techo \"Detected Changes:\" >> /var/log/scripts.log\n\tcat changes >> /var/log/scripts.log\nfi\n\n\n"
},
{
"alpha_fraction": 0.5975610017776489,
"alphanum_fraction": 0.5975610017776489,
"avg_line_length": 8.222222328186035,
"blob_id": "a4b9d6893856adca2adf066276ef1f3913baf937",
"content_id": "1b006bd872173856d519f974455b36465dcd3970",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 82,
"license_type": "no_license",
"max_line_length": 25,
"num_lines": 9,
"path": "/learning/all-in-one/1",
"repo_name": "detriment/my_scripts",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\necho \"enter text\"\nread text\n\n\nif [[ -n ${text} ]]; then\necho $text\nfi"
},
{
"alpha_fraction": 0.5506072640419006,
"alphanum_fraction": 0.6369770765304565,
"avg_line_length": 29.91666603088379,
"blob_id": "3c8ee5c9d9f78bca45f578b22288485107f7fece",
"content_id": "b00f104b68fb89931188608e892acf2035568264",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 741,
"license_type": "no_license",
"max_line_length": 369,
"num_lines": 24,
"path": "/learning/loops/finishdebian.sh",
"repo_name": "detriment/my_scripts",
"src_encoding": "UTF-8",
"text": "if [[ `egrep -i 'CentOS|Redhat|Fedora' /etc/redhat-release` ]]; then\n\n os=`(cat /etc/redhat-release)`\nfi\n\nfor distro in \"utopic Ubuntu 14.10\" \"trusty Ubuntu 14.04 LTS\" \"saucy Ubuntu 13.10\" \"precise Ubuntu 12.04.x LTS\" \"raring Ubuntu 13.04\" \"ucid Ubuntu 10.04 LTS\" \"quantal Ubuntu 12.10\" \"oneiric Ubuntu 11.10\" \"natty Ubuntu 11.04\" \"maverick Ubuntu 10.10\" \"karmic Ubuntu 9.10\" \"jaunty Ubuntu 9.04\" \"wheezy Debian 7.0\" \"squeeze Debian 6.0\" \"lenny Debian 5.0\" \"etch Debian 4.0\" \ndo\n\tset -- $distro\n\tif [[ `cat /etc/apt/sources.list | grep $1` ]];then\n\t\tos=$\"$2 $3 $4\"\n\tfi\n# if [ `cat /etc/apt/sources.list | grep $1` ]; then\n# if [ ! -e \"$distro\" ]\n# then\n# echo $2 $3 $4 $5\n# continue\n# fi\ndone\n#echo $distros\n\n\necho $os\n\nexit 0"
},
{
"alpha_fraction": 0.6342856884002686,
"alphanum_fraction": 0.6342856884002686,
"avg_line_length": 10,
"blob_id": "6ca7cd2faa1b28dd7698838d5639b954b3a8a6c7",
"content_id": "716c83b53a4a746317374655e21ff0566e481e96",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 175,
"license_type": "no_license",
"max_line_length": 25,
"num_lines": 16,
"path": "/examples/grep-if-check",
"repo_name": "detriment/my_scripts",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\necho \"enter text\"\nread text\n\n#with else\nif [[ -n ${text} ]]; then\necho $text\nelse\necho \"nothing entered\"\nfi\n\n#without else\nif [[ -n ${text} ]]; then\necho $text\nfi"
},
{
"alpha_fraction": 0.6232814192771912,
"alphanum_fraction": 0.6306141018867493,
"avg_line_length": 15.015625,
"blob_id": "dca6a98d0fb18a7ad99eae9a82ce6938988b9dad",
"content_id": "27d226bc44622a32ca4d83b8d43b1e61b8232227",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 1091,
"license_type": "no_license",
"max_line_length": 66,
"num_lines": 64,
"path": "/initscript.sh",
"repo_name": "detriment/my_scripts",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\r\n# chkconfig: 35 99 10\r\n# description: Some description\r\n# processname: initsample\r\n#\t/etc/rc.d/init.d/initsample\r\n \r\n# Source function library.\r\n. /etc/init.d/functions\r\n \r\n#<define any local shell functions used by the code that follows>\r\n \r\n\r\n\r\nlockfile=/var/lock/subsys/initsample\r\n \r\nstart() {\r\n\t#touch $lockfile\r\n\ttouch /var/lock/subsys/initsample\r\n\techo -n \"Starting initsample\"\r\n\t#start daemons, perhaps with the daemon function>\r\n\r\n\t\r\n\tsuccess\r\n}\t\r\n \r\nstop() {\r\n\t#rm -f $lockfile\r\n\trm -f /var/lock/subsys/initsample\r\n\techo -n \"Stopping initsample\"\r\n\t#start daemons, perhaps with the daemon function>\r\n\r\n\t\r\n\tsuccess\r\n}\r\n \r\ncase \"$1\" in\r\n start)\r\n\tstart\r\n\t;;\r\n stop)\r\n\tstop\r\n\t;;\r\n status)\r\n\techo \"Not applied to service\"\r\n\t;;\r\n restart)\r\n \tstop\r\n\tstart\r\n\t;;\r\n reload)\t\r\n\techo \"Not applied to service\"\r\n\t;;\r\n condrestart)\r\n \t#<Restarts the servce if it is already running. For example:>\r\n\techo \"Not applied to service\"\r\n\t;;\r\n probe)\r\n\t;;\r\n *)\r\n\techo \"Usage: initsample{start|stop|status|reload|restart[|probe]\"\r\n\texit 1\r\n\t;;\r\nesac\r\nexit $?\r\n\r\n"
},
{
"alpha_fraction": 0.6089552044868469,
"alphanum_fraction": 0.6268656849861145,
"avg_line_length": 16.6842098236084,
"blob_id": "74147808659254cc338ed7626c42e42e4a30e96f",
"content_id": "37558a01899f18debb85b3bf586c6cde42c4d261",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 335,
"license_type": "no_license",
"max_line_length": 48,
"num_lines": 19,
"path": "/case.sh",
"repo_name": "detriment/my_scripts",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\nclear\n\necho \"Do the thing? (y/n)\"\nread answer\nclear\n\ncase \"$answer\" in\n\ty|yes|Yes|YES) \n\t\techo \"you selected yes\" \n\t\techo \"yes starts with the letter y\"\n\t\tls -alh\n\t;;\n\tn|no|No|NO) echo \"you selected no\" ;;\n\t[0-9].[0-9]) echo \"floating number detected\" ;;\n\t[0-9]) echo \"numeral detected\" ;;\n\t*) echo \"Bad selection\" ;;\nesac"
},
{
"alpha_fraction": 0.6373015642166138,
"alphanum_fraction": 0.6476190686225891,
"avg_line_length": 17.060606002807617,
"blob_id": "40f91040ab8b55335be289da653c2ba7891a664e",
"content_id": "4e8c9e1fe0f6b3aca2706cea330269cba65cd235",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 1260,
"license_type": "no_license",
"max_line_length": 112,
"num_lines": 66,
"path": "/startmining.sh",
"repo_name": "detriment/my_scripts",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\r\n# chkconfig: 35 99 10\r\n# description: Some description\r\n# processname: startmining\r\n#\t/etc/rc.d/init.d/startmining\r\n \r\n# Source function library.\r\n. /etc/init.d/functions\r\n \r\n#<define any local shell functions used by the code that follows>\r\n \r\nNO=`cat /root/position`\r\n\r\nlockfile=/var/lock/subsys/startmining\r\n \r\nstart() {\r\n\t#touch $lockfile\r\n\ttouch /var/lock/subsys/startmining\r\n\techo -n \"Starting startmining\"\r\n\t#start daemons, perhaps with the daemon function>\r\n\t\r\n\t/usr/bin/screen -dmS burnin ~/src/jhPrimeminer/jhprimeminer -o http://ypool.net:10034 -u detriment.work$NO -p x\r\n\t\r\n\tsuccess\r\n}\t\r\n \r\nstop() {\r\n\t#rm -f $lockfile\r\n\trm -f /var/lock/subsys/startmining\r\n\techo -n \"Stopping startmining\"\r\n\t#start daemons, perhaps with the daemon function>\r\n\r\n\tkillall jhprimeminer\r\n\t\r\n\tsuccess\r\n}\r\n \r\ncase \"$1\" in\r\n start)\r\n\tstart\r\n\t;;\r\n stop)\r\n\tstop\r\n\t;;\r\n status)\r\n\techo \"Not applied to service\"\r\n\t;;\r\n restart)\r\n \tstop\r\n\tstart\r\n\t;;\r\n reload)\t\r\n\techo \"Not applied to service\"\r\n\t;;\r\n condrestart)\r\n \t#<Restarts the servce if it is already running. For example:>\r\n\techo \"Not applied to service\"\r\n\t;;\r\n probe)\r\n\t;;\r\n *)\r\n\techo \"Usage: startmining{start|stop|status|reload|restart[|probe]\"\r\n\texit 1\r\n\t;;\r\nesac\r\nexit $?\r\n\r\n"
},
{
"alpha_fraction": 0.5320181250572205,
"alphanum_fraction": 0.5831177234649658,
"avg_line_length": 26.60714340209961,
"blob_id": "dd222a88d4ede473897eac995a76ecb2c4ac328e",
"content_id": "36c7b8c2b8a16afdd033e8b1ba199313d2c1afa2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 3092,
"license_type": "no_license",
"max_line_length": 96,
"num_lines": 112,
"path": "/ipmi-script-for-utility-drive.sh",
"repo_name": "detriment/my_scripts",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n#\n# Bash script to automate configuration of Supermicro IPMI in Linux.\n#\n# Last Edited Date: 10.24.2014\n#\n# Author: Nate Kennison\n#\n# Version 1.7.0\n\nclear\n\nConfig_IPMI () {\n\n service ipmi start >/dev/null 2>&1\n #auto set gateway for private IPMI addresses\n IFS=. read ip1 ip2 ip3 ip4 <<< \"$IPMIIP\"\n \n if [[ `echo $ip4 | egrep '^([2-9]|[1-5][0-9]|6[0-2])$'` ]]; then\n IPMIGW=1\n \n elif [[ `echo $ip4 | egrep '^(6[6-9]|[7-9][0-9]|1[0-1][0-9]|12[0-6])$'` ]]; then\n IPMIGW=65\n \n elif [[ `echo $ip4 | egrep '^(1[3-8][0-9]|190)$'` ]]; then\n IPMIGW=129\n \n elif [[ `echo $ip4 | egrep '^(19[4-9]|2[0-5][0-9]|25[0-4])$'` ]]; then\n IPMIGW=193\n fi\n \n #automatically set subnet mask or ask if not private IPMI address\n if [[ `echo $IPMIIP | grep -E '10\\.[0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}'` ]]; then\n IPMIMASK=192\n else\n echo 'Subnet mask last octet (ie. 255.255.255.x) replace X'\n read IPMIMASK\n \n echo 'Gateway last octet (ie. x.x.x.y) replace y'\n read IPMIGW\n fi\n \n #Use passed password variable or set password variable\n if [[ -n ${PW} ]]; then\n echo \"Password already set.\"\n else\n echo \"Server Password:\"\n read PW\n fi\n \n sleep 1\n clear\n \n #determine and set architecture\n UNAME64=$(uname -ar | grep 64)\n UNAME686=$(uname -ar | grep 686)\n \n if [ \"$UNAME64\" ]; then\n arch=x86_64\n dir=64bit\n elif [ \"$UNAME686\" ]; then\n arch=x86\n dir=32bit\n fi\n\n #Check if Dell or Supermicro\n if [[ `/root/ipmicfg/linux/64bit/ipmicfg-linux.x86_64 -user list | egrep 'root'` ]]; then\n hw=dell\n \n elif [[ `/root/ipmicfg/linux/64bit/ipmicfg-linux.x86_64 -user list | egrep 'ADMIN'` ]]; then\n hw=supermicro \n fi\n \n if [[ `echo $hw | egrep dell` ]]; then\n passwd=root\n \n elif [[ `echo $hw | egrep supermicro` ]]; then\n passwd=ADMIN\n fi\n \n #Set the IPMI config\n ipmiutil smcoem lanport dedicated\n chmod +x /root/ipmicfg/linux/$dir/ipmicfg-linux.$arch\n /root/ipmicfg/linux/$dir/ipmicfg-linux.$arch -dhcp off\n /root/ipmicfg/linux/$dir/ipmicfg-linux.$arch -m $IPMIIP\n /root/ipmicfg/linux/$dir/ipmicfg-linux.$arch -k 255.255.255.$IPMIMASK\n /root/ipmicfg/linux/$dir/ipmicfg-linux.$arch -g ${IPMIIP%.*}.$IPMIGW\n /root/ipmicfg/linux/$dir/ipmicfg-linux.$arch -user del 3\n /root/ipmicfg/linux/$dir/ipmicfg-linux.$arch -user add 3 user $PW 3\n /root/ipmicfg/linux/$dir/ipmicfg-linux.$arch -user setpwd 2 $passwd\n /root/ipmicfg/linux/$dir/ipmicfg-linux.$arch -user setpwd 1 $passwd\n /root/ipmicfg/linux/$dir/ipmicfg-linux.$arch -user level 1 1\n /root/ipmicfg/linux/$dir/ipmicfg-linux.$arch -user list\n\n}\n\n#ask some questions and set the ip variables up\necho 'IP address for IPMI?'\nread IPMIIP\n\nif [[ -z ${IPMIIP} ]]; then\n echo \"Skipping IPMI\"\n sleep 1\n rm -rf ipmi.sh\nelse\n Config_IPMI\nfi\n\n#go home and clean up after yourself!\nsleep 2\ncd\nrm -rf ipmi.sh\n"
},
{
"alpha_fraction": 0.5796610116958618,
"alphanum_fraction": 0.6101694703102112,
"avg_line_length": 16.352941513061523,
"blob_id": "b82dc0e16e72917a349df5ee741c382c95c159ee",
"content_id": "86127c891e8fd0f7e041cbd40c19291bac715c78",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 295,
"license_type": "no_license",
"max_line_length": 74,
"num_lines": 17,
"path": "/pingrange",
"repo_name": "detriment/my_scripts",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\n#Force bash\nif [ ! \"$BASH_VERSION\" ] ; then\n exec /bin/bash \"$0\" \"$@\"\nfi\nif [[ -z `echo $@` ]];then\n\n\techo \"Ping a range of IPs in the same subnet.\"\n\techo \"Please enter the IP range you would like to ping. ie. 10.0.0.1-25\"\n\tread range\nelse\n\trange=$@\nfi\nnmap -sP $range \n\nexit 0\n"
},
{
"alpha_fraction": 0.6289004683494568,
"alphanum_fraction": 0.6296433806419373,
"avg_line_length": 23.035715103149414,
"blob_id": "5101ad0bbbf2364ee80834d9138970227673db38",
"content_id": "3a29b8025b92274c71464770ae83a49433eebd45",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 2692,
"license_type": "no_license",
"max_line_length": 226,
"num_lines": 112,
"path": "/imgurdl",
"repo_name": "detriment/my_scripts",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n#\n# auto downloads images from an imgur album\n#\n#\n\n#Force bash\nif [ ! \"$BASH_VERSION\" ] ; then\n exec /bin/bash \"$0\" \"$@\"\nfi\n\n#Help (-h flag) menu\nif [[ $@ == -h ]];then\necho \"Arguments:\"\necho \"-h -------- Help (This Menu)\"\necho \"-r -------- Repeat Same Album Mode\"\necho \"-m -------- Multiple Album Mode\"\necho \"-d -------- Specify Download Directory\"\nexit 0\nfi\n\n#imgur album download function\nfunction download.album () {\n\t#Ask for URL\n\techo \"enter URL:\"\n\tread enteredurl\n\t#convert any album URL to our needed URL\n\talbum=`echo $enteredurl | sed 's,http://imgur.com/a/,,g' | sed 's,https://imgur.com/a/,,g' | sed 's,http://imgur.com/gallery/,,g' | sed 's,https://imgur.com/gallery/,,g' | sed 's,[/].*,,' | sed 's,[#].*,,'`\n\t#Set our URL\n\turl=\"http://imgur.com/a/$album/layout/blog\"\n\t\n\t#If multiple album mode, unset directory name\n\tif [[ $mode == m ]];then\n\t\tunset dir_name\n\tfi\n\t\n\t#If album name is unset, set album name.\n\tif [[ -z $dir_name ]];then\n\t\t#pull album title from umgur\n\t\tpulledname=`lynx -source $url |grep \"</title>\" | sed 's, - Album on Imgur</title>,,g'`\n\t\t#delete whitespace\n\t\tpossiblename=\"$(echo -e \"${pulledname}\" | sed -e 's/^[[:space:]]*//')\"\n\t\techo \"Enter album name ($possiblename):\"\n\t\tread dir_name\n\t\t#if left blank, use pulled album title\n\t\tif [[ -z $dir_name ]];then\n\t\t\tdir_name=`echo $possiblename`\n\t\tfi\n\tfi\n\n\t#Set custom download directory if -d flag is passed, otherwise use default download directory.\n\tif [[ -z $root_dir ]];then\n\t\tif [[ $mode == d ]];then\n\t\t\techo \"Download Directory?:\"\n\t\t\tread root_dir\n\t\telse\n\t\troot_dir=/Users/nate/Dropbox/Pictures\n\t\tfi\n\tfi\n\t\n\t#create album's download directory\n\tcd\n\tmkdir \"$root_dir\"/\"$dir_name\"\n\tcd \"$root_dir\"/\"$dir_name\"\n\t\n\t#download the images\n\tlynx -source $url | grep content= | grep i.imgur.com | grep og:image | grep -v \"?fb\" | sed 's,<meta property=\"og:image\" content=\",,g' | sed 's,\" />,,g' >> source ; wget -nc -i source |grep \"Downloaded:\" ; rm -rf source\n\n\t#output to terminal where images were downloaded\n\techo \"Images downloaded to `pwd`\"\n\n\t#repeat if repeat flag is set\n\tif [[ $repeat == yes ]];then\n\t\techo \"Go again? (y/n)\"\n\t\tread answer\n\tfi\n\t#repeat if Y is pressed.\n\tif [[ $answer == y ]];then\n\t\texport repeat=yes\n\tfi\n\t#if anything else is pressed, disable repeat and let script end.\n\tif [[ $answer == n ]];then\n\t\tunset repeat\n\tfi\n\t#while loop to repeat the function.\n\twhile [[ $repeat == yes ]]\n\tdo\n\t\tdownload.album\n\tdone\n}\n\n#multi album mode flag\nif [[ $@ == *-m* ]];then\n\trepeat=yes\n\tmode=m\nfi\n#repeat mode flag\nif [[ $@ == *-r* ]];then\n\trepeat=yes\n\tmode=r\nfi\n#custom download directory mode flag\nif [[ $@ == *-d* ]];then\n\tmode=d\nfi\n\n#run download function\ndownload.album\n\ncd\n\nexit\n"
},
{
"alpha_fraction": 0.6532257795333862,
"alphanum_fraction": 0.7096773982048035,
"avg_line_length": 14.625,
"blob_id": "4be2aa388d392631fbe133dc88b67199e72b0759",
"content_id": "1f05d588a4579e9d089edc514251ea2279598434",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 124,
"license_type": "no_license",
"max_line_length": 28,
"num_lines": 8,
"path": "/learning/passing-variables/2",
"repo_name": "detriment/my_scripts",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\necho $variable \"in script 2\"\necho \"passed switches:\" $1\nexport export=exported\nIP=\"10.0.0.1\"\n\nexport ipmiip=$IP"
},
{
"alpha_fraction": 0.5642458200454712,
"alphanum_fraction": 0.6201117038726807,
"avg_line_length": 12.769230842590332,
"blob_id": "f53cb1e6dfc97a2fae98d7b52bbadd5235c6d2b7",
"content_id": "031be380959dee4c9f241d4982da9baa82bb1167",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 179,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 13,
"path": "/rdp",
"repo_name": "detriment/my_scripts",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\nif [[ -z ${1} ]]; then\n\techo \"IP?\"\n\tread ip\n\techo \"Password?\"\n\tread password\nelse\t\n\tip=$1\n\tpassword=$2\nfi\n\nrdesktop -u administrator -p \"$password\" -g 1600x900 $ip &\n"
},
{
"alpha_fraction": 0.4410211145877838,
"alphanum_fraction": 0.44542253017425537,
"avg_line_length": 28.921052932739258,
"blob_id": "3ccbf57e3f43e839b240eb3b59c16bae557866b4",
"content_id": "bf1028cc5cd8c5d920097ec4aadead98a8364622",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 1136,
"license_type": "no_license",
"max_line_length": 105,
"num_lines": 38,
"path": "/ksp/checkmyversions",
"repo_name": "detriment/my_scripts",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\nIFS=$'\\n'\n\nfunction outputversion () {\n\t\techo \"\"\n\t\techo \"______________________________________________________________________________________________\"\n\t\t#echo $name\n\t\tversion_checked=$(cat versions.new | sed 's/(//g' | sed 's/)//g' | grep -A 1 \"$name\" | grep \"$version\")\n\t\tif [[ -z ${version_checked} ]];then\n\t\t\tcat versions.new | grep -A 1 $name | sed 's/$name//g'\n\t\t\techo \"############################################################################\"\n\t\t\techo \"# Versions Do Not Match #\"\n\t\t\techo \"############################################################################\"\n\t\t\techo \"Currently Installed Version: $version\"\n\t\t\techo $link\n\t\telse\n\t\t\techo $name\n\t\t\techo \"Versions Match\"\n\t\t\techo \"Currently Installed Version: $version\"\n\t\tfi\n\t\t\n\t}\n\n\nfor VER in `cat /scripts/ksp/mods.list`\ndo\n\tname=$(echo $VER | awk '{print $1}' | sed 's/_/ /g')\n\tversion=$(echo $VER | awk '{print $2}')\n\tlink=$(echo $VER | awk '{print $5}')\n\tinstallcheck=$(echo $version | grep \"Not-Installed\")\n\n\tif [[ $installcheck != \"Not-Installed\" ]];then\n\t\toutputversion\n\tfi\n\tunset installcheck\n\t\ndone"
},
{
"alpha_fraction": 0.5157397985458374,
"alphanum_fraction": 0.5912906527519226,
"avg_line_length": 30.75,
"blob_id": "29eb04e3ae3bd00854383d17590a9d59afef96a2",
"content_id": "52a0a3041a83d06f69525834ba99adc49a883c79",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 1906,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 60,
"path": "/ipv6",
"repo_name": "detriment/my_scripts",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n clear\n unset ip6\n echo \"IPv6 Address:\"\n echo \"Enter address after the double colon.\"\n echo \"Only include CIDR if it differs from /121\"\n echo \"For Example: to enter 2604:4500::xxxx/121), only enter xxxx\"\n echo \" ** Leave blank for no IPv6 **\"\n read ip6\n\n function setIP6 () {\n\n echo \"Enter IPv6 Gateway:\"\n echo \"Only enter last segment. ie. 2604:4500::xxxx.\"\n read ip6gw\n\n #Address Case\n case \"$ip6\" in \n *\"2604:4500\"* ) ;;\n *\"::\"* ) ;;\n * ) ip6=2604:4500::$ip6 ;;\n esac\n \n #Previx Case\n case \"$ip6\" in\n *\"/\"* ) ip6subnet=$(echo $ip6 | sed -e 's,.*/,,g') ;;\n * ) ip6=$ip6/121 && ip6subnet=121 ;;\n esac\n \n #Gateway Case\n case \"$ip6gw\" in \n *\"2604:4500\"* ) ;;\n *\"::\"* ) ;;\n * ) ip6gw=2604:4500::$ip6gw ;;\n esac\n \n }\n\n\n if [[ -n ${ip6} ]]; then\n setIP6 \n else\n echo \"Skipping IPv6 Configuration\"\n sleep 1\n fi\n\nrm -rf /etc/sysconfig/network-scripts/ifcfg-enp0s8 >/dev/null 2>&1\ntouch /etc/sysconfig/network-scripts/ifcfg-enp0s8 >/dev/null 2>&1\necho \"DEVICE=enp0s8\" >> /etc/sysconfig/network-scripts/ifcfg-enp0s8\necho \"IPADDR=192.168.0.55\" >> /etc/sysconfig/network-scripts/ifcfg-enp0s8\necho \"NETMASK=255.255.255.0\" >> /etc/sysconfig/network-scripts/ifcfg-enp0s8\necho \"GATEWAY=192.168.0.1\" >> /etc/sysconfig/network-scripts/ifcfg-enp0s8\necho \"ONBOOT=yes\" >> /etc/sysconfig/network-scripts/ifcfg-enp0s8\necho \"IPV6INIT=yes\" >> /etc/sysconfig/network-scripts/ifcfg-enp0s8\necho \"IPV6ADDR=$ip6\" >> /etc/sysconfig/network-scripts/ifcfg-enp0s8\necho \"IPV6_DEFAULTGW=$ip6gw\" >> /etc/sysconfig/network-scripts/ifcfg-enp0s8\necho \"NETWORKING_IPV6=yes\" >> /etc/sysconfig/network\n\ncat /etc/sysconfig/network-scripts/ifcfg-enp0s8\ncat /etc/sysconfig/network\n\n"
}
] | 43 |
allen111/pythonMong
|
https://github.com/allen111/pythonMong
|
f5abd6bd2143408acd71b2598bb1299e1c9a4843
|
98f4ddc98b5b2ecaedf03809d6180de3afa1f06c
|
d33ab360f8aa005e21e96034d3587418ec45b10d
|
refs/heads/master
| 2020-03-22T18:05:20.504877 | 2018-07-11T15:29:37 | 2018-07-11T15:29:37 | 140,436,384 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.8125,
"alphanum_fraction": 0.8125,
"avg_line_length": 23,
"blob_id": "b0b7010a69f06847954d9aed4e53d3fcffa43bd5",
"content_id": "5bfc4193575b229c50bbf38af2ce349be6db1f40",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 48,
"license_type": "no_license",
"max_line_length": 34,
"num_lines": 2,
"path": "/README.md",
"repo_name": "allen111/pythonMong",
"src_encoding": "UTF-8",
"text": "# pythonMong\nfunny time with python and mongodb\n"
},
{
"alpha_fraction": 0.6454843282699585,
"alphanum_fraction": 0.6589584946632385,
"avg_line_length": 18.544483184814453,
"blob_id": "ad897c3da763ea4e61668c91482c26800ea3fce8",
"content_id": "cb02ff7872cb5645806c457bbfb9be32471a487b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5492,
"license_type": "no_license",
"max_line_length": 116,
"num_lines": 281,
"path": "/pymongoplay.py",
"repo_name": "allen111/pythonMong",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python\nimport pymongo,sys\n# from pprint import pprint\n\nclient=pymongo.MongoClient()\ndb=client.rpg\nitems=db.items\nrooms=db.rooms\nplayerst=db.playerstats\n# playerDef=playerst.find_one({\"name\":\"allen\"}) adesso ci sono i salvataggi con save_id crescente vedi load and save\n#esempi\n# sword={\n# \t\"name\":\"old sword\",\n# \t\"desc\":\"an old and rusty sword\",\n# \t\"atk\":2,\n# \t\"slot\":0,\n# \t\"cost\":10,\n# \t\"tags\":[\"weapon\",\"sword\",\"common\"]\n# }\n\n#items.insert_one(sword)\n\n# queryVendor={\n# \t\"$or\":[\n# \t\t{\"tags\":\"weapon\"},{\"tags\":\"food\"}\n# \t]\n# }\n\n#if result == None print all\n\ndef printItemsName(result): \n\n\tif result is not None:\n\t\tfor doc in result:\n\t\t\tprint(doc)\n\telse:\n\t\tfor doc in items.find():\n\t\t\tprint(doc.get(\"name\"))\n\n\n\ndef printDetailItem(itemName):\t\t\t\n\titem=items.find_one({\"name\":itemName})\n\tfor k,v in item.items():\n\t\tprint(k+\": \"+str(v))\n\t\n\t\n\nroomMap=[[]]\noldTile=7\n\ndef printActiveRoom():\n\tfor x in roomMap:\n\t\tprint (x)\ndef updateMap(): #da fare all load e prima e dopo il movimento\n\tglobal roomMap\n\tglobal oldTile\n\tx,y=player.get(\"x\"),player.get(\"y\")\n\toldTile,roomMap[y][x]=roomMap[y][x],oldTile\n\n\n\ndef doorPosition(doorLoc,roomSize):\n\tpos={\n\t\"nord\":(roomSize//2,0),\n\t\"sud\":(roomSize//2,roomSize-1),\n\t\"est\":(roomSize-1,roomSize//2),\n\t\"ovest\":(0,roomSize//2),\n\n\t}\n\treturn pos[doorLoc]\n\n\ndef isTaken(item):\n\t\n\n\tfor it in player[\"inventory\"]:\n\t\tif it[\"id_spawn\"]==item[\"id_spawn\"]:\n\t\t\treturn True\n\n\treturn False\n\n\ndoorList={}\ndef initializeRoom():\n\tglobal activeRoom\n\t\n\tsize=activeRoom.get(\"size\")\n\tw,h=size, size\n\tglobal roomMap\n\troomMap=[[0 for x in range(w)] for y in range(h)]\n\n\tfor item in activeRoom.get(\"items\"):\n\t\tif not isTaken(item):\n\t\t\troomMap[item.get(\"x\")][item.get(\"y\")]=1\n\n\tfor door in activeRoom.get(\"doors\"):\n\t\tnextRoomId=door.get(door.keys()[0])\n\t\troomName=door.keys()[0]\n\t\tnextRoom=rooms.find_one(nextRoomId)\n\t\tif nextRoom is not None:\n\t\t\troomX,roomY=doorPosition(roomName,activeRoom.get(\"size\"))\n\t\t\tprint(roomX,roomY)\n\t\t\troomMap[roomY][roomX]=2\n\t\t\tglobal doorList\n\n\t\telse:\n\t\t\troomX,roomY=doorPosition(roomName,activeRoom.get(\"size\"))\n\t\t\troomMap[roomY][roomX]=3\n\n\ndef openPort(x,y):\n\tglobal activeRoom\n\tglobal oldTile\n\troomSize=activeRoom[\"size\"]\n\tpos={\n\t\t(roomSize//2,0):\"nord\",\n\t\t(roomSize//2,roomSize-1):\"sud\",\n\t\t(roomSize-1,roomSize//2):\"est\",\n\t\t(0,roomSize//2):\"ovest\",\n\t}\n\tnextRoomName=pos.get((x,y))\n\tdoorsList=activeRoom[\"doors\"]\n\tmatches=[obj for obj in doorsList if (obj.keys()[0]==nextRoomName)]\n\tif matches:\n\t\tdoor=matches.pop()\n\t\tdoorid=door.get(nextRoomName)\n\t\tactiveRoom=rooms.find_one({\"_id\":doorid})\n\t\tinitializeRoom()\n\t\toldTile=7\n\t\treverse={\n\t\t\t\"nord\":\"sud\",\n\t\t\t\"sud\":\"nord\",\n\t\t\t\"est\":\"ovest\",\n\t\t\t\"ovest\":\"est\",\n\t\t}\n\t\tnewDoor=reverse[nextRoomName]\n\t\tnewX,newY=doorPosition(newDoor,activeRoom[\"size\"])\n\t\tplayer.update({\"x\":newX})\n\t\tplayer.update({\"y\":newY})\n\t\tplayer.update({\"room\":activeRoom[\"name\"]})\n\t\tupdateMap()\n\t\tprintActiveRoom()\n\n\ndef getItem(x,y):\n\tglobal activeRoom\n\tglobal oldTile\n\tif oldTile==2:\n\t\topenPort(x,y)\n\telse:\n\t\tres=activeRoom.get(\"items\")\n\t\tmatches = [obj for obj in res if (obj.get(\"y\")==x and obj.get(\"x\")==y)]\n\t\tif matches and 
oldTile!=0:\n\t\t\titemOnFloor=matches.pop()\n\t\t\tactiveRoom[\"items\"].remove(itemOnFloor)\n\t\t\titemOnFloorId=itemOnFloor.get(\"item_id\")\n\t\t\titemName=items.find_one({\"_id\":itemOnFloorId})[\"name\"]\n\t\t\titemDict={\n\t\t\t\t\"name\":itemName,\n\t\t\t\t\"id\":itemOnFloorId,\n \t\"id_spawn\":itemOnFloor.get(\"id_spawn\")\n\n\t\t\t}\n\t\t\tplayer[\"inventory\"].append(itemDict)\n\t\t\tprintInventory()\n\t\t\toldTile=0\n\n\ndef printInventory():\n\tif not player[\"inventory\"]:\n\t\tprint(\"il tuo zaino e' vuoto\")\n\telse:\n\t\tfor item in player[\"inventory\"]:\n\t\t\titemId=item[\"id\"]\n\t\t\ttmpItem=items.find_one({\"_id\":itemId})\n\t\t\tprint(tmpItem[\"name\"]+\": \"+tmpItem[\"desc\"])\n\n\n\n\n\ndef exitWithoutSave():\n\texit(0)\ndef savePlayerState(): \n\tstate=player\n\tstate.pop(\"_id\")\n\tnewSave=state.get(\"save_id\")\n\tnewSave=newSave +1\n\tstate.update({\"save_id\" : newSave})\n\tplayerst.insert_one(player)\n\tprint(\"goodbye\")\n\texit(0)\n\ndef loadState(): \n\tp= playerst.find().sort(\"save_id\",pymongo.DESCENDING).limit(1)[0]\n\tx,y=p.get(\"x\"),p.get(\"y\")\n\tglobal activeRoom\n\tactiveRoom=rooms.find_one({\"name\":p[\"room\"]})\n\treturn p\n\n\ndef checkRoomPosition(x,y,m_activeRoom):\n\tsize=m_activeRoom.get(\"size\")\n\tif x>=0 and x<size and y>=0 and y<size:\n\t\treturn True\n\telse:\n\t\treturn False\n\ndef movePlayerNew(x,y,m_activeRoom):\n\tif checkRoomPosition(x,y,m_activeRoom):\n\t\tupdateMap()\n\t\tplayer.update({\"x\":x})\n\t\tplayer.update({\"y\":y})\n\t\tupdateMap()\n\t\tprint(x,y)\n\telse:\n\t\tprint(\"muroo\\n\")\n\tprintActiveRoom()\n\n\n\n# xy\n# 00 10 20 30\n# 01 11 21 31\n# 02 12 22 32\ndef error11():\n\tprint(\"sorry, where?\")\n\n\ninpt={\n\t\"w\":\"nord\",\n\t\"s\":\"sud\",\n\t\"d\":\"est\",\n\t\"a\":\"ovest\",\n\t\"q\":\"quit\",\n\t\"x\":\"close\",\n\t\"e\":\"use\",\n\t\"i\":\"listInventory\",\n\t\"none\":\"none\",\n}\n\n\ndef manageInput(command):\n\tx=player.get(\"x\")\n\ty=player.get(\"y\")\n\n\tfun={\n\t\"nord\":[movePlayerNew,(x,y-1,activeRoom)],\n\t\"sud\":[movePlayerNew,(x,y+1,activeRoom)],\n\t\"est\":[movePlayerNew,(x+1,y,activeRoom)],\n\t\"ovest\":[movePlayerNew,(x-1,y,activeRoom)],\n\t\"quit\":[savePlayerState,()],\n\t\"close\":[exitWithoutSave,()],\n\t\"use\":[getItem,(x,y)],\n\t\"listInventory\":[printInventory,()],\n\t\"none\":[error11,()]\n\n\t}\n\tprint(\"Called: \"+str(fun[command][0].__name__)) #DEBUG\n\tfun[command][0](*fun[command][1])\n\n\n\n\n\n\t\n\n\t\nactiveRoomName=\"ingresso\"\nactiveRoom=rooms.find_one({\"name\":activeRoomName})\n\n\nplayer=loadState()\nprint(player) #DEBUG\ninitializeRoom()\nupdateMap()\nprintActiveRoom()\nwhile True:\n\tdirectionKey=raw_input('Enter your input:')\n\tmanageInput(inpt.get(directionKey,\"none\"))\n"
}
] | 2 |
meonwax/aoc2020
|
https://github.com/meonwax/aoc2020
|
98dedb8c6b10b800fa89c283403ffd2f3f9b508b
|
0df0c35c6090d9c7982c7dccef268a74eaefc882
|
fdcb0a753b515950018b96fa0e205354bdc428ad
|
refs/heads/main
| 2023-01-29T13:18:30.434270 | 2020-12-16T20:01:53 | 2020-12-16T20:01:53 | 317,309,700 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5281490683555603,
"alphanum_fraction": 0.5415147542953491,
"avg_line_length": 25.54838752746582,
"blob_id": "f274191f910c9a94da8456856b846b5d77628cbe",
"content_id": "a14761299bea6289b705247be14aa669974581c8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2469,
"license_type": "no_license",
"max_line_length": 89,
"num_lines": 93,
"path": "/day11.py",
"repo_name": "meonwax/aoc2020",
"src_encoding": "UTF-8",
"text": "with open('day11.txt', 'r') as f:\n input = [line.strip() for line in f.readlines()]\n\n\ndef deep_copy(list):\n copy = []\n for r in list:\n row = []\n for s in r:\n row.append(s)\n copy.append(row)\n return copy\n\n\nDIRECTIONS = [\n (-1, -1), # North-West\n (-1, 0), # North\n (-1, 1), # North-East\n (0, -1), # West\n (0, 1), # East\n (1, -1), # South-West\n (1, 0), # South\n (1, 1), # Sout-East\n]\n\n\ndef find_occupied_neighbours(layout, direction, r, s, unlimited):\n while True:\n r = r + direction[0]\n s = s + direction[1]\n if r < 0 or s < 0 or r >= len(layout) or s >= len(layout[r]):\n return 0\n seat = layout[r][s]\n if seat == '.':\n if unlimited:\n continue\n return 0\n if seat == 'L':\n return 0\n if seat == '#':\n return 1\n\n\ndef count_occupied_neighbours(layout, r, s, unlimited):\n seat = layout[r][s]\n count = 0\n if seat != '.':\n for direction in DIRECTIONS:\n count += find_occupied_neighbours(layout, direction, r, s, unlimited)\n return count\n\n\ndef next_round(last_layout, max_neighbours, unlimited):\n new_layout = deep_copy(last_layout)\n for r in range(len(new_layout)):\n row = new_layout[r]\n for s in range(len(row)):\n seat = row[s]\n if seat == '.':\n continue\n occupied_neighbours = count_occupied_neighbours(last_layout, r, s, unlimited)\n if seat == 'L' and occupied_neighbours == 0:\n new_layout[r][s] = '#'\n elif seat == '#' and occupied_neighbours > max_neighbours:\n new_layout[r][s] = 'L'\n return new_layout\n\n\ndef count_occupied_seats(layout):\n occupied_count = 0\n for row in layout:\n for seat in row:\n occupied_count += seat == '#'\n return occupied_count\n\n\nprint(\"\\n# Part 1\")\nlast_layout = deep_copy(input)\nwhile True:\n new_layout = next_round(last_layout, 3, False)\n if new_layout == last_layout:\n break\n last_layout = new_layout\nprint(\"Number of seats occupied: {}\".format(count_occupied_seats(last_layout)))\n\nprint(\"\\n# Part 2\")\nlast_layout = deep_copy(input)\nwhile True:\n new_layout = next_round(last_layout, 4, True)\n if new_layout == last_layout:\n break\n last_layout = new_layout\nprint(\"Number of seats occupied: {}\".format(count_occupied_seats(last_layout)))\n"
},
{
"alpha_fraction": 0.5570897459983826,
"alphanum_fraction": 0.5693042874336243,
"avg_line_length": 31.465517044067383,
"blob_id": "98609c106787218dc5d9dcd80d8c556d91414ff6",
"content_id": "c3d0c09e07b1a7eef5cbee55e3426eb7e11dfc09",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1883,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 58,
"path": "/day14.py",
"repo_name": "meonwax/aoc2020",
"src_encoding": "UTF-8",
"text": "import re\n\nwith open('day14.txt', 'r') as f:\n input = [line.strip() for line in f.readlines()]\n\n\ndef apply_bit(value: int, index: int, bit: str):\n binary_string = '{0:036b}'.format(value)\n binary_string = binary_string[:index] + bit + binary_string[index + 1:]\n return int(binary_string, 2)\n\n\ndef generate_variation(address: int, floating_bits: list, index: int, value: int):\n if index < len(floating_bits):\n bit_position = floating_bits[index]\n\n variation_address = apply_bit(address, bit_position, '0')\n generate_variation(variation_address, floating_bits, index + 1, value)\n\n variation_address = apply_bit(address, bit_position, '1')\n generate_variation(variation_address, floating_bits, index + 1, value)\n else:\n mem[address] = value\n\n\nprint(\"\\n# Part 1\")\nmask: str\nmem = {}\nfor line in input:\n if line.startswith('mask'):\n mask = re.search(r'^mask = ([X\\d]+)$', line).group(1)\n else:\n matches = re.search(r'^mem\\[(\\d+)\\] = (\\d+)$', line)\n value = int(matches.group(2))\n for i, char in enumerate(mask):\n if char != 'X':\n value = apply_bit(value, i, char)\n address = int(matches.group(1))\n mem[address] = value\nprint(\"Sum:\", sum(mem.values()))\n\nprint(\"\\n# Part 2\")\nmem = {}\nfor line in input:\n if line.startswith('mask'):\n mask = re.search(r'^mask = ([X\\d]+)$', line).group(1)\n else:\n matches = re.search(r'^mem\\[(\\d+)\\] = (\\d+)$', line)\n address = int(matches.group(1))\n value = int(matches.group(2))\n floating_bits = []\n for i, char in enumerate(mask):\n if char == '1':\n address = apply_bit(address, i, '1')\n if char == 'X':\n floating_bits.append(i)\n generate_variation(address, floating_bits, 0, value)\nprint(\"Sum:\", sum(mem.values()))\n"
},
{
"alpha_fraction": 0.5105485320091248,
"alphanum_fraction": 0.5738396644592285,
"avg_line_length": 14.800000190734863,
"blob_id": "bffd363c980e54b886ffe8847be0e8a33aa87f00",
"content_id": "218bf6918b925af14339753eab3b2337bc108885",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 474,
"license_type": "no_license",
"max_line_length": 73,
"num_lines": 30,
"path": "/day10.py",
"repo_name": "meonwax/aoc2020",
"src_encoding": "UTF-8",
"text": "with open('day10.txt', 'r') as f:\n input = [int(line) for line in f.readlines()]\n\n# input = [int(i) for i in \"\"\"16\n# 10\n# 15\n# 5\n# 1\n# 11\n# 7\n# 19\n# 6\n# 12\n# 4\"\"\".split()]\n\njoltage_diff_count = {\n 1: 0,\n 3: 0,\n}\n\nprint(\"\\n# Part 1\")\ninput.sort()\njoltage = 0\nfor adapter in input:\n diff = adapter - joltage\n joltage_diff_count[diff] += 1\n joltage += diff\njoltage_diff_count[3] += 1\n\nprint(\"Result: {}\".format(joltage_diff_count[1] * joltage_diff_count[3]))\n"
},
{
"alpha_fraction": 0.5725126266479492,
"alphanum_fraction": 0.5809443593025208,
"avg_line_length": 25.954545974731445,
"blob_id": "1e621542a73bc8c37038983495c9409c33f383a2",
"content_id": "4c7130c16124ddc7f7f729de500ce12957650a4f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1186,
"license_type": "no_license",
"max_line_length": 86,
"num_lines": 44,
"path": "/day07.py",
"repo_name": "meonwax/aoc2020",
"src_encoding": "UTF-8",
"text": "import re\n\nwith open('day07.txt', 'r') as f:\n input = [line.strip() for line in f.readlines()]\n\n\ndef parse_rules(input):\n rules = {}\n for rule in input:\n matches = re.search(r'(.+) bags contain (.+)\\.', rule)\n bag = matches.group(1)\n children = {}\n if matches.group(2) != 'no other bags':\n for child in matches.group(2).split(', '):\n matches = re.search(r'(\\d+) (.+) ', child)\n children[matches.group(2)] = int(matches.group(1))\n rules[bag] = children\n return rules\n\n\ndef calculate_bag(bag):\n bags = set()\n for key, value in rules.items():\n if bag in value.keys():\n bags.add(key)\n bags.update(calculate_bag(key))\n return bags\n\n\ndef calculate_containing_bags(bag):\n count = 0\n for child, amount in rules.get(bag).items():\n count += amount\n count += amount * calculate_containing_bags(child)\n return count\n\n\nrules = parse_rules(input)\n\nprint(\"\\n# Part 1\")\nprint(\"Number of bag colors: {}\".format(len(calculate_bag('shiny gold'))))\n\nprint(\"\\n# Part 2\")\nprint(\"Number of containing bags: {}\".format(calculate_containing_bags('shiny gold')))\n"
},
{
"alpha_fraction": 0.545945942401886,
"alphanum_fraction": 0.5891891717910767,
"avg_line_length": 32.03571319580078,
"blob_id": "d6858011dab60a5b488942f48ba31332797920da",
"content_id": "fc50e0579ba61513ef2ef235d402728db1e39f3a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 925,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 28,
"path": "/day03.py",
"repo_name": "meonwax/aoc2020",
"src_encoding": "UTF-8",
"text": "with open('day03.txt', 'r') as f:\n input = [line.strip() for line in f.readlines()]\n\n\ndef count_trees(right, down):\n tree_count = 0\n i = 0\n for line in input[down::down]:\n i += right\n if line[i % len(line)] == '#':\n tree_count += 1\n return tree_count\n\n\nprint(\"\\n# Part 1\")\nprint(\"Number of trees: {}\".format(count_trees(3, 1)))\n\nprint(\"\\n# Part 2\")\nprint(\"Number of trees for 'Right 1, down 1': {}\".format(count_trees(1, 1)))\nprint(\"Number of trees for 'Right 3, down 1': {}\".format(count_trees(3, 1)))\nprint(\"Number of trees for 'Right 5, down 1': {}\".format(count_trees(5, 1)))\nprint(\"Number of trees for 'Right 7, down 1': {}\".format(count_trees(7, 1)))\nprint(\"Number of trees for 'Right 1, down 2': {}\".format(count_trees(1, 2)))\n\nresult = 1\nfor right, down in [(1, 1), (3, 1), (5, 1), (7, 1), (1, 2)]:\n result *= count_trees(right, down)\nprint(\"Multiplied: {}\".format(result))\n"
},
{
"alpha_fraction": 0.5367156267166138,
"alphanum_fraction": 0.5467289686203003,
"avg_line_length": 25.280702590942383,
"blob_id": "9c881bdc71831f4ebfc28324975dfb381462a13e",
"content_id": "f4fdbea42e416fcec416a653c1c4197a197372a3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1498,
"license_type": "no_license",
"max_line_length": 87,
"num_lines": 57,
"path": "/day16.py",
"repo_name": "meonwax/aoc2020",
"src_encoding": "UTF-8",
"text": "class Rule:\n def __init__(self, field: str, ranges: list):\n self.__field = field\n self.__ranges = ranges\n\n def is_valid(self, value):\n for range in self.__ranges:\n lower = int(range.split('-')[0])\n upper = int(range.split('-')[1])\n if lower <= value <= upper:\n return True\n return False\n\n\nclass Ticket:\n def __init__(self, values: list):\n self.__values = values\n\n def __matches_rule(value):\n for rule in rules:\n if rule.is_valid(value):\n return True\n return False\n\n def get_error_rate(self):\n error_rate = 0\n for value in self.__values:\n if not Ticket.__matches_rule(value):\n error_rate += value\n return error_rate\n\n\nrules = []\nmy_ticket: Ticket\nnearby_tickets = []\n\nwith open('day16.txt', 'r') as f:\n sections = f.read().split('\\n\\n')\n\n # Parse rules\n for line in sections[0].split('\\n'):\n s = line.split(': ')\n ranges = s[1].split(' or ')\n rules.append(Rule(s[0], ranges))\n\n # Parse my ticket\n my_ticket = Ticket([int(value) for value in sections[1].split('\\n')[1].split(',')])\n\n # Parse nearby tickets\n for line in sections[2].split('\\n')[1:-1]:\n nearby_tickets.append(Ticket([int(value) for value in line.split(',')]))\n\nprint(\"\\n# Part 1\")\nerror_rate = 0\nfor ticket in nearby_tickets:\n error_rate += ticket.get_error_rate()\nprint(\"Error rate:\", error_rate)\n"
},
{
"alpha_fraction": 0.3585185110569,
"alphanum_fraction": 0.4340740740299225,
"avg_line_length": 34.52631759643555,
"blob_id": "571e88af9ab54765231daa61616c132c4ff55b32",
"content_id": "26b7dfc36f42b9d228c73f2d84a1b65ac3deff5d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 675,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 19,
"path": "/day01.py",
"repo_name": "meonwax/aoc2020",
"src_encoding": "UTF-8",
"text": "with open('day01.txt', 'r') as f:\n input = [int(line) for line in f.readlines()]\n\nprint(\"\\n# Part 1\")\nfor i, v1 in enumerate(input):\n for v2 in input[i + 1:]:\n if v1 + v2 == 2020:\n print(\"{} + {} = {}\".format(v1, v2, v1 + v2))\n print(\"{} * {} = {}\".format(v1, v2, v1 * v2))\n\nprint(\"\\n# Part 2\")\nfor i, v1 in enumerate(input):\n for k, v2 in enumerate(input[i + 1:]):\n if v1 + v2 >= 2020:\n continue\n for v3 in input[k + 1:]:\n if v1 + v2 + v3 == 2020:\n print(\"{} + {} + {} = {}\".format(v1, v2, v3, v1 + v2 + v3))\n print(\"{} * {} * {} = {}\".format(v1, v2, v3, v1 * v2 * v3))\n"
},
{
"alpha_fraction": 0.5243004560470581,
"alphanum_fraction": 0.5360824465751648,
"avg_line_length": 24.148147583007812,
"blob_id": "30d87f87ecf4fb2be650d0908ee6e29b1c0443ed",
"content_id": "6c6defb906477d2c63181c4eab18bddfb8e49248",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 679,
"license_type": "no_license",
"max_line_length": 42,
"num_lines": 27,
"path": "/day06.py",
"repo_name": "meonwax/aoc2020",
"src_encoding": "UTF-8",
"text": "with open('day06.txt', 'r') as f:\n input = f.read().strip().split('\\n\\n')\n\nprint(\"\\n# Part 1\")\nsum = 0\nfor group in input:\n answers = set()\n for person in group.split('\\n'):\n for answer in person:\n answers.add(answer)\n sum += len(answers)\nprint(\"Sum: {}\".format(sum))\n\nprint(\"\\n# Part 2\")\nsum = 0\nfor group in input:\n answers = {}\n persons = group.split('\\n')\n for person in persons:\n for answer in person:\n if answer in answers:\n answers[answer] += 1\n else:\n answers[answer] = 1\n for count in answers.values():\n sum += count == len(persons)\nprint(\"Sum: {}\".format(sum))\n"
},
{
"alpha_fraction": 0.5147650837898254,
"alphanum_fraction": 0.5483221411705017,
"avg_line_length": 23.42622947692871,
"blob_id": "271abc4ad94071f03117ae89a0c8fe535066882f",
"content_id": "7b31f85065fb9e824f53ab6461ddecc1f8263c5b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1490,
"license_type": "no_license",
"max_line_length": 57,
"num_lines": 61,
"path": "/day12.py",
"repo_name": "meonwax/aoc2020",
"src_encoding": "UTF-8",
"text": "with open('day12.txt', 'r') as f:\n input = [line.strip() for line in f.readlines()]\n\nDIRECTION_MAPPING = {\n 0: 'N',\n 90: 'E',\n 180: 'S',\n 270: 'W',\n}\n\nMOVEMENT_MAPPING = {\n 'N': (0, -1),\n 'E': (1, 0),\n 'S': (0, 1),\n 'W': (-1, 0),\n}\n\nprint(\"\\n# Part 1\")\nship_x = 0\nship_y = 0\nship_direction = 90\n\nfor instruction in input:\n action = instruction[0]\n value = int(instruction[1:])\n if action == 'F':\n action = DIRECTION_MAPPING[ship_direction]\n if action in MOVEMENT_MAPPING:\n ship_x += MOVEMENT_MAPPING[action][0] * value\n ship_y += MOVEMENT_MAPPING[action][1] * value\n else:\n if action == 'L':\n value = 360 - value\n ship_direction = (ship_direction + value) % 360\n\nprint(\"Manhattan distance:\", abs(ship_x) + abs(ship_y))\n\nprint(\"\\n# Part 2\")\nship_x = 0\nship_y = 0\nwaypoint_x = 10\nwaypoint_y = -1\n\nfor instruction in input:\n action = instruction[0]\n value = int(instruction[1:])\n if action in MOVEMENT_MAPPING:\n waypoint_x += MOVEMENT_MAPPING[action][0] * value\n waypoint_y += MOVEMENT_MAPPING[action][1] * value\n elif action == 'F':\n ship_x += value * waypoint_x\n ship_y += value * waypoint_y\n else:\n if action == 'L':\n value = 360 - value\n for _ in range(int(value / 90)):\n temp_x = waypoint_x\n waypoint_x = waypoint_y * -1\n waypoint_y = temp_x\n\nprint(\"Manhattan distance:\", abs(ship_x) + abs(ship_y))\n"
},
{
"alpha_fraction": 0.5529905557632446,
"alphanum_fraction": 0.5603357553482056,
"avg_line_length": 26.22857093811035,
"blob_id": "33d83ed5b69c8e64d6ee38e382796a5da76930dc",
"content_id": "58a009119c8229f6abdc9779c3a1c92c1a03cfe1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 953,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 35,
"path": "/day09.py",
"repo_name": "meonwax/aoc2020",
"src_encoding": "UTF-8",
"text": "with open('day09.txt', 'r') as f:\n input = [int(line.strip()) for line in f.readlines()]\n\n\ndef check_previous(numbers, sum):\n for i in range(len(numbers)):\n for k in range(i + 1, len(numbers)):\n if numbers[i] + numbers[k] == sum:\n return True\n return False\n\n\ndef find_window(number):\n for i in range(len(input)):\n window = []\n for k in range(i, len(input)):\n value = input[k]\n window.append(value)\n s = sum(window)\n if s == number:\n return window\n if s > number:\n break\n\n\nprint(\"\\n# Part 1\")\nPREAMBLE_LENGTH = 25\nfor i, number in enumerate(input[PREAMBLE_LENGTH:], PREAMBLE_LENGTH):\n if not check_previous(input[i - PREAMBLE_LENGTH:i], number):\n break\nprint(\"Invalid number: {}\".format(number))\n\nprint(\"\\n# Part 2\")\nwindow = find_window(number)\nprint(\"Result: {}\".format(min(window) + max(window)))\n"
},
{
"alpha_fraction": 0.5359877347946167,
"alphanum_fraction": 0.5773353576660156,
"avg_line_length": 26.20833396911621,
"blob_id": "205151a8a8b1c9108f627bb868bc547484cec39c",
"content_id": "bcb87f7911261f24fc5ecd0ba21d5675b495772a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 653,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 24,
"path": "/day15.py",
"repo_name": "meonwax/aoc2020",
"src_encoding": "UTF-8",
"text": "input = [2, 15, 0, 9, 1, 20]\n\n\ndef play(rounds: int) -> int:\n numbers = {}\n last_number: int\n for turn, number in enumerate(input):\n numbers[number] = [turn]\n last_number = number\n for turn in range(len(input), rounds):\n if last_number not in numbers:\n numbers[last_number] = [turn - 1]\n last_number = 0\n else:\n numbers[last_number].append(turn - 1)\n last_number = numbers[last_number][-1] - numbers[last_number][-2]\n return last_number\n\n\nprint(\"\\n# Part 1\")\nprint(\"Last number spoken:\", play(2020))\n\nprint(\"\\n# Part 2\")\nprint(\"Last number spoken:\", play(30000000))\n"
},
{
"alpha_fraction": 0.663112998008728,
"alphanum_fraction": 0.673774003982544,
"avg_line_length": 35.07692337036133,
"blob_id": "3ef976b984795aee5d697a02c12311e3e8c39ab3",
"content_id": "f7299c17fd49254bb3284fa5854fef971bf7b69b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 469,
"license_type": "no_license",
"max_line_length": 89,
"num_lines": 13,
"path": "/day13.py",
"repo_name": "meonwax/aoc2020",
"src_encoding": "UTF-8",
"text": "import math\n\nwith open('day13.txt', 'r') as f:\n input = [line.strip() for line in f.readlines()]\n\nprint(\"\\n# Part 1\")\nearliest_departure = int(input[0])\nbus_ids = [int(id) for id in input[1].split(',') if id != 'x']\nnext_departures = {}\nfor bus_id in bus_ids:\n next_departures[math.ceil(earliest_departure / bus_id) * bus_id] = bus_id\nnext_departure = min(next_departures)\nprint(\"Result:\", (next_departure - earliest_departure) * next_departures[next_departure])\n"
},
{
"alpha_fraction": 0.5795996189117432,
"alphanum_fraction": 0.6034318208694458,
"avg_line_length": 27.351350784301758,
"blob_id": "44038133c712d02880f1493a5d4338c6e3852501",
"content_id": "9e8b6b5090b18b54053d73e30f375a2116925797",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1049,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 37,
"path": "/day05.py",
"repo_name": "meonwax/aoc2020",
"src_encoding": "UTF-8",
"text": "HALF_INDICATORS = {\n 'F': 0,\n 'B': 1,\n 'L': 0,\n 'R': 1\n}\n\nwith open('day05.txt', 'r') as f:\n input = [line.strip() for line in f.readlines()]\n\n\ndef halve_interval(interval, half_indicator):\n half = int(len(interval) / 2)\n if half_indicator == 0:\n return interval[:half]\n if half_indicator == 1:\n return interval[half:]\n\n\nprint(\"\\n# Part 1\")\nseat_ids = []\nfor boarding_pass in input:\n row_interval = [i for i in range(0, 128)]\n cols_interval = [i for i in range(0, 8)]\n for char in boarding_pass[:7]:\n row_interval = halve_interval(row_interval, HALF_INDICATORS[char])\n for char in boarding_pass[7:]:\n cols_interval = halve_interval(cols_interval, HALF_INDICATORS[char])\n seat_ids.append(row_interval[0] * 8 + cols_interval[0])\nprint(\"Highest seat ID: {}\".format(max(seat_ids)))\n\nprint(\"\\n# Part 2\")\nfor seat_id in seat_ids:\n if (seat_id + 2 in seat_ids) and not (seat_id + 1 in seat_ids):\n my_seat_id = seat_id + 1\n break\nprint(\"My seat ID: {}\".format(my_seat_id))\n"
},
{
"alpha_fraction": 0.6379928588867188,
"alphanum_fraction": 0.6532257795333862,
"avg_line_length": 28.36842155456543,
"blob_id": "13eeb1f3734c7ed900436e510e0af1d77739f8e5",
"content_id": "0822cf16bf2ed86a19bb52334cb05273c28e0b1f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1116,
"license_type": "no_license",
"max_line_length": 111,
"num_lines": 38,
"path": "/day02.py",
"repo_name": "meonwax/aoc2020",
"src_encoding": "UTF-8",
"text": "from collections import namedtuple\nimport re\n\nwith open('day02.txt', 'r') as f:\n input = [line.strip().split(\" \") for line in f.readlines()]\n\n\ndef parse_policies():\n Policy = namedtuple('Policy', 'char min max password')\n policies = []\n for line in input:\n policies.append(Policy(line[1][:-1], int(line[0].split(\"-\")[0]), int(line[0].split(\"-\")[1]), line[2]))\n return policies\n\n\ndef validate_part_one(policy):\n occurrences = re.sub('[^' + policy.char + ']', '', policy.password)\n return policy.min <= len(occurrences) <= policy.max\n\n\ndef validate_part_two(policy):\n return (policy.password[policy.min - 1] == policy.char) != (policy.password[policy.max - 1] == policy.char)\n\n\nprint(\"\\n# Part 1\")\npolicies = parse_policies()\nvalid_passwords = 0\nfor policy in policies:\n if validate_part_one(policy):\n valid_passwords += 1\nprint(\"Number of valid passwords: {}\".format(valid_passwords))\n\nprint(\"\\n# Part 2\")\nvalid_passwords = 0\nfor policy in policies:\n if validate_part_two(policy):\n valid_passwords += 1\nprint(\"Number of valid passwords: {}\".format(valid_passwords))\n"
},
{
"alpha_fraction": 0.5278236865997314,
"alphanum_fraction": 0.5823691487312317,
"avg_line_length": 32.61111068725586,
"blob_id": "5e879f9f45658273f6449fd89a96d4ae8651a6b6",
"content_id": "140c0952c390e47a6ba733298423831ccc29c50d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1815,
"license_type": "no_license",
"max_line_length": 104,
"num_lines": 54,
"path": "/day04.py",
"repo_name": "meonwax/aoc2020",
"src_encoding": "UTF-8",
"text": "import re\n\nwith open('day04.txt', 'r') as f:\n input = f.read().split('\\n\\n')\n\n\ndef parse_passports():\n passports = []\n for line in input:\n passport = {}\n for pair in re.split('[ \\n]', line.strip()):\n passport[pair.split(':')[0]] = pair.split(':')[1]\n passports.append(passport)\n return passports\n\n\ndef validate(passport, constraints):\n for field, regex in constraints.items():\n if not re.match(regex, passport.get(field) or ''):\n return False\n return True\n\n\nprint(\"\\n# Part 1\")\nvalid_passports = 0\nCONSTRAINTS = {\n 'byr': r'.+',\n 'iyr': r'.+',\n 'eyr': r'.+',\n 'hgt': r'.+',\n 'hcl': r'.+',\n 'ecl': r'.+',\n 'pid': r'.+',\n}\nfor passport in parse_passports():\n valid_passports += validate(passport, CONSTRAINTS)\nprint(\"Valid passports: {}\".format(valid_passports))\n\nprint(\"\\n# Part 2\")\nvalid_passports = 0\nCONSTRAINTS = {\n 'byr': r'^(19[2-8][0-9]|199[0-9]|200[0-2])$', # four digits; at least 1920 and at most 2002.\n 'iyr': r'^(201[0-9]|2020)$', # (Issue Year) - four digits; at least 2010 and at most 2020.\n 'eyr': r'^(202[0-9]|2030)$', # four digits; at least 2020 and at most 2030.\n 'hgt': r'^((1[5-8][0-9]|19[0-3])cm|(59|6[0-9]|7[0-6])in)$', # a number followed by either cm or in:\n # If cm, the number must be at least 150 and at most 193.\n # If in, the number must be at least 59 and at most 76.\n 'hcl': r'^#[0-9a-f]{6}$', # a # followed by exactly six characters 0-9 or a-f.\n 'ecl': r'^(amb|blu|brn|gry|grn|hzl|oth)$', # exactly one of: amb blu brn gry grn hzl oth.\n 'pid': r'^\\d{9}$', # a nine-digit number, including leading zeroes.\n}\nfor passport in parse_passports():\n valid_passports += validate(passport, CONSTRAINTS)\nprint(\"Valid passports: {}\".format(valid_passports))\n"
},
{
"alpha_fraction": 0.5633423328399658,
"alphanum_fraction": 0.5723270177841187,
"avg_line_length": 26.825000762939453,
"blob_id": "09e3f76cd542d4af70ef369342a2bcafb90d9470",
"content_id": "7e22c7ba3c067b87d4841f0c4123ba0c10a9947f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1113,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 40,
"path": "/day08.py",
"repo_name": "meonwax/aoc2020",
"src_encoding": "UTF-8",
"text": "with open('day08.txt', 'r') as f:\n input = [line.strip() for line in f.readlines()]\n\n\ndef run():\n processed_line_numbers = set()\n a = 0\n pc = 0\n while pc < len(input):\n if pc in processed_line_numbers:\n return a, True\n processed_line_numbers.add(pc)\n op, arg = input[pc].split(' ')\n if op == 'acc':\n a += int(arg)\n elif op == 'jmp':\n pc += int(arg)\n continue\n pc += 1\n return a, False\n\n\ndef flip_operation(line_number):\n if input[line_number].startswith('jmp'):\n input[line_number] = input[line_number].replace('jmp', 'nop')\n elif input[line_number].startswith('nop'):\n input[line_number] = input[line_number].replace('nop', 'jmp')\n\n\nprint(\"\\n# Part 1\")\na = run()[0]\nprint(\"Accumulator value: {}\".format(a))\n\nprint(\"\\n# Part 2\")\nfor line_number in range(0, len(input)):\n a, infinite_loop = run()\n if infinite_loop:\n flip_operation(line_number - 1) # Unflip last operation\n flip_operation(line_number) # Flip current operation\nprint(\"Accumulator value: {}\".format(a))\n"
},
{
"alpha_fraction": 0.7019230723381042,
"alphanum_fraction": 0.7788461446762085,
"avg_line_length": 33.66666793823242,
"blob_id": "f02fe4a297eef43815d8de56d10edab54d39e37a",
"content_id": "913c8b07dffe2d5c14eeb038b0f6574103af5437",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 104,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 3,
"path": "/README.md",
"repo_name": "meonwax/aoc2020",
"src_encoding": "UTF-8",
"text": "# Advent of Code 2020\n\nMy solutions to the _Advent of Code 2020_ programming puzzles written in Python.\n"
}
] | 17 |
unstad/master-thesis
|
https://github.com/unstad/master-thesis
|
35ff7fcb9b98596b4745557593fc0d5bd31d8c82
|
dc5966311b30941c0356d1069f3066772e9a8a88
|
ad02bf04bbed47457c1e8d82ad135d3281321535
|
refs/heads/master
| 2020-07-12T09:31:08.950350 | 2019-08-28T22:14:44 | 2019-08-28T22:14:44 | 204,779,819 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6098839640617371,
"alphanum_fraction": 0.6196181178092957,
"avg_line_length": 29.701148986816406,
"blob_id": "19e65e0b7bfa3495594f9f80db0250cd288ef02b",
"content_id": "f084388bb0c4cc24ff64be7b44efb820d6d5c5b9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2671,
"license_type": "no_license",
"max_line_length": 101,
"num_lines": 87,
"path": "/exif.py",
"repo_name": "unstad/master-thesis",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n\nimport sys\n\nfrom PIL import Image\nfrom PIL.ExifTags import TAGS, GPSTAGS\nfrom pprint import pprint\n\n# PIL documentation: https://pillow.readthedocs.io/en/stable/handbook/index.html\n\n\n# Extracts exif data and makes it understandable\ndef get_exif(image):\n exif_data = {}\n data = image.getexif()\n for tag, value in data.items():\n field_name = TAGS.get(tag, tag)\n if field_name == \"GPSInfo\":\n gps_data = {}\n for gps_tag, gps_value in value.items():\n gps_field_name = GPSTAGS.get(gps_tag, gps_tag)\n gps_data[gps_field_name] = gps_value\n exif_data[field_name] = gps_data\n else:\n exif_data[field_name] = value\n return exif_data\n\n\n# Lat and long are given in rational. Returns lat long in degrees.\n# rational gives lat long in degrees, minutes and seconds in tuples with denominator or and nominator\n# Converts to degrees\ndef lat_long_degrees(value):\n deg = value[0][0] / value[0][1]\n minute = value[1][0] / value[1][1]\n second = value[2][0] / value[2][1]\n return deg + (minute / 60) + (second / 3600)\n\n\ndef get_lat_long(exif_data):\n latitude = None\n longitude = None\n if \"GPSInfo\" in exif_data:\n gps_info = exif_data[\"GPSInfo\"]\n latitude = lat_long_degrees(gps_info[\"GPSLatitude\"])\n latitude_ref = gps_info[\"GPSLatitudeRef\"]\n longitude = lat_long_degrees(gps_info[\"GPSLongitude\"])\n longitude_ref = gps_info[\"GPSLongitudeRef\"]\n\n if latitude_ref != \"N\":\n latitude = 0 - latitude\n\n if longitude_ref != \"E\":\n longitude = 0 - longitude\n return latitude, longitude\n\n\ndef get_direction(exif_data):\n direction = None\n if \"GPSInfo\" in exif_data:\n gps_info = exif_data[\"GPSInfo\"]\n direction_ref = gps_info[\"GPSImgDirectionRef\"]\n direction = gps_info[\"GPSImgDirection\"]\n direction = direction[0] / direction[1]\n return direction, direction_ref\n\n\ndef get_height(exif_data):\n height = None\n if \"GPSInfo\" in exif_data:\n gps_info = exif_data[\"GPSInfo\"]\n height_ref = gps_info[\"GPSAltitudeRef\"]\n height = gps_info[\"GPSAltitude\"]\n height = height[0] / height[1]\n if height_ref == b\"\\x00\":\n height = height\n else:\n height = -height\n return height\n\n\ndef get_geodetic_coordinate(image_path):\n image = Image.open(image_path)\n exif_data = get_exif(image)\n height = get_height(exif_data)\n direction = get_direction(exif_data)\n lat, lon = get_lat_long(exif_data)\n return {\"height\": height, \"direction\": direction, \"latitude\": lat, \"longitude\": lon}\n"
},
{
"alpha_fraction": 0.6418918967247009,
"alphanum_fraction": 0.6959459185600281,
"avg_line_length": 15.44444465637207,
"blob_id": "9781c15aa2966e92202a8d54729c75c02a010318",
"content_id": "c534e5849bbd981a4811b76c502c561868ccb88e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 148,
"license_type": "no_license",
"max_line_length": 52,
"num_lines": 9,
"path": "/README.md",
"repo_name": "unstad/master-thesis",
"src_encoding": "UTF-8",
"text": "# Code for master thesis\n\n## Dependencies\n\n* [Anaconda](https://www.anaconda.com)\n\n## Example\n\n`$ python main.py img/IMG_1957.jpg img/IMG_1959.jpg`\n"
},
{
"alpha_fraction": 0.5718061923980713,
"alphanum_fraction": 0.5947136282920837,
"avg_line_length": 28.86842155456543,
"blob_id": "75487d6b215103ca91e3628f98f4ffa42a1cc5b7",
"content_id": "40470d6e0d11319e60e6d5dbf7f1186ae1812e1e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2270,
"license_type": "no_license",
"max_line_length": 101,
"num_lines": 76,
"path": "/camera_calibration.py",
"repo_name": "unstad/master-thesis",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n\nimport glob\nimport numpy as np\nimport sys\n\nfrom cv2 import cv2\n\n\n# Find calibration matrix from given images. Chessboard used must be asymmetric.\ndef find_calibration_matrix(images):\n # Prepare real world points\n rwp = np.zeros((7 * 9, 3), np.float32)\n rwp[:, :2] = np.mgrid[0:7, 0:9].T.reshape(-1, 2)\n\n # Arrays to store 3D points in the real world and 2D points in image plane\n rw_points = []\n image_points = []\n\n for image in images:\n img = cv2.imread(image)\n img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n # Take in a 8-bit grayscale image and the number of internal corners of chessboard and flags.\n # CLAIB_CV_FAST_CHECK = check quickly for corners and end if none to save time\n found, corners = cv2.findChessboardCorners(\n img_gray, (7, 9), flags=cv2.CALIB_CB_FAST_CHECK\n )\n\n if found:\n rw_points.append(rwp)\n # Needs accuracy below pixel level\n corners = cv2.cornerSubPix(\n img_gray,\n corners,\n (11, 11),\n (-1, -1),\n (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001),\n )\n image_points.append(corners)\n\n # Draws the corners on picture\n # cv2.drawChessboardCorners(img, (7, 9), cornersnew, retval)\n # cv2.imshow('img', img)\n # cv2.waitKey()\n else:\n print(\"no corners found in \" + image, file=sys.stderr)\n\n cv2.destroyAllWindows()\n\n _, cameramatrix, distortioncoefficients, _, _ = cv2.calibrateCamera(\n rw_points, image_points, img_gray.shape[::-1], None, None\n )\n return cameramatrix, distortioncoefficients\n\n\ndef fail(msg):\n print(msg, file=sys.stderr)\n sys.exit(1)\n\n\ndef main():\n if len(sys.argv) < 2:\n fail(\"usage: {} <image> ...\".format(sys.argv[0]))\n images = sys.argv[1:]\n calibration_matrix, distortion_coefficients = find_calibration_matrix(images)\n print(\n \"calibration matrix {}\\ndistortion coefficients {}\".format(\n calibration_matrix, distortion_coefficients\n )\n )\n\n\n# Example run: ./camera_calibration.py img/chessboard/*.jpeg\nif __name__ == \"__main__\":\n main()\n"
},
{
"alpha_fraction": 0.5983231663703918,
"alphanum_fraction": 0.6356707215309143,
"avg_line_length": 23.754716873168945,
"blob_id": "b4fabf2c548aa41eeff2d636a9938f862f881e8e",
"content_id": "2ace3706ccf7c7c73964d41dceee4d8857a995ff",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1312,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 53,
"path": "/main.py",
"repo_name": "unstad/master-thesis",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n\nimport exif\nimport img2pos\nimport sys\n\nfrom pprint import pprint\nimport math\n\n\ndef fail(msg):\n print(msg, file=sys.stderr)\n sys.exit(1)\n\n\ndef main():\n if len(sys.argv) < 3:\n fail(\"usage: {} <image1> <image2>\".format(sys.argv[0]))\n image1_path, image2_path = sys.argv[1], sys.argv[2]\n\n print(\"--> Image 1\")\n geodetic_coord1 = exif.get_geodetic_coordinate(image1_path)\n pprint(geodetic_coord1)\n\n print(\"\\n--> Image 2\")\n geodetic_coord2 = exif.get_geodetic_coordinate(image2_path)\n pprint(geodetic_coord2)\n print()\n\n points_3d, t = img2pos.find_3d_points(image1_path, image2_path)\n scale_factor = img2pos.scale_factor_from_geodetic(\n geodetic_coord1, geodetic_coord2, t\n )\n points_3d = points_3d * scale_factor\n img2pos.show_figure(points_3d, t)\n\n print(\"\\n--> Image 1: Geodetic -> ECEF\")\n ecef1 = img2pos.convert_geodetic_to_ecef(\n geodetic_coord1[\"latitude\"],\n geodetic_coord1[\"longitude\"],\n geodetic_coord1[\"height\"],\n )\n print(\"--> ECEF\")\n pprint(ecef1)\n geodetic1 = img2pos.convert_ECEF_to_Geodetic(ecef1)\n geodetic1[0] = math.degrees(geodetic1[0])\n geodetic1[1] = math.degrees(geodetic1[1])\n print(\"--> Geodetic\")\n pprint(geodetic1)\n\n\nif __name__ == \"__main__\":\n main()\n"
},
{
"alpha_fraction": 0.5583539605140686,
"alphanum_fraction": 0.610411524772644,
"avg_line_length": 29.354948043823242,
"blob_id": "75e606192425b50fc960f28a9164e73106f5642e",
"content_id": "f299c732e22077b58512e716ff65a172bdf9684f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8894,
"license_type": "no_license",
"max_line_length": 150,
"num_lines": 293,
"path": "/img2pos.py",
"repo_name": "unstad/master-thesis",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n\nimport math\nimport numpy as np\nimport numpy.linalg as mlin\n\nfrom cv2 import cv2\nfrom matplotlib import pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom pprint import pprint\n\n\nSEMI_MAJOR_AXIS = 6378137.0\nSEMI_MINOR_AXIS = 6356752.3142\n\n\n# Returns true if the value v is considered an outlier in the given list of values\ndef is_outlier(v, values):\n sigma = np.std(values)\n mean = np.mean(values)\n return v > (2 * sigma) + mean or v < mean - (2 * sigma)\n\n\n# Remove any match where the distance is higher than the average\ndef clean_matches(matches):\n distances = [m.distance for m in matches]\n avg = np.average(distances)\n return [m for m in matches if m.distance < avg]\n\n\n# Extract coordinates using matches and key points\ndef extract_coordinates(matches, key_points, indexField):\n points = []\n for m in matches:\n index = getattr(m, indexField)\n points.append(key_points[index].pt)\n return np.array(points)\n\n\n# Group 3D points by each axis. Three lists are returned, one for each axis.\ndef points_by_axis(points):\n points_x = []\n points_y = []\n points_z = []\n for coord in points:\n points_x.append(coord[0][0])\n points_y.append(coord[0][1])\n points_z.append(coord[0][2])\n return points_x, points_y, points_z\n\n\ndef find_3d_points(image1_path, image2_path):\n img1 = cv2.imread(image1_path, cv2.IMREAD_GRAYSCALE) # queryImage\n img2 = cv2.imread(image2_path, cv2.IMREAD_GRAYSCALE) # trainImage\n\n # Initial calibration matrix from camera\n init_calibration_matrix = np.array(\n [\n [2.78228443e03, 0.00000000e00, 1.65670819e03],\n [0.00000000e00, 2.77797243e03, 1.19855894e03],\n [0.00000000e00, 0.00000000e00, 1.00000000e00],\n ]\n )\n distortion_coefficients = np.array(\n [0.07874525, -0.07184864, -0.00619498, 0.00252332, -0.09900985]\n )\n\n # Undistort images. getOptimalNewCameraMatrix: 1 tells us that we want to see the \"black hills\" after undistorting. Exchanging for 0 removes them.\n height, width = img1.shape[:2]\n calibration_matrix, roi = cv2.getOptimalNewCameraMatrix(\n init_calibration_matrix,\n distortion_coefficients,\n (width, height),\n 1,\n (width, height),\n )\n img1_distorted = cv2.undistort(\n img1, init_calibration_matrix, distortion_coefficients, None, calibration_matrix\n )\n img2_distorted = cv2.undistort(\n img2, init_calibration_matrix, distortion_coefficients, None, calibration_matrix\n )\n\n # Crop images\n x, y, w, h = roi\n img1_distorted = img1_distorted[y : y + h, x : x + w]\n img2_distorted = img2_distorted[y : y + h, x : x + w]\n\n # To display the undistorted images:\n # plt.imshow(img1_distorted), plt.show()\n # plt.imshow(img2_distorted), plt.show()\n\n # Create an ORB object\n orb = cv2.ORB_create()\n\n # Detect keypoints\n kp1 = orb.detect(img1_distorted, None)\n kp2 = orb.detect(img2_distorted, None)\n\n # Find descriptors\n kp1, des1 = orb.compute(img1_distorted, kp1)\n kp2, des2 = orb.compute(img2_distorted, kp2)\n\n # To draw the keypoints:\n #img1kp = cv2.drawKeypoints(img1, kp1, None, color=(0, 255, 0), flags=0) #flags = cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS\n # img2kp = cv2.drawKeypoints(img2, kp2, None, color=(0, 255, 0), flags=0)\n #plt.imshow(img1kp), plt.show()\n # plt.imshow(img2kp), plt.show()\n\n # Brute-force matcher object. 
crossCheck=True means that it has to match both ways\n brute_force = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)\n\n # Matching descriptors\n matches = brute_force.match(des1, des2)\n\n # Clean the matches by distance\n matches = clean_matches(matches)\n\n # Sort matches in order of distance\n matches = sorted(matches, key=lambda x: x.distance)\n\n # To draw the first 20 matches:\n #img_matches = cv2.drawMatches(img1_distorted, kp1, img2_distorted, kp2, matches[:], None, flags = 2)\n #plt.imshow(img_matches), plt.show()\n\n # Extract coordinates\n points1 = extract_coordinates(matches, kp1, \"queryIdx\")\n points2 = extract_coordinates(matches, kp2, \"trainIdx\")\n\n # Find essential Matrix\n essential_matrix, _ = cv2.findEssentialMat(\n points1, points2, calibration_matrix, method=cv2.RANSAC, prob=0.999, threshold=3\n )\n determinant = mlin.det(essential_matrix)\n eps = 1e-10\n if determinant > eps:\n raise Exception(\n \"expected determinant to be close to zero, but is {}\".format(determinant)\n )\n\n # Find camera2 position relative to camera1 (t is only in unit)\n _, R, t, _ = cv2.recoverPose(essential_matrix, points1, points2, calibration_matrix)\n\n # Create camera matrices\n M1 = np.hstack((np.eye(3, 3), np.zeros((3, 1))))\n M2 = np.hstack((R, t))\n camera_matrix1 = np.dot(calibration_matrix, M1)\n camera_matrix2 = np.dot(calibration_matrix, M2)\n\n # Compute 3D points\n points_3d = []\n for c1, c2 in zip(points1, points2):\n point = cv2.triangulatePoints(camera_matrix1, camera_matrix2, c1, c2)\n points_3d.append(point)\n points_3d = cv2.convertPointsFromHomogeneous(np.array(points_3d))\n\n return points_3d, t\n\n\ndef scale_factor_from_geodetic(geodetic_coordinate1, geodetic_coordinate2, t):\n ecef1 = convert_geodetic_to_ecef(\n geodetic_coordinate1[\"latitude\"],\n geodetic_coordinate1[\"longitude\"],\n geodetic_coordinate1[\"height\"],\n )\n ecef2 = convert_geodetic_to_ecef(\n geodetic_coordinate2[\"latitude\"],\n geodetic_coordinate2[\"longitude\"],\n geodetic_coordinate2[\"height\"],\n )\n\n distance = distance_between_two_gps_positions(ecef1, ecef2)\n print(\"distance=%f\" % distance)\n length_t = length_of_translation_vector(t)\n\n return distance / length_t\n\n\ndef show_figure(points_3d, t):\n # Extract each X, Y and Z value into separate lists for the purpose of eliminating outlier values\n points_x, points_y, points_z = points_by_axis(points_3d)\n\n # Show points in a figure\n fig = plt.figure()\n ax = fig.add_subplot(111, projection=\"3d\")\n for coord in points_3d:\n x = coord[0][0]\n y = coord[0][1]\n z = coord[0][2]\n if is_outlier(x, points_x):\n continue\n if is_outlier(y, points_y):\n continue\n if is_outlier(z, points_z):\n continue\n ax.scatter(x, y, z, marker=\".\")\n ax.scatter(0, 0, 0, marker=\"s\")\n ax.scatter(t[0], t[1], t[2], marker=\"s\")\n plt.show(fig)\n\n\ndef ellipsoid_of_revolution():\n e2 = (math.pow(SEMI_MAJOR_AXIS, 2) - math.pow(SEMI_MINOR_AXIS, 2)) / math.pow(\n SEMI_MAJOR_AXIS, 2\n )\n return e2\n\n\ndef distance_between_two_gps_positions(ECEF1, ECEF2): # Euclidian distance\n x = ECEF2[0] - ECEF1[0]\n y = ECEF2[1] - ECEF1[1]\n z = ECEF2[2] - ECEF1[2]\n distance = math.sqrt(x * x + y * y + z * z)\n return distance\n\n\ndef length_of_translation_vector(t):\n length_t = math.sqrt(math.pow(t[0], 2) + math.pow(t[1], 2) + math.pow(t[2], 2))\n return length_t\n\n\ndef convert_geodetic_to_ecef(lat, lon, height): # lat long in radians\n lat = math.radians(lat)\n lon = math.radians(lon)\n e2 = ellipsoid_of_revolution()\n n = SEMI_MAJOR_AXIS / 
math.sqrt(1 - e2 * math.pow(math.sin(lat), 2))\n ecef = [\n (n + height) * math.cos(lat) * math.cos(lon),\n (n + height) * math.cos(lat) * math.sin(lon),\n (n * (1 - e2) + height) * math.sin(lat),\n ]\n return ecef\n\n\n# Implemented as code in appendix in Olson, D. K. (1996).\n# \"Converting earth-Centered, Earth-Fixed Coordinates to Geodetic Coordinates\"\ndef convert_ECEF_to_Geodetic(ECEF):\n e2 = ellipsoid_of_revolution()\n a1 = SEMI_MAJOR_AXIS * e2\n a2 = a1 * a1\n a3 = a1 * e2 / 2\n a4 = (5 / 2) * a2\n a5 = a1 + a3\n a6 = 1 - e2\n\n z = ECEF[2]\n z_abs = abs(z)\n w2 = ECEF[0] * ECEF[0] + ECEF[1] * ECEF[1]\n w = math.sqrt(w2)\n z2 = ECEF[2] * ECEF[2]\n r2 = w2 + z2\n r = math.sqrt(r2)\n\n geo = [0, 0, 0]\n if r < 100000:\n geo[0] = 0\n geo[1] = 0\n geo[2] = -1.0e7\n return geo\n\n geo[1] = np.arctan2(ECEF[1], ECEF[0]) # longitude\n\n s2 = z2 / r2\n c2 = w2 / r2\n u = a2 / r\n v = a3 - a4 / r\n\n if c2 > 0.3:\n s = (z_abs / r) * (1.0 + c2 * (a1 + u + s2 * v) / r)\n geo[0] = np.arcsin(s) # Latitude\n ss = s * s\n c = math.sqrt(1.0 - ss)\n else:\n c = (w / r) * (1.0 - s2 * (a5 - u - c2 * v) / r)\n geo[0] = np.arccos(c) # Latitude\n ss = 1.0 - c * c\n s = math.sqrt(ss)\n\n g = 1.0 - e2 * ss\n rg = SEMI_MAJOR_AXIS / math.sqrt(g)\n rf = a6 * rg\n u = w - rg * c\n v = z_abs - rf * s\n f = c * u + s * v\n m = c * v - s * u\n p = m / (rf / g + f)\n\n geo[0] = geo[0] + p # Latitude\n geo[2] = f + m * p / 2.0 # Height\n\n if z < 0.0:\n geo[0] *= -1.0 # Latitude\n return geo\n"
}
] | 5 |
galaxy-works/galaxy-workflow-tool-tests
|
https://github.com/galaxy-works/galaxy-workflow-tool-tests
|
4e4d30d2bcde5f7720f1881426ffd2cdca29c393
|
0020a02198399846a09cd175e461fe551ffcf487
|
c0c19ff67503eb24b4eca93a53df399dde7de6b6
|
refs/heads/main
| 2023-02-08T10:21:38.518081 | 2020-12-30T14:23:06 | 2020-12-30T14:23:06 | 321,179,829 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6467013955116272,
"alphanum_fraction": 0.6475694179534912,
"avg_line_length": 27.09756088256836,
"blob_id": "33dc660742b9c4f7a32401bdd756f058d29d6f97",
"content_id": "fa6b4a35c0b7b84a1d6f49a7b90aca3aececdd27",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1152,
"license_type": "permissive",
"max_line_length": 83,
"num_lines": 41,
"path": "/gwtt/script.py",
"repo_name": "galaxy-works/galaxy-workflow-tool-tests",
"src_encoding": "UTF-8",
"text": "\"\"\"Entry point for galaxy-workflow-tool-tests.\"\"\"\nimport sys\n\nfrom bioblend import galaxy\nfrom galaxy.tool_util.verify.script import (\n arg_parser,\n run_tests,\n)\n\nSCRIPT_DESCRIPTION = \"\"\"\nScript to run all the tool tests for all the tools in a Galaxy workflow.\n\"\"\"\n\n\ndef main(argv=None):\n \"\"\"Entry point function for galaxy-workflow-tool-tests.\"\"\"\n if argv is None:\n argv = sys.argv[1:]\n\n parser = arg_parser()\n parser.add_argument('workflow_id', metavar='WORKFLOW_ID',\n help='workflow id to scan for tools')\n args = parser.parse_args(argv)\n gi = galaxy.GalaxyInstance(url=args.galaxy_url, key=args.admin_key or args.key)\n workflows = gi.workflows\n workflow_dict = workflows.export_workflow_dict(args.workflow_id)\n tool_ids = []\n for step_dict in workflow_dict.get(\"steps\").values():\n if not step_dict.get(\"type\") == \"tool\":\n continue\n tool_ids.append(step_dict[\"tool_id\"])\n\n def tool_not_in_workflow(test_reference):\n return test_reference.tool_id not in tool_ids\n\n run_tests(args, test_filters=[tool_not_in_workflow])\n\n\n__all__ = (\n 'main',\n)\n"
},
{
"alpha_fraction": 0.7282913327217102,
"alphanum_fraction": 0.7535014152526855,
"avg_line_length": 26.461538314819336,
"blob_id": "e684154fe43bd6c2b1ddfd90a6fabee100f7615e",
"content_id": "04924e392b385f983331617fa8f5fe02a327e85d",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Dockerfile",
"length_bytes": 357,
"license_type": "permissive",
"max_line_length": 67,
"num_lines": 13,
"path": "/Dockerfile",
"repo_name": "galaxy-works/galaxy-workflow-tool-tests",
"src_encoding": "UTF-8",
"text": "FROM python:3.7-buster\nWORKDIR /usr/src/app\n\nCOPY requirements.txt ./\nRUN pip install --no-cache-dir -r requirements.txt\n# RUN pip install \nRUN pip install pysam\n\nCOPY . .\n# Install custom galaxy-tool-util if needed.\n# RUN pip install galaxy_tool_util-21.1.0.dev4-py2.py3-none-any.whl\nRUN python setup.py install\nENTRYPOINT [ \"galaxy-workflow-tool-tests\" ]\n"
},
{
"alpha_fraction": 0.47652366757392883,
"alphanum_fraction": 0.4783649444580078,
"avg_line_length": 46.21739196777344,
"blob_id": "f69cb11f043863bf1c7e642347d8b06654837539",
"content_id": "df5499dc4a1c4f910f053a3956206bcf1f3314aa",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "reStructuredText",
"length_bytes": 5431,
"license_type": "permissive",
"max_line_length": 104,
"num_lines": 115,
"path": "/README.rst",
"repo_name": "galaxy-works/galaxy-workflow-tool-tests",
"src_encoding": "UTF-8",
"text": "\ngalaxy-workflow-tool-tests\n---------------------------\n\nA simple wrapper around Galaxy_'s galaxy-tool-test that runs tool tests for all the tools in a workflow.\n\n------------------\nMinimal Usage:\n------------------\n\n::\n\n pip install galaxy-workflow-tool-tests\n galaxy-workflow-tool-tests -k <api_key> -u http://localhost:8080/ <encoded_workflow_id>\n\n\n----------------\nProgram Help\n----------------\n\n\n::\n\n $ galaxy-workflow-tool-tests --help\n usage: galaxy-workflow-tool-tests [-h] [-u GALAXY_URL] [-k KEY] [-a ADMIN_KEY]\n [--force_path_paste] [-t TOOL_ID]\n [--tool-version TOOL_VERSION]\n [-i TEST_INDEX] [-o OUTPUT] [--append]\n [--skip-previously-executed | --skip-previously-successful]\n [-j OUTPUT_JSON] [--verbose]\n [-c CLIENT_TEST_CONFIG]\n [--suite-name SUITE_NAME]\n [--with-reference-data]\n [--skip-with-reference-data]\n [--history-per-suite | --history-per-test-case]\n [--no-history-cleanup] [--publish-history]\n [--parallel-tests PARALLEL_TESTS]\n [--retries RETRIES] [--page-size PAGE_SIZE]\n [--page-number PAGE_NUMBER]\n [--download-attempts DOWNLOAD_ATTEMPTS]\n [--download-sleep DOWNLOAD_SLEEP]\n [--test-data TEST_DATA]\n WORKFLOW_ID\n\n Script to quickly run a tool test against a running Galaxy instance.\n\n positional arguments:\n WORKFLOW_ID workflow id to scan for tools\n\n optional arguments:\n -h, --help show this help message and exit\n -u GALAXY_URL, --galaxy-url GALAXY_URL\n Galaxy URL\n -k KEY, --key KEY Galaxy User API Key\n -a ADMIN_KEY, --admin-key ADMIN_KEY\n Galaxy Admin API Key\n --force_path_paste This requires Galaxy-side config option\n \"allow_path_paste\" enabled. Allows for fetching test\n data locally. Only for admins.\n -t TOOL_ID, --tool-id TOOL_ID\n Tool ID\n --tool-version TOOL_VERSION\n Tool Version (if tool id supplied). Defaults to just\n latest version, use * to test all versions\n -i TEST_INDEX, --test-index TEST_INDEX\n Tool Test Index (starting at 0) - by default all tests\n will run.\n -o OUTPUT, --output OUTPUT\n directory to dump outputs to\n --append Extend a test record json (created with --output-json)\n with additional tests.\n --skip-previously-executed\n When used with --append, skip any test previously\n executed.\n --skip-previously-successful\n When used with --append, skip any test previously\n executed successfully.\n -j OUTPUT_JSON, --output-json OUTPUT_JSON\n output metadata json\n --verbose Verbose logging.\n -c CLIENT_TEST_CONFIG, --client-test-config CLIENT_TEST_CONFIG\n Test config YAML to help with client testing\n --suite-name SUITE_NAME\n Suite name for tool test output\n --with-reference-data\n --skip-with-reference-data\n Skip tests the Galaxy server believes use data tables\n or loc files.\n --history-per-suite Create new history per test suite (all tests in same\n history).\n --history-per-test-case\n Create new history per test case.\n --no-history-cleanup Perserve histories created for testing.\n --publish-history Publish test history. 
Useful for CI testing.\n --parallel-tests PARALLEL_TESTS\n Parallel tests.\n --retries RETRIES Retry failed tests.\n --page-size PAGE_SIZE\n If positive, use pagination and just run one 'page' to\n tool tests.\n --page-number PAGE_NUMBER\n If page size is used, run this 'page' of tests -\n starts with 0.\n --download-attempts DOWNLOAD_ATTEMPTS\n Galaxy may return a transient 500 status code for\n download if test results are written but not yet\n accessible.\n --download-sleep DOWNLOAD_SLEEP\n If download attempts is greater than 1, the amount to\n sleep between download attempts.\n --test-data TEST_DATA\n Add local test data path to search for missing test\n data\n\n.. _Galaxy: http://galaxyproject.org/\n.. _GitHub: https://github.com/\n"
},
{
"alpha_fraction": 0.6410256624221802,
"alphanum_fraction": 0.7692307829856873,
"avg_line_length": 18.5,
"blob_id": "ecfea57769aad919547fe18ec132aaa97e948fec",
"content_id": "6db5809348b37adfaf9c6378c4d597c59e2e9be1",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 39,
"license_type": "permissive",
"max_line_length": 29,
"num_lines": 2,
"path": "/requirements.txt",
"repo_name": "galaxy-works/galaxy-workflow-tool-tests",
"src_encoding": "UTF-8",
"text": "galaxy-tool-util>=20.9.0.dev4\nbioblend\n"
},
{
"alpha_fraction": 0.689393937587738,
"alphanum_fraction": 0.6994949579238892,
"avg_line_length": 22.294116973876953,
"blob_id": "604595a8dda0feb392903227acd75a290b57dfdc",
"content_id": "dbe6dc7691afe0492b978303c29e4776c7570b1e",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 396,
"license_type": "permissive",
"max_line_length": 85,
"num_lines": 17,
"path": "/gwtt/__init__.py",
"repo_name": "galaxy-works/galaxy-workflow-tool-tests",
"src_encoding": "UTF-8",
"text": "\"\"\"The public interface or entry point for the galaxy-workflow-tool-tests project.\"\"\"\n\n__version__ = '0.1.0.dev0'\n\nPROJECT_NAME = \"galaxy-workflow-tool-tests\"\nPROJECT_OWNER = PROJECT_USERAME = \"galaxy-works\"\nPROJECT_AUTHOR = 'Galaxy Works'\nPROJECT_EMAIL = '[email protected]'\nPROJECT_URL = \"https://github.com/galaxy-works/galaxy-workflow-tool-tests\"\n\n\nfrom .script import main\n\n\n__all__ = (\n 'main',\n)\n"
},
{
"alpha_fraction": 0.75789475440979,
"alphanum_fraction": 0.7631579041481018,
"avg_line_length": 7.636363506317139,
"blob_id": "6f2f89435ed8fc3c4d4e4d1c9a08d073c6ca0512",
"content_id": "0ea000c94f2184e178779f5d712508d1201f72a7",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 190,
"license_type": "permissive",
"max_line_length": 25,
"num_lines": 22,
"path": "/dev-requirements.txt",
"repo_name": "galaxy-works/galaxy-workflow-tool-tests",
"src_encoding": "UTF-8",
"text": "mypy\n\n# For testing\ntox\npytest\ncoverage\n\n# For dev\nsphinx\nsphinx_rtd_theme\nrecommonmark\n\n# Used to check readme.\nreadme\n\n# Used for code checking.\npyflakes\nflake8\n\n# For release\nwheel\ntwine\n"
}
] | 6 |
Imafikus/Joystick-Observations
|
https://github.com/Imafikus/Joystick-Observations
|
985ea1952b68e765f023ab8b9c8d0324664bab7f
|
c166eb857cef244f7ea997d43e401e6aab631cfd
|
4d75705b5219797261b846faf8168c34b7b552f8
|
refs/heads/master
| 2021-07-12T23:40:23.308350 | 2017-10-10T18:55:54 | 2017-10-10T18:55:54 | 104,119,818 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.4898591935634613,
"alphanum_fraction": 0.5198784470558167,
"avg_line_length": 34.591609954833984,
"blob_id": "807b76bd8ff35ca76294073b47d54c460436c44c",
"content_id": "4cc7fd01644c9be50544586b0de316d94ef12a75",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 16123,
"license_type": "no_license",
"max_line_length": 174,
"num_lines": 453,
"path": "/proba.py",
"repo_name": "Imafikus/Joystick-Observations",
"src_encoding": "UTF-8",
"text": "import sys\nimport os\nfrom datetime import *\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtCore import *\n\n\nclass BrowseWindow(QWidget):\n \n def __init__(self):\n super().__init__()\n \n self.initUI()\n\n def initUI(self):\n browse_width = 550\n browse_height = 200\n\n lbl0_x = (browse_width/100)*11.11\n lbl0_y = (browse_height/100)*10\n\n lbl1_x = (browse_width/100)*35.56\n lbl1_y = (browse_height/100)*35\n\n ok_width = (browse_width/100)*33.33\n ok_height = (browse_height/100)*25\n ok_x = (browse_width/100)*11.11\n ok_y = (browse_height/100)*55\n\n cancel_width = (browse_width/100)*33.33\n cancel_height = (browse_height/100)*25\n cancel_x = (browse_width/100)*55.56\n cancel_y = (browse_height/100)*55\n\n self.setGeometry(300, 300, browse_width, browse_height)\n self.setWindowTitle('Browse')\n self.setFixedSize(self.size())\n self.path = str(QFileDialog.getOpenFileName())\n self.path = self.path[2:len(self.path)-19] \n \n self.lbl0 = QLabel(\"Current Path: \"+ self.path, self)\n self.lbl0.move(lbl0_x, lbl0_y) \n\n self.lbl1 = QLabel(\"Save current path?\", self)\n self.lbl1.move(lbl1_x, lbl1_y)\n\n OK = QPushButton('OK', self)\n OK.resize(ok_width, ok_height)\n OK.move(ok_x, ok_y)\n OK.clicked.connect(self.okButton)\n\n Cancel = QPushButton('Cancel', self)\n Cancel.resize(cancel_width, cancel_height)\n Cancel.move (cancel_x, cancel_y)\n Cancel.clicked.connect(self.cancelButton)\n \n def cancelButton(self):\n self.close()\n \n def okButton(self): \n f = open(\"config/browse.txt\", \"w\")\n f.write(self.path)\n f.close()\n self.close() \n\n\nclass MainWindow(QMainWindow):\n \n def __init__(self):\n super(MainWindow, self).__init__()\n \n self.initUI()\n \n \n def initUI(self):\n\n #toolbar init\n BrowseAct = QAction(QIcon('icons/browse.png'), 'Browse for folder', self)\n BrowseAct.setShortcut('Ctrl+B')\n BrowseAct.triggered.connect(self.browse)\n \n self.toolbar = self.addToolBar('Browse')\n self.toolbar.addAction(BrowseAct)\n\n #window init\n self.main_width = 300\n self.main_height = 350\n #table vals\n self.table_width = (self.main_width/100)*44\n self.table_height = (self.main_height/100)*16.67 \n self.table_x = (self.main_width/100)*28\n self.table_y = (self.main_height/100)*70\n #inteval vals\n self.interval_width = (self.main_width/100)*23.33\n self.interval_height =(self.main_height/100)*7.5\n self.interval_x = (self.main_width/100)*48.33\n self.interval_y = (self.main_width/100)*25.33\n\n self.lbl_interval_x = (self.main_width/100)*28.33\n self.lbl_interval_y = (self.main_width/100)*25.33\n\n self.lbl_mins_x = (self.main_width/100)*53.33\n self.lbl_mins_y = (self.main_width/100)*16\n #start_time vals\n self.start_lbl_x = (self.main_width/100)*28.33\n self.start_lbl_y = (self.main_height/100)*38.33\n\n self.start_h_lbl_x = (self.main_width/100)*41.67\n self.start_h_lbl_y = (self.main_height/100)*31.67\n self.start_h_x = (self.main_width/100)*40\n self.start_h_y = (self.main_height/100)*38.63 \n \n self.start_m_lbl_x = (self.main_width/100)*53.33\n self.start_m_lbl_y = (self.main_height/100)*31.67 \n self.start_m_x = (self.main_width/100)*51.67\n self.start_m_y = (self.main_height/100)*38.63\n \n self.start_s_lbl_x = (self.main_width/100)*65\n self.start_s_lbl_y = (self.main_height/100)*31.67 \n self.start_s_x = (self.main_width/100)*63.33\n self.start_s_y = (self.main_height/100)*38.63\n\n #end_time vals\n self.end_lbl_x = (self.main_width/100)*28.33\n self.end_lbl_y = (self.main_height/100)*54.33\n\n self.end_h_lbl_x = 
(self.main_width/100)*41.67\n self.end_h_lbl_y = (self.main_height/100)*47.67\n self.end_h_x = (self.main_width/100)*40\n self.end_h_y = (self.main_height/100)*54.63 \n \n self.end_m_lbl_x = (self.main_width/100)*53.33\n self.end_m_lbl_y = (self.main_height/100)*47.67 \n self.end_m_x = (self.main_width/100)*51.67\n self.end_m_y = (self.main_height/100)*54.63\n \n self.end_s_lbl_x = (self.main_width/100)*65\n self.end_s_lbl_y = (self.main_height/100)*47.67 \n self.end_s_x = (self.main_width/100)*63.33\n self.end_s_y = (self.main_height/100)*54.63\n\n #main window\n self.setGeometry(300, 300, self.main_width, self.main_height)\n self.setFixedSize(self.size())\n self.setWindowTitle('Joystick Observation') \n self.center() \n \n #interval init\n self.lbl_interval = QLabel(\"Interval:\", self)\n self.lbl_interval.move(self.lbl_interval_x, self.lbl_interval_y)\n self.lbl_mins = QLabel(\"Mins.\", self)\n self.lbl_mins.move(self.lbl_mins_x, self.lbl_mins_y)\n self.interval = QLineEdit(self)\n self.interval.setText(\"5\")\n self.interval.resize(self.interval_width, self.interval_height)\n self.interval.move(self.interval_x, self.interval_y)\n \n #start_time init\n self.start_lbl = QLabel(\"Start: \", self)\n self.start_lbl.move(self.start_lbl_x, self.start_lbl_y)\n \n self.start_h_lbl = QLabel(\"H\", self)\n self.start_h_lbl.move(self.start_h_lbl_x, self.start_h_lbl_y)\n self.start_h = QLineEdit(self)\n self.start_h.setText(\"0\")\n self.start_h.resize(25, 30)\n self.start_h.move(self.start_h_x, self.start_h_y)\n\n self.start_m_lbl = QLabel(\"M\", self)\n self.start_m_lbl.move(self.start_m_lbl_x, self.start_m_lbl_y)\n self.start_m = QLineEdit(self)\n self.start_m.setText(\"0\")\n self.start_m.resize(25, 30)\n self.start_m.move(self.start_m_x, self.start_m_y) \n\n self.start_s_lbl = QLabel(\"S\", self)\n self.start_s_lbl.move(self.start_s_lbl_x, self.start_s_lbl_y)\n self.start_s = QLineEdit(self)\n self.start_s.setText(\"0\")\n self.start_s.resize(25, 30)\n self.start_s.move(self.start_s_x, self.start_s_y)\n\n #end_time init \n\n self.end_lbl = QLabel(\"End: \", self)\n self.end_lbl.move(self.end_lbl_x, self.end_lbl_y)\n \n self.end_h_lbl = QLabel(\"H\", self)\n self.end_h_lbl.move(self.end_h_lbl_x, self.end_h_lbl_y)\n self.end_h = QLineEdit(self)\n self.end_h.setText(\"0\")\n self.end_h.resize(25, 30)\n self.end_h.move(self.end_h_x, self.end_h_y)\n\n self.end_m_lbl = QLabel(\"M\", self)\n self.end_m_lbl.move(self.end_m_lbl_x, self.end_m_lbl_y)\n self.end_m = QLineEdit(self)\n self.end_m.setText(\"0\")\n self.end_m.resize(25, 30)\n self.end_m.move(self.end_m_x, self.end_m_y) \n\n self.end_s_lbl = QLabel(\"S\", self)\n self.end_s_lbl.move(self.end_s_lbl_x, self.end_s_lbl_y)\n self.end_s = QLineEdit(self)\n self.end_s.setText(\"0\")\n self.end_s.resize(25, 30)\n self.end_s.move(self.end_s_x, self.end_s_y) \n \n \n\n #Button init\n\n make_table = QPushButton('Make Table', self)\n make_table.resize(self.table_width, self.table_height)\n make_table.move(self.table_x, self.table_y)\n make_table.clicked.connect(self.table_button)\n \n self.show()\n\n def browse(self):\n self.B = BrowseWindow()\n self.B.show()\n \n def table_button(self): \n start_hrs = self.start_h.text()\n start_mins = self.start_m.text()\n start_secs = self.start_s.text()\n\n end_hrs = self.end_h.text()\n end_mins = self.end_m.text()\n end_secs = self.end_s.text()\n \n report = self.check_input(start_hrs, start_mins, start_secs, end_hrs, end_mins, end_secs)\n\n if report[1] == False:\n QMessageBox.warning(self, \"Error\", report[0], 
QMessageBox.Ok)\n else:\n \n if self.check_browse_path()== False:\n QMessageBox.warning(self, \"No browse path!\", \"Choose browse path.\", QMessageBox.Ok)\n else:\n start_time = self.make_start_time(start_hrs, start_mins, start_secs)\n\n end_time = self.make_end_time(end_hrs, end_mins, end_secs)\n log = self.get_log()\n\n if self.check_start_time2(start_time, log) == False:\n QMessageBox.warning(self, \"Bad Input!\", \"Start time must be smaller than first time stamp in session.\", QMessageBox.Ok) \n if self.check_end_time2(end_time, log) == False:\n QMessageBox.warning(self, \"Bad Input!\", \"End time must be greater than last time stamp in session.\", QMessageBox.Ok) \n else:\n interval = self.interval.text() \n if self.check_interval_input(interval)== False:\n QMessageBox.warning(self, \"Bad Input!\", \"Interval must be positive integer number.\", QMessageBox.Ok)\n else:\n interval = int(interval)\n browse = open('config/browse.txt', 'r').read()\n stuff = self.get_dates(log, interval, start_time, end_time) \n rows = self.get_rows(log, stuff)\n table = self.make_HTML(rows) \n f = open(\"table.html\", \"w\")\n f.write(table)\n f.close()\n QMessageBox.information(self, \"Success!\", \"Table successfully made!\", QMessageBox.Ok)\n\n def make_start_time(self, hrs, mins, secs):\n if len(hrs) == 1: hrs = \"0\" + hrs\n if len(mins) == 1: mins = \"0\" + mins\n if len(secs) == 1: secs = \"0\" + secs\n\n string_date = hrs + mins + secs\n date = datetime.strptime(string_date, \"%H%M%S\")\n return date \n \n def make_end_time(self, hrs, mins, secs):\n if len(hrs) == 1: hrs = \"0\" + hrs\n if len(mins) == 1: mins = \"0\" + mins\n if len(secs) == 1: secs = \"0\" + secs\n\n string_date = hrs + mins + secs\n date = datetime.strptime(string_date, \"%H%M%S\")\n return date \n \n def get_log(self):\n path = open('config/browse.txt', 'r').read()\n open('config/browse.txt', 'w').close() \n log = open(path, \"r\").read().splitlines()\n return log\n\n def get_rows(self, log, stuff):\n dates = stuff[0]\n bools = stuff[1]\n rows = [] \n k = 0\n for i in range(0, len(dates)):\n if bools[i] == False:\n interval = dates[i].strftime('%H:%M:%S')\n row = self.get_interval_row(interval)\n rows.append(row)\n else:\n meteor = log[k].split()\n row = self.get_meteor_row(meteor, k)\n rows.append(row)\n k += 1\n return rows \n \n def get_meteor_row(self, meteor, i):\n row = \"<tr>\\n\" + \"<td>\" + str(i+1) + \"</td>\\n\" + \"<td>\" + meteor[2] + \"</td>\\n\" + \"<td>\" + meteor[1] + \"</td>\\n\" + \"<td>\" + meteor[0] + \"</td>\\n\" + \"</tr>\\n\"\n return row \n\n def get_interval_row(self, interval):\n row = \"<tr>\\n\" + \"<td>\" + \"Interval\" + \"</td>\\n\" + \"<td>\" + interval + \"</td>\\n\" + \"<td>\" + \"-\" + \"</td>\\n\" + \"<td>\" + \"-\" + \"</td>\\n\" + \"</tr>\\n\" \n return row\n \n def make_HTML(self, rows):\n table = open(\"config/begin.txt\").read()\n for i in range (0, len(rows)):\n table += rows[i]\n end = open(\"config/end.txt\").read()\n table += end\n return table\n\n def get_dates(self, log, interval, start_date, end_date): \n dates = []\n bools = []\n i = 0\n \n for i in range(0, len(log)):\n meteor = log[i].split()\n string_date = meteor[2] \n date = datetime.strptime(string_date, \"%H:%M:%S\")\n dates.append(date)\n bools.append(True)\n while start_date <= end_date:\n dates.append(start_date)\n bools.append(False)\n start_date = self.add_interval(start_date, interval)\n \n self.sort_date_tupple(dates, bools) \n if dates[len(dates)-1] < end_date:\n dates.append(end_date)\n bools.append(False)\n 
\n stuff = (dates, bools)\n return stuff\n\n def add_interval(self, tm, mins):\n fulldate = datetime(1900, 1, 1, tm.hour, tm.minute, tm.second)\n secs = mins*60\n fulldate = fulldate + timedelta(seconds=secs)\n return fulldate \n \n def sort_date_tupple(self, dates, bools):\n check = False \n while not check:\n check = True \n for i in range(0, len(dates)-1):\n if dates[i] > dates[i + 1]:\n check = False\n hold = dates[i + 1]\n dates[i + 1] = dates[i]\n dates[i] = hold\n\n hold_b = bools[i + 1]\n bools[i + 1] = bools[i]\n bools[i] = hold_b \n \n def check_interval_input(self,s):\n try:\n int(s)\n if int(s) > 0:\n return True\n else:\n return False\n except ValueError:\n return False\n\n def check_browse_path(self):\n path = open('config/browse.txt', 'r').read() \n if len(path) == 0: return False\n else: return True \n \n QMessageBox.information(self, \"Success!\", \"Table successfully made!\", QMessageBox.Ok)\n\n def check_input(self, start_hrs, start_mins, start_secs, end_hrs, end_mins, end_secs):\n check = True\n\n hrs_check = []\n mins_check = []\n secs_check = []\n \n hrs_check = []\n mins_check = []\n secs_check = []\n\n msg = \"\"\n \n for i in range (0, 24): \n hrs_check.append(str(i))\n if i < 9: hrs_check.append(\"0\" + str(i))\n\n for i in range (0, 59): \n mins_check.append(str(i))\n if i < 9: mins_check.append(\"0\" + str(i))\n \n for i in range (0, 59): \n secs_check.append(str(i))\n if i < 9: secs_check.append(\"0\" + str(i))\n\n if (start_hrs not in hrs_check) or (end_hrs not in hrs_check):\n msg += \"Hours must be in range 0-23\\n\"\n check = False\n\n if (start_mins not in mins_check) or (end_mins not in mins_check):\n msg += \"Minutes must be in range 0-59\\n\"\n check = False\n\n if (start_secs not in secs_check) or (end_secs not in secs_check):\n msg += \"Seconds must be in range 0-59\\n\"\n check = False\n \n report = (msg, check)\n return report\n \n def check_start_time2(self, start_date, log):\n check = True\n meteor = log[0].split()\n string_date = meteor[2]\n date = datetime.strptime(string_date, \"%H:%M:%S\")\n if start_date > date: check = False\n return check\n\n def check_end_time2(self, end_date, log):\n check = True\n meteor = log[len(log)-1].split()\n string_date = meteor[2]\n date = datetime.strptime(string_date, \"%H:%M:%S\")\n if end_date < date: check = False\n return check\n \t \n def center(self):\n qr = self.frameGeometry()\n cp = QDesktopWidget().availableGeometry().center()\n qr.moveCenter(cp)\n self.move(qr.topLeft())\n \n \nif __name__ == '__main__':\n \n app = QApplication(sys.argv)\n ex = MainWindow()\n sys.exit(app.exec_())\n"
},
{
"alpha_fraction": 0.8132780194282532,
"alphanum_fraction": 0.8132780194282532,
"avg_line_length": 59.25,
"blob_id": "179ffbe0bd0d082d9ea72b1edfbb834d982508bf",
"content_id": "1fa60d09b6d5a1d9762fc0ae2f54ce1dc8f07ec9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 241,
"license_type": "no_license",
"max_line_length": 95,
"num_lines": 4,
"path": "/README.txt",
"repo_name": "Imafikus/Joystick-Observations",
"src_encoding": "UTF-8",
"text": "Program is made for processing files made by joystick observations.\nProgram makes the working IMO form for visual observations(see table.html) \n\nFor now you MUST choose files which look the same as example.txt otherwise, program will crash.\n"
}
] | 2 |
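A minimal sketch of the interval stepping performed by the log-processing program above, which interleaves fixed-interval time marks with observed meteor time stamps. The function name interval_marks and the example times are illustrative assumptions, not code from the repository.

from datetime import datetime, timedelta

def interval_marks(start, end, minutes):
    # Yield time marks from start to end (inclusive), stepping by `minutes`.
    current = start
    while current <= end:
        yield current
        current = current + timedelta(minutes=minutes)

start = datetime.strptime("21:00:00", "%H:%M:%S")
end = datetime.strptime("22:00:00", "%H:%M:%S")
for mark in interval_marks(start, end, 15):
    print(mark.strftime("%H:%M:%S"))  # 21:00:00, 21:15:00, 21:30:00, 21:45:00, 22:00:00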
aorizondo/djangoshop-paypal
|
https://github.com/aorizondo/djangoshop-paypal
|
59f2201bf8fad3fd8d3a9c1b914cdc6f861b049a
|
a14894a3651128820f0f85669cb139a341f08cf2
|
cf7b1798116b28969fdf99482d5fa7e54f2a8de1
|
refs/heads/master
| 2023-08-29T12:12:05.156870 | 2023-08-25T08:40:15 | 2023-08-25T08:40:15 | 682,277,480 | 0 | 0 |
MIT
| 2023-08-23T20:33:56 | 2023-08-23T20:33:56 | 2020-12-10T11:17:11 | null |
[
{
"alpha_fraction": 0.6539325714111328,
"alphanum_fraction": 0.6921348571777344,
"avg_line_length": 30.785715103149414,
"blob_id": "7e021cd5e8884e4c6167db544e287db70e149b33",
"content_id": "871a56b577a5cf4ac65c44af3495bc23a6ccd631",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 445,
"license_type": "permissive",
"max_line_length": 64,
"num_lines": 14,
"path": "/shop_paypal/__init__.py",
"repo_name": "aorizondo/djangoshop-paypal",
"src_encoding": "UTF-8",
"text": "\"\"\"\nSee PEP 386 (https://www.python.org/dev/peps/pep-0386/)\n\nRelease logic:\n 1. Increase version number (change __version__ below).\n 2. Check that all changes have been documented in CHANGELOG.md.\n 3. git add shop_paypal/__init__.py CHANGELOG.md\n 4. git commit -m 'Bump to {new version}'\n 5. git tag {new version}\n 6. git push --tags\n 7. python setup.py sdist\n 8. twine upload dist/djangoshop-paypal-{new version}.tar.gz\n\"\"\"\n__version__ = '1.2'\n"
},
{
"alpha_fraction": 0.6702412962913513,
"alphanum_fraction": 0.7184986472129822,
"avg_line_length": 25.64285659790039,
"blob_id": "7b52e7a1d7860019fad575d202209536478b809c",
"content_id": "e380205748b3c016f153a8e32a4bdac592e1669b",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 373,
"license_type": "permissive",
"max_line_length": 90,
"num_lines": 14,
"path": "/CHANGELOG.md",
"repo_name": "aorizondo/djangoshop-paypal",
"src_encoding": "UTF-8",
"text": "# Changes\n\n## 1.2 (recommended for django-SHOP version 1.2)\n* Add support for Django-3.0.\n* Fix new PayPal requirement: Add additional costs not reflected in list of ordered items.\n* Drop support for Django-2.0 and lower.\n* Drop support for Python-2.7.\n\n## 1.0.1\n* Fix #6: PayPal's create payment now is invoked by the server.\n\n## 1.0\n\n* Adopted to django-SHOP version 1.0\n"
}
] | 2 |
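The shop_paypal/__init__.py docstring above lists release steps that revolve around the `__version__` string kept in that file. One common pattern, shown here only as a hedged sketch and not taken from this repository, is to have setup.py read that string so the version is maintained in a single place; the helper name `read_version` is an assumption.

import io
import re

def read_version(path="shop_paypal/__init__.py"):
    # Pull the __version__ string out of the package without importing it.
    with io.open(path, encoding="utf-8") as handle:
        match = re.search(r"__version__\s*=\s*'([^']+)'", handle.read())
    if match is None:
        raise RuntimeError("__version__ not found in " + path)
    return match.group(1)

# setup(name="djangoshop-paypal", version=read_version(), ...)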
przempb/code_out_of_img_generator
|
https://github.com/przempb/code_out_of_img_generator
|
cba2fd138cad92a272e2a10f15d6958361cd0372
|
bda3ba1ce79395aa82ee4260ff55be2142c732da
|
ef92ff4f39575fe0a848e7fb97dadf00801693b6
|
refs/heads/main
| 2023-06-10T15:25:42.118217 | 2021-07-04T17:36:47 | 2021-07-04T17:36:47 | 382,909,954 | 0 | 0 | null | 2021-07-04T17:25:10 | 2021-07-04T17:36:49 | 2021-07-05T09:29:51 |
Python
|
[
{
"alpha_fraction": 0.8115941882133484,
"alphanum_fraction": 0.8260869383811951,
"avg_line_length": 45,
"blob_id": "6724eb4de12445de467f50ac717f364eeb40da3f",
"content_id": "68440d86f8a3bce92d8e320e2ac7c43788a8aa92",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 140,
"license_type": "no_license",
"max_line_length": 89,
"num_lines": 3,
"path": "/README.md",
"repo_name": "przempb/code_out_of_img_generator",
"src_encoding": "UTF-8",
"text": "Generator kodu pochodzącego ze screenshotów v1\n\nGenerator oparty na bibliotekach do czytania - cv2 - i przetwarzania tekstu - pytesseract\n"
},
{
"alpha_fraction": 0.6693105697631836,
"alphanum_fraction": 0.6723611950874329,
"avg_line_length": 26.779661178588867,
"blob_id": "ebb5226bf0019d2696b11cc7973e919fec87134b",
"content_id": "1e19260472ca76481f39b263fc8d69166062318a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1652,
"license_type": "no_license",
"max_line_length": 105,
"num_lines": 59,
"path": "/main.py",
"repo_name": "przempb/code_out_of_img_generator",
"src_encoding": "UTF-8",
"text": "import cv2 as cv\nimport pytesseract\nimport sys\n\npytesseract.pytesseract.tesseract_cmd = r\"C:\\\\Program Files\\\\Tesseract-OCR\\tesseract.exe\"\n\n\n#_______LISTA PRZYKLADOWYCH GRAFIK\nclean_img = r\"C:\\...\"\n\n#_______OTWARCEI GRAFIKI DO PRZETWARZANIA W CV2\nimg = cv.imread(clean_img)\n\n\n#_______WYŚWIETLANIE WYBRANEJ GRAFIKI\nif img is None:\n sys.exit(\"Could not read the image.\")\ncv.imshow(\"Display window\", img)\nk = cv.waitKey(0)\nif k == ord(\"s\"):\n cv.imwrite(\"test_image.png\", img)\n \n#_______UTWORZENIE TEKSTU W PYTESSERACT\ntext = pytesseract.image_to_string(img)\n\n#_______FUNKCJE PRZETWARZAJĄCE TEKST\n#podstawowe czyszczenie kodu. Zwraca listę elementów. Każdy z nich to linijka kodu. \ndef text_cleaner(text):\n list = text.split(\"\\n\")\n for elem in list:\n if elem == \"\" or elem == \" \" or elem == \"\\x0c\":\n list.remove(elem)\n return list\n\n#zwraca listę z listami elementów w tekście. Każdy element jest linijką kodu. Ważne do tworzenia stringów\ndef elements_iterator(text):\n elements_lists = []\n for elem in text:\n elements_list = []\n elements_list.append(elem)\n elements_lists.append(elements_list)\n return elements_lists\n\n#tworzy string z listy elemnentów i zapisuje plik w formacie py\ndef file_writer(ready_text):\n string_of_list = \"\"\n for item in ready_text:\n string_of_list += item[0]\n string_of_list += \"\\n\"\n\n with open('test_ready_file.py', mode='w') as file:\n file.write(string_of_list)\n\n\n#_______WYWOLYWANIE FUNKCJI\ncleaned_text = text_cleaner(text)\nelements = elements_iterator(cleaned_text)\ncreated_file = file_writer(elements)\nprint(elements)\n"
}
] | 2 |
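A condensed usage sketch of the cv2 + pytesseract flow implemented in main.py above, with the GUI preview removed. The function name `screenshot_to_code` and the file paths are placeholders, not part of the repository.

import cv2 as cv
import pytesseract

def screenshot_to_code(image_path, output_path="recovered.py"):
    # Read the screenshot, OCR it, drop empty lines, and save the result.
    image = cv.imread(image_path)
    if image is None:
        raise FileNotFoundError("Could not read " + image_path)
    text = pytesseract.image_to_string(image)
    lines = [line for line in text.split("\n") if line.strip()]
    with open(output_path, "w") as handle:
        handle.write("\n".join(lines) + "\n")
    return lines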
lightinguy/animated-octo-potato
|
https://github.com/lightinguy/animated-octo-potato
|
600befa6ab61df8f942b279e96a7ebf7f3926122
|
7d8a03e5fce31a379500d8df4278cd792ab0058f
|
daa967cc86845796619b91751118a0c88b53a45f
|
refs/heads/master
| 2020-07-20T02:01:00.712956 | 2016-11-16T23:27:08 | 2016-11-16T23:27:08 | 73,749,501 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.7142857313156128,
"alphanum_fraction": 0.7142857313156128,
"avg_line_length": 11.5,
"blob_id": "40f722153da89c1defe80fcc563ddbecd8fd27d9",
"content_id": "8ce10d6afaeb4861ee52166baf91504a49582023",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 49,
"license_type": "no_license",
"max_line_length": 22,
"num_lines": 4,
"path": "/ga/test.py",
"repo_name": "lightinguy/animated-octo-potato",
"src_encoding": "UTF-8",
"text": "my_var = \"hello world\"\nprint my_var\n\nprint\"hello\""
}
] | 1 |
eQOHire2/eqo_test
|
https://github.com/eQOHire2/eqo_test
|
40ce2a8aa096c6408429685d8c23596ef74993b7
|
acda30a8de3abd36d25573d4c5ed64bdd3c2d62a
|
7b02bcf37f8dedfa5a272ea75dd9140c25003916
|
refs/heads/master
| 2021-03-24T09:36:58.973321 | 2018-02-23T05:17:21 | 2018-02-23T05:17:21 | 122,298,458 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5,
"alphanum_fraction": 0.5,
"avg_line_length": 12,
"blob_id": "6c843c85a14c4aa60f93063174f87bda510367ff",
"content_id": "7130ccdf3b5d1e0ed6ace1869e07205ed7111d38",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 14,
"license_type": "no_license",
"max_line_length": 12,
"num_lines": 1,
"path": "/README.md",
"repo_name": "eQOHire2/eqo_test",
"src_encoding": "UTF-8",
"text": "\"# eqo_test\" \n"
},
{
"alpha_fraction": 0.6381475925445557,
"alphanum_fraction": 0.6467818021774292,
"avg_line_length": 23.44230842590332,
"blob_id": "8de3968f5c8f4eb4f35c0bfa21e7ff619f66255c",
"content_id": "7929af48eee4ef84d52b88e32a3a5b9426de6cf2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1274,
"license_type": "no_license",
"max_line_length": 86,
"num_lines": 52,
"path": "/json_uploader.py",
"repo_name": "eQOHire2/eqo_test",
"src_encoding": "UTF-8",
"text": "import boto3\nimport os\nimport json\nimport decimal\n\n\ndynamodb = boto3.resource('dynamodb')\ntable = dynamodb.Table('Main_Table')\n\ns3 = boto3.resource('s3')\njpeg_bucket_name = 'eqo-jpeg-test'\njpeg_bucket = s3.Bucket(jpeg_bucket_name)\n\npdf_bucket_name = 'eqo-pdf-test'\npdf_bucket = s3.Bucket(pdf_bucket_name)\n\n\ndef write_json_to_dynamo(folder):\n if folder[:-1] != '/' or folder[-1] != '\\\\':\n folder += '/'\n\n with table.batch_writer() as batch:\n for filename in os.listdir(folder):\n try:\n file = json.load(open(folder + filename), parse_float=decimal.Decimal)\n except json.decoder.JSONDecodeError:\n print(filename + \" could not be decoded\")\n continue\n\n batch.put_item(Item = file['User'])\n\n\ndef upload_resumes(folder):\n for filename in os.listdir(folder):\n s3.Object(pdf_bucket_name, filename).put(Body=open(folder + filename, 'rb'))\n\n\ndef upload_pictures(folder):\n for filename in os.listdir(folder):\n s3.Object(jpeg_bucket_name, filename).put(Body=open(folder + filename, 'rb'))\n\n\njson_folder = 'JSON Files/'\nwrite_json_to_dynamo(json_folder)\n\n\nresume_folder = 'resumes/'\nupload_resumes(resume_folder)\n\n\npic_folder = 'profile/'\nupload_pictures(pic_folder)\n\n\n\n"
},
{
"alpha_fraction": 0.6911196708679199,
"alphanum_fraction": 0.6988416910171509,
"avg_line_length": 22.981481552124023,
"blob_id": "7050cccfbc54703be896bfe2cc29eadd2e702dd2",
"content_id": "f0be7258479d59aaa3b954aeb33dbbc52a5ad757",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1295,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 54,
"path": "/ftptest.py",
"repo_name": "eQOHire2/eqo_test",
"src_encoding": "UTF-8",
"text": "from ftplib import FTP\nfrom pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter\nfrom pdfminer.pdfpage import PDFPage\nfrom pdfminer.converter import TextConverter\nfrom pdfminer.layout import LAParams\nfrom io import StringIO\nimport os\n\nftp = FTP('ec2-52-43-60-46.us-west-2.compute.amazonaws.com')\nftp.login(user='eqohireftp', passwd= 'eQoHire1234')\n\nprint(ftp.dir)\n\ndef read_file(filename):\n rsrcmgr = PDFResourceManager()\n retstr = StringIO()\n laparams = LAParams()\n device = TextConverter(rsrcmgr, retstr, laparams=laparams)\n\n interpreter = PDFPageInterpreter(rsrcmgr, device)\n pagenos = set()\n\n print(\"start of \" + filename)\n ftp.retrbinary('RETR ' + filename, open(filename, 'wb').write)\n\n fp = open(filename, 'rb')\n for page in PDFPage.get_pages(fp, pagenos):\n interpreter.process_page(page)\n fp.close()\n os.remove(filename)\n text = retstr.getvalue()\n print(text)\n\n device.close()\n retstr.close()\n\n print(\"end of \" + filename)\n\n\ndef grab_file(folder, filename):\n ftp.retrbinary('RETR ' + filename, open(folder + filename, 'wb').write)\n\n\nftp.cwd('/resumes')\nfor filename in ftp.nlst():\n read_file(filename)\n\nfolder = 'resumes/'\nftp.cwd(folder)\nfor filename in ftp.nlst():\n grab_file(folder, filename)\n\n\nftp.quit()\n"
}
] | 3 |
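json_uploader.py above batch-writes each JSON file's `User` object into the `Main_Table` DynamoDB table and uploads resumes and pictures to S3. A hedged sketch of reading one of those items back with boto3; the partition-key name `UserID` is an assumption, since the table's key schema does not appear in the code.

import boto3

dynamodb = boto3.resource('dynamodb')
table = dynamodb.Table('Main_Table')

# Fetch a single item; the key name below is assumed, not taken from the repository.
response = table.get_item(Key={'UserID': 'example-user-id'})
item = response.get('Item')
if item is None:
    print('No item found for that key')
else:
    print(item)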
ivpi/liblightnvm
|
https://github.com/ivpi/liblightnvm
|
85bff44a0e4ff3195af8e4f0aa89d2c3a17257c0
|
0edfe8ffc2724fd93b722ff24a84a0481ebfaf7d
|
03c8a8728b0776ec08cbcd3bec00d9c3c871b1f7
|
refs/heads/master
| 2020-07-23T01:56:47.035638 | 2017-04-10T20:03:11 | 2017-04-10T20:03:11 | 73,731,229 | 0 | 0 | null | 2016-11-14T17:47:00 | 2016-11-10T10:57:23 | 2016-11-10T15:55:30 | null |
[
{
"alpha_fraction": 0.7133539319038391,
"alphanum_fraction": 0.716786801815033,
"avg_line_length": 37.84000015258789,
"blob_id": "7819fc317f1b974b36804b4fabd56e69dc94a61e",
"content_id": "14cc292d1d115edc73be93227ec7857728ac13c5",
"detected_licenses": [
"BSD-2-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 2913,
"license_type": "permissive",
"max_line_length": 81,
"num_lines": 75,
"path": "/deprecated/nvm_beam.h",
"repo_name": "ivpi/liblightnvm",
"src_encoding": "UTF-8",
"text": "/*\n * flash_beam - I/O abstraction for flash memories.\n *\n * Copyright (C) 2015 Javier González <[email protected]>\n * All rights reserved.\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions are met:\n *\n * - Redistributions of source code must retain the above copyright notice, this\n * list of conditions and the following disclaimer.\n * - Redistributions in binary form must reproduce the above copyright notice,\n * this list of conditions and the following disclaimer in the documentation\n * and/or other materials provided with the distribution.\n *\n * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND ANY\n * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY\n * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n */\n#ifndef __NVM_BEAM_H\n#define __NVM_BEAM_H\n\n#include <stdio.h>\n#include <stdlib.h>\n#include <uthash.h>\n\n#include <liblightnvm.h>\n#include <nvm.h>\n\n#define MAX_BLOCKS 5\n\n#define FORCE_SYNC 1\n#define OPTIONAL_SYNC 2\n\nstruct w_buffer {\n\tsize_t cursize;\t\t/* Current buf lenght. Follows mem */\n\tsize_t cursync;\t\t/* Bytes in buf that have been synced to media */\n\tsize_t buf_limit;\t/* Limit for the allocated memory region */\n\tvoid *buf;\t\t/* Buffer to cache writes */\n\tchar *mem;\t\t/* Points to the place in buf where writes can be\n\t\t\t\t * appended to. It defines the part of the\n\t\t\t\t * buffer containing valid data */\n\tchar *sync;\t\t/* Points to the place in buf until which data\n\t\t\t\t * has been synced to the media */\n};\n\n/* TODO: Allocate dynamic number of blocks */\nstruct beam {\n\tint gid;\t\t\t\t/* internal global identifier */\n\tint lun;\t\t\t\t/* virtual lun mapped to beam*/\n\tstruct nvm_dev *dev;\t\t\t/* LightNVM target */\n\tstruct nvm_vblk *current_w_vblock;\t/* current block in use */\n\tstruct nvm_vblk vblocks[MAX_BLOCKS];\t/* vblocks forming the beam */\n\tint nvblocks;\t\t\t\t/* number of vblocks */\n\tstruct w_buffer w_buffer;\t\t/* write buffer */\n\tunsigned long bytes;\t\t\t/* valid bytes */\n\tUT_hash_handle hh;\t\t\t/* hash handle for uthash */\n};\n\nstatic inline size_t calculate_ppa_off(size_t cursync, int write_page_size)\n{\n\tsize_t disaligned_data = cursync % write_page_size;\n\tsize_t aligned_data = cursync / write_page_size;\n\tint rest = (disaligned_data == 0) ? 0 : 1;\n\treturn (aligned_data + rest);\n}\n\n#endif /* __NVM_BEAM_H */\n"
},
{
"alpha_fraction": 0.640312135219574,
"alphanum_fraction": 0.6481154561042786,
"avg_line_length": 23.69454574584961,
"blob_id": "6398e9ba8955f10c4569caf958cf25b5710488d6",
"content_id": "c5d986b80ac91811a0870a88418cb31a4995440d",
"detected_licenses": [
"BSD-2-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 6792,
"license_type": "permissive",
"max_line_length": 109,
"num_lines": 275,
"path": "/src/nvm_vblk.c",
"repo_name": "ivpi/liblightnvm",
"src_encoding": "UTF-8",
"text": "/*\n * vblock - Virtual block functions\n *\n * Copyright (C) 2015 Javier González <[email protected]>\n * Copyright (C) 2015 Matias Bjørling <[email protected]>\n * Copyright (C) 2016 Simon A. F. Lund <[email protected]>\n * All rights reserved.\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions are met:\n *\n * - Redistributions of source code must retain the above copyright notice,\n * this list of conditions and the following disclaimer.\n * - Redistributions in binary form must reproduce the above copyright notice,\n * this list of conditions and the following disclaimer in the documentation\n * and/or other materials provided with the distribution.\n *\n * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND ANY\n * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY\n * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n */\n#include <assert.h>\n#include <stdlib.h>\n#include <stdint.h>\n#include <unistd.h>\n#include <string.h>\n#include <errno.h>\n#include <stdio.h>\n#include <linux/lightnvm.h>\n#include <liblightnvm.h>\n#include <nvm.h>\n#include <nvm_debug.h>\n\nstruct nvm_vblk *nvm_vblk_new(void)\n{\n\tstruct nvm_vblk *vblk;\n\n\tvblk = malloc(sizeof(*vblk));\n\tif (!vblk)\n\t\treturn NULL;\n\n\tvblk->dev = 0;\n\tvblk->addr.ppa = 0;\n\tvblk->pos_write = 0;\n\tvblk->pos_read = 0;\n\n\treturn vblk;\n}\n\nstruct nvm_vblk *nvm_vblk_new_on_dev(NVM_DEV dev, NVM_ADDR addr)\n{\n\tstruct nvm_vblk *vblk;\n\n\tvblk = malloc(sizeof(*vblk));\n\tif (!vblk)\n\t\treturn NULL;\n\n\tvblk->dev = dev;\n\tvblk->addr = addr;\n\tvblk->pos_write = 0;\n\tvblk->pos_read = 0;\n\n\treturn vblk;\n}\n\nvoid nvm_vblk_free(struct nvm_vblk *vblk)\n{\n\tfree(vblk);\n}\n\nvoid nvm_vblk_pr(struct nvm_vblk *vblk)\n{\n\tprintf(\"vblk {\");\n\tprintf(\"\\n dev(%p),\", vblk->dev);\n\tprintf(\"\\n \");\n\tnvm_addr_pr(vblk->addr);\n\tprintf(\"}\\n\");\n}\n\nstruct nvm_addr nvm_vblk_attr_addr(struct nvm_vblk *vblk)\n{\n\treturn vblk->addr;\n}\n\nint nvm_vblk_gets(struct nvm_vblk *vblock, struct nvm_dev *dev, uint32_t ch,\n\t\t uint32_t lun)\n{\n\tstruct nvm_ioctl_dev_vblk ctl;\n\tstruct nvm_addr addr;\n\tint err;\n\n\tif (ch >= dev->geo.nchannels)\n\t\treturn -1;\n\n\tif (lun >= dev->geo.nluns)\n\t\treturn -1;\n\n\taddr.ppa = 0;\n\taddr.g.lun = lun;\n\taddr.g.ch = ch;\n\n\tmemset(&ctl, 0, sizeof(ctl));\n\tctl.ppa = addr.ppa;\n\n\terr = ioctl(dev->fd, NVM_DEV_BLOCK_GET, &ctl);\n\tif (err)\n\t\treturn err;\n\n\tvblock->addr.ppa = ctl.ppa;\n\tvblock->dev = dev;\n\n\treturn 0;\n}\n\nint nvm_vblk_get(struct nvm_vblk *vblock, struct nvm_dev *dev)\n{\n\treturn nvm_vblk_gets(vblock, dev, 0, 0);\n}\n\nint nvm_vblk_put(struct nvm_vblk *vblock)\n{\n\tstruct nvm_ioctl_dev_vblk ctl;\n\tint ret;\n\n\tmemset(&ctl, 0, sizeof(ctl));\n\tctl.ppa = vblock->addr.ppa;\n\n\tret = ioctl(vblock->dev->fd, NVM_DEV_BLOCK_PUT, &ctl);\n\n\treturn 
ret;\n}\n\nssize_t nvm_vblk_erase(struct nvm_vblk *vblk)\n{\n\tstruct nvm_geo geo = nvm_dev_attr_geo(vblk->dev);\n\n\tconst int len = geo.nplanes;\n\tstruct nvm_addr list[len];\n\tint i;\n\n\tfor (i = 0; i < len; ++i) {\n\t\tlist[i].ppa = vblk->addr.ppa;\n\t\tlist[i].g.pl = i;\n\t}\n\n\treturn nvm_addr_erase(vblk->dev, list, len, NVM_MAGIC_FLAG_DEFAULT);\n}\n\nssize_t nvm_vblk_pwrite(struct nvm_vblk *vblk, const void *buf,\n\t\t\tsize_t count, size_t offset)\n{\n\tconst struct nvm_geo geo = nvm_dev_attr_geo(vblk->dev);\n\tconst int len = geo.nplanes * geo.nsectors;\n\tconst int tot_len = len * (count / (geo.nsectors * geo.nbytes * geo.nplanes));\n\t//printf(\"lib: tot_len %d, count %lu, nsector %lu, nbytes %lu\\n\", tot_len, count, geo.nsectors, geo.nbytes);\n\tconst int align = len * geo.nbytes;\n\tconst int vpg_offset = offset / align;\n\tsize_t nbytes_written = 0, nbytes_filled = 0;\n\n\tif ((count % align) || (offset % align)) {\n\t\terrno = EINVAL;\n\t\treturn -1;\n\t}\n\n\twhile (nbytes_written < count) {\n\t\tstruct nvm_addr list[tot_len];\n\t\tssize_t err;\n\t\tint i;\n\n\t\tnbytes_filled = 0;\n\n\t\tfor (i = 0; i < tot_len; i++) {\n\t\t\tlist[i].ppa = vblk->addr.ppa;\n\n\t\t\tlist[i].g.pg = (nbytes_filled / align) + vpg_offset;\n\t\t\tlist[i].g.sec = i % geo.nsectors;\n\t\t\tlist[i].g.pl = (i / geo.nsectors) % geo.nplanes;\n\t\t\t//printf(\"i %d, pg %d, sec %d, pl %d\\n\", i, list[i].g.pg, list[i].g.sec, list[i].g.pl);\n\n\t\t\tif ((i+1) % (geo.nsectors * geo.nplanes) == 0)\n\t\t\t\tnbytes_filled += align;\n\t\t}\n\n\t\terr = nvm_addr_write(vblk->dev, list, tot_len, buf + nbytes_written,\n\t\t\t\t NVM_MAGIC_FLAG_DEFAULT);\n\t\tif (err) {\t// errno set by `nvm_addr_write`\n\t\t\treturn -1;\n\t\t}\n\n\t\tnbytes_written += nbytes_filled;\n\t}\n\n\treturn 0;\n}\n\nssize_t nvm_vblk_write(struct nvm_vblk *vblk, const void *buf, size_t count)\n{\n\tssize_t err;\n\n\terr = nvm_vblk_pwrite(vblk, buf, count, vblk->pos_write);\n\tif (err)\n\t\treturn -1;\t// errno set by nvm_vblk_pwrite / nvm_addr_write\n\n\tvblk->pos_write += count;\n\n\treturn 0;\n}\n\nssize_t nvm_vblk_pread(struct nvm_vblk *vblk, void *buf, size_t count,\n\t\t size_t offset)\n{\n\tconst struct nvm_geo geo = nvm_dev_attr_geo(vblk->dev);\n\tconst int len = geo.nplanes * geo.nsectors;\n\tconst int tot_len = len * (count / (geo.nsectors * geo.nbytes * geo.nplanes));\n\tconst int align = len * geo.nbytes;\n\tconst int vpg_offset = offset / align;\n\tsize_t nbytes_read = 0, nbytes_filled = 0;\n\n\t//printf(\"lib: tot_len %d, count %lu, nsector %lu, nbytes %lu\\n\", tot_len, count, geo.nsectors, geo.nbytes);\n\n\tif ((count % align) || (offset % align)) {\n\t\terrno = EINVAL;\n\t\treturn -1;\n\t}\n\n\twhile (nbytes_read < count) {\n\t\tstruct nvm_addr list[tot_len];\n\t\tssize_t err;\n\t\tint i;\n\n\t\tnbytes_filled = 0;\n\n\t\tfor (i = 0; i < tot_len; i++) {\n\t\t\tlist[i].ppa = vblk->addr.ppa;\n\n\t\t\tlist[i].g.pg = (nbytes_filled / align) + vpg_offset;\n\t\t\tlist[i].g.sec = i % geo.nsectors;\n\t\t\tlist[i].g.pl = (i / geo.nsectors) % geo.nplanes;\n\t\t\t//printf(\"i %d, pg %d, sec %d, pl %d\\n\", i, list[i].g.pg, list[i].g.sec, list[i].g.pl);\n\n\t\t\tif ((i+1) % (geo.nsectors * geo.nplanes) == 0)\n nbytes_filled += align;\n\t\t}\n\n\t\terr = nvm_addr_read(vblk->dev, list, tot_len, buf + nbytes_read,\n\t\t\t\t NVM_MAGIC_FLAG_DEFAULT);\n\t\tif (err) {\t// errno set by `nvm_addr_read`\n\t\t\treturn -1;\n\t\t}\n\n\t\tnbytes_read += nbytes_filled;\n\t}\n\n\treturn 0;\n}\n\nssize_t nvm_vblk_read(struct nvm_vblk *vblk, void *buf, 
size_t count)\n{\n\tssize_t err;\n\n\terr = nvm_vblk_pread(vblk, buf, count, vblk->pos_read);\n\tif (err)\n\t\treturn -1;\t// errno set by nvm_vblk_pread / nvm_addr_read\n\n\tvblk->pos_read += count;\n\n\treturn 0;\n}\n\n"
},
{
"alpha_fraction": 0.6048598289489746,
"alphanum_fraction": 0.6108410954475403,
"avg_line_length": 20.739837646484375,
"blob_id": "d9bc7b7ccf200d8258ec778eac1d4ea3a211af69",
"content_id": "e5a66c3df3ba60fe98680b65622a69bc23e79fac",
"detected_licenses": [
"BSD-2-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 2675,
"license_type": "permissive",
"max_line_length": 78,
"num_lines": 123,
"path": "/tests/test_mbad.c",
"repo_name": "ivpi/liblightnvm",
"src_encoding": "UTF-8",
"text": "/**\n * This tests attempts to determine whether or not the nvm_dev_mark function\n * works as expected. It does so by marking all blocks bad, then tries to _get\n * one. Which is then expected to fail, since all blocks are bad.\n */\n#include <stdlib.h>\n#include <string.h>\n#include <stdio.h>\n#include <liblightnvm.h>\n\n#include <CUnit/Basic.h>\n\nstatic char nvm_dev_name[DISK_NAME_LEN] = \"nvme0n1\";\n\nvoid TEST_DEV_MARK(void)\n{\n\tNVM_DEV dev;\n\tNVM_GEO geo;\n\tNVM_VBLK vblk;\n\n\tint vblocks_total;\t\t\t/* Total number of vblocks */\n\tint i, ch, lun;\n\n\tint mark_total;\t\t\t\t/* Calls to mark */\n\tint mark_failed;\t\t\t/* Failed calls to mark */\n\t\n\tvblk = nvm_vblk_new();\t\t\t/* Allocate a vblock */\n\tif (!vblk) {\n\t\tCU_FAIL();\n\t\treturn;\n\t}\n\n\tdev = nvm_dev_open(nvm_dev_name);\t/* Open device */\n\tif (!dev) {\n\t\tnvm_vblk_free(vblk);\n\t\tCU_FAIL();\n\t\treturn;\n\t}\n\n\tgeo = nvm_dev_attr_geo(dev);\t\t/* Get geometry */\n\n\tvblocks_total = geo.nluns * geo.nblocks;\n\n\tmark_total = 0;\n\tmark_failed = 0;\n\tfor (i = 0; i < vblocks_total; ++i) {\n\t\tNVM_ADDR addr[geo.nplanes];\n\t\tint err, pl;\n\n\t\tfor (pl = 0; pl < geo.nplanes; ++pl) {\n\t\t\taddr[pl].g.ch = i % geo.nchannels;\t/* Setup block address */\n\t\t\taddr[pl].g.lun = i % geo.nluns;\n\t\t\taddr[pl].g.blk = i % geo.nblocks;\n\t\t\taddr[pl].g.pl = pl;\n\t\t}\n\n\t\terr = nvm_addr_mark(dev, addr, geo.nplanes, 0x1);\n\t\tmark_total++;\n\t\tif (err) {\n\t\t\tmark_failed++;\n\t\t}\n\t}\n\n\tnvm_geo_pr(geo);\n\tprintf(\"vblocks_total(%d), mark_total(%d) / mark_failed(%d)\\n\",\n\t\tvblocks_total, mark_total, mark_failed);\n\n\t// Now try to get a block via each channel/lun, they should all fail\n\tfor (ch = 0; ch < geo.nchannels; ch++) {\n\t\tfor (lun = 0; lun < geo.nluns; ++lun) {\n\t\t\tint err = nvm_vblk_gets(vblk, dev, ch, lun);\n\t\t\tif (!err) {\n\t\t\t\tprintf(\"What? No error?\\n\");\n\t\t\t\tnvm_vblk_pr(vblk);\n\t\t\t}\n\t\t\tnvm_vblk_put(vblk);\n\t\t}\n\t}\n\n\tnvm_vblk_free(vblk);\n\tnvm_dev_close(dev);\n}\n\nint main(int argc, char **argv)\n{\n\tif (argc > 1) {\n\t\tif (strlen(argv[1]) > DISK_NAME_LEN) {\n\t\t\tprintf(\"len(dev_name) > %d\\n\", DISK_NAME_LEN - 1);\n\t\t\treturn -1;\n\t\t}\n\t\tstrcpy(nvm_dev_name, argv[1]);\n\t}\n\n\tCU_pSuite pSuite = NULL;\n\n\t/* initialize the CUnit test registry */\n\tif (CUE_SUCCESS != CU_initialize_registry())\n\t\treturn CU_get_error();\n\n\t/* add a suite to the registry */\n\tpSuite = CU_add_suite(\"nvm_dev*\", NULL, NULL);\n\tif (NULL == pSuite) {\n\t\tCU_cleanup_registry();\n\t\treturn CU_get_error();\n\t}\n\n\t/* add the tests to the suite */\n\tif (\n\t(NULL == CU_add_test(pSuite, \"nvm_dev_mark\", TEST_DEV_MARK)) ||\n\t0\n\t)\n\t{\n\t\tCU_cleanup_registry();\n\t\treturn CU_get_error();\n\t}\n\n\t/* Run all tests using the CUnit Basic interface */\n\tCU_basic_set_mode(CU_BRM_SILENT);\n\tCU_basic_run_tests();\n\tCU_cleanup_registry();\n\n\treturn CU_get_error();\n}\n\n"
},
{
"alpha_fraction": 0.6469981670379639,
"alphanum_fraction": 0.6539551615715027,
"avg_line_length": 25.222972869873047,
"blob_id": "4ea07bc0489b7eee05919c59b4d0e1f457026c05",
"content_id": "5e9159757054f68e3f7322230ef9cee676f62349",
"detected_licenses": [
"BSD-2-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 3881,
"license_type": "permissive",
"max_line_length": 75,
"num_lines": 148,
"path": "/tests/test_vblk_gp_n.c",
"repo_name": "ivpi/liblightnvm",
"src_encoding": "UTF-8",
"text": "/*\n * This test attempts to allocate all blocks on the given device.\n * Allocating all blocks will fail at least for two reasons:\n *\n * - vblocks might be reserved by others\n * - vblocks might be bad\n *\n * The test therefore only fails when the amount of successfully allocated\n * vblocks is below threshold TOTAL_NUMBER_OF_VBLOCK - k. Where k is some\n * arbitrarily chosen number defaulting to 8 failed gets.\n *\n * NOTE: Be wary about running this test on actual hardware since it might\n *\t wear out an MLC device after about 1000-3000 runs. This is currently\n *\t true since 'vblock_get' erases a block when responding to an _GET.\n */\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n#include <liblightnvm.h>\n\n#include <CUnit/Basic.h>\n\nstatic char nvm_dev_name[DISK_NAME_LEN] = \"nvm_vblock_tst\";\nint k = 10;\t// Total number of nvm_vblk_gets allowed to fail\n\nvoid TEST_VBLOCK_GP_N(void)\n{\n\tNVM_DEV dev;\n\tNVM_GEO geo;\n\n\tNVM_VBLK *vblocks;\t/* Array of vblocks */\n\tint vblocks_total;\t/* Number of vblocks on device / allocated */\n\tint vblocks_reserved;\t/* Number of vblocks successfully reserved */\n\n\tint ngets;\t\t/* Total number of ngets */\n\tint ngets_failed;\t/* Total number of failed ngets */\n\tint *ngets_lun;\t\t/* Number of gets per lun */\n\tint *ngets_lun_failed;\t/* Number of failed gets per lun */\n\n\tint i;\n\n\tdev = nvm_dev_open(nvm_dev_name);\n\tCU_ASSERT_PTR_NOT_NULL(dev);\n\n\tgeo = nvm_dev_attr_geo(dev);\n\n\tngets = 0;\n\tngets_lun = malloc(sizeof(ngets_lun)*geo.nluns);\n\tmemset(ngets_lun, 0, sizeof(ngets_lun)*geo.nluns);\n\n\tngets_failed = 0;\n\tngets_lun_failed = malloc(sizeof(ngets_lun_failed)*geo.nluns);\n\tmemset(ngets_lun_failed, 0, sizeof(ngets_lun_failed)*geo.nluns);\n\n\tvblocks_total = geo.nluns * geo.nblocks;\t/* Allocate vblocks */\n\tvblocks = malloc(sizeof(NVM_VBLK) * vblocks_total);\n\tCU_ASSERT_PTR_NOT_NULL(vblocks);\n\n\tfor (i=0; i < vblocks_total; i++) {\n\t\tvblocks[i] = nvm_vblk_new();\n\t\tCU_ASSERT_PTR_NOT_NULL(vblocks[i]);\n\t}\n\n\tvblocks_reserved = 0;\n\tfor (i=0; i < vblocks_total; i++) {\t\t/* Reserve vblocks */\n\t\tint err, ch, lun;\n\n\t\tch = i % geo.nchannels;\n\t\tlun = i % geo.nluns;\n\t\terr = nvm_vblk_gets(vblocks[vblocks_reserved], dev, ch, lun);\n\t\tngets++;\n\t\tngets_lun[lun]++;\n\t\tif (err) {\n\t\t\tngets_failed++;\n\t\t\tngets_lun_failed[lun]++;\n\t\t\tcontinue;\n\t\t}\n\n\t\tvblocks_reserved++;\n\t}\n\n\t/* Check that we did as much as we expected */\n\tCU_ASSERT(ngets == vblocks_total);\n\n\t/* Check that we got a sufficient amount of vblocks */\n\tCU_ASSERT(vblocks_total - vblocks_reserved < k)\n\n\t/* That is... 
no more than k failures */\n\tCU_ASSERT(ngets_failed <= k);\n\n\t/* Print counters / totals\n\tprintf(\"vblocks_total(%d)\\n\", vblocks_total);\n\tprintf(\"vblocks_reserved(%d)\\n\", vblocks_reserved);\n\tprintf(\"ngets(%d), ngets_failed(%d)\\n\", ngets, ngets_failed);\n\tfor(i=0; i < geo.nluns; i++) {\n\t\tprintf(\"i(%d), ngets_lun(%d) / ngets_lun_failed(%d)\\n\",\n\t\t\ti, ngets_lun[i], ngets_lun_failed[i]);\n\t}\n\t*/\n\n\tfor (i=0; i < vblocks_reserved; i++) {\t\t/* Release vblocks */\n\t\tint err = nvm_vblk_put(vblocks[i]);\n\t\tCU_ASSERT(!err);\n\t\tif (err) {\n\t\t\tcontinue;\n\t\t}\n\t}\n\n\tfor (i=0; i < vblocks_total; i++) {\t\t/* Deallocate vblocks */\n\t\tnvm_vblk_free(vblocks[i]);\n\t}\n\tfree(vblocks);\n}\n\nint main(int argc, char **argv)\n{\n\tif (argc != 2) {\n\t\tprintf(\"Usage: %s dev_name\\n\", argv[0]);\n\t\treturn -1;\n\t}\n\tif (strlen(argv[1]) > DISK_NAME_LEN) {\n\t\tprintf(\"len(device_name) > %d\\n\", DISK_NAME_LEN - 1);\n\t}\n\tstrcpy(nvm_dev_name, argv[1]);\n\n\tCU_pSuite pSuite = NULL;\n\n\tif (CUE_SUCCESS != CU_initialize_registry())\n\t\treturn CU_get_error();\n\n\tpSuite = CU_add_suite(\"_vblock_[gets|put] n\", NULL, NULL);\n\tif (NULL == pSuite) {\n\t\tCU_cleanup_registry();\n\t\treturn CU_get_error();\n\t}\n\n\tif (NULL == CU_add_test(pSuite, \"_vblock_[gets|put] n\", TEST_VBLOCK_GP_N))\n\t{\n\t\tCU_cleanup_registry();\n\t\treturn CU_get_error();\n\t}\n\n\tCU_basic_set_mode(CU_BRM_SILENT);\n\tCU_basic_run_tests();\n\tCU_cleanup_registry();\n\n\treturn CU_get_error();\n}\n"
},
{
"alpha_fraction": 0.6720554232597351,
"alphanum_fraction": 0.6766743659973145,
"avg_line_length": 26.929031372070312,
"blob_id": "4437348d7c0c6772389af17b6cd3876940340ada",
"content_id": "930bebdd69d321d45a1d8d8d3a5cca19097da5c9",
"detected_licenses": [
"BSD-2-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 4330,
"license_type": "permissive",
"max_line_length": 80,
"num_lines": 155,
"path": "/src/nvm_util.c",
"repo_name": "ivpi/liblightnvm",
"src_encoding": "UTF-8",
"text": "/*\n * utils - Helper functions and utitilies used by liblnvm\n *\n * Copyright (C) 2015 Javier González <[email protected]>\n * Copyright (C) 2015 Matias Bjørling <[email protected]>\n * All rights reserved.\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions are met:\n *\n * - Redistributions of source code must retain the above copyright notice,\n * this list of conditions and the following disclaimer.\n * - Redistributions in binary form must reproduce the above copyright notice,\n * this list of conditions and the following disclaimer in the documentation\n * and/or other materials provided with the distribution.\n *\n * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND ANY\n * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY\n * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n */\n#define _GNU_SOURCE\n#include <stdlib.h>\n\n#include <liblightnvm.h>\n#include <string.h>\n#include <nvm_debug.h>\n#include <nvm_util.h>\n\n/*\n * Searches the udev 'subsystem' for device named 'dev_name' of type 'devtype'\n *\n * NOTE: Caller is responsible for calling `udev_device_unref` on the returned\n * udev_device\n *\n * @returns First device in 'subsystem' of given 'devtype' with given 'dev_name'\n */\nstruct udev_device *udev_dev_find(struct udev *udev, const char *subsystem,\n\t\t\t\t const char *devtype, const char *dev_name)\n{\n\tstruct udev_device *dev = NULL;\n\n\tstruct udev_enumerate *enumerate;\n\tstruct udev_list_entry *devices, *dev_list_entry;\n\n\tenumerate = udev_enumerate_new(udev);\t/* Search 'subsystem' */\n\tudev_enumerate_add_match_subsystem(enumerate, subsystem);\n\tudev_enumerate_scan_devices(enumerate);\n\tdevices = udev_enumerate_get_list_entry(enumerate);\n\tudev_list_entry_foreach(dev_list_entry, devices) {\n\t\tconst char *path;\n\t\tint path_len;\n\n\t\tpath = udev_list_entry_get_name(dev_list_entry);\n\t\tif (!path) {\n\t\t\tNVM_DEBUG(\"Failed retrieving path from entry\\n\");\n\t\t\tcontinue;\n\t\t}\n\t\tpath_len = strlen(path);\n\n\t\tif (dev_name) {\t\t\t/* Compare name */\n\t\t\tint dev_name_len = strlen(dev_name);\n\t\t\tint match = strcmp(dev_name,\n\t\t\t\t\t path + path_len-dev_name_len);\n\n\t\t\tif (match != 0) {\n\t\t\t\tNVM_DEBUG(\"Name comparison failed\\n\");\n\t\t\t\tcontinue;\n\t\t\t}\n\t\t}\n\t\t\t\t\t\t/* Get the udev object */\n\t\tdev = udev_device_new_from_syspath(udev, path);\n\t\tif (!dev) {\n\t\t\tNVM_DEBUG(\"Failed retrieving device from path\\n\");\n\t\t\tcontinue;\n\t\t}\n\n\t\tif (devtype) {\t\t\t/* Compare device type */\n\t\t\tconst char *sys_devtype;\n\t\t\tint sys_devtype_match;\n\n\t\t\tsys_devtype = udev_device_get_devtype(dev);\n\t\t\tif (!sys_devtype) {\n\t\t\t\tNVM_DEBUG(\"sys_devtype(%s)\", sys_devtype);\n\t\t\t\tudev_device_unref(dev);\n\t\t\t\tdev = NULL;\n\t\t\t\tcontinue;\n\t\t\t}\n\n\t\t\tsys_devtype_match = strcmp(devtype, 
sys_devtype);\n\t\t\tif (sys_devtype_match != 0) {\n\t\t\t\tNVM_DEBUG(\"%s != %s\\n\", devtype, sys_devtype);\n\t\t\t\tudev_device_unref(dev);\n\t\t\t\tdev = NULL;\n\t\t\t\tcontinue;\n\t\t\t}\n\t\t}\n\n\t\tbreak;\n\t}\n\n\treturn dev;\n}\n\nstruct udev_device *udev_nvmdev_find(struct udev *udev, const char *dev_name)\n{\n\tstruct udev_device *dev;\n\n\tdev = udev_dev_find(udev, \"gennvm\", NULL, dev_name);\n\tif (dev)\n\t\treturn dev;\n\n\tNVM_DEBUG(\"NOTHING FOUND\\n\");\n\treturn NULL;\n\n}\n\nvoid *nvm_buf_alloc(NVM_GEO geo, size_t nbytes)\n{\n\tchar *buf;\n\tint ret;\n\n\tret = posix_memalign((void **)&buf, geo.nbytes, nbytes);\n\tif (ret)\n\t\treturn NULL;\n\n\treturn buf;\n}\n\nvoid nvm_buf_fill(char *buf, size_t nbytes)\n{\n\t#pragma omp parallel for schedule(static, 1)\n\tfor (size_t i = 0; i < nbytes; ++i)\n\t\tbuf[i] = (i % 26) + 65;\n}\n\nvoid nvm_buf_pr(char *buf, size_t nbytes)\n{\n\tconst int width = 32;\n\tint i;\n\n\tprintf(\"** NVM_BUF_PR - BEGIN **\");\n\tfor (i = 0; i < nbytes; i++) {\n\t\tif (!(i % width))\n\t\t\tprintf(\"\\ni[%d,%d]: \", i, i+(width-1));\n\t\tprintf(\" %c\", buf[i]);\n\t}\n\tprintf(\"\\n** NVM_BUF_PR - END **\\n\");\n}\n\n"
},
{
"alpha_fraction": 0.5843847393989563,
"alphanum_fraction": 0.5929939150810242,
"avg_line_length": 18.471097946166992,
"blob_id": "7b79289b093f23ce9b2cd7d56088666810e28791",
"content_id": "3306cdae7ba9d8e37ca6d52367f19077181d830d",
"detected_licenses": [
"BSD-2-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 6737,
"license_type": "permissive",
"max_line_length": 77,
"num_lines": 346,
"path": "/examples/vblk.c",
"repo_name": "ivpi/liblightnvm",
"src_encoding": "UTF-8",
"text": "#include <stdlib.h>\n#include <stdio.h>\n#include <string.h>\n#include <errno.h>\n#include <liblightnvm.h>\n\nint get(NVM_DEV dev, NVM_GEO geo, NVM_VBLK vblk, NVM_ADDR addr, int flags)\n{\n\tssize_t err;\n\n\tprintf(\"** nvm_vblk_gets(..., %d, %d)\\n\", addr.g.ch, addr.g.lun);\n\n\terr = nvm_vblk_gets(vblk, dev, addr.g.ch, addr.g.lun);\n\tif (err) {\n\t\tprintf(\"FAILED: nvm_vblk_gets err(%ld)\\n\", err);\n\t} else {\n\t\tprintf(\"got \");\n\t\tnvm_vblk_pr(vblk);\n\t}\n\n\tnvm_vblk_free(vblk);\n\n\treturn err;\n}\n\nint put(NVM_DEV dev, NVM_GEO geo, NVM_VBLK vblk, NVM_ADDR addr, int flags)\n{\n\tssize_t err;\n\n\tprintf(\"** nvm_vblk_put(...):\\n\");\n\tnvm_addr_pr(addr);\n\n\terr = nvm_vblk_put(vblk);\n\tif (err) {\n\t\tprintf(\"FAILED: nvm_vblk_put err(%ld)\\n\", err);\n\t}\n\n\tnvm_vblk_free(vblk);\n\n\treturn err;\n}\n\nint erase(NVM_DEV dev, NVM_GEO geo, NVM_VBLK vblk, NVM_ADDR addr, int flags)\n{\n\tssize_t err;\n\n\tprintf(\"** nvm_vblk_erase(...):\\n\");\n\tnvm_addr_pr(addr);\n\n\terr = nvm_vblk_erase(vblk);\n\tif (err) {\n\t\tprintf(\"FAILED: nvm_vblk_erase err(%ld)\\n\", err);\n\t}\n\n\tnvm_vblk_free(vblk);\n\n\treturn err;\n}\n\nint pwrite(NVM_DEV dev, NVM_GEO geo, NVM_VBLK vblk, NVM_ADDR addr, int flags)\n{\n\tssize_t err;\n\n\tchar *buf;\n\tsize_t count, offset;\n\n\tprintf(\"** nvm_vblk_pwrite(...):\\n\");\n\tnvm_addr_pr(addr);\n\n\tcount = geo.vpg_nbytes;\n\n\tbuf = nvm_buf_alloc(geo, count);\n\tif (!buf) {\n\t\tprintf(\"FAILED: allocating buf\\n\");\n\t\tfree(vblk);\n\t\treturn -ENOMEM;\n\t}\n\n\tnvm_buf_fill(buf, count);\n\n\toffset = geo.vpg_nbytes * addr.g.pg;\n\terr = nvm_vblk_pwrite(vblk, buf, count, offset);\n\tif (err) {\n\t\tprintf(\"FAILED: nvm_vblk_pwrite err(%ld)\\n\", err);\n\t}\n\n\tnvm_vblk_free(vblk);\n\tfree(buf);\n\n\treturn err;\n}\n\nint write(NVM_DEV dev, NVM_GEO geo, NVM_VBLK vblk, NVM_ADDR addr, int flags)\n{\n\tssize_t err = 0;\n\n\tchar *buf;\n\tconst int count = geo.vblk_nbytes;\n\n\tprintf(\"** nvm_vblk_write(...):\\n\");\n\tnvm_addr_pr(addr);\n\n\tbuf = nvm_buf_alloc(geo, count);\n\tif (!buf) {\n\t\tprintf(\"FAILED: allocating buf\\n\");\n\t\tfree(vblk);\n\t\treturn -ENOMEM;\n\t}\n\n\tnvm_buf_fill(buf, count);\n\n\terr = nvm_vblk_write(vblk, buf, count);\n\tif (err) {\n\t\tprintf(\"FAILED: nvm_vblk_write err(%ld)\\n\", err);\n\t}\n\n\tnvm_vblk_free(vblk);\n\tfree(buf);\n\n\treturn err;\n}\n\nint pread(NVM_DEV dev, NVM_GEO geo, NVM_VBLK vblk, NVM_ADDR addr, int flags)\n{\n\tssize_t err;\n\n\tvoid *buf;\n\tsize_t count, offset;\n\n\tprintf(\"** nvm_vblk_pread(...):\\n\");\n\tnvm_addr_pr(addr);\n\n\tcount = geo.vpg_nbytes;\n\toffset = geo.vpg_nbytes * addr.g.pg;\n\n\tbuf = nvm_buf_alloc(geo, count);\n\tif (!buf) {\n\t\tprintf(\"FAILED: allocating buf\\n\");\n\t\tfree(vblk);\n\t\treturn -ENOMEM;\n\t}\n\n\terr = nvm_vblk_pread(vblk, buf, count, offset);\n\tif (getenv(\"NVM_BUF_PR\"))\n\t\tnvm_buf_pr(buf, count);\n\tif (err) {\n\t\tprintf(\"FAILED: nvm_vblk_pread err(%ld)\\n\", err);\n\t}\n\n\tnvm_vblk_free(vblk);\n\tfree(buf);\n\n\treturn err;\n}\n\nint read(NVM_DEV dev, NVM_GEO geo, NVM_VBLK vblk, NVM_ADDR addr, int flags)\n{\n\tssize_t err;\n\n\tvoid *buf;\n\tint count;\n\n\tprintf(\"** nvm_vblk_read(...):\\n\");\n\tnvm_addr_pr(addr);\n\n\tcount = geo.vblk_nbytes;\n\tbuf = nvm_buf_alloc(geo, count);\n\tif (!buf) {\n\t\tprintf(\"FAILED: allocating buf\\n\");\n\t\tnvm_vblk_free(vblk);\n\t\treturn -ENOMEM;\n\t}\n\n\terr = nvm_vblk_read(vblk, buf, geo.vblk_nbytes);\n\tif (getenv(\"NVM_BUF_PR\"))\n\t\tnvm_buf_pr(buf, count);\n\tif (err) 
{\n\t\tprintf(\"FAILED: nvm_vblk_read err(%ld)\\n\", err);\n\t}\n\n\tnvm_vblk_free(vblk);\n\tfree(buf);\n\n\treturn err;\n}\n\n// From hereon out the code is mostly boiler-plate for command-line parsing,\n// there is a bit of useful code exemplifying:\n//\n// * nvm_dev_open\n// * nvm_dev_close\n// * nvm_dev_attr_geo\n//\n// as well as using the NVM_ADDR data structure.\n\n#define NVM_CLI_CMD_LEN 50\n\ntypedef struct {\n\tchar name[NVM_CLI_CMD_LEN];\n\tint (*func)(NVM_DEV, NVM_GEO, NVM_VBLK, NVM_ADDR, int);\n\tint argc;\n\tint flags;\n} NVM_CLI_VBLK_CMD;\n\nstatic NVM_CLI_VBLK_CMD cmds[] = {\n\t{\"get\", get, 5, 0},\n\t{\"put\", put, 6, 0},\n\t{\"read\", read, 6, 0},\n\t{\"write\", write, 6, 0},\n\t{\"erase\", erase, 6, 0},\n\t{\"pread\", pread, 7, 0},\n\t{\"pwrite\", pwrite, 7, 0},\n};\n\nstatic int ncmds = sizeof(cmds) / sizeof(cmds[0]);\nstatic char *args[] = {\"dev_name\", \"ch\", \"lun\", \"blk\", \"pg\"};\n\nvoid _usage_pr(char *cli_name)\n{\n\tint cmd;\n\n\tprintf(\"Usage:\\n\");\n\tfor (cmd = 0; cmd < ncmds; cmd++) {\n\t\tint arg;\n\t\tprintf(\" %s %6s\", cli_name, cmds[cmd].name);\n\t\tfor (arg = 0; arg < cmds[cmd].argc-2; ++arg) {\n\t\t\tprintf(\" %s\", args[arg]);\n\t\t}\n\t\tprintf(\"\\n\");\n\t}\n\n\tprintf(\"OR using PPA (parts as above are extracted from address):\\n\");\n\tfor (cmd = 0; cmd < ncmds; cmd++) {\n\t\tprintf(\" %s %6s dev_name ppa\\n\", cli_name, cmds[cmd].name);\n\t}\n}\n\nint main(int argc, char **argv)\n{\n\tchar cmd_name[NVM_CLI_CMD_LEN];\n\tchar dev_name[DISK_NAME_LEN+1];\n\tint ret, i;\n\n\tNVM_CLI_VBLK_CMD *cmd = NULL;\n\t\n\tNVM_DEV dev;\n\tNVM_GEO geo;\n\tNVM_ADDR addr;\n\tNVM_VBLK vblk;\n\n\tif (argc < 4) {\n\t\t_usage_pr(argv[0]);\n\t\treturn -1;\n\t}\n\n\t\t\t\t\t\t\t// Get `cmd_name`\n\tif (strlen(argv[1]) < 1 || strlen(argv[1]) > (NVM_CLI_CMD_LEN-1)) {\n\t\tprintf(\"Invalid cmd\\n\");\n\t\t_usage_pr(argv[0]);\n\t\treturn -EINVAL;\n\t}\n\tmemset(cmd_name, 0, sizeof(cmd_name));\n\tstrcpy(cmd_name, argv[1]);\n\n\tfor (i = 0; i < ncmds; ++i) {\t\t// Get `cmd`\n\t\tif (strcmp(cmd_name, cmds[i].name) == 0) {\n\t\t\tcmd = &cmds[i];\n\t\t\tbreak;\n\t\t}\n\t}\n\tif (!cmd) {\n\t\tprintf(\"Invalid cmd(%s)\\n\", cmd_name);\n\t\t_usage_pr(argv[0]);\n\t\treturn -EINVAL;\n\t}\n\n\tif ((argc != cmd->argc) && (argc != 4)) {\t// Check argument count\n\t\tprintf(\"Invalid cmd(%s) argc(%d) != %d\\n\",\n\t\t\tcmd_name, argc, cmd->argc);\n\t\t_usage_pr(argv[0]);\n\t\treturn -1;\n\t}\n\n\tif (strlen(argv[2]) > DISK_NAME_LEN) {\t\t// Get `dev_name`\n\t\tprintf(\"len(dev_name) > %d\\n\", DISK_NAME_LEN);\n\t\treturn -1;\n\t}\n\tmemset(dev_name, 0, sizeof(dev_name));\n\tstrcpy(dev_name, argv[2]);\n\n\taddr.ppa = 0;\n\tswitch(argc) {\t\t\t\t\t// Get `addr`\n\n\t\tcase 7:\t\t\t\t\t// ch lun blk pg\n\t\t\taddr.g.pg = atoi(argv[6]);\n\t\tcase 6:\t\t\t\t\t// ch lun blk\n\t\t\taddr.g.blk = atoi(argv[5]);\n\t\tcase 5:\t\t\t\t\t// ch lun\n\t\t\taddr.g.lun = atoi(argv[4]);\n\t\t\taddr.g.ch = atoi(argv[3]);\n\t\t\tbreak;\n\n\t\tcase 4:\t\t\t\t\t// ppa\n\t\t\taddr.ppa = atol(argv[3]);\n\t\t\tbreak;\n\n\t\tdefault:\n\t\t\t_usage_pr(argv[0]);\n\t\t\treturn -1;\n\t}\n\n\tdev = nvm_dev_open(dev_name);\t\t\t// open `dev`\n\tif (!dev) {\n\t\tprintf(\"Failed opening device, dev_name(%s)\\n\", dev_name);\n\t\treturn -EINVAL;\n\t}\n\tgeo = nvm_dev_attr_geo(dev);\t\t\t// Get `geo`\n\n\tif (addr.g.ch >= geo.nchannels) {\t\t// Check `addr`\n\t\tprintf(\"ch(%u) too large\\n\", addr.g.ch);\n\t\treturn -EINVAL;\n\t}\n\tif (addr.g.lun >= geo.nluns) {\n\t\tprintf(\"lun(%u) too large\\n\", 
addr.g.lun);\n\t\treturn -EINVAL;\n\t}\n\tif (addr.g.blk >= geo.nblocks) {\n\t\tprintf(\"blk(%u) too large\\n\", addr.g.blk);\n\t\treturn -EINVAL;\n\t}\n\tif (addr.g.pg >= geo.npages) {\n\t\tprintf(\"pg(%u) too large\\n\", addr.g.pg);\n\t\treturn -EINVAL;\n\t}\n\tif (addr.g.sec >= geo.nsectors) {\n\t\taddr.g.sec = 0;\n\t}\n\n\tvblk = nvm_vblk_new_on_dev(dev, addr);\n\n\tret = cmd->func(dev, geo, vblk, addr, cmd->flags);\n\n\tnvm_dev_close(dev);\t\t\t\t// close `dev`\n\n\treturn ret;\n}\n"
},
{
"alpha_fraction": 0.672533392906189,
"alphanum_fraction": 0.6835817098617554,
"avg_line_length": 28.484848022460938,
"blob_id": "8858902872104468f95e8b6c5743003100c4b692",
"content_id": "3bbb09405bf3a1a19227a950ddb239a83633ce7b",
"detected_licenses": [
"BSD-2-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 7784,
"license_type": "permissive",
"max_line_length": 80,
"num_lines": 264,
"path": "/include/liblightnvm.h",
"repo_name": "ivpi/liblightnvm",
"src_encoding": "UTF-8",
"text": "/*\n * liblightnvm - Linux Open-Channel I/O interface\n *\n * Copyright (C) 2015 Javier González <[email protected]>\n * All rights reserved.\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions are met:\n *\n * - Redistributions of source code must retain the above copyright notice,\n * this list of conditions and the following disclaimer.\n * - Redistributions in binary form must reproduce the above copyright notice,\n * this list of conditions and the following disclaimer in the documentation\n * and/or other materials provided with the distribution.\n *\n * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND ANY\n * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY\n * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n */\n#ifndef __LIBLIGHTNVM_H\n#define __LIBLIGHTNVM_H\n\n#ifdef __cplusplus\nextern \"C\" {\n#endif\n\n#include <stdint.h>\n#include <sys/types.h>\n\n#ifndef DISK_NAME_LEN\n#define DISK_NAME_LEN 32\n#endif\n#ifndef NVM_DISK_NAME_LEN\n#define NVM_DISK_NAME_LEN 32\n#endif\n\n#define NVM_MAGIC_OPCODE_ERASE 0x90 // NVM_OP_ERASE\n#define NVM_MAGIC_OPCODE_WRITE 0x91 // NVM_OP_PWRITE\n#define NVM_MAGIC_OPCODE_READ 0x92 // NVM_OP_PREAD\n\n#define NVM_MAGIC_FLAG_DUAL 0x1 // NVM_IO_DUAL_ACCESS\n#define NVM_MAGIC_FLAG_QUAD 0x2 // NVM_IO_QUAD_ACCESS\n#define NVM_MAGIC_FLAG_DEFAULT NVM_MAGIC_FLAG_DUAL\n\n/* BITS ALLOCATED FOR THE GENERAL ADDRESS FORMAT */\n#define NVM_BLK_BITS (16)\n#define NVM_PG_BITS (16)\n#define NVM_SEC_BITS (8)\n#define NVM_PL_BITS (8)\n#define NVM_LUN_BITS (8)\n#define NVM_CH_BITS (7)\n\ntypedef struct nvm_addr {\n\t/* Generic structure for all addresses */\n\tunion {\n\t\tstruct {\n\t\t\tuint64_t blk : NVM_BLK_BITS;\n\t\t\tuint64_t pg : NVM_PG_BITS;\n\t\t\tuint64_t sec : NVM_SEC_BITS;\n\t\t\tuint64_t pl : NVM_PL_BITS;\n\t\t\tuint64_t lun : NVM_LUN_BITS;\n\t\t\tuint64_t ch : NVM_CH_BITS;\n\t\t\tuint64_t reserved : 1;\n\t\t} g;\n\n\t\tstruct {\n\t\t\tuint64_t line : 63;\n\t\t\tuint64_t is_cached : 1;\n\t\t} c;\n\n\t\tuint64_t ppa;\n\t};\n} NVM_ADDR;\n\ntypedef struct nvm_geo {\n\t/* Values queried from device */\n\tsize_t nchannels;\t// # of channels on device\n\tsize_t nluns;\t\t// # of luns per channel\n\tsize_t nplanes;\t\t// # of planes for lun\n\tsize_t nblocks;\t\t// # of blocks per plane\n\tsize_t npages;\t\t// # of pages per block\n\tsize_t nsectors;\t// # of sectors per page\n\tsize_t nbytes;\t\t// # of bytes per sector\n\n\t/* Values derived from above */\n\tsize_t tbytes;\t\t// Total # of bytes on device\n\tsize_t vblk_nbytes;\t// # of bytes per vblk\n\tsize_t vpg_nbytes;\t// # upper bound on _nvm_vblk_[read|write]\n} NVM_GEO;\n\ntypedef struct nvm_dev *NVM_DEV;\ntypedef struct nvm_vblk *NVM_VBLK;\ntypedef struct nvm_sblk *NVM_SBLK;\n\nvoid nvm_geo_pr(NVM_GEO geo);\n\nNVM_DEV nvm_dev_open(const char *dev_name);\nvoid nvm_dev_close(NVM_DEV dev);\nvoid 
nvm_dev_pr(NVM_DEV dev);\n\n/**\n * Returns of the geometry related device information including derived\n * information such as total number of bytes etc.\n *\n * NOTE: See NVM_GEO for the specifics.\n *\n * @return NVM_GEO of given dev\n */\nNVM_GEO nvm_dev_attr_geo(NVM_DEV dev);\n\nvoid *nvm_buf_alloc(NVM_GEO geo, size_t nbytes);\n\n/**\n * Fills `buf` with chars A-Z\n */\nvoid nvm_buf_fill(char *buf, size_t nbytes);\n\n/**\n * Prints `buf` to stdout\n */\nvoid nvm_buf_pr(char *buf, size_t nbytes);\n\n/**\n * address interface\n */\n\n/**\n * Mark address by setting flags to one of:\n *\n * 0x0 -- GOOD\n * 0x1 -- BAD\n * 0x2 -- GROWN_BAD\n */\nssize_t nvm_addr_mark(NVM_DEV dev, NVM_ADDR list[], int len, uint16_t flags);\n\nssize_t nvm_addr_erase(NVM_DEV dev, NVM_ADDR list[], int len, uint16_t flags);\n\nssize_t nvm_addr_write(NVM_DEV dev, NVM_ADDR list[], int len, const void *buf,\n uint16_t flags);\n\nssize_t nvm_addr_read(NVM_DEV dev, NVM_ADDR list[], int len, void *buf,\n uint16_t flags);\n\n\n\nvoid nvm_addr_pr(NVM_ADDR addr);\n\n/**\n * virtual block interface\n */\n\nNVM_VBLK nvm_vblk_new(void);\nNVM_VBLK nvm_vblk_new_on_dev(NVM_DEV dev, NVM_ADDR addr);\nvoid nvm_vblk_free(NVM_VBLK vblk);\nvoid nvm_vblk_pr(NVM_VBLK vblk);\n\nNVM_ADDR nvm_vblk_attr_addr(NVM_VBLK vblk);\n\n/**\n * Get ownership of an arbitrary flash block from the given device.\n *\n * Returns: On success, a flash block is allocated in LightNVM's media manager\n * and vblk is filled up accordingly. On error, -1 is returned, in which case\n * errno is set to indicate the error.\n */\nint nvm_vblk_get(NVM_VBLK vblk, NVM_DEV dev);\n\n/**\n * Reserves a block on given device using a specific lun.\n *\n * @param vblk Block created with nvm_vblk_new\n * @param dev Handle obtained with nvm_dev_open\n * @param ch Channel from which to reserve via\n * @param lun Lun from which to reserve via\n *\n * @returns On success 0, on error -1 and *errno* set appropriately.\n */\nint nvm_vblk_gets(NVM_VBLK vblk, NVM_DEV dev, uint32_t ch, uint32_t lun);\n\n/**\n * Put flash block(s) represented by vblk back to dev.\n *\n * This action implies that the owner of the flash block previous to this\n * function call no longer owns the flash block, and therefor can no longer\n * submit I/Os to it, or expect that data on it is persisted. 
The flash block\n * cannot be reclaimed by the previous owner.\n *\n * @returns On success 0, on error -1 and *errno* set appropriately.\n */\nint nvm_vblk_put(NVM_VBLK vblk);\n\n/**\n * Erase an entire vblk\n *\n * @returns On success 0, on error -1 and *errno* set appropriately.\n */\nssize_t nvm_vblk_erase(NVM_VBLK vblk);\n\n/**\n * Read 'count' bytes from 'vblk' starting at 'offset' into 'buf'\n *\n * @returns On success 0, on error -1 and *errno* set appropriately.\n */\nssize_t nvm_vblk_pread(NVM_VBLK vblk, void *buf, size_t count, size_t offset);\n\n/**\n * Write 'count' bytes to 'vblk' starting at 'offset' from 'buf'\n *\n * NOTE: Use this for controlling chunked writing, do NOT use this for\n * random-access.\n *\n * @returns On success 0, on error -1 and *errno* set appropriately.\n */\nssize_t nvm_vblk_pwrite(NVM_VBLK vblk, const void *buf, size_t count,\n\t\t\tsize_t offset);\n\n/**\n * Write 'count' bytes to 'vblk' starting at 'offset' from 'buf'\n *\n * @returns On success 0, on error -1 and *errno* set appropriately.\n */\nssize_t nvm_vblk_write(NVM_VBLK vblk, const void *buf, size_t count);\n\n/**\n * Read the entire vblk, storing it into buf\n *\n * @returns On success 0, on error -1 and *errno* set appropriately.\n */\nssize_t nvm_vblk_read(NVM_VBLK vblk, void *buf, size_t count);\n\n/**\n * spanning block interface\n */\n\nNVM_SBLK nvm_sblk_new(NVM_DEV dev, int ch_bgn, int ch_end, int lun_bgn,\n int lun_end, int blk);\n\nvoid nvm_sblk_free(NVM_SBLK sblk);\n\nssize_t nvm_sblk_erase(NVM_SBLK sblk);\nssize_t nvm_sblk_write(NVM_SBLK sblk, const void *buf, size_t count);\nssize_t nvm_sblk_pad(NVM_SBLK sblk);\nssize_t nvm_sblk_read(NVM_SBLK sblk, void *buf, size_t count);\nssize_t nvm_sblk_pread(struct nvm_sblk *sblk, void *buf, size_t count,\n\t\t size_t offset);\n\nNVM_DEV nvm_sblk_attr_dev(NVM_SBLK sblk);\nNVM_ADDR nvm_sblk_attr_bgn(NVM_SBLK sblk);\nNVM_ADDR nvm_sblk_attr_end(NVM_SBLK sblk);\nNVM_GEO nvm_sblk_attr_geo(NVM_SBLK sblk);\nvoid nvm_sblk_pr(NVM_SBLK sblk);\n\n#ifdef __cplusplus\n}\n#endif\n\n#endif /* __LIBLIGHTNVM.H */\n"
},
{
"alpha_fraction": 0.6520523428916931,
"alphanum_fraction": 0.6552481651306152,
"avg_line_length": 21.44843101501465,
"blob_id": "cdc6ff6c94159383215a9b457928c0c941a70b9c",
"content_id": "61469569042d340aa79e851b3006fceef6f8b3a1",
"detected_licenses": [
"BSD-2-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 10013,
"license_type": "permissive",
"max_line_length": 80,
"num_lines": 446,
"path": "/deprecated/nvm_beam.c",
"repo_name": "ivpi/liblightnvm",
"src_encoding": "UTF-8",
"text": "/*\n * beam - Beam abstraction user-space append-only interface for liblightnvm\n *\n * Copyright (C) 2015 Javier González <[email protected]>\n * All rights reserved.\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions are met:\n *\n * - Redistributions of source code must retain the above copyright notice,\n * this list of conditions and the following disclaimer.\n * - Redistributions in binary form must reproduce the above copyright notice,\n * this list of conditions and the following disclaimer in the documentation\n * and/or other materials provided with the distribution.\n *\n * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND ANY\n * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY\n * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n */\n#include <errno.h>\n#include <string.h>\n#include <fcntl.h>\n#include <unistd.h>\n#include <assert.h>\n#include <linux/lightnvm.h>\n\n#include <likely.h>\n#include <nvm_beam.h>\n#include <nvm_debug.h>\n\nstatic struct atomic_cnt beam_guid = {\n\t.cnt = 0,\n};\n\nstatic struct atomic_cnt fd_guid = {\n\t.cnt = 0,\n};\n\nstatic struct beam *beamt;\n\nstatic void beam_buf_free(struct w_buffer *buf)\n{\n\tif (buf->buf)\n\t\tfree(buf->buf);\n\n\tbuf->buf = NULL;\n\tbuf->mem = NULL;\n\tbuf->sync = NULL;\n}\n\nstatic void beam_put_blocks(struct beam *beam)\n{\n\tstruct nvm_vblk *vblock;\n\tint i;\n\n\tfor (i = 0; i < beam->nvblocks; i++) {\n\t\tvblock = &beam->vblocks[i];\n\t\tnvm_vblk_put(vblock);\n\t}\n}\n\nstatic void beam_free(struct beam *beam)\n{\n\tbeam_buf_free(&beam->w_buffer);\n\tbeam_put_blocks(beam);\n\tfree(beam);\n}\n\n/* XXX: All block functions assume that block allocation is thread safe */\n/* TODO: Allocate blocks dynamically */\nstatic int switch_block(struct beam **beam)\n{\n\tsize_t buf_size;\n\tint ret;\n\n\tNVM_GEO geo = nvm_dev_attr_geo((*beam)->dev);\n\n\t/* Write buffer for small writes */\n\tbuf_size = nvm_dev_attr_geo((*beam)->dev).vblk_nbytes;\n\tif (buf_size != (*beam)->w_buffer.buf_limit) {\n\t\tfree((*beam)->w_buffer.buf);\n\t\tret = posix_memalign(&(*beam)->w_buffer.buf, geo.nbytes,\n\t\t\t\t buf_size);\n\t\tif (ret) {\n\n\t\t\treturn ret;\n\t\t}\n\t\t(*beam)->w_buffer.buf_limit = buf_size;\n\t}\n\n\t(*beam)->w_buffer.mem = (*beam)->w_buffer.buf;\n\t(*beam)->w_buffer.sync = (*beam)->w_buffer.buf;\n\t(*beam)->w_buffer.cursize = 0;\n\t(*beam)->w_buffer.cursync = 0;\n\n\t(*beam)->current_w_vblock = &(*beam)->vblocks[(*beam)->nvblocks - 1];\n\n\treturn 0;\n}\n\nstatic int preallocate_block(struct beam *beam)\n{\n\tstruct nvm_vblk *vblock = &beam->vblocks[beam->nvblocks];\n\tint ret;\n\n\tret = nvm_vblk_gets(vblock, beam->dev, 0, beam->lun);\n\tif (ret) {\n\t\treturn ret;\n\t}\n\n\tbeam->nvblocks++;\n\n\treturn ret;\n}\n\nstatic int allocate_block(struct beam *beam)\n{\n\tint ret;\n\n\t/* TODO: Retry if preallocation fails 
- when saving metadata */\n\tret = preallocate_block(beam);\n\tif (ret)\n\t\treturn ret;\n\n\tret = switch_block(&beam);\n\n\treturn ret;\n}\n\nstatic int beam_init(struct beam *beam, int lun, struct nvm_dev *dev)\n{\n\tatomic_assign_inc(&beam_guid, &beam->gid);\n\tbeam->lun = lun;\n\tbeam->nvblocks = 0;\n\tbeam->bytes = 0;\n\tbeam->dev = dev;\n\tif (!beam->dev) {\n\t\treturn -EINVAL;\n\t}\n\n\tbeam->w_buffer.buf_limit = 0;\n\tbeam->w_buffer.buf = NULL;\n\n\treturn allocate_block(beam);\n}\n\n/*\n * Sync write buffer to the device. The write granurality is determined by the\n * size of the full page size, across flash planes. These sizes are device\n * specific and are queried for each operating device.\n */\nstatic int beam_sync(struct beam *beam, int flags)\n{\n\tsize_t vpage_nbytes = nvm_dev_attr_vpage_nbytes(beam->dev);\n\tsize_t sync_len = beam->w_buffer.cursize - beam->w_buffer.cursync;\n\tsize_t disaligned_data = sync_len % vpage_nbytes;\n\tsize_t ppa_off = calculate_ppa_off(beam->w_buffer.cursync, vpage_nbytes);\n\tint npages = sync_len / vpage_nbytes;\n\tsize_t synced_bytes;\n\tsize_t err;\n\n\tint synced_pages = 0;\n\n\tif (((flags & OPTIONAL_SYNC) && (sync_len < vpage_nbytes)) ||\n\t\t(sync_len == 0))\n\t\treturn 0;\n\n\tif (flags & FORCE_SYNC) {\n\t\tif (beam->w_buffer.cursync + sync_len ==\n\t\t\t\t\t\tbeam->w_buffer.buf_limit) {\n\t\t\t/* TODO: Metadata */\n\t\t}\n\n\t\tif (disaligned_data > 0) {\n\t\t\t/* Add padding to current page */\n\t\t\tint padding = vpage_nbytes - disaligned_data;\n\n\t\t\tmemset(beam->w_buffer.mem, '\\0', padding);\n\t\t\tbeam->w_buffer.cursize += padding;\n\t\t\tbeam->w_buffer.mem += padding;\n\n\t\t\tnpages++;\n\t\t}\n\t} else {\n\t\tsync_len -= disaligned_data;\n\t}\n\n\t/* write data to media */\n\terr = nvm_vblk_pwrite(beam->current_w_vblock,\n\t\t\t beam->w_buffer.sync,\n\t\t\t ppa_off);\n\tif (err) {\n\t\treturn -1;\n\t}\n\t++synced_pages;\n\n\t/* We might need to take a lock here */\n\tsynced_bytes = synced_pages * vpage_nbytes;\n\tbeam->bytes += synced_bytes;\n\tbeam->w_buffer.cursync += synced_bytes;\n\tbeam->w_buffer.sync += synced_bytes;\n\n\treturn 0;\n}\n\nstatic void clean_all(void)\n{\n\tstruct beam *b, *b_tmp;\n\n\tHASH_ITER(hh, beamt, b, b_tmp) {\n\t\tHASH_DEL(beamt, b);\n\t\tnvm_dev_close(b->dev);\n\t\tbeam_free(b);\n\t}\n}\n\nint nvm_beam_create(const char *dev_name, int lun, int flags)\n{\n\tstruct nvm_dev *dev;\n\tstruct beam *beam;\n\tint ret;\n\n\tdev = nvm_dev_open(dev_name);\n\tif (!dev) {\n\t\treturn -1;\n\t}\n\n\tbeam = malloc(sizeof(struct beam));\n\tif (!beam) {\n\t\tnvm_dev_close(dev);\n\t\treturn -ENOMEM;\n\t}\n\n\tret = beam_init(beam, lun, dev);\n\tif (ret) {\n\t\tnvm_dev_close(dev);\n\t\tfree(beam);\n\t\treturn ret;\n }\n\n\tHASH_ADD_INT(beamt, gid, beam);\n\n\treturn beam->gid;\n}\n\nvoid nvm_beam_destroy(int beam, int flags)\n{\n\tstruct beam *b;\n\tstruct nvm_dev *dev;\n\n\tHASH_FIND_INT(beamt, &beam, b);\n\tHASH_DEL(beamt, b);\n\n\tdev = b->dev;\n\tbeam_free(b);\n\tnvm_dev_close(dev);\n}\n\n/* TODO: Implement a pool of available blocks to support double buffering */\n/*\n * TODO: Flush pages in a different thread as write buffer gets filled up,\n * instead of flushing the whole block at a time\n */\nssize_t nvm_beam_append(int beam, const void *buf, size_t count)\n{\n\tstruct beam *b;\n\tsize_t offset = 0;\n\tsize_t left = count;\n\tint ret;\n\n\tHASH_FIND_INT(beamt, &beam, b);\n\tif (!b) {\n\t\treturn -EINVAL;\n\t}\n\n\twhile (b->w_buffer.cursize + left > b->w_buffer.buf_limit) {\n\n\t\tsize_t fits_buf = 
b->w_buffer.buf_limit - b->w_buffer.cursize;\n\n\t\tret = preallocate_block(b);\n\t\tif (ret) {\n\t\t\treturn ret;\n\t\t}\n\n\t\tmemcpy(b->w_buffer.mem, buf, fits_buf);\n\t\tb->w_buffer.mem += fits_buf;\n\t\tb->w_buffer.cursize += fits_buf;\n\t\tif (beam_sync(b, FORCE_SYNC)) {\n\t\t\treturn -ENOSPC;\n\t\t}\n\n\t\tswitch_block(&b);\n\n\t\tleft -= fits_buf;\n\t\toffset += fits_buf;\n\t}\n\n\tmemcpy(b->w_buffer.mem, buf + offset, left);\n\tb->w_buffer.mem += left;\n\tb->w_buffer.cursize += left;\n\n\treturn count;\n}\n\nint nvm_beam_sync(int beam, int flags)\n{\n\tstruct beam *b;\n\n\tHASH_FIND_INT(beamt, &beam, b);\n\tif (!b) {\n\t\treturn -EINVAL;\n\t}\n\n\t/* TODO: Expose flags to application */\n\treturn beam_sync(b, FORCE_SYNC);\n}\n\n/*\n * Flag for aligned buffer\n */\nssize_t nvm_beam_read(int beam, void *buf, size_t count, off_t offset,\n\t\t int flags)\n{\n\tstruct beam *b;\n\tstruct nvm_vblk *current_r_vblock;\n\tsize_t block_off, ppa_off, page_off;\n\tsize_t ppa_count;\n\tsize_t nppas;\n\tsize_t left_bytes = count;\n\tsize_t valid_bytes;\n\tsize_t left_pages;\n\tsize_t pages_to_read, bytes_to_read;\n\tvoid *read_buf;\n\tchar *reader;\n\tchar *writer = buf;\n\t/* char *cache; // Used when trying write cache for reads*/\n\tint read_pages;\n\tint ret;\n\n\tNVM_GEO geo;\n\n\tHASH_FIND_INT(beamt, &beam, b);\n\tif (!b) {\n\t\treturn -EINVAL;\n\t}\n\n\tgeo = nvm_dev_attr_geo(b->dev);\n\n\t/* TODO: Improve calculations */\n\tleft_pages = ((count + offset % geo.nbytes) / geo.nbytes) +\n\t\t((((count) % geo.nbytes) > 0) ? 1 : 0);\n\n\t/* Assume that all blocks forming the beam have same size */\n\tnppas = geo.vblk_nbytes;\n\n\tppa_count = offset / geo.nbytes;\n\tblock_off = ppa_count / nppas;\n\tppa_off = ppa_count % nppas;\n\tpage_off = offset % geo.nbytes;\n\n\tcurrent_r_vblock = &b->vblocks[block_off];\n\n\tpages_to_read = (nppas > left_pages) ? left_pages : nppas;\n\tret = posix_memalign(&read_buf, geo.nbytes,\n\t\t\t pages_to_read * geo.nbytes);\n\tif (ret) {\n\t\treturn ret;\n\t}\n\treader = read_buf;\n\n\t/*\n\t * We assume that the device supports reading at a sector granurality\n\t * (typically 4KB). If not, we deal with the read in LightNVM in the\n\t * kernel.\n\t */\n\twhile (left_bytes) {\n\t\tsize_t err;\n\n\t\tbytes_to_read = pages_to_read * geo.nbytes;\n\t\tvalid_bytes = (left_bytes > bytes_to_read) ?\n\t\t\t\t\t\tbytes_to_read : left_bytes;\n\n\t\tif (UNLIKELY(pages_to_read + ppa_off > nppas)) {\n\t\t\twhile (pages_to_read + ppa_off > nppas)\n\t\t\t\tpages_to_read--;\n\n\t\t\tvalid_bytes = (nppas * geo.nbytes) -\n\t\t\t\t\t(ppa_off * geo.nbytes) - page_off;\n\t\t}\n\n\t\tassert(left_bytes <= left_pages * geo.nbytes);\n\n\t\t/* TODO: Send bigger I/Os if we have enough data */\n\t\terr = nvm_vblk_pread(current_r_vblock, reader, ppa_off);\n\t\tif (err) {\n\t\t\treturn -1;\n\t\t}\n\t\t++read_pages;\n\n\t\t/* TODO: Optional - Flag for aligned memory */\n\t\tmemcpy(writer, reader + page_off, valid_bytes);\n\t\twriter += valid_bytes;\n\n\t\treader = read_buf;\n\n\t\tblock_off++;\n\t\tppa_off = 0;\n\t\tpage_off = 0;\n\t\tcurrent_r_vblock = &b->vblocks[block_off];\n\n\t\tleft_pages -= read_pages;\n\t\tleft_bytes -= valid_bytes;\n\n\t\tpages_to_read = (nppas > left_pages) ? 
left_pages : nppas;\n\t}\n\n\t/* TODO: Optional - Flag for aligned memory */\n\tfree(read_buf);\n\treturn count;\n}\n\nint nvm_beam_init(void)\n{\n\tpthread_spin_init(&fd_guid.lock, PTHREAD_PROCESS_SHARED);\n\tpthread_spin_init(&beam_guid.lock, PTHREAD_PROCESS_SHARED);\n\n\t/* TODO: Recover state */\n\treturn 0;\n}\n\nvoid nvm_beam_exit(void)\n{\n\tpthread_spin_destroy(&fd_guid.lock);\n\tpthread_spin_destroy(&beam_guid.lock);\n\n\t/* TODO: save state */\n\n\tclean_all();\n}\n\n"
},
{
"alpha_fraction": 0.6686460971832275,
"alphanum_fraction": 0.6805225610733032,
"avg_line_length": 16.726316452026367,
"blob_id": "5d51eb27828645b9862552b579014ef0881392f9",
"content_id": "e87e263998703680f372fa43e8e1c87ed2f5b5d3",
"detected_licenses": [
"BSD-2-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Makefile",
"length_bytes": 1684,
"license_type": "permissive",
"max_line_length": 114,
"num_lines": 95,
"path": "/Makefile",
"repo_name": "ivpi/liblightnvm",
"src_encoding": "UTF-8",
"text": "BUILD_TYPE?=Release\nBUILD_DIR?=build\nBUILD_TESTS?=ON\nBUILD_EXAMPLES?=ON\n\n#\n# Traditional build commands\n#\n\ndefault: configure make\n\ndebug:\n\t$(eval BUILD_TYPE := Debug)\n\ntests_off:\n\t$(eval BUILD_TESTS := OFF)\n\nexamples_off:\n\t$(eval BUILD_EXAMPLES := OFF)\n\ncmake_check:\n\t@cmake --version || (echo \"\\n** Please install 'cmake' **\\n\" && exit 1)\n\nconfigure: cmake_check\n\tmkdir -p $(BUILD_DIR)\n\tcd $(BUILD_DIR) && cmake -DCMAKE_BUILD_TYPE=$(BUILD_TYPE) -DTESTS=$(BUILD_TESTS) -DEXAMPLES=$(BUILD_EXAMPLES) ../\n\t@echo \"Modify build configuration in '$(BUILD_DIR)'\"\n\nmake:\n\tcd $(BUILD_DIR) && make\n\ninstall:\n\tcd $(BUILD_DIR) && make install\n\nclean:\n\trm -r $(BUILD_DIR) || true\n\trm tags || true\n\nall: clean default install\n\n#\n# Packages (currently debian/.deb)\n#\npkg:\n\tcd $(BUILD_DIR) && make package\n\npkg_install:\n\tsudo dpkg -i $(BUILD_DIR)/*.deb\n\npkg_uninstall:\n\tsudo apt-get --yes remove liblightnvm || true\n\n#\n# Commands useful for development\n#\n#\ntags:\n\tctags * -R .\n\tcscope -b `find . -name '*.c'` `find . -name '*.h'`\n\n# Invoking tests ...\ntest_dev:\n\tsudo nvm_test_dev nvme0n1\n\ntest_mbad:\n\tsudo nvm_test_mbad nvme0n1\n\ntest_vblk:\n\tsudo nvm_test_vblk nvme0n1\n\ntest_vblk_gp_n:\n\tsudo nvm_test_vblk_gp_n nvme0n1\n\ntest_concur:\n\tsudo nvm_test_concur nvme0n1\n\n# ... all of them\ntest: test_dev test_vblk test_vblk_gp_n test_concur\n\n# Invoking examples ...\nex_info:\n\t@sudo nvm_ex_info nvme0n1 || true\n\nex_vblock_pio_1:\n\t@sudo nvm_ex_vblock_pio_1 nvme0n1 || true\n\nex_vblock_pio_n:\n\t@sudo nvm_ex_vblock_pio_n nvme0n1 || true\n\nexample: ex_info ex_vblock_pio_1 ex_vblock_pio_n\n\n# ... all of them\n\n# Removes everything, build and install package\ndev: pkg_uninstall clean configure make pkg pkg_install\n"
},
{
"alpha_fraction": 0.5395522117614746,
"alphanum_fraction": 0.5425373315811157,
"avg_line_length": 23.814815521240234,
"blob_id": "bb42d8c4c69a31bb9ebdfa075c0f68a423e8874c",
"content_id": "39fb7949292bfd886c842a9a8a1f8ca815bebb60",
"detected_licenses": [
"BSD-2-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1340,
"license_type": "permissive",
"max_line_length": 68,
"num_lines": 54,
"path": "/pylnvm/lnvm.py",
"repo_name": "ivpi/liblightnvm",
"src_encoding": "UTF-8",
"text": "\"\"\"\n pylnvm - Wrapper for liblightnvm\n\n\"\"\"\nfrom __future__ import print_function\nimport ctypes\n\nclass Geo(ctypes.Structure):\n \"\"\"Geometry of a Non-volatile memory device or subset thereof\"\"\"\n\n _fields_ = [\n (\"nchannels\", ctypes.c_size_t),\n (\"nluns\", ctypes.c_size_t),\n (\"nplanes\", ctypes.c_size_t),\n (\"nblocks\", ctypes.c_size_t),\n (\"npages\", ctypes.c_size_t),\n (\"nsectors\", ctypes.c_size_t),\n (\"nbytes\", ctypes.c_size_t),\n (\"tbytes\", ctypes.c_size_t),\n (\"vblk_nbytes\", ctypes.c_size_t),\n (\"vpg_nbytes\", ctypes.c_size_t),\n ]\n\n def __str__(self):\n \"\"\"Returns a human readable string representation\"\"\"\n\n attrs = []\n for field, _ in self._fields_:\n attrs.append(\"%s(%d)\" % (field, getattr(self, field)))\n\n fmt = []\n split = 3\n for i in xrange(0, len(attrs), split):\n fmt.append(\", \".join(attrs[i:i+split]))\n\n return \"geo {\\n %s\\n}\" % \",\\n \".join(fmt)\n\nLLN = ctypes.cdll.LoadLibrary(\"liblightnvm.so\")\nLLN.nvm_dev_attr_geo.restype = Geo\n\ndef main():\n \"\"\"Main entry-point for execution.\"\"\"\n\n dev = LLN.nvm_dev_open(\"nvme0n1\")\n\n if not dev:\n print(\"Failed opening dev\")\n return\n\n geo = LLN.nvm_dev_attr_geo(dev)\n print(geo)\n\nif __name__ == \"__main__\":\n main()\n"
},
{
"alpha_fraction": 0.6561794281005859,
"alphanum_fraction": 0.6619877219200134,
"avg_line_length": 21.951852798461914,
"blob_id": "eefb7b88330fea438619047ed6c755441fce6b39",
"content_id": "0b6663874d8470930c9a28742fbdfc462f8bc189",
"detected_licenses": [
"BSD-2-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 6198,
"license_type": "permissive",
"max_line_length": 80,
"num_lines": 270,
"path": "/src/nvm_dev.c",
"repo_name": "ivpi/liblightnvm",
"src_encoding": "UTF-8",
"text": "/*\n * dev - Device functions\n *\n * Copyright (C) 2015 Javier González <[email protected]>\n * Copyright (C) 2015 Matias Bjørling <[email protected]>\n * All rights reserved.\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions are met:\n *\n * - Redistributions of source code must retain the above copyright notice,\n * this list of conditions and the following disclaimer.\n * - Redistributions in binary form must reproduce the above copyright notice,\n * this list of conditions and the following disclaimer in the documentation\n * and/or other materials provided with the distribution.\n *\n * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND ANY\n * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY\n * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n */\n#include <unistd.h>\n#include <stdlib.h>\n#include <string.h>\n#include <fcntl.h>\n#include <stdio.h>\n#include <errno.h>\n#include <libudev.h>\n#include <linux/lightnvm.h>\n#include <liblightnvm.h>\n#include <nvm.h>\n#include <nvm_util.h>\n#include <nvm_debug.h>\n\nint sysattr_to_int(struct udev_device *dev, const char *attr, int *val)\n{\n\tconst char *dev_path;\n\tchar path[4096];\n\tchar buf[4096];\n\tchar c;\n\tFILE *fp;\n\tint i;\n\n\tmemset(buf, 0, sizeof(char)*4096);\n\n\tdev_path = udev_device_get_syspath(dev);\n\tif (!dev_path)\n\t\treturn -ENODEV;\n\n\tsprintf(path, \"%s/%s\", dev_path, attr);\n\tfp = fopen(path, \"rb\");\n\tif (!fp)\n\t\treturn -ENODEV;\n\n\ti = 0;\n\twhile (((c = getc(fp)) != EOF) && i < 4096) {\n\t\tbuf[i] = c;\n\t\t++i;\n\t}\n\tfclose(fp);\n\n\t*val = atoi(buf);\n\treturn 0;\n}\n\nint nvm_dev_geo_fill(struct nvm_geo *geo, const char *dev_name)\n{\n\tstruct udev *udev;\n\tstruct udev_device *dev;\n\tint val;\n\n\tudev = udev_new();\n\tif (!udev) {\n\t\tNVM_DEBUG(\"Failed creating udev for dev_name(%s)\\n\", dev_name);\n\t\treturn -ENOMEM;\n\t}\n\n\t/* Extract geometry from sysfs via libudev */\n\tdev = udev_nvmdev_find(udev, dev_name);\n\tif (!dev) {\n\t\tNVM_DEBUG(\"Cannot find dev_name(%s)\\n\", dev_name);\n\t\tudev_unref(udev);\n\t\treturn -ENODEV;\n\t}\n\n\tdev = udev_device_get_parent(dev);\n\tif (!dev)\n\t\treturn -ENODEV;\n\n\tif (sysattr_to_int(dev, \"lightnvm/num_channels\", &val))\n\t\treturn -EIO;\n\tgeo->nchannels = val;\n\n\tif (sysattr_to_int(dev, \"lightnvm/num_luns\", &val))\n\t\treturn -EIO;\n\tgeo->nluns = val;\n\n\tif (sysattr_to_int(dev, \"lightnvm/num_planes\", &val))\n\t\treturn -EIO;\n\tgeo->nplanes = val;\n\n\tif (sysattr_to_int(dev, \"lightnvm/num_blocks\", &val))\n\t\treturn -EIO;\n\tgeo->nblocks = val;\n\n\tif (sysattr_to_int(dev, \"lightnvm/num_pages\", &val))\n\t\treturn -EIO;\n\tgeo->npages = val;\n\n\tif (sysattr_to_int(dev, \"lightnvm/sec_per_pg\", &val))\n\t\treturn -EIO;\n\tgeo->nsectors = val;\n\n\tif (sysattr_to_int(dev, \"lightnvm/hw_sector_size\", &val))\n\t\treturn 
-EIO;\n\tgeo->nbytes = val;\n\n\t/* Derive total number of bytes on device */\n\tgeo->tbytes = geo->nchannels * geo->nluns * geo->nplanes * \\\n\t\t geo->nblocks * geo->npages * geo->nsectors * geo->nbytes;\n\n\t/* Derive number of bytes occupied by a virtual block/page */\n\tgeo->vblk_nbytes = geo->nplanes * geo->npages * geo->nsectors * \\\n\t\t\t\t geo->nbytes;\n\tgeo->vpg_nbytes = geo->nplanes * geo->nsectors * geo->nbytes;\n\n\tudev_device_unref(dev);\n\tudev_unref(udev);\n\n\treturn 0;\n}\n\nstruct nvm_dev *nvm_dev_new(void)\n{\n\tstruct nvm_dev *dev;\n\n\tdev = malloc(sizeof(*dev));\n\tif (dev)\n\t\tmemset(dev, 0, sizeof(*dev));\n\n\treturn dev;\n}\n\nvoid nvm_dev_free(struct nvm_dev **dev)\n{\n\tif (!dev)\n\t\treturn;\n\n\tfree(*dev);\n\t*dev = NULL;\n}\n\nvoid nvm_dev_pr(struct nvm_dev *dev)\n{\n\tprintf(\"dev { name(%s), fd(%d) }\\n\", dev->name, dev->fd);\n}\n\nint nvm_dev_attr_nchannels(struct nvm_dev *dev)\n{\n\treturn dev->geo.nchannels;\n}\n\nint nvm_dev_attr_nluns(struct nvm_dev *dev)\n{\n\treturn dev->geo.nluns;\n}\n\nint nvm_dev_attr_nplanes(struct nvm_dev *dev)\n{\n\treturn dev->geo.nplanes;\n}\n\nint nvm_dev_attr_nblocks(struct nvm_dev *dev)\n{\n\treturn dev->geo.nblocks;\n}\n\nint nvm_dev_attr_npages(struct nvm_dev *dev)\n{\n\treturn dev->geo.npages;\n}\n\nint nvm_dev_attr_nsectors(struct nvm_dev *dev)\n{\n\treturn dev->geo.nsectors;\n}\n\nint nvm_dev_attr_nbytes(struct nvm_dev *dev)\n{\n\treturn dev->geo.nbytes;\n}\n\nint nvm_dev_attr_vblk_nbytes(struct nvm_dev *dev)\n{\n\treturn dev->geo.vblk_nbytes;\n}\n\nint nvm_dev_attr_vpage_nbytes(struct nvm_dev *dev)\n{\n\treturn dev->geo.vpg_nbytes;\n}\n\nstruct nvm_geo nvm_dev_attr_geo(struct nvm_dev *dev)\n{\n\treturn dev->geo;\n}\n\nvoid nvm_geo_pr(struct nvm_geo geo)\n{\n\tprintf(\"geo {\\n\");\n\tprintf(\" nchannels(%lu), nluns(%lu), nplanes(%lu), nblocks(%lu),\\n\",\n\t geo.nchannels, geo.nluns, geo.nplanes, geo.nblocks);\n\tprintf(\" npages(%lu), nsectors(%lu), nbytes(%lu),\\n\",\n\t geo.npages, geo.nsectors, geo.nbytes);\n\tprintf(\" total_nbytes(%lub:%luMb)\\n\",\n\t geo.tbytes, geo.tbytes >> 20);\n\tprintf(\" vblk_nbytes(%lub:%luMb)\\n\",\n\t geo.vblk_nbytes, geo.vblk_nbytes >> 20);\n\tprintf(\" vpg_nbytes(%lub:%luKb)\\n\",\n\t geo.vpg_nbytes, geo.vpg_nbytes >> 10);\n\tprintf(\"}\\n\");\n}\n\nstruct nvm_dev *nvm_dev_open(const char *dev_name)\n{\n\tchar dev_path[NVM_DISK_NAME_LEN];\n\tstruct nvm_dev *dev;\n\tint err;\n\n\tdev = nvm_dev_new();\n\tif (!dev) {\n\t\tNVM_DEBUG(\"FAILED: nvm_dev_new.\\n\");\n\t\treturn NULL;\n\t}\n\n\tstrncpy(dev->name, dev_name, DISK_NAME_LEN);\n\n\terr = nvm_dev_geo_fill(&dev->geo, dev_name);\n\tif (err) {\n\t\tNVM_DEBUG(\"FAILED: nvm_dev_geo_fill, err(%d)\\n\", err);\n\t\tnvm_dev_free(&dev);\n\t\treturn NULL;\n\t}\n\n\tsprintf(dev_path, \"/dev/%s\", dev_name);\n\tdev->fd = open(dev_path, O_RDWR);\n\tif (dev->fd < 0) {\n\t\tNVM_DEBUG(\"FAILED: open dev_path(%s) dev->fd(%d)\\n\",\n\t\t\t dev_path, dev->fd);\n\n\t\tnvm_dev_close(dev);\n\t\tfree(dev);\n\n\t\treturn NULL;\n\t}\n\n\treturn dev;\n}\n\nvoid nvm_dev_close(struct nvm_dev *dev)\n{\n\tclose(dev->fd);\n\tnvm_dev_free(&dev);\n}\n\n"
},
{
"alpha_fraction": 0.5896995663642883,
"alphanum_fraction": 0.6034334897994995,
"avg_line_length": 20.98113250732422,
"blob_id": "a45d70994643538d0a1cdd05d38e3e1301b19726",
"content_id": "a5b2619ee757f54a6598ffbfe11729a3303f3ee6",
"detected_licenses": [
"BSD-2-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 1165,
"license_type": "permissive",
"max_line_length": 73,
"num_lines": 53,
"path": "/examples/scripts/ewr_vblk_split.sh",
"repo_name": "ivpi/liblightnvm",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env bash\n\nBLK=$1\nif [ -z \"$BLK\" ]; then\n\techo \"usage: $0 [BLK_IDX] [0|1]\"\n\texit\nfi\n\nDRY=$2\nif [ -z \"$DRY\" ]; then\n\tDRY=\"1\"\nfi\n\nNVME_DEV=nvme0\nLNVM_DEV=nvme0n1\nNCHANNELS=`cat /sys/class/nvme/$NVME_DEV/$LNVM_DEV/lightnvm/num_channels`\nCH_BEGIN=0\nCH_END=$(($NCHANNELS-1))\nNLUNS=`cat /sys/class/nvme/$NVME_DEV/$LNVM_DEV/lightnvm/num_luns`\nLUN_BEGIN=0\nLUN_END=$(($NLUNS-1))\n\necho \"** $LNVM_DEV with nchannels($NCHANNELS) and nluns($NLUNS)\"\n\necho \"** E 'spanned' block\"\nfor CH in $(seq $CH_BEGIN $CH_END); do\n\tfor LUN in $(seq $LUN_BEGIN $LUN_END); do\n\t\techo \"*** ch($CH), lun($LUN), blk($BLK)\"\n\t\tif [ $DRY -ne \"1\" ]; then\n\t\t\tnvm_vblk erase $LNVM_DEV $CH $LUN $BLK\n\t\tfi\n\tdone\ndone\n\necho \"** W 'spanned' block\"\nfor CH in $(seq $CH_BEGIN $CH_END); do\n\tfor LUN in $(seq $LUN_BEGIN $LUN_END); do\n\t\techo \"*** ch($CH), lun($LUN), blk($BLK)\"\n\t\tif [ $DRY -ne \"1\" ]; then\n\t\t\tnvm_vblk write $LNVM_DEV $CH $LUN $BLK\n\t\tfi\n\tdone\ndone\n\necho \"** R 'spanned' block\"\nfor CH in $(seq $CH_BEGIN $CH_END); do\n\tfor LUN in $(seq $LUN_BEGIN $LUN_END); do\n\t\techo \"*** ch($CH), lun($LUN), blk($BLK)\"\n\t\tif [ $DRY -ne \"1\" ]; then\n\t\t\tnvm_vblk read $LNVM_DEV $CH $LUN $BLK\n\t\tfi\n\tdone\ndone\n"
},
{
"alpha_fraction": 0.6345997452735901,
"alphanum_fraction": 0.6389678716659546,
"avg_line_length": 25.178192138671875,
"blob_id": "63faace6353ec29caecb58b144a90958b808dc16",
"content_id": "939a4f35628a7c1edb272f9088256f273ae58f70",
"detected_licenses": [
"BSD-2-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 9844,
"license_type": "permissive",
"max_line_length": 80,
"num_lines": 376,
"path": "/src/nvm_sblk.c",
"repo_name": "ivpi/liblightnvm",
"src_encoding": "UTF-8",
"text": "/*\n * sblock - Spanning block functions\n *\n * Copyright (C) 2015 Javier González <[email protected]>\n * Copyright (C) 2015 Matias Bjørling <[email protected]>\n * All rights reserved.\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions are met:\n *\n * - Redistributions of source code must retain the above copyright notice,\n * this list of conditions and the following disclaimer.\n * - Redistributions in binary form must reproduce the above copyright notice,\n * this list of conditions and the following disclaimer in the documentation\n * and/or other materials provided with the distribution.\n *\n * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND ANY\n * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY\n * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n */\n#include <assert.h>\n#include <stdlib.h>\n#include <stdint.h>\n#include <unistd.h>\n#include <errno.h>\n#include <stdio.h>\n#include <linux/lightnvm.h>\n#include <liblightnvm.h>\n#include <nvm.h>\n#include <nvm_debug.h>\n#include <nvm_omp.h>\n\nstruct nvm_sblk *nvm_sblk_new(struct nvm_dev *dev,\n\t\t\t int ch_bgn, int ch_end,\n\t\t\t int lun_bgn, int lun_end,\n\t\t\t int blk)\n{\n\tstruct nvm_sblk *sblk;\n\tstruct nvm_geo dev_geo = nvm_dev_attr_geo(dev);\n\n\tif (ch_bgn < 0 || ch_bgn > ch_end || ch_end >= dev_geo.nchannels) {\n\t\tNVM_DEBUG(\"invalid channel span\");\n\t\treturn NULL;\n\t}\n\tif (lun_bgn < 0 || lun_bgn > lun_end || lun_end >= dev_geo.nluns) {\n\t\tNVM_DEBUG(\"invalid lun span\");\n\t\treturn NULL;\n\t}\n\tif (blk < 0 || blk >= dev_geo.nblocks) {\n\t\tNVM_DEBUG(\"invalid block\");\n\t\treturn NULL;\n\t}\n\n\tsblk = malloc(sizeof(*sblk));\n\tif (!sblk)\n\t\treturn NULL;\n\n\tsblk->pos_write = 0;\n\tsblk->pos_read = 0;\n\n\tsblk->dev = dev;\n\n\tsblk->bgn.g.ch = ch_bgn;\t/* Construct span */\n\tsblk->bgn.g.lun = lun_bgn;\n\tsblk->bgn.g.blk = blk;\n\tsblk->bgn.g.pl = 0;\n\tsblk->bgn.g.pg = 0;\n\tsblk->bgn.g.sec = 0;\n\n\tsblk->end.g.ch = ch_end;\n\tsblk->end.g.lun = lun_end;\n\tsblk->end.g.blk = blk;\n\tsblk->end.g.pl = dev_geo.nplanes - 1;\n\tsblk->end.g.pg = dev_geo.npages - 1;\n\tsblk->end.g.sec = dev_geo.nsectors - 1;\n\n\tsblk->geo = dev_geo;\t\t/* Inherit geometry from device */\n\n\t/* Overwrite with channels and luns */\n\tsblk->geo.nchannels = (sblk->end.g.ch - sblk->bgn.g.ch) + 1;\n\tsblk->geo.nluns = (sblk->end.g.lun - sblk->bgn.g.lun) + 1;\n\tsblk->geo.nblocks = 1; // For each ch/lun there is only one block\n\n\t/* Derive total number of bytes in sblk */\n\tsblk->geo.tbytes = sblk->geo.nchannels * sblk->geo.nluns * \\\n\t\t\t sblk->geo.nplanes * sblk->geo.nblocks * \\\n\t\t\t sblk->geo.npages * sblk->geo.nsectors * \\\n\t\t\t sblk->geo.nbytes;\n\n\treturn sblk;\n}\n\nvoid nvm_sblk_free(struct nvm_sblk *sblk)\n{\n\tfree(sblk);\n}\n\nssize_t nvm_sblk_erase(struct nvm_sblk *sblk)\n{\n\tconst struct 
nvm_geo geo = nvm_sblk_attr_geo(sblk);\n\n\tconst int nplanes = geo.nplanes;\n\n\tconst struct nvm_addr bgn = sblk->bgn;\n\tconst struct nvm_addr end = sblk->end;\n\n\tssize_t nerr = 0;\n\n\tint PLANE_FLAG = 0x0;\n\n\tPLANE_FLAG = (geo.nplanes == 4) ? NVM_MAGIC_FLAG_QUAD : PLANE_FLAG;\n\tPLANE_FLAG = (geo.nplanes == 2) ? NVM_MAGIC_FLAG_DUAL : PLANE_FLAG;\n\n\t#pragma omp parallel for schedule(static) collapse(2) reduction(+:nerr)\n\tfor (int ch = bgn.g.ch; ch <= end.g.ch; ++ch) {\n\t\tfor (int lun = bgn.g.lun; lun <= end.g.lun; ++lun) {\n\t\t\tstruct nvm_addr addrs[nplanes];\n\t\t\tssize_t err;\n\n\t\t\tfor (int i = 0; i < nplanes; ++i) {\n\t\t\t\taddrs[i].ppa = bgn.ppa;\n\t\t\t\taddrs[i].g.ch = ch;\n\t\t\t\taddrs[i].g.lun = lun;\n\t\t\t\taddrs[i].g.pl = i % nplanes;\n\t\t\t\t// blk is fixed and inherited from bgn\n\t\t\t\t// pg is fixed and inherited from bgn (0)\n\t\t\t\t// sec is fixed and inherited from bgn (0)\n\t\t\t}\n\n\t\t\terr = nvm_addr_erase(sblk->dev,\n\t\t\t\t\t addrs,\n\t\t\t\t\t nplanes,\n\t\t\t\t\t PLANE_FLAG);\n\t\t\tif (err) {\n\t\t\t\tNVM_DEBUG(\"FAILED: nvm_addr_erase err(%ld)\", err);\n\t\t\t\t++nerr;\n\t\t\t}\n\t\t}\n\t}\n\n\treturn -nerr;\n}\n\nssize_t nvm_sblk_pwrite(struct nvm_sblk *sblk, const void *buf, size_t count,\n\t\t\tsize_t offset)\n{\n\tconst struct nvm_addr bgn = sblk->bgn;\n\n\tconst struct nvm_geo geo = nvm_sblk_attr_geo(sblk);\n\n\tconst int nchannels = geo.nchannels;\n\tconst int ch_off = bgn.g.ch;\n\tconst int nluns = geo.nluns;\n\tconst int lun_off = bgn.g.lun;\n\n\tconst int npages = geo.npages;\n\tconst int nplanes = geo.nplanes;\n\tconst int nsectors = geo.nsectors;\n\tconst int nbytes = geo.nbytes;\n\n\tconst int alignment = (nplanes * nsectors * nbytes);\n\n\tconst size_t spg_bgn = sblk->pos_write / alignment;\n\tconst size_t spg_end = spg_bgn + (count / alignment);\n\n\tconst int NVM_OP_NADDR = nplanes * nsectors;\n\tconst int NVM_CMD_NADDR = NVM_OP_NADDR;\n\n\tconst int nthreads = nchannels * nluns;\n\n\tssize_t nerr = 0;\n\n\tint PLANE_FLAG = 0x0;\n\n\tconst char *data;\n\n\tif (buf) {\t// Use user-supplied buffer\n\t\tdata = buf;\n\t} else {\t// Allocate and use a padding buffer\n\t\tdata = nvm_buf_alloc(geo, NVM_CMD_NADDR * nbytes);\n\t\tif (!data)\n\t\t\treturn -count;\n\t}\n\n\t// Check alignment\n\tif ((count % alignment) || (sblk->pos_write % alignment))\n\t\treturn -count;\n\n\tPLANE_FLAG = (geo.nplanes == 4) ? NVM_MAGIC_FLAG_QUAD : PLANE_FLAG;\n\tPLANE_FLAG = (geo.nplanes == 2) ? 
NVM_MAGIC_FLAG_DUAL : PLANE_FLAG;\n\n\t#pragma omp parallel num_threads(nthreads) reduction(+:nerr)\n\t{\n\t\tconst int tid = omp_get_thread_num();\n\n\t\t#pragma omp barrier\n\t\tfor (size_t spg = spg_bgn + tid; spg < spg_end; spg += nthreads) {\n\t\t\tstruct nvm_addr addrs[NVM_CMD_NADDR];\n\t\t\tconst char *data_off;\n\n\t\t\tif (buf)\n\t\t\t\tdata_off = data + spg * nbytes * NVM_CMD_NADDR;\n\t\t\telse\n\t\t\t\tdata_off = data;\n\n\t\t\t// channels X luns X pages\n\t\t\tint ch = (spg % nchannels) + ch_off;\n\t\t\tint lun = ((spg / nchannels) % nluns) + lun_off;\n\t\t\tint vpg = ((spg / nchannels) / nluns) % npages;\n\n\t\t\t// Unroll: nplane X nsector\n\t\t\tfor (int i = 0; i < NVM_CMD_NADDR; ++i) {\n\t\t\t\taddrs[i].ppa = bgn.ppa;\n\t\t\t\taddrs[i].g.ch = ch;\n\t\t\t\taddrs[i].g.lun = lun;\n\t\t\t\taddrs[i].g.pg = vpg;\n\t\t\t\taddrs[i].g.pl = (i / nsectors) % nplanes;\n\t\t\t\t// blk is fixed and inherited from bgn\n\t\t\t\taddrs[i].g.sec = i % nsectors;\n\t\t\t}\n\n\t\t\tssize_t err = nvm_addr_write(sblk->dev,\n\t\t\t\t\t\t addrs,\n\t\t\t\t\t\t NVM_CMD_NADDR,\n\t\t\t\t\t\t data_off,\n\t\t\t\t\t\t PLANE_FLAG);\n\t\t\tif (err) {\n\t\t\t\tNVM_DEBUG(\"FAILED: nvm_addr_write e(%ld)\", err);\n\t\t\t\t++nerr;\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nerr;\n}\n\nssize_t nvm_sblk_write(struct nvm_sblk *sblk, const void *buf, size_t count)\n{\n\tssize_t nerr = nvm_sblk_pwrite(sblk, buf, count, sblk->pos_write);\n\n\tif (!nerr)\n\t\tsblk->pos_write += count;\n\n\treturn nerr;\n}\n\nssize_t nvm_sblk_pad(NVM_SBLK sblk)\n{\n\treturn nvm_sblk_write(sblk, NULL, sblk->geo.tbytes - sblk->pos_write);\n}\n\nssize_t nvm_sblk_pread(struct nvm_sblk *sblk, void *buf, size_t count,\n\t\t size_t offset)\n{\n\tconst struct nvm_addr bgn = sblk->bgn;\n\n\tconst struct nvm_geo geo = nvm_sblk_attr_geo(sblk);\n\n\tconst int nchannels = geo.nchannels;\n\tconst int ch_off = bgn.g.ch;\n\tconst int nluns = geo.nluns;\n\tconst int lun_off = bgn.g.lun;\n\n\tconst int npages = geo.npages;\n\tconst int nplanes = geo.nplanes;\n\tconst int nsectors = geo.nsectors;\n\tconst int nbytes = geo.nbytes;\n\n\tconst int alignment = (nplanes * nsectors * nbytes);\n\n\tconst size_t spg_bgn = offset / alignment;\n\tconst size_t spg_end = spg_bgn + (count / alignment);\n\n\tconst int NVM_OP_NADDR = nplanes * nsectors;\n\tconst int NVM_CMD_NADDR = NVM_OP_NADDR;\n\n\tconst int nthreads = nchannels * nluns;\n\n\tssize_t nerr = 0;\n\n\tint PLANE_FLAG = 0x0;\n\n\tif ((count % alignment) || (offset % alignment)) {\t// Check align\n\t\treturn -count;\n\t}\n\n\tPLANE_FLAG = (geo.nplanes == 4) ? NVM_MAGIC_FLAG_QUAD : PLANE_FLAG;\n\tPLANE_FLAG = (geo.nplanes == 2) ? 
NVM_MAGIC_FLAG_DUAL : PLANE_FLAG;\n\n\t#pragma omp parallel num_threads(nthreads) reduction(+:nerr)\n\t{\n\t\tconst int tid = omp_get_thread_num();\n\n\t\t#pragma omp barrier\n\t\tfor (size_t spg = spg_bgn + tid; spg < spg_end; spg += nthreads) {\n\t\t\tstruct nvm_addr addrs[NVM_CMD_NADDR];\n\n\t\t\tchar *buf_off = buf + spg * nbytes * NVM_CMD_NADDR;\n\n\t\t\t// channels X luns X pages\n\t\t\tint ch = (spg % nchannels) + ch_off;\n\t\t\tint lun = ((spg / nchannels) % nluns) + lun_off;\n\t\t\tint vpg = ((spg / nchannels) / nluns) % npages;\n\n\t\t\t// Unroll: nplane X nsector\n\t\t\tfor (int i = 0; i < NVM_CMD_NADDR; ++i) {\n\t\t\t\taddrs[i].ppa = bgn.ppa;\n\t\t\t\taddrs[i].g.ch = ch;\n\t\t\t\taddrs[i].g.lun = lun;\n\t\t\t\taddrs[i].g.pg = vpg;\n\t\t\t\taddrs[i].g.pl = (i / nsectors) % nplanes;\n\t\t\t\t// blk is fixed and inherited from bgn\n\t\t\t\taddrs[i].g.sec = i % nsectors;\n\t\t\t}\n\n\t\t\tssize_t err = nvm_addr_read(sblk->dev,\n\t\t\t\t\t\t addrs,\n\t\t\t\t\t\t NVM_CMD_NADDR,\n\t\t\t\t\t\t buf_off,\n\t\t\t\t\t\t PLANE_FLAG);\n\t\t\tif (err) {\n\t\t\t\tNVM_DEBUG(\"FAILED: nvm_addr_read e(%ld)\", err);\n\t\t\t\t++nerr;\n\t\t\t}\n\t\t}\n\t}\n\n\treturn -nerr;\n}\n\nssize_t nvm_sblk_read(struct nvm_sblk *sblk, void *buf, size_t count)\n{\n\tssize_t nerr = nvm_sblk_pread(sblk, buf, count, sblk->pos_read);\n\n\tif (!nerr)\n\t\tsblk->pos_read += count;\n\n\treturn nerr;\n}\n\nstruct nvm_addr nvm_sblk_attr_end(struct nvm_sblk *sblk)\n{\n\treturn sblk->end;\n}\n\nstruct nvm_geo nvm_sblk_attr_geo(struct nvm_sblk *sblk)\n{\n\treturn sblk->geo;\n}\n\nsize_t nvm_sblk_attr_pos_write(struct nvm_sblk *sblk)\n{\n\treturn sblk->pos_write;\n}\n\nsize_t nvm_sblk_attr_pos_read(struct nvm_sblk *sblk)\n{\n\treturn sblk->pos_read;\n}\n\nvoid nvm_sblk_pr(struct nvm_sblk *sblk)\n{\n\tprintf(\"sblk {\\n\");\n\tprintf(\" \"); nvm_dev_pr(sblk->dev);\n\tprintf(\" bgn \"); nvm_addr_pr(sblk->bgn);\n\tprintf(\" end \"); nvm_addr_pr(sblk->end);\n\tprintf(\" \"); nvm_geo_pr(sblk->geo);\n\tprintf(\"}\\n\");\n}\n\n"
},
{
"alpha_fraction": 0.5779427289962769,
"alphanum_fraction": 0.5885471701622009,
"avg_line_length": 20.9069766998291,
"blob_id": "0ec86dcdf8423208eb55f06bf619194d2c754914",
"content_id": "9861f369aaf3132863c6b8323576a79c96a8dfaf",
"detected_licenses": [
"BSD-2-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 943,
"license_type": "permissive",
"max_line_length": 83,
"num_lines": 43,
"path": "/examples/scripts/ewr_sblk.sh",
"repo_name": "ivpi/liblightnvm",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env bash\n\nfunction usage {\n\techo \"usage: ./sbl_ewr.sh LNVM_DEV CH_BGN CH_END LUN_BGN LUN_END BLK DRY\"\n\texit\n}\n\nLNVM_DEV=$1\nCH_BEGIN=$2\nCH_END=$3\nLUN_BEGIN=$4\nLUN_END=$5\nBLK=$6\nDRY=$7\n\nif [ -z \"$LNVM_DEV\" ] || [ -z \"$CH_BEGIN\" ] || [ -z \"$CH_END\" ] || \\\n [ -z \"$LUN_BEGIN\" ] || [ -z \"$LUN_END\" ] || [ -z \"$BLK\" ] || [ -z \"$DRY\" ]; then\n\tusage\nfi\n\nSZ=$((${#LNVM_DEV} -2))\nNVME_DEV=`echo \"$LNVM_DEV\" | cut -c-$SZ`\nNCHANNELS=`cat /sys/class/nvme/$NVME_DEV/$LNVM_DEV/lightnvm/num_channels`\nNLUNS=`cat /sys/class/nvme/$NVME_DEV/$LNVM_DEV/lightnvm/num_luns`\n\necho \"**\"\necho \"** $LNVM_DEV with nchannels($NCHANNELS) and nluns($NLUNS)\"\necho \"**\"\n\nfor CMD in erase write read\ndo\n\techo \"*\"\n\techo \"* $CMD 'spanned' block\"\n\techo \"*\"\n\tif [ $DRY -ne \"1\" ]; then\n\t\t/usr/bin/time nvm_sblk $CMD $LNVM_DEV $CH_BEGIN $CH_END $LUN_BEGIN $LUN_END $BLK\n\t\tERR=$?\n\t\tif [ $ERR -ne 0 ]; then\n\t\t\techo \"sblk operation error($ERR)\"\n\t\t\texit $ERR\n\t\tfi\n\tfi\ndone\n\n"
},
{
"alpha_fraction": 0.594159722328186,
"alphanum_fraction": 0.6279301047325134,
"avg_line_length": 20.062761306762695,
"blob_id": "be3ce7aa8b63e003693995c52735e327e3e10ab1",
"content_id": "7ccce2141fbb80507ec0be08f00b4a543e9e34ec",
"detected_licenses": [
"BSD-2-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 5034,
"license_type": "permissive",
"max_line_length": 86,
"num_lines": 239,
"path": "/deprecated/test_beam.c",
"repo_name": "ivpi/liblightnvm",
"src_encoding": "UTF-8",
"text": "/*\n * append_tests - Tests for append storage interface\n */\n#include <stdlib.h>\n#include <string.h>\n#include <stdio.h>\n#include <liblightnvm.h>\n\n#include <CUnit/Basic.h>\n\nstatic char nvm_dev_name[DISK_NAME_LEN];\n\nstatic void init_data_test(char *buf, size_t len)\n{\n\tsize_t i;\n\tchar payload = 'a';\n\n\tfor (i = 0; i < len; i++) {\n\t\tbuf[i] = (payload + i) % 28;\n\t}\n}\n\n#define TEST_AR_ALL 1\n#define TEST_AR_1K_BYTES 2\n#define TEST_AR_1_BYTE 4\nvoid beam_ar_generic(char *src, char *dst, size_t len, int flags)\n{\n\tsize_t written, read;\n\tint beam_id;\n\tint ret;\n\n\tbeam_id = nvm_beam_create(nvm_dev_name, 0, 0);\n\tCU_ASSERT(beam_id > 0);\n\n\twritten = nvm_beam_append(beam_id, src, len);\n\tCU_ASSERT(len == written);\n\n\tret = nvm_beam_sync(beam_id, 0);\n\tCU_ASSERT(0 == ret);\n\n\tread = nvm_beam_read(beam_id, dst, written, 0, 0);\n\tCU_ASSERT(written == read);\n\n\tCU_ASSERT_STRING_EQUAL(src, dst);\n\n\tif (flags & TEST_AR_1K_BYTES) {\n\t\tint i;\n\n\t\t/* read in 1000 byte chunks */\n\t\tmemset(dst, 0, len);\n\n\t\tfor (i = 0; i < len / 1000; i++) {\n\t\t\tint offset = i * 1000;\n\t\t\tread = nvm_beam_read(beam_id, dst + offset, 1000,\n\t\t\t\t\t\t\t\toffset, 0);\n\t\t\tCU_ASSERT(1000 == read);\n\t\t}\n\t\tCU_ASSERT_NSTRING_EQUAL(src, dst, len);\n\t}\n\n\tif (flags & TEST_AR_1_BYTE) {\n\t\tint i;\n\n\t\t/* read in 1 byte chunks */\n\t\tmemset(dst, 0, len);\n\n\t\tfor (i = 0; i < len; i++) {\n\t\t\tread = nvm_beam_read(beam_id, dst + i, 1, i, 0);\n\t\t\tCU_ASSERT(1 == read);\n\t\t}\n\t\tCU_ASSERT_NSTRING_EQUAL(src, dst, len);\n\t}\n\n\tnvm_beam_destroy(beam_id, 0);\n}\n\nint init_suite1(void)\n{\n\treturn nvm_beam_init();\n}\n\nint clean_suite1(void)\n{\n\tnvm_beam_exit();\n\treturn 0;\n}\n\nvoid test_CREATE_BEAM(void)\n{\n\tint beam_id;\n\n\tbeam_id = nvm_beam_create(nvm_dev_name, 0, 0);\n\tCU_ASSERT(beam_id > 0);\n\n\tnvm_beam_destroy(beam_id, 0);\n}\n\nvoid test_BEAM_CLOSE_UNGRACEFUL(void)\n{\n\tint beam_id;\n\n\tbeam_id = nvm_beam_create(nvm_dev_name, 0, 0);\n\tCU_ASSERT(beam_id > 0);\n\n\tnvm_beam_destroy(beam_id, 0);\n\n\tbeam_id = nvm_beam_create(nvm_dev_name, 0, 0);\n\tCU_ASSERT(beam_id > 0);\n\n}\n\n/*\n * Append and read back from beam.\n *\t- Append: payload < PAGE_SIZE\n *\t- Read: All\n *\t- open - append - close - open - read - close\n */\nvoid test_BEAM_AR1(void)\n{\n\tchar test[100] = \"Hello World\\n\";\n\tint test_len = strlen(test);\n\tchar test2[100];\n\n\tsize_t written, read;\n\tint beam_id;\n\tint ret;\n\n\tbeam_id = nvm_beam_create(nvm_dev_name, 0, 0);\n\tCU_ASSERT(beam_id > 0);\n\n\twritten = nvm_beam_append(beam_id, test, test_len+1);\n\tCU_ASSERT(test_len+1 == written);\n\n\tret = nvm_beam_sync(beam_id, 0);\n\tCU_ASSERT(0 == ret);\n\n\tread = nvm_beam_read(beam_id, test2, written, 0, 0);\n\tCU_ASSERT(written == read);\n\n\tCU_ASSERT_STRING_EQUAL(test, test2);\n\n\tnvm_beam_destroy(beam_id, 0);\n}\n\n/*\n * Append and read back from beam.\n *\t- Append: PAGE_SIZE < payload < BLOCK_SIZE\n *\t- Read: All, 1000 byte chunks, 1 byte chunks\n *\t- open - append - close - open - read - close\n */\nvoid test_BEAM_AR2(void)\n{\n\tchar test[5000];\n\tchar test2[5000];\n\tint flags = TEST_AR_ALL | TEST_AR_1K_BYTES | TEST_AR_1_BYTE;\n\n\tinit_data_test(test, 5000);\n\tbeam_ar_generic(test, test2, 5000, flags);\n}\n\n/*\n * Append and read back from beam.\n *\t- Append: payload is multiple pages in same block\n *\t- Read: All, 1000 byte chunks, 1 byte chunks\n *\t- open - append - close - open - read - close\n */\nvoid 
test_BEAM_AR3(void)\n{\n\tchar test[50000];\n\tchar test2[50000];\n\tint flags = TEST_AR_ALL | TEST_AR_1K_BYTES | TEST_AR_1_BYTE;\n\n\tinit_data_test(test, 50000);\n\tbeam_ar_generic(test, test2, 50000, flags);\n}\n\n/*\n * Append and read back from beam.\n *\t- Append: payload > BLOCK_SIZE\n *\t- Read: All, 1000 byte chunks, 1 byte chunks\n *\t- open - append - close - open - read - close\n */\nvoid test_BEAM_AR4(void)\n{\n\tsize_t test_size = 2000000;\n\tchar test[test_size];\n\tchar test2[test_size];\n\tint flags = TEST_AR_ALL | TEST_AR_1K_BYTES;\n\n\tinit_data_test(test, test_size);\n\tbeam_ar_generic(test, test2, test_size, flags);\n}\n\nint main(int argc, char **argv)\n{\n\tif (argc != 2) {\n\t\tprintf(\"Usage: %s dev_name / dev_name: LightNVM device\\n\",\n\t\t\t\t\t\t\t\t\targv[0]);\n\t\treturn -1;\n\t}\n\n\tif (strlen(argv[1]) > DISK_NAME_LEN) {\n\t\tprintf(\"Argument dev_name can be maximum %d characters\\n\",\n\t\t\t\t\t\t\tDISK_NAME_LEN - 1);\n\t}\n\n\tstrcpy(nvm_dev_name, argv[1]);\n\n\tCU_pSuite pSuite = NULL;\n\n\tif (CUE_SUCCESS != CU_initialize_registry())\n\t\treturn CU_get_error();\n\n\tpSuite = CU_add_suite(\"nvm_beam*\", init_suite1, clean_suite1);\n\tif (NULL == pSuite) {\n\t\tCU_cleanup_registry();\n\t\treturn CU_get_error();\n\t}\n\n\tif (\n\t(NULL == CU_add_test(pSuite, \"beam_create()\", test_CREATE_BEAM)) ||\n\t(NULL == CU_add_test(pSuite, \"beam ungraceful close\", test_BEAM_CLOSE_UNGRACEFUL)) ||\n\t(NULL == CU_add_test(pSuite, \"beam AR1\", test_BEAM_AR1)) ||\n\t(NULL == CU_add_test(pSuite, \"beam AR2\", test_BEAM_AR2)) ||\n\t(NULL == CU_add_test(pSuite, \"beam AR3\", test_BEAM_AR3)) ||\n\t(NULL == CU_add_test(pSuite, \"beam AR4\", test_BEAM_AR4)) ||\n\t0)\n\t{\n\t\tCU_cleanup_registry();\n\t\treturn CU_get_error();\n\t}\n\n\t/* Run all tests using the CUnit Basic interface */\n\tCU_basic_set_mode(CU_BRM_SILENT);\n\tCU_basic_run_tests();\n\tCU_cleanup_registry();\n\n\treturn CU_get_error();\n}\n"
},
{
"alpha_fraction": 0.7265725135803223,
"alphanum_fraction": 0.72956782579422,
"avg_line_length": 30.15999984741211,
"blob_id": "3796bcb4975da5141f1bf2626629f99ca206213f",
"content_id": "fbdb94c5aed4b8fe7d53730a82c8c7bbd5b57483",
"detected_licenses": [
"BSD-2-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 2337,
"license_type": "permissive",
"max_line_length": 81,
"num_lines": 75,
"path": "/deprecated/nvm_atomic.h",
"repo_name": "ivpi/liblightnvm",
"src_encoding": "UTF-8",
"text": "/*\n * atomic - atomic operations\n *\n * Copyright (C) 2015 Javier González <[email protected]>\n * All rights reserved.\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions are met:\n *\n * - Redistributions of source code must retain the above copyright notice, this\n * list of conditions and the following disclaimer.\n * - Redistributions in binary form must reproduce the above copyright notice,\n * this list of conditions and the following disclaimer in the documentation\n * and/or other materials provided with the distribution.\n *\n * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND ANY\n * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY\n * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n */\n#ifndef __NVM_ATOMIC_H\n#define __NVM_ATOMIC_H\n#include <pthread.h>\n\ntypedef struct atomic_cnt {\n\tint cnt;\n\tpthread_spinlock_t lock;\n} atomic_cnt;\n\nstatic inline void atomic_init(struct atomic_cnt *cnt)\n{\n\tpthread_spin_init(&cnt->lock, PTHREAD_PROCESS_SHARED);\n}\n\nstatic inline void atomic_set(struct atomic_cnt *cnt, int value)\n{\n\tpthread_spin_lock(&cnt->lock);\n\tcnt->cnt = value;\n\tpthread_spin_unlock(&cnt->lock);\n}\n\nstatic inline void atomic_assign_inc(struct atomic_cnt *cnt, int *dst)\n{\n\tpthread_spin_lock(&cnt->lock);\n\tcnt->cnt++;\n\t*dst = cnt->cnt;\n\tpthread_spin_unlock(&cnt->lock);\n}\n\nstatic inline void atomic_inc(struct atomic_cnt *cnt)\n{\n\tpthread_spin_lock(&cnt->lock);\n\tcnt->cnt++;\n\tpthread_spin_unlock(&cnt->lock);\n}\n\nstatic inline int atomic_dec_and_test(struct atomic_cnt *cnt)\n{\n\tint ret;\n\n\tpthread_spin_lock(&cnt->lock);\n\tcnt->cnt--;\n\tret = (cnt->cnt == 0) ? 1 : 0;\n\tpthread_spin_unlock(&cnt->lock);\n\n\treturn ret;\n}\n\n#endif /* __NVM_ATOMIC_H */\n"
},
{
"alpha_fraction": 0.6014184355735779,
"alphanum_fraction": 0.6297872066497803,
"avg_line_length": 28.33333396911621,
"blob_id": "f12bafb2c7d469de9bacd71d95a8b9349abfcf35",
"content_id": "b5dee9cc6d89f1eba267ecc088ec39942b6be5f8",
"detected_licenses": [
"BSD-2-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 705,
"license_type": "permissive",
"max_line_length": 76,
"num_lines": 24,
"path": "/examples/scripts/ewr_all.sh",
"repo_name": "ivpi/liblightnvm",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env bash\n\nBLK=$1\nif [ -z \"$BLK\" ]; then\n\techo \"usage: $0 [SPANNED_BLK_IDX]\"\n\texit\nfi\n\nDRY=$2\nif [ -z \"$DRY\" ]; then\n\tDRY=\"1\"\nfi\n\n/usr/bin/time ./ewr_vpage.sh $BLK $DRY > ewr_vpage_${BLK}_01.log\n/usr/bin/time ./ewr_vpage.sh $BLK $DRY > ewr_vpage_${BLK}_02.log\n\n/usr/bin/time ./ewr_vpage_split.sh $BLK $DRY > ewr_vpage_split_${BLK}_01.log\n/usr/bin/time ./ewr_vpage_split.sh $BLK $DRY > ewr_vpage_split_${BLK}_02.log\n\n/usr/bin/time ./ewr_vblk.sh $BLK $DRY > ewr_vblk_${BLK}_01.log\n/usr/bin/time ./ewr_vblk.sh $BLK $DRY > ewr_vblk_${BLK}_02.log\n\n/usr/bin/time ./ewr_vblk_split.sh $BLK $DRY > ewr_vblk_split_${BLK}_01.log\n/usr/bin/time ./ewr_vblk_split.sh $BLK $DRY > ewr_vblk_split_${BLK}_02.log\n\n"
},
{
"alpha_fraction": 0.5927536487579346,
"alphanum_fraction": 0.6057971119880676,
"avg_line_length": 21.25806427001953,
"blob_id": "38c255ee10d1e4b4ae1eaf0656e97adca42241f6",
"content_id": "a623144ed696d426d55c5713834baf8a42c407f2",
"detected_licenses": [
"BSD-2-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 1380,
"license_type": "permissive",
"max_line_length": 73,
"num_lines": 62,
"path": "/examples/scripts/ewr_vpage_split.sh",
"repo_name": "ivpi/liblightnvm",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env bash\n\nBLK=$1\nif [ -z \"$BLK\" ]; then\n\techo \"usage: $0 [BLK_IDX] [0|1]\"\n\texit\nfi\n\nDRY=$2\nif [ -z \"$DRY\" ]; then\n\tDRY=\"1\"\nfi\n\nNVME_DEV=nvme0\nLNVM_DEV=nvme0n1\nNCHANNELS=`cat /sys/class/nvme/$NVME_DEV/$LNVM_DEV/lightnvm/num_channels`\nCH_BEGIN=0\nCH_END=$(($NCHANNELS-1))\nNLUNS=`cat /sys/class/nvme/$NVME_DEV/$LNVM_DEV/lightnvm/num_luns`\nLUN_BEGIN=0\nLUN_END=$(($NLUNS-1))\nNPAGES=`cat /sys/class/nvme/$NVME_DEV/$LNVM_DEV/lightnvm/num_pages`\nPG_BEGIN=0\nPG_END=$(($NPAGES-1))\n\necho \"** $LNVM_DEV with nchannels($NCHANNELS) and nluns($NLUNS)\"\n\necho \"** E 'spanned' block\"\nfor CH in $(seq $CH_BEGIN $CH_END); do\n\tfor LUN in $(seq $LUN_BEGIN $LUN_END); do\n\t\techo \"*** ch($CH), lun($LUN), blk($BLK)\"\n\t\tif [ $DRY -ne \"1\" ]; then\n\t\t\tnvm_vblk erase $LNVM_DEV $CH $LUN $BLK\n\t\tfi\n\tdone\ndone\n\necho \"** W 'spanned' block\"\n\nfor CH in $(seq $CH_BEGIN $CH_END); do\n\tfor LUN in $(seq $LUN_BEGIN $LUN_END); do\n\t\techo \"*** ch($CH), lun($LUN), blk($BLK)\"\n\t\tif [ $DRY -ne \"1\" ]; then\n\t\t\tfor PG in $(seq $PG_BEGIN $PG_END); do\n\t\t\t\tnvm_vblk pwrite $LNVM_DEV $CH $LUN $BLK $PG\n\t\t\tdone\n\t\tfi\n\tdone\ndone\n\necho \"** R 'spanned' block\"\n\nfor CH in $(seq $CH_BEGIN $CH_END); do\n\tfor LUN in $(seq $LUN_BEGIN $LUN_END); do\n\t\techo \"*** ch($CH), lun($LUN), blk($BLK)\"\n\t\tif [ $DRY -ne \"1\" ]; then\n\t\t\tfor PG in $(seq $PG_BEGIN $PG_END); do\n\t\t\t\tnvm_vblk pread $LNVM_DEV $CH $LUN $BLK $PG\n\t\t\tdone\n\t\tfi\n\tdone\ndone\n"
},
{
"alpha_fraction": 0.5874799489974976,
"alphanum_fraction": 0.6003210544586182,
"avg_line_length": 17.878787994384766,
"blob_id": "3dd2a47a6965ce802a5688342e5b8dec60b374d3",
"content_id": "157af1f4215b5145e7d176a19a2d172472ce77c1",
"detected_licenses": [
"BSD-2-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 623,
"license_type": "permissive",
"max_line_length": 55,
"num_lines": 33,
"path": "/examples/info.c",
"repo_name": "ivpi/liblightnvm",
"src_encoding": "UTF-8",
"text": "/* Target info example */\n#include <stdlib.h>\n#include <stdio.h>\n#include <string.h>\n#include <liblightnvm.h>\n\nint main(int argc, char **argv)\n{\n\tif (argc != 2) {\n\t\tprintf(\"Usage: %s dev_name\\n\", argv[0]);\n\t\treturn -1;\n\t}\n\n\tif (strlen(argv[1]) > DISK_NAME_LEN) {\n\t\tprintf(\"len(device_name) > %d\\n\", DISK_NAME_LEN - 1);\n\t}\n\n\tNVM_DEV dev = nvm_dev_open(argv[1]);\n\tif (!dev) {\n\t\tprintf(\"Failed opening device\\n\");\n\t\treturn -1;\n\t}\n\n\tprintf(\"** Device information -- nvm_dev_pr **\\n\");\n\tnvm_dev_pr(dev);\n\n\tprintf(\"** Device geometry -- nvm_geo_pr **\\n\");\n\tnvm_geo_pr(nvm_dev_attr_geo(dev));\n\n\tnvm_dev_close(dev);\n\n\treturn 0;\n}\n"
},
{
"alpha_fraction": 0.6722556352615356,
"alphanum_fraction": 0.674153745174408,
"avg_line_length": 52.576271057128906,
"blob_id": "392c47a53862fce510d6230ad227b96a49f0d059",
"content_id": "87cea3fd244378eb53a5cfeb52a5945989f23a9b",
"detected_licenses": [
"BSD-2-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "reStructuredText",
"length_bytes": 3161,
"license_type": "permissive",
"max_line_length": 114,
"num_lines": 59,
"path": "/README.rst",
"repo_name": "ivpi/liblightnvm",
"src_encoding": "UTF-8",
"text": "liblightnvm - User space I/O library for LightNVM\n=================================================\n\n+------------------+-------------------------------------+\n| Status | |Build Status| |Coverage Status| |\n+==================+==================+==================+\n| **Downloads** |\n+------------------+-------------------------------------+\n| deb | |Download deb| |\n+------------------+-------------------------------------+\n| tgz | |Download tgz| |\n+------------------+-------------------------------------+\n\n\nliblightnvm is a user space library that manages provisioning of and I/O\nsubmission to physical flash. The motivation is to enable I/O-intensive\napplications to implement their own Flash Translation Layer (FTLs) using\nthe internal application data structures. The design is based on the\nprinciple that high-performance I/O applications often use structures\nthat assimilates structures found within a Flash translation layer. This\ninclude log-structured data structures that provides their own\nmechanisms for data placement, garbage collection, and I/O scheduling\nstrategies.\n\nFor example, popular key-value stores often use a form of Log Structured\nMerge Trees (LSMs) as their base data structure (including RocksDB,\nMongoDB, Apache Cassandra). The LSM is in itself a form of FTL, which\nmanages data placement and garbage collection. This class of\napplications can benefit from a direct path to physical flash to take\nfull advantage of the optimizations they do and spend host resources on,\ninstead of missing them through the several levels of indirection that\nthe traditional I/O stack imposes to enable genericity: page cache, VFS,\nfile system, and device physical - logical translation table.\nliblightnvm exposes append-only primitives using direct physical flash\nto support this class of applications.\n\nContact and Contributions\n=========================\n\nliblightnvm is in active development and pull requests are very welcome.\n\nReferences\n==========\n\n1. https://github.com/OpenChannelSSD/linux/tree/liblnvm\n2. http://openchannelssd.readthedocs.org/en/latest/gettingstarted/#configure-qemu\n3. https://github.com/OpenChannelSSD/qemu-nvme\n4. https://github.com/OpenChannelSSD/lightnvm-hw\n5. https://github.com/OpenChannelSSD/rocksdb\n6. http://openchannelssd.readthedocs.org/en/latest/\n\n.. |Build Status| image:: https://travis-ci.org/OpenChannelSSD/liblightnvm.svg?branch=master\n :target: https://travis-ci.org/OpenChannelSSD/liblightnvm\n.. |Coverage Status| image:: https://coveralls.io/repos/github/OpenChannelSSD/liblightnvm/badge.svg?branch=master\n :target: https://coveralls.io/github/OpenChannelSSD/liblightnvm?branch=master\n.. |Download deb| image:: https://api.bintray.com/packages/openchannelssd/debs/liblightnvm/images/download.svg\n :target: https://bintray.com/openchannelssd/debs/liblightnvm/_latestVersion\n.. |Download tgz| image:: https://api.bintray.com/packages/openchannelssd/binaries/liblightnvm/images/download.svg\n :target: https://bintray.com/openchannelssd/binaries/liblightnvm/_latestVersion\n"
},
{
"alpha_fraction": 0.5980346202850342,
"alphanum_fraction": 0.6102012395858765,
"avg_line_length": 19.74757194519043,
"blob_id": "839de220b9834ef68069126e752194b80beb3d5e",
"content_id": "68f86e33b88a5abf751ded8d287f3494bcf83055",
"detected_licenses": [
"BSD-2-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 2137,
"license_type": "permissive",
"max_line_length": 85,
"num_lines": 103,
"path": "/tests/test_vblk.c",
"repo_name": "ivpi/liblightnvm",
"src_encoding": "UTF-8",
"text": "#include <stdio.h>\n#include <stdlib.h>\n#include <errno.h>\n#include <string.h>\n#include <unistd.h>\n#include <liblightnvm.h>\n\n#include <CUnit/Basic.h>\n\nstatic char nvm_dev_name[DISK_NAME_LEN] = \"nvme0n1\";\n\nvoid test_VBLOCK_NEW_FREE(void)\n{\n\tNVM_VBLK vblock;\n\n\tvblock = nvm_vblk_new();\n\tCU_ASSERT_PTR_NOT_NULL(vblock);\n\n\tnvm_vblk_free(vblock);\n}\n\nvoid test_VBLOCK_GET_PUT_01(void)\n{\n\tNVM_VBLK vblock;\n\tNVM_DEV dev;\n\tint ret;\n\n\tdev = nvm_dev_open(nvm_dev_name);\n\tCU_ASSERT(dev > 0);\n\n\tvblock = nvm_vblk_new();\t/* get block from arbitrary lun */\n\tCU_ASSERT_PTR_NOT_NULL(vblock);\n\t\n\tret = nvm_vblk_get(vblock, dev);\n\tCU_ASSERT(0==ret);\n\n\tret = nvm_vblk_put(vblock);\n\tCU_ASSERT(0==ret);\n\n\tnvm_vblk_free(vblock);\n\n\tnvm_dev_close(dev);\n\tCU_ASSERT(0==ret);\n}\n\nvoid test_VBLOCK_GETS_PUT_01(void)\n{\n\tNVM_VBLK vblock;\n\tNVM_DEV dev;\n\tint ret;\n\n\tdev = nvm_dev_open(nvm_dev_name);\n\tCU_ASSERT_PTR_NOT_NULL(dev > 0);\n\n\tvblock = nvm_vblk_new();\n\tCU_ASSERT_PTR_NOT_NULL(vblock);\t\t/* get block from lun 0 */\n\t\n\tret = nvm_vblk_gets(vblock, dev, 0, 0);\n\tCU_ASSERT(0==ret);\n\n\tret = nvm_vblk_put(vblock);\n\n\tnvm_dev_close(dev);\n}\n\nint main(int argc, char **argv)\n{\n\tif (argc > 1) {\n if (strlen(argv[1]) > DISK_NAME_LEN) {\n printf(\"Argument nvm_dev can be maximum %d characters\\n\",\n DISK_NAME_LEN - 1);\n }\n\t\tstrcpy(nvm_dev_name, argv[1]);\n\t}\n\n\tCU_pSuite pSuite = NULL;\n\n\tif (CUE_SUCCESS != CU_initialize_registry())\n\t\treturn CU_get_error();\n\n\tpSuite = CU_add_suite(\"nvm_vblk*\", NULL, NULL);\n\tif (NULL == pSuite) {\n\t\tCU_cleanup_registry();\n\t\treturn CU_get_error();\n\t}\n\n\tif (\n\t(NULL == CU_add_test(pSuite, \"nvm_vblock_[new|free]\", test_VBLOCK_NEW_FREE)) ||\n\t(NULL == CU_add_test(pSuite, \"nvm_vblock_[get|put] 1\", test_VBLOCK_GET_PUT_01)) ||\n\t(NULL == CU_add_test(pSuite, \"nvm_vblock_[gets|put] 1\", test_VBLOCK_GETS_PUT_01)) ||\n\t0)\n\t{\n\t\tCU_cleanup_registry();\n\t\treturn CU_get_error();\n\t}\n\n\t/* Run all tests using the CUnit Basic interface */\n\tCU_basic_set_mode(CU_BRM_SILENT);\n\tCU_basic_run_tests();\n\tCU_cleanup_registry();\n\n\treturn CU_get_error();\n}\n"
},
{
"alpha_fraction": 0.5891531705856323,
"alphanum_fraction": 0.6005708575248718,
"avg_line_length": 17.63475227355957,
"blob_id": "725e2ae6b54cf27b040a7cf1afc24b45edf820a3",
"content_id": "c717a46a94bd88e0dead58bef3185852cf68520b",
"detected_licenses": [
"BSD-2-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 5255,
"license_type": "permissive",
"max_line_length": 76,
"num_lines": 282,
"path": "/examples/sblk.c",
"repo_name": "ivpi/liblightnvm",
"src_encoding": "UTF-8",
"text": "#include <stdlib.h>\n#include <stdio.h>\n#include <string.h>\n#include <errno.h>\n#include <liblightnvm.h>\n\n#include <sys/time.h>\n\nsize_t start, stop;\n\nsize_t wclock_sample(void)\n{\n struct timeval tv;\n gettimeofday(&tv, NULL);\n return tv.tv_usec + tv.tv_sec * 1000000;\n}\n\nsize_t timer_start(void)\n{\n start = wclock_sample();\n return start;\n}\n\nsize_t timer_stop(void)\n{\n stop = wclock_sample();\n return stop;\n}\n\ndouble timer_elapsed(void)\n{\n return (stop-start)/(double)1000000.0;\n}\n\nvoid timer_pr(const char* tool)\n{\n printf(\"Ran %s, elapsed wall-clock: %lf\\n\", tool, timer_elapsed());\n}\n\nint erase(NVM_SBLK sblk, int flags)\n{\n\tssize_t err;\n\n\tNVM_GEO sblk_geo = nvm_sblk_attr_geo(sblk);\n\n\tprintf(\"** nvm_sblk_erase(...): sblk_tbytes(%lu)\\n\", sblk_geo.tbytes);\n\tnvm_sblk_pr(sblk);\n\n\ttimer_start();\n\terr = nvm_sblk_erase(sblk);\n\tif (err) {\n\t\tprintf(\"FAILED: nvm_sblk_erase err(%ld)\\n\", err);\n\t}\n\ttimer_stop();\n\ttimer_pr(\"nvm_sblk_erase\");\n\n\treturn err;\n}\n\nint write(NVM_SBLK sblk, int flags)\n{\n\tssize_t err;\n\tchar *buf;\n\n\tNVM_GEO sblk_geo = nvm_sblk_attr_geo(sblk);\n\n\tprintf(\"** nvm_sblk_write(...): sblk_tbytes(%lu)\\n\", sblk_geo.tbytes);\n\tnvm_sblk_pr(sblk);\n\t\n\ttimer_start();\n\tbuf = nvm_buf_alloc(sblk_geo, sblk_geo.tbytes);\n\tif (!buf) {\n\t\tprintf(\"FAILED: allocating buf\\n\");\n\t\treturn -ENOMEM;\n\t}\n\ttimer_stop();\n\ttimer_pr(\"nvm_buf_alloc\");\n\n\ttimer_start();\n\tnvm_buf_fill(buf, sblk_geo.tbytes);\n\ttimer_stop();\n\ttimer_pr(\"nvm_buf_fill\");\n\n\ttimer_start();\n\terr = nvm_sblk_write(sblk,\n\t\t\t buf,\n\t\t\t sblk_geo.tbytes);\n\tif (err) {\n\t\tprintf(\"FAILED: nvm_sblk_write err(%ld)\\n\", err);\n\t}\n\ttimer_stop();\n\ttimer_pr(\"nvm_sblk_write\");\n\n\tfree(buf);\n\n\treturn err;\n}\n\nint pad(NVM_SBLK sblk, int flags)\n{\n\tssize_t err;\n\n\tNVM_GEO sblk_geo = nvm_sblk_attr_geo(sblk);\n\n\tprintf(\"** nvm_sblk_pad(...): sblk_tbytes(%lu)\\n\", sblk_geo.tbytes);\n\tnvm_sblk_pr(sblk);\n\n\ttimer_start();\n\terr = nvm_sblk_pad(sblk);\n\tif (err) {\n\t\tprintf(\"FAILED: nvm_sblk_pad err(%ld)\\n\", err);\n\t}\n\ttimer_stop();\n\ttimer_pr(\"nvm_sblk_pad\");\n\n\treturn err;\n}\n\nint read(NVM_SBLK sblk, int flags)\n{\n\tssize_t err;\n\tchar *buf;\n\n\tNVM_GEO sblk_geo = nvm_sblk_attr_geo(sblk);\n\n\tprintf(\"** nvm_sblk_read(...): sblk_tbytes(%lu)\\n\", sblk_geo.tbytes);\n\tnvm_sblk_pr(sblk);\n\n\ttimer_start();\n\tbuf = nvm_buf_alloc(sblk_geo, sblk_geo.tbytes);\n\tif (!buf) {\n\t\tprintf(\"FAILED: allocating buf\\n\");\n\t\tnvm_sblk_free(sblk);\n\t\treturn -ENOMEM;\n\t}\n\ttimer_stop();\n\ttimer_pr(\"nvm_buf_alloc\");\n\n\ttimer_start();\n\terr = nvm_sblk_read(sblk,\n\t\t\t buf,\n\t\t\t sblk_geo.tbytes);\n\tif (err) {\n\t\tprintf(\"FAILED: nvm_sblk_read err(%ld)\\n\", err);\n\t}\n\ttimer_stop();\n\ttimer_pr(\"nvm_sblk_read\");\n\n\tfree(buf);\n\n\treturn err;\n}\n\n// From hereon out the code is mostly boiler-plate for command-line parsing,\n// there is a bit of useful code exemplifying:\n//\n// * nvm_dev_open\n// * nvm_dev_close\n// * nvm_dev_attr_geo\n//\n// as well as using the NVM_ADDR data structure.\n\n#define NVM_CLI_CMD_LEN 50\n\ntypedef struct {\n\tchar name[NVM_CLI_CMD_LEN];\n\tint (*func)(NVM_SBLK, int);\n\tint argc;\n\tint flags;\n} NVM_CLI_VBLK_CMD;\n\nstatic NVM_CLI_VBLK_CMD cmds[] = {\n\t{\"erase\", erase, 8, 0x0},\n\t{\"write\", write, 8, 0x0},\n\t{\"pad\", pad, 8, 0x0},\n\t{\"read\", read, 8, 0x0},\n};\n\nstatic int ncmds = sizeof(cmds) / sizeof(cmds[0]);\nstatic 
char *args[] = {\n\t\"dev_name\",\n\t\"ch_bgn\",\n\t\"ch_end\",\n\t\"lun_bgn\",\n\t\"lun_end\",\n\t\"blk\"\n};\n\nvoid _usage_pr(char *cli_name)\n{\n\tint cmd;\n\n\tprintf(\"Usage:\\n\");\n\tfor (cmd = 0; cmd < ncmds; cmd++) {\n\t\tint arg;\n\t\tprintf(\" %s %6s\", cli_name, cmds[cmd].name);\n\t\tfor (arg = 0; arg < cmds[cmd].argc-2; ++arg) {\n\t\t\tprintf(\" %s\", args[arg]);\n\t\t}\n\t\tprintf(\"\\n\");\n\t}\n}\n\nint main(int argc, char **argv)\n{\n\tchar cmd_name[NVM_CLI_CMD_LEN];\n\tchar dev_name[DISK_NAME_LEN+1];\n\tint ret, i;\n\n\tNVM_CLI_VBLK_CMD *cmd = NULL;\n\t\n\tNVM_DEV dev;\n\tNVM_SBLK sblk;\n\tint ch_bgn, ch_end, lun_bgn, lun_end, blk;\n\n\tif (argc < 3) {\n\t\t_usage_pr(argv[0]);\n\t\treturn -1;\n\t}\n\t\t\t\t\t\t\t// Get `cmd_name`\n\tif (strlen(argv[1]) < 1 || strlen(argv[1]) > (NVM_CLI_CMD_LEN-1)) {\n\t\tprintf(\"Invalid cmd\\n\");\n\t\t_usage_pr(argv[0]);\n\t\treturn -EINVAL;\n\t}\n\tmemset(cmd_name, 0, sizeof(cmd_name));\n\tstrcpy(cmd_name, argv[1]);\n\n\tfor (i = 0; i < ncmds; ++i) {\t\t\t// Get `cmd`\n\t\tif (strcmp(cmd_name, cmds[i].name) == 0) {\n\t\t\tcmd = &cmds[i];\n\t\t\tbreak;\n\t\t}\n\t}\n\tif (!cmd) {\n\t\tprintf(\"Invalid cmd(%s)\\n\", cmd_name);\n\t\t_usage_pr(argv[0]);\n\t\treturn -EINVAL;\n\t}\n\n\tif (argc != cmd->argc) {\t\t\t// Check argument count\n\t\tprintf(\"Invalid cmd(%s) argc(%d) != %d\\n\",\n\t\t\tcmd_name, argc, cmd->argc);\n\t\t_usage_pr(argv[0]);\n\t\treturn -1;\n\t}\n\n\tif (strlen(argv[2]) > DISK_NAME_LEN) {\t\t// Get `dev_name`\n\t\tprintf(\"len(dev_name) > %d\\n\", DISK_NAME_LEN);\n\t\treturn -1;\n\t}\n\tmemset(dev_name, 0, sizeof(dev_name));\n\tstrcpy(dev_name, argv[2]);\n\n\tch_bgn = atol(argv[3]);\n\tch_end = atol(argv[4]);\n\n\tlun_bgn = atol(argv[5]);\n\tlun_end = atol(argv[6]);\n\n\tblk = atol(argv[7]);\n\n\tdev = nvm_dev_open(dev_name);\t\t\t// open `dev`\n\tif (!dev) {\n\t\tprintf(\"FAILED: opening device, dev_name(%s)\\n\", dev_name);\n\t\treturn -EINVAL;\n\t}\n\n\tsblk = nvm_sblk_new(dev, ch_bgn, ch_end, lun_bgn, lun_end, blk);\n\tif (!sblk) {\n\t\tprintf(\"FAILED: allocating sblk\\n\");\n\t\treturn -ENOMEM;\n\t}\n\n\tret = cmd->func(sblk, cmd->flags);\n\tprintf(\"ret(%d)\\n\", ret);\n\n\tnvm_sblk_free(sblk);\n\tnvm_dev_close(dev);\t\t\t\t// close `dev`\n\n\treturn ret != 0;\n}\n"
},
{
"alpha_fraction": 0.6545262336730957,
"alphanum_fraction": 0.6716811656951904,
"avg_line_length": 28.826770782470703,
"blob_id": "aaeb16fc56902ffce589d930779b3d3fdf929b7f",
"content_id": "05d382a21761db750d5178304eb3b9b60e67c7fc",
"detected_licenses": [
"BSD-2-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 3789,
"license_type": "permissive",
"max_line_length": 80,
"num_lines": 127,
"path": "/src/nvm_addr.c",
"repo_name": "ivpi/liblightnvm",
"src_encoding": "UTF-8",
"text": "/*\n * addr - Sector addressing functions for mark, erase, write, read, and\n * meta-data print\n *\n * Copyright (C) 2015 Javier González <[email protected]>\n * Copyright (C) 2015 Matias Bjørling <[email protected]>\n * Copyright (C) 2016 Simon A. F. Lund <[email protected]>\n * All rights reserved.\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions are met:\n *\n * - Redistributions of source code must retain the above copyright notice,\n * this list of conditions and the following disclaimer.\n * - Redistributions in binary form must reproduce the above copyright notice,\n * this list of conditions and the following disclaimer in the documentation\n * and/or other materials provided with the distribution.\n *\n * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND ANY\n * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY\n * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n */\n#include <stdlib.h>\n#include <unistd.h>\n#include <string.h>\n#include <errno.h>\n#include <stdio.h>\n#include <linux/lightnvm.h>\n#include <liblightnvm.h>\n#include <nvm.h>\n#include <nvm_debug.h>\n\nstatic ssize_t nvm_addr_cmd(struct nvm_dev *dev, struct nvm_addr list[],\n\t\t\t int len, void *buf, uint16_t flags,\n\t\t\t uint16_t opcode)\n{\n\tstruct nvm_ioctl_dev_pio ctl;\n\tint err;\n\n\tmemset(&ctl, 0, sizeof(ctl));\n\tctl.opcode = opcode;\n\tctl.flags = flags;\n\tctl.nppas = len;\n\tctl.ppas = len == 1 ? list[0].ppa : (uint64_t)list;\n\tctl.addr = (uint64_t)buf;\n\tctl.data_len = buf ? 
dev->geo.nbytes * len : 0;\n\n\terr = ioctl(dev->fd, NVM_DEV_PIO, &ctl);\n#ifdef NVM_DEBUG_ENABLED\n\tif (err || ctl.result || ctl.status) {\n\t\tint i;\n\n\t\tNVM_DEBUG(\"WARN: err(%d), ctl.r(0x%x), ctl.s(%llu), naddr(%d):\",\n\t\t\t err, ctl.result, ctl.status, ctl.nppas);\n\t\tfor (i = 0; i < len; ++i)\n\t\t\tnvm_addr_pr(list[i]);\n\t}\n#endif\n\tif (err) {\t\t// Give up on IOCTL errors\n\t\terrno = EIO;\n\t\treturn -1;\n\t}\n\n\tswitch (ctl.result) {\n\tcase 0x0:\t// All good\n\t\treturn 0;\n\tcase 0x4700:\t// As good as it gets..\n\t\treturn 0;\n\n\tdefault:\t// We give up on everything else\n\t\terrno = EIO;\n\t\treturn -1;\n\t}\n}\n\nssize_t nvm_addr_erase(struct nvm_dev *dev, struct nvm_addr list[], int len,\n\t\t uint16_t flags)\n{\n\treturn nvm_addr_cmd(dev, list, len, NULL, flags,\n\t\t\t NVM_MAGIC_OPCODE_ERASE);\n}\n\nssize_t nvm_addr_write(struct nvm_dev *dev, struct nvm_addr list[], int len,\n\t\t const void *buf, uint16_t flags)\n{\n\tchar *cbuf = (char *)buf;\n\n\treturn nvm_addr_cmd(dev, list, len, cbuf, flags,\n\t\t\t NVM_MAGIC_OPCODE_WRITE);\n}\n\nssize_t nvm_addr_read(struct nvm_dev *dev, struct nvm_addr list[], int len,\n\t\t void *buf, uint16_t flags)\n{\n\treturn nvm_addr_cmd(dev, list, len, buf, flags, NVM_MAGIC_OPCODE_READ);\n}\n\nssize_t nvm_addr_mark(struct nvm_dev *dev, struct nvm_addr list[], int len,\n\t\t uint16_t flags)\n{\n\tswitch (flags) {\n\tcase 0x0:\n\tcase 0x1:\n\tcase 0x2:\n\t\tbreak;\n\tdefault:\n\t\terrno = EINVAL;\n\t\treturn -1;\n\t}\n\n\treturn nvm_addr_cmd(dev, list, len, NULL, flags, 0xF1);\n}\n\nvoid nvm_addr_pr(struct nvm_addr addr)\n{\n\tprintf(\"(%016lu){ ch(%02d), lun(%02d), pl(%d), \",\n\t addr.ppa, addr.g.ch, addr.g.lun, addr.g.pl);\n\tprintf(\"blk(%04d), pg(%03d), sec(%d) }\\n\",\n\t addr.g.blk, addr.g.pg, addr.g.sec);\n}\n\n"
},
{
"alpha_fraction": 0.5966331958770752,
"alphanum_fraction": 0.6089349389076233,
"avg_line_length": 17.835365295410156,
"blob_id": "88db1b0528e6f64e14cd9853797c8bc58e828f6d",
"content_id": "613e1009a088123ff7ace1d544f736d39713c71e",
"detected_licenses": [
"BSD-2-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 3089,
"license_type": "permissive",
"max_line_length": 79,
"num_lines": 164,
"path": "/tests/test_concur.c",
"repo_name": "ivpi/liblightnvm",
"src_encoding": "UTF-8",
"text": "#include <stdio.h>\n#include <stdlib.h>\n#include <errno.h>\n#include <string.h>\n#include <unistd.h>\n#include <liblightnvm.h>\n#include <pthread.h>\n\n#include <CUnit/Basic.h>\n\nstatic char nvm_dev_name[DISK_NAME_LEN] = \"nvme0n1\";\n\nstruct context {\n\tNVM_VBLK blk;\n\tNVM_GEO geo;\n\tNVM_DEV dev;\n\tchar *buf;\n};\n\nstatic void *write_thread(void *priv)\n{\n\tstruct context *ctx = priv;\n\tint i;\n\n\tfor (i = 0; i < ctx->geo.npages; i++) {\n\t\tsize_t count, offset;\n\t\tssize_t err;\n\n\t\tcount = ctx->geo.vpg_nbytes;\n\t\toffset = ctx->geo.vpg_nbytes * i;\n\t\t\n\t\terr = nvm_vblk_pwrite(ctx->blk, ctx->buf, count, offset);\n\t\tCU_ASSERT(!err);\n\t}\n\tpthread_exit(NULL);\n}\n\nstatic void *erase_thread(void *priv)\n{\n\tstruct context *ctx = priv;\n\tint i;\n\n\tfor (i = 0; i < 4; i++) {\n\t\tssize_t err;\n\t\tusleep(2000);\n\t\terr = nvm_vblk_erase(ctx->blk);\t/* ERASE */\n\t\tCU_ASSERT(!err);\n\t}\n\n\tpthread_exit(NULL);\n}\n\n#define NUM_BLOCKS (2)\nvoid test_VBLOCK_CONCUR(void)\n{\n\tNVM_VBLK vblock[2];\n\tNVM_DEV dev;\n\tNVM_GEO geo;\n\tint i;\n\tssize_t err;\n\tstruct context ctx[2];\n\tchar *wbuf;\n\tpthread_t wr_th, er_th;\n\n\tdev = nvm_dev_open(nvm_dev_name);\n\tCU_ASSERT(dev > 0);\n\n\tgeo = nvm_dev_attr_geo(dev);\n\n\tfor (i = 0; i < NUM_BLOCKS; i++) {\n\t\tvblock[i] = nvm_vblk_new();\n\t\terr = nvm_vblk_gets(vblock[i], dev, 0, 0);\n\t\tCU_ASSERT(!err);\n\t}\n\n\twbuf = nvm_buf_alloc(geo, geo.vpg_nbytes);\n\tCU_ASSERT_PTR_NOT_NULL(wbuf);\n\tif (!wbuf) {\n\t\tprintf(\"Failed allocating write buffer(%p)\\n\", wbuf);\n\t\treturn;\n\t}\n\tmemset(wbuf, 0, geo.vpg_nbytes);\n\n\tctx[0].blk = vblock[0];\n\tctx[0].dev = dev;\n\tctx[0].buf = wbuf;\n\tctx[0].geo = geo;\n\n\tctx[1].blk = vblock[1];\n\tctx[1].dev = dev;\n\tctx[1].buf = wbuf;\n\tctx[1].geo = geo;\n\n\tif (pthread_create(&wr_th, NULL, write_thread, &ctx[0])) {\n\t\tfprintf(stderr, \"fail...\\n\");\n\t\treturn;\n\t}\n\n\tif (pthread_create(&er_th, NULL, erase_thread, &ctx[1])) {\n\t\tfprintf(stderr, \"fail2...\\n\");\n\t\treturn;\n\t}\n\n\tpthread_join(wr_th, NULL);\n\tpthread_join(er_th, NULL);\n\n\tfor (i = 0; i < geo.npages; i++) {\n\t\tsize_t count, offset;\n\t\tcount = geo.vpg_nbytes;\n\t\toffset = geo.vpg_nbytes * i;\n\n\t\terr = nvm_vblk_pread(vblock[0], wbuf, count, offset); /* READ */\n\t\tCU_ASSERT(!err);\n\t\tif (err)\n\t\t\tprintf(\"FAILED err(%ld) i(%d), wbuf(%s)\\n\",\n\t\t\t\terr, i, wbuf);\n\t}\n\n\tfor (i = 0; i < NUM_BLOCKS; i++) {\n\t\terr = nvm_vblk_put(vblock[i]);\n\t\tCU_ASSERT(!err);\n\n\t\tnvm_vblk_free(vblock[i]);\n\t}\n\n\tnvm_dev_close(dev);\n}\n\nint main(int argc, char **argv)\n{\n\tif (argc > 1) {\n\t\tif (strlen(argv[1]) > DISK_NAME_LEN) {\n\t\t\tprintf(\"Argument nvm_dev can be maximum %d characters\\n\",\n\t\t\t\tDISK_NAME_LEN - 1);\n\t\t}\n\t\tstrcpy(nvm_dev_name, argv[1]);\n\t}\n\n\tCU_pSuite pSuite = NULL;\n\n\tif (CUE_SUCCESS != CU_initialize_registry())\n\t\treturn CU_get_error();\n\n\tpSuite = CU_add_suite(\"nvm_vblk*\", NULL, NULL);\n\tif (NULL == pSuite) {\n\t\tCU_cleanup_registry();\n\t\treturn CU_get_error();\n\t}\n\n\tif (\n\t(NULL == CU_add_test(pSuite, \"nvm_concur_write-erase\", test_VBLOCK_CONCUR)) ||\n\t0)\n\t{\n\t\tCU_cleanup_registry();\n\t\treturn CU_get_error();\n\t}\n\n\t/* Run all tests using the CUnit Basic interface */\n\tCU_basic_set_mode(CU_BRM_SILENT);\n\tCU_basic_run_tests();\n\tCU_cleanup_registry();\n\n\treturn CU_get_error();\n}\n"
},
{
"alpha_fraction": 0.7366296648979187,
"alphanum_fraction": 0.7386478185653687,
"avg_line_length": 32,
"blob_id": "7ef6e7d66afb6fac20f19ab78924b2f2349b1ba2",
"content_id": "60284b9c1333a4a4e946145e34cf4d563f0b22dc",
"detected_licenses": [
"BSD-2-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "CMake",
"length_bytes": 991,
"license_type": "permissive",
"max_line_length": 95,
"num_lines": 30,
"path": "/examples/CMakeLists.txt",
"repo_name": "ivpi/liblightnvm",
"src_encoding": "UTF-8",
"text": "cmake_minimum_required(VERSION 2.8)\nset(EXAMPLES true CACHE BOOL \"Examples: Include example programs in build\")\nset(EXAMPLES_SBLK_PAR false CACHE BOOL \"Examples-sblk: Enable parallel implementation of sblk\")\nif (NOT EXAMPLES)\n\treturn()\nendif()\n\nset(CMAKE_C_FLAGS_DEBUG \"${CMAKE_C_FLAGS_DEBUG} -DNVM_DEBUG_ENABLED\")\nset(CMAKE_C_FLAGS \"${CMAKE_C_FLAGS} -Wall\")\n\ninclude_directories(\"${CMAKE_SOURCE}/include\")\n\nset(SOURCE_FILES\n\t${CMAKE_CURRENT_SOURCE_DIR}/addr.c\n\t${CMAKE_CURRENT_SOURCE_DIR}/info.c\n\t${CMAKE_CURRENT_SOURCE_DIR}/vblk.c\n\t${CMAKE_CURRENT_SOURCE_DIR}/sblk.c)\n\n#\n# We link against the lightnvm_a to avoid the runtime dependency on liblightnvm\n# NOTE: The dependency on libudev is carried on from liblightnvm\n#\nforeach(SRC_FN ${SOURCE_FILES})\n\tget_filename_component(SRC_FN_WE ${SRC_FN} NAME_WE)\n\tset(EXE_FN \"nvm_${SRC_FN_WE}\")\n\tadd_executable(${EXE_FN} ${SRC_FN})\n\ttarget_link_libraries(${EXE_FN} pthread udev lightnvm_a)\n\n\tinstall(TARGETS ${EXE_FN} DESTINATION bin)\nendforeach()\n\n"
},
{
"alpha_fraction": 0.5498338937759399,
"alphanum_fraction": 0.565780758857727,
"avg_line_length": 19.687284469604492,
"blob_id": "9e3004500607d353e2773481fb4399682bd183fe",
"content_id": "ec7584a82fdef639f0cf7bfd5bf39f0b968753eb",
"detected_licenses": [
"BSD-2-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 6020,
"license_type": "permissive",
"max_line_length": 72,
"num_lines": 291,
"path": "/examples/addr.c",
"repo_name": "ivpi/liblightnvm",
"src_encoding": "UTF-8",
"text": "/* Target info example */\n#include <stdlib.h>\n#include <stdio.h>\n#include <string.h>\n#include <errno.h>\n#include <liblightnvm.h>\n\nint mark(NVM_DEV dev, NVM_GEO geo, NVM_ADDR list[], int len, int flags)\n{\n\tssize_t err;\n\tint i;\n\n\tprintf(\"** nvm_addr_mark(...):\\n\");\n\tfor (i = 0; i < len; ++i) {\n\t\tnvm_addr_pr(list[i]);\n\t}\n\n\tswitch(flags) {\n\t\tcase 0x0:\t// free / good\n\t\tcase 0x1:\t// bad\n\t\tcase 0x2:\t// grown bad\n\t\t\tbreak;\n\t\tdefault:\n\t\t\treturn -EINVAL;\n\t}\n\n\terr = nvm_addr_mark(dev, list, len, flags);\n\tif (err) {\n\t\tprintf(\"FAILED: nvm_dev_mark err(%ld)\\n\", err);\n\t}\n\n\treturn err;\n}\n\nint erase(NVM_DEV dev, NVM_GEO geo, NVM_ADDR list[], int len, int flags)\n{\n\tssize_t err;\n\tint i;\n\n\tprintf(\"** nvm_addr_erase(...):\\n\");\n\tfor (i = 0; i < len; ++i) {\n\t\tnvm_addr_pr(list[i]);\n\t}\n\n\terr = nvm_addr_erase(dev, list, len, NVM_MAGIC_FLAG_DEFAULT);\n\tif (err) {\n\t\tprintf(\"ERR: nvm_addr_write err(%ld)\\n\", err);\n\t}\n\n\treturn err;\n}\n\nint write(NVM_DEV dev, NVM_GEO geo, NVM_ADDR list[], int len, int flags)\n{\n\tint buf_len, i;\n\tchar *buf;\n\tssize_t err;\n\n\tbuf_len = len * geo.nbytes;\n\tbuf = nvm_buf_alloc(geo, buf_len);\n\tif (!buf) {\n\t\tprintf(\"Failed allocating buf\\n\");\n\t\treturn -ENOMEM;\n\t}\n\tnvm_buf_fill(buf, buf_len);\n\n\tprintf(\"** nvm_addr_write(...):\\n\");\n\tfor (i = 0; i < len; ++i) {\n\t\tnvm_addr_pr(list[i]);\n\t}\n\n\terr = nvm_addr_write(dev, list, len, buf, NVM_MAGIC_FLAG_DEFAULT);\n\tif (err) {\n\t\tprintf(\"ERR: nvm_addr_write err(%ld)\\n\", err);\n\t}\n\n\tfree(buf);\n\n\treturn err;\n}\n\nint read(NVM_DEV dev, NVM_GEO geo, NVM_ADDR list[], int len, int flags)\n{\n\tint buf_len, i;\n\tchar *buf;\n\tssize_t err;\n\n\tbuf_len = len * geo.nbytes;\n\tbuf = nvm_buf_alloc(geo, buf_len);\n\tif (!buf) {\n\t\tprintf(\"Failed allocating buf\\n\");\n\t\treturn -ENOMEM;\n\t}\n\n\tprintf(\"** nvm_addr_read(...): \\n\");\n\tfor (i = 0; i < len; ++i) {\n\t\tnvm_addr_pr(list[i]);\n\t}\n\n\terr = nvm_addr_read(dev, list, len, buf, NVM_MAGIC_FLAG_DEFAULT);\n\tif (getenv(\"NVM_BUF_PR\"))\n\t\tnvm_buf_pr(buf, buf_len);\n\tif (err) {\n\t\tprintf(\"ERR: nvm_addr_read err(%ld)\\n\", err);\n\t}\n\n\tfree(buf);\n\n\treturn err;\n}\n\nint fmt_p(NVM_DEV dev, NVM_GEO geo, NVM_ADDR list[], int len, int flags)\n{\n\tint i;\n\t\n\tfor (i = 0; i < len; i++) {\n\t\tnvm_addr_pr(list[i]);\n\t}\n\n\treturn 0;\n}\n\nint fmt_g(NVM_DEV dev, NVM_GEO geo, NVM_ADDR list[], int len, int flags)\n{\n\tnvm_addr_pr(list[0]);\n\n\treturn 0;\n}\n\n#define NVM_CLI_CMD_LEN 50\n\ntypedef struct {\n\tchar name[NVM_CLI_CMD_LEN];\n\tint (*func)(NVM_DEV, NVM_GEO, NVM_ADDR[], int, int);\n\tint argc;\n\tint flags;\n} NVM_CLI_ADDR_CMD;\n\nstatic NVM_CLI_ADDR_CMD cmds[] = {\n\t{\"erase\", erase, -1, 0x0},\n\t{\"write\", write, -1, 0x0},\n\t{\"read\", read, -1, 0x0},\n\t{\"fmt_p\", fmt_p, -1, 0x0},\n\t{\"fmt_g\", fmt_g, 9, 0x0},\n\t{\"mark_f\", mark, -1, 0x0},\n\t{\"mark_b\", mark, -1, 0x1},\n\t{\"mark_g\", mark, -1, 0x2},\n};\n\nstatic int ncmds = sizeof(cmds) / sizeof(cmds[0]);\n\nvoid _usage_pr(char *cli_name)\n{\n\tint cmd;\n\n\tprintf(\"Usage:\\n\");\n\tfor (cmd = 0; cmd < ncmds; ++cmd) {\n\t\tif (cmds[cmd].argc < 0) {\n\t\t\tprintf(\" %s %6s dev_name ppa [ppa...]\\n\",\n\t\t\t\tcli_name, cmds[cmd].name);\n\t\t} else {\n\t\t\tprintf(\" %s %6s dev_name ch lun pl blk pg sec\\n\",\n\t\t\t\tcli_name, cmds[cmd].name);\n\t\t}\n\t}\n}\n\nint main(int argc, char **argv)\n{\n\tchar cmd_name[NVM_CLI_CMD_LEN];\n\tchar 
dev_name[DISK_NAME_LEN+1];\n\tint ret;\n\n\tNVM_CLI_ADDR_CMD *cmd = NULL;\n\n\tNVM_DEV dev;\n\tNVM_GEO geo;\n\tNVM_ADDR list[1024];\n\tint i, len;\n\n\tif (argc < 4) {\n\t\t_usage_pr(argv[0]);\n\t\treturn -EINVAL;\n\t}\n\t\t\t\t\t\t\t// Get `cmd_name`\n\tif (strlen(argv[1]) < 1 || strlen(argv[1]) > (NVM_CLI_CMD_LEN-1)) {\n\t\tprintf(\"Invalid cmd\\n\");\n\t\t_usage_pr(argv[0]);\n\t\treturn -EINVAL;\n\t}\n\tmemset(cmd_name, 0, sizeof(cmd_name));\n\tstrcpy(cmd_name, argv[1]);\n\n\tfor (i = 0; i < ncmds; ++i) {\t\t\t// Get `cmd`\n\t\tif (strcmp(cmd_name, cmds[i].name) == 0) {\n\t\t\tcmd = &cmds[i];\n\t\t\tbreak;\n\t\t}\n\t}\n\tif (!cmd) {\n\t\tprintf(\"Invalid cmd(%s)\\n\", cmd_name);\n\t\t_usage_pr(argv[0]);\n\t\treturn -EINVAL;\n\t}\n\t\t\t\t\t\t\t// Get `dev_name`\n\tif (strlen(argv[2]) < 1 || strlen(argv[2]) > DISK_NAME_LEN) {\n\t\tprintf(\"len(dev_name) > %d\\n\", DISK_NAME_LEN);\n\t\treturn -EINVAL;\n\t}\n\tmemset(dev_name, 0, sizeof(dev_name));\n\tstrcpy(dev_name, argv[2]);\n\n\tswitch(cmd->argc) {\t\t\t\t// Get `list` and `len`\n\t\tcase -1:\t\t\t\t// ppa [ppa..]\n\t\t\tlen = argc - 3;\n\t\t\tfor (i = 0; i < len; ++i) {\n\t\t\t\tlist[i].ppa = atol(argv[i+3]);\n\t\t\t}\n\t\t\tbreak;\n\t\tcase 9:\t\t\t\t\t// ch lun pl blk pg sec\n\t\t\tlen = 1;\n\t\t\tlist[0].g.ch = atoi(argv[3]);\n\t\t\tlist[0].g.lun = atoi(argv[4]);\n\t\t\tlist[0].g.pl = atoi(argv[5]);\n\t\t\tlist[0].g.blk = atoi(argv[6]);\n\t\t\tlist[0].g.pg = atoi(argv[7]);\n\t\t\tlist[0].g.sec = atoi(argv[8]);\n\t\t\tbreak;\n\t\tdefault:\n\t\t\tprintf(\"Invalid argc(%d) for cmd(%s)\\n\",\n\t\t\t\tcmd->argc, cmd_name);\n\t\t\t_usage_pr(argv[0]);\n\t\t\treturn -EINVAL;\n\t}\n\n\tdev = nvm_dev_open(dev_name);\n\tif (!dev) {\n\t\tprintf(\"Failed opening device, dev_name(%s)\\n\", dev_name);\n\t\treturn -EINVAL;\n\t}\n\tgeo = nvm_dev_attr_geo(dev);\n\n\tint ninvalid = 0;\n\tfor (i = 0; i < len; ++i) {\t\t\t// Check `addr`\n\t\tint invalid_addr = 0;\n\t\tif (list[i].g.ch >= geo.nchannels) {\n\t\t\tprintf(\"ERR: ppa(%lu), ch(%u) out of bounds\\n\",\n\t\t\t\tlist[i].ppa, list[i].g.ch);\n\t\t\tinvalid_addr = 1;\n\t\t}\n\t\tif (list[i].g.lun >= geo.nluns) {\n\t\t\tprintf(\"ERR: ppa(%lu), lun(%u) out of bounds\\n\",\n\t\t\t\tlist[i].ppa, list[i].g.lun);\n\t\t\tinvalid_addr = 1;\n\t\t}\n\t\tif (list[i].g.pl >= geo.nplanes) {\n\t\t\tprintf(\"ERR: ppa(%lu), pl(%u) out of bounds\\n\",\n\t\t\t\tlist[i].ppa, list[i].g.pl);\n\t\t\tinvalid_addr = 1;\n\t\t}\n\t\tif (list[i].g.blk >= geo.nblocks) {\n\t\t\tprintf(\"ERR: ppa(%lu), blk(%u) out of bounds\\n\",\n\t\t\t\tlist[i].ppa, list[i].g.blk);\n\t\t\tinvalid_addr = 1;\n\t\t}\n\t\tif (list[i].g.pg >= geo.npages) {\n\t\t\tprintf(\"ERR: ppa(%lu), pg(%u) out of bounds\\n\",\n\t\t\t\tlist[i].ppa, list[i].g.pg);\n\t\t\tinvalid_addr = 1;\n\t\t}\n\t\tif (list[i].g.sec >= geo.nsectors) {\n\t\t\tprintf(\"ERR: ppa(%lu), sec(%u) out of bounds\\n\",\n\t\t\t\tlist[i].ppa, list[i].g.sec);\n\t\t\tinvalid_addr = 1;\n\t\t}\n\t\tninvalid = invalid_addr ? ninvalid + 1 : ninvalid;\n\t}\n\n\tif (ninvalid) {\n\t\tprintf(\"ninvalid(%d) addresses exceeds device boundaries\\n\",\n\t\t\tninvalid);\n\t\tnvm_geo_pr(geo);\n\t\tret = -EINVAL;\n\t} else {\n\t\tret = cmd->func(dev, geo, list, len, cmd->flags);\n\t}\n\n\tnvm_dev_close(dev);\n\n\treturn ret;\n}\n"
}
] | 26 |
ValerieFernandes/DMOJ-Practice
|
https://github.com/ValerieFernandes/DMOJ-Practice
|
8d8566271660548773b99a46a72822bce6dace07
|
3d9d926de2c8aa898ec08b05448702bbef0ff9d1
|
536984d1cd4f5aa77ca54248f18815a606cf18a0
|
refs/heads/master
| 2020-12-10T05:19:11.496787 | 2020-06-14T21:53:45 | 2020-06-14T21:53:45 | 233,511,554 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.4917127192020416,
"alphanum_fraction": 0.509392261505127,
"avg_line_length": 23.85714340209961,
"blob_id": "5e3d92ea45ed1b1897f3206ccfe3cd2c0e6bcfba",
"content_id": "8b6f520f8285f1e7b60cbb58e6f931ba713da582",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 905,
"license_type": "no_license",
"max_line_length": 60,
"num_lines": 35,
"path": "/Complete/TSOC '15 Contest 2 #5 - Bebiliths.py",
"repo_name": "ValerieFernandes/DMOJ-Practice",
"src_encoding": "UTF-8",
"text": "import operator\r\ndef main():\r\n speed = int(input())\r\n bebilith = int(input())\r\n fastBeb = []\r\n slowBeb = []\r\n\r\n for x in range(bebilith):\r\n temp = input().split()\r\n temp[0] = int(temp[0])\r\n temp[1] = int(temp[1])\r\n temp[2] = int(temp[2])\r\n temp.append(x + 1)\r\n if temp[0] >= speed:\r\n fastBeb.append(temp)\r\n else:\r\n slowBeb.append(temp)\r\n \r\n fastBeb = sorted(fastBeb, key=operator.itemgetter(0, 2))\r\n slowBeb = sorted(slowBeb, key=operator.itemgetter(0, 1))\r\n## print(fastBeb)\r\n## print(slowBeb)\r\n\r\n report = int(input())\r\n fastLen = len(fastBeb)\r\n\r\n for x in range(report):\r\n dangerous = int(input())\r\n if dangerous > fastLen:\r\n dangerous -= fastLen\r\n print(slowBeb[dangerous * -1][3])\r\n else:\r\n print(fastBeb[dangerous * -1][3])\r\n\r\nmain()\r\n"
},
{
"alpha_fraction": 0.48969072103500366,
"alphanum_fraction": 0.48969072103500366,
"avg_line_length": 19.55555534362793,
"blob_id": "4768aa0f3a9b7d4f7f5c2b62f25c27478bac6b28",
"content_id": "4d752a13443f61aadd45a26472b2f20322451de2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 194,
"license_type": "no_license",
"max_line_length": 29,
"num_lines": 9,
"path": "/Complete/COCI '08 Contest 6 #1 Buka.py",
"repo_name": "ValerieFernandes/DMOJ-Practice",
"src_encoding": "UTF-8",
"text": "def main():\r\n first = int(input())\r\n operand = input()\r\n second = int(input())\r\n if operand == '+':\r\n print(first + second)\r\n else:\r\n print(first * second)\r\nmain()\r\n"
},
{
"alpha_fraction": 0.419741690158844,
"alphanum_fraction": 0.44464945793151855,
"avg_line_length": 24.439023971557617,
"blob_id": "2bf27476337a913ac3065b186fde49ab4849e77b",
"content_id": "7c538856b221eb6dddfa6c6d5faf699731a9c649",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1084,
"license_type": "no_license",
"max_line_length": 62,
"num_lines": 41,
"path": "/Complete/CCC '07 S3 - Friends.py",
"repo_name": "ValerieFernandes/DMOJ-Practice",
"src_encoding": "UTF-8",
"text": "import queue\r\ndef circle(pairs, x, y):\r\n distance = [-1 for x in range(10000)]\r\n current = queue.Queue()\r\n current.put(x)\r\n\r\n while current.qsize() > 0:\r\n temp = current.get()\r\n for x2 in range(len(pairs[temp])):\r\n if pairs[temp][x2] == y:\r\n return 'Yes', distance[temp] + 1\r\n \r\n elif distance[pairs[temp][x2]] == -1:\r\n current.put(pairs[temp][x2])\r\n distance[pairs[temp][x2]] = distance[temp] + 1\r\n return 'No'\r\n\r\ndef main():\r\n students = int(input())\r\n pairs = [[] for x in range(10000)]\r\n \r\n for x in range(students):\r\n temp = input().split()\r\n v = int(temp[0])\r\n y = int(temp[1])\r\n pairs[v].append(y)\r\n\r\n while True:\r\n line = input().split()\r\n x = int(line[0])\r\n y = int(line[1])\r\n if x == 0:\r\n break\r\n else:\r\n result = circle(pairs, x, y)\r\n\r\n if result == 'No':\r\n print('No')\r\n else:\r\n print(result[0], result[1])\r\nmain()\r\n"
},
{
"alpha_fraction": 0.4923076927661896,
"alphanum_fraction": 0.5,
"avg_line_length": 18,
"blob_id": "d94efeeaa69e8b7cb18f480e051e44ca1257aaa7",
"content_id": "d77dddc796efcf9a5862b4ee443ec7c5c65858f1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 260,
"license_type": "no_license",
"max_line_length": 52,
"num_lines": 13,
"path": "/Complete/Mock CCC '18 Contest 2 J3S1 - An Array Problem.py",
"repo_name": "ValerieFernandes/DMOJ-Practice",
"src_encoding": "UTF-8",
"text": "def main():\r\n length = int(input())\r\n array = []\r\n\r\n total = 0\r\n for x in range(length):\r\n array.append(int(input()))\r\n\r\n\r\n for x in range(length):\r\n total += array[x]\r\n print(int(min((total - max(array)), (total/2))))\r\nmain()\r\n"
},
{
"alpha_fraction": 0.3885209858417511,
"alphanum_fraction": 0.40986019372940063,
"avg_line_length": 21.842105865478516,
"blob_id": "500fb85d450700b90850c27ced1cc1f5203e6510",
"content_id": "37c92dcb4b10f3fa9b4595c6fea7955ec7f896cf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1359,
"license_type": "no_license",
"max_line_length": 55,
"num_lines": 57,
"path": "/Complete/CCC '07 S2 - Boxes.py",
"repo_name": "ValerieFernandes/DMOJ-Practice",
"src_encoding": "UTF-8",
"text": "import sys\r\nimport operator\r\n \r\ndef main():\r\n\r\n data = sys.stdin.read().split('\\n')\r\n available = int(data[0])\r\n choice = []\r\n\r\n for x in range (available):\r\n box = data[x + 1].split()\r\n box[0] = int(box[0])\r\n box[1] = int(box[1])\r\n box[2] = int(box[2])\r\n box.sort()\r\n box.append(box[0]*box[1]*box[2])\r\n choice.append(box)\r\n\r\n #print(choice)\r\n choice = sorted(choice, key=operator.itemgetter(3))\r\n #print(choice)\r\n\r\n newitem = int(data[x + 2])\r\n \r\n #print(newitem)\r\n \r\n for count in range(newitem):\r\n item = data[available + 2 + count].split()\r\n item[0] = int(item[0])\r\n item[1] = int(item[1])\r\n item[2] = int(item[2])\r\n item.sort()\r\n #print(item)\r\n\r\n boxNum = -1\r\n\r\n for x in range(available):\r\n\r\n #print(item)\r\n #print(choice)\r\n \r\n if item[0] <= choice[x][0]:\r\n #print('a')\r\n if item[1] <= choice[x][1]:\r\n #print('b')\r\n if item[2] <= choice[x][2]:\r\n #print('c')\r\n boxNum = x\r\n break\r\n\r\n if boxNum == -1:\r\n print('Item does not fit.')\r\n else:\r\n print(choice[boxNum][3])\r\n \r\n \r\nmain()\r\n"
},
{
"alpha_fraction": 0.8318583965301514,
"alphanum_fraction": 0.8318583965301514,
"avg_line_length": 36.66666793823242,
"blob_id": "d9620e2559b8ec60264957b8318f2464799303e4",
"content_id": "d62a919282329090f8bbfe5ddffb65b39b92f535",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 113,
"license_type": "no_license",
"max_line_length": 48,
"num_lines": 3,
"path": "/README.md",
"repo_name": "ValerieFernandes/DMOJ-Practice",
"src_encoding": "UTF-8",
"text": "# DMOJ-Practice\nSolutions to practice problems submitted on DMOJ\n(Includes Graph Theory and Dynamic Programming)\n"
},
{
"alpha_fraction": 0.37599024176597595,
"alphanum_fraction": 0.3930530250072479,
"avg_line_length": 31.489795684814453,
"blob_id": "00a450c4cbf5fb882b8e8ede65184488c02c4dde",
"content_id": "9eb4a42d6efaa6d6413bf65608cad61bd428ed73",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1641,
"license_type": "no_license",
"max_line_length": 70,
"num_lines": 49,
"path": "/Complete/DWITE '09 R6 #5 - Air Travel Planning.py",
"repo_name": "ValerieFernandes/DMOJ-Practice",
"src_encoding": "UTF-8",
"text": "def main():\r\n for x in range(5):\r\n flights = int(input())\r\n dest = dict.fromkeys(['YYZ', 'SEA'])\r\n dest['YYZ'] = 1\r\n dest['SEA'] = 2\r\n dist = [0, float(\"inf\")]\r\n visit = set()\r\n edges = [[], [], []]\r\n for x in range(flights):\r\n current = input().split()\r\n current[2] = int(current[2])\r\n if current[0] not in dest:\r\n dist.append(float(\"inf\"))\r\n dest[current[0]] = len(dist)\r\n edges.append([])\r\n \r\n if current[1] not in dest:\r\n dist.append(float(\"inf\"))\r\n dest[current[1]] = len(dist)\r\n edges.append([])\r\n current[0] = dest[current[0]]\r\n current[1] = dest[current[1]]\r\n \r\n edges[current[0]].append([current[1], current[2]])\r\n \r\n places = len(dist)\r\n\r\n while True:\r\n minNode = None\r\n for x in range(1, places + 1): \r\n if x not in visit:\r\n if minNode == None:\r\n minNode = x\r\n elif dist[x - 1] <= dist[minNode - 1]:\r\n minNode = x\r\n visit.add(minNode)\r\n if minNode == 2:\r\n break\r\n else:\r\n for x in range(len(edges[minNode])):\r\n current = edges[minNode][x][0]\r\n newDist = dist[minNode - 1] + edges[minNode][x][1]\r\n if newDist < dist[current - 1]:\r\n dist[current - 1] = newDist \r\n print(dist[1]) \r\n \r\n \r\nmain()\r\n"
},
{
"alpha_fraction": 0.37771129608154297,
"alphanum_fraction": 0.3821989595890045,
"avg_line_length": 17.640625,
"blob_id": "eec408e0a3806829847cb7bd22533eefe497b8b6",
"content_id": "930d59fb935b67e7270a0e5e7639fd3ef97f4ac5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1337,
"license_type": "no_license",
"max_line_length": 92,
"num_lines": 64,
"path": "/Complete/CCC '05 J5 - Bananas.py",
"repo_name": "ValerieFernandes/DMOJ-Practice",
"src_encoding": "UTF-8",
"text": "##def aWord(entry):\r\n## if entry == 'A':\r\n## return True\r\n##\r\n## elif len(entry) >= 3 and entry[0] == 'B' and monkey(entry[1:-2]) and entry[-1] == 'S':\r\n## return True\r\n## else:\r\n## return False\r\n##\r\n##\r\n##\r\n##def monkey(entry):\r\n## if aWord(entry):\r\n## return True\r\n##\r\n## else:\r\n## found = False\r\n## for x in range(2, len(entry)):\r\n## found = found or (aWord(entry) and entry[x] == 'N' and monkey(entry[x:]))\r\n## return found\r\n## \r\n##def main():\r\n## \r\n## while True:\r\n## temp = input()\r\n## if temp == 'X':\r\n## break\r\n## if monkey(temp):\r\n## print('YES')\r\n## else:\r\n## print('NO')\r\n##main()\r\n\r\ndef monkey(entry):\r\n\r\n\r\n while True:\r\n entry = entry.replace('ANA', 'A')\r\n entry = entry.replace('BAS', 'A')\r\n if ('ANA' not in entry) and ('BAS' not in entry):\r\n break\r\n\r\n\r\n if entry == 'A':\r\n return True\r\n else:\r\n return False\r\n\r\n\r\ndef main():\r\n \r\n while True:\r\n\r\n temp = input()\r\n\r\n if temp == 'X':\r\n break\r\n \r\n if monkey(temp):\r\n print('YES')\r\n else:\r\n print('NO')\r\n \r\nmain()\r\n\r\n \r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n"
},
{
"alpha_fraction": 0.3571428656578064,
"alphanum_fraction": 0.3728223145008087,
"avg_line_length": 19.660377502441406,
"blob_id": "818c05ca66b554fbce35689a3a59b7fb4c0441f8",
"content_id": "7036757badb4900fcac087b6019591eabe7c2b8a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1148,
"license_type": "no_license",
"max_line_length": 59,
"num_lines": 53,
"path": "/Complete/CCC '06 J4 - It's tough being a teen!.py",
"repo_name": "ValerieFernandes/DMOJ-Practice",
"src_encoding": "UTF-8",
"text": "import queue\r\ndef schedule(entry):\r\n done = set()\r\n order = ''\r\n visit = queue.Queue()\r\n count = 0\r\n for x in range(1, 8):\r\n if entry[x] == []:\r\n visit.put(x)\r\n done.add(x)\r\n order += str(x) + ' '\r\n break\r\n \r\n while visit.qsize() > 0:\r\n temp = visit.get()\r\n for x in range(1, 8):\r\n if temp in entry[x]:\r\n entry[x].remove(temp)\r\n\r\n for x in range(1, 8):\r\n if entry[x] == [] and x not in done:\r\n visit.put(x)\r\n done.add(x)\r\n order += str(x) + ' '\r\n break\r\n \r\n \r\n \r\n if len(order) != 14:\r\n\r\n return 'Cannot complete these tasks. Going to bed.'\r\n else:\r\n return order[:-1]\r\n \r\n \r\n \r\n \r\ndef main():\r\n order = [[0], [2], [], [], [1, 3], [3], [], [1]]\r\n while True:\r\n before = int(input())\r\n after = int(input())\r\n if before == 0:\r\n break\r\n order[after].append(before)\r\n\r\n print(schedule(order))\r\n \r\n\r\n\r\n\r\n \r\nmain()\r\n"
},
{
"alpha_fraction": 0.5072886347770691,
"alphanum_fraction": 0.5364431738853455,
"avg_line_length": 20.866666793823242,
"blob_id": "d4a56943d4a10c643d392c9995e19301d05fb4ac",
"content_id": "33b92a4f23579b9aef676de4adb48ab1e6ebe7e1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 343,
"license_type": "no_license",
"max_line_length": 81,
"num_lines": 15,
"path": "/Complete/CCC '13 S2 - Bridge Transport.py",
"repo_name": "ValerieFernandes/DMOJ-Practice",
"src_encoding": "UTF-8",
"text": "maxWeight = int(input())\r\ncars = int(input())\r\nweights = [0, 0, 0]\r\nlast = True\r\nfor x in range(cars):\r\n weights.append(int(input()))\r\n\r\nfor x in range(3, cars + 3):\r\n if weights[x] + weights[x - 1] + weights[x - 2] + weights[x - 3] > maxWeight:\r\n last = False\r\n break\r\nif last:\r\n print(x - 2)\r\nelse:\r\n print(x - 3)\r\n"
},
{
"alpha_fraction": 0.470664918422699,
"alphanum_fraction": 0.4856584072113037,
"avg_line_length": 13.54838752746582,
"blob_id": "f6936ef4417dd1a7d4edef33b28ea0bcad7422ca",
"content_id": "4cfa3b3f1a0da540eaf4115ad7ca3d8590ff6d15",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1534,
"license_type": "no_license",
"max_line_length": 52,
"num_lines": 93,
"path": "/Complete/COCI '14 Contest 2 #2 Utrka.py",
"repo_name": "ValerieFernandes/DMOJ-Practice",
"src_encoding": "UTF-8",
"text": "##participants = int(input())\r\n##\r\n##names = []\r\n##\r\n##for i in range(participants * 2 - 1):\r\n## names.append(input())\r\n## \r\n##\r\n##for i in range (participants):\r\n## if names.count(names[i]) % 2 == 1:\r\n## print(names[i])\r\n## break\r\n\r\n\r\n##participants = int(input())\r\n##\r\n##names = []\r\n##\r\n##for i in range(participants):\r\n## names.append(input())\r\n##\r\n##for i in range(participants - 1):\r\n## names.clear(input())\r\n##\r\n##print(names[0])\r\n\r\n\r\n##participants = int(input())\r\n##\r\n##names = []\r\n##\r\n##for i in range(participants * 2 - 1):\r\n##\r\n## temp = input()\r\n## if temp in names:\r\n## names.remove(temp)\r\n##\r\n## else:\r\n## names.append(temp)\r\n##\r\n##print(names[0])\r\n\r\n\r\n\r\n\r\n\r\n\r\n##participants = int(input())\r\n##\r\n##names = []\r\n##\r\n##for i in range(participants * 2 - 1):\r\n## names.append(input())\r\n##\r\n##\r\n##names.sort()\r\n##def name(names):\r\n##\r\n## for i in range (0, (len(names) - 1), 2):\r\n##\r\n## if names[i] != names[(i + 1)]:\r\n## return(names[i])\r\n##\r\n## return(names[-1])\r\n##\r\n##print(name(names))\r\n\r\n \r\nfrom collections import Counter\r\n\r\nimport math\r\n\r\nparticipants = int(input())\r\n\r\nnames = []\r\n\r\nfinish = []\r\n\r\nfor i in range(participants):\r\n names.append(input())\r\n \r\n\r\nfor i in range (participants, participants * 2 - 1):\r\n finish.append(input())\r\n \r\n\r\ntemp1 = Counter(names)\r\n\r\ntemp2 = Counter(finish)\r\n\r\ndifference = temp1 - temp2\r\n\r\nprint(temp1)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n"
},
{
"alpha_fraction": 0.47625699639320374,
"alphanum_fraction": 0.5111731886863708,
"avg_line_length": 22.689655303955078,
"blob_id": "323ce2df1efb34678315e08dfed07d5f65758164",
"content_id": "b16b28beaebf81a539be5d9b33c6545f42124d46",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 716,
"license_type": "no_license",
"max_line_length": 46,
"num_lines": 29,
"path": "/Complete/CCC '10 S1 - Computer Purchase.py",
"repo_name": "ValerieFernandes/DMOJ-Practice",
"src_encoding": "UTF-8",
"text": "from operator import itemgetter\r\ncomputers = int(input())\r\n\r\ninfo = []\r\nfor x in range(computers):\r\n line = input().split()\r\n ram = int(line[1])\r\n cpu = int(line[2])\r\n diskDrive = int(line[3])\r\n num = 2 * ram + 3 * cpu + diskDrive\r\n info.append([line[0], num])\r\n\r\ninfo.sort(key = itemgetter(1), reverse = True)\r\nif computers == 1:\r\n print(info[0][0]) \r\nelif computers >= 2:\r\n first = info.pop(0)\r\n second = info.pop(0)\r\n\r\n if first[1] != second[1]:\r\n print(first[0])\r\n print(second[0])\r\n else:\r\n if first[0][0] > second[0][0]:\r\n print(second[0])\r\n print(first[0])\r\n else:\r\n print(first[0])\r\n print(second[0])\r\n"
},
{
"alpha_fraction": 0.3894081115722656,
"alphanum_fraction": 0.3987538814544678,
"avg_line_length": 22.69230842590332,
"blob_id": "5c962138dff23f67918b7e82dbde0561f478def6",
"content_id": "1b0b25710f2856a57342327faa2d44f3697f1db6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 321,
"license_type": "no_license",
"max_line_length": 44,
"num_lines": 13,
"path": "/Complete/DWITE '08 R5 #1 - Baby Diff.py",
"repo_name": "ValerieFernandes/DMOJ-Practice",
"src_encoding": "UTF-8",
"text": "def main():\r\n for x in range(5):\r\n first = input()\r\n second = input()\r\n same = 0\r\n small = min(len(first), len(second))\r\n for x in range(small):\r\n if first[x] == second[x]:\r\n same += 1\r\n else:\r\n break\r\n print(same)\r\nmain()\r\n"
},
{
"alpha_fraction": 0.44536423683166504,
"alphanum_fraction": 0.46357616782188416,
"avg_line_length": 30.648649215698242,
"blob_id": "cddc20eedd45badaf54a871a5db5bc796f12daf8",
"content_id": "89b2e6599f32035810ba6e4c8dce28bee4d6bb00",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1208,
"license_type": "no_license",
"max_line_length": 60,
"num_lines": 37,
"path": "/Complete/Single Source Shortest Path.py",
"repo_name": "ValerieFernandes/DMOJ-Practice",
"src_encoding": "UTF-8",
"text": "#Solution for https://dmoj.ca/problem/sssp\r\ndef main():\r\n line = input().split()\r\n vertices = int(line[0])\r\n edgeNum = int(line[1])\r\n adj = [[] for x in range(vertices + 1)]\r\n dist = [0]\r\n visit = set()\r\n for x in range(vertices - 1):\r\n dist.append(float(\"inf\"))\r\n for x in range(edgeNum):\r\n edge = input().split()\r\n edge = [int(x) for x in edge]\r\n adj[edge[0]].append([edge[1], edge[2]])\r\n adj[edge[1]].append([edge[0], edge[2]])\r\n \r\n for x2 in range(vertices):\r\n minNode = None\r\n for x in range(1, vertices + 1): \r\n if x not in visit:\r\n if minNode == None:\r\n minNode = x\r\n elif dist[x - 1] <= dist[minNode - 1]:\r\n minNode = x\r\n visit.add(minNode)\r\n \r\n for x in range(len(adj[minNode])):\r\n current = adj[minNode][x][0]\r\n newDist = dist[minNode - 1] + adj[minNode][x][1]\r\n if newDist < dist[current - 1]:\r\n dist[current - 1] = newDist \r\n for x in range(vertices):\r\n if dist[x] == float(\"inf\"):\r\n print(-1)\r\n else:\r\n print(dist[x])\r\nmain()\r\n"
},
{
"alpha_fraction": 0.4605405330657959,
"alphanum_fraction": 0.4843243360519409,
"avg_line_length": 21.973684310913086,
"blob_id": "909c90bb3319da34c3fb5020b2b4b5b273ca17e5",
"content_id": "2d0a0fb3d7b67b34b702a3131df433e338235986",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 925,
"license_type": "no_license",
"max_line_length": 45,
"num_lines": 38,
"path": "/Complete/COCI '07 Contest 6 #1 Parking.py",
"repo_name": "ValerieFernandes/DMOJ-Practice",
"src_encoding": "UTF-8",
"text": "def main():\r\n prices = input().split()\r\n a = int(prices[0])\r\n b = int(prices[1])\r\n c = int(prices[2])\r\n \r\n time = [0 for x in range(101)]\r\n \r\n truckOne = input().split()\r\n truckTwo = input().split()\r\n truckThree = input().split()\r\n \r\n arriveOne = int(truckOne[0])\r\n departOne = int(truckOne[1])\r\n arriveTwo = int(truckTwo[0])\r\n departTwo = int(truckTwo[1])\r\n arriveThree = int(truckThree[0])\r\n departThree = int(truckThree[1])\r\n \r\n for x in range(arriveOne, departOne):\r\n time[x] += 1\r\n\r\n for x in range(arriveTwo, departTwo):\r\n time[x] += 1\r\n \r\n for x in range(arriveThree, departThree):\r\n time[x] += 1\r\n\r\n total = 0\r\n for item in time:\r\n if item == 1:\r\n total += a\r\n if item == 2:\r\n total += b * 2\r\n if item == 3:\r\n total += c * 3\r\n print(total)\r\nmain()\r\n \r\n"
},
{
"alpha_fraction": 0.40645161271095276,
"alphanum_fraction": 0.42709678411483765,
"avg_line_length": 28.440000534057617,
"blob_id": "4de26f1f423867a60e75da85896c5c5ead80713b",
"content_id": "2a68ac1862616e61dce8b70a49b6d1a91fd650c6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 775,
"license_type": "no_license",
"max_line_length": 64,
"num_lines": 25,
"path": "/Complete/CCC '09 S2 - Lights Going on and Off.py",
"repo_name": "ValerieFernandes/DMOJ-Practice",
"src_encoding": "UTF-8",
"text": "\r\ndef main():\r\n rows = int(input())\r\n lights = int(input())\r\n grid = []\r\n choices = [[] for a in range(rows)]\r\n for x in range(rows):\r\n line = input().split()\r\n line = [int(x) for x in line]\r\n choices[x].append(line)\r\n \r\n for x in range(rows - 1):\r\n for x2 in range(len(choices[x])):\r\n new = []\r\n for x3 in range(lights):\r\n if choices [x][x2][x3] != choices[x + 1][0][x3]:\r\n new.append(1)\r\n else:\r\n new.append(0)\r\n choices[x + 1].append(new)\r\n choices[x + 1] = set(tuple(i) for i in choices[x + 1])\r\n choices[x + 1] = list(choices[x + 1])\r\n\r\n print(len(choices[rows - 1]))\r\n \r\nmain() \r\n \r\n\r\n\r\n"
},
{
"alpha_fraction": 0.43809524178504944,
"alphanum_fraction": 0.46666666865348816,
"avg_line_length": 25.032258987426758,
"blob_id": "3c9cec06fb3bf4c993aa8113f19856cda84975bb",
"content_id": "1a5df548c77dbe553ccb61ea96773def5f6709e8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 840,
"license_type": "no_license",
"max_line_length": 65,
"num_lines": 31,
"path": "/Complete/CCC '11 S3 - Alice Through the Looking Glass.py",
"repo_name": "ValerieFernandes/DMOJ-Practice",
"src_encoding": "UTF-8",
"text": "\r\ndef is_crystal(m,x):\r\n if m >= 1:\r\n exponent = 5 ** (m-1)\r\n placement = x // exponent\r\n if placement == 0 or placement == 4:\r\n return 0\r\n elif placement == 1 or placement == 3:\r\n return exponent + is_crystal(m - 1, x % exponent)\r\n elif placement == 2:\r\n return 2 * exponent + is_crystal(m - 1, x % exponent)\r\n return maxheightatx\r\n return 0\r\n\r\nimport sys\r\ndef main():\r\n\r\n data = sys.stdin.read().split('\\n')\r\n test = int(data[0])\r\n coord = []\r\n for x in range(test):\r\n case = data[x + 1].split()\r\n case[0] = int(case[0])\r\n case[1] = int(case[1])\r\n case[2] = int(case[2])\r\n\r\n if case[2] < is_crystal(case[0], case[1]):\r\n print('crystal')\r\n else:\r\n print('empty')\r\n \r\nmain()\r\n"
},
{
"alpha_fraction": 0.4828628897666931,
"alphanum_fraction": 0.48891130089759827,
"avg_line_length": 24.105262756347656,
"blob_id": "697f0c6938a3c38b4be54e2b0d111db250b6d7ca",
"content_id": "7e7d72eadc1794c66d41f2f0e27249897e9e504f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 992,
"license_type": "no_license",
"max_line_length": 49,
"num_lines": 38,
"path": "/Complete/MWC '15 #4 P4 Dealing with Knots.py",
"repo_name": "ValerieFernandes/DMOJ-Practice",
"src_encoding": "UTF-8",
"text": "import queue\r\ndef tangled(entry, one, two):\r\n current = queue.Queue()\r\n visit = set()\r\n current.put(one)\r\n visit.add(one)\r\n found = False\r\n while current.qsize() > 0:\r\n knot = current.get()\r\n for x in range(len(entry[knot])):\r\n if entry[knot][x] == two:\r\n found = True\r\n break\r\n if entry[knot][x] not in visit:\r\n visit.add(entry[knot][x])\r\n current.put(entry[knot][x])\r\n if found == True:\r\n break\r\n if found == True:\r\n return 'Tangled'\r\n else:\r\n return 'Not Tangled'\r\n \r\n \r\n \r\ndef main():\r\n connections = int(input())\r\n strings = [[] for x in range(connections + 1)]\r\n for x in range(connections):\r\n temp = input().split()\r\n y = int(temp[0])\r\n z = int(temp[1])\r\n strings[y].append(z)\r\n last = input().split()\r\n one = int(last[0])\r\n two = int(last[1])\r\n print(tangled(strings, one, two))\r\nmain()\r\n"
},
{
"alpha_fraction": 0.44325241446495056,
"alphanum_fraction": 0.4816487729549408,
"avg_line_length": 26.274192810058594,
"blob_id": "4a9b9fe21392a8902818ecafadfaecc390b3f75b",
"content_id": "c74dabdd7cce9fb14512f42b2cfae9b6f2ff7b75",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1771,
"license_type": "no_license",
"max_line_length": 51,
"num_lines": 62,
"path": "/Complete/CCC '16 J4 - Arrival Time.py",
"repo_name": "ValerieFernandes/DMOJ-Practice",
"src_encoding": "UTF-8",
"text": "def main():\r\n depart = input()\r\n elapsed = 120\r\n hours = int(depart[:2])\r\n minutes = int(depart[3:])\r\n totalMin = 60 * hours + minutes\r\n \r\n if totalMin < 420 and elapsed > 0:\r\n timeLeft = 420 - totalMin\r\n if timeLeft >= 120:\r\n elapsed = 0\r\n totalMin = totalMin + 120\r\n else:\r\n elapsed = elapsed - timeLeft\r\n totalMin = totalMin + timeLeft\r\n \r\n if totalMin < 600 and elapsed > 0:\r\n timeLeft = 600 - totalMin\r\n if timeLeft >= 2 * elapsed:\r\n totalMin = totalMin + elapsed * 2\r\n elapsed = 0\r\n else:\r\n elapsed = int(elapsed - (timeLeft / 2))\r\n totalMin = totalMin + timeLeft\r\n \r\n \r\n if totalMin < 900 and elapsed > 0:\r\n timeLeft = 900 - totalMin\r\n if timeLeft >= elapsed:\r\n totalMin = totalMin + elapsed\r\n elapsed = 0\r\n else:\r\n elapsed = elapsed - timeLeft\r\n totalMin = totalMin + timeLeft\r\n \r\n \r\n if totalMin < 1140 and elapsed > 0:\r\n timeLeft = 1140 - totalMin\r\n if timeLeft >= 2 * elapsed:\r\n totalMin = totalMin + elapsed * 2\r\n elapsed = 0\r\n else:\r\n elapsed = int(elapsed - (timeLeft / 2))\r\n totalMin = totalMin + timeLeft\r\n \r\n\r\n if elapsed > 0:\r\n totalMin = totalMin + elapsed\r\n\r\n hours = totalMin // 60\r\n minutes = int(totalMin - (60 * hours))\r\n hours = int((hours + 24) % 24)\r\n if hours < 10:\r\n hours = '0' + str(hours)\r\n else:\r\n hours = str(hours)\r\n if minutes < 10:\r\n minutes = '0' + str(minutes)\r\n else:\r\n minutes = str(minutes)\r\n print(hours + ':' + minutes)\r\nmain()\r\n \r\n \r\n\r\n"
}
] | 19 |
pmer/depends
|
https://github.com/pmer/depends
|
192ff5c3c09cf16910ef882aff2a551c8cfae82e
|
bfdfc43380fecde4fbcacfca6b5d76fe822a44e1
|
a17417b7f0a5ae046c180e49f0d8bef49415d7a5
|
refs/heads/master
| 2020-07-07T15:27:19.344612 | 2019-08-20T14:33:44 | 2019-08-20T14:33:44 | 203,389,396 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5334204435348511,
"alphanum_fraction": 0.5377981066703796,
"avg_line_length": 28.95107650756836,
"blob_id": "5ac0dc8f3d01b9a8d94f924313c50b615957420c",
"content_id": "d0d5776ecda1a345d22bfd9e4458b06e302217dc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 15305,
"license_type": "no_license",
"max_line_length": 115,
"num_lines": 511,
"path": "/depends.py",
"repo_name": "pmer/depends",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\nimport os\nimport os.path\nimport re\nimport subprocess\nimport sys\nimport zipfile\n\nobject_file_search_paths = [\n '/Applications',\n '/Library',\n '/System',\n '/usr',\n os.environ['HOME'],\n]\n\n\nif not os.path.isfile('mach-o-files.txt'):\n print('Searching for Mach-O files...')\n\n import magic\n\n args = [\n '/usr/bin/find',\n *object_file_search_paths,\n '(',\n '-perm', '+0111',\n '-or',\n '-name', '*.dylib',\n ')',\n '-and',\n '-type', 'f',\n ]\n executable_files = subprocess.run(args, capture_output=True, text=True).stdout\n\n count = 0\n\n with open('mach-o-files.txt', 'w') as mach_o_files:\n for line in executable_files.split('\\n'):\n line = line.rstrip()\n if line == '/usr/bin/sudo':\n continue\n if line.startswith('/usr/libexec'):\n continue\n if line.startswith('/usr/sbin'):\n continue\n if line == '':\n continue\n\n m = magic.from_file(line)\n\n if 'Mach-O' in m:\n print(line, file=mach_o_files)\n count += 1\n\n print('Found', count)\n\n\nclass ObjectFile:\n def __init__(self, object_path):\n self.path = object_path\n self.filetype = None\n self.dylibs = []\n self.plugins = []\n\n\nobject_files = {}\nobjdump_parsers = {}\n\n\nfor line in open('mach-o-files.txt'):\n object_path = line.rstrip()\n object_files[object_path] = ObjectFile(object_path)\n\n\nif not os.path.isfile('objdumps.zip'):\n print('Extracting Mach-O file headers...')\n\n objdump_exe = subprocess.run(['/usr/bin/xcrun', '-f', 'objdump'],\n check=True,\n capture_output=True,\n text=True).stdout.rstrip()\n\n def objdump(object_path):\n args = [objdump_exe, '-arch=x86_64', '-macho', '-private-headers', '-non-verbose', object_path]\n try:\n completed_proc = subprocess.run(args, check=True, capture_output=True, text=True)\n except subprocess.CalledProcessError:\n print('WARNING')\n print('object_path', object_path)\n print('objdump died')\n print()\n return None\n\n stdout = completed_proc.stdout\n stderr = completed_proc.stderr\n\n if 'does not contain architecture' in stderr:\n return None\n if 'No architecture specified' in stderr:\n return None\n\n return stdout\n\n count = 0\n\n with zipfile.ZipFile('objdumps.zip', 'w') as z:\n for object_file in object_files.values():\n objdump_output = objdump(object_file.path)\n if not objdump_output:\n continue\n z.writestr(object_file.path, objdump_output)\n count += 1\n\n print('Extracted', count)\n\n\nclass ObjDumpParser:\n filetypes = {\n 2: 'Executable',\n 6: 'DyLib',\n 7: 'Dynamic linker',\n 8: 'Bundle',\n 9: 'DyLib stub',\n 11: 'Kernel extension'\n }\n\n loader_commands = set([\n 'LC_BUILD_VERSION',\n 'LC_CODE_SIGNATURE',\n 'LC_DATA_IN_CODE',\n 'LC_DYLD_ENVIRONMENT',\n 'LC_DYLD_INFO',\n 'LC_DYLD_INFO_ONLY',\n 'LC_DYLIB_CODE_SIGN_DRS',\n 'LC_DYSYMTAB',\n 'LC_FUNCTION_STARTS',\n 'LC_ID_DYLIB',\n 'LC_ID_DYLINKER',\n 'LC_LAZY_LOAD_DYLIB',\n 'LC_LOAD_DYLIB',\n 'LC_LOAD_DYLINKER',\n 'LC_LOAD_UPWARD_DYLIB',\n 'LC_LOAD_WEAK_DYLIB',\n 'LC_MAIN',\n 'LC_REEXPORT_DYLIB',\n 'LC_ROUTINES_64',\n 'LC_RPATH',\n 'LC_SEGMENT_64',\n 'LC_SEGMENT_SPLIT_INFO',\n 'LC_SOURCE_VERSION',\n 'LC_SUB_CLIENT',\n 'LC_SUB_FRAMEWORK',\n 'LC_SYMTAB',\n 'LC_UNIXTHREAD',\n 'LC_UUID',\n 'LC_VERSION_MIN_IPHONEOS',\n 'LC_VERSION_MIN_MACOSX',\n 'LC_VERSION_MIN_TVOS',\n 'LC_VERSION_MIN_WATCHOS',\n ])\n\n environ_keys = set([\n 'DYLD_VERSIONED_FRAMEWORK_PATH',\n ])\n\n subpaths_with_missing_dylib = [\n '/Library/Caches/com.apple.xbs/Sources/iTunesOpenJDK/iTunesOpenJDK-180.2/freetype/lib/libfreetype.6.dylib',\n 'X11',\n ]\n\n object_subpaths_with_missing_dylibs = [\n 
'appletvos',\n 'Application Loader.app',\n 'iphoneos',\n 'Simulator.app',\n 'Simulator.platform',\n 'simulator/',\n 'TLA+ Toolbox.app',\n 'TsunagariC-Testing',\n 'watchos',\n 'Xcode.app/Contents/Developer/usr/share/xcs/CouchDB',\n ]\n\n strange_exe_files = {\n 'TLA+ Toolbox': 'toolbox',\n }\n\n def __init__(self, object_file):\n self.object_file = object_file\n self.objdump_output = None\n self.lines = objdump_output.split('\\n')\n self.environ = {}\n self.install_name = None\n self.loader_path = None\n self.executable_path = None\n self.executable_file = None\n self.rpaths = []\n self.expanded_rpaths = []\n self.load_dylib_commands = []\n self.load_weak_dylib_commands = []\n\n assert(self.lines[1] == 'Mach header')\n\n def parse(self, objdump_output):\n self.objdump_output = objdump_output\n\n self._parse_filetype()\n self._set_loader_and_executable_paths()\n self._parse_load_commands()\n\n self.objdump_output = None\n\n def resolve_dylibs(self):\n self._evaluate_load_dylib_commands()\n\n def _parse_filetype(self):\n m = re.search(r'^ *[^ ]* *\\d* *\\d* *[^ ]* *(\\d*)', self.lines[3])\n assert(m)\n\n filetype_code = int(m[1])\n\n try:\n object_file.filetype = self.filetypes[filetype_code]\n except:\n pass\n\n if not object_file.filetype:\n print('object_file.path', self.object_file.path)\n print('filetype', filetype_code)\n raise Exception('Unknown file type')\n\n def _set_loader_and_executable_paths(self):\n slash = self.object_file.path.rindex('/')\n loader_path = self.object_file.path[0:slash]\n\n self.loader_path = loader_path\n\n filetype = self.object_file.filetype\n\n if filetype == 'Executable':\n executable_path = loader_path\n elif filetype == 'DyLib':\n self._set_dylib_executable_path()\n return\n elif filetype == 'Dynamic linker':\n return\n elif filetype == 'Bundle':\n executable_path = None\n elif filetype == 'DyLib stub':\n return\n elif filetype == 'Kernel extension':\n executable_path = None\n\n self.executable_path = executable_path\n\n def _set_dylib_executable_path(self):\n path = self.object_file.path\n\n app_ext_start = path.rfind('.appex/')\n if app_ext_start == -1:\n app_ext_start = path.rfind('.app/')\n if app_ext_start == -1:\n return\n app_ext_end = path.find('/', app_ext_start)\n\n self.executable_path = path[0:app_ext_end] + '/Contents/MacOS'\n\n app_name_start = path.rindex('/', 0, app_ext_start)\n app_name = path[app_name_start+1:app_ext_start]\n\n if app_name in self.strange_exe_files:\n app_name = self.strange_exe_files[app_name]\n\n self.executable_file = self.executable_path + '/' + app_name\n\n def _parse_load_commands(self):\n cmd = None\n\n for line in self.lines[4:]:\n m = re.search(r'^ *([^ ]*) *(.*)', line)\n assert(m)\n\n key = m[1]\n value = m[2]\n\n if key == 'cmd':\n cmd = value\n if not cmd in self.loader_commands:\n print('object_file.path', self.object_file.path)\n print(self.objdump_output)\n print('cmd', cmd)\n raise Exception('Unknown loader command')\n elif cmd == 'LC_DYLD_ENVIRONMENT' and key == 'name':\n for assignment in value.split(':'):\n key, value = assignment.split('=')\n if not key in self.environ_keys:\n print('object_file.path', self.object_file.path)\n print(assignment)\n raise Exception('Unknown environment variable')\n self.environ[key] = value\n elif cmd == 'LC_ID_DYLIB' and key == 'name':\n m = re.search(r'^(.*) \\(offset 24\\)$', value)\n assert(m)\n\n name = m[1]\n self.install_name = name\n elif cmd == 'LC_LAZY_LOAD_DYLIB' and key == 'name':\n self._add_dylib(value, weak=False)\n elif cmd == 'LC_LOAD_DYLIB' and key == 
'name':\n self._add_dylib(value, weak=False)\n elif cmd == 'LC_LOAD_WEAK_DYLIB' and key == 'name':\n self._add_dylib(value, weak=True)\n elif cmd == 'LC_LOAD_UPWARD_DYLIB' and key == 'name':\n self._add_dylib(value, weak=False)\n elif cmd == 'LC_RPATH' and key == 'path':\n self._add_rpath(value)\n\n def _add_dylib(self, name, weak):\n m = re.search(r'^(.*) \\(offset 24\\)$', name)\n assert(m)\n name = m[1]\n\n if weak:\n self.load_weak_dylib_commands.append(name)\n else:\n self.load_dylib_commands.append(name)\n\n def _add_rpath(self, path):\n m = re.search(r'^(.*) \\(offset 12\\)$', path)\n assert(m)\n path = m[1]\n\n expanded_path = None\n\n if path.startswith('@loader_path'):\n expanded_path = self.loader_path + path[12:]\n elif path.startswith('@executable_path'):\n if self.executable_path:\n expanded_path = self.executable_path + path[16:]\n else:\n expanded_path = path\n\n if expanded_path:\n expanded_path = os.path.realpath(expanded_path)\n\n self.rpaths.append(path)\n self.expanded_rpaths.append(expanded_path)\n\n def _evaluate_load_dylib_commands(self):\n for dylib_name in self.load_dylib_commands:\n self._evaluate_load_dylib_command(dylib_name, weak=False)\n for dylib_name in self.load_weak_dylib_commands:\n self._evaluate_load_dylib_command(dylib_name, weak=True)\n\n def _evaluate_load_dylib_command(self, dylib_name, weak):\n if dylib_name.startswith('@loader_path'):\n dylib_name = self.loader_path + dylib_name[12:]\n elif dylib_name.startswith('@executable_path'):\n dylib_name = self._search_executable_path(dylib_name[16:])\n elif dylib_name.startswith('@rpath'):\n dylib_name = self._search_rpaths(dylib_name[6:])\n\n if not dylib_name:\n return\n\n dylib_name = os.path.realpath(dylib_name)\n\n if not os.path.exists(dylib_name):\n if self._is_known_missing_dylib(dylib_name):\n return\n if weak:\n return\n print('object_file.path', self.object_file.path)\n print('dylib_name', dylib_name)\n raise Exception('dylib not found')\n if not dylib_name in object_files:\n print('object_file.path', self.object_file.path)\n print('dylib_name', dylib_name)\n raise Exception('dylib not in object_files')\n\n # TODO: Check install name.\n\n self.object_file.dylibs.append(dylib_name)\n\n def _is_known_missing_dylib(self, dylib_name):\n for subpath in self.subpaths_with_missing_dylib:\n if subpath in dylib_name:\n return True\n for subpath in self.object_subpaths_with_missing_dylibs:\n if subpath in self.object_file.path:\n return True\n return False\n\n def _search_executable_path(self, dylib_name):\n if self.executable_path:\n return self.executable_path + dylib_name\n\n print('WARNING: dylib not found')\n print('object_file.path', self.object_file.path)\n print('dylib_name', dylib_name)\n\n return None\n\n def _search_rpaths(self, dylib_name):\n for expanded_rpath in self.expanded_rpaths:\n if not expanded_rpath:\n continue\n candidate = expanded_rpath + dylib_name\n if os.path.exists(candidate):\n return candidate\n\n if self.object_file.filetype == 'DyLib' and self.executable_file in objdump_parsers:\n exe_parser = objdump_parsers[self.executable_file]\n\n for expanded_rpath in exe_parser.expanded_rpaths:\n if not expanded_rpath:\n continue\n candidate = expanded_rpath + dylib_name\n if os.path.exists(candidate):\n return candidate\n\n candidate = self.loader_path + dylib_name\n if os.path.exists(candidate):\n return candidate\n\n print('WARNING: dylib not found')\n print('object_file.path', self.object_file.path)\n print('loader_path', self.loader_path)\n print('executable_path', 
self.executable_path)\n for i in range(len(self.rpaths)):\n rpath = self.rpaths[i]\n expanded_rpath = self.expanded_rpaths[i]\n if not expanded_rpath:\n print('rpath', rpath, '-> ???')\n elif rpath != expanded_rpath:\n print('rpath', rpath, '->', expanded_rpath)\n else:\n print('rpath', rpath)\n print('dylib_name', '@rpath' + dylib_name)\n print()\n\n return None\n\n\nif not os.path.isfile('loads.zip'):\n print('Computing loader dependencies...')\n\n with zipfile.ZipFile('objdumps.zip') as z:\n for object_file in object_files.values():\n path = object_file.path\n\n try:\n objdump_output = str(z.read(path), encoding='utf-8')\n except:\n continue\n\n objdump_parsers[path] = ObjDumpParser(object_file)\n objdump_parsers[path].parse(objdump_output)\n\n for object_file in object_files.values():\n path = object_file.path\n\n if not path in objdump_parsers:\n continue\n\n objdump_parsers[path].resolve_dylibs()\n\n count = 0\n\n with zipfile.ZipFile('loads.zip', 'w') as z:\n for object_file in object_files.values():\n path = object_file.path\n\n if not path in objdump_parsers:\n continue\n\n z.writestr(path, '\\n'.join(object_file.dylibs))\n count += len(object_file.dylibs)\n\n print('Found', count, 'dependencies')\n\nz = zipfile.ZipFile('loads.zip')\n\ndef read_dylibs(path):\n try:\n dylibs = str(z.read(path), encoding='utf-8')\n except:\n return []\n\n if dylibs == '':\n return []\n else:\n return dylibs.split('\\n')\n\n\ndef search(path, seen=set(), indent=0):\n for _ in range(indent):\n print('\\t', end='')\n print(path)\n children_to_print = []\n for child in sorted(read_dylibs(path)):\n if child not in seen:\n seen.add(child)\n children_to_print.append(child)\n for child in children_to_print:\n search(child, seen, indent + 1)\n\n\nfor path in sys.argv[1:]:\n search(path)\n"
}
] | 1 |
Zuckonit/flappyscore | https://github.com/Zuckonit/flappyscore | 20cc800e9600a44c5ca560e9530bfe844325e3d4 | 773e95be83a5d140bf3d40c6d1698ec48661d723 | 2012d56fc4af144af68352ba4d3b4e44f2fec428 | refs/heads/master | 2020-12-24T15:23:14.182011 | 2014-03-12T09:17:01 | 2014-03-12T09:17:01 | 17,131,547 | 1 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.643524706363678,
"alphanum_fraction": 0.6461949348449707,
"avg_line_length": 40.61111068725586,
"blob_id": "e37706f5e3e2a05dea21f42017cd4b9370b780d8",
"content_id": "70c43778560da772cf5ebac63b64d3252f13c4ce",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 749,
"license_type": "permissive",
"max_line_length": 108,
"num_lines": 18,
"path": "/app.py",
"repo_name": "Zuckonit/flappyscore",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n#-*- coding:utf-8 -*-\nimport sys\nimport optparse\nimport flappyscore\n\ndef opt(arg):\n parser = optparse.OptionParser(usage=\"usage: %prog -s score [-f] [save_file]\")\n parser.add_option( \"-s\", \"--score\", dest=\"score\", type=\"long\", help=\"the current score to be generated\")\n parser.add_option( \"-b\", \"--best\", dest=\"best\", type=\"long\", help=\"the best score to be generated\")\n parser.add_option( \"-o\", \"--output\", dest=\"output\", type=\"string\", help=\"the image to be saved\")\n (options, args) = parser.parse_args(arg)\n if len(arg) == 0 or options.score is None:\n parser.error('score required')\n flappyscore.generate(options.score, options.best, options.output)\n\nif __name__ == '__main__':\n opt(sys.argv)\n"
},
{
"alpha_fraction": 0.5745833516120911,
"alphanum_fraction": 0.5987499952316284,
"avg_line_length": 30.16883087158203,
"blob_id": "b01c205d684720d04fe0fee4d999383764096957",
"content_id": "d321e4308bcc33891ebcc03994ff1c7e92b7ece2",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2400,
"license_type": "permissive",
"max_line_length": 81,
"num_lines": 77,
"path": "/flappyscore.py",
"repo_name": "Zuckonit/flappyscore",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n#-*- coding:utf-8 -*-\nimport os\nfrom PIL import Image, ImageDraw, ImageFont\n\n\nMEDAL_BRONZE = './img/bronze.jpg'\nMEDAL_SILVER = './img/silver.jpg'\nMEDAL_GOLD = './img/gold.jpg'\nMEDAL_PLATINUM = './img/platinum.jpg'\nBACKGROUND_IMG = './img/bg.jpg'\nFONT_SCORE = './fonts/04B_19__.TTF'\nDEFUALT_SCORE_IMAGE = './score.jpg'\n\n\ndef imagecopy (dst_im, src_im, dst_x, dst_y, src_x, src_y, src_w, src_h): \n src_im_crop = src_im.crop((src_x, src_y, src_x + src_w, src_y + src_h)) \n dst_im.paste(src_im_crop, (dst_x, dst_y)) \n return True \n\n\ndef medal(dst_img, s):\n src_img = None\n if 10 <= s < 20:\n src_img = Image.open(MEDAL_BRONZE)\n elif 20 <= s < 30:\n src_img = Image.open(MEDAL_SILVER)\n elif 30 <= s < 40:\n src_img = Image.open(MEDAL_GOLD)\n elif s >= 40:\n src_img = Image.open(MEDAL_PLATINUM)\n if src_img is not None:\n imagecopy(dst_img, src_img, 126, 384, 0, 0, 99, 102)\n return dst_img\n\n\ndef score(src_img, s, best=None):\n if best is None or best < s:\n best = s\n size = 42\n x, y, offset_y, offset = 460, 364, 93, 2\n color_outline = (2, 2, 2)\n color_font = (254, 254, 254)\n font = ImageFont.truetype(FONT_SCORE, size)\n draw = ImageDraw.Draw(src_img)\n draw.text((x-offset, y-offset), \"%d\"%s, color_outline, font=font)\n draw.text((x+offset, y-offset), \"%d\"%s, color_outline, font=font)\n draw.text((x-offset, y+offset), \"%d\"%s, color_outline, font=font)\n draw.text((x+offset, y+offset), \"%d\"%s, color_outline, font=font)\n\n draw.text((x-offset, y-offset+offset_y), \"%d\"%best, color_outline, font=font)\n draw.text((x+offset, y-offset+offset_y), \"%d\"%best, color_outline, font=font)\n draw.text((x-offset, y+offset+offset_y), \"%d\"%best, color_outline, font=font)\n draw.text((x+offset, y+offset+offset_y), \"%d\"%best, color_outline, font=font)\n\n draw.text((x, y), \"%d\"%s,color_font, font=font)\n draw.text((x, y+offset_y), \"%d\"%best, color_font, font=font)\n\n\ndef generate(s, best=None, f=None):\n if f is None:\n f = DEFUALT_SCORE_IMAGE\n f = os.path.abspath(f)\n f = os.path.expanduser(f)\n source = Image.open(BACKGROUND_IMG)\n medal(source, s)\n score(source, s, best)\n source.save(f)\n return source\n\nif __name__ == '__main__':\n test_score = 108\n source = generate(test_score)\n try:\n source.show()\n except:\n pass\n"
},
{
"alpha_fraction": 0.5714285969734192,
"alphanum_fraction": 0.5828571319580078,
"avg_line_length": 18.33333396911621,
"blob_id": "b21de5bd7822e9df8255f98bfd8edc29cefae86f",
"content_id": "cf5df2a6837f8867f4241d2eecb1242fb71e9402",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 350,
"license_type": "permissive",
"max_line_length": 72,
"num_lines": 18,
"path": "/README.md",
"repo_name": "Zuckonit/flappyscore",
"src_encoding": "UTF-8",
"text": "Flappy Bird Score generator\n==============\n> flappybird score cheat \n> just for fun \n> inspired by [s5s5](https://github.com/s5s5/FlappyBirdScore) \n \n\nRequired\n================\nPython PIL\n \nScreenshots\n================\n> \n \nUsage\n===============\n```python app.py -help``` \n"
}
] | 3 |
bernard357/airbotbackend | https://github.com/bernard357/airbotbackend | 04f1d3978322adbde0788f4ea34d6c5e9defc5ff | 02d88e05af8933aff57d52baa13ab7faf130321b | af605d0ba4fceedc3c797835f836c9fc3477d66a | refs/heads/master | 2020-03-30T19:34:13.635910 | 2018-10-04T09:10:12 | 2018-10-04T09:10:12 | null | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6193863749504089,
"alphanum_fraction": 0.6211649775505066,
"avg_line_length": 26.432926177978516,
"blob_id": "55e2ac04dfb86eaf5ad1d477001e54b1920f07a0",
"content_id": "d43cf7e2d8d65892da78c688a8d9cc78d8ac8116",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4498,
"license_type": "no_license",
"max_line_length": 152,
"num_lines": 164,
"path": "/airbot/resolvers/variable.py",
"repo_name": "bernard357/airbotbackend",
"src_encoding": "UTF-8",
"text": "import unicodedata\nfrom airbot.model.botitem import Item\nfrom grapher import App\nfrom genid import ID\nfrom airbot import errors\nimport pprint\nfrom factory import base_factory, list_children_factory\nfrom airbot import errors\nimport json\nfrom genid import ID\nbase_factory(objecttype=\"variable\",parentobjecttype=\"entity\",parentidentitifier=\"entityid\",identifier=\"variableid\")\nfrom unidecode import unidecode\nfrom airbot.athena.query import AthenaQuery\nfrom entity import ENTITYSTATES\n\n\n\n\n\[email protected](\n field=\"deleteVariable\",\n path=\"Mutation/deleteVariable\",\n argmap={\n \"/arguments/variableid\": \"variableid\",\n \"/arguments/identity\": \"identity\"\n }\n)\ndef deleteVariable(identity, variableid):\n try:\n v= Item.get(\"variable\", variableid)\n v.delete()\n return True\n except Exception:\n return False\n\[email protected](\n field=\"getVariable\",\n path =\"Query/getVariable\",\n argmap={\n \"/arguments/variableid\": \"variableid\",\n \"/arguments/identity\": \"identity\"\n }\n)\ndef getVariable(identity, variableid) :\n try:\n variable = Item.get(\"variable\", variableid)\n entity = Item.get(\"entity\",variable.parent)\n except Exception:\n raise errors.ObjectNotFound(variableid)\n\n variable_doc = variable.json()\n variable_doc.update(variable_doc[\"doc\"])\n del variable_doc[\"doc\"]\n return variable_doc\n\n\[email protected](\n \"createVariable\",\n path=\"Mutation/createVariable\",\n argmap={\n \"/arguments/identity\" : \"identity\",\n \"/arguments/entityid\" : \"parent\",\n \"/arguments/input\" : \"options\"\n }\n)\ndef createVariable(parent,options, identity):\n print \">\", parent, options, identity\n try :\n entity = Item.get(\"entity\",parent)\n except Exception:\n raise errors.ObjectNotFound(parent)\n\n\n if entity.doc.status!=ENTITYSTATES.READY :\n raise errors.InvalidParameter(\"Could not add variable to unparsed entity\")\n bot = Item.get(\"bot\",entity.parent)\n\n accountid = parent.split(\":\")[2]\n botname = parent.split(\":\")[3]\n entityname = parent.split(\":\")[4]\n name = options[\"name\"]\n uri = \"uri:variable:%(accountid)s:%(botname)s:%(entityname)s:%(name)s\" % vars()\n print uri\n data={}\n data[\"ID\"] = uri\n data[\"name\"] = options[\"name\"]\n data[\"objecttype\"] = \"variable\"\n data[\"parent\"] = parent if parent is not None else \"service\"\n data[\"search\"] = name + options.get(\"description\", \"-\") + options.get(\"tags\", \"-\")\n data[\"createdat\"] = ID.now()\n data[\"creator\"] = identity\n data[\"doc\"] = {\n \"aliases\" : options[\"aliases\"],\n \"field\" : options[\"field\"]\n }\n item = Item(**data)\n item.save()\n\n database = bot.doc.database\n tablename = entity.doc.tablename\n columnname = options[\"field\"]\n\n\n sql = 'select %(columnname)s from \"%(database)s\".\"%(tablename)s\" group by %(columnname)s limit 30'%vars()\n response = AthenaQuery.run(**{\"sql\": sql})\n values = [r[columnname].replace('\"','') for r in response[\"data\"][\"records\"]]\n for i,val in enumerate(values):\n if len(val):\n slug = unicodedata.normalize('NFKD', val).encode('ascii','ignore')\n cache = Item(**{\"name\" : slug,\"parent\" : uri,\"doc\": {},\"createdat\" : ID.now(),\"objecttype\" : \"value\",\"ID\" : uri+\":\"+str(i),\"search\" : slug})\n print cache\n cache.save()\n\n d = item.json()\n d.update(d[\"doc\"])\n del d[\"doc\"]\n return d\n\n\ndef get_sample_values(uri):\n values = Item.query(\"value\",Item.parent==uri,limit=100)\n cache=[]\n for v in values :\n if v.name.lower() not in cache:\n 
cache.append(v.name.lower())\n yield v.name.lower()\n\n\n\[email protected](\n field=\"updateVariable\",\n path =\"Mutation/updateVariable\",\n argmap={\n \"/arguments/variableid\": \"variableid\",\n \"/arguments/identity\": \"identity\",\n \"/arguments/input\" : \"data\"\n }\n)\ndef updateVariable(identity, variableid, data) :\n\n try:\n v = Item.get(\"variable\",variableid)\n except Exception as e :\n raise errors.ObjectNotFound(variableid)\n\n\n v.update(actions=[\n Item.doc.aliases.set(data.get(\"aliases\",v.doc.aliases)),\n Item.doc.field.set(data.get(\"field\",v.doc.field)),\n Item.description.set(data.get(\"description\", v.description))\n ])\n d = v.json()\n d.update(d[\"doc\"])\n del d[\"doc\"]\n return d\n\n\nif __name__ == \"__main__\" :\n\n\n\n\n for v in get_sample_values(\"uri:variable:demo:mybot:thisfile:x\") :\n print v"
},
{
"alpha_fraction": 0.5277285575866699,
"alphanum_fraction": 0.5292274355888367,
"avg_line_length": 28.58467674255371,
"blob_id": "c3d532ce4c5279f4f8bceb01d4fde57e8b7c526e",
"content_id": "f860868aed3fcca5eba088382a3e8aa3de13a3a5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7339,
"license_type": "no_license",
"max_line_length": 111,
"num_lines": 248,
"path": "/airbot/resolvers/bot.py",
"repo_name": "bernard357/airbotbackend",
"src_encoding": "UTF-8",
"text": "import json\nfrom airbot.model.botitem import Item\nfrom grapher import App\nfrom genid import ID\nfrom airbot import errors\nfrom factory import base_factory, list_children_factory\nimport pprint\nfrom airbot.model.botitem import Item\nfrom datetime import datetime\nbase_factory(objecttype=\"bot\",parentobjecttype=\"account\",parentidentitifier=\"accountid\",identifier=\"botid\")\nlist_children_factory(parentobjecttype=\"bot\", chilobjecttype=\"entity\", childobjectpluralized=\"entities\")\nlist_children_factory(parentobjecttype=\"bot\", chilobjecttype=\"intent\", childobjectpluralized=\"intents\")\nfrom lex import Bot\nfrom airbot.athena.query import AthenaQuery\n\n\[email protected](\n field=\"getBotStatus\",\n path=\"Query/getBotStatus\",\n argmap={\n \"/arguments/botid\" : \"botid\",\n \"/arguments/identity\" : \"identity\"\n }\n)\ndef getBotStatus(identity, botid) :\n try :\n bot = Item.get(\"bot\", botid)\n except Exception as e :\n return \"NOT READY\"\n try :\n response = Bot.get_bot(bot.name)\n return response[\"status\"]\n except Exception as e :\n return \"NOT READY\"\n\[email protected](\n field=\"createBot\",\n path=\"Mutation/createBot\",\n argmap={\n \"/arguments/accountid\" : \"parent\",\n \"/arguments/identity\" : \"identity\",\n \"/arguments/input\" : \"options\"\n }\n)\ndef createBot(parent,identity,options) :\n accountid = parent.split(\":\")[2]\n name = options.get(\"name\",\"unnamedbot\"+datetime.now().strftime(\"%Y%m%D%H%M%S\"))\n uri = \"uri:bot:%(accountid)s:%(name)s\" % vars()\n try :\n existing = Item.get(\"bot\", uri)\n raise errors.DuplicateError(uri)\n except Exception :\n pass\n input= {}\n input[\"ID\"] = uri\n input[\"objecttype\"] = \"bot\"\n input[\"parent\"] = parent\n input[\"name\"] = options.get(\"name\",\"untitled\")\n input[\"description\"] = options.get(\"description\",\"no description available\")\n input[\"search\"] = input[\"name\"]+\"-\"+input[\"description\"]\n input[\"createdat\"] = ID.now()\n input[\"creator\"] = identity\n input[\"doc\"] = {\n \"bucket\" : \"airbot2018\",\n \"database\" : \"airbot\"+input[\"name\"],\n \"status\" : \"NOTBUILT\",\n \"prefix\" : input[\"name\"]\n }\n item = Item(**input)\n item.save()\n bot = Item.get(\"bot\", uri)\n return bot.attribute_values\n\ndef reverse_operator(operator):\n index = {\n \"in\" : \"=\",\n \"outside\":\"!=\",\n \"greater\":\">=\",\n \"equals\":\"=\",\n \"different\":\"!=\",\n \"smaller\":\"<=\",\n \"bigger\":\">=\",\n \"taller\":\">=\"\n }\n return index.get(operator,\"=\")\n\n\n\n\ndef get_filters(slots,graph) :\n varmap = {}\n filters = []\n explains = []\n for s in slots :\n basename=s.replace(\"val\",\"\").replace(\"op\", \"\")\n for entity in graph[\"bot\"][\"entities\"].values() :\n for v in entity[\"variables\"].values() :\n if v[\"name\"] == basename :\n if v[\"name\"] not in varmap :\n varmap[v[\"name\"]] = {\n \"field\" : v[\"field\"],\n \"tablename\" : entity[\"tablename\"]\n }\n if \"op\" in s :\n varmap[v[\"name\"]] [\"op\"]= slots[s]\n varmap[v[\"name\"]] [\"sqlop\"]= reverse_operator(slots[s])\n if \"val\" in s:\n varmap[v[\"name\"]] [\"value\"]= slots[s]\n\n for v in varmap.values() :\n filters.append(v[\"field\"]+v[\"sqlop\"]+\"'\"+v[\"value\"]+\"'\")\n explains.append(v[\"op\"]+\" \"+v[\"value\"])\n return {\n \"filters\" : \" and \".join(filters),\n \"explained\" : \" and \".join(explains)\n }\n\n\n\[email protected](\n field = \"askBot\",\n path = \"Query/askBot\",\n argmap={\n \"arguments/identity\" : \"identity\",\n \"arguments/botid\" : \"botid\",\n 
\"arguments/question\" : \"question\"\n }\n)\ndef askBot(identity, botid, question) :\n import random\n try :\n bot = Item.get(\"bot\", botid)\n except Exception as e :\n return \"Seems i do not exists anymore :(\"\n\n response= Bot.send(bot=bot.name,input=question)\n print response\n if response[\"dialogState\"] :\n parts = botid.split(\":\")\n accountid=parts[2]\n botname = parts[3]\n intentname = response[\"intentName\"]\n uri = \"uri:intent:%(accountid)s:%(botname)s:%(intentname)s\"%vars()\n intent = Item.get(\"intent\", uri)\n database= bot.doc.database\n SQL = intent.doc.sql\n\n print \"SQL )= \", SQL\n\n graph = {\n \"bot\" : {\n \"entities\": {}\n }\n }\n entities = list(Item.parent_index.query( \"entity\", Item.parent==intent.parent))\n print \"??\",entities\n variables = {}\n for e in entities :\n print \">>>>>>>>>>>>>>>>>>>\",\">\"+e.name+\"<\"\n SQL = SQL.replace(e.name, '\"'+database+'\".'+'\"'+e.doc.tablename+'\"')\n graph[\"bot\"][\"entities\"][e.name] ={\n \"name\" : e.name,\n \"tablename\" : '\"'+database+'\"' +'.\"'+ e.name+'\"',\n \"database\" : database,\n \"variables\" : {\n\n }\n }\n for v in Item.parent_index.query(\"variable\", Item.parent == e.ID):\n graph[\"bot\"][\"entities\"][e.name][\"variables\"][v.name] = {\n \"name\" : v.name,\n \"field\" : v.doc.field\n }\n\n print pprint.pformat(graph)\n filters = get_filters(graph=graph, slots=response[\"slots\"])[\"filters\"]\n explains= get_filters(graph=graph, slots=response[\"slots\"])[\"explained\"]\n print \"filters = \",filters\n print \"explains = \",explains\n\n\n sql = \"select count(*) as NB from (\"+SQL+\" where \"+filters+\") as T\"\n\n results = AthenaQuery.run(sql=sql)\n\n print \"+-/\"*30\n print results[\"data\"][\"records\"][0]\n\n replytpl = random.choice(intent.doc.reply)\n reply = replytpl.replace(\"{NB}\", results[\"data\"][\"records\"][0][\"NB\"])+\" \"+explains\n return reply\n else :\n return random.choice([\n \"Sorry, this is not clear for and old bot\",\n \"Can you try to reformulate ?\",\n \"Hmm, i'm just a bot\",\n \"Well, this is clear, i'm not really awake right now\"\n ])\n\n\n\nif __name__==\"__main__\" :\n\n\n print App.route(\n {\n \"field\" : \"askBot\",\n \"arguments\": {\n \"botid\":\"uri:bot:demo:FinancialCoach\",\n \"question\" : \"how many people are living in paris\"\n }\n }\n )\n\n r= askBot(identity=\"\", botid=\"uri:bot:demo:FinancialCoach\", question=\"how many people are living in paris\")\n print pprint.pformat(r)\n\n exit()\n #print Bot.put_alias(\"FinancialCoach\")\n print Bot.put_bot_version(\"FinancialCoach\")\n\n exit()\n\n print getBotStatus(identity=\"\", botid=\"uri:bot:demo:FinancialCoach\")\n\n\n print askBot(identity=\"\", botid=\"uri:bot:demo:FinancialCoach\", question=\"number of persons\")\n\n exit()\n '''\n from grapher.json_utils import to_json\n event = {\n \"field\" : \"createBot\",\n \"path\" : \"Account/bots\",\n \"arguments\" : {\n \"identity\" :\"service\",\n \"accountid\" : \"uri:account:demo\",\n \"input\" : {\n \"name\" : \"mybot\"\n }\n }\n\n }\n b = App.route(event)\n\n print to_json(b)\n\n '''\n\n\n"
}
] | 2 |
camiloarchila/prueba | https://github.com/camiloarchila/prueba | 8d7ff166a3b6616c6043878ff10ab6823036a97b | 2ab7b8cc49aefd4b22ef854e733b4b0ed6d2c750 | 3801d2eb11041eb1c91b07169b9ec66d6091ebc7 | refs/heads/master | 2022-12-03T15:46:44.875963 | 2020-08-09T19:18:11 | 2020-08-09T19:18:11 | 285,640,269 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.48847925662994385,
"alphanum_fraction": 0.5053763389587402,
"avg_line_length": 23.115385055541992,
"blob_id": "3fc07678e7d27551d968a251432d7f61f3fa9f61",
"content_id": "9495ae40aaf63f63565d366d5e9a02cd1ddef03b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 651,
"license_type": "no_license",
"max_line_length": 54,
"num_lines": 26,
"path": "/ejercicios.py",
"repo_name": "camiloarchila/prueba",
"src_encoding": "UTF-8",
"text": "\"\"\"\r\nCreated on Mon Jul 6 08:59:36 2020\r\n\r\n@author: Camilo\r\n\"\"\"\r\ndef quetipo():\r\n a = input(\"Digite valor: \")\r\n if type(a) == int:\r\n print(\"la variable es\", type(a))\r\n elif type(a) == str:\r\n print(\"la variable es\", type(a))\r\n elif type(a) == float:\r\n print(\"la variable es\", type(a))\r\n else:\r\n print(\"la variable es\", type(a))\r\n \r\ndef prueba():\r\n while True:\r\n entrada = input(\"Digite un valor entero\")\r\n try:\r\n entrada = int(entrada)\r\n except ValueError:\r\n print(\"el valor digitado no es un entero\")\r\n else:\r\n break\r\n print(\"exito\")"
},
{
"alpha_fraction": 0.7978723645210266,
"alphanum_fraction": 0.7978723645210266,
"avg_line_length": 14.666666984558105,
"blob_id": "f0be3f35f64083b35548577956db4f5e79579fa7",
"content_id": "0920ba18fa87834a1454f99822dbed01d04969c5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 95,
"license_type": "no_license",
"max_line_length": 23,
"num_lines": 6,
"path": "/README.md",
"repo_name": "camiloarchila/prueba",
"src_encoding": "UTF-8",
"text": "# prueba\n# prueba readme\n## esto \nspjvsijvosijrvskndvsojv\n### soinvnkv\nsçvskncvsunvsjnvsurvns\n"
}
] | 2 |
smoort-portfolio/miniflow | https://github.com/smoort-portfolio/miniflow | 6a272113633d58bf96102d728d037b9ab14f7ab4 | f36e2007c76831255f05bacd95015f5c778fb183 | 89b9be200d09b1bcf26eecd06c9ae1b8f317b943 | refs/heads/main | 2023-02-21T19:20:51.797305 | 2021-01-29T06:05:10 | 2021-01-29T06:05:10 | 327,222,765 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5608137845993042,
"alphanum_fraction": 0.5855816006660461,
"avg_line_length": 19.95145606994629,
"blob_id": "acecba4ef8c9e1533b37426978413e576d5f0451",
"content_id": "45b226a2a5a60f6d2f0e8d3ab0a963fe4d094311",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2261,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 103,
"path": "/nn.py",
"repo_name": "smoort-portfolio/miniflow",
"src_encoding": "UTF-8",
"text": "\"\"\"\r\nMy own neural net implementation\r\nCourtesy Miniflow from Udacity\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport seaborn as sns\r\nfrom sklearn.datasets import load_boston\r\nfrom sklearn.utils import shuffle, resample\r\nfrom miniflow import *\r\nimport pandas as pd\r\n\r\n# Load data\r\ndata = load_boston()\r\n\r\nprint(\"data type\", type(data))\r\n\r\nX_ = data['data']\r\nprint(\"X_ type\", type(X_))\r\nprint(\"X_ shape\", X_.shape)\r\nprint(\"X_ size\", X_.size)\r\n#print(X_[1])\r\ny_ = data['target']\r\nprint(\"y_ type\", type(y_))\r\nprint(\"y_ shape\", y_.shape)\r\nprint(\"y_ size\", y_.size)\r\nprint(y_[1])\r\n\r\n# Standardize data\r\nX_ = (X_ - np.mean(X_, axis=0)) / np.std(X_, axis=0)\r\n\r\nn_features = X_.shape[1]\r\nn_hidden = 3\r\nW1_ = np.random.randn(n_features, n_hidden)\r\nb1_ = np.zeros(n_hidden)\r\nW2_ = np.random.randn(n_hidden, 1)\r\nb2_ = np.zeros(1)\r\n\r\n# Neural network\r\nX, y = Input(), Input()\r\nW1, b1 = Input(), Input()\r\nW2, b2 = Input(), Input()\r\n\r\nl1 = Linear(X, W1, b1)\r\ns1 = Sigmoid(l1)\r\nl2 = Linear(s1, W2, b2)\r\ncost = MSE(y, l2)\r\n\r\nfeed_dict = {\r\n X: X_,\r\n y: y_,\r\n W1: W1_,\r\n b1: b1_,\r\n W2: W2_,\r\n b2: b2_\r\n}\r\n\r\nepochs = 10\r\n# Total number of examples\r\nm = X_.shape[0]\r\nbatch_size = 11\r\nsteps_per_epoch = m // batch_size\r\n\r\ngraph = topological_sort(feed_dict)\r\ntrainables = [W1, b1, W2, b2]\r\n\r\nprint(\"Total number of examples = {}\".format(m))\r\n\r\nloss_list = []\r\nloss_drop_list = [0]\r\n\r\n# Step 4\r\nfor i in range(epochs):\r\n loss = 0\r\n for j in range(steps_per_epoch):\r\n # Step 1\r\n # Randomly sample a batch of examples\r\n X_batch, y_batch = resample(X_, y_, n_samples=batch_size)\r\n\r\n # Reset value of X and y Inputs\r\n X.value = X_batch\r\n y.value = y_batch\r\n\r\n # Step 2\r\n forward_and_backward(graph)\r\n\r\n # Step 3\r\n sgd_update(trainables)\r\n\r\n loss += graph[-1].value\r\n\r\n print(\"Epoch: {}, Loss: {:.3f}\".format(i+1, loss/steps_per_epoch))\r\n if i > 0:\r\n loss_drop_list.append(loss_list[-1] - (loss/steps_per_epoch))\r\n loss_list.append(loss/steps_per_epoch) \r\n\r\nloss_drop_list[0] = loss_drop_list[1]\r\ndf = pd.DataFrame(data={\"Loss\": loss_list, \"Loss Drop\": loss_drop_list})\r\nprint(loss_list)\r\nprint(loss_drop_list)\r\nsns.lineplot(data=df)\r\nplt.show()\r\n"
}
] | 1 |
Moritztz/tmu_cs2_test | https://github.com/Moritztz/tmu_cs2_test | ed41c6e4cbcb0f7034205fa2eddf69af6fe1ad8f | 4481ef2ca303ae5db92aa2132750765c3947196e | ef219f1ce4ca4707b799da537ec0f2d3a626212e | refs/heads/master | 2020-07-29T08:20:35.796353 | 2019-09-22T14:32:29 | 2019-09-22T14:32:29 | 209,726,770 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6264324188232422,
"alphanum_fraction": 0.6325439214706421,
"avg_line_length": 35.36111068725586,
"blob_id": "aef71ba48a897b7772f756942f7f15ed0b2d1b70",
"content_id": "5ed3a930ea9d4e76525259bb5a303af3964c5027",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1383,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 36,
"path": "/tests/test_predictor.py",
"repo_name": "Moritztz/tmu_cs2_test",
"src_encoding": "UTF-8",
"text": "import unittest\nimport pickle\nfrom anomaly_detection.trainer import Trainer\nfrom anomaly_detection.predictor import Predictor\n\n\nclass TestPredict(unittest.TestCase):\n def setUp(self):\n self.model_filename = \"my_model_file.bin\"\n with open(\"data/train.pickle\", mode=\"rb\") as f:\n self.train_data = pickle.load(f)[\"features\"]\n trainer = Trainer()\n self.len_data = len(self.train_data[0])\n trainer.train(self.train_data)\n trainer.save(self.model_filename)\n\n def test_predict(self):\n predictor = Predictor()\n predictor.load(self.model_filename)\n with open(\"data/test.pickle\", mode=\"rb\") as f:\n self.test_data = pickle.load(f)[\"features\"]\n # result = predictor.predict(self.test_data)\n predictor.predict(self.test_data)\n\n # assert result[\"is_anomaly\"] is False\n # assert result[\"is_error\"] is False\n # assert result[\"score\"] == 0.0\n # assert result[\"message\"] is None\n\n # 予測したいデータの次元が学習したデータと違う場合\n def test_different_dim_predict(self):\n predictor = Predictor()\n predictor.load(self.model_filename)\n with self.assertRaises(ValueError):\n data = [[0] * (self.len_data + 1) for i in range(3)] # データ+1次元のデータを3個生成\n predictor.predict(data)\n"
},
{
"alpha_fraction": 0.8602409362792969,
"alphanum_fraction": 0.8710843324661255,
"avg_line_length": 35.130435943603516,
"blob_id": "13992b778045054850b89881339fb93be099c21b",
"content_id": "45818f51a7578b24277704efb483ca2405b457fa",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1786,
"license_type": "no_license",
"max_line_length": 223,
"num_lines": 23,
"path": "/README.md",
"repo_name": "Moritztz/tmu_cs2_test",
"src_encoding": "UTF-8",
"text": "[](https://circleci.com/gh/Moritztz/tmu_cs2_test)\n\n# 首都大学東京 情報科学特論2\n\n## 追加したアルゴリズムの簡単な説明\n\nGaussian Mixture Modelsを利用して精度改善を行なった。n_componentsは3に設定した。\nGitHubにアップロードして、CircleCIと連携を行なった。\n\n## 追加したテストの気をつけた点や工夫点\n\ntrainデータに対して、次元が違うデータをpredictした時を想定したテストを実装した。次元はtrainデータに応じて変更されるようにした。\nn_componentsをフィールドとして用意することにより、データなしの場合だけでなく、n_componentsよりもデータが少ない場合でもエラーの判定ができるようにした。その際、n_componentsが小さく設定されて正常に処理された場合も考慮したテストに変更した。\nGitHubのcommitを意識して編集した。\n\n## 講義の感想\n\nプログラムを頑健にするためのツール(gitやlintなど)の知識はあり、実践しながら理解することができたよかったが、正直言うと機械学習の基礎知識が乏しかったので、そのあたりのプログラムの書き方がまったくわからなかった。開発の行い方に関しては、これまでtestツールの存在を知らなかったので、課題として実践することができて大変勉強になった。今回はじめてPythonを触ってみたけれど、javascriptやc#を知っていたのでスムーズに理解できた。\n\n## Contributer\n\nこの課題は吉田さんのリポジトリを元に作成しました。\nhttps://github.com/syou6162/tmu_cs2.git"
}
] | 2 |
2004alice/half_term_3 | https://github.com/2004alice/half_term_3 | 9429ae143c4b8bb7dd2d894b117c5fe8a99d176a | e87385c79c1d21fb067ca45f16c1d0b96e781113 | 7a5a80e3a8eb54634bfde0c53e3d8a7a560bde93 | refs/heads/master | 2023-02-22T03:39:08.153465 | 2021-01-24T18:04:42 | 2021-01-24T18:04:42 | 331,911,305 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5414847135543823,
"alphanum_fraction": 0.6113536953926086,
"avg_line_length": 17.31999969482422,
"blob_id": "316958edfc7005dea8c328e4865429f6f1d3140c",
"content_id": "cfdf692dc9fa5cded24ba65f7f693a2f5a734804",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 458,
"license_type": "no_license",
"max_line_length": 64,
"num_lines": 25,
"path": "/main.py",
"repo_name": "2004alice/half_term_3",
"src_encoding": "UTF-8",
"text": "import pygame\n\npygame.init()\n\nDISPLAY = pygame.display.set_mode([330, 600])\nFPSCLOCK = pygame.time.Clock()\n\nblack = (0, 0, 0)\nwhite = (255, 255, 255)\nleft = 50\ntop = 50\nwidth = 40\nheight = 30\n\nwhile True:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n quit()\n DISPLAY.fill((0, 0, 0))\n pygame.draw.rect(DISPLAY, white, (left, top, width, height))\n\n left += 5\n\n pygame.display.update()\n FPSCLOCK.tick(20)\n"
}
] | 1 |
junyi1997/TQC_Python | https://github.com/junyi1997/TQC_Python | 35b1320ca7a1b2f8eee4b9e8d4f1b9d7f4a5c02f | befe177880a7034f37848ff404bb7d33f9a07ff9 | c37e63a902106bbde63fb5517f420e2c043c7f93 | refs/heads/master | 2020-04-25T04:05:57.508858 | 2019-03-24T17:03:07 | 2019-03-24T17:03:07 | 172,499,205 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5544554591178894,
"alphanum_fraction": 0.6072607040405273,
"avg_line_length": 12.217391014099121,
"blob_id": "44819fbfbc78134bdf88d89be7d399050a454013",
"content_id": "85cfd63649f1e2f58c2952ada49dbd28d7bcbc10",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 307,
"license_type": "no_license",
"max_line_length": 35,
"num_lines": 23,
"path": "/6.第六類/PYD604.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 7 20:16:04 2018\n\n@author: user\n\n眾數\n\n\"\"\"\n\ndata=[]\nnumcount=0\nnumna=0\nfor i in range(10):\n data.append(int(input()))\nds=set(data)\nfor ev in ds:\n num=data.count(ev)\n if num>numcount:\n numcount=num\n numna=ev\nprint(numna)\nprint(numcount)"
},
{
"alpha_fraction": 0.3441295623779297,
"alphanum_fraction": 0.37246963381767273,
"avg_line_length": 16.714284896850586,
"blob_id": "6e2bd284d218a64c2fb0dcf76fb54059c1adde78",
"content_id": "fccbe3a1029c75f7ce2d5901500b6d18b7bf96b7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 247,
"license_type": "no_license",
"max_line_length": 41,
"num_lines": 14,
"path": "/New code/class5/PYA508.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\ndef compute():\n a=input()\n \n c=a.split(\",\")\n x=int(c[0])\n y=int(c[1])\n f=min(x,y)\n j=0\n for i in range(1,f):\n if (x % i == 0) and (y % i == 0):\n j=i\n print(j) \ncompute()"
},
{
"alpha_fraction": 0.3539822995662689,
"alphanum_fraction": 0.38348081707954407,
"avg_line_length": 16.842105865478516,
"blob_id": "939e3c6e32d3c9a1dc2a02b3bb1470af76bd8bac",
"content_id": "0c3daa8cca994208badefcf1135b15ef8559b26e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 339,
"license_type": "no_license",
"max_line_length": 45,
"num_lines": 19,
"path": "/New code/class5/PYA509.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\ndef compute():\n a=input()\n b=input()\n c=a.split(\",\")\n d=b.split(\",\")\n x=int(c[0])\n y=int(c[1])\n m=int(d[0])\n n=int(d[1])\n mol=(x*n)+(y*m)\n den=y*n\n i=1\n j=0\n for i in range(1,mol):\n if (mol % i == 0) and (den % i == 0):\n j=i\n print() \ncompute()\n"
},
{
"alpha_fraction": 0.45856353640556335,
"alphanum_fraction": 0.5745856165885925,
"avg_line_length": 8.578947067260742,
"blob_id": "b1afb17545b2310366d38569a87afcc3b2259427",
"content_id": "8a6c7086de3bd95ccd3b13270b1f8278719b25f6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 191,
"license_type": "no_license",
"max_line_length": 35,
"num_lines": 19,
"path": "/6.第六類/PYD602.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 7 20:15:54 2018\n\n@author: user\n\n撲克牌總和\n\n\"\"\"\n\nJ=11\nQ=12\nK=13\nA=1\nsum=0\nfor i in range(5):\n num=eval(input())\n sum+=num\nprint(sum)"
},
{
"alpha_fraction": 0.40697672963142395,
"alphanum_fraction": 0.45348837971687317,
"avg_line_length": 16.200000762939453,
"blob_id": "fa88cca32757ee96c16b008d7580c797eb723994",
"content_id": "174d649e9a55f90bac600a34afe8a7a44b8d7931",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 172,
"license_type": "no_license",
"max_line_length": 23,
"num_lines": 10,
"path": "/New code/class4/PYA404.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nnum=int(input())\nif num==0:\n print(0)\nelse:\n while num >0:\n r=int(num%10)\n print(r,end=\"\")\n num=(num-r)/10\n print(\"\")\n"
},
{
"alpha_fraction": 0.2535211145877838,
"alphanum_fraction": 0.26408451795578003,
"avg_line_length": 15.199999809265137,
"blob_id": "7db77bfc025a2a8b325b58ff512c5ace8da4a9a1",
"content_id": "8ff27fa063e0770c18fe8823864ab490839fa476",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 292,
"license_type": "no_license",
"max_line_length": 23,
"num_lines": 10,
"path": "/7.第七類/PYD706.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n#全字母句\n\na=int(input())\nfor i in range(a): \n b=set(input())\n if len(b) > 26:\n print(\"True\")\n else:\n print(\"False\")\n \n \n\n\n \n \n \n \n\n \n \n \n \n\n"
},
{
"alpha_fraction": 0.4816513657569885,
"alphanum_fraction": 0.5458715558052063,
"avg_line_length": 11.166666984558105,
"blob_id": "dbd8fd8e4a12e01a76879e339d906b303e54f51e",
"content_id": "35fe856233120745c071463dd550ea49227c52ec",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 226,
"license_type": "no_license",
"max_line_length": 35,
"num_lines": 18,
"path": "/5.第五類/PYD503.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 7 20:14:48 2018\n\n@author: user\n\n連加計算\n\n\"\"\"\n\ndef compute(a,b):\n sum=0\n for i in range(a,b+1):\n sum+=i\n print(sum)\na=int(input())\nb=int(input())\ncompute(a,b)"
},
{
"alpha_fraction": 0.3870967626571655,
"alphanum_fraction": 0.458781361579895,
"avg_line_length": 12.333333015441895,
"blob_id": "0b4e2094eb3c38111ec6c4af6c1dd73a57b82fdd",
"content_id": "892549e553b1768caa0e69b35bbc2ffe0bd4ebff",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 291,
"license_type": "no_license",
"max_line_length": 23,
"num_lines": 21,
"path": "/New code/Python題目/Python code/spyder code/PYD404.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n#106111123 / 吳驊涓\n#第二題\n\"\"\"\ni=1\na=input()\nwhile i <= len(a):\n print(a[-i],end=\"\")\n i=i+1\n\"\"\"\nnum=int(input())\nif num >0:\n while num >0: \n r=int(num % 10)\n print(r,end=\"\")\n num=(num-r)/10\nelif num == 0:\n print(0)\n\n\ninput()"
},
{
"alpha_fraction": 0.43589743971824646,
"alphanum_fraction": 0.49572649598121643,
"avg_line_length": 22.600000381469727,
"blob_id": "7b05a0a64488f4b9feba122094b766c4ff8ab490",
"content_id": "eb411f22f814db507f030af511a1ec505bae193a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 117,
"license_type": "no_license",
"max_line_length": 27,
"num_lines": 5,
"path": "/New code/class8/PYA805.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\na=input()\nprint(\"|{:<10}|\".format(a))\nprint(\"|{:^10}|\".format(a))\nprint(\"|{:>10}|\".format(a))"
},
{
"alpha_fraction": 0.5328947305679321,
"alphanum_fraction": 0.5394737124443054,
"avg_line_length": 18.125,
"blob_id": "08f260f56b40dbcda269a4b05c441afe01cc1aee",
"content_id": "6fb00330b441cc22eb33d95f86a46a344f7ee0c1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 152,
"license_type": "no_license",
"max_line_length": 47,
"num_lines": 8,
"path": "/New code/class8/PYA806.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\ndef compute(a,b):\n c=a.count(b)\n print(\"{:} occurs {:} time(s)\".format(b,c))\n return c\na=input()\nb=input()\ncompute(a,b)"
},
{
"alpha_fraction": 0.5151515007019043,
"alphanum_fraction": 0.5378788113594055,
"avg_line_length": 21,
"blob_id": "641bcff1d29d729c02dd635d00a5147dcdd09b09",
"content_id": "fb1922a4fa3e371ff614a44b8ed1c363f8d3006a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 132,
"license_type": "no_license",
"max_line_length": 37,
"num_lines": 6,
"path": "/New code/class2/PYA201.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\na=int(input())\nif a % 2 == 0:\n print(a,\"is an even number.\")\nelse:\n print(a,\"is not an even number.\")\n"
},
{
"alpha_fraction": 0.49038460850715637,
"alphanum_fraction": 0.49358972907066345,
"avg_line_length": 16.33333396911621,
"blob_id": "6aa736cde7cf9e5ea6bf7bfca77f471c6f56fccb",
"content_id": "4aab12e80d95bd423c507c856ef41fdacb7857f3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 312,
"license_type": "no_license",
"max_line_length": 30,
"num_lines": 18,
"path": "/New code/class7/PYA710.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nd={}\nwhile True:\n print(\"Key:\",end=\"\")\n key=input()\n if key == \"end\":\n break\n else:\n print(\"Value:\",end=\"\")\n value=input()\n d[key]=value\nprint(\"Search key:\",end=\"\")\nskey=input()\ndk=d.keys()\nif skey in dk:\n print(\"True\")\nelse:\n print(\"False\")\n"
},
{
"alpha_fraction": 0.41035857796669006,
"alphanum_fraction": 0.45418328046798706,
"avg_line_length": 15.666666984558105,
"blob_id": "edf6166a509eedb597351ada03bc7cb335df9498",
"content_id": "38ab67df4c8c02b91965540513c74b3ffea58a01",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 251,
"license_type": "no_license",
"max_line_length": 36,
"num_lines": 15,
"path": "/New code/class6/PYA601.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\na=[]\nsum=0\nfor i in range(12):\n a.append(int(input()))\nk=0\nfor j in a:\n ind=a.index(j)\n if ind % 2 == 0:\n sum=sum+j\n k=k+1\n print(\"{:>3d}\".format(j),end=\"\")\n if k % 3 ==0:\n print(\"\")\nprint(sum)\n\n"
},
{
"alpha_fraction": 0.6906474828720093,
"alphanum_fraction": 0.6942446231842041,
"avg_line_length": 18.928571701049805,
"blob_id": "d42a654186cd79f4b0395da6cf9118434bd41d18",
"content_id": "39489d817fe5df01d7ee8bf4526d458496ee29f3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 490,
"license_type": "no_license",
"max_line_length": 30,
"num_lines": 14,
"path": "/New code/class8/PYA804.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n#title(),(全部字首大寫)\n#capitalize(),(第一個字首大宿)\n\"\"\"\nupper():將字串轉成大寫,并返回一個拷貝\nlower() :將字串轉成小写,并返回一個拷貝\ncapitalize() :將字串首字母大寫,并返回一個拷貝\ntitle() :將每个單字的首字母大写,并返回一個拷貝\nisupper() :判斷一個字串是否是大寫\nislower() :判斷一個字串是否是小寫\n\"\"\"\na=input()\nprint(a.upper())\nprint(a.capitalize())"
},
{
"alpha_fraction": 0.4415094256401062,
"alphanum_fraction": 0.505660355091095,
"avg_line_length": 21,
"blob_id": "618a11e76c6559ec70ce8823c98d228175db0bc7",
"content_id": "a00b72cadf9ff51877cef12f55ff5d9c9a96f029",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 281,
"license_type": "no_license",
"max_line_length": 40,
"num_lines": 12,
"path": "/1.第一類/PYD102.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\n#浮點數格式化輸出\na=eval(input())\nb=eval(input())\nc=eval(input())\nd=eval(input())\n\nprint(\"|{:>7.2f} {:>7.2f}|\".format(a,b))\nprint(\"|{:>7.2f} {:>7.2f}|\".format(c,d))\nprint(\"|{:<7.2f} {:<7.2f}|\".format(a,b))\nprint(\"|{:<7.2f} {:<7.2f}|\".format(c,d))\n\n"
},
{
"alpha_fraction": 0.5298507213592529,
"alphanum_fraction": 0.5597015023231506,
"avg_line_length": 21.33333396911621,
"blob_id": "5aab1b785ed8d7ab4e8c7f5f2e449f03a88f1515",
"content_id": "b27ea6ad2c226d496e3276ee58bd94f5dac8d278",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 134,
"license_type": "no_license",
"max_line_length": 34,
"num_lines": 6,
"path": "/New code/class1/PYA110.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nimport math\nn=int(input())\ns=int(input())\na=(n*s**2)/(4*math.tan(math.pi/n))\nprint(\"Area = {:.4f}\".format(a))\n"
},
{
"alpha_fraction": 0.42547425627708435,
"alphanum_fraction": 0.49593496322631836,
"avg_line_length": 18.473684310913086,
"blob_id": "6a3b58bd24527863005e9f22234c4112e8afaf9d",
"content_id": "17e3ec9b50d97e80c6a64aacc966f763a2637f30",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 387,
"license_type": "no_license",
"max_line_length": 64,
"num_lines": 19,
"path": "/4.第四類/PYD407.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 7 20:13:58 2018\n\n@author: user\n\n不定數迴圈-閏年判斷\n\n\"\"\"\n\nwhile True:\n year = int(input())\n if year == -9999:\n break\n else:\n if((year%4==0)and (year % 100 != 0 or year % 400 == 0)):\n print(\"{:} is a leap year.\".format(year))\n else:\n print(\"{:} is not a leap year.\".format(year))"
},
{
"alpha_fraction": 0.5159235596656799,
"alphanum_fraction": 0.5286624431610107,
"avg_line_length": 16.55555534362793,
"blob_id": "a97819cad5ed03b236f0de67efb254e401276ecc",
"content_id": "7f7071f63f7408ceedaa6abdbfcf8d68c5e48409",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 157,
"license_type": "no_license",
"max_line_length": 55,
"num_lines": 9,
"path": "/New code/class8/PYA802.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n#ord\na=input()\nsum=0\nfor i in a:\n ordi=ord(i)\n sum+=ordi\n print(\"ASCII code for '{:}' is {:}\".format(i,ordi))\n print(sum)"
},
{
"alpha_fraction": 0.4688427448272705,
"alphanum_fraction": 0.5252225399017334,
"avg_line_length": 15.095237731933594,
"blob_id": "8204f17917cf1d52f8d9fc01421f33ae546fbd85",
"content_id": "e0c9077d0d74c536f25728709a662ae822220193",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 349,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 21,
"path": "/3.第三類/PYD308.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 7 20:12:28 2018\n\n@author: user\n\n 迴圈位數加總\n\"\"\"\n\nfloor=int(input())\nsum=0\nfor i in range(floor):\n num=int(input())\n copy=num\n sum=0 \n while num > 0:\n r=int(num%10)\n sum=sum+r\n num=(num-r)/10\n \n print(\"Sum of all digits of {:} is {:}\".format(copy,sum))"
},
{
"alpha_fraction": 0.4424242377281189,
"alphanum_fraction": 0.5090909004211426,
"avg_line_length": 23.799999237060547,
"blob_id": "79c898fa20e7a29bab06251954ef92875fe21ff4",
"content_id": "031f55acec999f63eb139ef8c098d9708bf619fb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 495,
"license_type": "no_license",
"max_line_length": 56,
"num_lines": 20,
"path": "/New code/class2/PYA203.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n#num=int(input())\n#if (num % 4 ==0) and not(num % 100 == 0):\n# if (num % 4 == 0) or (num % 400 == 0):\n# print(num,\"is a leap year.\")\n#else:\n# print(num,\"is not a leap year.\")\n\n#year=int(input())\n#if year % 4 == 0:\n# if year % 400 ==0:\n# print(year,\"is a leap year.\")\n#elif year % 400 ==0:\n# \n\nyear=int(input())\nif (year % 400==0)or (year % 4 ==0 and year % 100 != 0):\n print(year,\"is a leap year.\")\nelse:\n print(year,\"is not a leap year.\")"
},
{
"alpha_fraction": 0.4481481611728668,
"alphanum_fraction": 0.4962962865829468,
"avg_line_length": 14.05555534362793,
"blob_id": "fdeca79482cc6da70651c18d30771f0bd57cc6b9",
"content_id": "bfa9e163c37055247c4cc7aa46875b14e36760a0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 284,
"license_type": "no_license",
"max_line_length": 44,
"num_lines": 18,
"path": "/6.第六類/PYD606.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 7 20:16:15 2018\n\n@author: user\n\n二維串列行列數\n\n\"\"\"\n\ndef compute(): \n r=int(input())\n c=int(input())\n for i in range(r):\n for j in range(c):\n print(\"{:4}\".format(j-i),end=\"\")\n print(\"\")\ncompute()"
},
{
"alpha_fraction": 0.4787878692150116,
"alphanum_fraction": 0.48181816935539246,
"avg_line_length": 12.199999809265137,
"blob_id": "9612cd8c067910052945059f76d937768a389eb8",
"content_id": "f5b0c71466e3ec0c94228c2184e216715bbe5e9f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 330,
"license_type": "no_license",
"max_line_length": 23,
"num_lines": 25,
"path": "/New code/class2/PYA204.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n#a=input()\n#b=input()\n#op=input()\n#\n#rea=a+op+b\n#result=eval(rea)\n#print(result)\n\na=eval(input())\nb=eval(input())\nop=input()\nif op == \"+\":\n ans=a+b\nelif op == \"-\":\n ans=a-b\nelif op == \"*\":\n ans=a*b\nelif op == \"/\":\n ans=a/b\nelif op == \"//\":\n ans=a//b\nelif op == \"**\":\n ans=a**b\nprint(ans)\n"
},
{
"alpha_fraction": 0.47058823704719543,
"alphanum_fraction": 0.5196078419685364,
"avg_line_length": 23,
"blob_id": "f06f1d4502ff6bed1fb6911911aa51e642581440",
"content_id": "3bcdfd30040a61298c2ca9c4327814316e6e487a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 408,
"license_type": "no_license",
"max_line_length": 36,
"num_lines": 17,
"path": "/New code/class4/PYA406.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nwhile True:\n height=eval(input())\n if (height==-9999):\n break\n weight=eval(input())\n h=height/100\n BMI=weight/(h**2)\n print(\"BMI: {:.2f}\".format(BMI))\n if BMI>=30:\n print(\"State: fat\")\n elif BMI >=25:\n print(\"State: over weight\")\n elif BMI >=18.5:\n print(\"State: normal\")\n elif BMI <18.5:\n print(\"State: under weight\")\n"
},
{
"alpha_fraction": 0.49881234765052795,
"alphanum_fraction": 0.5391923785209656,
"avg_line_length": 16.58333396911621,
"blob_id": "0764b703506de7feaf235f0887bce15fc24a8ee8",
"content_id": "856ef14cc75ec82856018bfd55dfd0d3afa3c6b9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 433,
"license_type": "no_license",
"max_line_length": 41,
"num_lines": 24,
"path": "/9.第九類/PYD907.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 7 20:19:41 2018\n\n@author: user\n\n詳細資料顯示\n\n\"\"\"\n\nfd=input()\nd_li=0\nd_w=0\nd_ch=0\nwith open(fd,\"r\",encoding=\"utf-8\") as fd:\n for data in fd:\n d_li+=1\n words=data.split()\n d_w=d_w+len(words)\n d_line=\"\".join(words)\n d_ch=d_ch+len(d_line)\nprint(\"{:} line(s)\".format(d_li))\nprint(\"{:} word(s)\".format(d_w))\nprint(\"{:} character(s)\".format(d_ch))"
},
{
"alpha_fraction": 0.49877750873565674,
"alphanum_fraction": 0.5501222610473633,
"avg_line_length": 18.5238094329834,
"blob_id": "db7d984c273857e56d1c999c073cdd8793f2fc81",
"content_id": "aab76fc81a1b3e9e28e1cfe1980bd5cb42cd8452",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 421,
"license_type": "no_license",
"max_line_length": 51,
"num_lines": 21,
"path": "/9.第九類/PYD910.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 7 20:19:58 2018\n\n@author: user\n\n學生基本資料\n\n\"\"\"\n\nf_name = \"read.dat\"\nwith open(f_name,\"r\",encoding=\"utf-8\") as fd:\n title=fd.readline()\n print(title)\n gender={\"1\":0,\"0\":0}\n for d in fd:\n print(d)\n da=d.split(\" \")\n gender[da[2]]+=1\nprint(\"Number of males: {:}\".format(gender[\"1\"]))\nprint(\"Number of females: {:}\".format(gender[\"0\"]))"
},
{
"alpha_fraction": 0.39613527059555054,
"alphanum_fraction": 0.4492753744125366,
"avg_line_length": 13.275861740112305,
"blob_id": "e3fd4d334b7bb9981e69f493321035d5304d84c7",
"content_id": "156b1eb6f65e53c8d9ad9cb341fc51beee0fadd3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 422,
"license_type": "no_license",
"max_line_length": 70,
"num_lines": 29,
"path": "/5.第五類/PYD509.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 7 20:15:28 2018\n\n@author: user\n\n最簡分數\n\n\"\"\"\n\ndef compute(p,q):\n f=max(p,q)\n j=0\n for i in range(1,f):\n if (p % i == 0) and (q % i == 0):\n j=i\n print(\"{:}/{:} + {:}/{:} = {:.0f}/{:.0f}\".format(x,y,m,n,p/j,q/j))\n\na=input()\nb=input()\nc=a.split(\",\")\nd=b.split(\",\")\nx=int(c[0])\ny=int(c[1])\nm=int(d[0])\nn=int(d[1])\np=x*n+y*m\nq=y*n\ncompute(p,q)\n"
},
{
"alpha_fraction": 0.4681818187236786,
"alphanum_fraction": 0.5545454621315002,
"avg_line_length": 11.277777671813965,
"blob_id": "e4544b1bb29eeb578640ae18a1f1b639a9ff0104",
"content_id": "adbbd2feb11d1706a4b77bf97c6cd3f2f6a8a9c8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 228,
"license_type": "no_license",
"max_line_length": 35,
"num_lines": 18,
"path": "/6.第六類/PYD603.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 7 20:15:59 2018\n\n@author: user\n\n數字排序\n\n\"\"\"\n\ndata=[]\nfor i in range(10):\n data.append(int(input()))\ndata.sort()\nd=data[-3:]\n#dm=d[::-1]\n#for i in dm:\nprint(d[2],d[1],d[0])"
},
{
"alpha_fraction": 0.45348837971687317,
"alphanum_fraction": 0.5155038833618164,
"avg_line_length": 14.235294342041016,
"blob_id": "78855df3860abba9fae14fc831598d32357e8ae8",
"content_id": "e794fa6de540cbfa8b0d822eea918eb8d44cf10d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 272,
"license_type": "no_license",
"max_line_length": 35,
"num_lines": 17,
"path": "/4.第四類/PYD410.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 7 20:14:20 2018\n\n@author: user\n\n繪製等腰三角形\n\n\"\"\"\n\nfool=int(input())\nfor i in range(1,fool+1):\n for j in range(fool-i):\n print(\" \",end=\"\")\n for k in range(i*2-1):\n print(\"*\",end=\"\")\n print(\"\")"
},
{
"alpha_fraction": 0.417391300201416,
"alphanum_fraction": 0.5,
"avg_line_length": 15.5,
"blob_id": "525bf8fb49dbf0006e8ef8b2e87e3fd133ecef63",
"content_id": "10ab8ab8f2f918844e2e9d1aebbb8aa2edc5cb1f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 236,
"license_type": "no_license",
"max_line_length": 62,
"num_lines": 14,
"path": "/3.第三類/PYD307.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 7 20:12:23 2018\n\n@author: user\n\n乘法表\n\"\"\"\n\nj=int(input())\nfor a in range(1,j+1):\n for b in range(1,j+1):\n print(\"{:<2d}* {:<2d}= {:<4d}\".format(b,a,b*a),end=\"\")\n print(\"\")"
},
{
"alpha_fraction": 0.47239264845848083,
"alphanum_fraction": 0.5214723944664001,
"avg_line_length": 14.571428298950195,
"blob_id": "3ea9a64770285d4dc6bd3a1e37e0b95f54084bc8",
"content_id": "28c0acb1ff928f7f3c3d41ac15e885f0cbd70bdb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 338,
"license_type": "no_license",
"max_line_length": 35,
"num_lines": 21,
"path": "/7.第七類/PYD704.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 7 20:17:07 2018\n\n@author: user\n\n集合條件判斷\n\"\"\"\n\na=set()\nwhile True:\n b=int(input())\n if b == -9999:\n break\n else:\n a.add(b)\n\nprint(\"Length: {:}\".format(len(a)))\nprint(\"Max: {:}\".format(max(a)))\nprint(\"Min: {:}\".format(min(a)))\nprint(\"Sum: {:}\".format(sum(a)))"
},
{
"alpha_fraction": 0.3477366268634796,
"alphanum_fraction": 0.4403292238712311,
"avg_line_length": 17.730770111083984,
"blob_id": "e687d4fb75aeb3b7cfa0117a4188a92e32bba810",
"content_id": "16a1b36ea54b6fb14d758f9c0a14948affd88bf6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 486,
"license_type": "no_license",
"max_line_length": 43,
"num_lines": 26,
"path": "/New code/class5/PYA506.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n#a=int(input())\n#b=int(input())\n#c=int(input())\n#x1=0\n#x2=0\n#x1=(-1*b+(b**2-(4*a*c))**0.5)/2*a\n#x1=(-1*b-(b**2-(4*a*c))**0.5)/2*a\n#print(x1)\n#print(x2)\n\ndef compute(a,b,c):\n \n x1=0\n x2=0\n if((b**2-4*a*c)<0):\n print(\"Your equation has no root.\")\n else: \n x1=((-1)*b+(b**2-4*a*c)**0.5)/(2*a)\n x2=((-1)*b-(b**2-4*a*c)**0.5)/(2*a)\n print(\"%.1f,\"%x1,x2) \n \na=int(input()) \nb=int(input())\nc=int(input()) \ncompute(a,b,c)"
},
{
"alpha_fraction": 0.4928131401538849,
"alphanum_fraction": 0.5420944690704346,
"avg_line_length": 12.189188957214355,
"blob_id": "33c68786cab94e6ec6ff690d1e3a2c38ff6bf5b2",
"content_id": "2575fe8b807c162e70220d2fddeb00b760de7dfa",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 495,
"license_type": "no_license",
"max_line_length": 35,
"num_lines": 37,
"path": "/7.第七類/PYD707.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 7 20:17:24 2018\n\n@author: user\n\n共同科目\n\"\"\"\n\nx=set()\ny=set()\nprint(\"Enter group X's subjects:\")\nwhile True:\n a=input()\n if a == \"end\":\n break\n else:\n x.add(a)\nprint(\"Enter group Y's subjects:\")\nwhile True:\n a=input()\n if a == \"end\":\n break\n else:\n y.add(a)\nz1=list(x|y)\nz2=list(x&y)\nz3=list(y-x)\nz4=list((x|y)-(x&y))\nz1.sort()\nz2.sort()\nz3.sort()\nz4.sort()\nprint(z1)\nprint(z2)\nprint(z3)\nprint(z4)"
},
{
"alpha_fraction": 0.4843205511569977,
"alphanum_fraction": 0.5540069937705994,
"avg_line_length": 12.090909004211426,
"blob_id": "ea1a5295e7371037a7230a330b25ede9cdfecb3e",
"content_id": "7bcda0835ec15200eeb1b8d8e37b3cbf3bccaa87",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 309,
"license_type": "no_license",
"max_line_length": 35,
"num_lines": 22,
"path": "/4.第四類/PYD408.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 7 20:14:05 2018\n\n@author: user\n\n奇偶數個數計算\n\n\"\"\"\n\na=[]\nodd=0#奇數\neven=0#偶數\nfor i in range(10):\n a.append(int(input()))\n if a[i] %2 ==0:\n even=even+1\n else:\n odd=odd+1\n\nprint(\"Even numbers:\",even)\nprint(\"Odd numbers:\",odd)"
},
{
"alpha_fraction": 0.44489794969558716,
"alphanum_fraction": 0.5061224699020386,
"avg_line_length": 25.22222137451172,
"blob_id": "ac4f36d5cc8c5e9cade4c31a3cabf66ecd07bb40",
"content_id": "e8f990ffc230bd6ff311f20e24f3a6026199c1b0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 245,
"license_type": "no_license",
"max_line_length": 53,
"num_lines": 9,
"path": "/New code/class4/PYA407.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nwhile True:\n year=int(input())\n if year==-9999:\n break\n elif (year%4==0)or (year%100!=0 and year%400==0):\n print(year,\"is a leap year.\")\n else:\n print(year,\"is not a leap year.\")\n \n"
},
{
"alpha_fraction": 0.5656565427780151,
"alphanum_fraction": 0.6565656661987305,
"avg_line_length": 18.866666793823242,
"blob_id": "ca2049973d298c89cb7681a5aaa9c061c534e536",
"content_id": "79f5399d9b75c64ad03afcdd76ee7792131f6297",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 297,
"license_type": "no_license",
"max_line_length": 37,
"num_lines": 15,
"path": "/New code/class6/PYA605.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n#data=[89,78,67,80,75,98,77,89,76,60]\ndata=[]\nfor i in range(10):\n data.append(int(input()))\nsum=0\nmaxnum=max(data)\nminnum=min(data)\nmax1=data.remove(maxnum)\nmin1=data.remove(minnum)\nfor a in data:\n sum=sum+a\naver=sum/len(data)\nprint(sum)\nprint(\"{:.2f}\".format(aver))"
},
{
"alpha_fraction": 0.5714285969734192,
"alphanum_fraction": 0.5978835821151733,
"avg_line_length": 22.75,
"blob_id": "47cfc7c67fa826ec9c14ddd64c649c2d0ce7068b",
"content_id": "1040ca6ed14d0b01719c6fb30c9d910a55f80319",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 189,
"license_type": "no_license",
"max_line_length": 39,
"num_lines": 8,
"path": "/New code/class1/PYA104.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nimport math\nr=eval(input())\na=r*r*math.pi\nPer=2*math.pi*r\nprint(\"Radius = {:.2f}\".format(r))\nprint(\"Perimeter = {:.2f}\".format(Per))\nprint(\"Area = {:.2f}\".format(a))"
},
{
"alpha_fraction": 0.4583333432674408,
"alphanum_fraction": 0.5520833134651184,
"avg_line_length": 12.785714149475098,
"blob_id": "970b001abe6008467d4c8833c33b2c9875b41c08",
"content_id": "33d12c1657a7f7a319562ba4015832e2a1c7aaa4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 200,
"license_type": "no_license",
"max_line_length": 35,
"num_lines": 14,
"path": "/8.第八類/PYD805.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 7 20:18:22 2018\n\n@author: user\n\n字串輸出\n\n\"\"\"\n\nval=input()\nprint(\"|{:<10}|\".format(val))\nprint(\"|{:^10}|\".format(val))\nprint(\"|{:>10}|\".format(val))"
},
{
"alpha_fraction": 0.46889951825141907,
"alphanum_fraction": 0.4784688949584961,
"avg_line_length": 22.33333396911621,
"blob_id": "687a21605305e281ddee9b98ad3443e247eea16f",
"content_id": "86053d7f1033d253459e17bf3a5204072175b330",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 209,
"license_type": "no_license",
"max_line_length": 45,
"num_lines": 9,
"path": "/New code/class6/PYA606.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\ndef compute(): \n row=int(input())\n col=int(input())\n for r in range (row):\n for c in range(col):\n print(\"{:4d}\".format(c-r),end=\"\")\n print()\ncompute()"
},
{
"alpha_fraction": 0.3080808222293854,
"alphanum_fraction": 0.4595959484577179,
"avg_line_length": 12.714285850524902,
"blob_id": "99131e70b10ffc036ae46fe24fd8660948ea41ba",
"content_id": "695edc8b7af4992efbd02d0824ad71cfb1f67b1d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 206,
"license_type": "no_license",
"max_line_length": 25,
"num_lines": 14,
"path": "/2.第二類/PYD207.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\n#折扣方案\n\na=int(input())\nif a>=8000:\n b=a*0.95 \nelif a>=18000:\n b=a*0.9 \nelif a>=28000:\n b=a*0.8 \nelif a>=38000:\n b=a*0.7\nprint(\"{:.1f}\".format(b)) \n\n"
},
{
"alpha_fraction": 0.5520833134651184,
"alphanum_fraction": 0.5833333134651184,
"avg_line_length": 15,
"blob_id": "18c8f05da344c1453ffa862c03fbfee7a67b3abc",
"content_id": "04c8c2b3e82b40aad2f64f1d857432b3d5e4506e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 96,
"license_type": "no_license",
"max_line_length": 23,
"num_lines": 6,
"path": "/New code/class9/PYA905.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nrd=open(\"data.txt\",\"r\")\ns=input()\ns1=rd.readline()\nprint(s1)\nrd.close()\n"
},
{
"alpha_fraction": 0.5083333253860474,
"alphanum_fraction": 0.5583333373069763,
"avg_line_length": 19.16666603088379,
"blob_id": "afd46d9722b7639e145d103242b42c8744d9d792",
"content_id": "d54481657c6f9d5ec4d098cfbfeb20d49191068a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 120,
"license_type": "no_license",
"max_line_length": 34,
"num_lines": 6,
"path": "/New code/class5/PYA109.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nimport math\ns=eval(input())\n\na=(5*s**2)/(4*math.tan(math.pi/5))\nprint(\"Area = {:.4f}\".format(a))"
},
{
"alpha_fraction": 0.49425286054611206,
"alphanum_fraction": 0.540229856967926,
"avg_line_length": 16.399999618530273,
"blob_id": "60ad1922c4dd37be8bcf17857366a399b5991b5e",
"content_id": "5fdf0d1ffc8d81a3872043f5331c8895fdc4aa14",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 360,
"license_type": "no_license",
"max_line_length": 36,
"num_lines": 20,
"path": "/7.第七類/PYD701.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 7 20:16:50 2018\n\n@author: user\n\n串列數組轉換\n\"\"\"\nda=[]\nwhile True:\n a=int(input())\n if a == -9999:\n break\n else:\n da.append(a)\nprint(tuple(da))\nprint(\"Length: {:}\".format(len(da)))\nprint(\"Max: {:}\".format(max(da)))\nprint(\"Min: {:}\".format(min(da)))\nprint(\"Sum: {:}\".format(sum(da)))\n"
},
{
"alpha_fraction": 0.6813187003135681,
"alphanum_fraction": 0.6813187003135681,
"avg_line_length": 14.333333015441895,
"blob_id": "cf7912321f188fcbd3900cfa1523baa80c6e4acf",
"content_id": "f4048e62c34d6008fe82f8bcf0d6510199f274cb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 91,
"license_type": "no_license",
"max_line_length": 34,
"num_lines": 6,
"path": "/New code/PYD707.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# TODO\n\nprint(\"Enter group X's subjects:\")\n# TODO\nprint(\"Enter group Y's subjects:\")\n# TODO"
},
{
"alpha_fraction": 0.465753436088562,
"alphanum_fraction": 0.4833659529685974,
"avg_line_length": 16.65517234802246,
"blob_id": "e3dd51761ad180b4b4c575a49fca0162f574be17",
"content_id": "f77640515cb022337391b58bcd067343227d4544",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 529,
"license_type": "no_license",
"max_line_length": 33,
"num_lines": 29,
"path": "/8.第八類/PYD810.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\n#最大值與最小值之差\n\na=int(input())\nfor i in range(a):\n n=input()\n data=[]\n da=n.split()\n \n damax=0\n damin=99999\n for j in range(len(da)):\n if damax < eval(da[j]):\n damax=eval(da[j])\n elif damin > eval(da[j]):\n damin=eval(da[j])\n daval=damax-damin\n print(\"{:.2f}\".format(daval))\n\n'''\na=int(input())\nfor i in range(a):\n b=input()\n b=b.split(\" \")\n c=list(map(eval,b))\n d=max(c)-min(c)\n print(\"{:.2f}\".format(d))\n '''"
},
{
"alpha_fraction": 0.4467005133628845,
"alphanum_fraction": 0.4517766535282135,
"avg_line_length": 16.909090042114258,
"blob_id": "b76c2e2f5044b80099c2a3d9cbfb5df7deac7726",
"content_id": "bb2e9bbf5598c9084ff5ddedac1372637f04ea72",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 197,
"license_type": "no_license",
"max_line_length": 28,
"num_lines": 11,
"path": "/New code/class5/PYA505.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\ndef compute():\n a=input()\n x=int(input())\n y=int(input())\n for i in range(y):\n for j in range(x):\n print(a,end=\" \")\n print()\n\ncompute()\n"
},
{
"alpha_fraction": 0.38461539149284363,
"alphanum_fraction": 0.4564102590084076,
"avg_line_length": 13.615385055541992,
"blob_id": "7e998dfbb84aa757e04a47a8aae8a09db74ae058",
"content_id": "7dfe6acee7b0c79a23627b1465feba41e928e9fd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 203,
"license_type": "no_license",
"max_line_length": 23,
"num_lines": 13,
"path": "/2.第二類/PYD206.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\n#等級判斷\n\na=int(input())\nif a>=80 and a<=100:\n print(\"A\") \nelif a>=70 and a<=79:\n print(\"B\") \nelif a>=60 and a<=69:\n print(\"C\") \nelse:\n print(\"F\") \n\n"
},
{
"alpha_fraction": 0.4953051507472992,
"alphanum_fraction": 0.5422534942626953,
"avg_line_length": 18.409090042114258,
"blob_id": "9e889542cc491fe838659ef26d9fc610169da9d6",
"content_id": "9feb092dd3b5e4b32ec95ec2eaa58c699c6f7c90",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 434,
"license_type": "no_license",
"max_line_length": 62,
"num_lines": 22,
"path": "/6.第六類/PYD607.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 7 20:16:21 2018\n\n@author: user\n\n成績計算\n\"\"\"\n\ndata=[]\na=[\"1st\",\"2nd\",\"3rd\"]\nfor i in range(3):\n data.append([])\n print(\"The {:} student:\".format(a[i]))\n for j in range(5):\n data[i].append(eval(input()))\n\n\nfor j in range(3):\n print(\"Student {:}\".format(j+1))\n print(\"#Sum {:}\".format(sum(data[j])))\n print(\"#Average {:.2f}\".format(sum(data[j])/len(data[j])))"
},
{
"alpha_fraction": 0.46621620655059814,
"alphanum_fraction": 0.5135135054588318,
"avg_line_length": 14.88888931274414,
"blob_id": "a6fc9a1fd8b2c8a341eaca09c74dda6e33f17b0a",
"content_id": "55785740ecb43176e190ff66ff99aef94c022d6b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 156,
"license_type": "no_license",
"max_line_length": 34,
"num_lines": 9,
"path": "/2.第二類/PYD203.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\n# 閏年判斷\n\na=int(input())\nif a%4==0 or a%400==0:\n print(a,\"is a leap year.\")\nelse:\n print(a,\"is not a leap year.\") \n\n"
},
{
"alpha_fraction": 0.5757575631141663,
"alphanum_fraction": 0.6010100841522217,
"avg_line_length": 18.899999618530273,
"blob_id": "6953b972c7142b9bcdd2f3d36ae7b391a79d66f3",
"content_id": "eb877cce531f36f4b77bc057f010d2aaf00aa516",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 210,
"license_type": "no_license",
"max_line_length": 39,
"num_lines": 10,
"path": "/1.第一類/PYD104.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\n#圓形面積計算\nimport math\nr=eval(input())\nPer=2*r*math.pi\na=r*r*math.pi\nprint(\"Radius = {:.2f}\".format(r))\nprint(\"Perimeter = {:.2f}\".format(Per))\nprint(\"Area = {:.2f}\".format(a))"
},
{
"alpha_fraction": 0.5897436141967773,
"alphanum_fraction": 0.5897436141967773,
"avg_line_length": 6.800000190734863,
"blob_id": "335bc158db4d39d84d414bb91df06ecbc5353864",
"content_id": "52a7b96600e5cb769e7f0ab69737615226b8a77c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 39,
"license_type": "no_license",
"max_line_length": 22,
"num_lines": 5,
"path": "/New code/class5/PYD104.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "\n# TODO\n\nradius = eval(input())\n\n# TODO"
},
{
"alpha_fraction": 0.4412955343723297,
"alphanum_fraction": 0.479082316160202,
"avg_line_length": 18.0256404876709,
"blob_id": "240e78d7cff8fddd3b2813be08a997cd86b897c1",
"content_id": "802d76e1b8c775075fe72919bc71ca7877871cf4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 941,
"license_type": "no_license",
"max_line_length": 40,
"num_lines": 39,
"path": "/New code/Python題目/Python code/spyder code/第一題,第二題.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n#吳驊涓/106111123\n#第一題\n\"\"\"\nnum=int(input(\"請輸入一個整數:\"))\n\nif (num % 3 == 0) and (num % 5 == 0):\n print(\"%d 是三且五的倍數\"%num)\nelif num % 3 == 0:\n print(\"%d 是三的倍數\"%num)\nelif num % 5 == 0:\n print(\"%d 是五的倍數\"%num)\nelse:\n print(\"%d 非三與五的倍數\"%num)\ninput()\n\"\"\"\nnum=int(input(\"請輸入一個整數:\"))\nif (num % 3) == 0:\n if (num % 5) == 0:\n print(\"%d 是三且五的倍數\"%num)\n else:\n print(\"%d 是三的倍數\"%num)\nelse:\n if (num % 5) == 0:\n print(\"%d 是五的倍數\"%num)\n else:\n print(\"%d 非三與五的倍數\"%num)\ninput()\n#---------------------------------------\n#第二題\na=int(input(\"請輸入一個整數:\"))\nb=int(input(\"請輸入一個大於a的整數:\"))\ntotal=0\nwhile a<=b:\n if a % 3 == 0:\n total = total+a\n a = a + 1\nprint(\"三倍數的總合為 %d\" %(total))\ninput()"
},
{
"alpha_fraction": 0.4595959484577179,
"alphanum_fraction": 0.5050504803657532,
"avg_line_length": 13.214285850524902,
"blob_id": "6178238fb216aecb2b9589a8f7030715af38017d",
"content_id": "a1474790f6195f15f0117e2eb547ed3c1bca696c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 206,
"license_type": "no_license",
"max_line_length": 24,
"num_lines": 14,
"path": "/New code/Python題目/Python code/spyder code/PYD305.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n#數字\nnum=eval(input())\nwhile num > 0:\n r=int(num % 10)\n print(r,end=\"\")\n num=(num-r)/10\n\n#字串\nnum=input()\ni=len(num)-1\nwhile i >= 0:\n print(num[i],end=\"\")\n i=i-1"
},
{
"alpha_fraction": 0.5151515007019043,
"alphanum_fraction": 0.521212100982666,
"avg_line_length": 13.727272987365723,
"blob_id": "70e51b1c6f16a8c345eff93250ac9a244e9e0bae",
"content_id": "b6952d927ac06b26c38d6843d3e2bb1ffc32b24b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 175,
"license_type": "no_license",
"max_line_length": 36,
"num_lines": 11,
"path": "/2.第二類/PYD210.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\n#三角形判斷\n\na=eval(input())\nb=eval(input())\nc=eval(input())\nif (a+b>c) and ( a+c>b) and (b+c>a):\n print(a+b+c)\nelse:\n print(\"Invalid\") \n\n"
},
{
"alpha_fraction": 0.41326531767845154,
"alphanum_fraction": 0.47789114713668823,
"avg_line_length": 25.727272033691406,
"blob_id": "d767fa879b20c78336657b1360d221433a3ee67b",
"content_id": "dcc1ebb8bcae406cf7b870730e2d55c951506aa1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 588,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 22,
"path": "/New code/class6/PYA608.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n#num=[[6,4,8],[39,12,3],[-3,49,33]]\nnum=[]\nfor i in range(3):\n num.append([])\n for j in range(3):\n num[i].append(int(input()))\nprint(num)\nmaxn1=num[0][0]\nmaxn2=num[0][0]\nfor a in range(3):\n for b in range(3):\n if maxn1 < num[a][b]:\n maxn1=num[a][b]\n a1=a\n b1=b\n elif maxn2 > num[a][b]:\n maxn2=num[a][b]\n a2=a\n b2=b\nprint(\"Index of the largest number {:} is: ({:}, {:})\".format(maxn1,a1,b1))\nprint(\"Index of the smallest number {:} is: ({:}, {:})\".format(maxn2,a2,b2))\n"
},
{
"alpha_fraction": 0.42258065938949585,
"alphanum_fraction": 0.4935483932495117,
"avg_line_length": 13.090909004211426,
"blob_id": "29a283e2fa419ed0757a53fab480439655579a98",
"content_id": "e814c4875773e4958d8d274a13e5567720bdf0b6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 324,
"license_type": "no_license",
"max_line_length": 35,
"num_lines": 22,
"path": "/6.第六類/PYD601.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 7 20:15:46 2018\n\n@author: user\n\n偶數索引值加總\n\n\"\"\"\ndata=[]\nsum=0\nfor i in range(12):\n data.append(int(input()))\nk=0\nfor j in data:\n print(\"{:>3}\".format(j),end=\"\")\n if (k % 2)==0:\n sum+=data[k]\n k+=1\n if (k % 3)==0:\n print(\"\") \nprint(sum)\n"
},
{
"alpha_fraction": 0.4748953878879547,
"alphanum_fraction": 0.5543932914733887,
"avg_line_length": 14.966666221618652,
"blob_id": "55fa2d50ccf430942787b4724cd0d009b6e51794",
"content_id": "6256bef2cef54e6ce2a1fd922e6a4493903d6c65",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 490,
"license_type": "no_license",
"max_line_length": 57,
"num_lines": 30,
"path": "/7.第七類/PYD702.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 7 20:16:56 2018\n\n@author: user\n\n 數組合併排序\n\"\"\"\n\nb1=()\nb2=()\nprint(\"Create tuple1:\")\nwhile True:\n a1=int(input())\n if a1 == -9999:\n break\n else:\n b1=b1+(a1,)\nprint(\"Create tuple2:\")\nwhile True:\n a2=int(input())\n if a2 == -9999:\n break\n else:\n b2=b2+(a2,)\n\nprint(\"Combined tuple before sorting: {:}\".format(b1+b2))\nt=list(b1+b2)\nt.sort()\nprint(\"Combined list after sorting: {:}\".format(t))"
},
{
"alpha_fraction": 0.5341615080833435,
"alphanum_fraction": 0.6024844646453857,
"avg_line_length": 17,
"blob_id": "dc7c975278f3aa71cc705dc9610c7f6d80c049a1",
"content_id": "737b5bca292cf712fde12ed8077a2aa5d39a0d36",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 161,
"license_type": "no_license",
"max_line_length": 34,
"num_lines": 9,
"path": "/New code/class5/PYA106.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nx=eval(input())\ny=eval(input())\nz=eval(input())\nsec=x*60+y\nhr=3600\nkm=float(hr/sec*3)\nft=float(km/1.6)\nprint(\"Speed = {:.1f}\".format(ft))"
},
{
"alpha_fraction": 0.48311689496040344,
"alphanum_fraction": 0.5246753096580505,
"avg_line_length": 17.380952835083008,
"blob_id": "f544d457fb04cf4cba1bbea8e203bc758d131ecd",
"content_id": "e5f5c7c8fb205944a50a0c5708fe3e00dc8b6d86",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 395,
"license_type": "no_license",
"max_line_length": 49,
"num_lines": 21,
"path": "/9.第九類/PYD909.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 7 20:19:53 2018\n\n@author: user\n\n聯絡人資料\n\n\"\"\"\n\nwith open(\"data.dat\",\"w\",encoding=\"utf-8\") as fd:\n for i in range(5):\n fd.write(input())\n fd.write(\"\\n\")\n if i <4:\n fd.write(\"\\n\")\n\nwith open(\"data.dat\",\"r\",encoding=\"utf-8\") as fd:\n print(\"The content of \\\"data.dat\\\":\")\n data=fd.read()\n print(data)"
},
{
"alpha_fraction": 0.5669782161712646,
"alphanum_fraction": 0.6074766516685486,
"avg_line_length": 15.947368621826172,
"blob_id": "13bb836d7ad44a1807e7fbadb202278c55c18a59",
"content_id": "e117085ad01b2cd86e1fd03629c29ab0bd0bd066",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 333,
"license_type": "no_license",
"max_line_length": 45,
"num_lines": 19,
"path": "/9.第九類/PYD905.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 7 20:19:31 2018\n\n@author: user\n\n字串資料刪除\n\n\"\"\"\n\nf_name = input()\nstring = input()\nwith open(f_name,\"r\",encoding=\"utf-8\") as fd:\n data=fd.read()\n print(\"=== Before the deletion\")\n print(data)\n data=data.replace(string,\"\")\nprint(\"=== After the deletion\")\nprint(data)"
},
{
"alpha_fraction": 0.467576801776886,
"alphanum_fraction": 0.4744027256965637,
"avg_line_length": 23.41666603088379,
"blob_id": "8f4f7629ae71f6db09c309b6be91af6815c2590d",
"content_id": "cf4a230811235d50adc0bb5715c27834a149e6b3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 293,
"license_type": "no_license",
"max_line_length": 54,
"num_lines": 12,
"path": "/New code/class6/PYA606-1.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\ndef compute(): \n rclist=[]\n row=int(input())\n col=int(input())\n for r in range (row):\n rclist.append([])\n for c in range(col):\n rclist[r].append(c-r)\n print(\"{:4d}\".format(rclist[r][c]),end=\"\")\n print()\ncompute()\n"
},
{
"alpha_fraction": 0.3614034950733185,
"alphanum_fraction": 0.41228070855140686,
"avg_line_length": 21.760000228881836,
"blob_id": "f7f087df978a4ae9689f0f9fdc168eca63b27827",
"content_id": "b54ee05883f200170775febe89b010bdaa0d0e05",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 570,
"license_type": "no_license",
"max_line_length": 170,
"num_lines": 25,
"path": "/New code/class5/PYA507.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\ndef compute():\n num=int(input())\n if num == 1:\n print(\"Not Prime\")\n elif num == 2:\n print(\"Prime\")\n elif num < 0:\n print(\"Not Prime\")\n elif (num-1)%2==0:\n print(\"Prime\")\n else:\n print(\"Not Prime\")\ncompute()\n\n\n\n\n\n\n\n# if (num % 2 !=0) and (num % 3 !=0) and (num % 5 !=0) and (num % 7 !=0) and (num % 9 !=0) and (num % 11 !=0) and (num % 13 !=0) and (num % 17 !=0) and (num % 19 !=0):\n# print(\"Prime\")\n# else:\n# print(\"Not Prime\")\n\n"
},
{
"alpha_fraction": 0.49127182364463806,
"alphanum_fraction": 0.5162094831466675,
"avg_line_length": 21.27777862548828,
"blob_id": "99f069192b05f447a0c55174d46efae81a2a479f",
"content_id": "d29f123d6dc8e7023ef599ed5fe7084dfd8c411a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 401,
"license_type": "no_license",
"max_line_length": 46,
"num_lines": 18,
"path": "/New code/class6/PYA607.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nnum=[]\ns=[]\na=[]\ntext=[\"1st\",\"2nd\",\"3rd\"]\nfor i in range(3):\n num.append([])\n print(\"The {:s} student:\".format(text[i]))\n for j in range(5):\n num[i].append(eval(input()))\nfor k in range(3):\n st=sum(num[k])\n aver=st/5\n s.append(st)\n a.append(aver)\n print(\"Student\",k+1)\n print(\"#Sum {:}\".format(s[k]))\n print(\"#Average {:.2f}\".format(a[k]))\n"
},
{
"alpha_fraction": 0.523809552192688,
"alphanum_fraction": 0.5936508178710938,
"avg_line_length": 16.55555534362793,
"blob_id": "5fbc5b860367774c77073f6bef5bbb062d08daa2",
"content_id": "059c7172dd7d366f3a18113bc6fe54011609aa82",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 323,
"license_type": "no_license",
"max_line_length": 40,
"num_lines": 18,
"path": "/3.第三類/PYD309.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 7 20:12:33 2018\n\n@author: user\n\n存款總額\n\"\"\"\n\nmoney=eval(input())\nAmount=eval(input())\nMonth=int(input())\nprint('%s \\t %s' % ('Month', 'Amount'))\ntotal=0.0\nfor i in range(1,Month+1):\n total=money+money*Amount/1200\n money=total\n print('%3d \\t %.2f' % (i, total))"
},
{
"alpha_fraction": 0.42134830355644226,
"alphanum_fraction": 0.516853928565979,
"avg_line_length": 10.933333396911621,
"blob_id": "b77885f20f0e2f87e6315b9e0a14047b83aa766f",
"content_id": "c9e2876d02c0700fc57ebcfcb164b43e6bc1b900",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 186,
"license_type": "no_license",
"max_line_length": 35,
"num_lines": 15,
"path": "/3.第三類/PYD305.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 7 20:12:12 2018\n\n@author: user\n\n數字反轉\n\"\"\"\n\na=int(input())\nwhile a > 0:\n r=int(a%10)\n print(r,end=\"\")\n a=(a-r)/10\nprint(\"\")"
},
{
"alpha_fraction": 0.44075828790664673,
"alphanum_fraction": 0.5450236797332764,
"avg_line_length": 13.133333206176758,
"blob_id": "42ead938a54f52d0a124f6d75a817582f41c058a",
"content_id": "f9f6a4cb40072c35614a6ce280a3021e68e38901",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 223,
"license_type": "no_license",
"max_line_length": 35,
"num_lines": 15,
"path": "/3.第三類/PYD310.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 7 20:12:54 2018\n\n@author: user\n\n迴圈公式計算\n\"\"\"\n\na=int(input())\nsum=0\nfor i in range(2,a+1):\n ans=1/((i-1)**0.5+(i**0.5))\n sum=sum+ans\nprint(\"{:.4f}\".format(sum))"
},
{
"alpha_fraction": 0.5540540814399719,
"alphanum_fraction": 0.5675675868988037,
"avg_line_length": 13.800000190734863,
"blob_id": "b731aa47c3e18b52a13b71dfddd47629f776c9a9",
"content_id": "af123aa32e30c1f559d390e85232be0e4a00dc01",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 82,
"license_type": "no_license",
"max_line_length": 23,
"num_lines": 5,
"path": "/8.第八類/PYD804.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n#大寫轉換\na=input()\nprint(a.upper())\nprint(a.title())\n"
},
{
"alpha_fraction": 0.40460526943206787,
"alphanum_fraction": 0.44736841320991516,
"avg_line_length": 21.846153259277344,
"blob_id": "cb25b3024dd56040ce78d95d60095f85d327b67c",
"content_id": "faeac7601106959d43b1f47f2213a62150c2de6f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 304,
"license_type": "no_license",
"max_line_length": 47,
"num_lines": 13,
"path": "/New code/class2/PYA202.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\na=int(input())\nif (a % 3 == 0):\n if (a % 5 == 0):\n print(a,\"is a multiple of 3 and 5.\") \n else:\n print(a,\"is a multiple of 3.\")\nelse:\n if (a % 5 == 0):\n print(a,\"is a multiple of 5.\")\n\n else:\n print(a,\"is not a multiple of 3 or 5.\")\n\n \n\n"
},
{
"alpha_fraction": 0.5416666865348816,
"alphanum_fraction": 0.5833333134651184,
"avg_line_length": 15.166666984558105,
"blob_id": "675e03531a8e28eace758d3fa2e60597985ff023",
"content_id": "c5e1aff6b8274718e6da00b3821019ac98ff16b0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 96,
"license_type": "no_license",
"max_line_length": 24,
"num_lines": 6,
"path": "/New code/class3/PYA306.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nnum=int(input())\nsum=1\nfor i in range(1,num+1):\n sum=sum*i\nprint(sum)"
},
{
"alpha_fraction": 0.5851755738258362,
"alphanum_fraction": 0.713914155960083,
"avg_line_length": 84.33333587646484,
"blob_id": "1a74a21e8a671278902f19c88ccc3d655e0e453b",
"content_id": "614f47bfe7b671be7c3a5e4ffcd0c7c6db7c8efa",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "INI",
"length_bytes": 817,
"license_type": "no_license",
"max_line_length": 624,
"num_lines": 9,
"path": "/New code/Python題目/Python code/spyder code/.spyproject/workspace.ini",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "[workspace]\nrestore_data_on_startup = True\nsave_data_on_exit = True\nsave_history = True\nsave_non_project_files = False\n\n[main]\nversion = 0.1.0\nrecent_files = ['C:\\\\Users\\\\Administrator\\\\.spyder-py3\\\\temp.py', 'C:\\\\Users\\\\Administrator\\\\Desktop\\\\Python code\\\\123\\\\PYD401.py', 'C:\\\\Users\\\\Administrator\\\\Desktop\\\\Python實作(USB)\\\\考試\\\\0321_106111123\\\\106111123\\\\s_106111123.py', 'C:\\\\Users\\\\Administrator\\\\Desktop\\\\Python code\\\\123\\\\PYD207.py', 'C:\\\\Users\\\\Administrator\\\\Desktop\\\\Python實作(USB)\\\\考試\\\\a_20180418\\\\a_0418\\\\S_106111123_1.py', 'C:\\\\Users\\\\Administrator\\\\Desktop\\\\Python code\\\\123\\\\PYD404.py', 'C:\\\\Users\\\\Administrator\\\\Desktop\\\\Python實作(USB)\\\\考試\\\\上機小考_03_1070327\\\\PYD103.py', 'C:\\\\Users\\\\Administrator\\\\Desktop\\\\Python實作(USB)\\\\考試\\\\上機小考_03_1070327\\\\PYD206.py']\n\n"
},
{
"alpha_fraction": 0.4114002585411072,
"alphanum_fraction": 0.46592316031455994,
"avg_line_length": 21.41666603088379,
"blob_id": "d4d186b041c6cd52f527c72a16dbc3269ee6ed7d",
"content_id": "fc788ec32e93f58dc06358b812d1ab2356e09b16",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 807,
"license_type": "no_license",
"max_line_length": 52,
"num_lines": 36,
"path": "/New code/class6/PYA609-1.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\na=[]\nb=[]\nc=[[],[]]\nprint(\"Enter matrix 1:\")\nfor i in range (2):\n a.append([])\n for j in range(2):\n print(\"[{:}, {:}]: \".format(i+1,j+1),end=\"\")\n a[i].append(int(input()))\nprint(\"Enter matrix 2:\")\nfor i in range (2):\n b.append([])\n for j in range(2):\n print(\"[{:}, {:}]: \".format(i+1,j+1),end=\"\")\n b[i].append(int(input()))\nprint(\"Matrix 1:\")\nfor i in range(2):\n for j in range(2):\n print(a[i][j],end=\" \")\n print()\nprint(\"Matrix 2:\")\nfor i in range(2):\n for j in range(2):\n print(b[i][j],end=\" \")\n print()\nprint(\"Sum of 2 matrices:\")\n\nc[0][0]=a[0][0]+b[0][0]\nc[0][1]=a[0][1]+b[0][1]\nc[1][0]=a[1][0]+b[1][0]\nc[1][1]=a[1][1]+b[1][1]\nfor i in range(2):\n for j in range(2):\n print(c[i][j],end=\"\")\n print(\"\")\n"
},
{
"alpha_fraction": 0.3962264060974121,
"alphanum_fraction": 0.4609164297580719,
"avg_line_length": 12.285714149475098,
"blob_id": "024d4bc1e3f5c30763b06a658c0f9f9cb224f6b5",
"content_id": "d5b2ddf75bb4d82756a50d8ef4549fa9064afffc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 383,
"license_type": "no_license",
"max_line_length": 36,
"num_lines": 28,
"path": "/4.第四類/PYD403.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 7 20:13:26 2018\n\n@author: user\n\n倍數總和計算\n\n\"\"\"\n\na=int(input())\nb=int(input())\nc=[]\nj=0\nsum=0\nwhile a <= b:\n if (a % 4 == 0) or (a % 9 == 0):\n c.append(a)\n sum=sum+a\n a=a+1\nfor i in c:\n j=j+1\n print(\"{:<4}\".format(i),end=\"\")\n if j % 10 == 0:\n print(\"\")\nprint(\"\") \nprint(len(c)) \nprint(sum)"
},
{
"alpha_fraction": 0.5065789222717285,
"alphanum_fraction": 0.5723684430122375,
"avg_line_length": 14.100000381469727,
"blob_id": "7b61519d0251f714634db2b8f9ad9987e513fa4b",
"content_id": "fca022a8482d224afb3ccee5b688959168d3920a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 164,
"license_type": "no_license",
"max_line_length": 34,
"num_lines": 10,
"path": "/1.第一類/PYD106.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\n#公里英哩換算\na=eval(input())\nb=eval(input())\nc=eval(input())\nhr=3600\nkm=hr/(a*60+b)*c\nft=km/1.6\nprint(\"Speed = {:.1f}\".format(ft))\n\n"
},
{
"alpha_fraction": 0.4673366844654083,
"alphanum_fraction": 0.5477386713027954,
"avg_line_length": 11.5,
"blob_id": "5d167246a8490a4bcb7cbe87a40392d8329b1492",
"content_id": "cd785be3a3676f2a3899510ff559586e20908c72",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 215,
"license_type": "no_license",
"max_line_length": 35,
"num_lines": 16,
"path": "/4.第四類/PYD402.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 7 20:13:20 2018\n\n@author: user\n\n不定數迴圈-最小值\n\"\"\"\n\na=[]\nwhile True:\n num=eval(input())\n if num == 9999:\n break\n a.append(num)\nprint(min(a))"
},
{
"alpha_fraction": 0.4651162922382355,
"alphanum_fraction": 0.5038759708404541,
"avg_line_length": 15.125,
"blob_id": "6326e8f0d55ae03bbbd29b5d4d9cbdd295f7cbe7",
"content_id": "bdaaea0c783446f1fbe8bb44088f368ed41e4084",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 129,
"license_type": "no_license",
"max_line_length": 23,
"num_lines": 8,
"path": "/New code/class4/PYA402.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\na=[]\nwhile True:\n num=eval(input())\n if num == 9999:\n break\n a.append(num)\nprint(min(a))\n"
},
{
"alpha_fraction": 0.49070632457733154,
"alphanum_fraction": 0.535315990447998,
"avg_line_length": 13.210526466369629,
"blob_id": "8ab897df61c848ed56c6869e8f48388f11ef9b24",
"content_id": "e50d7a7501f6538ae2cfc2acc21e5fe8d477e27e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 277,
"license_type": "no_license",
"max_line_length": 38,
"num_lines": 19,
"path": "/5.第五類/PYD501.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 7 20:14:33 2018\n\n@author: user\n\n訊息顯示\n\n\"\"\"\n\ndef compute():\n a=input()\n b=input()\n c=input()\n print(\"Department: {:}\".format(a))\n print(\"Student ID: {:}\".format(b))\n print(\"Name: {:}\".format(c))\n\ncompute()"
},
{
"alpha_fraction": 0.451977401971817,
"alphanum_fraction": 0.4689265489578247,
"avg_line_length": 13.75,
"blob_id": "c0551444aeaf76ac8e6c4e3bd32e656c7201a7c1",
"content_id": "a42d577082276f245fbb992a8cd8f7d24e3dcac0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 177,
"license_type": "no_license",
"max_line_length": 23,
"num_lines": 12,
"path": "/New code/class5/PYA503.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\ndef compute():\n x=int(input())\n y=int(input())\n sum=0\n while x <=y:\n sum=sum+x\n x=x+1\n print(sum)\n return sum\n\ncompute()\n"
},
{
"alpha_fraction": 0.4106280207633972,
"alphanum_fraction": 0.4746376872062683,
"avg_line_length": 18.279069900512695,
"blob_id": "1fe360424a6ce95e63ccd1fd5ad36e717e4f247f",
"content_id": "85a155c9455a44fcf9d0c7b1add71d205c70738e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 836,
"license_type": "no_license",
"max_line_length": 50,
"num_lines": 43,
"path": "/6.第六類/PYD609.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 7 20:16:31 2018\n\n@author: user\n\n矩陣相加\n\"\"\"\n\na=[]\nb=[]\nprint(\"Enter matrix 1:\")\nfor i in range(2):\n a.append([])\n for j in range(2):\n print(\"[%d, %d]: \" % (i+1, j+1), end = '')\n a[i].append(int(input()))\n \nprint(\"Enter matrix 2:\")\nfor i in range(2):\n b.append([])\n for j in range(2):\n print(\"[%d, %d]: \" % (i+1, j+1), end = '')\n b[i].append(int(input()))\n\nprint(\"Matrix 1:\")\nfor i in range(2):\n for j in range(2):\n print(a[i][j],end=\" \")\n print(\"\")\nprint(\"Matrix 2:\")\nfor i in range(2):\n for j in range(2):\n print(b[i][j],end=\" \")\n print(\"\")\n\nprint(\"Sum of 2 matrices:\")\na1=a[0][0]+b[0][0]\na2=a[0][1]+b[0][1]\na3=a[1][0]+b[1][0]\na4=a[1][1]+b[1][1]\nprint(\"{:} {:} \".format(a1,a2))\nprint(\"{:} {:} \".format(a3,a4))"
},
{
"alpha_fraction": 0.4871794879436493,
"alphanum_fraction": 0.5705128312110901,
"avg_line_length": 11,
"blob_id": "15b77a8cc88e3361da5365b1fd70a51aef3e3638",
"content_id": "3b9b93d02dda46433fcbfcbcba341cec11b28b2c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 166,
"license_type": "no_license",
"max_line_length": 35,
"num_lines": 13,
"path": "/8.第八類/PYD803.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 7 20:18:07 2018\n\n@author: user\n\n倒數三個詞\n\n\"\"\"\nval=input()\ndata=val.split(\" \")\nda=data[-3:]\nprint(\" \".join(da))\n"
},
{
"alpha_fraction": 0.41692790389060974,
"alphanum_fraction": 0.4482758641242981,
"avg_line_length": 19.571428298950195,
"blob_id": "25aa639ff3db3acab82653ca68e02f4a981cc6d9",
"content_id": "3df3a3e41bad7f3fb22a12d758507f1af7a0b517",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 319,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 14,
"path": "/New code/class3/PYA308.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nfloor=int(input())\nsum=0\nfor i in range(floor):\n num=int(input())\n copy=num\n sum=0 \n while num > 0:\n r=int(num%10)\n sum=sum+r\n num=(num-r)/10\n \n print(\"Sum of all digits of {:} is {:}\".format(copy,sum))\n# if num ==13h:\n \n \n \n"
},
{
"alpha_fraction": 0.4336283206939697,
"alphanum_fraction": 0.43510323762893677,
"avg_line_length": 15.949999809265137,
"blob_id": "a10f5a394853b355e4b633d6f6a7bf76a5f27008",
"content_id": "cbb7c289a23a511f3f89c8ca0eba36af99fe2ddc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 678,
"license_type": "no_license",
"max_line_length": 46,
"num_lines": 40,
"path": "/New code/class7/PYA708.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\ndic={}\nwhile True:\n print(\"key:\",end=\"\")\n key=input()\n if key == \"end\":\n break\n else:\n print(\"value:\",end=\"\")\n value=input()\n dic[key]=value\n\na=dic.keys()\nb=dic.values()\n\nfor i in a:\n print(\"{:}: {}\".format(i,dic[i]))\n \n\n\n\n\n#key=[]\n#value=[]\n#while True:\n# print(\"key:\",end=\"\")\n# i=input()\n# if i == \"end\":\n# break\n# else:\n# print(\"value:\",end=\"\")\n# j=input()\n# if i in key:\n# key.append(i)\n# if j in value:\n# value.append(j)\n#key.sort()\n#value.sort()\n#for k in range(len(key)):\n# print(\"{:}: {:}\".format(key[k],value[k]))\n"
},
{
"alpha_fraction": 0.3287292718887329,
"alphanum_fraction": 0.3922652006149292,
"avg_line_length": 16.210525512695312,
"blob_id": "bad749049f8d6c0185e81cb9596d514901a3650a",
"content_id": "a6d52da0167b1f38448357124183021f43e8a6d2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 372,
"license_type": "no_license",
"max_line_length": 62,
"num_lines": 19,
"path": "/8.第八類/PYD808.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\n#社會安全碼\n\na=input()\nal=\"1,2,3,4,5,6,7,8,9,0\"\nb=a.split(\"-\")\nx=0\ny=0\nif (len(b[0]) == 3) and (len(b[1]) == 2) and (len(b[2]) == 4):\n for i in range(len(a)):\n if a[i] in al:\n x+=1\n else:\n y+=1\nif (x == 9) and (y==2):\n print(\"Valid SSN\")\nelse:\n print(\"Invalid SSN\")\n \n \n\n \n \n\n"
},
{
"alpha_fraction": 0.45046234130859375,
"alphanum_fraction": 0.4768824279308319,
"avg_line_length": 21.969696044921875,
"blob_id": "1f96461aa12c9b078f1090017a4f0d5bedcd03fb",
"content_id": "aa361f9d74a38bc28499bb636dabc847353aff9e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 757,
"license_type": "no_license",
"max_line_length": 52,
"num_lines": 33,
"path": "/New code/class6/PYA609.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\na=[]\nb=[]\nc=[]\nprint(\"Enter matrix 1:\")\nfor i in range (2):\n a.append([])\n for j in range(2):\n print(\"[{:}, {:}]: \".format(i+1,j+1),end=\"\")\n a[i].append(int(input()))\nprint(\"Enter matrix 2:\")\nfor i in range (2):\n b.append([])\n for j in range(2):\n print(\"[{:}, {:}]: \".format(i+1,j+1),end=\"\")\n b[i].append(int(input()))\nprint(\"Matrix 1:\")\nfor i in range(2):\n for j in range(2):\n print(a[i][j],end=\" \")\n print()\nprint(\"Matrix 2:\")\nfor i in range(2):\n for j in range(2):\n print(b[i][j],end=\" \")\n print()\nprint(\"Sum of 2 matrices:\")\nfor i in range(2):\n c.append([])\n for j in range(2):\n c[i].append(a[i][j]+b[i][j])\n print(c[i][j],end=\" \")\n print()"
},
{
"alpha_fraction": 0.4406779706478119,
"alphanum_fraction": 0.5042372941970825,
"avg_line_length": 14.800000190734863,
"blob_id": "86dbf0cf83c52017c1a91f29f7bb9a9fb532b565",
"content_id": "1d2ae31f4db2c24ebaa63f8842f1f757b37e095a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 244,
"license_type": "no_license",
"max_line_length": 50,
"num_lines": 15,
"path": "/9.第九類/PYD901.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 7 20:19:08 2018\n\n@author: user\n\n成績資料\n\n\"\"\"\n\nwith open(\"write.txt\",\"w\",encoding=\"utf-8\") as fd:\n for i in range(5):\n fd.write(input())\n if i != 4:\n fd.write(\"\\n\")"
},
{
"alpha_fraction": 0.49854227900505066,
"alphanum_fraction": 0.5131195187568665,
"avg_line_length": 21.933332443237305,
"blob_id": "5729885a5945252780e8c0e0632babd896c39ce4",
"content_id": "5fc064c2f5ef33bb1be92d2f14eaf8a37e1c2768",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 351,
"license_type": "no_license",
"max_line_length": 49,
"num_lines": 15,
"path": "/9.第九類/PYD903.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\n#成績資料\n\nwith open(\"data.txt\",\"a\",encoding=\"utf-8\") as fd:\n fd.write(\"\\n\")\n for i in range(5):\n fd.write(input())\n if i != 4:\n fd.write(\"\\n\")\nwith open(\"data.txt\",\"r\",encoding=\"utf-8\") as fd:\n print(\"Append completed!\")\n print(\"Content of \\\"data.txt\\\":\")\n s=fd.read()\n print(s)"
},
{
"alpha_fraction": 0.5104602575302124,
"alphanum_fraction": 0.5648535490036011,
"avg_line_length": 18.58333396911621,
"blob_id": "2d572b2cd4406d365629bea544ca30dd690ce631",
"content_id": "50f03e0891b42a01b8ebd08f2612df3ca7688cf5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 239,
"license_type": "no_license",
"max_line_length": 26,
"num_lines": 12,
"path": "/New code/class7/PYA704.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nnum=[]\nwhile True:\n num1=int(input())\n if num1 == -9999:\n break\n num.append(num1)\nnum1=set(num)\nprint(\"Length:\",len(num1))\nprint(\"Max:\",max(num1))\nprint(\"Min:\",min(num1))\nprint(\"Sum:\",sum(num1))\n \n"
},
{
"alpha_fraction": 0.4512820541858673,
"alphanum_fraction": 0.4769230782985687,
"avg_line_length": 16.545454025268555,
"blob_id": "3ccef4d37dbd74c96e95f8bcfff8b18f0a92099a",
"content_id": "43d9029f67fb97bb3c4fdbf3f7339a9fc85d65ae",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 207,
"license_type": "no_license",
"max_line_length": 26,
"num_lines": 11,
"path": "/1.第一類/PYD108.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\n#座標距離計算\na=eval(input())\nb=eval(input())\nc=eval(input())\nd=eval(input())\ne=((a-c)**2+(b-d)**2)**0.5\nprint(\"(\",a,\",\",b,\")\")\nprint(\"(\",c,\",\",d,\")\")\nprint(\"Distance = \",e)\n\n\n"
},
{
"alpha_fraction": 0.3951219618320465,
"alphanum_fraction": 0.42601627111434937,
"avg_line_length": 14.79487133026123,
"blob_id": "8db536b20b50de7eea0cd8851333b67c31c06be3",
"content_id": "0da4d79f6730a1acee6b76de2a0dca3b65d91959",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 615,
"license_type": "no_license",
"max_line_length": 37,
"num_lines": 39,
"path": "/New code/class4/PYA403.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\na=int(input())\nb=int(input())\nc=[]\nj=[]\nsum=0\nwhile a <= b:\n if (a % 4 == 0) or (a % 9 == 0):\n c.append(a)\n sum=sum+a\n a=a+1\nfor i in c:\n print(\"{:<4d}\".format(i),end=\"\")\n j.append(i)\n if len(j)==10:\n print(\"\")\nprint(\"\") \nprint(len(c)) \nprint(sum)\n\n\n#a=int(input())\n#b=int(input())\n#c=[]\n#j=[]\n#sum=0\n#while a <= b:\n# if (a % 4 == 0) or (a % 9 == 0):\n# c.append(a)\n# sum=sum+a\n# a=a+1\n#for i in c:\n# print(\"{:<4d}\".format(i),end=\"\")\n# j.append(i)\n# if len(j)==10:\n# print(\"\")\n#print(\"\") \n#print(len(c)) \n#print(sum)"
},
{
"alpha_fraction": 0.508571445941925,
"alphanum_fraction": 0.5885714292526245,
"avg_line_length": 10.733333587646484,
"blob_id": "45c48cebdc0b0427889f6a483061dc52efbd8c62",
"content_id": "a3cac330493a8f406f4f900217787cb0b674b075",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 187,
"license_type": "no_license",
"max_line_length": 35,
"num_lines": 15,
"path": "/3.第三類/PYD301.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 7 20:11:31 2018\n\n@author: user\n\n迴圈整數連加\n\"\"\"\n\na=int(input())\nb=int(input())\nsum=0\nfor i in range(a,b+1):\n sum=sum+i\nprint(sum)"
},
{
"alpha_fraction": 0.3701067566871643,
"alphanum_fraction": 0.41992881894111633,
"avg_line_length": 17.733333587646484,
"blob_id": "c9ff2958c74445bc47cc293355148051a72c89fd",
"content_id": "de0ae54ca2d1ff41285eb722f9b2180c311374fc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 281,
"license_type": "no_license",
"max_line_length": 23,
"num_lines": 15,
"path": "/New code/class4/PYA405.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nwhile True:\n num=int(input())\n if num == -9999:\n break\n elif num >=90:\n print(\"A\")\n elif num >=80:\n print(\"B\")\n elif num >=70:\n print(\"C\")\n elif num >=60:\n print(\"D\")\n elif num >=0:\n print(\"E\")\n"
},
{
"alpha_fraction": 0.37404578924179077,
"alphanum_fraction": 0.4198473393917084,
"avg_line_length": 12.666666984558105,
"blob_id": "3ade7b80905fbae9a1c27ec2cfc4905c6e2b4202",
"content_id": "84919b4acf693aad7379ab37772dede498ba9409",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 131,
"license_type": "no_license",
"max_line_length": 23,
"num_lines": 9,
"path": "/New code/class3/PYA304.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\na=int(input())\ni=1\nsum=0\nwhile i <= a:\n if (i % 5 == 0):\n sum=sum+i\n i=i+1\nprint(sum) \n"
},
{
"alpha_fraction": 0.42966753244400024,
"alphanum_fraction": 0.4731457829475403,
"avg_line_length": 13,
"blob_id": "a42073796af00d6bef1fda3e0667797c18916f07",
"content_id": "25425e4f3657103ee9da571c8d1309e33c94845f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 395,
"license_type": "no_license",
"max_line_length": 35,
"num_lines": 28,
"path": "/5.第五類/PYD507.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 7 20:15:16 2018\n\n@author: user\n\n質數\n\n\"\"\"\n\ndef compute(x):\n i=2\n while i < x: \n if (x%i)==0:\n return False\n i+=1\n return True\nx=int(input())\nif x <= 1:\n print(\"Not Prime\")\nelif x == 2:\n print(\"Prime\")\nelse:\n r=compute(x)\n if r ==True:\n print(\"Prime\")\n else:\n print(\"Not Prime\")"
},
{
"alpha_fraction": 0.454849511384964,
"alphanum_fraction": 0.4949832856655121,
"avg_line_length": 12.636363983154297,
"blob_id": "50c51af9e7e231557e5c2eb9f361557c2595c0da",
"content_id": "b29598803d24f8ee9095f12aa757d0dc16f6b3b2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 307,
"license_type": "no_license",
"max_line_length": 36,
"num_lines": 22,
"path": "/7.第七類/PYD709.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 7 20:17:38 2018\n\n@author: user\n\n詞典排序\n\n\"\"\"\n\nc={}\nwhile True:\n key = input(\"Key: \")\n if key ==\"end\":\n break\n else:\n value = input(\"Value: \")\n c[key]=value\nd=list(c.keys())\nd.sort()\nfor i in d:\n print(\"{:}: {:}\".format(i,c[i]))"
},
{
"alpha_fraction": 0.5350877046585083,
"alphanum_fraction": 0.5877193212509155,
"avg_line_length": 14.266666412353516,
"blob_id": "5331d4d96cce6103e6bb031180a9fba0517e8579",
"content_id": "bdac9d5c70261b4dd02614f6b13743e50ccad2e2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 236,
"license_type": "no_license",
"max_line_length": 46,
"num_lines": 15,
"path": "/8.第八類/PYD807.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 7 20:18:34 2018\n\n@author: user\n\n字串加總\n\n\"\"\"\n\nval=input()\nval=val.split(\" \")\nvl=list(map(eval,val))\nprint(\"Total = {:}\".format(sum(vl)))\nprint(\"Average = {:}\".format(sum(vl)/len(vl)))"
},
{
"alpha_fraction": 0.5491803288459778,
"alphanum_fraction": 0.5819672346115112,
"avg_line_length": 17.846153259277344,
"blob_id": "dbd0d37fe4f345a23f096b60d54943d332a094de",
"content_id": "eb8fcf66ebea50e42364b809b41f3f89055c1dd1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 244,
"license_type": "no_license",
"max_line_length": 25,
"num_lines": 13,
"path": "/New code/class7/PYA701-1.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nnum=[]\nwhile True:\n num1=int(input())\n if num1 == -9999:\n break\n num.append(num1)\nnumn=tuple(num)\nprint(numn)\nprint(\"Length:\",len(num))\nprint(\"Max:\",max(num))\nprint(\"Min:\",min(num))\nprint(\"Sum:\",sum(num))"
},
{
"alpha_fraction": 0.44628098607063293,
"alphanum_fraction": 0.4793388545513153,
"avg_line_length": 13.5625,
"blob_id": "cf5efdb715a3ed739a4179e66a62760e705babd8",
"content_id": "31cc157ea2945aed928d7ad90d6054fd823286f3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 250,
"license_type": "no_license",
"max_line_length": 37,
"num_lines": 16,
"path": "/8.第八類/PYD809.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\n#密碼規則\n\na=input()\nx=0\ny=0\nfor i in a:\n if i.isdigit():\n x+=1\n elif i.isalpha():\n y+=1\nif (len(a)>=8) and (x>=1) and (y>=1):\n print(\"Valid password\")\nelse:\n print(\"Invalid password\")\n \n"
},
{
"alpha_fraction": 0.3552631437778473,
"alphanum_fraction": 0.5131579041481018,
"avg_line_length": 14.266666412353516,
"blob_id": "6a42a03ae38c5c0175ab61ffa5f0e00e189a5798",
"content_id": "b4f27c0c74ff29a4bde0bfeb32a436cc846313c1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 228,
"license_type": "no_license",
"max_line_length": 23,
"num_lines": 15,
"path": "/New code/Python題目/Python code/spyder code/PYD207.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\n#1\ntotal=0.0\na=eval(input())\nif a >= 38000.0:\n total = a * 0.7\nelif a >= 28000.0:\n total = a * 0.8\nelif a >= 18000.0:\n total = a * 0.9\nelif a >= 8000.0:\n total = a * 0.95\nprint(total)\ninput()"
},
{
"alpha_fraction": 0.5263158082962036,
"alphanum_fraction": 0.5639097690582275,
"avg_line_length": 15.375,
"blob_id": "108285e0b5276d5a3d9fec00f0ce259ec78d9ee2",
"content_id": "8ebfcb0b90459ff30d43b0b6002cf531865faafb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 149,
"license_type": "no_license",
"max_line_length": 33,
"num_lines": 8,
"path": "/1.第一類/PYD109.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\n#正五邊形面積計算\n\nimport math\na=eval(input())\nb=(5*a*a)/(4*math.tan(math.pi/5))\nprint(\"Area = {:.4f}\".format(b))\n\n\n"
},
{
"alpha_fraction": 0.5602605938911438,
"alphanum_fraction": 0.6091205477714539,
"avg_line_length": 13.666666984558105,
"blob_id": "3a0df8348dde2c90c019fee0bb716cb62e251c9f",
"content_id": "61acd2d860f006df84f351dbd9fb350031efcbe5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 315,
"license_type": "no_license",
"max_line_length": 43,
"num_lines": 21,
"path": "/6.第六類/PYD605.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 7 20:16:09 2018\n\n@author: user\n\n成績計算\n\"\"\"\n\ndata=[]\n\nfor i in range(10):\n num=eval(input())\n data.append(num)\ndmax=max(data)\ndmin=min(data)\ndata.remove(dmax)\ndata.remove(dmin)\n \nprint(\"{:}\".format(sum(data)))\nprint(\"{:.2f}\".format(sum(data)/len(data)))"
},
{
"alpha_fraction": 0.489130437374115,
"alphanum_fraction": 0.554347813129425,
"avg_line_length": 9.882352828979492,
"blob_id": "ec2f85784467aac51833d18fb32554e504296b3d",
"content_id": "acc02df1976269ba6d073dbf168c1897260e7fdd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 192,
"license_type": "no_license",
"max_line_length": 35,
"num_lines": 17,
"path": "/5.第五類/PYD504.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 7 20:14:59 2018\n\n@author: user\n\n次方計算\n\n\"\"\"\n\ndef compute(a,b):\n ans=a**b\n print(ans)\n \na=int(input())\nb=int(input())\ncompute(a,b)"
},
{
"alpha_fraction": 0.5860306620597839,
"alphanum_fraction": 0.5877342224121094,
"avg_line_length": 17.90322494506836,
"blob_id": "8eed9545b279d9f991fff300f3b66505a096a03c",
"content_id": "38079d6546f2ffe00cd32bd87347c8aa67cb373a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 607,
"license_type": "no_license",
"max_line_length": 57,
"num_lines": 31,
"path": "/New code/class7/PYA707.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n#x=[\"Math\",\"Literature\",\"English\",\"History\",\"Geography\"]\n#y=[\"Math\",\"Literature\",\"Chinese\",\"Physical\",\"Chemistry\"]\nx=[]\ny=[]\nprint(\"Enter group X's subjects:\")\nwhile True:\n na=input()\n if na == \"end\":\n break\n x.append(na)\nprint(\"Enter group Y's subjects:\") \nwhile True:\n na=input()\n if na == \"end\":\n break\n y.append(na)\nxset=set(x)\nyset=set(y)\na=list(xset | yset)#全部\nb=list(xset & yset)#共同\nc=list(yset - xset)#Y有X沒有\nd=list((xset | yset)-(xset & yset))#都沒有\na.sort()\nb.sort()\nc.sort()\nd.sort()\nprint(a)\nprint(b)\nprint(c)\nprint(d)\n\n"
},
{
"alpha_fraction": 0.5186722278594971,
"alphanum_fraction": 0.5726141333580017,
"avg_line_length": 13.235294342041016,
"blob_id": "cdbb7e4b99055efdaffa3750730cafc0f4038c57",
"content_id": "6d8d94ddfd6458fb178e59ba3ecd4f480b9585e2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 249,
"license_type": "no_license",
"max_line_length": 60,
"num_lines": 17,
"path": "/8.第八類/PYD802.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 7 20:18:01 2018\n\n@author: user\n\n字元對應\n\n\"\"\"\n\nval=input()\nsum=0\nfor i in range(len(val)):\n ordv=ord(val[i])\n sum+=ordv\n print(\"ASCII code for '{:}' is {:}\".format(val[i],ordv))\nprint(sum)"
},
{
"alpha_fraction": 0.5458715558052063,
"alphanum_fraction": 0.5504587292671204,
"avg_line_length": 20.899999618530273,
"blob_id": "dbd44f17c7f5d8fda129a71b8643f73ab53673b4",
"content_id": "0e657eb967d646eeb39f10589c0a47a68a6c9342",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 218,
"license_type": "no_license",
"max_line_length": 41,
"num_lines": 10,
"path": "/New code/class5/PYA501.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\ndef compute():\n dep=input()\n stid=input()\n name=input()\n print(\"Department: {:}\".format(dep))\n print(\"Student ID: {:}\".format(stid))\n print(\"Name: {:}\".format(name))\n\ncompute()"
},
{
"alpha_fraction": 0.46125462651252747,
"alphanum_fraction": 0.509225070476532,
"avg_line_length": 18.071428298950195,
"blob_id": "300b0e93629e5112faf0453d07361e275b0b9855",
"content_id": "f05a77ba4ad3b64dbf7ae258d9d7a8a846c15462",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 279,
"license_type": "no_license",
"max_line_length": 44,
"num_lines": 14,
"path": "/2.第二類/PYD202.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\n#倍數判斷\n\na=int(input())\nif a%3==0:\n if a%5==0:\n print(a,\"is a multiple of 3 and 5.\")\n else:\n print(a,\"is a multiple of 3.\")\nelif a%5==0:\n print(a,\"is a multiple of 5.\") \nelse:\n print(a,\"is not a multiple of 3 or 5.\") \n\n"
},
{
"alpha_fraction": 0.4655172526836395,
"alphanum_fraction": 0.4719827473163605,
"avg_line_length": 16.846153259277344,
"blob_id": "266a447c44c650640b6c38f9ad23512f9cacb1e9",
"content_id": "3ea516cc1afd0bb56f68f290463b32e5876d1eb6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 472,
"license_type": "no_license",
"max_line_length": 36,
"num_lines": 26,
"path": "/7.第七類/PYD708.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "\n# -*- coding: utf-8 -*-\n#詞典合併\na={}\nb={}\nprint(\"Create dict1:\")\nwhile True:\n key = input(\"Key: \")\n if key == \"end\":\n break\n else:\n value = input(\"Value: \")\n a[key]=value\nprint(\"Create dict2:\")\nwhile True:\n key = input(\"Key: \")\n if key == \"end\":\n break\n else:\n value = input(\"Value: \")\n a[key]=value \nfor i in b:\n a[i]=b[i]\nc=list(a.keys())\ncs=c.sort()\nfor i in c:\n print(\"{:}: {:}\".format(i,a[i]))"
},
{
"alpha_fraction": 0.4780600368976593,
"alphanum_fraction": 0.5127021074295044,
"avg_line_length": 14.5,
"blob_id": "0540b25bcd19bee563a4bc41e20e0d438811f8b2",
"content_id": "65c51d2d8ba2f1e35716b5671e5d8d0502ae6304",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 445,
"license_type": "no_license",
"max_line_length": 44,
"num_lines": 28,
"path": "/9.第九類/PYD908.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 7 20:19:47 2018\n\n@author: user\n\n單字次數計算\n\n\"\"\"\n\nf_name = input()\nn = int(input())\nnl=[]\nwcom={}\nwith open(f_name,\"r\",encoding=\"utf-8\")as fd:\n data=fd.read()\n data=data.split()\n for k in data:\n if k in wcom:\n wcom[k]+=1\n else:\n wcom[k]=1\nfor k in wcom:\n if wcom[k]==n:\n nl.append(k)\nnl.sort()\nfor i in range(len(nl)):\n print(nl[i])"
},
{
"alpha_fraction": 0.4628099203109741,
"alphanum_fraction": 0.5206611752510071,
"avg_line_length": 11.149999618530273,
"blob_id": "847744eeb6885ea81ba8f99626d710cb0d308aea",
"content_id": "eb93fd1fdf4df23559a1751ec09b020c64796d05",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 254,
"license_type": "no_license",
"max_line_length": 35,
"num_lines": 20,
"path": "/7.第七類/PYD703.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 7 20:17:02 2018\n\n@author: user\n\n數組條件判斷\n\n\"\"\"\n\nd=[]\nwhile True:\n a=input()\n if a == \"end\":\n break\n else:\n d.append(a)\nprint(tuple(d))\nprint(tuple(d[:3]))\nprint(tuple(d[-3:]))"
},
{
"alpha_fraction": 0.5610687136650085,
"alphanum_fraction": 0.5877862572669983,
"avg_line_length": 23.952381134033203,
"blob_id": "4aead0bb8fc916db2759ddce58870ba4c030b700",
"content_id": "878e05370393c9e17585d6286c865e6828dffa8d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 524,
"license_type": "no_license",
"max_line_length": 52,
"num_lines": 21,
"path": "/New code/class4/PYA409.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nNami=0\nChopper=0\nscrap=0\nfor i in range(5):\n poll=int(input())\n if poll ==1:\n Nami=Nami+1\n elif poll ==2:\n Chopper=Chopper+1\n else:\n scrap=scrap+1\n print(\"Total votes of No.1: Nami = \",Nami)\n print(\"Total votes of No.2: Chopper = \",Chopper)\n print(\"Total null votes = \",scrap)\nif Nami < Chopper:\n print(\"=> No.2 Chopper wins the election.\")\nelif Nami > Chopper:\n print(\"=> No.1 Nami wins the election.\")\nelse:\n print(\"=> No one wins the election.\")\n"
},
{
"alpha_fraction": 0.6029411554336548,
"alphanum_fraction": 0.6411764621734619,
"avg_line_length": 16.049999237060547,
"blob_id": "35e4a4aeab8e3b12c19b7958402a67aa7573a108",
"content_id": "ab83fc37b1b788c0b18de7e04406a1c56446c5ef",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 352,
"license_type": "no_license",
"max_line_length": 45,
"num_lines": 20,
"path": "/9.第九類/PYD906.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 7 20:19:36 2018\n\n@author: user\n\n字串資料取代\n\n\"\"\"\n\nf_name = input()\nstr_old = input()\nstr_new = input()\nwith open(f_name,\"r\",encoding=\"utf-8\") as fd:\n data=fd.read()\nprint(\"=== Before the replacement\")\nprint(data)\ndata=data.replace(str_old,str_new)\nprint(\"=== After the replacement\")\nprint(data)"
},
{
"alpha_fraction": 0.5796344876289368,
"alphanum_fraction": 0.5953002572059631,
"avg_line_length": 21.52941131591797,
"blob_id": "c95b77ad011da176da1cc18e086267436386c930",
"content_id": "f5c195205a29770b78b974d199929fcfb8e3daae",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 383,
"license_type": "no_license",
"max_line_length": 41,
"num_lines": 17,
"path": "/New code/class9/PYA907.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nf_name=input()\nrd=open(f_name,\"r\",encoding=\"UTF-8\")\nn_line=0\nn_word=0\nn_ch=0\nfor eachline in rd:\n n_line+=1\n words=eachline.strip(\"\\n\").split(\" \")\n n_word=n_word+len(words)\n newline=\"\".join(words)\n n_ch=n_ch+len(newline)\nrd.close()\n\nprint(\"{:} line(s)\".format(n_line))\nprint(\"{:} word(s)\".format(n_word))\nprint(\"{:} character(s)\".format(n_ch))\n"
},
{
"alpha_fraction": 0.5108225345611572,
"alphanum_fraction": 0.5346320271492004,
"avg_line_length": 13.935483932495117,
"blob_id": "06d4ccfe37dd9dff7119fa03ae613dbfe87f698f",
"content_id": "8e14ec4a67b5298069a95d05987e324c0cac7486",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 462,
"license_type": "no_license",
"max_line_length": 39,
"num_lines": 31,
"path": "/New code/class5/PYA107.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\na=[]\nfor i in range(1,6):\n a.append(eval(input()))\n\nsum=0.0\nfor i in range(5):\n sum=sum+a[i]\n\naver=sum/len(a)\nfor i in a:\n print(i,end=\" \")\nprint(\"\")\nprint(\"Sum =\",sum)\nprint(\"Average =\",aver)\n\n#a=[]\n#for i in range(1,6):\n# a.append(eval(input()))\n#\n#sum=0\n#for i in a:\n# sum=sum+i\n#\n#aver=sum/len(a)\n#\n#for i in a:\n# print(i,end=\" \")\n#print(\"\")\n#print(\"Sum = {:.1f}\".format(sum))\n#print(\"Average = {:.1f}\".format(aver))"
},
{
"alpha_fraction": 0.529411792755127,
"alphanum_fraction": 0.570135772228241,
"avg_line_length": 19.18181800842285,
"blob_id": "03063e095c1d1a7388d186030d27cf0dc314cc9a",
"content_id": "8d9a58f98fd226713ae9fe235811a41287ba5330",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 221,
"license_type": "no_license",
"max_line_length": 38,
"num_lines": 11,
"path": "/New code/class7/PYA703.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nnum=[]\nwhile True:\n num1=input()\n num.append(num1)\n if (len(num)>5) and num1 == \"end\":\n break\nnum1=num.pop(-1)\nprint(tuple(num))\nprint(tuple(num[:3]))\nprint(tuple(num[len(num)-3:]))"
},
{
"alpha_fraction": 0.4117647111415863,
"alphanum_fraction": 0.4156862795352936,
"avg_line_length": 11.449999809265137,
"blob_id": "6d5f046973cf0c2b3792631c598ca43d5c0c8923",
"content_id": "ab3e5e454300f0da042ded604b4e53493acc442b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 263,
"license_type": "no_license",
"max_line_length": 23,
"num_lines": 20,
"path": "/2.第二類/PYD204.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\n#算術運算\n\na=eval(input())\nb=eval(input())\nc=input()\nif c == \"+\":\n ans=a+b\nelif c == \"-\":\n ans=a-b\nelif c == \"*\":\n ans=a*b\nelif c == \"/\":\n ans=a/b\nelif c == \"//\":\n ans=a//b\nelif c == \"**\":\n ans=a**b\nprint(ans) \n\n\n"
},
{
"alpha_fraction": 0.5348837375640869,
"alphanum_fraction": 0.5627906918525696,
"avg_line_length": 23,
"blob_id": "3a955e9bb7afc297af0b6a4e50dc640e288517c4",
"content_id": "b2975764a372b820755882b61df8c9ced6ffeff6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 215,
"license_type": "no_license",
"max_line_length": 39,
"num_lines": 9,
"path": "/New code/class1/PYA105.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nh=eval(input())\nw=eval(input())\nPer=2*(h+w)\na=h*w\nprint(\"Height = {:.2f}\".format(h))\nprint(\"Width = {:.2f}\".format(w))\nprint(\"Perimeter = {:.2f}\".format(Per))\nprint(\"Area = {:.2f}\".format(a))"
},
{
"alpha_fraction": 0.5246636867523193,
"alphanum_fraction": 0.5560538172721863,
"avg_line_length": 19.090909957885742,
"blob_id": "683b77b5f40eea69f0f0d7e84955f580ad769d1e",
"content_id": "3cc09dad6c32df95293024c71733bd3b3c74f70d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 235,
"license_type": "no_license",
"max_line_length": 37,
"num_lines": 11,
"path": "/1.第一類/PYD105.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\n#矩形面積計算\na=eval(input())\nb=eval(input())\nc=a*2+b*2\nd=a*b\nprint(\"Height = {:.2f}\".format(a))\nprint(\"Width = {:.2f}\".format(b))\nprint(\"Perimeter = {:.2f}\".format(c))\nprint(\"Area = {:.2f}\".format(d))\n\n\n"
},
{
"alpha_fraction": 0.5057618618011475,
"alphanum_fraction": 0.5544174313545227,
"avg_line_length": 20.135135650634766,
"blob_id": "310ebf94fb16694f86ef4d39f4d2d5db0bf64dee",
"content_id": "bbd66f284fe8d7051bb4841f18523e1a650e56ea",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 781,
"license_type": "no_license",
"max_line_length": 47,
"num_lines": 37,
"path": "/New code/class3/PYA309.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n#money=int(input())\n#Amount=eval(input())\n#Month=int(input())\n#a=[]\n#b=[]\n#for i in range(1,6):\n# a.append(i)\n#print(\"{:5s}{:>11s}\".format(\"Month\",\"Amount\"))\n#for i in range(5):\n# money=money+money*Amount/1200\n# b.append(money)\n# print(\"{:^5d}{:12.2f}\".format(a[i],b[i]))\n\nm=int(input())\np=eval(input())\nmon=int(input())\ni=1\ntotal=0\nprint(\"%s %s\"%(\"Month\",\"Amount\"))\nwhile i < mon+1 :\n total=m+m*p/1200\n m=total\n print(\"%3d %.2f\"%(i,total))\n i=i+1\n#money=int(input())\n#Amount=eval(input())\n#Month=int(input())\n#a=[]\n#b=[]\n#for i in range(1,6):\n# a.append(i)\n#print(\"{:<9s}{:>7s}\".format(\"Month\",\"Amount\"))\n#for i in range(5):\n# money=money+money*Amount/1200\n# b.append(money)\n# print(\"{:^5d}{:>12.2f}\".format(a[i],b[i]))"
},
{
"alpha_fraction": 0.5840579867362976,
"alphanum_fraction": 0.613043487071991,
"avg_line_length": 24.55555534362793,
"blob_id": "78bf1be74804478e0d8aa683cdef2d9ac76eda47",
"content_id": "d9823f62d4ae87768d9c57e57f9a28810660bf3d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 698,
"license_type": "no_license",
"max_line_length": 74,
"num_lines": 27,
"path": "/9.第九類/PYD904.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 7 20:19:26 2018\n\n@author: user\n\n資料計算\n\n\"\"\"\n\ndata=[]\nname=[]\nweight=[]\nheight=[]\nwith open(\"read.txt\",\"r\",encoding=\"utf-8\") as file:\n for data in file:\n print(data)\n data=data.split(\" \")\n name.append(data[0])\n height.append(int(data[1]))\n weight.append(int(data[2]))\nh_in=height.index(max(height))\nw_in=weight.index(max(weight))\nprint(\"Average height: {:.2f}\".format(sum(height)/len(height)))\nprint(\"Average weight: {:.2f}\".format(sum(weight)/len(weight)))\nprint(\"The tallest is {:} with {:.2f}cm\".format(name[h_in],height[h_in]))\nprint(\"The heaviest is {:} with {:.2f}kg\".format(name[w_in],weight[w_in])) "
},
{
"alpha_fraction": 0.5306553840637207,
"alphanum_fraction": 0.5983086824417114,
"avg_line_length": 17.230770111083984,
"blob_id": "4decd150bde51b2623214cf26ea508834d39493a",
"content_id": "bf3f27d6166a1543a307f25b7ba1d588040591e4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 487,
"license_type": "no_license",
"max_line_length": 64,
"num_lines": 26,
"path": "/7.第七類/PYD705.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 7 20:17:12 2018\n\n@author: user\n\n子集合與超集合\n\"\"\"\n\ns1=set()\ns2=set()\ns3=set()\nprint(\"Input to set1:\")\nfor i in range(5):\n n=int(input())\n s1.add(n)\nprint(\"Input to set2:\")\nfor i in range(3):\n n=int(input())\n s2.add(n)\nprint(\"Input to set3:\")\nfor i in range(9):\n n=int(input())\n s3.add(n)\nprint(\"set2 is subset of set1: {:}\".format(s2.issubset(s1)))\nprint(\"set3 is superset of set1: {:}\".format(s3.issuperset(s1)))"
},
{
"alpha_fraction": 0.4514925479888916,
"alphanum_fraction": 0.4962686598300934,
"avg_line_length": 13.1578950881958,
"blob_id": "b4e47eae2f794f99bdf91a42c2a1246395648022",
"content_id": "21e0c23dadf491523c9d6d795369f69be4583ff0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 284,
"license_type": "no_license",
"max_line_length": 35,
"num_lines": 19,
"path": "/5.第五類/PYD505.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 7 20:15:05 2018\n\n@author: user\n\n依參數格式化輸出\n\n\"\"\"\n\ndef compute():\n a=input()\n x=int(input())\n y=int(input())\n for i in range(y):\n for j in range(x):\n print(a,end=\" \")\n print(\"\")\ncompute()"
},
{
"alpha_fraction": 0.42084941267967224,
"alphanum_fraction": 0.47297295928001404,
"avg_line_length": 15.21875,
"blob_id": "1be3db79e940c92558c2af80b0b8aa51ce5635bc",
"content_id": "74641eb9fe1a4063ebd2b641b7ede06382da1786",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 526,
"license_type": "no_license",
"max_line_length": 44,
"num_lines": 32,
"path": "/6.第六類/PYD610.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 7 20:16:38 2018\n\n@author: user\n\n平均溫度\n\n\"\"\"\n\nd=[]\nh=0\nl=999999\nsum=0\nk=0\nfor i in range(4):\n print(\"Week {:}:\".format(i+1))\n d.append([])\n for j in range(3):\n k+=1\n print(\"Day {:}:\".format(j+1),end=\"\")\n num=eval(input())\n d[i].append(num)\n sum+=num\n if num > h:\n h=num\n if num < l:\n l= num\n\nprint(\"Average: {:.2f}\".format(sum/k))\nprint(\"Highest: {:}\".format(h))\nprint(\"Lowest: {:}\".format(l))"
},
{
"alpha_fraction": 0.42990654706954956,
"alphanum_fraction": 0.5747663378715515,
"avg_line_length": 16.5,
"blob_id": "0e8d98a3a2c21cc34fb8ba7f9ac2cb4b850b5862",
"content_id": "707be10f2078e3b722c3b9dae22f12a5eee40fcb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 214,
"license_type": "no_license",
"max_line_length": 23,
"num_lines": 12,
"path": "/New code/class2/PYA207.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n#num=0.0\nnum=eval(input())\nif num >= 38000:\n price=num*0.7\nelif num >= 28000:\n price=num*0.8\nelif num >= 18000:\n price=num*0.9\nelif num >= 8000:\n price=num*0.95\nprint(price) \n"
},
{
"alpha_fraction": 0.4971751272678375,
"alphanum_fraction": 0.5649717450141907,
"avg_line_length": 9.470588684082031,
"blob_id": "c9a0d6a53c427d5e5b72194ac19df66a7b2aafa6",
"content_id": "08d9c8e089784135e0f12ddf623fbc17faa1f363",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 181,
"license_type": "no_license",
"max_line_length": 35,
"num_lines": 17,
"path": "/5.第五類/PYD502.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 7 20:14:41 2018\n\n@author: user\n\n乘積\n\n\"\"\"\n\ndef compute(x,y):\n ans=x*y\n print(ans)\nx=int(input())\ny=int(input())\n\ncompute(x,y)"
},
{
"alpha_fraction": 0.5766423344612122,
"alphanum_fraction": 0.6058394312858582,
"avg_line_length": 16.1875,
"blob_id": "57f0be27c45f5c65da6d0604d822d876e060cfd8",
"content_id": "2effd473ef6237b515d21350d5944ca2d27e0259",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 274,
"license_type": "no_license",
"max_line_length": 25,
"num_lines": 16,
"path": "/New code/class7/PYA701.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nnum=[]\nwhile True:\n num1=int(input())\n if num1 == -9999:\n break\n num.append(num1)\nnumn=tuple(num)\nmaxn=max(num)\nminn=min(num)\nsumn=sum(num)\nprint(numn)\nprint(\"Length:\",len(num))\nprint(\"Max:\",maxn)\nprint(\"Min:\",minn)\nprint(\"Sum:\",sumn)"
},
{
"alpha_fraction": 0.5335968136787415,
"alphanum_fraction": 0.5375494360923767,
"avg_line_length": 11.699999809265137,
"blob_id": "0d250daf7e05c0bad261130efcfea74c401c6bdf",
"content_id": "80638f01aa393a4fdf2a829013dd13abcfd16eb3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 253,
"license_type": "no_license",
"max_line_length": 23,
"num_lines": 20,
"path": "/New code/class5/PYA504.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\ndef compute():\n\n value=a**b\n print(value)\n return value\n\na=int(input())\nb=int(input())\ncompute()\n\n#def compute():\n# a=int(input())\n# b=int(input())\n# value=a**b\n# print(value)\n# return value\n#\n#\n#compute()"
},
{
"alpha_fraction": 0.516968309879303,
"alphanum_fraction": 0.5429864525794983,
"avg_line_length": 16.019229888916016,
"blob_id": "34c86b7d39e91a5d1ef1be8acf95ad1a931f7978",
"content_id": "d024965fcbf3bf287a218ad2f13c2bba916f70d9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 884,
"license_type": "no_license",
"max_line_length": 39,
"num_lines": 52,
"path": "/New code/class1/PYA107.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n#a=[]\n#for i in range(5):\n# a.append(eval(input()))\n#\n#sum=0.0\n#for j in range(5):\n# sum=sum+a[j]\n#aver=sum/len(a)\n#print(a[0],a[1],a[2],a[3],a[4])\n#print(\"Sum =\",sum)\n#print(\"Average =\",aver)\n\na=eval(input())\nb=eval(input())\nc=eval(input())\nd=eval(input())\ne=eval(input())\nsum=a+b+c+d+e\naver=sum/5\nprint(a,b,c,d,e)\nprint(\"Sum = {:.1f}\".format(sum))\nprint(\"Average = {:.1f}\".format(aver))\n#a=[]\n#for i in range(1,6):\n# a.append(eval(input()))\n#\n#sum=0\n#for i in a:\n# sum=sum+i\n#\n#aver=sum/len(a)\n#\n#for i in a:\n# print(i,end=\" \")\n#print(\"\")\n#print(\"Sum = {:.1f}\".format(sum))\n#print(\"Average = {:.1f}\".format(aver))\n\n#a=[]\n#for i in range(1,6):\n# a.append(eval(input()))\n#\n#sum=0.0\n#for i in range(5):\n# sum=sum+a[i]\n#aver=sum/len(a)\n#for i in a:\n# print(\"{:d} \".format(i),end=\"\")\n#print(\"\")\n#print(\"Sum =\",sum)\n#print(\"Average =\",aver)"
},
{
"alpha_fraction": 0.6017315983772278,
"alphanum_fraction": 0.6385281682014465,
"avg_line_length": 19.130434036254883,
"blob_id": "679253a06f735c5e6bafdc8b84419450501b4f29",
"content_id": "28ceb43451e95df93498af6d939e9e48305706b9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 462,
"license_type": "no_license",
"max_line_length": 55,
"num_lines": 23,
"path": "/New code/class7/PYA702.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nnum1=[]\nnum2=[]\nprint(\"Create tuple1:\")\nwhile True:\n num=int(input())\n if num == -9999:\n break\n num1.append(num)\n \nprint(\"Create tuple2:\")\nwhile True:\n num=int(input())\n if num == -9999:\n break\n num2.append(num)\nnumtotal=num1[:]\nnumtotal.extend(num2)\nnumsort=numtotal[:]\nnumsort.sort()\nprint(numtotal)\nprint(\"Combined tuple before sorting:\",tuple(numtotal))\nprint(\"Combined list after sorting:\",numsort)"
},
{
"alpha_fraction": 0.47741934657096863,
"alphanum_fraction": 0.5677419304847717,
"avg_line_length": 11,
"blob_id": "1a87d19ab39b6554ec36aff05debeb725849ffa4",
"content_id": "3c538af2873d00adb731264401d8814ce756941c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 161,
"license_type": "no_license",
"max_line_length": 35,
"num_lines": 13,
"path": "/4.第四類/PYD401.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 7 20:13:13 2018\n\n@author: user\n\n最小值\n\"\"\"\n\na=[]\nfor i in range(10):\n a.append(eval(input()))\nprint(min(a))"
},
{
"alpha_fraction": 0.4285714328289032,
"alphanum_fraction": 0.5047619342803955,
"avg_line_length": 10.44444465637207,
"blob_id": "2adbf7c2d6eaaa417cd88cb60d4b5576fc2171ab",
"content_id": "f5932c90e8600359c8886cf407756c713c140952",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 222,
"license_type": "no_license",
"max_line_length": 35,
"num_lines": 18,
"path": "/3.第三類/PYD302.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 7 20:11:35 2018\n\n@author: user\n\n迴圈偶數連加\n\"\"\"\n\na=int(input())\nb=int(input())\ni=a\nsum=0\nwhile i <= b:\n if (i % 2 == 0):\n sum=sum+i\n i=i+1\nprint(sum) "
},
{
"alpha_fraction": 0.5657142996788025,
"alphanum_fraction": 0.5771428346633911,
"avg_line_length": 18.55555534362793,
"blob_id": "1e2096e9e837ac1e62e27ff241c58f00000cb40a",
"content_id": "281c06b20e9abbd9878c75bf15b81829bcb53a6f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 175,
"license_type": "no_license",
"max_line_length": 35,
"num_lines": 9,
"path": "/New code/class8/PYA807.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\na=input()\ns=a.split(\" \")\ntotal=0\nfor i in s:\n total+=int(i)\naver=total/len(s)\nprint(\"Total = {:}\".format(total))\nprint(\"Average = {:}\".format(aver))"
},
{
"alpha_fraction": 0.5027027130126953,
"alphanum_fraction": 0.5081080794334412,
"avg_line_length": 14.363636016845703,
"blob_id": "6be36050cc07dc1a6c5b1d57bed7d85bd575650f",
"content_id": "da64a385560ada4aee3661d1d83799de58dbc759",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 193,
"license_type": "no_license",
"max_line_length": 30,
"num_lines": 11,
"path": "/2.第二類/PYD205.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\n#字元判斷\n\na=input()\nif a.isalpha():\n print(a,\"is an alphabet.\")\nelif a.isdigit():\n print(a,\"is a number.\")\nelse:\n print(a,\"is a symbol.\")\n \n \n \n\n"
},
{
"alpha_fraction": 0.42168673872947693,
"alphanum_fraction": 0.5240963697433472,
"avg_line_length": 11.692307472229004,
"blob_id": "4825e636607dced26ec0b1b119234814efae4b55",
"content_id": "1c8aca1558ec6f5fbae5b7b3465ee643e83ac46e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 174,
"license_type": "no_license",
"max_line_length": 30,
"num_lines": 13,
"path": "/2.第二類/PYD209.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\n#距離判斷\n\nx1=eval(input())\ny1=eval(input())\nx2=5\ny2=6\nd=((x1-x2)**2+(y1-y2)**2)**0.5\nif d <=15:\n print(\"Inside\")\nelse:\n print(\"Outside\")\n\n"
},
{
"alpha_fraction": 0.4223826825618744,
"alphanum_fraction": 0.4801444113254547,
"avg_line_length": 12.2380952835083,
"blob_id": "8a853f8ddc4088b9ffdba8c2e7ef105c46d8b8e5",
"content_id": "9973da99cd1fcbb0be6d47d80844fa52bd158e84",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 285,
"license_type": "no_license",
"max_line_length": 35,
"num_lines": 21,
"path": "/5.第五類/PYD510.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 7 20:15:34 2018\n\n@author: user\n\n費氏數列\n\n\"\"\"\n\ndef compute():\n F=[0,1]\n num=int(input())\n for i in range(num-2):\n Fn=F[i]+F[i+1]\n F.append(Fn)\n \n for j in F:\n print(j,end=\" \")\n print()\ncompute()"
},
{
"alpha_fraction": 0.3870967626571655,
"alphanum_fraction": 0.4193548262119293,
"avg_line_length": 15.600000381469727,
"blob_id": "6f73e96ab125bcbd3262b382e48db5f7de46c7d1",
"content_id": "4e258476ac7a69187903cb519e642be8296cd58e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 258,
"license_type": "no_license",
"max_line_length": 41,
"num_lines": 15,
"path": "/5.第五類/PYD508.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n#最大公因數\n\ndef compute():\n n=input()\n a=n.split(\",\")\n x=int(a[0])\n y=int(a[1])\n nmin=min(x,y)\n j=0\n for i in range(1,nmin+1):\n if (x % i == 0) and (y % i == 0):\n j=i\n print(j)\ncompute()"
},
{
"alpha_fraction": 0.5302013158798218,
"alphanum_fraction": 0.6140939593315125,
"avg_line_length": 16.294116973876953,
"blob_id": "9459faf36d506985c18f2c46e6dee0b5946e5a2a",
"content_id": "93e1f31befcc04d155da5927bcdad6d27c28d675",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 298,
"license_type": "no_license",
"max_line_length": 37,
"num_lines": 17,
"path": "/New code/class6/PYA604.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n#data=[34,18,22,32,18,29,30,38,42,18]\ndata=[]\nmaxcount=0\nmaxnum=0\nfor i in range(10):\n data.append(int(input()))\n\ndataset=set(data)\n\nfor ev in dataset:\n num=data.count(ev)\n if num > maxcount:\n maxcount=num\n maxnum=ev\nprint(maxnum)\nprint(maxcount)\n "
},
{
"alpha_fraction": 0.43478259444236755,
"alphanum_fraction": 0.508695662021637,
"avg_line_length": 18.25,
"blob_id": "d3759cd5e41a1b1ef4e5583b6194c765c471040d",
"content_id": "8137f4edf506ec5e78ebd6adfbd3af17952b02ca",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 244,
"license_type": "no_license",
"max_line_length": 38,
"num_lines": 12,
"path": "/1.第一類/PYD103.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\n#字串格式化輸出\na=input()\nb=input()\nc=input()\nd=input()\n\nprint(\"|{:>10s} {:>10s}|\".format(a,b))\nprint(\"|{:>10s} {:>10s}|\".format(c,d))\nprint(\"|{:<10s} {:<10s}|\".format(a,b))\nprint(\"|{:<10s} {:<10s}|\".format(c,d))"
},
{
"alpha_fraction": 0.546875,
"alphanum_fraction": 0.5625,
"avg_line_length": 20.33333396911621,
"blob_id": "486e43d35e60c26cfb9d64dd8c40e83d671ac54f",
"content_id": "af0477520fd664301065e48fb589d355eea631c5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 64,
"license_type": "no_license",
"max_line_length": 23,
"num_lines": 3,
"path": "/New code/class2/PYA208.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nnum=int(input())\nprint(format(num,\"X\"))\n"
},
{
"alpha_fraction": 0.5890411138534546,
"alphanum_fraction": 0.5913242101669312,
"avg_line_length": 22.105262756347656,
"blob_id": "4bcd8e9687294a2003892a57a34261d5cb47957b",
"content_id": "431ee5e450a490404501693863e32783a2761266",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 470,
"license_type": "no_license",
"max_line_length": 63,
"num_lines": 19,
"path": "/New code/class2/PYA205.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "## -*- coding: utf-8 -*-\n#char=eval(input())\n#if (char >=\"a\" and char <=\"z\") or (char >=\"A\" and char <=\"Z\"):\n# print(char,\"is an alphabet.\")\n#elif type(char)==int:\n# print(char,\"is a number.\")\n#else:\n# print(\"is a symbol.\")\n\n#\n#str.isalpha() 所有字符都是字母\n#str.isdigit() 所有字符都是数字\nchar=input()\nif char.isalpha():\n print(char,\"is an alphabet.\")\nelif char.isdigit():\n print(char,\"is a number.\")\nelse:\n print(char,\"is a symbol.\")"
},
{
"alpha_fraction": 0.5060975551605225,
"alphanum_fraction": 0.5975610017776489,
"avg_line_length": 10.785714149475098,
"blob_id": "79f952666edd78289f0eed2e7caf1f7b21d1c9a4",
"content_id": "de94b11d2f527035f0d2538bd2b5b88125375050",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 176,
"license_type": "no_license",
"max_line_length": 35,
"num_lines": 14,
"path": "/3.第三類/PYD306.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 7 20:12:18 2018\n\n@author: user\n\n迴圈階乘計算\n\"\"\"\n\nnum=int(input())\nsum=1\nfor i in range(1,num+1):\n sum=sum*i\nprint(sum)"
},
{
"alpha_fraction": 0.5392156839370728,
"alphanum_fraction": 0.5490196347236633,
"avg_line_length": 19.600000381469727,
"blob_id": "0ef5d909c60ff9a81e0fffd9b733588757ff16c4",
"content_id": "80e3eb6f5936eb69cfe7486f7e309026dc7ab776",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 102,
"license_type": "no_license",
"max_line_length": 25,
"num_lines": 5,
"path": "/New code/class7/PYA706.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nk=int(input())\nfor i in range(k):\n p=set(input())\n print(p.ispangram(p))"
},
{
"alpha_fraction": 0.483146071434021,
"alphanum_fraction": 0.550561785697937,
"avg_line_length": 12.769230842590332,
"blob_id": "2d504d6e472a7d0b3b5992816de77ed2b182a0d4",
"content_id": "8d7deaeac7cacfcdd7b937aa1d7321f5413cac62",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 186,
"license_type": "no_license",
"max_line_length": 49,
"num_lines": 13,
"path": "/8.第八類/PYD801.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 7 20:17:55 2018\n\n@author: user\n\n字串索引\n\n\"\"\"\n\nval=input()\nfor i in range(len(val)):\n print(\"Index of '{:}': {:}\".format(val[i],i))"
},
{
"alpha_fraction": 0.42465752363204956,
"alphanum_fraction": 0.5022830963134766,
"avg_line_length": 23.44444465637207,
"blob_id": "a0bcba4e3a7678caa84307334cc6af597ea205f2",
"content_id": "8f1debb6a68895f7dd6345f76cc0e86909332599",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 219,
"license_type": "no_license",
"max_line_length": 38,
"num_lines": 9,
"path": "/New code/class1/PYA103.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\na=input()\nb=input()\nc=input()\nd=input()\nprint(\"|{:>10s} {:>10s}|\".format(a,b))\nprint(\"|{:>10s} {:>10s}|\".format(c,d))\nprint(\"|{:<10s} {:<10s}|\".format(a,b))\nprint(\"|{:<10s} {:<10s}|\".format(c,d))"
},
{
"alpha_fraction": 0.504807710647583,
"alphanum_fraction": 0.5625,
"avg_line_length": 12.0625,
"blob_id": "49a13349dc47e0dad21e8dfe72ee3352e41cee8a",
"content_id": "24c9dd1184e59511fc9490fd95739ee07a2fe7bf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 220,
"license_type": "no_license",
"max_line_length": 47,
"num_lines": 16,
"path": "/8.第八類/PYD806.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 7 20:18:28 2018\n\n@author: user\n\n字元次數計算\n\n\"\"\"\n\ndef compute(a,b):\n c=a.count(b)\n print(\"{:} occurs {:} time(s)\".format(b,c))\na=input()\nb=input()\ncompute(a,b)"
},
{
"alpha_fraction": 0.4268292784690857,
"alphanum_fraction": 0.48170730471611023,
"avg_line_length": 15.300000190734863,
"blob_id": "937542890bebff95e71482ac55e626d5d7f7f8da",
"content_id": "69a3919705e60893625aa5f77412f6deecaa9be2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 164,
"license_type": "no_license",
"max_line_length": 23,
"num_lines": 10,
"path": "/New code/class2/PYA206.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nnum=int(input())\nif num >= 80:\n print(\"A\")\nelif num >= 70:\n print(\"B\")\nelif num >= 60:\n print(\"C\")\nelif num <= 59:\n print(\"F\")\n\n"
},
{
"alpha_fraction": 0.47863247990608215,
"alphanum_fraction": 0.5641025900840759,
"avg_line_length": 10.699999809265137,
"blob_id": "67b6199573809537c78490ccc73c7bf51491ebcc",
"content_id": "9fb82c3f1f52c513a7ec3b7573d43539d8b9a7a4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 117,
"license_type": "no_license",
"max_line_length": 23,
"num_lines": 10,
"path": "/New code/class6/PYA602.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nA=1\nJ=11\nQ=12\nK=13\nsum=0\nfor i in range(5):\n num=eval(input())\n sum=sum+num\nprint(sum)\n"
},
{
"alpha_fraction": 0.5229681730270386,
"alphanum_fraction": 0.5300353169441223,
"avg_line_length": 13.947368621826172,
"blob_id": "c61c468c9877517cb1ccd18db12139085d382dd9",
"content_id": "a550e0997cb99bea9f1359a4ee39fa91037ebfbf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 283,
"license_type": "no_license",
"max_line_length": 27,
"num_lines": 19,
"path": "/New code/class2/PYA210.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\na=eval(input())\nb=eval(input())\nc=eval(input())\nif a+b>=c:\n print(a+b+c)\nelif a+c >=b:\n print(a+b+c)\nelif b+c >=a:\n print(a+b+c)\nelse:\n print(\"Invalid\")\n#a=[]\n#for i in range(3):\n# a.append(int(input()))\n#b=a.sort()\n#print(a)\n#print(b)\n#type(b)"
},
{
"alpha_fraction": 0.4383561611175537,
"alphanum_fraction": 0.4794520437717438,
"avg_line_length": 23.33333396911621,
"blob_id": "e146d87d425df33a61594654d816d793ac20944d",
"content_id": "f910bd0b6ad467385efe1064550efe72c8e52300",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 146,
"license_type": "no_license",
"max_line_length": 42,
"num_lines": 6,
"path": "/New code/class3/PYA303.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\na=int(input())\nfor i in range(1,a+1):\n for j in range(1,i+1):\n print(\"{:>4d}\".format(i*j),end=\"\")\n print(\"\")\n"
},
{
"alpha_fraction": 0.4968421161174774,
"alphanum_fraction": 0.6126315593719482,
"avg_line_length": 21.66666603088379,
"blob_id": "063839c2e8394749de1a5390f141119c7c6b6669",
"content_id": "c9201ed9d47b1be6f18c2266006d82c626b9e324",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 475,
"license_type": "no_license",
"max_line_length": 68,
"num_lines": 21,
"path": "/New code/class7/PYA705.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n#s1=[3,28,-2,7,39]\n#s2=[2,77,0]\n#s3=[3,28,12,99,39,7,-1,-2,65]\ns1=[]\ns2=[]\ns3=[]\nprint(\"Input to set1:\")\nfor i in range(5):\n s1.append(input())\nprint(\"Input to set2:\")\nfor i in range(3):\n s2.append(input())\nprint(\"Input to set3:\")\nfor i in range(9):\n s3.append(input())\nset1=set(s1)\nset2=set(s2)\nset3=set(s3)\nprint(\"set2 is subset of set1: {:}\".format(set2.issubset(set1)))\nprint(\"set3 is superset of set1: {:}\".format(set3.issuperset(set1)))"
},
{
"alpha_fraction": 0.3658536672592163,
"alphanum_fraction": 0.502439022064209,
"avg_line_length": 17.727272033691406,
"blob_id": "40ecd44a11b94888803cf31b93ca8457c7c4973e",
"content_id": "dd3a5f7c3c6f718dea7e8aa2d48a2405280d5390",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 205,
"license_type": "no_license",
"max_line_length": 34,
"num_lines": 11,
"path": "/New code/class6/PYA603.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n#a=[40,32,12,29,20,19,38,48,57,44]\na=[]\n#for i in range(10):\n# a.append(int(input()))\nb=a[::]\nb.sort()\nc=b[::-1]\n#for i in range(3):\n# print(c[i],end=\" \")\nprint(c[0],c[1],c[2])"
},
{
"alpha_fraction": 0.37220844626426697,
"alphanum_fraction": 0.4590570628643036,
"avg_line_length": 16.565217971801758,
"blob_id": "09491045e4367607802fd9f0dc23635bcb01c10b",
"content_id": "ed6ac0972f328ef6c4f7e54f632f2ed023120516",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 417,
"license_type": "no_license",
"max_line_length": 43,
"num_lines": 23,
"path": "/5.第五類/PYD506.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 7 20:15:11 2018\n\n@author: user\n\n一元二次方程式\n\n\"\"\"\n\ndef compute():\n a=eval(input())\n b=eval(input())\n c=eval(input())\n x1=0\n x2=0\n if (b**2)-(4*a*c)<0:\n print(\"Your equation has no root.\")\n else:\n x1=((-1*b)+(b**2-4*a*c)**0.5)/(2*a)\n x2=((-1*b)-(b**2-4*a*c)**0.5)/(2*a)\n print(\"{:}, {:}\".format(x1,x2))\ncompute()"
},
{
"alpha_fraction": 0.5142857432365417,
"alphanum_fraction": 0.5285714268684387,
"avg_line_length": 10.5,
"blob_id": "731c8b8061d3749e3a564481d2c60037ab3215b8",
"content_id": "7c8adcdc5d2cac938cba18fd9567867e5ffcc785",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 80,
"license_type": "no_license",
"max_line_length": 23,
"num_lines": 6,
"path": "/2.第二類/PYD208.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\n#十進位換算\n\na=int(input())\nprint(format(a,\"X\"))\n\n"
},
{
"alpha_fraction": 0.49790793657302856,
"alphanum_fraction": 0.5523012280464172,
"avg_line_length": 14.800000190734863,
"blob_id": "bcdf97340b97b71d84263858503fa9b82f0113d2",
"content_id": "b47ed50ddc5df83f5bf8fbd4161390158bf4c997",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 247,
"license_type": "no_license",
"max_line_length": 49,
"num_lines": 15,
"path": "/9.第九類/PYD902.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 7 20:19:15 2018\n\n@author: user\n\n資料加總\n\n\"\"\"\n\nwith open(\"read.txt\",\"r\",encoding=\"utf-8\") as fd:\n data=fd.read()\n d_sp=data.split(\" \")\n d_li=list(map(eval,d_sp))\n print(sum(d_li) ) "
},
{
"alpha_fraction": 0.44600939750671387,
"alphanum_fraction": 0.5258215665817261,
"avg_line_length": 14.285714149475098,
"blob_id": "9dd8172888b3f6ec352ae6a7c76b620bd4584e51",
"content_id": "532db6426e5bb9d9628077fedc46373728c6b587",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 225,
"license_type": "no_license",
"max_line_length": 42,
"num_lines": 14,
"path": "/3.第三類/PYD303.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 7 20:11:40 2018\n\n@author: user\n\n迴圈數值相乘\n\"\"\"\n\na=int(input())\nfor i in range(1,a+1):\n for j in range(1,i+1):\n print(\"{:>4d}\".format(i*j),end=\"\")\n print(\"\")"
},
{
"alpha_fraction": 0.5370370149612427,
"alphanum_fraction": 0.5648148059844971,
"avg_line_length": 14.428571701049805,
"blob_id": "d7e72fb197f7fef7ee15a42e480695fefa4a6d72",
"content_id": "f3eee06dc64683b9f11b2cef0dc47ba7a12d7b47",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 108,
"license_type": "no_license",
"max_line_length": 23,
"num_lines": 7,
"path": "/New code/class3/PYA301.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\na=int(input())\nb=int(input())\nsum=0\nfor i in range(a,b+1):\n sum=sum+i\nprint(sum)\n"
},
{
"alpha_fraction": 0.5392156839370728,
"alphanum_fraction": 0.5392156839370728,
"avg_line_length": 9.300000190734863,
"blob_id": "0cf997ef099ff08e15c2a593bbce3c7a2f4f7e74",
"content_id": "542457ae239155c78cea4ce3a6d41ed751bc2d17",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 102,
"license_type": "no_license",
"max_line_length": 41,
"num_lines": 10,
"path": "/New code/PYD608.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# TODO\n\n\n\n\n\n\"\"\"\nIndex of the largest number _ is: (_, _)\nIndex of the smallest number _ is: (_, _)\n\"\"\""
},
{
"alpha_fraction": 0.4578096866607666,
"alphanum_fraction": 0.5008976459503174,
"avg_line_length": 20.461538314819336,
"blob_id": "fe770d37c3f012383470ba567f00d7c563d70240",
"content_id": "f2831263799476d02d6107d8dda3737c6e08f4e2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 571,
"license_type": "no_license",
"max_line_length": 90,
"num_lines": 26,
"path": "/6.第六類/PYD608.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 7 20:16:26 2018\n\n@author: user\n\n最大最小值索引\n\n\"\"\"\n\ndata=[]\nd_max=0\nd_min=99999\nfor i in range(3):\n data.append([])\n for j in range(3):\n num=eval(input())\n data[i].append(num)\n if num > d_max:\n d_max=num\n d_ind=(i,j)\n if num < d_min:\n d_min=num\n da_ind=(i,j)\nprint(\"Index of the largest number {:} is: ({:}, {:})\".format(d_max,d_ind[0],d_ind[1]))\nprint(\"Index of the smallest number {:} is: ({:}, {:})\".format(d_min,da_ind[0],da_ind[1]))"
},
{
"alpha_fraction": 0.5436241626739502,
"alphanum_fraction": 0.563758373260498,
"avg_line_length": 13.699999809265137,
"blob_id": "04a37c6edad21621d73a57f3e85bed58d8da8496",
"content_id": "3f6be10924584642a439b80ba48a2ad61c57bb4b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 163,
"license_type": "no_license",
"max_line_length": 33,
"num_lines": 10,
"path": "/1.第一類/PYD110.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\n#正n邊形面積計算\n\nimport math\nc=eval(input())\na=int(input())\n\nb=(c*a*a)/(4*math.tan(math.pi/c))\nprint(\"Area = {:.4f}\".format(b))\n\n\n"
},
{
"alpha_fraction": 0.546875,
"alphanum_fraction": 0.5546875,
"avg_line_length": 11.899999618530273,
"blob_id": "e263e8b216274c5a7734c2094c180070108800ba",
"content_id": "48d6ad904cc3089651b18b1d2f6605424dae4548",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 128,
"license_type": "no_license",
"max_line_length": 23,
"num_lines": 10,
"path": "/New code/class5/PYA502.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\ndef compute(x,y):\n ans=x*y\n print(ans)\n return ans\n\n\nx=int(input())\ny=int(input())\ncompute(x,y)"
},
{
"alpha_fraction": 0.4672897160053253,
"alphanum_fraction": 0.5046728849411011,
"avg_line_length": 12.375,
"blob_id": "e63fbbfbec22887fb7431166e46922fbff9bebfc",
"content_id": "0859bfc47d47c7fb1209f9dc6da5863f56dca807",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 329,
"license_type": "no_license",
"max_line_length": 35,
"num_lines": 24,
"path": "/7.第七類/PYD710.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 7 20:17:43 2018\n\n@author: user\n\n詞典搜尋\n\n\"\"\"\n\nd={}\nwhile True:\n key = input(\"Key: \")\n if key == \"end\":\n break\n else:\n value = input(\"Value: \")\n d[key]=value\nc=input(\"Search key: \")\ne=d.keys()\nif c in e:\n print(\"True\")\nelse:\n print(\"False\") "
},
{
"alpha_fraction": 0.5441860556602478,
"alphanum_fraction": 0.5627906918525696,
"avg_line_length": 15.538461685180664,
"blob_id": "1d775434398b908e52ee13b7171601ed1163aff0",
"content_id": "82f10c7f04c4ce306adbe6629c21bced50b47277",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 223,
"license_type": "no_license",
"max_line_length": 35,
"num_lines": 13,
"path": "/1.第一類/PYD107.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\n#數值計算\na=eval(input())\nb=eval(input())\nc=eval(input())\nd=eval(input())\ne=eval(input())\nf=a+b+c+d+e\ng=f/5\nprint(a,b,c,d,e )\nprint(\"Sum = {:.1f}\".format(f))\nprint(\"Average = {:.1f}\".format(g))\n"
},
{
"alpha_fraction": 0.43216079473495483,
"alphanum_fraction": 0.5175879597663879,
"avg_line_length": 19,
"blob_id": "48122fef84068472f5c97965cb858749bf186589",
"content_id": "83fdf994cde3deb12f683be0104961ef1134add9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 199,
"license_type": "no_license",
"max_line_length": 32,
"num_lines": 10,
"path": "/New code/class1/PYA108.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nx1=eval(input())\ny1=eval(input())\nx2=eval(input())\ny2=eval(input())\n\nDis=((x1-x2)**2+(y1-y2)**2)**0.5\nprint(\"(\",x1,\",\",y1,\")\")\nprint(\"(\",x2,\",\",y2,\")\")\nprint(\"Distance =\",Dis)"
},
{
"alpha_fraction": 0.4571428596973419,
"alphanum_fraction": 0.46666666865348816,
"avg_line_length": 19.200000762939453,
"blob_id": "9466c00c882fbc288182a15a90ca99dffa3f4d69",
"content_id": "52ca53e5a1ec8ad518a71c8ee869618e81de58af",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 105,
"license_type": "no_license",
"max_line_length": 53,
"num_lines": 5,
"path": "/New code/class8/PYA801.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\na=input()\nfor i in a:\n print(\"Index of '{:}': {:}\".format(i,a.index(i)))\n "
},
{
"alpha_fraction": 0.43589743971824646,
"alphanum_fraction": 0.4615384638309479,
"avg_line_length": 14.800000190734863,
"blob_id": "561b8e6f0959bc5d6563076266c725be7cade901",
"content_id": "65a290753e4baabcf4c70beac850cf00135748b8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 78,
"license_type": "no_license",
"max_line_length": 23,
"num_lines": 5,
"path": "/New code/class8/PYA803.py",
"repo_name": "junyi1997/TQC_Python",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\na=input()\nb=a.split(\" \")\nc=b[-3:]\nprint((\" \").join(c))"
}
] | 161 |
UKCloud/OpenStack-Loadbalancer-Template-Creator
|
https://github.com/UKCloud/OpenStack-Loadbalancer-Template-Creator
|
e4f5ed44436f5af27f8cae0a8af3609ac938e687
|
9d7641a1fc420fe2a433eea1a0f340d2e87e27ef
|
349511a8e4a87af88054d3bb6c5d16e05c25c4d7
|
refs/heads/master
| 2020-03-27T09:25:28.615880 | 2018-09-05T15:25:50 | 2018-09-05T15:25:50 | 146,340,195 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.8167330622673035,
"alphanum_fraction": 0.8167330622673035,
"avg_line_length": 24.100000381469727,
"blob_id": "18177842f2d3d35375fa547c8496882eb68e59c1",
"content_id": "eaa54b77ae9ba9b90aa58755174252c0eb64cd9c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 251,
"license_type": "no_license",
"max_line_length": 114,
"num_lines": 10,
"path": "/README.md",
"repo_name": "UKCloud/OpenStack-Loadbalancer-Template-Creator",
"src_encoding": "UTF-8",
"text": "# OpenStack-Loadbalancer-Template-Creator\nThis python application creates heat templates to allow you to quickly deploy loadbalancers onto UKCloud OpenStack\n\n## Install requirements\npip install -r requirements.txt\n\n## Run the app\npython app.py\n\nEnjoy\n"
},
{
"alpha_fraction": 0.6867139339447021,
"alphanum_fraction": 0.6917123794555664,
"avg_line_length": 47.077518463134766,
"blob_id": "ef4f7a0d52b45fc8c456f73fd9ea0cfa0612800c",
"content_id": "365eef17bd0e788aaa7c4b12ee8aa3e5c72c8ea4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6202,
"license_type": "no_license",
"max_line_length": 394,
"num_lines": 129,
"path": "/app.py",
"repo_name": "UKCloud/OpenStack-Loadbalancer-Template-Creator",
"src_encoding": "UTF-8",
"text": "from base import base\nfrom tabulate import tabulate\nfrom git import Repo\nfrom shutil import copyfile\n\ndef run():\n conn = base.connect().create_connection()\n external_networks = get_external_networks(conn)\n keys = get_keys(conn)\n images = get_images(conn)\n routers = get_routers(conn)\n flavors = get_flavors(conn)\n generateEnvFile(external_networks, keys, images, routers, flavors)\n\ndef get_flavors(conn):\n flavors = conn.compute.flavors()\n return flavors\n\ndef get_external_networks(conn):\n external_networks = conn.network.networks(is_router_external=True)\n return external_networks\n\n\ndef get_keys(conn):\n keys = conn.compute.keypairs()\n return keys\n\ndef get_images(conn):\n images = conn.image.images(visibility=\"public\")\n return images\n\ndef get_routers(conn):\n routers = conn.network.routers()\n return routers\n\ndef list_gen(obj):\n return_object = []\n setup_headers = []\n setup_headers.append(\"ID\")\n setup_headers.append(\"Name\")\n setup_headers.append(\"GUID\")\n return_object.append(setup_headers)\n id = 1\n for item in obj:\n temp_list = []\n temp_list.append(id)\n temp_list.append(item.name)\n temp_list.append(item.id)\n return_object.append(temp_list)\n id += 1\n return return_object\n\ndef print_item(obj):\n print(tabulate(obj, headers=\"firstrow\"))\n\ndef print_intro():\n clear()\n print(\"#### UKCloud Loadbalancer Heat Template Generator app ####\")\n print(\"This application will help you to generate a template file that can be used with UKCloud HaProxy on OpenStack heat templates\")\n print(\"The HaProxy on OpenStack heat templates deploy a pair of servers running haproxy and keepalived, these can be used to provide loadbalancing services\")\n print(\"It will create a new front end network for the loadbalancers to sit on, this will need to be connected to a router, which has a connection to both the external network you wish to serve, and the backend network that the target servers exist on. Alternativly you can use the template to build a new netwok and deploy both your loadbalancers and back end servers to the same network.\")\n print(\"You can also provide a haproxy config file, and this will be auto uploaded to the new instnces\")\n print(\"Caching information\")\n run()\n\ndef clear():\n print(chr(27)+'[2j')\n print('\\033c')\n print('\\x1bc')\n\ndef generateEnvFile(external_networks, keys, images, routers, flavors):\n external_network_list = list_gen(external_networks)\n key_pair_list = list_gen(keys)\n imgae_list = list_gen(images)\n flavor_list = list_gen(flavors)\n rooter_list = list_gen(routers)\n stack_name = input(\"Please enter the name for the stack: \")\n print_item(external_network_list)\n external_network_selection = int(input(\"Please select an external network that this loadbalancer will service: \"))\n print_item(rooter_list)\n rooter_selection = int(input(\"Please select router you want to connect your new network to: \"))\n print_item(imgae_list)\n image_selection = int(input(\"Please select which image you wish to use (Only Centos is currently supported): \"))\n print_item(flavor_list)\n flavor_selection = int(input(\"Please select which flavor you wish to use (t1.small is usuall enough for most applications): \"))\n print_item(key_pair_list)\n key_pair_selection = int(input(\"Please select which keypair you wish to use: \"))\n server_names = input(\"What do you wish your servers to be called (E.G. 
blog-balancers): \")\n network_name = input(\"Please enter the name of the new network you wish to create: \")\n netowrk_cidr = input(\"Please enter the network CIDR you wish the network to use (E.G. 10.0.0.0/24): \")\n dns = input(\"Please enter the dns servers you wish your loadbalancers to use (E.G. 8.8.8.8): \")\n ha_proxy_config = input(\"Please enter the location for your custom haproxy config (full path), leave blank to use the default (This will need to be customised to work): \")\n ha_proxy_ports = input(\"Please enter the ports you will access your services on, or leave blank for 80 and 443 (Enter as comma delimited: \")\n\n selected_external_network = external_network_list[external_network_selection][2]\n selected_rooter = rooter_list[rooter_selection][2]\n selected_image = imgae_list[image_selection][2]\n selected_flavor = flavor_list[flavor_selection][2]\n selected_key_pair = key_pair_list[key_pair_selection][2]\n clone_ha_proxy_repo(stack_name)\n write_env_file(stack_name, server_names, network_name, netowrk_cidr, dns, ha_proxy_config, selected_external_network, selected_rooter, selected_image, selected_flavor, selected_key_pair, ha_proxy_ports)\n\ndef clone_ha_proxy_repo(stack_name):\n data = Repo.clone_from(\"https://github.com/UKCloud/haproxy-on-openstack.git\", stack_name)\n data.git.checkout('hotfix/updating_to_work_with_newton')\n\ndef copy_ha_proxy_config(stack_name, ha_proxy_config_location):\n copyfile(ha_proxy_config_location, stack_name + \"/files/haproxy.cfg\")\n\ndef write_env_file(stack_name, server_names, network_name, netowrk_cidr, dns, ha_proxy_config, selected_external_network, selected_rooter, selected_image, selected_flavor, selected_key_pair, ha_proxy_ports):\n f = open(stack_name + \"/\" + stack_name + \"_enviroment.yaml\", \"a\")\n f.write(\"parameters:\" + \"\\n\")\n f.write(\" key_name: \" + selected_key_pair +\"\\n\")\n f.write(\" flavor: \" + selected_flavor + \"\\n\")\n f.write(\" image: \" + selected_image + \"\\n\")\n f.write(\" router: \" + selected_rooter + \"\\n\")\n f.write(\" external_network: \" + selected_external_network + \"\\n\")\n f.write(\" vrrp_subnet_cidr: \" + netowrk_cidr + \"\\n\")\n f.write(\" vrrp_subnet_dns: \" + dns + \"\\n\")\n f.write(\" haproxy_ports: \" + \"'\" + ha_proxy_ports + \"'\" + \"\\n\")\n f.write(\" server_name: \" + server_names + \"\\n\")\n f.write(\" frontend_network_name: \" + network_name + \"\\n\")\n if ha_proxy_config:\n copy_ha_proxy_config(stack_name, ha_proxy_config)\n print(\"Now run: openstack stack create -t \" + stack_name + \"/haproxy.yaml -e \" + stack_name + \"/\" + stack_name + \"_enviroment.yaml\" + \" --wait \" + stack_name)\n\n\nif __name__ == \"__main__\":\n print_intro()\n"
},
{
"alpha_fraction": 0.5442771315574646,
"alphanum_fraction": 0.5466867685317993,
"avg_line_length": 36.29213333129883,
"blob_id": "3416684a240668e649a1fcdcb814c49943c39d31",
"content_id": "989bb2204645faf9e5ee1e906c8d0bab1573d9cc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3320,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 89,
"path": "/base/base.py",
"repo_name": "UKCloud/OpenStack-Loadbalancer-Template-Creator",
"src_encoding": "UTF-8",
"text": "import os\nfrom openstack import utils\nfrom openstack import connection\nfrom openstack import profile\nfrom openstack import version as os_ver\n\n\nclass connect:\n \"\"\"Connection related class\n\n Class to collate all operations related to connecting to OpenStack.\n \"\"\"\n def create_connection(self):\n \"\"\"Creates a connection\n\n Method creates a connection and returns it for use in other methods.\n\n Returns:\n connection: a connection object\n \"\"\"\n ver_tmp = os_ver.__version__\n major_tmp, minor_tmp, rel_tmp = ver_tmp.split(\".\")\n\n major_version = int(major_tmp)\n minor_version = int(minor_tmp)\n rel_version = int(rel_tmp)\n\n prof = profile.Profile()\n prof.set_region(profile.Profile.ALL, os.environ['OS_REGION_NAME'])\n prof.set_interface('identity', 'public')\n prof.set_version('identity', 'v3')\n prof.set_version('image', 'v2')\n \"\"\"\n Between v2 and v3 OS_TENANT_NAME is changed to OS_PROJECT_NAME\n This switch ensures we dont care what version you are using\n \"\"\"\n if os.environ['OS_TENANT_NAME'] is None:\n projectSelect = os.environ['OS_PROJECT_NAME']\n else:\n projectSelect = os.environ['OS_TENANT_NAME']\n\n if major_version >= 0 and minor_version >= 13 and rel_version >= 0:\n # Newer version of OpenStackSDK\n return connection.Connection(\n region_name='regionOne',\n auth=dict(\n auth_url=os.environ['OS_AUTH_URL'],\n username=os.environ['OS_USERNAME'],\n password=os.environ['OS_PASSWORD'],\n project_name=projectSelect),\n identity_interface='public')\n else:\n # Older version of OpenStackSDK\n return connection.Connection(\n profile=prof,\n user_agent='examples',\n auth_url=os.environ['OS_AUTH_URL'],\n project_name=projectSelect,\n username=os.environ['OS_USERNAME'],\n password=os.environ['OS_PASSWORD'])\n\n\nclass orphanFinder:\n \"\"\"Orphan resource related class\n\n Class to collate all operations related to finding orphan resources.\n \"\"\"\n def findOrphans(self, resources, projIdList):\n \"\"\"Find orphan resources\n\n Method takes a list of resources, and a list of project IDs then finds\n the resources that don't relate to a valid project.\n\n Args:\n resources (list): List of resources to filter.\n projIdList (list): List of valid project IDs.\n\n Returns:\n list: list of resources not owned by a valid project\n \"\"\"\n orphans = []\n for resource in resources:\n if hasattr(resource, 'project_id'):\n if resource.project_id not in projIdList:\n orphans.append(resource)\n elif hasattr(resource, 'owner'):\n if resource.owner not in projIdList:\n orphans.append(resource)\n return orphans\n\n"
}
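The `connect` and `orphanFinder` classes in the record above are building blocks; a short usage sketch may help show how they fit together. This is illustrative only and not part of the repository files: the import path, the `all_projects` query parameter, and the assumption that the usual `OS_*` environment variables are exported all depend on the local setup and openstacksdk release.

```python
# Illustrative sketch: list servers whose project no longer exists.
from base.base import connect, orphanFinder  # import path is an assumption

conn = connect().create_connection()

# IDs of every project these credentials can see.
project_ids = [project.id for project in conn.identity.projects()]

# 'all_projects' works on recent openstacksdk releases; older ones used 'all_tenants'.
servers = list(conn.compute.servers(all_projects=True))

for orphan in orphanFinder().findOrphans(servers, project_ids):
    print(f"Orphaned server: {orphan.name} ({orphan.id})")
```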
] | 3 |
jorgemira/gps_tracker
|
https://github.com/jorgemira/gps_tracker
|
193261cb4f4a4444b941296e0519164a201bcf23
|
3f5a5b73cde34cc0f8052276791cddb59a020d54
|
d4b4cb818bd670ec4c9e1287961a21bedc816c45
|
refs/heads/main
| 2023-01-03T09:23:47.540017 | 2020-10-26T15:47:08 | 2020-10-26T15:47:08 | 305,957,858 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.7306034564971924,
"alphanum_fraction": 0.7306034564971924,
"avg_line_length": 29.933332443237305,
"blob_id": "d56ed4cc554b4bdbfcefd6cc58ed423c0bc98a02",
"content_id": "9ea9a8b3f5e0e8bdb104c150df55f1b2a4f42f86",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 464,
"license_type": "permissive",
"max_line_length": 61,
"num_lines": 15,
"path": "/server/gps_tracker/urls.py",
"repo_name": "jorgemira/gps_tracker",
"src_encoding": "UTF-8",
"text": "from django.urls import include, path\nfrom rest_framework import routers\nfrom rest_framework.authtoken.views import obtain_auth_token\n\nfrom .gps_tracker import urls, viewsets\n\nrouter = routers.DefaultRouter()\nrouter.register(r\"locations\", viewsets.LocationCreateAPIView)\n\nurlpatterns = [\n path(\"api/\", include(router.urls)),\n path(\"auth/\", obtain_auth_token, name=\"auth\"),\n path(\"api/panic/\", viewsets.panic, name=\"panic\"),\n path(\"\", include(urls)),\n]\n"
},
{
"alpha_fraction": 0.6666666865348816,
"alphanum_fraction": 0.6683250665664673,
"avg_line_length": 30.736841201782227,
"blob_id": "cca4decb183bdea0a21d53c8d570ae97ff62ba0e",
"content_id": "89be1d22aad4532532374cb99fc755028b6ea19c",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 603,
"license_type": "permissive",
"max_line_length": 79,
"num_lines": 19,
"path": "/server/gps_tracker/gps_tracker/filters.py",
"repo_name": "jorgemira/gps_tracker",
"src_encoding": "UTF-8",
"text": "from django_filters import rest_framework as filters\n\nfrom .models import Location\n\n\nclass LocationFilter(filters.FilterSet):\n min_date = filters.DateTimeFilter(field_name=\"datetime\", lookup_expr=\"gte\")\n max_date = filters.DateTimeFilter(field_name=\"datetime\", lookup_expr=\"lte\")\n show_all = filters.BooleanFilter(method=\"show_all_filter\")\n\n def show_all_filter(self, queryset, value, *args, **kwargs):\n if args and args[0]:\n return queryset.all()\n else:\n return queryset.filter(hidden=False)\n\n class Meta:\n model = Location\n fields = []\n"
},
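The `LocationFilter` above exposes `min_date`, `max_date` and `show_all` as query parameters on the locations endpoint. A minimal client-side sketch of how a query would look, reusing the host, test credentials and token flow that appear in the client constants and urls.py elsewhere in this repository; the concrete values are assumptions about a local development setup.

```python
# Illustrative query against /api/locations/ using LocationFilter's parameters.
import requests

HOST = "http://127.0.0.1:8000"  # matches the client constants; adjust for your deployment

token = requests.post(
    f"{HOST}/auth/", json={"username": "asdfg", "password": "asdfg"}
).json()["token"]

response = requests.get(
    f"{HOST}/api/locations/",
    headers={"Authorization": f"Token {token}"},
    params={
        "min_date": "2020-10-01T00:00:00.000000Z",  # datetime >= min_date
        "max_date": "2020-10-31T23:59:59.000000Z",  # datetime <= max_date
        "show_all": "true",                         # include hidden locations
        "ordering": "datetime",                     # OrderingFilter field on the viewset
    },
)
print(response.json())
```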
{
"alpha_fraction": 0.44607189297676086,
"alphanum_fraction": 0.6071904301643372,
"avg_line_length": 22.46875,
"blob_id": "e4a7f7dacb29c898eafab40a576f4eb7fe6c9356",
"content_id": "0e6aee54965be79f3b5b16c0db26868ac9818643",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 751,
"license_type": "permissive",
"max_line_length": 54,
"num_lines": 32,
"path": "/client/gps_tracker/sample_data.py",
"repo_name": "jorgemira/gps_tracker",
"src_encoding": "UTF-8",
"text": "from datetime import datetime, timedelta\nfrom decimal import Decimal\n\nfrom .location import Location\nfrom .server import Server\n\n# TODO: add more data\nSAMPLE_DATA = [\n [-0.510864, 38.333039],\n # [0.054932, 38.706946],\n # [-0.307617, 39.185433],\n # [-0.060425, 39.981329],\n # [0.582275, 40.713956],\n # [1.230469, 41.10833],\n # [2.04895, 41.467428],\n # [2.988281, 42.269179],\n]\n\n\ndef create_sample_data() -> None:\n Server.login()\n\n dt = datetime.utcnow() - timedelta(days=2)\n for (lon, lat) in SAMPLE_DATA:\n dt += timedelta(minutes=30)\n loc = Location(Decimal(lon), Decimal(lat), dt)\n Server.post_location(loc)\n Server.post_location(loc)\n\n\nif __name__ == '__main__':\n create_sample_data()\n"
},
{
"alpha_fraction": 0.7204081416130066,
"alphanum_fraction": 0.7204081416130066,
"avg_line_length": 22.33333396911621,
"blob_id": "ec31755a3a964b72ab1db22a577d422d9aa63f15",
"content_id": "2c4d873d12a2fbfbb8613abff66b9e741c6fb421",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 490,
"license_type": "permissive",
"max_line_length": 57,
"num_lines": 21,
"path": "/server/gps_tracker/gps_tracker/views.py",
"repo_name": "jorgemira/gps_tracker",
"src_encoding": "UTF-8",
"text": "from django.contrib.auth.decorators import login_required\nfrom django.http import JsonResponse\nfrom django.shortcuts import render\n\nfrom .helpers import is_panic, set_panic_mode\n\n\ndef login(request):\n return render(request, \"login.html\", {})\n\n\n# @login_required # TODO: Move to main\ndef show_map(request):\n return render(request, \"map.html\", {})\n\n\n@login_required\ndef switch_panic(request):\n mode = not is_panic()\n set_panic_mode(mode)\n return JsonResponse({\"panic\": mode})\n"
},
{
"alpha_fraction": 0.7155796885490417,
"alphanum_fraction": 0.7228260636329651,
"avg_line_length": 35.79999923706055,
"blob_id": "d6dc2108073255f3420857c2c1ca93c43351d861",
"content_id": "8aa466e36e3d7c521b9ec51df7cb54f54f4d7e17",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 552,
"license_type": "permissive",
"max_line_length": 73,
"num_lines": 15,
"path": "/server/gps_tracker/gps_tracker/serializers.py",
"repo_name": "jorgemira/gps_tracker",
"src_encoding": "UTF-8",
"text": "from rest_framework import serializers\n\nfrom .models import Location\n\n\nclass LocationSerializer(serializers.HyperlinkedModelSerializer):\n latitude = serializers.DecimalField(max_digits=9, decimal_places=6)\n longitude = serializers.DecimalField(max_digits=9, decimal_places=6)\n datetime = serializers.DateTimeField(format=\"%Y-%m-%dT%H:%M:%S.%fZ\")\n panic = serializers.ReadOnlyField()\n hidden = serializers.ReadOnlyField()\n\n class Meta:\n model = Location\n fields = (\"latitude\", \"longitude\", \"datetime\", \"panic\", \"hidden\")\n"
},
{
"alpha_fraction": 0.8095238208770752,
"alphanum_fraction": 0.8095238208770752,
"avg_line_length": 20,
"blob_id": "f4b4ab88945e25cd779801e2f7fc7cb1ef75fd74",
"content_id": "39eb5ba86232b7b6323adff8e5c28220afaf48e3",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 42,
"license_type": "permissive",
"max_line_length": 27,
"num_lines": 2,
"path": "/README.md",
"repo_name": "jorgemira/gps_tracker",
"src_encoding": "UTF-8",
"text": "# gps_tracker\nGPS tracker for RaspberryPi\n"
},
{
"alpha_fraction": 0.6366322040557861,
"alphanum_fraction": 0.6706055998802185,
"avg_line_length": 28.434782028198242,
"blob_id": "7c9502be07c8b5394dca4217d5990fcbcadc4c13",
"content_id": "f5bd918ad53eb623eb39d0cf8d6a5ac9b3515a22",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 677,
"license_type": "permissive",
"max_line_length": 86,
"num_lines": 23,
"path": "/client/gps_tracker/constants.py",
"repo_name": "jorgemira/gps_tracker",
"src_encoding": "UTF-8",
"text": "# REST API server related constants\nUSERNAME = \"asdfg\"\nPASSWORD = \"asdfg\"\nHOST = \"http://127.0.0.1:8000\"\nAUTH_URL: str = f\"{HOST}/auth/\"\nLOCATIONS_URL: str = f\"{HOST}/api/locations/\"\nPANIC_URL: str = f\"{HOST}/api/panic/\"\n\n\n# Format constants\nDATETIME_FORMAT = \"%Y-%m-%dT%H:%M:%S.%fZ\"\nPRECISION = 6\n\n# Job scheduling constants\nTIME_PANIC = 1 # TODO: Change to 60\nTIME_NO_PANIC = 5 # TODO: Change to 1800\nTIME_CHECK_PANIC = 3 # TODO: Change to 300\n\n# Logging constants\nLOG_FILE = \"gps_tracker.log\" # TODO: move to /var/log/\nLOG_FORMAT = \"%(asctime)s - %(levelname)s - %(message)s\"\n\nPENDING_FILE = \"pending_locations.json\" # TODO: move to /usr/local/share/gps_tracker/\n"
},
{
"alpha_fraction": 0.7311475276947021,
"alphanum_fraction": 0.7311475276947021,
"avg_line_length": 19.33333396911621,
"blob_id": "5f1d457a066526a895824deba2b1e951d76eb2a1",
"content_id": "5db729cf48a654c09cfeebe310e0e6a2a4c56084",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 305,
"license_type": "permissive",
"max_line_length": 43,
"num_lines": 15,
"path": "/client/README.md",
"repo_name": "jorgemira/gps_tracker",
"src_encoding": "UTF-8",
"text": "```shell script\nsudo mv gps_tracker.py /usr/local/bin/\n\nsudo mkdir /usr/local/share/gps_tracker\n\nsudo chmod +x /usr/local/bin/gps_tracker.py\n\nsudo mv gps_tracker.sh /etc/init.d/\n\nsudo chmod +x /etc/init.d/gps_tracker.sh\n\nsudo update-rc.d gps_tracker.sh defaults\n\nsudo /etc/init.d/gps_tracker.sh start\n```\n"
},
{
"alpha_fraction": 0.8983050584793091,
"alphanum_fraction": 0.8983050584793091,
"avg_line_length": 13.75,
"blob_id": "cecf494119a55c3060ca825c21f7403ec0113c2c",
"content_id": "520361864715ee3af0293b9d54c8a0e80d5357e6",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 59,
"license_type": "permissive",
"max_line_length": 19,
"num_lines": 4,
"path": "/server/requirements.txt",
"repo_name": "jorgemira/gps_tracker",
"src_encoding": "UTF-8",
"text": "django\ndjangorestframework\ndjango-filter\ndjango-extensions\n"
},
{
"alpha_fraction": 0.6692160367965698,
"alphanum_fraction": 0.6730401515960693,
"avg_line_length": 19.115385055541992,
"blob_id": "e0509ee5db47cf68c7d47b7f9f07eb16fb027c91",
"content_id": "8b907028c6c8123b67a73ead2a12406aa6069d35",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 523,
"license_type": "permissive",
"max_line_length": 71,
"num_lines": 26,
"path": "/client/main.py",
"repo_name": "jorgemira/gps_tracker",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n\nfrom time import sleep\n\nimport schedule\n\nfrom gps_tracker import constants as c\nfrom gps_tracker.jobs import panic_job, post_location_job, schedule_job\nfrom gps_tracker.logging import get_logger\n\nlogger = get_logger(__name__)\n\n\ndef main() -> None:\n # Schedule jobs\n schedule_job(post_location_job, c.TIME_NO_PANIC)\n schedule_job(panic_job, c.TIME_CHECK_PANIC)\n\n # Run scheduled jobs\n while True:\n schedule.run_pending()\n sleep(1)\n\n\nif __name__ == '__main__':\n main()\n"
},
{
"alpha_fraction": 0.6681647896766663,
"alphanum_fraction": 0.6681647896766663,
"avg_line_length": 26.244897842407227,
"blob_id": "8232aa9ddd315a5c79f7d2db2dc1082e3ad678ae",
"content_id": "e67c955b49a3ae027a50af56ee040359920bcb89",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1335,
"license_type": "permissive",
"max_line_length": 75,
"num_lines": 49,
"path": "/client/gps_tracker/jobs.py",
"repo_name": "jorgemira/gps_tracker",
"src_encoding": "UTF-8",
"text": "from typing import Callable\n\nimport schedule\n\nfrom . import constants as c\nfrom .gpsd import GPSD\nfrom .logging import get_logger\nfrom .server import Server\n\nlogger = get_logger(__name__)\n\n\ndef schedule_job(job: Callable, seconds: int) -> None:\n \"\"\"Clear a previously running job, if exists, and launch it again\"\"\"\n schedule.clear(job.__name__)\n job()\n schedule.every(seconds).seconds.do(job).tag(job.__name__)\n\n\ndef post_location_job() -> None:\n \"\"\"Post unsent location list and then post current location\"\"\"\n if not Server.token:\n Server.login()\n\n try:\n location = GPSD.get_location()\n except Exception:\n logger.exception(\"Cannot acquire location\")\n return\n\n if Server.token:\n Server.send_unsent_locations()\n Server.post_location(location)\n else:\n Server.append_failed_location(location)\n\n\ndef panic_job() -> None:\n \"\"\"Check for panic mode and reschedule post_location_job if necesary\"\"\"\n new_panic = Server.is_panic_mode()\n\n if Server.panic_mode and not new_panic:\n logger.info(\"Disabling panic mode\")\n schedule_job(post_location_job, c.TIME_NO_PANIC)\n elif not Server.panic_mode and new_panic:\n logger.info(\"Enabling panic mode\")\n schedule_job(post_location_job, c.TIME_PANIC)\n\n Server.panic_mode = new_panic\n"
},
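`schedule_job` in the record above relies on the `schedule` library's tag/clear mechanism to swap a job's interval at runtime, which is how `panic_job` switches between the panic and non-panic posting rates. A minimal standalone sketch of that pattern (not part of the repository; the function names are placeholders):

```python
import time

import schedule


def heartbeat() -> None:
    print("tick")


def reschedule(job, seconds: int) -> None:
    schedule.clear(job.__name__)  # drop any previously scheduled copy by tag
    schedule.every(seconds).seconds.do(job).tag(job.__name__)


reschedule(heartbeat, 5)   # start with a relaxed interval
reschedule(heartbeat, 1)   # later: tighten the interval; the old job is cleared first

for _ in range(3):
    schedule.run_pending()
    time.sleep(1)
```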
{
"alpha_fraction": 0.6100151538848877,
"alphanum_fraction": 0.6160849928855896,
"avg_line_length": 30.380952835083008,
"blob_id": "bdf483ab3343efbb897aaf9f3639fb2ed5c467fb",
"content_id": "9e0156ac74fac8e8f1a861c88869f9d555a7c54d",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 659,
"license_type": "permissive",
"max_line_length": 67,
"num_lines": 21,
"path": "/server/gps_tracker/gps_tracker/models.py",
"repo_name": "jorgemira/gps_tracker",
"src_encoding": "UTF-8",
"text": "import math\nfrom decimal import Decimal\n\nfrom django.db import models\n\n\nclass Location(models.Model):\n latitude = models.DecimalField(max_digits=9, decimal_places=6)\n longitude = models.DecimalField(max_digits=9, decimal_places=6)\n datetime = models.DateTimeField(db_index=True, unique=True)\n panic = models.BooleanField(default=False)\n hidden = models.BooleanField()\n\n def __str__(self) -> str:\n return (\n f\"Lat: {self.latitude}, \"\n f\"Long: {self.longitude} \"\n f\"Time: {self.datetime.strftime('%Y-%m-%d %H:%M:%S')} \"\n f\"Panic: {self.panic} \"\n f\"Hidden: {self.hidden}\"\n )\n"
},
{
"alpha_fraction": 0.5973597168922424,
"alphanum_fraction": 0.5995599627494812,
"avg_line_length": 28.322580337524414,
"blob_id": "2dabcae0ec108d2a71d2fedb9ab852f08f717893",
"content_id": "c6ddc9e841376a71c13196da7b58ae3225323cf7",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 909,
"license_type": "permissive",
"max_line_length": 83,
"num_lines": 31,
"path": "/client/gps_tracker/location.py",
"repo_name": "jorgemira/gps_tracker",
"src_encoding": "UTF-8",
"text": "import json\nfrom datetime import datetime\nfrom decimal import Decimal\n\nfrom . import constants as c\n\n\nclass Location:\n def __init__(self, longitude: Decimal, latitude: Decimal, datetime_: datetime):\n self.latitude = latitude\n self.longitude = longitude\n self.datetime_ = datetime_\n\n def to_json(self) -> str:\n decimals = Decimal(10) ** -c.PRECISION\n\n return json.loads(\n f'{{\"latitude\": {self.latitude.quantize(decimals)}, '\n f'\"longitude\": {self.longitude.quantize(decimals)}, '\n f'\"datetime\": \"{self.datetime_.strftime(c.DATETIME_FORMAT)}\"}}'\n )\n\n @classmethod\n def from_json(cls, text: str) -> \"Location\":\n value = json.loads(text)\n\n return cls(\n Decimal(value[\"latitude\"]),\n Decimal(value[\"longitude\"]),\n datetime.strptime(value[\"datetime\"], c.DATETIME_FORMAT)\n )\n"
},
{
"alpha_fraction": 0.7354466915130615,
"alphanum_fraction": 0.7354466915130615,
"avg_line_length": 37.55555725097656,
"blob_id": "3b0312e18a51a0eac6739da8e3eb0c81312e9939",
"content_id": "fd6d9ef3bc5497bf7d1df52410880e5eea388cc9",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1735,
"license_type": "permissive",
"max_line_length": 91,
"num_lines": 45,
"path": "/server/gps_tracker/gps_tracker/viewsets.py",
"repo_name": "jorgemira/gps_tracker",
"src_encoding": "UTF-8",
"text": "from django.http import JsonResponse\nfrom django_filters.rest_framework import DjangoFilterBackend\nfrom rest_framework import filters\nfrom rest_framework.authentication import TokenAuthentication\nfrom rest_framework.decorators import api_view, permission_classes, authentication_classes\nfrom rest_framework.generics import ListCreateAPIView\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.viewsets import ViewSetMixin\n\nfrom .filters import LocationFilter\nfrom .helpers import is_panic, distance_coordinates, MAX_DISTANCE\nfrom .models import Location\nfrom .serializers import LocationSerializer\n\n\nclass LocationCreateAPIView(ViewSetMixin, ListCreateAPIView):\n queryset = Location.objects.all()\n serializer_class = LocationSerializer\n filter_backends = (filters.OrderingFilter, DjangoFilterBackend)\n ordering_fields = (\"datetime\",)\n filterset_class = LocationFilter\n permission_classes = (IsAuthenticated,)\n authentication_classes = (TokenAuthentication,)\n\n def perform_create(self, serializer):\n last_location = Location.objects.filter(hidden=False).order_by(\"-datetime\").first()\n if last_location:\n distance = distance_coordinates(\n last_location.latitude,\n last_location.longitude,\n serializer.validated_data.get(\"latitude\"),\n serializer.validated_data.get(\"longitude\"),\n )\n hidden = distance < MAX_DISTANCE\n else:\n hidden = False\n\n serializer.save(hidden=hidden, panic=is_panic())\n\n\n@api_view()\n@permission_classes((IsAuthenticated,))\n@authentication_classes((TokenAuthentication,))\ndef panic(request):\n return JsonResponse({\"panic\": is_panic()})\n"
},
{
"alpha_fraction": 0.620820164680481,
"alphanum_fraction": 0.6214510798454285,
"avg_line_length": 30.700000762939453,
"blob_id": "f685598728c34ed843f8a7399b36c84f32e3c30d",
"content_id": "f7aeca0fb8097d59b76b7df26fdea0340d85d34e",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1585,
"license_type": "permissive",
"max_line_length": 99,
"num_lines": 50,
"path": "/client/gps_tracker/gpsd.py",
"repo_name": "jorgemira/gps_tracker",
"src_encoding": "UTF-8",
"text": "from datetime import datetime\nfrom decimal import Decimal\nfrom typing import Tuple, List\n\nfrom gps import gps, WATCH_ENABLE, WATCH_NEWSTYLE\n\nfrom . import constants as c\nfrom .location import Location\n\n\nclass GPSD:\n gpsd = gps(mode=WATCH_ENABLE | WATCH_NEWSTYLE)\n\n @classmethod\n def get_location(cls) -> Location:\n \"\"\"Create a new Location object based on the GPS coordinates\"\"\"\n latitude, longitude, datetime_ = GPSD._get_coordinates()\n return Location(latitude, longitude, datetime_)\n\n @classmethod\n def _get_coordinates(cls) -> Tuple[Decimal, Decimal, datetime]:\n \"\"\"Get GPS coordinates as an average of the coordinates since last time it was collected\"\"\"\n time = datetime.utcnow().strftime(c.DATETIME_FORMAT)\n needed = {\"lat\", \"lon\", \"time\"}\n coords = {\"lat\", \"lon\"}\n lats = []\n lons = []\n\n location = cls.gpsd.next()\n keys = set(location)\n\n while needed - keys or time > location.time:\n if not coords - keys:\n lats.append(Decimal(location.lat))\n lons.append(Decimal(location.lon))\n\n location = cls.gpsd.next()\n keys = set(location)\n\n location_time = datetime.strptime(location.time, c.DATETIME_FORMAT)\n\n return cls._avg(lats), cls._avg(lons), location_time\n\n @staticmethod\n def _avg(items: List[Decimal]) -> Decimal:\n \"\"\"Return the average value of a list of Decimals\"\"\"\n try:\n return sum(items) / len(items)\n except ZeroDivisionError:\n return Decimal(0)\n"
},
{
"alpha_fraction": 0.5475000143051147,
"alphanum_fraction": 0.5950000286102295,
"avg_line_length": 21.22222137451172,
"blob_id": "ae3005852358a546a0d5830393fcd4ce7c75d4ea",
"content_id": "b2dc54ec4ca72ea9f139ab8aedaf24a8eb4f60d9",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 400,
"license_type": "permissive",
"max_line_length": 67,
"num_lines": 18,
"path": "/server/gps_tracker/gps_tracker/migrations/0002_auto_20201022_1259.py",
"repo_name": "jorgemira/gps_tracker",
"src_encoding": "UTF-8",
"text": "# Generated by Django 3.1.2 on 2020-10-22 12:59\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('gps_tracker', '0001_initial'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='location',\n name='datetime',\n field=models.DateTimeField(db_index=True, unique=True),\n ),\n ]\n"
},
{
"alpha_fraction": 0.6006984710693359,
"alphanum_fraction": 0.6356227993965149,
"avg_line_length": 23.542856216430664,
"blob_id": "48f0968e7fe39797dfe0a75009fe02d7e2003f1f",
"content_id": "9d797b89b4758a328240b8c64de1a708659fd577",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 859,
"license_type": "permissive",
"max_line_length": 96,
"num_lines": 35,
"path": "/server/gps_tracker/gps_tracker/helpers.py",
"repo_name": "jorgemira/gps_tracker",
"src_encoding": "UTF-8",
"text": "import math\nimport os\nfrom decimal import Decimal\n\nPANIC_FILE = \"panic_file\"\nR = 6371\nMAX_DISTANCE = Decimal(5)\n\n\ndef is_panic() -> bool:\n return os.path.exists(PANIC_FILE)\n\n\ndef set_panic_mode(mode: bool) -> None:\n if mode:\n os.remove(PANIC_FILE)\n else:\n open(PANIC_FILE, \"w\").close()\n\n\ndef distance_coordinates(lat1: Decimal, lon1: Decimal, lat2: Decimal, lon2: Decimal) -> Decimal:\n \"\"\"Calculate the distance between two coordinates using the Haversine formula\"\"\"\n lat1 = math.radians(lat1)\n lon1 = math.radians(lon1)\n lat2 = math.radians(lat2)\n lon2 = math.radians(lon2)\n\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n\n a = math.sin(dlat / 2) ** 2 + math.cos(lat1) * math.cos(lat2) * math.sin(dlon / 2) ** 2\n c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))\n distance = Decimal(R * c)\n\n return distance\n"
},
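A quick worked example of `distance_coordinates`, using the first coordinate from the client's sample data plus a nearby point about 0.007 degrees further north. The result is well under the 5 km `MAX_DISTANCE`, so the viewset's `perform_create` would mark such a new location as hidden. The import path is an assumption about how the module is loaded.

```python
from decimal import Decimal

from helpers import distance_coordinates, MAX_DISTANCE  # adjust import path as needed

last = (Decimal("38.333039"), Decimal("-0.510864"))  # first sample-data point (lat, lon)
new = (Decimal("38.340000"), Decimal("-0.510864"))   # ~0.007 degrees further north

distance = distance_coordinates(last[0], last[1], new[0], new[1])
print(f"{distance:.3f} km apart")
print("hidden =", distance < MAX_DISTANCE)  # True: under 5 km, so the API would hide it
```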
{
"alpha_fraction": 0.6113550662994385,
"alphanum_fraction": 0.6132371425628662,
"avg_line_length": 33.279571533203125,
"blob_id": "50ff492ca0c6280a13c85eecf11a1b15ebc3a5b5",
"content_id": "8c50d08671bfe77985b5f51814593e0fbaf9d930",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3188,
"license_type": "permissive",
"max_line_length": 96,
"num_lines": 93,
"path": "/client/gps_tracker/server.py",
"repo_name": "jorgemira/gps_tracker",
"src_encoding": "UTF-8",
"text": "import json\nimport os\nfrom json.decoder import JSONDecodeError\nfrom typing import List, Union\n\nimport requests\nfrom requests import RequestException\n\nfrom . import constants as c\nfrom .location import Location\nfrom .logging import get_logger\n\nlogger = get_logger(__name__)\n\n\nclass Server:\n token: Union[str, None] = None\n panic: bool = False\n panic_mode: bool = False\n\n @classmethod\n def login(cls) -> None:\n \"\"\"Get and store authentication token from server\"\"\"\n contents = {\"username\": c.USERNAME, \"password\": c.PASSWORD}\n try:\n response = requests.post(c.AUTH_URL, json=contents)\n if response.status_code != 200:\n raise ValueError(\"Log in response was not successful\")\n content = json.loads(response.content)\n cls.token = content[\"token\"]\n except (RequestException, JSONDecodeError, ValueError):\n logger.exception(\"Error logging into server\")\n cls.token = None\n\n @classmethod\n def post_location(cls, location: Location) -> None:\n \"\"\"Upload a location into the server\"\"\"\n headers = {\"Authorization\": f\"Token {cls.token}\"}\n data = location.to_json()\n try:\n response = requests.post(c.LOCATIONS_URL, json=data, headers=headers)\n if response.status_code != 201:\n raise ValueError(\"Location posting response was not successful\")\n except (RequestException, ValueError):\n logger.exception(\"Error posting location\")\n cls.append_failed_location(location)\n\n @classmethod\n def is_panic_mode(cls) -> bool:\n headers = {\"Authorization\": f\"Token {cls.token}\"}\n try:\n response = requests.get(c.PANIC_URL, headers=headers)\n content = json.loads(response.content)\n return content[\"panic\"]\n except (RequestException, JSONDecodeError):\n logger.exception(\"Cannot get panic mode\")\n return False\n\n @staticmethod\n def append_failed_location(location: Location) -> None:\n \"\"\"Append location into PENDING_FILE\"\"\"\n try:\n with open(c.PENDING_FILE, \"a\") as file:\n file.write(json.dumps(location.to_json()) + \"\\n\")\n except IOError:\n logger.exception(\"Cannot append failed location\")\n\n @classmethod\n def send_unsent_locations(cls) -> None:\n \"\"\"Iterate through the list of locations that have not been sent and try to send them\"\"\"\n unsent_locations = cls._get_unsent_locations()\n\n for location in unsent_locations:\n cls.post_location(location)\n\n @staticmethod\n def _get_unsent_locations() -> List[Location]:\n \"\"\"Return a list of the locations that have not been sent\"\"\"\n locations = []\n\n if not os.path.exists(c.PENDING_FILE):\n return locations\n\n with open(c.PENDING_FILE) as file:\n for line in file:\n try:\n locations.append(Location.from_json(line))\n except JSONDecodeError:\n logger.warn(f\"Error decoding string: '{line}'\")\n\n os.remove(c.PENDING_FILE)\n\n return locations\n"
},
{
"alpha_fraction": 0.7419962286949158,
"alphanum_fraction": 0.7419962286949158,
"avg_line_length": 26.947368621826172,
"blob_id": "0515cfd27dbe867be8e7f600b814e55a8deaf23c",
"content_id": "75a133126a481b7904aa9b52c0bb3db17ad23ad2",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 531,
"license_type": "permissive",
"max_line_length": 85,
"num_lines": 19,
"path": "/client/gps_tracker/logging.py",
"repo_name": "jorgemira/gps_tracker",
"src_encoding": "UTF-8",
"text": "import logging\nfrom logging.handlers import TimedRotatingFileHandler\n\nfrom . import constants as c\n\n\ndef get_logger(name):\n logger = logging.getLogger(name)\n logger.setLevel(logging.DEBUG)\n log_formatter = logging.Formatter(c.LOG_FORMAT)\n\n log_file_handler = TimedRotatingFileHandler(filename=c.LOG_FILE, when=\"midnight\")\n streamhandler = logging.StreamHandler()\n\n for handler in (log_file_handler, streamhandler):\n handler.setFormatter(log_formatter)\n logger.addHandler(handler)\n\n return logger\n"
}
] | 19 |
EvilZoidbergMD/pychess
|
https://github.com/EvilZoidbergMD/pychess
|
2b225c0c6502a17525a476bcc2d2e599f7917522
|
1abbddf359bd6cb41267e324b792ee4aba1ca088
|
91087c92c9ef0290fb67f3bce4ba9e92105737ad
|
refs/heads/master
| 2021-01-20T06:54:50.077736 | 2015-04-03T04:01:13 | 2015-04-03T04:01:13 | 33,326,844 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6317365169525146,
"alphanum_fraction": 0.6407185792922974,
"avg_line_length": 19.9375,
"blob_id": "dd259f85b79ba1bd893f4c8993e451f5ea2b81c7",
"content_id": "bf19a833a96bc541b98c06443a6d9c65722ba74b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 334,
"license_type": "no_license",
"max_line_length": 51,
"num_lines": 16,
"path": "/queen.py",
"repo_name": "EvilZoidbergMD/pychess",
"src_encoding": "UTF-8",
"text": "from settings import *\nfrom piece import *\n\nclass queen(piece):\n\n\tdef __init__(self, posX, posY, team):\n\t\ttext = 'X'\n\t\tif team == 0:\n\t\t\ttext = white_pieces[4]\n\t\telse:\n\t\t\ttext = black_pieces[4]\n\t\tpiece.__init__(self, posX, posY, team, text)\n\n\tdef can_move(self, x, y):\n\t\tprint 'Checking if this piece can move like that'\n\t\treturn False"
},
{
"alpha_fraction": 0.6528497338294983,
"alphanum_fraction": 0.6606217622756958,
"avg_line_length": 18.350000381469727,
"blob_id": "f89f97635f670267f1528383c285aba52cf857a7",
"content_id": "a4be072c54a1b8ba90bdcd70b3a8d3952eda1457",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 386,
"license_type": "no_license",
"max_line_length": 51,
"num_lines": 20,
"path": "/piece.py",
"repo_name": "EvilZoidbergMD/pychess",
"src_encoding": "UTF-8",
"text": "class piece:\n\ttext_value = 'X'\n\tposX = 0\n\tposY = 0\n\tteam = -1\n\tblocked_by_pieces = True\n\tblocked_by_attacks = False\n\n\tdef __init__(self, posX, posY, team, text):\n\t\tself.posX = posX\n\t\tself.posY = posY\n\t\tself.team = team\n\t\tself.text_value = text\n\n\tdef to_string(self):\n\t\treturn self.text_value\n\n\tdef can_move(self, x, y):\n\t\tprint 'Checking if this piece can move like that'\n\t\treturn False"
},
{
"alpha_fraction": 0.6376021504402161,
"alphanum_fraction": 0.6457765698432922,
"avg_line_length": 19.44444465637207,
"blob_id": "0402df6d0ac94a8281a95c6361efe6cb6bddd625",
"content_id": "6b45ab54bf43283b05ace790a78eeb241f9c1fc2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 367,
"license_type": "no_license",
"max_line_length": 51,
"num_lines": 18,
"path": "/king.py",
"repo_name": "EvilZoidbergMD/pychess",
"src_encoding": "UTF-8",
"text": "from settings import *\nfrom piece import *\n\nclass king(piece):\n\n\tdef __init__(self, posX, posY, team):\n\t\ttext = 'X'\n\t\tif team == 0:\n\t\t\ttext = white_pieces[5]\n\t\telse:\n\t\t\ttext = black_pieces[5]\n\t\tpiece.__init__(self, posX, posY, team, text)\n\n\t\tself.blocked_by_attacks = True\n\n\tdef can_move(self, x, y):\n\t\tprint 'Checking if this piece can move like that'\n\t\treturn False"
},
{
"alpha_fraction": 0.6796875,
"alphanum_fraction": 0.6796875,
"avg_line_length": 17.33333396911621,
"blob_id": "dcbcc68f89a8cc9145c5844be2087b391efa3a2a",
"content_id": "2eb87dd445eaeeba12b67628c9c83de43560cc24",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 384,
"license_type": "no_license",
"max_line_length": 35,
"num_lines": 21,
"path": "/space.py",
"repo_name": "EvilZoidbergMD/pychess",
"src_encoding": "UTF-8",
"text": "class space:\n\toccupied = False\n\toccupant = None\n\tstring_value = ' '\n\n\tdef __init__(self, text):\n\t\tself.string_value = text\n\n\tdef to_string(self):\n\t\tif self.occupied:\n\t\t\treturn self.occupant.to_string()\n\t\telse:\n\t\t\treturn self.string_value\n\n\tdef set_occupant(self, p):\n\t\tself.occupied = True\n\t\tself.occupant = p\n\n\tdef clear_occupant(self):\n\t\tself.occupied = False\n\t\tself.occupant = None"
}
] | 4 |
brl1906/Golland_TimesheetBot
|
https://github.com/brl1906/Golland_TimesheetBot
|
1523ffd856152d8466093fffbefc9165564c42ae
|
20388e4f0028295bb288a5f686e44f4a53227ecb
|
870bdba4953c2c923812e4fa487f2eb3556f5fe9
|
refs/heads/master
| 2018-05-19T13:12:15.044734 | 2017-06-07T18:01:58 | 2017-06-07T18:01:58 | 93,281,879 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.709193229675293,
"alphanum_fraction": 0.7504690289497375,
"avg_line_length": 39.92307662963867,
"blob_id": "bfe82b42991ca3ba53f6e0c4b0cdd5303520a4d3",
"content_id": "e40b995ff480400d09042229e1b34db8e1aadd31",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 533,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 13,
"path": "/todo.txt",
"repo_name": "brl1906/Golland_TimesheetBot",
"src_encoding": "UTF-8",
"text": "1. set to email every 2 minutes as test\n2. have run automatically every 2 minutes\n3. set to run automatically every 2nd Tuesday\n4. get image in email body\n***5. send emails to a list individually.\n***6. change email to [email protected]\n***7. email & password to sysv\n8. implementation file for set up, deploy\n***9. finalize email message\n10. write error handler to catch emails not sent and continue through program\n11. add word of the day feature\n12. per Gary send out Tuesdays -- due before 4pm\n13. send out at 7am on regular basis \n"
},
{
"alpha_fraction": 0.8444444537162781,
"alphanum_fraction": 0.8444444537162781,
"avg_line_length": 88.5,
"blob_id": "0c511f1f59933da44a21256474ad490de2d20c7e",
"content_id": "169eb4b00d7975fa920b52ef0654dc90706dc2ba",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 180,
"license_type": "no_license",
"max_line_length": 155,
"num_lines": 2,
"path": "/README.md",
"repo_name": "brl1906/Golland_TimesheetBot",
"src_encoding": "UTF-8",
"text": "# Golland_TimesheetBot\nAutomates and improves the manual reminders for timesheet completion. Replaces manually copy pasted simple clock gifs with NewYorker caption contest image. \n"
},
{
"alpha_fraction": 0.6542280912399292,
"alphanum_fraction": 0.6599458456039429,
"avg_line_length": 35.516483306884766,
"blob_id": "58543f3efa310e6fe3ce6458b41250e34b14f1e9",
"content_id": "056d828d1e22ebefb5355425d2c679e7c60ffbac",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3323,
"license_type": "no_license",
"max_line_length": 138,
"num_lines": 91,
"path": "/gollandtimesheetbot.py",
"repo_name": "brl1906/Golland_TimesheetBot",
"src_encoding": "UTF-8",
"text": "\"\"\"This program sends an email to a list every 2nd Tuesday to\\\nremind them to complete their timesheet. It scrapes the Ny Times\ncaption contest images each time and attaches a new image to the\nemail reminder.\"\"\"\n\nimport datetime,bs4,os,requests,sys\nimport smtplib\nfrom email.mime.text import MIMEText\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.application import MIMEApplication\nfrom email.mime.image import MIMEImage\n\nprint('Remember to pass email password as second argument to run program.')\n\n\n#get image\ncartoon_img_url = 'http://www.newyorker.com/cartoons'\nresObj = requests.get(cartoon_img_url)\nresObj.raise_for_status()\nsoup = bs4.BeautifulSoup(resObj.text,'html.parser')\nurl_img = soup.select('div.caption-contest-container img')\nif url_img == []:\n print ('Cartoon could not be found.')\nelse:\n try:\n img = url_img[0].get('src')\n print('Downloading image....') #add image src to print\n resObj = requests.get(img)\n resObj.raise_for_status()\n except OSError:\n pass\n\n\n#save image\ntry:\n os.makedirs('Golland_pics')\nexcept OSError:\n pass\n\nimg_file = open(os.path.join('Golland_pics',os.path.basename(img)),'wb')\nfor chunk in resObj.iter_content(100000):\n img_file.write(chunk)\nimg_file.close()\n\n\nemailList = {'Jackson': '[email protected]','Berke':'[email protected]',\n 'Catherine':'[email protected]','Nick':'[email protected]',\n 'Willem':'[email protected]','Babila':'[email protected]',\n 'Varghese':'[email protected]','Cole':'[email protected]',\n 'Ben':'[email protected]','Evan':'[email protected]',\n 'Troy':'[email protected]','Terrence':'[email protected]',\n 'Ron':'[email protected]','Janice':'[email protected]',\n 'Ryan':'[email protected]','Kay':'[email protected]',\n 'Gary':'[email protected]'}\n\n\n\n\nfor name, email in emailList.items():\n #attach image and send email\n msg = MIMEMultipart()\n msg['To'] = email\n msg['From'] = 'GollandBot'\n msg['Subject'] = name + ' Timesheets--Golland : )'\n body = MIMEText(name + ',' + \"\\n\\tThis is Golland, your friendly timesheet reminder. I'm a bot. My job is to remind you to submit a \\\ncompleted timesheet to your supervisor. That\\'s what I care abot. You know the clock is ticking on this task, so rather than send you an \\\nimage of a ticking clock, instead, I will shoot you the most recent caption contest image from the New Yorker Magazine.\\n\\nGet those \\\ncreative juices flowing! If you have a good idea for this week's caption contest, I encourage you to submit it and share with your \\\ncomrades.\\n\\nHand in your timesheet today by 10:00am, if you have not done so already and good luck with the caption contest.\\\n\\n\\t\\t\\t\\t\\t\\t\\t\\t--Gollandbot\")\n msg.attach(body)\n\n if len(os.path.basename(img)) > 1:\n # file = os.path.join('Golland_pics',os.path.basename(img))\n file = open(os.path.join('Golland_pics',os.path.basename(img)),'rb').read()\n attachment = MIMEImage(file, name = os.path.basename(img))\n # attachment.add_header('Content-Disposition','attachment',filename = file)\n msg.attach(attachment)\n else:\n pass\n\n smtpObj = smtplib.SMTP('smtp.gmail.com',587)\n smtpObj.ehlo()\n smtpObj.starttls()\n smtpObj.ehlo()\n smtpObj.login('[email protected]', sys.argv[1])\n smtpObj.sendmail(msg['From'],msg['To'],msg.as_string())\n smtpObj.quit()\n print('Email sent to ' + name)\n\nprint('Emails Sent...')\n"
}
] | 3 |
corast/kyb
|
https://github.com/corast/kyb
|
d6d4fe53de3f3a44292286f6fc0934097bb67f06
|
e6c83ff22021a21be70d7535628926b20befc264
|
2b28cda735ebd7e28410d68e0507174e93e8163d
|
refs/heads/master
| 2023-06-13T02:46:18.478071 | 2020-04-20T11:13:29 | 2020-04-20T11:13:29 | 248,447,468 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6519173979759216,
"alphanum_fraction": 0.6519173979759216,
"avg_line_length": 29.81818199157715,
"blob_id": "814ec8dc6ee478ee4d0755ab132c3526f89289d5",
"content_id": "7b7c41181ae80ea1450acbae8c6f139edb035b69",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 339,
"license_type": "no_license",
"max_line_length": 59,
"num_lines": 11,
"path": "/kubernetes/vue-flask-mongo-kubernetes/flask-api/common/utils.py",
"repo_name": "corast/kyb",
"src_encoding": "UTF-8",
"text": "from bson import ObjectId\nfrom flask.json import JSONEncoder\n\nclass MongoJSONEncoder(JSONEncoder):\n \"\"\" Customize JSONEncoder to convert ObjectId -> string\n \"\"\"\n def default(self, o): # pylint: disable=method-hidden\n if isinstance(o, ObjectId):\n return str(o)\n else:\n return super().default(o)\n"
},
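The `MongoJSONEncoder` above only takes effect once Flask is told to use it. The application factory is not part of this excerpt, so the wiring below is a sketch of the usual Flask 1.x pattern (matching the pinned Flask==1.1.1 in requirements.txt), with a made-up route and ObjectId value.

```python
from bson import ObjectId
from flask import Flask, jsonify

from common.utils import MongoJSONEncoder  # import path assumed from the repo layout

app = Flask(__name__)
app.json_encoder = MongoJSONEncoder  # Flask 1.x hook for a custom JSON encoder


@app.route("/example")
def example():
    # Without MongoJSONEncoder, jsonify would raise a TypeError on ObjectId.
    return jsonify({"_id": ObjectId("5e9f8f8f8f8f8f8f8f8f8f8f")})
```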
{
"alpha_fraction": 0.5047619342803955,
"alphanum_fraction": 0.6857143044471741,
"avg_line_length": 16.5,
"blob_id": "bec1cfd97799157b81a44a1d15d34388914b7de6",
"content_id": "8c0ae92185b0c19ee59c8804d9f2a37ba00f145e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 105,
"license_type": "no_license",
"max_line_length": 20,
"num_lines": 6,
"path": "/kubernetes/vue-flask-mongo-kubernetes/flask-api/requirements.txt",
"repo_name": "corast/kyb",
"src_encoding": "UTF-8",
"text": "Flask==1.1.1\nFlask-Cors==3.0.8\nFlask-PyMongo==2.3.0\nFlask-RESTful==0.3.8\npymongo==3.10.1\nWerkzeug==1.0.0\n"
},
{
"alpha_fraction": 0.6179245114326477,
"alphanum_fraction": 0.6179245114326477,
"avg_line_length": 29.35714340209961,
"blob_id": "6963272823ee841605afe4df98895d725293ef71",
"content_id": "d2c9c3c9a890b84775d5a74cbbc877c3ab1facb1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 424,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 14,
"path": "/kubernetes/vue-flask-mongo-kubernetes/vue-frontend/src/api.js",
"repo_name": "corast/kyb",
"src_encoding": "UTF-8",
"text": "import axios from 'axios'\n\nconst flaskApi = axios.create({\n baseURL: 'http://app-boilerplate/api/'\n})\n\nexport default {\n ping: () => flaskApi.get('/ping'),\n getAllPosts: () => flaskApi.get('/posts'),\n getPost: (id) => flaskApi.get(`/posts/${id}`),\n createPost: (body) => flaskApi.post('/posts', body),\n editPost: (id, body) => flaskApi.put(`/posts/${id}`, body),\n deletePost: (id) => flaskApi.delete(`/posts/${id}`)\n}"
},
{
"alpha_fraction": 0.78899085521698,
"alphanum_fraction": 0.78899085521698,
"avg_line_length": 20.799999237060547,
"blob_id": "c3b63a2d971278ecce0dc27736d3139fca1c23b0",
"content_id": "231a2af7fde812750f1e837763e38c05ea250ba8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 218,
"license_type": "no_license",
"max_line_length": 30,
"num_lines": 10,
"path": "/kubernetes/scaling_mongodb_on_kubernetes/deploy.sh",
"repo_name": "corast/kyb",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\necho \"Deploying Mongo\"\nkubectl apply -f mongo.yaml\n\necho \"Deploying mongo ingress\"\nminikube addons enable ingress\nkubectl apply -f ingress.yaml\n\necho \"Deploying dnsutils pod\"\nkubectl apply -f dnsutils.yaml\n"
},
{
"alpha_fraction": 0.5620437860488892,
"alphanum_fraction": 0.5766423344612122,
"avg_line_length": 24,
"blob_id": "41b2b52fa1b43536bea210f16262de048f9de0fd",
"content_id": "bfc27b6302b23df4c0b7c44d0562e1027b97694a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 274,
"license_type": "no_license",
"max_line_length": 41,
"num_lines": 11,
"path": "/kubernetes/vue-flask-mongo-kubernetes/flask-api/resources/health.py",
"repo_name": "corast/kyb",
"src_encoding": "UTF-8",
"text": "import os\nfrom flask import make_response\nfrom flask_restful import Resource\n\nclass Health(Resource):\n def get(self):\n return make_response({\n 'status': 'success',\n 'message': 'pong!',\n 'container_id': os.uname()[1]\n }, 200)"
},
{
"alpha_fraction": 0.7676767706871033,
"alphanum_fraction": 0.7676767706871033,
"avg_line_length": 48,
"blob_id": "b6be3765cc57d6c1858ec3297374685466d32175",
"content_id": "d9374933cd54b42357133252b7e5a72711db42ae",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 99,
"license_type": "no_license",
"max_line_length": 66,
"num_lines": 2,
"path": "/helmcharts/mychart/README.md",
"repo_name": "corast/kyb",
"src_encoding": "UTF-8",
"text": "# Create your first helm chart\n<https://docs.bitnami.com/tutorials/create-your-first-helm-chart/>\n\n"
},
{
"alpha_fraction": 0.8102766871452332,
"alphanum_fraction": 0.8102766871452332,
"avg_line_length": 24.299999237060547,
"blob_id": "fdb378128f40d4e59368df42eebd55f27dd7aba6",
"content_id": "4c94e51f763e428cbed6ae310682b44c9856c8a2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 253,
"license_type": "no_license",
"max_line_length": 51,
"num_lines": 10,
"path": "/kubernetes/scaling_mongodb_on_kubernetes/teardown.sh",
"repo_name": "corast/kyb",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\nkubectl delete -f mongo.yaml\n\necho \"Delete ingress\"\nkubectl delete ingress -l stateful-ingress\nminikube addons disable ingress\n\necho \"Deleting dnsutils\"\nkubectl delete pod -l name=dnsutils\nkubectl delete persistentvolumeclaims -l role=mongo\n"
},
{
"alpha_fraction": 0.7755835056304932,
"alphanum_fraction": 0.7755835056304932,
"avg_line_length": 26.899999618530273,
"blob_id": "745a062256e85c121a98bed56cae27376984eac7",
"content_id": "3829f846434270d2992d93c9f33661270cedad68",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 557,
"license_type": "no_license",
"max_line_length": 73,
"num_lines": 20,
"path": "/kubernetes/vue-flask-mongo-kubernetes/deploy.sh",
"repo_name": "corast/kyb",
"src_encoding": "UTF-8",
"text": "echo \"Creating the mongodb deployment and services...\"\n \nkubectl create -f ./kubernetes/scaling_mongodb_on_kubernetes/mongo.yaml\n\necho \"Creating the flask-api deployment and service...\"\n\nkubectl create -f ./kubernetes/flask-api.yml\n\necho \"Adding the ingress...\"\n\nminikube addons enable ingress\nkubectl apply -f ./kubernetes/app-ingress.yml\n\necho \"Creating the vue-frontend deployment and service...\"\n\nkubectl create -f ./kubernetes/vue-frontend.yml\n\necho \"Deploying dnsutils pod\"\n\nkubectl apply -f ./kubernetes/scaling_mongodb_on_kubernetes/dnsutils.yaml"
},
{
"alpha_fraction": 0.7558229565620422,
"alphanum_fraction": 0.7558229565620422,
"avg_line_length": 34.77777862548828,
"blob_id": "5c02ef7f1f0f3589cba4df90326fcf3e27c8c498",
"content_id": "1b051b3fe1b418c1ba9e95f7589c6a9361d31ca2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2576,
"license_type": "no_license",
"max_line_length": 250,
"num_lines": 72,
"path": "/kubernetes/vue-flask-mongo-kubernetes/README.md",
"repo_name": "corast/kyb",
"src_encoding": "UTF-8",
"text": "# vue-flask-mongo-kubernetes\n\nThis is a simple app boilerplate using a Vue.js frontend, Python Flask API, MongoDB database and kubernetes with minikube deployment.\n\nThis app is based on https://testdriven.io/blog/running-flask-on-kubernetes/\n\n## Install dependencies\n\n Dependencies:\n Docker\n Kubernetes\n Minikube\n\nInstall docker: https://docs.docker.com/install/\n\nInstall kubectl: https://kubernetes.io/docs/tasks/tools/install-kubectl/\n\nInstall minikube: https://kubernetes.io/docs/tasks/tools/install-minikube/\n\n## Get started\nClone the repository\n\n $ git clone https://github.ibm.com/Havard-Thom/vue-flask-mongo-kubernetes\n $ cd vue-flask-mongo-kubernetes\n\nStart the minikube cluster and open minikube dashboard which will be useful later to get a deployment overview\n\n $ minikube start\n $ minikube dashboard\n\nBefore building the docker images for our applications, we want to point our terminal to the minikube docker environment.\n\n $ minikube docker-env\n $ eval $(minikube -p minikube docker-env)\n $ docker ps (should show minikube containers running)\n\nBuild the flask-api docker image\n\n $ cd flask-api\n $ docker build -t flask-api .\n $ cd ..\n\nBuild the vue-frontend docker image\n\n $ cd vue-frontend\n $ docker build -t vue-frontend .\n $ cd ..\n\nTo understand what we are deploying, study the deployment files in `./kubernetes` folder. There is also a more detailed description on https://testdriven.io/blog/running-flask-on-kubernetes/\n\nRun the deploy script to deploy everything on the minikube cluster. It will try to clean up existing deployments so don't worry if there are some errors/warnings.\n\n $ sh deploy.sh\n\nCheck the minikube dashboard to see if everything deployed successfully. There should be three deployments and services (vue-frontend, flask-api and mongodb). There should also be a persistent volume and a persistent volume claim for mongodb storage.\n\nTo access our deployed app, we first need to update our `/etc/hosts` file to route requests from the host we have defined in `./kubernetes/minikube-ingress.yml` to the Minikube instance. The host is called `app-boilerplate`.\n\nAdd an entry to `/etc/hosts`:\n\n $ echo \"$(minikube ip) app-boilerplate\" | sudo tee -a /etc/hosts\n\nWe should now be able to open the application at `http://app-boilerplate/`\n\nFor more information on Scalability, Helpful Commands etc. see https://testdriven.io/blog/running-flask-on-kubernetes/\n\nNB:\n\n Boilerplate does not include:\n Proper exception handling\n Authentication and Authorization\n DB scalability\n"
},
{
"alpha_fraction": 0.5777778029441833,
"alphanum_fraction": 0.7111111283302307,
"avg_line_length": 21.66666603088379,
"blob_id": "86629062841be151a74964d6d5693d7202720cc8",
"content_id": "355df3b274a8874cc10c139537d5149ffda4cc93",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 135,
"license_type": "no_license",
"max_line_length": 29,
"num_lines": 6,
"path": "/flask/bookstore/books/requirements.txt",
"repo_name": "corast/kyb",
"src_encoding": "UTF-8",
"text": "python_version>=\"3.6\"\nFlask==1.1.1\nopenapi-spec-validator==0.2.8\nrequests==2.23.0\nconnexion[swagger-ui]==2.6.0\npython_dateutil >= 2.6.0"
},
{
"alpha_fraction": 0.767123281955719,
"alphanum_fraction": 0.767123281955719,
"avg_line_length": 21.33333396911621,
"blob_id": "b777ad527decd3b4b3e0ca69c840dc713c8d77e2",
"content_id": "a24bbef8fa9e95c32e710619cce0696c3128388e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 803,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 36,
"path": "/kubernetes/running_flask_kub/flask-vue-kubernetes-mongo/teardown.sh",
"repo_name": "corast/kyb",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\n\necho \"Deleting the volume...\"\n\nkubectl delete -f ./kubernetes/persistent-volume.yml\nkubectl delete -f ./kubernetes/persistent-volume-claim.yml\n\n\necho \"Deleting the database credentials...\"\n\nkubectl delete -f ./kubernetes/secret.yml\n\n\necho \"Deleting the postgres deployment and service...\"\n\nkubectl delete -f ./kubernetes/postgres-deployment.yml\nkubectl delete -f ./kubernetes/postgres-service.yml\n\n\n\necho \"Deleting the flask deployment and service...\"\n\nkubectl delete -f ./kubernetes/flask-deployment.yml\nkubectl delete -f ./kubernetes/flask-service.yml\n\n\necho \"Deleting the ingress...\"\n\nkubectl delete -f ./kubernetes/minikube-ingress.yml\n\n\necho \"Deleting the vue deployment and service...\"\n\nkubectl delete -f ./kubernetes/vue-deployment.yml\nkubectl delete -f ./kubernetes/vue-service.yml"
},
{
"alpha_fraction": 0.7011494040489197,
"alphanum_fraction": 0.7011494040489197,
"avg_line_length": 13.666666984558105,
"blob_id": "e411f2a85091e86b303a41dac0f483604b27d4d1",
"content_id": "e677ca1349ce60f055403c745ea7a567f933252b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 87,
"license_type": "no_license",
"max_line_length": 27,
"num_lines": 6,
"path": "/kubernetes/vue-flask-mongo-kubernetes/fresh_deploy.sh",
"repo_name": "corast/kyb",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\n# Clearing old deployments.\n/bin/bash ./teardown.sh\n\n/bin/bash ./deploy.sh"
},
{
"alpha_fraction": 0.7847411632537842,
"alphanum_fraction": 0.7847411632537842,
"avg_line_length": 26.22222137451172,
"blob_id": "104565d96a9a5c0d96fcab7b88450ff602b3dd89",
"content_id": "5b3ec106589bb3797b36d2f2c4c0c453f4c9809b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 734,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 27,
"path": "/kubernetes/vue-flask-mongo-kubernetes/teardown.sh",
"repo_name": "corast/kyb",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n# All deletes based on labels used. Must be unique for each kind.\necho \"Delete mongo database\"\n\nkubectl delete -f ./kubernetes/scaling_mongodb_on_kubernetes/mongo.yaml\n\necho \"Deleting pv and pvc of mongo database\"\n\nkubectl delete pv,pvc -l role=mongo\n\necho \"Deleting the flask-api deployment and service...\"\n\nkubectl delete service -l service=flask-api\nkubectl delete deployment -l name=flask-api\n\necho \"Disabling the ingress...\"\n\nkubectl delete ingress -l name=app-ingress\nminikube addons disable ingress\n\necho \"Deleting the vue-frontend deployment and service...\"\n\nkubectl delete deployment -l name=vue-frontend\nkubectl delete service -l service=vue-frontend\n\necho \"Deleting dnsutils\"\nkubectl delete pod -l name=dnsutils"
},
{
"alpha_fraction": 0.6441176533699036,
"alphanum_fraction": 0.6558823585510254,
"avg_line_length": 32.599998474121094,
"blob_id": "baf4613189db341b7ea4552df9779b35fe1c0548",
"content_id": "0772a5d59e4e7cba59fbb8f2149a559d07c5db29",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 340,
"license_type": "no_license",
"max_line_length": 70,
"num_lines": 10,
"path": "/flask/bookstore/books/app.py",
"repo_name": "corast/kyb",
"src_encoding": "UTF-8",
"text": "import connexion\n\nif __name__ == \"__main__\":\n PORT = 9001\n # Create the application instance\n app = connexion.FlaskApp(__name__, specification_dir=\"./openapi/\")\n # Read the swagger.yml file to configure the endpoints\n app.add_api(\"openapi.yaml\")\n application = app.app # wisi application?\n app.run(PORT,debug=True)\n "
},
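`connexion.FlaskApp.add_api` in the record above wires HTTP routes to plain Python functions through the spec's `operationId`. The repository's `openapi.yaml` is not included in this excerpt, so the handler below is hypothetical; it only illustrates how a spec entry such as `operationId: handlers.get_book` would resolve to a function.

```python
# books/handlers.py -- hypothetical module referenced by operationId in openapi.yaml
def get_book(book_id):
    """connexion passes validated path/query parameters as keyword arguments."""
    book = {"id": book_id, "title": "Example Book"}
    return book, 200  # connexion serializes the dict and uses 200 as the status code
```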
{
"alpha_fraction": 0.7266660928726196,
"alphanum_fraction": 0.7429009079933167,
"avg_line_length": 46.22646713256836,
"blob_id": "f23ad72e60963de6d6ff01bd6da9433bdbe1c099",
"content_id": "a1d54909fa4a9db51747445c15cc0647ed7e2d73",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 34623,
"license_type": "no_license",
"max_line_length": 484,
"num_lines": 733,
"path": "/README.md",
"repo_name": "corast/kyb",
"src_encoding": "UTF-8",
"text": "# Home office tasks\n\nHome office \n[x] - Done \n[-] - not done or could not be completed, blocker etc.\n\n## Dag 19.3.20\n\n- [x] Setup work enviroment\n - [x] Install VurtualBox guest additions\n - [x] Install Centos8 on virtualBox\n - [x] Install TMUX\n - [x] Install minoconda\n - [x] Install Minikube (single-node Kubernetes) <https://kubernetes.io/docs/tasks/tools/install-kubectl/>\n - [x] None-empty `grep -E --color 'vmx|svm' /proc/cpuinfo`\n - [x] Enable Nested VT-x/AMD-V VB manager\n - [x] Install Kubernetes <https://phoenixnap.com/kb/how-to-install-kubernetes-on-centos>\n - [x] Install kubectl\n - [x] Setup SSH key on github\n - [x] Setup postman <https://tutorialforlinux.com/2019/09/26/how-to-install-postman-on-centos-8-easy-guide/2/>\n - [x] Fix not enough space on root disk (16 G too small!)\n - [x] Install gparted `sudo dnf install gparted`\n - [x] Extend partition in VirtualBox\n - [x] Extend partiton table with 16.23 GiB more\n - `lvextend -L +16.23GiB /dev/mapper/cl-root`\n - `partprobe`\n - `xfs_growfs /`mm\n- [x] Setup conda enviroment \"kubec\" \n`conta create -n kubec python=3.6 pip` \n`source activate kubec` \n`source deactivate` \n - [x] Install Python v3.6\n - [x] Install Flask\n - [x] Install pymongo\n - [x] Install redis\n - [x] Install python-dateutil: <https://pypi.org/project/python-dateutil/>\n - [-] Install Hamcrest\n- [x] Install Vscode\n - [x] Python extension\n - [x] Yaml extension\n- [x] Plan workdays ahead.\n- [x] Look at flask basic tutorials.\n\n## Dag 20.3.20\n\n- [x] Kubernetes architecture overview: <https://www.youtube.com/watch?v=8C_SCDbUJTg>\n- [x] Follow Tutorial to learn basics of Kubernetes\n- [x] Follow Basic Tutorial: <https://api.mongodb.com/python/current/tutorial.html>\n- [x] Setup Mongodb client\n - [x] Install mongodb: <https://docs.mongodb.com/manual/tutorial/install-mongodb-on-red-hat/>\n\n```repo\ncat >/etc/yum.repos.d/mongodb-org-4.2.repo <<EOL\n[mongodb-org-4.2]\nname=MongoDB Repository\nbaseurl=https://repo.mongodb.org/yum/redhat/$releasever/mongodb-org/development/x86_64/\ngpgcheck=1\nenabled=1\ngpgkey=https://www.mongodb.org/static/pgp/server-4.2.asc\nEOL\n```\n\n- [x] Aggregate in Mongodb: <https://medium.com/@paulrohan/aggregation-in-mongodb-8195c8624337>\n- [x] Aggregation Mongod: <https://docs.mongodb.com/manual/aggregation/>\n\n## Dag 23.3.20\n\n- [x] Aggregate in Mongodb: <https://medium.com/@paulrohan/aggregation-in-mongodb-8195c8624337>\n- [x] Mongo Aggregations in 5 Minutes: <https://engineering.universe.com/mongo-aggregations-in-5-minutes-b8e1d9c274bb>\n- [x] Chanter 4: Quering: <https://www.oreilly.com/library/view/mongodb-the-definitive/9781449344795/ch04.html>\n\n## Dag 24.3.20\n\n- [x] (Corrupted virtual machine image first time, try to advoid?)Installing Podman-docker <https://thenewstack.io/check-out-podman-red-hats-daemon-less-docker-alternative/>\n `sudo dnf install @container-tools`\n- [x] Install podman <https://podman.io/getting-started/installation>\n- [x] Basic Setup and Use of podman <https://podman.io/getting-started/>\n\n```cmd\npodman run -dt -p 8080:8080/tcp -e HTTPD_VAR_RUN=/var/run/httpd -e HTTPD_MAIN_CONF_D_PATH=/etc/httpd/conf.d \\\n -e HTTPD_MAIN_CONF_PATH=/etc/httpd/conf \\\n -e HTTPD_CONTAINER_SCRIPTS_PATH=/usr/share/container-scripts/httpd/ \\\n registry.fedoraproject.org/f29/httpd /usr/bin/run-httpd\n```\n\n- [-] Multi-container pods and pod communication <https://www.mirantis.com/blog/multi-container-pods-and-container-communication-in-kubernetes/>\n- [x] Configure pod initialization 
<https://kubernetes.io/docs/tasks/configure-pod-container/configure-pod-initialization/>\n- [x] Proxy access to kubernets api <https://kubernetes.io/docs/tasks/access-kubernetes-api/http-proxy-access-api/>\n\n- [-] Snapshot of current build\n- [x] Git push changes before shutdown.\n\n## Dag 25.03.20\n\n- [x] Connecting Applications with Services <https://kubernetes.io/docs/concepts/services-networking/connect-applications-service/>\n\n ```bash\n # Create a public private key pair\n openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout /keys/nginx.key -out /keys/nginx.crt -subj \"/CN=my-nginx/O=my-nginx\"\n # Convert the keys to base64 encoding\n cat /d/tmp/nginx.crt | base64\n cat /d/tmp/nginx.key | base64\n ```\n\n 1. `kubectl create configmap nginxconfigmap --from-file=default.conf`\n 1. `mdir /etc/nginx/ssl | cp keys/* /etc/nginx/ssl/`\n 2. `kubectl apply -f nginxsecrets.yaml`: nginxsecret\n 3. `kubectl delete deployments,svc my-nginx; kubectl create -f ./nginx-secure-app.yaml`: Delete deployments and services with name 'my-nginx'.\n - Volume mapping to other resource (Not local files)\n 4. `kubectl get pods -l run=my-nginx -o yaml | grep -i podip` IP-pod\n 5. `kubectl apply -f curlpod.yaml`\n 6. `kubectl exec -it curl-deployment-<rest> -- bin/sh`\n 1. `curl -k https://IP-pod`\n 7. LoadBalancer not availible in Minikube. \n - [x] Install go <https://computingforgeeks.com/how-to-install-go-on-rhel-8/>\n - [x] Corrupt GIT data <https://stackoverflow.com/questions/4254389/git-corrupt-loose-object/13918515#13918515?newreg=7082361fbc474a77b63e977aa8a80ff0>\n- [x] xclip <https://computingforgeeks.com/how-to-copy-and-paste-text-in-a-linux-terminal/>\n- [*] Setup kubernetes cluster\n- [x] Container images <https://blog.giantswarm.io/building-container-images-with-podman-and-buildah/>\n - Podman use Dockerfiles to build images, or can use buildah syntax when Dockerfile is too restrictive (i.e. 
script-like approach).\n- [x] Git push changes before shutdown.\n\n## Dag 26.03.20\n\n- [x] VBox snapshot\n- [x] Style bash PS1 <https://www.cyberciti.biz/faq/bash-shell-change-the-color-of-my-shell-prompt-under-linux-or-unix/>\n- [-] Services <https://kubernetes.io/docs/concepts/services-networking/service/>\n- [x] Ingress <https://kubernetes.io/docs/tasks/access-application-cluster/ingress-minikube/>\n - `minikube addons enable ingress` No ADDRESS if not enabled\n - Verify with `kubectl get pods -n kube-system` give nginx-ingress-controller.\n - Add Ingress IP to /etc/hosts, `IP host-name`\n- [x] Enable kubectl autocompletion <https://kubernetes.io/docs/tasks/tools/install-kubectl/>\n - as root user: `kubectl completion bash >/etc/bash_completion.d/kubectl`\n- [x] Flask-vue-kubernetes <https://testdriven.io/blog/running-flask-on-kubernetes/>\n- [x] Git push changes before shutdown.\n- [x] Snapshot.\n\n## Dag 27.03.20\n\n- [x] Persistent Volume / Persisten Volume Claim\n- [x] Stateful applications blog <https://kubernetes.io/blog/2016/12/statefulset-run-scale-stateful-applications-in-kubernetes/>\n- [x] Deployment vs statefulsett vs deamonsets <https://medium.com/stakater/k8s-deployments-vs-statefulsets-vs-daemonsets-60582f0c62d4>\n- [-] Standalone Mongodb on Kube <https://medium.com/@dilipkumar/standalone-mongodb-on-kubernetes-cluster-19e7b5896b27>\n- [-] Running MongoDB on Kubernetes with StatefulSets <https://kubernetes.io/blog/2017/01/running-mongodb-on-kubernetes-with-statefulsets/>\n- [-] <https://leadwithoutatitle.wordpress.com/2018/03/05/how-to-deploy-mongodb-with-persistent-volume-in-kubernetes/>\n\n- [x] Git push changes before shutdown.\n\n## Dag 30.03.20\n\n- [x] minikube mongodb demo <https://github.com/pkdone/minikube-mongodb-demo>\n - `db.getSiblingDB('admin').auth(\"main_admin\", \"abc123\");`\n - `db.setSlaveOk();` Set Secondary as Slaves to Primary.\n- [x] Fortsette med deployment av Mongodb.\n- [x] Sette opp Mongo container.\n- [-] <https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#pods-in-a-statefulset>\n - `Pods in a StatefulSet have a unique ordinal index and a stable network identity.`\n -`<StatefulSet>-<Ordinal>.<Service>`\n- [-] <https://kubernetes.io/blog/2017/01/running-mongodb-on-kubernetes-with-statefulsets/>\n - `mongodb://mongo-0.mongo,mongo-1.mongo,mongo-2.mongo:27017/dbname\\_?`\n\n## Dag 31.03.20\n\n- [x] <https://kubernetes.io/docs/tasks/run-application/run-replicated-stateful-application/>\n- [x] <https://kubernetes.io/blog/2017/01/running-mongodb-on-kubernetes-with-statefulsets/>\n- [x] <https://docs.mongodb.com/manual/replication/>\n - `kubectl drain m01 --force --delete-local-data --ignore-daemonsets`\n - `kubectl uncordon m01`\n- [-] How to send traffic to Mongodb, and queries...\n\n## Dag 01.04.20\n\n- [x] Fatal: Git index smaller than expected: <https://stackoverflow.com/questions/4254389/git-corrupt-loose-object/13918515#13918515?newreg=7082361fbc474a77b63e977aa8a80ff0>\n- [x] mondb-replication <https://maruftuhin.com/blog/mongodb-replica-set-on-kubernetes/>\n- [x] <https://medium.com/faun/scaling-mongodb-on-kubernetes-32e446c16b82>\n- [x] dnslookup pod <https://kubernetes.io/docs/tasks/administer-cluster/dns-debugging-resolution/>\n\n## Dag 02.04.20\n\n- [x] Koble opp db med repo <https://github.ibm.com/gbs-norway-tech-community/vue-flask-mongo-kubernetes>\n - Building repos\n - `podman build -t flask-backend .`\n - `su root` too many files in vue-frontend for default users.\n - `podman build -t vue-frontend .`\n - 
[x] create repo\n `https://hub.docker.com/repository/docker/sondreweb/vue-frontent`\n - [x] push image to repo\n - [x] `podman login --username sondreweb docker.io`, paste key from docker.io as password.\n - `podman push localhost/vue-frontend sondreweb/vue-frontend:latest`\n - `minikube podman-env`\n - `eval $(minikube -p minikube podman-env)` Use podman for images\n \n- [x] Helm <https://www.youtube.com/watch?v=fy8SHvNZGeE>\n- [x] Install helm <https://www.digitalocean.com/community/tutorials/how-to-install-software-on-kubernetes-clusters-with-the-helm-package-manager>\n- [-] Blocker! <https://github.com/kubernetes/minikube/issues/6350>\n - [-] Mitigate by uploading images to docker.io instead and point to public repo imags.\n - As root user (podman cannot build all images unless root, file size/security issues):\n\n```s\n podman login --username <username> docker.io # key from docker.io as password.\n podman build -t <image-tag> <location>\n podman push localhost/<image-tag> <username>/<image-repo>[:]<optional-version-tag>\n # set image tag in yaml to repo image to use.\n image: <username>/<image-repo>[:]<optional-version-tag>\n # Rebuild kubernetes kind\n```\n\n## Day 03.04.20\n\n- [x] Koble opp db med repo <https://github.ibm.com/gbs-norway-tech-community/vue-flask-mongo-kubernetes>\n - Sette begge docker images til public på docker hub.\n - Enviroment varlaibles `/# printenv` in shell of container.\n - Reaching primary mongodb <https://github.com/helm/charts/issues/1569>\n - `db.getSiblingDB(\"mongo\"); use mongo; db.post.find()` i master\n - `db.getSiblingDB(\"mongo\"); use mongo; db.setSlaveOk();db.post.find()`\n - [x] Added replicaSet as parameter to when init_mongodb.\n \n- [-] Setup statefull database and write using master node and read using slaves.\n - [-] Fix loadbalander not selecting only master on write operations.\n - [-] Potential fix, need a load-balancer that handles each mongo query, instead of calling Client directly from flask application endpoints.\n- [-] Helm init\n - [x] Install Helm and Tiller <https://www.digitalocean.com/community/tutorials/how-to-install-software-on-kubernetes-clusters-with-the-helm-2-package-manager>\n\n## Day 06.04.20\n\nFortsette med helm charts. Helm commandolinjen. Hvordan hente charts fra andre repoer.\n\n- [x] Create your first helm chart <https://docs.bitnami.com/tutorials/create-your-first-helm-chart/>\n - `helm init --wait` if tiller not availible\n - `helm install --name=example ./mychart --set service.type=NodePort` deploy chart to kuberentes\n - Follow steps from Notes to see application.\n- [x] Autocompletion heml <https://helm.sh/docs/helm/helm_completion/>\n- [x] Upgrade helm from 2-3 <https://helm.sh/docs/topics/v2_v3_migration/>\n- [x] Using Helm Documentation <https://helm.sh/docs/intro/using_helm/>\n\n## Day 07.4.20\n\nFortsette med helm charts. 
Se litt mer på syntaxen på selv chart filene.\nStarte på connexion flask api\n\n- [x] Using helm and Kubernetes <https://www.baeldung.com/kubernetes-helm>\n - Create helm repo on github\n - `helm package ./[helm_chart]`\n - `helm repo index my-repo/ --url https://[username].github.io/my-repo`\n Testing\n - `helm repo add my-repo https://[username].github.io/my-repo`\n - `helm install my-repo/hello-world --name=hello-world`\n- [x] Helm from basics to advanced <https://banzaicloud.com/blog/creating-helm-charts/>\n- [-] Helm from basics to advanced part-2 <https://banzaicloud.com/blog/creating-helm-charts-part-2/>\n- [x] Python REST APIs With Flask, Connexion, and SQLAlchemy <https://realpython.com/flask-connexion-rest-api/>\n- [-] Setup OpenAPI flask configurations (swagger codegen). <https://medium.com/@hmajid2301/implementing-a-simple-rest-api-using-openapi-flask-connexions-1bdd01ca916>\n<https://dev.to/hmajid2301/implementing-a-simple-rest-api-using-openapi-flask-connexions-28kk>\n\n## Day 15.04.20\n\nFortsette med flask connexion API. Se litt på authentisering.\n\n- [x] Setup OpenAPI flask configurations (swagger codegen). <https://medium.com/@hmajid2301/implementing-a-simple-rest-api-using-openapi-flask-connexions-1bdd01ca916>\n<https://dev.to/hmajid2301/implementing-a-simple-rest-api-using-openapi-flask-connexions-28kk>\n- [x] Setting up flask in docker container.\n- [-] Flask deployment options\n- [x] OAuth 2.0: An Overview <https://www.youtube.com/watch?v=CPbvxxslDTU>\n- [x] Understanding Oauth2 <https://www.youtube.com/watch?v=f36s7KtnUD4>\n- [-] Performance tests <https://medium.com/@peter.jp.xie/scale-up-rest-api-functional-tests-to-performance-tests-in-python-3239859c0e27>\n\n## Day 16.04.20\n\nStart arbeid på helm-chart for flask-connexion prosjekt.\nBygge containere, og pushe opp til docker hub repo.\n\n- [x] Flask-connexion <https://github.com/hjacobs/connexion-example>\n- [x] Setup Flask-connexion in kubernetes\n- [-] Setup Flask-connexion in helm\n\n## Day 17.04.20\n\nForsette med flask-connexion kubernetes helm charts.\nDeploye forskjellige APi-er med denne via forskjellige values.yaml filer.\n\nhelm-bookstore.\nHelm config filer\n\n- [x] Setup Flask-connexion in helm\n- [x] <https://github.com/thedataincubator/flask-chart/blob/master/flask-chart/templates/deployment.yaml>\n - `helm install --dry-run --debug ./mychart`\n - `helm install mydemo ./mychart/ --debug --dry-run`\n- [x] Enable ingress\n `minikube addons enable ingress`\n `echo \"<NODE-IP> <HOST>\" > /etc/hosts`\n- [x] Multiple containers from same helm chart.\n - `helm install mydemo-authors -f values-authors.yaml . --debug`\n - `helm install mydemo-books -f values-books.yaml . --debug`\n\n## Day 20.04.20\n\n- [x] Dockercon 17, what is Prometheus, on docker <https://www.youtube.com/watch?v=PDxcEzu62jk>\n- [ ] Prometius in kubernetes overview. <https://www.youtube.com/watch?v=bErGEHf6GCc>\n- [ ] Prometheus monitoring Kubernetes. 
<https://sysdig.com/blog/kubernetes-monitoring-prometheus/>\n- [ ] Sette opp mongodb chart.\n\n### Backlog\n\n- [ ] Setup OAuth2 support in Flask <https://flask-oauthlib.readthedocs.io/en/latest/oauth2.html>\n- [ ] Setup Gunicorn support\n- [ ] Setup Promethious logging of Pods\n- [ ] Setup Performance Tests in Kubernetes\n- [ ] Create chart from flask-vue-mongod project.\n- [ ] Setup feature flags on kubernetes \n- [ ] Setup feature flags on flask \n- [ ] Setup Flask Frontend (Flask megatutorial)\n- [ ] Setup Redis with mongodb \n- [ ] Setup keycloak.\n- [ ] Setup OpenAPI flask configurations (swagger codegen). <https://medium.com/@hmajid2301/implementing-a-simple-rest-api-using-openapi-flask-connexions-1bdd01ca916>\n<https://dev.to/hmajid2301/implementing-a-simple-rest-api-using-openapi-flask-connexions-28kk>\n\n\n- [ ] Feature toggling.\n- [ ] Feature gates <https://kubernetes.io/docs/reference/command-line-tools-reference/feature-gates/>\n- [ ] K9 Kubectl improved?\n- [ ] Kubernetes Vaults <https://testdriven.io/blog/managing-secrets-with-vault-and-consul/>\n- [ ] grafana <https://medium.com/faun/kubernetes-multi-cluster-monitoring-using-prometheus-and-thanos-7549a9b0d0ae>\n- [ ] Sikkere mongodb replicaset <http://pauldone.blogspot.com/2017/06/deploying-mongodb-on-kubernetes-gke25.html>\n- [ ] Zookeper?\n- [ ] Flask deployment <https://flask.palletsprojects.com/en/1.1.x/deploying/>\n- [ ] Combining PIP anv virtual enviroments using Pipenv <https://github.com/pypa/pipenv>\n- [ ] Gevent async I/O <https://iximiuz.com/en/posts/flask-gevent-tutorial/>\n- [ ] Readiness and Liveness probes in kubernetes <https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/>\n- [ ] Git diff terminal\n\n## Prometheus\n\n### Querying\n\nFunctional language query, not SQL.\n\nGet error rate of all http request paths for the last 5 minutes.\n\n``` prometheus\nsum by(path) (rate(http_requests_total{status=\"500\"}[5m])) / sum by(path) (rate(http_requests_total[5m]))\n```\n\n99th percentile request latencey across all instances.\n\n```p\nhistogram_quantile(0.99,\n sum without(instance) (rate(request_latency_seconds_bucket[5m]))\n)\n```\n\nExpression browser, test expressions against latest values. Afterwarda graph these results in Graphana.\n\n### Alerting\n\nAll paths with an error_rate ratio greater than 5 percent. ALl will become an alert that inherits the label of its timeseries.\n\n```p\nALERT Many500Errors\nIF\n (\n sum by(path) (rate(http_requests_total{status=\"500\"}[5m]))\n / sum by(path) (rate(http_requests_total)[5m])\n ) * 100 > 5\nFOR 5m\nLABELS {\n severity = \"critical\"\n}\nANNOTATIONS {\n summary = \"Many 500 errors for path {{$labels.path}} ({{$value}}%)\"\n}\n```\n\n## Helm\n\nHelm installs _charts_ into Kubernetes, creating a new _release_ for each installation. And to find new charts, you can search Helm chart _repositories_.\n\n### Repositories\n\nA _Repository_ is a place where charts can be collected and shared. Like Fedora Package Database, but for Kubernetes packages.\n\n### Commands examples\n\n`helm serve` Start a local chart repository server that serves charts from a local directory. \n`helm search local` Will find all local repositories. This should show the result from `helm serve`. \nYum repo for Kubernetes projects handler. Optimize reusability of kubernetes projects. \n\n`helm search repo` Searches the repositories that have been added to local helm client (with helm repo add). 
This search is over local data, thus no public network connection is needed. \n`helm search hub` Show all the available charts. \nThe search uses a fuzzy string matching algorithm with the query. \n\n`helm repo add [repo]` Add repository\n`helm repo update` Update charts in repositories.\n\n`helm repo remove [repo]` Remove repo from helm client\n\n`helm install myapp` Deploy application to kubernetes with helm config. \n`helm upgrade myapp` Update application on kubernetes with upgraded helm config. \n`helm rollback version...` Rollback to previous configuration history. \n`helm package [chart]` to send helm chart to repo, to make reusability easier. Creates .tgz file.\n`helm create <name>` Create new chart example template folder. \n`helm init --wait` Installs Tiller onto Kubernetes Cluster and sets up local configurations in $HELM_HOME and using the default context. \n`helm lint ./helmproject` check for helm syntax errors. \n\n`helm install --dry-run --debug ./mychart` to inspect the general definitions\n\n`helm install -f config.yaml stable/mariadb --generate-name` Run chart with overwritten config file, generate name automatically\n\n`helm package ./mychart` Package a chart into a versioned chart archive file. A .tgz file.\nThis can be used to start a new helm chart instance directly, instead of from local directory.\n`helm install --name=<name> mychart-<version>.tgz --set service.type=NodePort`\n\n`helm list` Show all running releases\n\n`helm show values <repo>/<chart>` to see what is configurable on a chart,\ne.g. `helm show values bitnami/pytorch`\n\n`helm get values <release>` to see overwritten configuration of a release.\n\n`helm upgrade -f panda.yaml mariadb-1586182515 bitnami/mariadb` Change configuration of a release. Helm tries to perform the least invasive upgrade, and only update things that have changed since the last release.\n\n`helm rollback [release] [revision]` first revision always starts at 1, and increments by 1 for each update/install or rollback.\n`helm rollback <release> 1` to rollback a release to previous configurations. In this case the first configuration version\n\n`helm history [release]` to see history of a release.\n\n`helm uninstall [release]` Removes release from the cluster.\n\n### Helm charts\n\nA _chart_ is a Helm package. It contains all the resource definitions necessary to run an application, tool, or service inside of a Kubernetes cluster. Like a Yum RPM file.\nKeep track of a set of kubernetes resources (pods, deployments, services etc) in the project.\n\nA _Release_ is an instance of a chart running in a Kubernetes cluster. A chart can be installed many times into the same cluster. With each installation, a new _release_ is created.\n\n### Config files\n\n`--values or -f` specifies a YAML file with overrides. Rightmost file will take precedence. `--set` has higher precedence.\n\n`--set` accepts settings in equivalent yaml format.\n`--set a=b,c=d` \n\n```yaml\na: b\nc: d\n```\n\n`--set servers[0].port=80,servers[0].host=example`\n\n```yaml\nservers:\n  - port: 80\n    host: example\n```\n\nWith escape characters on '.'\n`--set NodeSelector.\"Kubernetes\\.io/role\"=master`\n\n```yaml\nNodeSelector:\n  kubernetes.io/role: master\n```\n\n_required_ option will make parameter required. `{{ required \"A valid foo is required!\" .Values.foo }}` \nIndentation can be piped in. `{{ .Values.annotations | indent 4 }}`\nMore readable alternative nindent, newline + indent. \n`annotations: {{ .Values.annotations | nindent 4 }}`\nDefault values can be provided with pipe. 
`{{ .Values.name | default (include \"chart.name\" .) }}`\nWhitespace removal with the `{{-` chomp preceding whitespace, or `-}}` to chomp ensuing whitespace.\n\n### Tiller\n\nService component of helm. Takes commands sent by the CLI client and turns them into something the kubernetes cluster will understand.\n\n### Templates\n\nHolds YAML definitions for Services, Deployments and other Kubernetes objects.\nEach file in this directory is run through a Go template rendering engine before running the kubernetes project. This rendering includes variable computation, logical completion among other things.\n\n__.Chart__ provides metadata about the chart to your definitions such as the name, or version.\n__.Values__ used to expose configurations that can be set at the time of deployment.\n__NOTES.txt__ printed out after a chart is successfully deployed, useful to describe steps to run the chart, and give information like runtime IP.\n__templates__ This is the directory where Kubernetes resources are defined as templates\n__charts__ This is an optional directory that may contain sub-charts.\n__.helmignore__ Define patterns that helm ignores when packaging (like .gitignore).\n\n## Kubernetes Architecture\n\n<https://phoenixnap.com/kb/understanding-kubernetes-architecture-diagrams>\n\n\n\n### Deployments\n\nAn API object that manages a replicated application. \nProvides declarative updates for Pods and ReplicaSets (replica sets of pods)\n\n#### Ingress\n\nIngress exposes HTTP and HTTPS routes from outside the cluster to services within the cluster. Traffic routing is controlled by rules defined on the Ingress resource.\n\n```none\n internet\n |\n [ Ingress ]\n --|-----|--\n [ Services ]\n```\n\n#### Steps in a basic Kubernetes process\n\n1. An administrator creates and places the desired state of an application into a manifest file (yml).\n2. The file is provided to the Kubernetes API Server using a CLI or UI. Kubernetes command-line tool called __kubectl__\n3. Kubernetes stores the file (an application's desired state) in a database called the __Key-Value Store (etcd)__.\n4. Kubernetes then implements the desired state on all relevant applications within the cluster.\n5. Kubernetes continuously monitors the elements of the cluster to make sure the current state of the application does not vary from the desired state.\n\n#### Master Node\n\n\n\n- Receives input from a CLI or UI via an API.\n- Define pods, replica sets, and services that you want Kubernetes to maintain.\n- Provide the parameters of the desired state for the application(s) running in that cluster.\n\n##### API server\n\nThe front-end of the control plane and the only component in the control plane that we interact with directly. \nInternal system components, as well as external user components, all communicate via the same API.\n\n##### Key-value Store (etcd)\n\nThe database Kubernetes uses to back up all cluster data. It stores the entire configuration and state of the cluster. \nThe master node queries __etcd__ to retrieve parameters for the state of the nodes, pods and containers.\n\n##### Controller\n\nRole: Obtain the desired state from the API Server. Checks the current state of the nodes it is tasked to control, determines if there are any differences, and resolves them, if any. \n\n##### Scheduler\n\nWatches for new requests coming from the API server and assigns them to healthy nodes. \nRanks the quality of the nodes and deploys pods to the best-suited node. 
If there are no suitable nodes, the pods are put in a pending state until such a node appears.\n\n#### Worker Node\n\nListens to the API server for new work assignments. \nIt executes the work assignments and then reports the results back to the Kubernetes Master node.\n\n##### Kubelet\n\nRuns on every node in the cluster. It is the principal Kubernetes agent; by installing kubelet, the node's CPU, RAM, and storage become part of the broader cluster. \nIt watches for tasks sent from the API Server, executes the task, and reports back to the Master. \nMonitors pods and reports back to the control plane if a pod is not fully functional. \nBased on this information, the Master can then decide how to allocate tasks and resources to reach the desired state.\n\n##### Container Runtime\n\nPulls images from a __container image registry__ and starts and stops containers. \nA 3rd party software or plugin, such as Docker, usually performs this function.\n\n##### Kube-proxy\n\nMakes sure that each node gets its IP address, implements local _iptables_ and rules to handle routing and traffic load-balancing. \n\n##### Pod\n\n\n\nSmallest element of scheduling in Kubernetes. Without it, a container cannot be part of a cluster. \nThe pod serves as a 'wrapper' for a single container with the application code. Based on the availability of the resources, the Master schedules the pod on a specific node and coordinates with the container runtime to launch the container. \n\nIf pods unexpectedly fail to perform their tasks, Kubernetes creates and starts a new pod in its place. The pod is a replica, except for the DNS and IP address. \n\nPods need to be designed so that an entirely new pod, created anywhere within the cluster, can seamlessly take its place. __Services__ assist in this process. \n\n##### Services\n\n__Services__ are introduced to provide reliable networking by bringing stable IP addresses and DNS names to the unstable world of pods.\n\nPods are associated with services through key-value pairs called __labels__ and __selectors__. A service automatically discovers a new pod with labels that match the selector. This also removes terminated pods from the cluster.\n\n###### Headless Service\n\nLike a normal Kubernetes Service, except it doesn't perform any load balancing.\n\n## Podman\n\n`podman pull images`: First checks registry.redhat.io for latest image, then docker.io if not there. \n`podman images`: List images available. \n`podman rmi ID`: Delete images on ID tag. \n`podman rm ID`: Delete container _'-f'_ to force already running containers. \n`podman inspect ID`: Inspect '-l' latest running or ID container for metadata. \n`podman logs ID`: View container's logs. \n`podman top ID`: View container's top statuses (CPU %, PID etc). \n`sudo podman container checkpoint ID`: Stop and store state of a container for later use. \n`sudo podman container restore ID`: Restore container at the exact same state as checkpoint. \n\n## mongodb queries\n\n`db.collection.find({query})`: Find documents in collection matching the query. \n`db.collection.insert_one({})`: Insert one document into collection. \n`db.collection.insert_many([{}])`: Insert multiple documents at once. (Not one after the other). \n`db.collection.create_index([('user_id', pymongo.ASCENDING)],unique=True)`: Unique index per document. \n\n__Operators__ come in three varieties: __stages__, __expressions__, and __accumulators__. 
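\n\nA minimal sketch (the pod name, database and collection are assumptions) of running such queries from a shell against one of the replica set pods; note that `insertOne`/`find` are the mongo-shell spellings of pymongo's `insert_one`/`find`:\n\n```bash\n# Hypothetical pod name mongo-0 and database test_db\nkubectl exec mongo-0 -- mongo --quiet --eval '\ndb = db.getSiblingDB(\"test_db\");\n// insert one document, then print the ones matching a filter\ndb.books.insertOne({name: \"book_name_1\", no_of_books_sold: 2});\nprintjson(db.books.find({no_of_books_sold: {$lt: 5}}).toArray());\n'\n```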
\n\n### Aggregation\n\nA Pipeline.\n\n\n#### Minikube\n\n`minikube start` : Start cluster \n`minikube dashboard` : Access the Kubernetes Dashboard running within the minikube cluster. \n`minikube ip`: Get Node IP\n`minikube stop` : Stop local cluster \n`minikube delete` : Removes a local Kubernetes cluster\n`minikube delete --all` : Delete all local clusters and profiles \n\nOnce started: `kubectl` \n`kubectl create deployment hello-minikube --image=k8s.gcr.io/echoserver:1.4` : Start server. \n`kubectl expose deployment hello-minikube --type=NodePort --port=8080` : Exposing a service as a NodePort. \n`minikube service hello-minikube` : Open exposed endpoint in your browser. \n`minikube service <service> --url`: Show URLs of service\n`minikube service list`: Show all info of the services in list.\n\n`minikube start -p cluster2` : Start second local cluster (only with bare-metal/none driver). \n\n`minikube config set memory x` : increase memory beyond just default 2 GB. \n\n#### Kubectl - Kubernetes command-line tool\n\n`kubectl version`: Display the kubectl version\nAllows you to run commands against Kubernetes clusters. \n`kubectl cluster-info` : Details of the cluster and its health status \n`kubectl get <kind>`: kinds:{pods, nodes, secrets, deployments, pv} \n`kubectl get po(ds) -A` : See all pod state \n `-l`: label. `-o`: output\n`kubectl get nodes` : view the nodes in the cluster \n`kubectl get pv` : Get persistent volume \n\n`kubectl get deployments`: Get available deployments. A Deployment provides declarative updates for Pods and ReplicaSets. \n`kubectl get (svc|services) <name>`: Get services on label name\n\n`kubectl drain <nodename>` : evict all user pods from the node \n`kubectl delete node <nodename>` : delete node from cluster \n`kubectl delete pods <podname>`: Delete specific pod. _'-f'_ to force\n\n`kubectl proxy`: Create proxy that forwards communications into the cluster-wide, private network, to the Kubernetes API. \n`kubectl config view`: Check the location and credentials that kubectl knows about. \n\n`kubectl apply -f pod_config.yaml`: Apply config file. _--record_ flag to save the kubectl command that is making changes to the resource.\n\n`kubectl describe x`: Show details of a specific resource or group of resources\n`kubectl describe node`: Show attributes of node (CPU/RAM usage etc).\n\n`kubectl get secrets`: Get secrets\n`kubectl exec -it shell-demo -- /bin/bash`: Get bash shell to running container\n\nGet name of a pod (if only one)\n`kubectl get pod -l service=postgres -o jsonpath=\"{.items[0].metadata.name}\"`\n\nCheck if volumes are still attached.\n`kubectl get volumeattachment`\n\n##### Deployment\n\n`kubectl scale deployment <deployment> --replicas=x`: Adjust replica count on deployment.\n\n## Kubernetes\n\nPods that are running inside Kubernetes are running on a private, isolated network. By default they are visible from other pods and services within the same kubernetes cluster, but not outside that network. \n\n### DNS lookup\n\n```bash\nkubectl apply -f curl-pod.yaml # Create resource from file.\nkubectl attach curl -i # Attach to running container\nroot@curl:/ nslookup my-nginx\n```\n\n### Volumes\n\nStorage. Pods access storage by using the claim as a Volume.\n\n__Emptydir__ is an empty directory either in RAM or on disk (SSD/HDD). Not persistent, as data in the _emptyDir_ is deleted when the Pod is removed. \n\n__hostPath__ is a volume that mounts a file or directory from the host node's filesystem into your Pod. Essentially a Non-empty __EmptyDir__. 
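\n\nA minimal sketch (pod name, image and paths are illustrative assumptions) of a Pod mounting a hostPath volume:\n\n```bash\n# Hypothetical example: expose /data/nginx from the node inside an nginx container\ncat <<EOF | kubectl apply -f -\napiVersion: v1\nkind: Pod\nmetadata:\n  name: hostpath-demo\nspec:\n  containers:\n  - name: web\n    image: nginx\n    volumeMounts:\n    - name: node-data\n      mountPath: /usr/share/nginx/html\n  volumes:\n  - name: node-data\n    hostPath:\n      path: /data/nginx\n      type: DirectoryOrCreate\nEOF\n```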
\n\n__nfs__ is a volume that allows an existing NFS (Network File System) share to be mounted into your Pod. NFS volumes are preserved and can be used by multiple pods. \n\n#### Provisioning\n\n__PersistentVolume__ refers to static storage. \n__StorageClass__ refers to dynamic storage. \n\n__Static__ is a number of __PersistentVolumes__, that carry the detail of the real storage which is available for use by the cluster user. They exist in the Kube API and are available for consumption.\n\nWhen none of the static PVs matches a user's PersistentVolumeClaim, the cluster may try to __dynamically__ provision a volume specially for the PVC, based on _StorageClasses_. The PVC must request a specific class the administrator has created beforehand.\n\n#### PersistentVolumeClaim\n\n__persistentVolumeClaim__ is used to mount a __PersistentVolume__ into a Pod. Will connect to _PersistentVolume_ if __StorageClassName__ is not provided, else go to __StorageClass__. A control loop watches for new PVCs; when it finds a matching PV, it will _bind_ them together. Binds are exclusive, meaning you cannot bind 2 PVCs to the same PV. A __StorageClass__ provides a way for administrators to describe the \"classes\" of storage they offer. __Foundation__ of dynamic provisioning. \n\n__PersistentVolumeClaims__ are a way for users to \"claim\" durable storage (such as a GCE PersistentDisk or an iSCSI volume) without knowing the details of the particular cloud environment.\n\n__PersistentVolumeReclaimPolicy__ Recycle, Delete\n\n__Selector__ filters out which PV to use from the PVC.\n\n## Postgres\n\n`\\l` List databases\n`\\c database` Connect to database.\n`select * from table`: Get all data records in table.\n\n### Cluster\n\nSet of node machines running containerized applications.\n\n## OAuth2\n\nEnables a third-party application to obtain limited access to an HTTP service, either on behalf of a resource owner by orchestrating an approval interaction between the resource owner and the HTTP service, or by allowing the third-party application to obtain access on its own behalf.\n\n__user__ is the Resource Owner.\n\nOAuth2 key points:\n\n- Is a good standard for implementing an authorization system\n- Has many different grant types for different use cases\n- Needs to be combined with an authentication mechanism\n- Defines squishy boundaries, that you need to be explicit about\n- Can help you to centralize the auth system in your organization\n"
},
{
"alpha_fraction": 0.6496906280517578,
"alphanum_fraction": 0.6539742946624756,
"avg_line_length": 29.449275970458984,
"blob_id": "3abe657603a0dd318861641c6623a8a530710cb7",
"content_id": "4fa18a7df2df71ce409e4f80d12db7bd473cb7b0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2101,
"license_type": "no_license",
"max_line_length": 165,
"num_lines": 69,
"path": "/kubernetes/vue-flask-mongo-kubernetes/flask-api/app.py",
"repo_name": "corast/kyb",
"src_encoding": "UTF-8",
"text": "import os\nimport logging\nfrom flask import Flask\nfrom flask_cors import CORS\nfrom flask_restful import Api\nfrom database.mongo import configure_mongodb\n\n\"\"\"API object\"\"\"\napi = Api()\n\"\"\"CORS object\"\"\"\ncors = CORS()\n\ndef create_app(**config):\n \"\"\" Create application.\n app = create_app() # app can be used as WSGI application\n app.run() # Or you can run as a simple web server\n \"\"\"\n app = Flask(__name__, static_folder=None)\n\n configure_app(app)\n configure_logging(app)\n configure_cors(app)\n configure_mongodb(app)\n configure_api(app)\n \n return app\n\ndef configure_app(app):\n \"\"\" Configure application. \"\"\"\n from common.utils import MongoJSONEncoder\n app.json_encoder = MongoJSONEncoder\n\n app.config['LOG_LEVEL'] = 'DEBUG'\n\n app.config['MONGO_HOST'] = os.environ.get('MONGO_HOST', 'localhost')\n app.config['MONGO_PORT'] = os.environ.get('MONGO_PORT', '27017')\n\n app.config['MONGO_DB'] = os.environ.get('MONGO_DB', 'flask_api')\n app.config['MONGO_SDB'] = os.environ.get('MONGO_SDB')\n\n app.config['MONGO_REPLICASET'] = os.environ.get('MONGO_REPLICASET', 'MainRepSet')\n\n app.config['MONGO_URI'] = f\"mongodb://{app.config['MONGO_HOST']}:{app.config['MONGO_PORT']}/{app.config['MONGO_DB']}?replicaset={app.config['MONGO_REPLICASET']}\"\n\ndef configure_logging(app):\n \"\"\" Configure logging.\n Call ``logging.basicConfig()`` with the level ``LOG_LEVEL`` of application.\n \"\"\"\n logging.basicConfig(level=getattr(logging, app.config['LOG_LEVEL']))\n\ndef configure_cors(app):\n \"\"\" Configure Cross Origin Resource Sharing.\n Uses `Flask-CORS <https://flask-cors.readthedocs.io/>`_\n \"\"\"\n cors.init_app(app)\n\ndef configure_api(app):\n \"\"\" Configure API Endpoints. \"\"\"\n from resources.health import Health\n from resources.posts import Posts, Post \n\n api.add_resource(Health, '/api/ping')\n api.add_resource(Posts, '/api/posts')\n api.add_resource(Post, '/api/posts/<ObjectId:post_id>')\n api.init_app(app)\n \nif __name__ == '__main__':\n app = create_app()\n app.run(host='0.0.0.0', debug=True)\n"
},
{
"alpha_fraction": 0.804347813129425,
"alphanum_fraction": 0.8115941882133484,
"avg_line_length": 45,
"blob_id": "6bee936acd41669f9d216f1aa5479e90fc3e43f5",
"content_id": "3b22488cb9cfc4cc3728b6bbc473b32c99ae86e0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 138,
"license_type": "no_license",
"max_line_length": 94,
"num_lines": 3,
"path": "/flask/people_endpoint/README.md",
"repo_name": "corast/kyb",
"src_encoding": "UTF-8",
"text": "# Realpython people connexion api example\n\n<https://github.com/realpython/materials/blob/master/flask-connexion-rest/version_4/people.py>\n"
},
{
"alpha_fraction": 0.7304075360298157,
"alphanum_fraction": 0.7304075360298157,
"avg_line_length": 38.75,
"blob_id": "2db9fd12da2ef463e686f86fe4d2a679e63f2ece",
"content_id": "7716f6a60bee5f9f172a250b130d84b3f57a0ce0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 319,
"license_type": "no_license",
"max_line_length": 66,
"num_lines": 8,
"path": "/kubernetes/deploy_mongodb_persistent/deploy.sh",
"repo_name": "corast/kyb",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\necho \"Creating the mongo service and statefulset\"\n# The StatefulSet \"mongo\" is invalid: spec: Forbidden:\n# updates to statefulset spec for fields other than 'replicas',\n# 'template', and 'updateStrategy' are forbidden\nkubectl delete -f ./svc_sts_mongo.yaml\nkubectl apply -f ./svc_sts_mongo.yaml\n\n"
},
{
"alpha_fraction": 0.4718800187110901,
"alphanum_fraction": 0.4805838167667389,
"avg_line_length": 27.399240493774414,
"blob_id": "14e9d0b5fe1ac70bef6e5cf7ac123d0e0e87c88e",
"content_id": "45ea24738bc89aae85e17c71d65fb0d9bbe00816",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7472,
"license_type": "no_license",
"max_line_length": 119,
"num_lines": 263,
"path": "/mongo-q/tutor-main.py",
"repo_name": "corast/kyb",
"src_encoding": "UTF-8",
"text": "## Main tutorial on mongodb site. Extended with aggegation of queries.\n\nfrom pymongo import MongoClient\nfrom bson.objectid import ObjectId\nimport pymongo\nimport bson\n\nfrom bson.code import Code\n\nimport json\nfrom pathlib import Path\nimport pathlib\nimport datetime\nimport dateutil.parser\n\nfrom pprint import pprint\n\nclient = MongoClient('localhost', 27017)\n\ndb = client['test_db']\nbooks = db.books\nauthors = db.authors\ncities = db.cities\n\ndef init_db(refresh=False):\n\n def clear_collections():\n books.remove({})\n authors.remove({})\n cities.remove({})\n\n def get_counts():\n b_c = books.count_documents(filter={})\n a_c = authors.count_documents(filter={})\n c_c = cities.count_documents(filter={})\n return b_c, a_c, c_c\n\n if(refresh):\n clear_collections()\n\n file = Path.cwd() / 'mongo-q/data/data.json'\n\n if not file.exists():\n print(f\"{file} does not exist\")\n else:\n b_c, a_c, c_c = get_counts()\n\n if not (b_c == 0 or a_c == 0 or c_c == 0):\n print(f\"Collections are filled with data: books:{b_c} authors:{a_c} city:{c_c}\" )\n else:\n # We need to clear the database, and replace all the data\n clear_collections()\n\n books.create_index(\"_id\")\n authors.create_index(\"_id\")\n cities.create_index(\"_id\")\n\n with file.open() as json_file: \n data = json.load(json_file)\n \n # End result length\n num_cities = len(data[\"cities\"])\n num_books = len(data[\"books\"])\n num_authors = len(data[\"authors\"])\n\n for city in data[\"cities\"]:\n cities.insert_one(city)\n\n for author in data[\"authors\"]:\n #Update author city reference\n city_id = db.cities.find_one({\"name\": author.get(\"city\")}).get(\"_id\")\n author.update({\"city\":city_id})\n\n authors.insert_one(author)\n\n for book in data[\"books\"]:\n date_written = book.get(\"date_written\")\n book.update({\"date_written\":dateutil.parser.parse(date_written)}) # update field as DateTime object\n # Update city field with Id reference.\n city_id = db.cities.find_one({\"name\": book.get(\"city\")}).get(\"_id\")\n book.update({\"city\":city_id})\n # Update author field with Id reference.\n author_id = db.authors.find_one({\"email\": book.get(\"author\")}).get(\"_id\")\n book.update({\"author\":author_id})\n\n books.insert_one(book)\n\n num_books = len(data[\"books\"])\n num_authors = len(data[\"authors\"])\n num_cities = len(data[\"cities\"])\n \n book_c, authors_c, cities_c = get_counts()\n if(num_books != book_c or num_authors != authors_c or num_cities != cities_c):\n raise ValueError(\"Wrong number of documents in collections\")\n\n \n\ndef aggregate_books():\n\n startDate = datetime.datetime(2019, 1, 1, 23, 59) # 2019-01-01 23:59:00\n endDate = datetime.datetime(2019, 10, 1, 23, 59) # 2019-06-01 23:59:00\n\n pipeline = [\n { # Filter documents that are outside of date range\n \"$match\": { \n \"date_written\" : {\n \"$gte\": startDate,\n \"$lt\": endDate\n }\n }\n },\n { # Groups input documents by the specified _id expression \n # and for each distinct grouping\n \"$group\": { \n \"_id\":\"$city\",\n \"total_books_written\": {\n \"$sum\": \"$no_of_books_sold\"\n }\n }\n },{ # Performs a left outer join to an unsharded collection in the same database \n # to filter in documents from the “joined” collection for processing.\n \"$lookup\": {\n \"from\": \"cities\",\n \"localField\":\"_id\",\n \"foreignField\":\"_id\",\n \"as\": \"refcity\",\n }\n },{ # Deconstructs an array field from the input documents to output a document for each element.\n \"$unwind\":\"$refcity\"\n },\n {\n \"$project\": { 
# Project document in this format,\n \"_id\":0,\n \"total_books_written\": \"$total_books_written\",\n \"city\":\"$refcity.name\"\n }\n },\n {\n # Sort on decending on total_books_written tag\n \"$sort\": {\n \"total_books_written\":pymongo.DESCENDING\n }\n },\n {\n \"$limit\": 3\n }\n ]\n\n result = books.aggregate(pipeline)\n print(\"Books written by\")\n pprint(list(result))\n\n # Cont aggregation\n # Find all books written by author from same city.\n\n\n pipeline_test = [ \n { \n \"$lookup\": {\n \"from\": \"authors\",\n \"localField\":\"author\",\n \"foreignField\":\"_id\",\n \"as\": \"ref_author\",\n }\n },\n {\n \"$unwind\":\"$ref_author\"\n },\n {\n \"$match\": {\n \"$expr\": {\n \"$eq\": [\"$city\", \"$ref_author.city\"]\n }\n }\n },\n {\n \"$project\": {\n \"_id\":1,\n \"no_of_books_sold\":1\n }\n\n }\n ] \n\n result_test = books.aggregate(pipeline_test)\n print(\"Books same authors\")\n pprint(list(result_test))\n\n # Find number of books each author have sold\n\n pipeline_test = [ \n {\n \"$lookup\": {\n \"from\": \"authors\",\n \"localField\":\"author\",\n \"foreignField\":\"_id\",\n \"as\": \"ref_author\",\n }\n },\n {\n \"$unwind\": \"$ref_author\"\n },\n {\n \"$group\": { \n \"_id\":\"$ref_author\",\n \"total_books_sold\": {\n \"$sum\": \"$no_of_books_sold\"\n }\n }\n },{\n \"$project\":{\n \"total_books_sold\":\"$total_books_sold\",\n \"email\":\"$_id.email\",\n \"_id\":\"$_id._id\"\n \n }\n }\n\n ]\n \n result_test = books.aggregate(pipeline_test)\n print(\"Books authors\")\n pprint(list(result_test))\n\n\ndef query_books():\n\n # Count number of books availible.\n book_count = books.count_documents(filter={})\n print(\"num of books\", book_count)\n\n # Want to find all books with specific author\n\n query_no_books = {\n \"no_of_books_sold\": {\n \"$lt\": 5\n }\n }\n\n result = books.find(query_no_books)\n print(\"Query_no_books:\")\n pprint(list(result))\n\n\n result = books.find({}).sort([(\"date_written\",pymongo.DESCENDING)]).limit(2)\n print(\"Query sort limit:\")\n pprint(list(result))\n\n # Wrappers\n result = books.find({}).max_time_ms(1)\n print(\"Query wrap max time ms:\")\n pprint(list(result))\n\n # Snapshot depricated: use hint { _id: 1} instead, \n # to prevent cursor from returning a document more than once. \n\n # runCommand({\"drop\":\"test\"}) \n\n\nif __name__ == \"__main__\": # Only ran when this is main module (not imported)\n init_db()\n aggregate_books()\n #query_books()"
},
{
"alpha_fraction": 0.4865255057811737,
"alphanum_fraction": 0.5288739204406738,
"avg_line_length": 24.341463088989258,
"blob_id": "9821e5775c7c3dcdf5934bd94379c4b4bd8f545f",
"content_id": "39e1d2d4bca845d7ef69d065aa81644a833dc0e8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2078,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 82,
"path": "/flask/bookstore/books/controllers/books_controller.py",
"repo_name": "corast/kyb",
"src_encoding": "UTF-8",
"text": "from datetime import datetime\n\nfrom flask import make_response, abort, request\n\n#UTIL FUNCTIONS\ndef get_timestamp():\n return datetime.utcnow().isoformat()\n\ndef health():\n return \"pong\"\n\nBOOKS = {\n \"book_name_1\":{\n \"name\":\"book_name_1\",\n \"date_written\":\"2019-01-09T00:00:00.000+0000\",\n \"no_of_books_sold\":2, \n \"city\":\"City-A\",\n \"author\":\"[email protected]\"\n },\n \"book_name_2\":{\n \"name\":\"book_name_2\",\n \"date_written\":\"2019-02-10T00:00:00.000+0000\",\n \"no_of_books_sold\":3, \n \"city\":\"City-B\",\n \"author\":\"[email protected]\"\n },\n \"book_name_3\":{\n \"name\":\"book_name_3\",\n \"date_written\":\"2019-03-10T04:00:00.000+0000\",\n \"no_of_books_sold\":5, \n \"city\":\"City-B\",\n \"author\":\"[email protected]\"\n }\n}\n\ndef read_all():\n return [BOOKS[key] for key in sorted(BOOKS.keys())]\n\ndef read_one(bookname):\n # Does the person exist in people?\n if bookname in BOOKS:\n book = BOOKS.get(bookname)\n return book\n # otherwise, nope, not found\n else:\n abort(\n 404, f\"Book with name {bookname} not found\"\n )\n\ndef create():\n book = request.get_json()\n print(book)\n bookname = book.get(\"name\",None)\n\n if bookname is not None and bookname not in BOOKS:\n BOOKS.update({\n bookname: {\n \"name\":bookname,\n \"date_written\":get_timestamp(),\n \"no_of_books_sold\":book.get(\"amount\",0),\n \"city\":book.get(\"city\",None),\n \"author\":book.get(\"author\",None)\n }\n })\n response = make_response(BOOKS.get(bookname),201)\n response.headers['Content-Type'] = 'application/json'\n return response\n else:\n abort(\n 400, f\"Book must have a name\"\n )\n \ndef delete(bookname):\n if bookname in BOOKS:\n del BOOKS[bookname]\n return make_response(\n f\"{bookname} successfully deleted\", 200\n )\n else:\n abort(\n 404, f\"Book with name {bookname} not found\"\n )\n"
},
{
"alpha_fraction": 0.7394958138465881,
"alphanum_fraction": 0.7535014152526855,
"avg_line_length": 24.571428298950195,
"blob_id": "a7530a7399e3f196a719ebd8412192eeea46616c",
"content_id": "72a1c62b2b693989bb3a8e25905c999fdf553fe4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 357,
"license_type": "no_license",
"max_line_length": 113,
"num_lines": 14,
"path": "/kubernetes/deploy_an_app/commands.sh",
"repo_name": "corast/kyb",
"src_encoding": "UTF-8",
"text": "kubectl version\n\nkubectl get nodes\n\nkubectl create deployment kubernetes-bootcamp --image=gcr.io/google-samples/kubernetes-bootcamp:v1\n\nkubectl get deployments\n\nkubectl proxy\n\ncurl http://localhost:8001/version\n\nexport POD_NAME=$(kubectl get pods -o go-template --template '{{range .items}}{{.metadata.name}}{{\"\\n\"}}{{end}}')\necho Name of the Pod: $POD_NAME"
},
{
"alpha_fraction": 0.563971996307373,
"alphanum_fraction": 0.5792489051818848,
"avg_line_length": 31.70833396911621,
"blob_id": "9ce8aeb54b8a330eaf9609fa4cc7f59fb118b6b6",
"content_id": "7d506eae84dea714fb465cb2abf73343aec4c7dd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1571,
"license_type": "no_license",
"max_line_length": 97,
"num_lines": 48,
"path": "/kubernetes/vue-flask-mongo-kubernetes/flask-api/resources/posts.py",
"repo_name": "corast/kyb",
"src_encoding": "UTF-8",
"text": "from flask import jsonify, request, make_response\nfrom flask_restful import Resource\nfrom pymongo.collection import ReturnDocument\nfrom database.mongo import mongo\n\nclass Posts(Resource):\n def get(self):\n # readPreference='secondaryPreferred' with GET, must get an slave client istead for this.\n posts_db = mongo.db.posts\n posts = list(posts_db.find())\n return make_response(jsonify(posts), 200)\n\n def post(self):\n post = request.get_json()\n posts_db = mongo.db.posts # get collection.\n post_id = posts_db.insert(post)\n new_post = posts_db.find_one({'_id': post_id })\n return make_response(new_post, 200)\n \nclass Post(Resource):\n def put(self, post_id):\n post = request.get_json()\n posts_db = mongo.db.posts\n new_post = posts_db.find_one_and_replace(\n {'_id': post_id}, \n post, \n return_document=ReturnDocument.AFTER\n )\n if (new_post):\n return make_response(new_post, 200)\n else:\n return make_response('', 404) \n \n def delete(self, post_id):\n posts_db = mongo.db.posts\n result = posts_db.delete_one({'_id': post_id})\n if (result.deleted_count):\n return make_response('', 204)\n else:\n return make_response('', 404)\n\n def get(self, post_id):\n posts_db = mongo.db.posts\n post = posts_db.find_one(post_id)\n if (post):\n return make_response(post, 200)\n else:\n return make_response('', 404)\n\n"
},
{
"alpha_fraction": 0.7005714178085327,
"alphanum_fraction": 0.7279999852180481,
"avg_line_length": 40.619049072265625,
"blob_id": "6882d2a191c905d04e0c378dc9e459f4e6aeada4",
"content_id": "7a72ea3edb6a227bfcb29f9c626c6032d1fd54a2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 875,
"license_type": "no_license",
"max_line_length": 245,
"num_lines": 21,
"path": "/kubernetes/deploy_mongodb_persistent/README.md",
"repo_name": "corast/kyb",
"src_encoding": "UTF-8",
"text": "# Mongodb pod\n\nScalable mongodb with replicasets\nOne master responsible for all write operations, with slaves copying from master and grandint read access to these copies.\n\n## Setup replicasets\n\n```bash\n$ mongo\n> rs.initiate({_id: \"MainRepSet\", version: 1, members: [\n { _id: 0, host : \"mongod-0.mongodb-service.default.svc.cluster.local:27017\" },\n { _id: 1, host : \"mongod-1.mongodb-service.default.svc.cluster.local:27017\" },\n { _id: 2, host : \"mongod-2.mongodb-service.default.svc.cluster.local:27017\" }\n ]});\n```\n\n\"mongod-2.mongodb-service.default.svc.cluster.local\" is Hostname of pods\n\n## Anti affinity\n\nInter-Pod Anti-Affinity ensures that no 2 Mongo Pods are scheduled on the same worker node, thus, making it resilient to node failures. Also, it is recommended to keep the nodes in different AZs so that the cluster is resilient to Zone failures.\n\n"
},
{
"alpha_fraction": 0.690656840801239,
"alphanum_fraction": 0.7332226634025574,
"avg_line_length": 43.157066345214844,
"blob_id": "2832bd44d7018412bdd6aa811f13cee7c2fee484",
"content_id": "5d8f4b4911c8932bdaf90c8614743f9a0dbce828",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 8450,
"license_type": "no_license",
"max_line_length": 397,
"num_lines": 191,
"path": "/kubernetes/scaling_mongodb_on_kubernetes/README.md",
"repo_name": "corast/kyb",
"src_encoding": "UTF-8",
"text": "# scaling mongodb on kubernetes\n\n<https://medium.com/faun/scaling-mongodb-on-kubernetes-32e446c16b82>\n\n_StatefulSets are intended to be used with stateful applications and distributed systems._\n\nOne of the best use cases for this is to orchaestrate data-store services such as MongoDB, ElasticSearch, Redis, Zookeeper and so on.\n\nSome of the features that can be ascribed to StatefulSets are:\n\n1. Pods with Ordinal Indexes\n2. Stable Network Identities\n3. Ordered and Parallel Pod Management\n4. Rolling Updates\n\nDetails for these can be found here. <https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/>\n\nOne very distinct feature of Stateful Sets is to provide Stable\nNetwork Identities which when used with Headless Services , can be even more powerful.\n\nWithout spending much time on information readily available in Kubernetes documentation, let us focus on running and scaling a MongoDB cluster.\n\nYou need to have a running Kubernetes Cluster with RBAC enabled (recommended). In this tutorial I will be using a GKE cluster, however, AWS EKS or Microsoft’s AKS or a Kops Managed K8’s K8’s are also viable alternatives.\n\nWe will deploy the following components for our MongoDB cluster\n\n1. Daemon Set to configure HostVM\n2. Service Account and ClusterRole Binding for Mongo Pods\n3. Storage Class to provision persistent SSDs for the Pods\n4. Headless Service to access to Mongo Containers\n5. Mongo Pods Stateful Set\n6. GCP Internal LB to access MongoDB from outside the kuberntes cluster (Optional)\n7. Access to pods using Ingress (Optional)\n\nIt is important to note that each MongoDB Pod will have a sidecar running, in order to configure the replica set, on the fly. The sidecar checks for new members every 5 seconds.\n\n__Important Points:__\n\n1. The Sidecar for Mongo should be configured carefully with proper environment variables, stating the labels given to the pod, namespace for the deployment and service. Details about the sidecar container can be found here <https://github.com/cvallance/mongo-k8s-sidecar>.\n2. The guidance around default cache size is: “50% of RAM minus 1 GB, or 256 MB”. Given that the amount of memory requested is 2GB, the WiredTiger cache size here, has been set to 256MB\n3. Inter-Pod Anti-Affinity ensures that no 2 Mongo Pods are scheduled on the same worker node, thus, making it resilient to node failures. Also, it is recommended to keep the nodes in different AZs so that the cluster is resilient to Zone failures.\n4. The Sevice Account currently deployed has admin priviledges. However, it should be restricted to the DB’s namespace.\n\n## Delpoy steps\n\n```s\nkubectl apply -f configure-node.yml\nkubectl apply -f mongo.yml\n```\n\nThe headless service with noCluster-IP and neither an External-IP, is a __headless service.__ svc/mongo will directly resolve to __Pod-IPs__ for our Stateful Sets.\n\n### DNS resolution\n\nTo verify DNS resolution. We launch an interactive shell within our cluster.\n\n```s\nkubectl apply -f dnslookup.yaml\nkubectl exec -it dnsutils -- sh\n/# dig mongo.default +search +noall +answer +cmd\n; <<>> DiG 9.11.6-P1 <<>> mongo.default +search +noall +answer +cmd \n;; global options: +cmd\n<answer>\nmongo.default.svc.cluster.local. 30 IN A 172.17.0.9\nmongo.default.svc.cluster.local. 30 IN A 172.17.0.3\nmongo.default.svc.cluster.local. 
30 IN A 172.17.0.4\n```\n\nOLD METHOD FROM AUTHOR (dig not included in ubuntu image anymore)\n\n```s\nkubectl run my-shell --rm -i --tty --image ubuntu -- bash\nroot@my-shell-68974bb7f7-cs4l9:/# dig mongo.mongo +search +noall +answer\n; <<>> DiG 9.11.3-1ubuntu1.1-Ubuntu <<>> mongo.mongo +search +noall +answer\n;; global options: +cmd\nmongo.mongo.svc.cluster.local. 30 IN A 10.56.7.10\nmongo.mongo.svc.cluster.local. 30 IN A 10.56.8.11\nmongo.mongo.svc.cluster.local. 30 IN A 10.56.1.4\n```\n\nThe DNS for service will be [name of service].[namespace of service]. In our case `mongo.mongo`.\n\nThe IPS( 10.56.6.17, 10.56.7.10, 10.56.8.11 ) are our Mongo Stateful Set’s Pod IPs. \nThis can be tested by running a nslookup over these, from inside the cluster.\n\n```s\nroot@my-shell-68974bb7f7-cs4l9:/# nslookup 10.56.6.17\n\n17.6.56.10.in-addr.arpa name = mongo-0.mongo.mongo.svc.cluster.local.\n```\n\nNslookup inside dnsutils pod.\n\n```sh \n/ # nslookup 172.17.0.9\n9.0.17.172.in-addr.arpa name = mongo-2.mongo.default.svc.cluster.local. \n\n/ # nslookup 172.17.0.3\n3.0.17.172.in-addr.arpa name = mongo-0.mongo.default.svc.cluster.local. \n\n/ # nslookup 172.17.0.4\n4.0.17.172.in-addr.arpa name = mongo-1.mongo.default.svc.cluster.local.\n```\n\nIf you app is deployed in the K8’s cluster itself, then it can access the nodes by\n\n```s\nNode-0: mongo-0.mongo.mongo.svc.cluster.local:27017\nNode-1: mongo-1.mongo.mongo.svc.cluster.local:27017\nNode-2: mongo-2.mongo.mongo.svc.cluster.local:27017\n```\n\nIf you would like to access the mongo nodes from outside the cluster you can deploy internal load balancers for each of these pods or create an internal ingress, using an Ingress Controller such as NGINX or Traefik.\n\n### GCP Internal LB SVC Configuration (Optional)\n\n```yaml\napiVersion: v1\nkind: Service\nmetadata: \n annotations: \n cloud.google.com/load-balancer-type: Internal\n name: mongo-0\n namespace: mongo\nspec: \n ports: \n - \n port: 27017\n targetPort: 27017\n selector: \n statefulset.kubernetes.io/pod-name: mongo-0\n type: LoadBalancer\n```\n\nDeploy 2 more such services for mongo-1 and mongo-2.\nYou can provide the IPs of the Internal Load Balancer to the MongoClient URI.\n\n```bash\nroot$ kubectl -n mongo get svc\nNAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE\nmongo ClusterIP None <none> 27017/TCP 15m\nmongo-0 LoadBalancer 10.59.252.157 10.20.20.2 27017:30184/TCP 9m\nmongo-1 LoadBalancer 10.59.252.235 10.20.20.3 27017:30343/TCP 9m\nmongo-2 LoadBalancer 10.59.254.199 10.20.20.4 27017:31298/TCP 9m\n```\n\nThe external IPs for mongo-0/1/2 are the IPs of the newly created TCP loadbalancers. \nThese are local to your Subnetwork or peered networks, if any.\n\n### Access Pods using Ingress (Optional)\n\nTraffic to Mongo Stateful set pods can also be directed using an Ingress Controller such as Nginx. \nMake sure the ingress service is internal and not exposed over public ip. The ingress object will look something like this:\n\n```yaml\nspec:\n rules:\n - host: mongo.example.com\n http:\n paths:\n - path: '/'\n backend:\n serviceName: mongo # There is no extra service. 
This is \n servicePort: '27017' # the headless service.\n```\n\nIt is important to note that your application is aware of atleast one mongo node which is currently up so that it can discover all the others.\n\nAuthor used Robo3T as mongoclient for following step.\nConnect to one of the nodes, and running `rs.status()` to see details of replicaset, and check if the other 2 pods were configured and connected to the Replica Set automatically.\nA fully qualified domain name should be seen from each member.\n\nNow we scale the Stateful Set for mongo Pods to check if the new mongo containers get added to the ReplicaSet or not.\n\n```s\nroot$ kubectl -n mongo scale statefulsets mongo --replicas=4\nstatefulset \"mongo\" scaled\n\nroot$ kubectl -n mongo get pods -o wide\n```\n\nThe scaling action will also automatically provision a persistent volume, which will act as the data directory for the new pod.\nWe run `rs.scale()` again to see if another member was added.\n\n## Further Considerations\n\n1. It can be helpful to label the Node Pool which will be used for Mongo Pods and ensure that appropriate Node Affinity is mentioned in the Spec for the Stateful Set and HostVM configurer Daemon Set . This is because the Daemon set will tweak some parameters of the host OS and those settings should be restricted for MongoDB Pods only. Other applications might work better without those settings.\n\n2. Labelling a node pool is extremely easy in GKE, can be directly from the GCP console.\n3. Although we have specified CPU and Memory limits in the Pod Spec, we can also consider deploying a VPA (Vertical Pod Autoscaler).\n4. Traffic to our DB from inside the cluster can be controlled by implementing network policies or a service mesh such as Istio.\n"
},
{
"alpha_fraction": 0.800000011920929,
"alphanum_fraction": 0.800000011920929,
"avg_line_length": 26.66666603088379,
"blob_id": "e7277822985316c561d067c1c19fcc0dfd8a3b07",
"content_id": "5b0dc88127e0398d0edd1bba8f98521e1270efee",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 165,
"license_type": "no_license",
"max_line_length": 62,
"num_lines": 6,
"path": "/kubernetes/deploy_mongodb_persistent/teardown.sh",
"repo_name": "corast/kyb",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\necho \"Deleting the mongo service and statefulset\"\nkubectl delete -f ./svc_sts_mongo.yaml\n\nkubectl delete persistentvolumeclaims -l service=mongo-service"
},
{
"alpha_fraction": 0.6676802635192871,
"alphanum_fraction": 0.6794092059135437,
"avg_line_length": 27.432098388671875,
"blob_id": "d295224dd2ee1a79b6997a5b7cad01c1ef3eb6b2",
"content_id": "4e6d198e84adba8e7c46d468b16f46c6caae7c2c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2302,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 81,
"path": "/mongo-q/basic-main.py",
"repo_name": "corast/kyb",
"src_encoding": "UTF-8",
"text": "## Basic tutorial from mongodb.comm\nimport datetime\nimport pprint\n\nimport pymongo\nfrom pymongo import MongoClient\n\nclient = MongoClient('localhost', 27017)\n\ndb = client['test_database']\nprint(db.name)\n\npost = {\"author\": \"Mike\",\"text\": \"My first blog post!\",\n \"tags\": [\"mongodb\", \"python\", \"pymongo\"],\n \"date\": datetime.datetime.utcnow()}\n\nc_posts = db.posts # select posts collection in test_database\nprint(c_posts)\n\npost_id = c_posts.insert_one(post).inserted_id\n\nprint(post_id)\nprint(db.list_collection_names())\npprint.pprint(c_posts.find_one())\n\n\nprint(c_posts.find_one({\"author\": \"Mike\"}))\n\npprint.pprint(c_posts.find_one({\"_id\": post_id}))\n\npost_id_as_str = str(post_id)\nprint(c_posts.find_one({\"_id\": post_id_as_str}) ) # Mone\n\nfrom bson.objectid import ObjectId\n\ndef get(post_id):\n # Convert from string to ObjectId:\n document = client.db.collection.find_one({'_id': ObjectId(post_id)})\n return document\n\nnew_posts = [{\"author\": \"Mike\",\"text\": \"Another Post!\",\n \"tags\": [\"mongodb\", \"python\", \"pymongo\"],\n \"date\": datetime.datetime.utcnow()},\n {\"author\": \"Eliot\",\"text\": \"My first blog post!\",\n \"title\":\"MongoDB is fun\",\n \"tags\": [\"mongodb\", \"python\", \"pymongo\"],\n \"date\": datetime.datetime.utcnow()}]\n\nresult = c_posts.insert_many(new_posts)\nprint(result.inserted_ids)\n\nfor post in c_posts.find():\n pprint.pprint(post)\n\nprint(c_posts.count_documents(filter={}))\nprint(c_posts.count_documents(filter={\"author\":\"Mike\"}))\n\n# Find only documents date greater than a specific datetime.\nd = datetime.datetime(2009, 11, 12, 12)\nfor post in c_posts.find({\"date\":{\"$gt\":d}}).sort(\"author\"):\n pprint.pprint(post)\n\n# Indexing\n\nresult = db.profiles.create_index([('user_id', pymongo.ASCENDING)],unique=True)\nprint(sorted(list(db.profiles.index_information())))\nuser_profiles = [\n {'user_id': 211, 'name': 'Luke'},{'user_id': 212, 'name': 'Ziltoid'}\n]\ntry:\n result = db.profiles.insert_many(user_profiles)\nexcept pymongo.errors.BulkWriteError as e:\n print(\"Id is already present is Database:\",e)\n\nnew_profile = {'user_id':213,\"name\":\"Drew\"}\nduplicate_profile = {'user_id':213,\"name\":\"Tommy\"}\ntry:\n result = db.profiles.insert_one(new_profile)\n result = db.profiles.insert_one(duplicate_profile)\nexcept pymongo.errors.DuplicateKeyError as e: \n print(e)"
},
{
"alpha_fraction": 0.6623931527137756,
"alphanum_fraction": 0.6623931527137756,
"avg_line_length": 22.399999618530273,
"blob_id": "1ceb6240f3c33d69ce2a7e37e8b9111ea2678d10",
"content_id": "727be5ef0e8935d7b7d04f6e7b0c5e18a04e2228",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 234,
"license_type": "no_license",
"max_line_length": 70,
"num_lines": 10,
"path": "/kubernetes/vue-flask-mongo-kubernetes/flask-api/database/mongo.py",
"repo_name": "corast/kyb",
"src_encoding": "UTF-8",
"text": "from flask_pymongo import PyMongo\n\n\"\"\"Database object\"\"\"\nmongo = PyMongo()\n\ndef configure_mongodb(app):\n \"\"\" Configure MongoDB.\n Uses `Flask-PyMongo <https://flask-pymongo.readthedocs.org/>`_\n \"\"\"\n mongo.init_app(app)\n"
},
{
"alpha_fraction": 0.502743124961853,
"alphanum_fraction": 0.5132169723510742,
"avg_line_length": 22.85714340209961,
"blob_id": "91eb427721c51dce3c7e8da6e2b6f9638db9bad6",
"content_id": "2a3b4deff53ec8b109e7ca52930c7cfb73b3b929",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2005,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 84,
"path": "/flask/bookstore/authors/controllers/authors_controller.py",
"repo_name": "corast/kyb",
"src_encoding": "UTF-8",
"text": "\nfrom datetime import datetime\n\nfrom flask import make_response, abort, request\n\n#UTIL FUNCTIONS\ndef get_timestamp():\n return datetime.utcnow().isoformat()\n\ndef health():\n return \"pong\"\nAUTHORS = {\n \"[email protected]\":{\n \"fname\":\"Per\",\n \"lname\":\"Olav\",\n \"age\":45,\n \"city\":\"City-A\",\n \"email\":\"[email protected]\"\n },\n \"[email protected]\":{\n \"fname\":\"Jon\",\n \"lname\":\"Olav\",\n \"age\":67,\n \"city\":\"City-B\",\n \"email\":\"[email protected]\"\n },\n \"[email protected]\":{\n \"fname\":\"Tor\",\n \"lname\":\"Petter\",\n \"age\":33,\n \"city\":\"City-C\",\n \"email\":\"[email protected]\"\n }\n}\n\ndef read_all_authors():\n return [AUTHORS[key] for key in sorted(AUTHORS.keys())]\n\ndef read_one(email):\n # Does the person exist in people?\n if email in AUTHORS:\n author = AUTHORS.get(email)\n return author\n # otherwise, nope, not found\n else:\n abort(\n 404, f\"Author with email {email} not found\"\n )\n\ndef create():\n author = request.get_json()\n print(\"Attemtping to create:\",author)\n email = author.get(\"email\",None)\n\n if email is not None and email not in AUTHORS:\n AUTHORS.update({\n email: {\n \"fname\":author.get(\"fname\",None),\n \"lname\":author.get(\"lname\",None),\n \"age\":author.get(\"age\",None),\n \"city\":author.get(\"city\",None),\n \"email\":email,\n }\n })\n\n author = AUTHORS.get(email) \n assert author is not None\n response = make_response(author,201)\n response.headers['Content-Type'] = 'application/json'\n return response\n else:\n abort(\n 400, f\"Author must have an email\"\n )\n \ndef delete(email):\n if email in AUTHORS:\n del AUTHORS[email]\n return make_response(\n f\"{email} of author successfully deleted\", 200\n )\n else:\n abort(\n 404, f\"Author with email {email} not found\"\n )\n"
},
{
"alpha_fraction": 0.7695418000221252,
"alphanum_fraction": 0.7695418000221252,
"avg_line_length": 25.5,
"blob_id": "dead84b269b6dfc68c1d0ec4a255b3c110f10979",
"content_id": "3936387d0642e5878f1f7baa244e01aaf54d90d7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 742,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 28,
"path": "/kubernetes/running_flask_kub/flask-vue-kubernetes/teardown.sh",
"repo_name": "corast/kyb",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\necho \"Creating the volume...\"\n\nkubectl delete -f ./kubernetes/persistent-volume-claim.yml\n\necho \"Creating the database credentials...\"\n\nkubectl delete -f ./kubernetes/secret.yml\n\necho \"Creating the postgres deployment and service...\"\n\nkubectl delete -f ./kubernetes/postgres-deployment.yml\nkubectl delete -f ./kubernetes/postgres-service.yml\n\necho \"Creating the flask deployment and service...\"\n\nkubectl delete -f ./kubernetes/flask-deployment.yml\nkubectl delete -f ./kubernetes/flask-service.yml\n\necho \"Adding the ingress...\"\n\nkubectl delete -f ./kubernetes/minikube-ingress.yml\n\necho \"Creating the vue deployment and service...\"\n\nkubectl delete -f ./kubernetes/vue-deployment.yml\nkubectl delete -f ./kubernetes/vue-service.yml\n"
}
] | 29 |
xandernewton/DeepLearningLabs
|
https://github.com/xandernewton/DeepLearningLabs
|
442f6d9f520a2d9fb2893ccbf9711e0b5e224f8b
|
2fb622905ff4a16f9a2b29fc380d507afaf136e2
|
9ffd675e433a34c6d45ad37fcfade72e8e504b4c
|
refs/heads/master
| 2022-11-15T08:19:36.649251 | 2020-06-24T10:59:48 | 2020-06-24T10:59:48 | 238,684,823 | 0 | 1 | null | null | null | null | null |
[
{
"alpha_fraction": 0.4113980531692505,
"alphanum_fraction": 0.4643811285495758,
"avg_line_length": 29.364864349365234,
"blob_id": "d0daa5b793e2c266e3ff5074818e373e1a76d87c",
"content_id": "8d5651f64d44a64bd8b27069c646a0eb9109622a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2246,
"license_type": "no_license",
"max_line_length": 86,
"num_lines": 74,
"path": "/Excercise1/exercise1.py",
"repo_name": "xandernewton/DeepLearningLabs",
"src_encoding": "UTF-8",
"text": "import torch\nfrom typing import Tuple\n\n\ndef sgd_factorise(A: torch.Tensor, rank: int, num_epochs=1000, lr=0.01) \\\n -> Tuple[torch.Tensor, torch.Tensor]:\n m, n = A.shape\n U = torch.rand(m, rank)\n V = torch.rand(n, rank)\n for epoch in range(0, num_epochs):\n for r in range(0, m):\n for c in range(0, n):\n e = A[r, c] - U[r, :] @ V[c, :].t()\n U[r, :] = U[r, :] + lr * e * V[c, :]\n V[c, :] = V[c, :] + lr * e * U[r, :]\n return U, V\n\n\n\ndef truncatedSVD(A: torch.Tensor):\n\n U, S, V = torch.svd(A)\n m = S.shape[0] -1\n S[m] = 0\n return U, S, V\n\n\ndef sgd_factorise_masked(A: torch.Tensor, M: torch.Tensor, rank: int, num_epochs=1000,\n lr=0.01) -> Tuple[torch.Tensor, torch.Tensor]:\n m, n = A.shape\n U = torch.rand(m, rank)\n V = torch.rand(n, rank)\n for epoch in range(0, num_epochs):\n for r in range(0, m):\n for c in range(0, n):\n if M[r, c] == 1:\n e = A[r, c] - U[r, :] @ V[c, :].t()\n U[r, :] = U[r, :] + lr * e * V[c, :]\n V[c, :] = V[c, :] + lr * e * U[r, :]\n return U, V\n\n\n\n\n\nif __name__ == '__main__':\n\n test = torch.tensor([[0.3374, 0.6005, 0.1735],\n [3.3359, 0.0492, 1.8374],\n [2.9407, 0.5301, 2.2620]])\n\n U, V = sgd_factorise(test, 2)\n loss = torch.nn.functional.mse_loss([email protected](), test, reduction='sum')\n print(f\"Approximation {[email protected]()}\")\n print(f'Loss is {loss}')\\\n\n U, S , V = truncatedSVD(test)\n reconstruction = U @ torch.diag(S) @ V.t()\n loss = torch.nn.functional.mse_loss(reconstruction, test, reduction='sum')\n print(f\"Approximation \\n {reconstruction}\")\n print(f'Loss is {loss}')\n\n test_2 = torch.tensor([[0.3374, 0.6005, 0.1735],\n [0, 0.0492, 1.8374],\n [2.9407, 0, 2.2620]])\n\n mask = torch.tensor([[1, 1, 1],\n [0, 1, 1],\n [1, 0, 1]])\n\n U, V = sgd_factorise_masked(test_2, mask, 2)\n loss = torch.nn.functional.mse_loss(U @ V.t(), test, reduction='sum')\n print(f\"Approximation \\n {U @ V.t()}\")\n print(f'Loss is {loss}')"
},
{
"alpha_fraction": 0.8513513803482056,
"alphanum_fraction": 0.8513513803482056,
"avg_line_length": 73,
"blob_id": "e7b99998ad9e7b057a554cc870f27586e860d39d",
"content_id": "cfb175e66e1c604f413a714bc21ad1d992d6b5b7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 74,
"license_type": "no_license",
"max_line_length": 73,
"num_lines": 1,
"path": "/README.md",
"repo_name": "xandernewton/DeepLearningLabs",
"src_encoding": "UTF-8",
"text": "Deep Learning Labs for the University of Southampton Deep Learning Module\n"
}
] | 2 |
ragu-git/SearchAlgorithms
|
https://github.com/ragu-git/SearchAlgorithms
|
8ade832eaf8a04f7a11d37e78e4edebc95f54612
|
dab45a1af393e898de01dcef164c71fd366d7226
|
c68199c0a05841507f20fbd195681d64bb77eb85
|
refs/heads/master
| 2020-04-14T01:51:13.981540 | 2019-01-24T05:28:38 | 2019-01-24T05:28:38 | 163,570,835 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.31333523988723755,
"alphanum_fraction": 0.33835655450820923,
"avg_line_length": 30.68468475341797,
"blob_id": "e219e2b9bc6259558a6b072ff46991216f1d3d7d",
"content_id": "81ba23849cefdb474ff9026bd01bb471b9323b76",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3517,
"license_type": "no_license",
"max_line_length": 107,
"num_lines": 111,
"path": "/src/ExpansionSearch.py",
"repo_name": "ragu-git/SearchAlgorithms",
"src_encoding": "UTF-8",
"text": "# -----------\n# User Instructions:\n#\n# Modify the the search function so that it returns\n# a shortest path as follows:\n# \n# [['>', 'v', ' ', ' ', ' ', ' '],\n# [' ', '>', '>', '>', '>', 'v'],\n# [' ', ' ', ' ', ' ', ' ', 'v'],\n# [' ', ' ', ' ', ' ', ' ', 'v'],\n# [' ', ' ', ' ', ' ', ' ', '*']]\n#\n# Where '>', '<', '^', and 'v' refer to right, left, \n# up, and down motions. Note that the 'v' should be \n# lowercase. '*' should mark the goal cell.\n#\n# You may assume that all test cases for this function\n# will have a path from init to goal.\n# ----------\n\ngrid = [[0, 0, 1, 0, 0, 0],\n [1, 0, 0, 0, 0, 0],\n [0, 0, 1, 0, 1, 0],\n [0, 0, 1, 0, 1, 0],\n [0, 0, 1, 0, 1, 0]]\ninit = [0, 0]\ngoal = [len(grid)-1, len(grid[0])-1]\ncost = 1\n\ndelta = [[-1, 0 ], # go up\n [ 0, -1], # go left\n [ 1, 0 ], # go down\n [ 0, 1 ]] # go right\n\ndelta_name = ['^', '<', 'v', '>']\n\n\n\ndef search(grid,init,goal,cost):\n # ----------------------------------------\n # insert code here\n # ----------------------------------------\n closed = [[0 for col in range(len(grid[0]))] for row in range(0,len(grid))]\n #closed[init[0]][init[1]]=1\n action = [[' ' for col in range(len(grid[0]))] for row in range(0,len(grid))]\n \n rows = len(grid)\n cols = len(grid[0])\n #print (rows , cols,closed,goal,grid)\n x= init[0]\n y= init[1]\n g=0\n tOpen = [[g,x,y]]\n open =[]\n found = False\n \n while found is False :\n #open=[]\n open = tOpen[:]\n tOpen = []\n g+=cost\n #print (g)\n if len(open)==0 :\n print ('Fail')\n return action\n else :\n for i in range(0,len(open)):\n\n x = open[i][1]\n y = open[i][2]\n for d in range(0,len(delta)) :\n dx=x+delta[d][0]\n dy=y+delta[d][1]\n \n if dx>-1 and dy>-1 and dx<rows and dy<cols:\n # print (dx,dy)\n if closed[dx][dy]!=1 and grid[dx][dy]!=1:\n tOpen.append([g,dx,dy])\n closed[dx][dy]=1\n \n action[dx][dy]= d\n # print(delta_name[d],dx,dy,goal[0],goal[1])\n #print(goal[0]==dx and goal[1]==dy)\n if goal[0]==dx and goal[1]==dy:\n found = True\n open = tOpen[:]\n #print(closed) \n #expanded.sort()\n #print (action)\n plan = [[' ' for col in range(len(grid[0]))] for row in range(0,len(grid))]\n x=goal[0]\n y=goal[1]\n plan[x] [y]='*'\n while x!=init[0] or y!=init[1]:\n \n x2=x - delta[action[x][y]][0]\n y2=y - delta[action[x][y]][1] \n plan[x2][y2]=delta_name[action[x][y]]\n x=x2\n y=y2\n return plan\n \n \n \n \n\nresult =search(grid,init,goal,cost)\n\n \nfor i in range(len(result)):\n print(result[i])\n"
},
{
"alpha_fraction": 0.37949103116989136,
"alphanum_fraction": 0.405688613653183,
"avg_line_length": 27.4255313873291,
"blob_id": "007aa28d6e1041b1404b66cda35292390b49d9c9",
"content_id": "f1305648c2abed7301f2feadf452e6cdf7c18366",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2672,
"license_type": "no_license",
"max_line_length": 90,
"num_lines": 94,
"path": "/src/DynamicSearch.py",
"repo_name": "ragu-git/SearchAlgorithms",
"src_encoding": "UTF-8",
"text": "# ----------\n# User Instructions:\n# \n# Create a function compute_value which returns\n# a grid of values. The value of a cell is the minimum\n# number of moves required to get from the cell to the goal. \n#\n# If a cell is a wall or it is impossible to reach the goal from a cell,\n# assign that cell a value of 99.\n# ----------\n\ngrid = [[0, 1, 0, 0, 0, 0],\n [0, 1, 0, 0, 0, 0],\n [0, 1, 0, 0, 0, 0],\n [0, 1, 0, 0, 0, 0],\n [0, 0, 0, 0, 1, 0]]\n \n\ngoal = [len(grid)-1, len(grid[0])-1]\ncost = 1 # the cost associated with moving from a cell to an adjacent one\n\ndelta = [[-1, 0 ], # go up\n [ 0, -1], # go left\n [ 1, 0 ], # go down\n [ 0, 1 ]] # go right\n\ndelta_name = ['^', '<', 'v', '>']\n\ndef compute_value(grid,goal,cost):\n # ----------------------------------------\n # insert code below\n # ----------------------------------------\n \n # make sure your function returns a grid of values as \n # demonstrated in the previous video.\n closed = [[0 for col in range(len(grid[0]))] for row in range(0,len(grid))]\n x = goal[0]\n y = goal[1]\n closed[x][y] = 1\n\n expand = [[99 for col in range(len(grid[0]))] for row in range(0,len(grid))]\n \n g = 0\n \n \n \n rows = len(grid)\n cols = len(grid[0])\n count =0\n tOpen = [[g,x,y]]\n open =[]\n Found = False\n \n while Found is False:\n #open=[]\n open = tOpen[:]\n \n #open.reverse()\n tOpen = []\n g+=cost\n #print (g)\n if len(open)==0 :\n #print ('Fail')\n #print (closed)\n return expand\n else :\n #for i in range(0,len(open)):\n open.sort()\n \n for i in range(len(open)):\n x = open[i][1]\n y = open[i][2]\n expand[x][y]= open[i][0]\n \n for d in range(0,len(delta)) :\n dx=x+delta[d][0]\n dy=y+delta[d][1]\n \n if dx>-1 and dy>-1 and dx<rows and dy<cols:\n \n #print (dx,dy,open)\n if closed[dx][dy]!=1 and grid[dx][dy]!=1:\n \n closed[dx][dy]=1\n #print (count,dx,dy, count+h,f,h,f>=(count+heuristic[dx][dy]))\n #if f>=(count+h):\n \n tOpen.append([g,dx,dy])\n \n \n \nresult = compute_value(grid,goal,cost) \nfor r in range(len(result)):\n print(result[r])\n"
},
{
"alpha_fraction": 0.3448275923728943,
"alphanum_fraction": 0.37786149978637695,
"avg_line_length": 28.75,
"blob_id": "f7426d189aed3f1170bbb543ec9e775c1d9b96d7",
"content_id": "8b5c41d0331bc33f156ac1a990711f6635e2397a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3451,
"license_type": "no_license",
"max_line_length": 86,
"num_lines": 116,
"path": "/src/A*Search.py",
"repo_name": "ragu-git/SearchAlgorithms",
"src_encoding": "UTF-8",
"text": "# -----------\n# User Instructions:\n#\n# Modify the the search function so that it becomes\n# an A* search algorithm as defined in the previous\n# lectures.\n#\n# Your function should return the expanded grid\n# which shows, for each element, the count when\n# it was expanded or -1 if the element was never expanded.\n# \n# If there is no path from init to goal,\n# the function should return the string 'fail'\n# ----------\n\ngrid = [[0, 1, 0, 0, 0, 0],\n [0, 1, 0, 0, 0, 0],\n [0, 1, 0, 0, 0, 0],\n [0, 1, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0]]\nheuristic = [[9, 8, 7, 6, 5, 4],\n [8, 7, 6, 5, 4, 3],\n [7, 6, 5, 4, 3, 2],\n [6, 5, 4, 3, 2, 1],\n [5, 4, 3, 2, 1, 0]]\n\ninit = [0, 0]\ngoal = [len(grid)-1, len(grid[0])-1]\ncost = 1\n\ndelta = [[-1, 0 ], # go up\n [ 0, -1], # go left\n [ 1, 0 ], # go down\n [ 0, 1 ]] # go right\n\ndelta_name = ['^', '<', 'v', '>']\n\ndef search(grid,init,goal,cost,heuristic):\n # ----------------------------------------\n # modify the code below\n # ----------------------------------------\n closed = [[0 for col in range(len(grid[0]))] for row in range(0,len(grid))]\n x = init[0]\n y = init[1]\n closed[x][y] = 1\n\n expand = [[-1 for col in range(len(grid[0]))] for row in range(0,len(grid))]\n action = [[-1 for col in range(len(grid[0]))] for row in range(0,len(grid))]\n # expand[x][y] = 1\n # expand[goal[0]][goal[1]] = '*' \n g = 0\n h = heuristic[x][y]\n f =g+ h\n \n \n rows = len(grid)\n cols = len(grid[0])\n count =0\n tOpen = [[f,g,h,x,y]]\n open =[]\n found = False\n \n while found is False :\n #open=[]\n open = tOpen[:]\n \n #open.reverse()\n tOpen = []\n g+=cost\n #print (g)\n if len(open)==0 :\n print ('Fail')\n print (closed)\n return 'Fail'\n else :\n #for i in range(0,len(open)):\n open.sort()\n \n \n x = open[0][3]\n y = open[0][4]\n expand[x][y]= count\n count+=1\n for d in range(0,len(delta)) :\n dx=x+delta[d][0]\n dy=y+delta[d][1]\n \n if dx>-1 and dy>-1 and dx<rows and dy<cols:\n \n #print (dx,dy,open)\n if closed[dx][dy]!=1 and grid[dx][dy]!=1:\n h = heuristic[dx][dy]\n f =count+ h\n closed[dx][dy]=1\n #print (count,dx,dy, count+h,f,h,f>=(count+heuristic[dx][dy]))\n #if f>=(count+h):\n \n tOpen.append([f,g,h,dx,dy])\n \n action[dx][dy]= d\n \n \n # print(delta_name[d],dx,dy,goal[0],goal[1])\n #print(goal[0]==dx and goal[1]==dy)\n if goal[0]==dx and goal[1]==dy:\n found = True\n open = tOpen[:]\n #print(closed) \n #open.sort()\n expand[dx][dy]= count\n \n \n return expand\n \n \nprint(search(grid,init,goal,cost,heuristic))\n"
}
] | 3 |
CarlosAlvarezGomez/AlgorithimicTrading
|
https://github.com/CarlosAlvarezGomez/AlgorithimicTrading
|
4c149c86e7f671636add2647ea826cde11a201e0
|
fc593482a2138a112e8f06d4834f26b70b1749c3
|
bb4918f6f15655b98ae5af3b853f4f216b52488e
|
refs/heads/master
| 2020-07-10T11:34:20.489573 | 2019-08-25T06:16:19 | 2019-08-25T06:16:19 | 204,254,105 | 1 | 1 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5304300785064697,
"alphanum_fraction": 0.5432332754135132,
"avg_line_length": 39.92620086669922,
"blob_id": "a4b303e3540bb4dd3d819479d3d1ceda28bed217",
"content_id": "30ef172592d53296f242c153bbac4268b4de4f53",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 11091,
"license_type": "no_license",
"max_line_length": 90,
"num_lines": 271,
"path": "/Backtester.py",
"repo_name": "CarlosAlvarezGomez/AlgorithimicTrading",
"src_encoding": "UTF-8",
"text": "import datetime\nimport math\nimport pandas as pd\nimport sqlite3\n\n################################################################################\n# Helper functions for backtest begin here\n################################################################################\n\n\ndef execute_action(action, date, dataframe, portfolio, commission):\n \"\"\"\n checks if an action is possible given the current portfolio, commission, and\n prices(obtained using date and dataframe), and executes it if it is\n possible. An action is a string the contains a type (either BUY or SELL),\n followed by a quantity(an int), and a ticker that is in the dataframe\n \"\"\"\n\n action = action.split()\n quantity = int(action[1])\n if quantity <= 0:\n raise Exception(\"You must trade a positive number of stocks\")\n company = action[2]\n price = dataframe.loc[lambda dataframe: (dataframe[\"Company\"] == company), :].iloc[\n 0\n ][2]\n if action[0] == \"BUY\":\n if portfolio[\"Dollars\"] >= (price * quantity) + commission:\n portfolio[\"Dollars\"] -= (price * quantity) + commission\n if company in portfolio.keys():\n portfolio[company] += quantity\n else:\n portfolio.update({company: quantity})\n print(portfolio)\n print(portfolio)\n return portfolio\n else:\n print(portfolio)\n raise Exception(\"You cannot afford to buy this many stocks\")\n elif action[0] == \"SELL\":\n if (company in portfolio.keys()) & (portfolio[company] >= quantity):\n if portfolio[\"Dollars\"] >= commission:\n portfolio[\"Dollars\"] += (price * quantity) - commission\n portfolio[company] -= quantity\n if portfolio[company] == 0:\n del portfolio[company]\n print(portfolio)\n return portfolio\n else:\n print(portfolio)\n raise Exception(\"You cannot afford the commission expense\")\n else:\n print(portfolio)\n raise Exception(\"You do not own this many stocks\")\n else:\n raise Exception(\"This type of command could not be processed\")\n return portfolio\n\n\ndef split_by_date(dataframe, start_date, end_date):\n \"\"\"\n splits a dataframe into a dictionary of n dataframes. Data that is available\n before the start_date is at key \"before start date,\" the rest will be the\n key with the corresponding date. 
For example: if df is a dataframe that\n contains data about daily temperatures in the 1980s, the start_date is\n January 1, 1980, and the end_date is January 1, 1983, then split_by_date(df,\n start_date, end_date) will return a dictionary of dataframes containing all\n the temperatures on weekdays between January 1, 1981 and January 1, 1983\n \"\"\"\n\n print(\"Splitting dataframe\")\n is_available = dataframe[\"Date\"] < str(start_date)[:10]\n not_available = dataframe[\"Date\"] >= str(start_date)[:10]\n available_data = dataframe[is_available]\n dataframe = dataframe[not_available]\n dictionary = dict(tuple(dataframe.groupby(\"Date\")))\n dictionary.update({\"before start date\": available_data})\n print(\"Dictionary has been split\")\n return dictionary\n\n\n# returns a dataframe with all the companies from the dataframe that are in the\ndef filter_by_company(dataframe, company_list):\n df = dataframe.iloc[0:0]\n for stock in company_list:\n df = pd.concat([df, dataframe[dataframe[\"Company\"] == stock]])\n return df\n\n\n################################################################################\n# Helper functions for backtest end here\n################################################################################\n\n\ndef backtest(database, strategy, initial_cash, commission=0):\n\n connection = sqlite3.connect(database)\n cursor = connection.cursor()\n\n cursor.execute(\"SELECT MIN(date) FROM Historical_Prices_and_Volumes\")\n earliest_date = datetime.datetime.strptime(cursor.fetchall()[0][0], \"%Y-%m-%d\")\n\n cursor.execute(\"SELECT MAX(date) FROM Historical_Prices_and_Volumes\")\n latest_date = datetime.datetime.strptime(cursor.fetchall()[0][0], \"%Y-%m-%d\")\n\n # earliest_date = datetime.datetime(2005, 1, 1)\n # latest_date = datetime.datetime(2010, 1, 1)\n\n portfolio = {\"Dollars\": initial_cash}\n\n price_volume_command = (\n \"SELECT * FROM Historical_Prices_and_Volumes WHERE Date >= '\"\n + str(earliest_date)[:10]\n + \"' AND Date <= '\"\n + str(latest_date)[:10]\n + \"';\"\n )\n price_volume_dataframe = pd.read_sql_query(price_volume_command, connection)\n price_volume_dataframe = price_volume_dataframe[\n price_volume_dataframe[\"Price\"] != \"nan\"\n ]\n price_volume_dataframe_dict = split_by_date(\n price_volume_dataframe, earliest_date, latest_date\n )\n del price_volume_command\n del price_volume_dataframe\n del price_volume_dataframe_dict[\"before start date\"]\n\n dividends_command = (\n \"SELECT * FROM Historical_Dividends WHERE Date >= '\"\n + str(earliest_date)[:10]\n + \"' AND Date <= '\"\n + str(latest_date)[:10]\n + \"';\"\n )\n dividends_dataframe = pd.read_sql_query(dividends_command, connection)\n dividends_dataframe = dividends_dataframe.dropna()\n\n dividend_dataframe_dict = split_by_date(\n dividends_dataframe, earliest_date, latest_date\n )\n\n del dividends_command\n del dividends_dataframe\n del dividend_dataframe_dict[\"before start date\"]\n\n stock_splits_command = (\n \"SELECT * FROM Historical_Stock_Splits WHERE Date <= '\"\n + str(latest_date)[:10]\n + \"' AND Split_Ratio != '1/0';\"\n )\n stock_splits_dataframe = pd.read_sql_query(stock_splits_command, connection)\n stock_splits_dataframe = stock_splits_dataframe.dropna()\n\n connection.close()\n\n stock_split_dataframe_dict = split_by_date(\n stock_splits_dataframe, earliest_date, latest_date\n )\n\n del stock_splits_command\n del stock_splits_dataframe\n del stock_split_dataframe_dict[\"before start date\"]\n\n i = 0\n\n for date in pd.date_range(earliest_date, latest_date, 
freq=\"B\"):\n str_date = str(date)[:10]\n print(str_date)\n if len(portfolio) > 1:\n\n # adds dividend payments to portfolio\n if str_date in dividend_dataframe_dict.keys():\n todays_dividends = dividend_dataframe_dict.pop(str_date)\n todays_dividends = filter_by_company(todays_dividends, portfolio.keys())\n if len(todays_dividends) > 0:\n for i in range(len(todays_dividends)):\n portfolio[\"Dollars\"] += (\n math.floor(\n (\n todays_dividends.iloc[i][2]\n * portfolio[todays_dividends.iloc[i][1]]\n )\n * 100\n )\n / 100\n )\n\n # adjusts portfolio for stock splits\n if str_date in stock_split_dataframe_dict.keys():\n todays_stock_splits = stock_split_dataframe_dict.pop(str_date)\n todays_stock_splits = filter_by_company(\n todays_stock_splits, portfolio.keys()\n )\n if len(todays_stock_splits) > 0:\n for i in range(len(todays_stock_splits)):\n ratio = todays_stock_splits.iloc[i][2].split(\"/\")\n if ratio[1] != \"0\":\n quotient = math.floor(\n portfolio[todays_stock_splits.iloc[i][1]]\n / int(ratio[0])\n )\n remainder = portfolio[todays_stock_splits.iloc[i][1]] % int(\n ratio[0]\n )\n portfolio[todays_stock_splits.iloc[i][1]] = (\n quotient * int(ratio[1])\n ) + remainder\n\n if str_date in price_volume_dataframe_dict.keys():\n # uses strategy to analyze most recent data\n todays_prices = price_volume_dataframe_dict.pop(str_date)\n actions = strategy(portfolio, date, todays_prices, commission)\n print(actions)\n # executes each action given by the strategy\n for action in actions:\n portfolio = execute_action(\n action, date, todays_prices, portfolio, commission\n )\n return portfolio\n\n\n################################################################################\n# This is a strategy used for testing the backtester that buys the lowest price\n# possible and sells all stocks at the end of the year. 
This is purely for\n# testing.\n################################################################################\n\n\n# def buy_lowest_price(portfolio, date, dataframe, commission):\n# actions = []\n# if str(date)[5:7] == \"12\":\n# if (str(date)[8:10] == \"29\") & (date.weekday() == 4):\n# for stock in portfolio.keys():\n# if (stock != \"Dollars\") & (\n# any((dataframe[dataframe[\"Company\"] == stock])[\"Price\"] != \"nan\")\n# ):\n# actions.append(\"SELL \" + str(portfolio[stock]) + \" \" + stock)\n# elif (str(date)[8:10] == \"30\") & (date.weekday() == 4):\n# for stock in portfolio.keys():\n# if (stock != \"Dollars\") & (\n# any((dataframe[dataframe[\"Company\"] == stock])[\"Price\"] != \"nan\")\n# ):\n# actions.append(\"SELL \" + str(portfolio[stock]) + \" \" + stock)\n# elif str(date)[8:10] == \"31\":\n# for stock in portfolio.keys():\n# if (stock != \"Dollars\") & (\n# any((dataframe[dataframe[\"Company\"] == stock])[\"Price\"] != \"nan\")\n# ):\n# actions.append(\"SELL \" + str(portfolio[stock]) + \" \" + stock)\n# if portfolio[\"Dollars\"] < (2 * commission):\n# return actions\n# todays_prices = dataframe[dataframe[\"Price\"] != \"nan\"]\n# todays_prices = todays_prices.sort_values(by=[\"Price\"])\n# lowest_price = round(math.ceil(todays_prices.iloc[0][2] * 100) / 100, 2)\n# company = todays_prices.iloc[0][1]\n# volume = math.floor(0.05 * todays_prices.iloc[0][3])\n# if portfolio[\"Dollars\"] >= (lowest_price + (2 * commission)):\n# quantity = min(\n# math.floor((portfolio[\"Dollars\"] - (2 * commission)) / lowest_price), volume\n# )\n# while (quantity * lowest_price + 2 * commission) > portfolio[\"Dollars\"]:\n# quantity -= 1\n# if quantity > 0:\n# actions.append(\"BUY \" + str(quantity) + \" \" + company)\n# return actions\n\n\n# portfolio = backtest(\"historical_data.db\", buy_lowest_price, 1000, commission=6.95)\n\n# print(portfolio)\n"
},
{
"alpha_fraction": 0.557356595993042,
"alphanum_fraction": 0.6097257137298584,
"avg_line_length": 28.703702926635742,
"blob_id": "8bf646c274744b85b0a1c6bfecdd72f3afcf51b3",
"content_id": "c20721f0dbc37b60fbc0936d44a276d142f89760",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 802,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 27,
"path": "/Forecast.py",
"repo_name": "CarlosAlvarezGomez/AlgorithimicTrading",
"src_encoding": "UTF-8",
"text": "import numpy as np\nfrom matplotlib import pyplot as plt\n\n\ndef forecast(price, mean, sd, days, sample, strike):\n exercised = 0\n y = [[]]\n for i in range(sample):\n latestReturn = np.random.normal(mean, sd)\n latestPrice = round(price * (1 + latestReturn), 2)\n prices = [latestPrice]\n for n in range(days - 1):\n latestReturn = np.random.normal(mean, sd)\n latestPrice = round(latestPrice * (1 + latestReturn), 2)\n prices.append(latestPrice)\n if any(i < strike for i in prices):\n exercised += 1\n y.append(prices)\n del y[0]\n return y, exercised / sample\n\n\ny, probability = forecast(41.95, 0.00050814, 0.022468102, 12, 10000, 36)\nprint(probability)\nfor prices in y:\n plt.plot(range(12), prices)\nplt.show()\n"
},
{
"alpha_fraction": 0.5347875952720642,
"alphanum_fraction": 0.5521240234375,
"avg_line_length": 34.84362030029297,
"blob_id": "4461f827dbc644ff7af7721f677b946c72c6084d",
"content_id": "ffeff1d88eba4f4e8f0a593eb7d672d7af612bc1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8710,
"license_type": "no_license",
"max_line_length": 90,
"num_lines": 243,
"path": "/Data Miner.py",
"repo_name": "CarlosAlvarezGomez/AlgorithimicTrading",
"src_encoding": "UTF-8",
"text": "import pandas as pd\nimport PyPDF2 as p2\nimport requests\nimport sqlite3\n\n################################################################################\n# This part creates a list of all tickers of the companies in the Russell 3000\n################################################################################\n\n################################################################################\n# Helper functions for creating tickers begin here\n################################################################################\n\n# is_digit(string) returns true if string is a single digit\ndef is_digit(string):\n return (\n (string == \"0\")\n | (string == \"1\")\n | (string == \"2\")\n | (string == \"3\")\n | (string == \"4\")\n | (string == \"5\")\n | (string == \"6\")\n | (string == \"7\")\n | (string == \"8\")\n | (string == \"9\")\n )\n\n\n# is_valid_ticker(text) returns true if the text has no digits, a length\n# between 1 and 5, with the exception of NYLD.A, and if it is not a company's\n# full name\ndef is_valid_ticker(text):\n if (\n (len(text) > 0)\n & ((len(text) < 6) | (text == \"NYLD.A\"))\n & (text != \"AECOM\")\n & (text != \"CAINC\")\n & (text != \"HPINC\")\n & (text != \"PETIQ\")\n & (text != \"NNINC\")\n & (text != \"ZUORA\")\n & (text != \"ZYNEX\")\n ):\n for char in list(text):\n if is_digit(char):\n return False\n return True\n else:\n return False\n\n\n################################################################################\n# Helper functions for creating tickers end here\n################################################################################\n\n# reads the elements of the PDF files containing all the tickers\nPDFfile1 = open(\"Get Data/Russell 3000 Components.pdf\", \"rb\")\nPDFfile2 = open(\"Get Data/Russell 3000 Additions 2019.pdf\", \"rb\")\nRussell_2018 = p2.PdfFileReader(PDFfile1)\nRussell_2019_additions = p2.PdfFileReader(PDFfile2)\n\ntickers = []\n\n# iterates through each word in the first pdf and adds it to the ticker list if\n# it is a ticker\nfor number in range(Russell_2018.getNumPages()):\n text = Russell_2018.getPage(number).extractText().split(\"\\n\")\n for i in range(len(text)):\n text[i] = text[i].replace(\" \", \"\")\n if is_valid_ticker(text[i]):\n tickers.append(text[i])\n\n# iterates through each word in the second pdf and adds it to the ticker list if\n# it is a ticker\nfor number in range(Russell_2019_additions.getNumPages()):\n text = Russell_2019_additions.getPage(number).extractText().split(\"\\n\")\n for i in range(len(text)):\n text[i] = text[i].replace(\" \", \"\")\n if is_valid_ticker(text[i]):\n tickers.append(text[i])\n\n# removes duplicates of companies whose names are the same as their tickers\ntickers.remove(\"RH\")\ntickers.remove(\"TYME\")\ntickers.remove(\"LYFT\")\n\n# removes tickers of companies that are not in Yahoo's database\ncompanies_not_found = \"\"\"AVHI ABAX ANCX ACXM AET AOI ARII AFSI ANDV APTI ARRS\nASNS ATHN AHL BWINB BEL BNCL BRK.B BH.A OZRK BHBK BOFI BF.A BOJA BF.B BLMT CA\nABCD CPLA CAVM CHFN CLD COBZ CIVI CVON CVG COTV CRD.B CORI DCT DDR DEPO CYS DPS\nDSW DNB ECR EDR ESIO ELLI PERY EGC EGL EVHC ECYT EGN ESRX ESND ESL FCB FNGN\nFBNK FFKT FNBG FCE.A FMI GGP GPT GNBC GEF.B GBNK GOV HYH HCOM GLF HEI.A HRG ILG\nIMDZ HDP IDTI IMPV IPCC ITG KTWO KS KERX KLXI KMG KND KLDX LEN.B LPNT LHO LFGR\nLOXO LGF.A LGF.B MBFI KORS MOG.A MB NCOM NSM MTGE NWY NFX NXEO NYLD.A NYLD NTRI\nNXTM ORIG OCLR P PAH COOL PHH PHIIK PNK PF PX QCP QSII REN REIS RSPP COL RDC\nSHLD SIR SN SCG SHLM SEND SPA SONC STBZ 
SYNT TAHO SVU SLD TSRO TRNC PAY VR VVC\nWEB VTL WRD JW.A WIN WMIH WTW WGL ZOES XCRA XL XOXO CRD.A DVMT SGYP USG\"\"\"\n\ncompanies_not_found_list = companies_not_found.split()\n\nfor ticker in companies_not_found_list:\n tickers.remove(ticker)\n\n# adds companies that were not in the original files\ntickers.append(\"SGYPQ\")\n\n################################################################################\n# This part uses urls to gather data from yahoo finance\n################################################################################\n\n# # splits sample urls in order to replace the ticker\n# prices_and_volumes_url = (\n# \"https://query1.finance.yahoo.com/v7/finance/download/XOM?period1=-25236000\"\n# + \"0&period2=1562472000&interval=1d&events=history&crumb=H2RuVtotLS3\"\n# ).split(\"XOM\")\n\n# dividends_url = (\n# \"https://query1.finance.yahoo.com/v7/finance/download/XOM?period1=-25235640\"\n# + \"0&period2=1562472000&interval=1d&events=div&crumb=HJBnDea.bVV\"\n# ).split(\"XOM\")\n\n# stock_splits_url = (\n# \"https://query1.finance.yahoo.com/v7/finance/download/XOM?period1=-25235640\"\n# + \"0&period2=1562472000&interval=1d&events=split&crumb=HJBnDea.bVV\"\n# ).split(\"XOM\")\n\n# prices_and_volumes_url_list = []\n# dividends_url_list = []\n# stock_splits_url_list = []\n\n# # iterates through each ticker, then creates and adds new urls using the sample\n# # urls\n# for i in range(len(tickers)):\n# prices_and_volumes_url_list.append(\n# prices_and_volumes_url[0] + tickers[i] + prices_and_volumes_url[1]\n# )\n# dividends_url_list.append(dividends_url[0] + tickers[i] + dividends_url[1])\n# stock_splits_url_list.append(stock_splits_url[0] + tickers[i] + stock_splits_url[1])\n\n# # prints each url in each list. Click on each url in order to download the data\n# for i in range(10):\n# wget.download(prices_and_volumes_url_list[i], \"Downloads\")\n\n# for url in dividends_url_list:\n# print(url)\n\n# for url in stock_splits_url_list:\n# print(url)\n\n################################################################################\n# This part uses the csv files to create SQL tables containing the historical\n# data from each company. THIS WILL NOT RUN IF THE FINANCIAL DATA IS NOT\n# DOWNLOADED AND IN THE CURRENT FILE\n################################################################################\n\n# connection = sqlite3.connect(\"historical_data.db\")\n# cursor = connection.cursor()\n\n# # creates each table in the database. 
This will raise an error if these tables\n# # already exist in the database, so you\n# cursor.execute(\n# \"\"\"CREATE TABLE Historical_Prices_And_Voluemes(\n# Date DATE,\n# Company TEXT,\n# Price decimal,\n# Volume int);\"\"\"\n# )\n\n# cursor.execute(\n# \"\"\"CREATE TABLE Historical_Dividends(\n# Date DATE,\n# Company TEXT,\n# Dividend decimal);\"\"\"\n# )\n# cursor.execute(\n# \"\"\"CREATE TABLE Historical_Stock_Splits(\n# Date DATE,\n# Company TEXT,\n# Split_Ratio TEXT);\"\"\"\n# )\n\n# # iterates through each ticker in the ticker list and adds the data to each\n# # table\n# for n in range(len(tickers)):\n# if n % 100 == 0:\n# print(str(n) + \" Completed\")\n# file = tickers[n] + \".csv\"\n# # opens csv files\n# prices_and_volumes = pd.read_csv(\"Historical Prices and Volumes/\" + file)\n# dividends = pd.read_csv(\"Dividends/\" + file)\n# stock_splits = pd.read_csv(\"Stock Splits/\" + file)\n# # adds prices and volumes to database\n# for i in range(len(prices_and_volumes)):\n# sql_command = (\n# \"\"\"INSERT INTO Historical_Prices_And_Voluemes (Date, Company, Price,\n# Volume)\n# VALUES ('\"\"\"\n# + str(prices_and_volumes[\"Date\"][i])\n# + \"', '\"\n# + tickers[n]\n# + \"', '\"\n# + str(prices_and_volumes[\"Close\"][i])\n# + \"', '\"\n# + (str(prices_and_volumes[\"Volume\"][i]))\n# + \"');\"\n# )\n# cursor.execute(sql_command)\n# # adds dividends to database\n# for i in range(len(dividends)):\n# sql_command = (\n# \"\"\"INSERT INTO Historical_Dividends (Date, Company, Dividend)\n# VALUES ('\"\"\"\n# + str(dividends[\"Date\"][i])\n# + \"', '\"\n# + tickers[n]\n# + \"', '\"\n# + str(dividends[\"Dividends\"][i])\n# + \"');\"\n# )\n# cursor.execute(sql_command)\n# # adds stock splits to database\n# for i in range(len(stock_splits)):\n# sql_command = (\n# \"\"\"INSERT INTO Historical_Stock_Splits (Date, Company, Split_Ratio)\n# VALUES ('\"\"\"\n# + str(stock_splits[\"Date\"][i])\n# + \"', '\"\n# + tickers[n]\n# + \"', '\"\n# + str(stock_splits[\"Stock Splits\"][i])\n# + \"');\"\n# )\n# cursor.execute(sql_command)\n\n# connection.commit()\n\n# connection.close()\n\nfor ticker in tickers:\n url = \"https://api.etrade.com/v1/market/quote/{\" + ticker + \"}\"\n r = requests.get(url)\n print(r)\n"
},
{
"alpha_fraction": 0.8482758402824402,
"alphanum_fraction": 0.8482758402824402,
"avg_line_length": 71.5,
"blob_id": "6556a92dda62c268ad281c8619cf07606a30a71e",
"content_id": "0f2e0011d6cce19b66ce57f17001a834c2730cae",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 145,
"license_type": "no_license",
"max_line_length": 122,
"num_lines": 2,
"path": "/README.md",
"repo_name": "CarlosAlvarezGomez/AlgorithimicTrading",
"src_encoding": "UTF-8",
"text": "# AlgorithimicTrading\nThis project gathers data, creates SQL tables, and backtests strategies for people interested in algorithmic stock trading\n"
}
] | 4 |
wvanamstel/twitter_oscars
|
https://github.com/wvanamstel/twitter_oscars
|
6e3c19a70ff8fb4fbcca489b2922ddf306bb617c
|
a02fd891a56349b1ba02d2e77b93cbe4a2b3fa9c
|
351bcc05f6746fc01a542d7f3f3e8566880b76cc
|
refs/heads/master
| 2016-09-06T01:43:40.150489 | 2015-02-24T00:40:13 | 2015-02-24T00:40:13 | 31,235,700 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.8104265332221985,
"alphanum_fraction": 0.829383909702301,
"avg_line_length": 211,
"blob_id": "5845321a7162affd1b584d4c8501b2ae75019734",
"content_id": "5f5d5fa21066fbdbfed43d969b3b6abe56127596",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 211,
"license_type": "no_license",
"max_line_length": 211,
"num_lines": 1,
"path": "/README.md",
"repo_name": "wvanamstel/twitter_oscars",
"src_encoding": "UTF-8",
"text": "Analysis of twitter messages sent during the 2015 Academy Awards. Ipython notebook with analysis can be accessed [here](http://nbviewer.ipython.org/github/wvanamstel/twitter_oscars/blob/master/twitter_ipy.ipynb)"
},
{
"alpha_fraction": 0.5922671556472778,
"alphanum_fraction": 0.6063268780708313,
"avg_line_length": 30.629629135131836,
"blob_id": "aab2464dd16e6859209c00f852246e24e3fde133",
"content_id": "950e4594b56d5674e06d07bd54dc94a056b0bbaa",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1707,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 54,
"path": "/twitter_listener.py",
"repo_name": "wvanamstel/twitter_oscars",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Feb 23 08:59:39 2015\n\n@author: w\n\"\"\"\n\nimport tweepy\nimport json\nfrom settings import API_KEY, API_SECRET, ACCESS_TOKEN_KEY, ACCES_TOKEN_SECRET\nfrom pymongo import MongoClient\n\n'''\nListen to the Twitter firehose and store tweets into a mongo database\n'''\nclass TwitterListener(tweepy.streaming.StreamListener):\n def __init__(self):\n # Set up mongo database\n db_client = MongoClient()\n db = db_client['oscar']\n self.collection = db['tweets']\n self.counter = 0 # number of tweets stored in DB\n \n def on_data(self, data):\n tweet = json.loads(data) # convert json format tweet to dict\n if 'lang' in tweet and tweet['lang']=='en':\n self.counter += 1\n self.collection.insert(tweet)\n # Print some things to stdout temporarily\n if self.counter % 1000 == 0: \n print self.counter\n print tweet['text']\n return True\n\n def on_error(self, error_code):\n # Print error code, but keep listening\n print 'error', error_code\n return True\n \n def on_timeout(self, status_code):\n # In case of time out keep listening\n print 'timeout', status_code\n return True\n \nif __name__ == '__main__': \n # Set up authentication\n auth = tweepy.OAuthHandler(API_KEY, API_SECRET)\n auth.set_access_token(ACCESS_TOKEN_KEY, ACCES_TOKEN_SECRET)\n \n # Construct stream instance\n listener = TwitterListener()\n stream = tweepy.Stream(auth, listener)\n stream.filter(track=['Oscars', 'Oscars2015'])\n #stream.sample() # Just get everything, no filtering"
},
{
"alpha_fraction": 0.6219768524169922,
"alphanum_fraction": 0.6456362009048462,
"avg_line_length": 34.23147964477539,
"blob_id": "1d4ff2467ff29e85b2d6b21324ca090ace25e633",
"content_id": "05d1489dfc559f12290d057114bd91c5707619cb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3804,
"license_type": "no_license",
"max_line_length": 106,
"num_lines": 108,
"path": "/twitter_analysis.py",
"repo_name": "wvanamstel/twitter_oscars",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pylab as plt\nfrom collections import Counter\nfrom pymongo import MongoClient\nfrom dateutil.parser import parse\nfrom textblob import TextBlob\nfrom textblob.sentiments import NaiveBayesAnalyzer\n\ndef get_data_mongo():\n '''\n Connect to mongo data base and put data into a data frame for analysis\n IN: null\n OUT: data frame; collected tweet data\n '''\n # Connect to mongo database and retrieve data\n client = MongoClient()\n db = client.oscar\n collection = db.tweets\n \n # Store data in dataframe\n df = pd.DataFrame(list(collection.find()))\n \n # Parse data/time column and set index\n df.created_at = df.created_at.apply(lambda x: parse(x).strftime(\"%Y-%m-%d %H:%M:%S\"))\n df.set_index(df.created_at, drop=False, inplace=True)\n \n return df\n\ndef analyse_data(df):\n '''\n Do analysis of tweet data\n IN: data frame; consisting of all collected tweet data\n OUT: stdout\n '''\n # Plot resampled frequency of tweets\n freq = pd.to_datetime(df.created_at)\n freq.index = freq\n resampled = freq.resample('10Min', how='count')\n resampled.plot(kind='bar', title='Frequency of tweets per 10 minutes', figsize=(8,5))\n \n #Find most active people tweeting\n users = df.user.apply(pd.Series)\n names = users.screen_name\n df['user_names'] = names #add column of user names to use later\n names_cnt = Counter(names).most_common(n=10)\n print '\\n'.join(['{0}\\t{1}'.format(name, cnt) for name, cnt in names_cnt])\n\n # Remove 'trending???' bots\n ind = names.apply(lambda x: 'trending' not in x)\n filtered = names[ind]\n filtered_cnt = Counter(filtered).most_common(n=10)\n print '\\n'.join(['{0}\\t{1}'.format(name, cnt) for name, cnt in filtered_cnt])\n print 'Total number of users: ', len(set(list(names.values)))\n \n # Look at top tweeter 'clickphoto6000' activity (an actual person)\n user = 'clickphoto6000'\n cl_ph = df[df.user_names==user]\n freq = pd.to_datetime(cl_ph.created_at)\n freq.index = freq\n cl_ph_resamp = freq.resample('5Min', how='count')\n cl_ph_resamp.plot(kind='bar', title='Frequency of tweets by most active user', figsize=(8,5))\n \n # Investigating the peak in tweet activity reveals that John Legend was\n # performing 'Glory' at the time\n cl_ph.index = freq\n print cl_ph['2015-02-23 03:53:00':'2015-02-23 04:03:00'].text\n \n # Find most used hashtags\n raw_text = df.text.values\n hashtags = []\n for tweet in raw_text:\n for word in tweet.split():\n if word.startswith('#') and 'oscar' not in word.lower():\n hashtags.append(word.encode('utf-8'))\n hash_cnt = Counter(hashtags).most_common(n=10)\n print '\\n'.join(['{0}\\t{1}'.format(tag, cnt) for tag, cnt in hash_cnt])\n \n \ndef sentiment(df):\n '''\n Do sentiment analysis on tweets that were about the host\n IN: data frame; tweet data\n OUT: stdout\n '''\n # Find tweets referring to the evening's host\n ind = df.text.apply(lambda x: '#nph' in x.lower() or '#neilpatrickharris' in x.lower()) \n raw_text = df[ind].text\n sentiments = []\n for tweet in raw_text.values:\n blob = TextBlob(tweet)\n sentiments.append(blob.sentiment)\n \n # Plot summarized sentiment and frequency\n indx = pd.to_datetime(df[ind].index)\n sents = [x[0] for x in sentiments]\n sent_ts = pd.Series(sents, index=indx)\n sent_ts.resample('5Min', how='sum').plot(title='Total sentiment')\n sent_ts.resample('5Min', how='count').plot(kind='bar', title='Frequency of hash tags mentioning host')\n \n print raw_text['2015-02-23 02:44:01':'2015-02-23 02:44:20']\n\nif 
__name__=='__main__':\n df = get_data_mongo()\n analyse_data(df)\n sentiment(df)"
}
] | 3 |
Joseamica/Bootcamp.github.io
|
https://github.com/Joseamica/Bootcamp.github.io
|
4119bb957cab54f91ec5e77660ea6b1d88382e89
|
f88431a81e25321d1ef508988111e223825e6285
|
d73f86789575be45e1d5aa4d8dcda070e9eb3967
|
refs/heads/master
| 2020-06-05T13:14:59.771243 | 2019-09-02T00:53:54 | 2019-09-02T00:53:54 | 192,447,450 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6477272510528564,
"alphanum_fraction": 0.6542207598686218,
"avg_line_length": 21.814815521240234,
"blob_id": "a06f998a6e76340542fea080ee748dbe8c4f9a4a",
"content_id": "6d1ee55115f052fa76dc46f7faeb3fea4f2e1bcf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 616,
"license_type": "no_license",
"max_line_length": 53,
"num_lines": 27,
"path": "/Week 14 - Intro-To-JavaScript/1/Activities/02-Evr_Python_to_JavaScript/Unsolved/2-ConditionalCheck/conditional-check.js",
"repo_name": "Joseamica/Bootcamp.github.io",
"src_encoding": "UTF-8",
"text": "var x = 1\nvar y = 10\n// Checks if one value is equal to another\nif (x == y){\n console.log(\"x is equal to 1\")\n} else {\n console.log(\"Is not\")\n}\n// Checks if one value is NOT equal to another\nif (x!=y){\n console.log(\"y is not equal\")\n}\n// Checks if one value is less than another\nif (x > y){\n console.log(\"x is bigger than y\")\n} else{\n console.log(\"y is bigger than x\")\n}\n// Checks if one value is greater than another\n\n// Checks if a value is less than or equal to another\n\n// Checks for two conditions to be met using &&\n\n// Checks if either of two conditions is met using ||\n\n// Nested if statements\n"
},
{
"alpha_fraction": 0.6863117814064026,
"alphanum_fraction": 0.6958174705505371,
"avg_line_length": 30,
"blob_id": "4acebb01f50ef74bea9c9f08738b01fe18adaae8",
"content_id": "6f61695b7c53df6dbd1441a1faac2af15a42e3d8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 526,
"license_type": "no_license",
"max_line_length": 115,
"num_lines": 17,
"path": "/Tareas/Tarea12_WebScrapping/scrape.py",
"repo_name": "Joseamica/Bootcamp.github.io",
"src_encoding": "UTF-8",
"text": "from flask import Flask\nfrom flask import render_template\n\napp = Flask(__name__)\napp.config['SEND_FILE_MAX_AGE_DEFAULT'] = 0\n\[email protected]('/')\ndef scrape(name=None):\n return render_template('index.html', name=name)\n\[email protected]_request\ndef add_header(response):\n # response.cache_control.no_store = True\n response.headers['Cache-Control'] = 'no-store, no-cache, must-revalidate, post-check=0, pre-check=0, max-age=0'\n response.headers['Pragma'] = 'no-cache'\n response.headers['Expires'] = '-1'\n return response"
},
{
"alpha_fraction": 0.6045130491256714,
"alphanum_fraction": 0.6104512810707092,
"avg_line_length": 20.512821197509766,
"blob_id": "0ecf6d105920fc3321cafbdf86145f158cc976e1",
"content_id": "3d167ec22b9453fd0496ebe93f793e3581ce450a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 842,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 39,
"path": "/Week 4 - Python/Clase_2_Python/letters to numbers.py",
"repo_name": "Joseamica/Bootcamp.github.io",
"src_encoding": "UTF-8",
"text": "import sys\nimport string\nd = dict.fromkeys(string.ascii_lowercase, 0)\n\n#cuantos numeros hay en el alfabeto\nnumber = []\n#el alfabeto en una lista (a,b,c,d,e,f,g...)\nalphabet = []\n\ncounter = 0\nfor i in d:\n counter += 1\n #agregara a la lista number cuantos numeros tiene la lista d (alfabeto)\n number.append(counter) \n\nfor letters in d:\n alphabet.append(letters)\n\n\n# print(number,len(number))\n# print(alphabet,len(alphabet))\n\n\n# for indext in \"abcdefg\":\n# #alphabet[index] = number[index]\n# print(indext)\n# if indext == number[i]:\n# pass \nnew_dict = {}\nfor i in range(26):\n new_dict[alphabet[i]]=number[i]\n #print(i)\n #print(letters[i])\n#print(new_dict)\n\ndef check_dict(letter,new_dict):\n return new_dict[letter]\n\nprint(check_dict('p',new_dict))\n\n "
},
{
"alpha_fraction": 0.6532467603683472,
"alphanum_fraction": 0.6779220700263977,
"avg_line_length": 27.518518447875977,
"blob_id": "c9c299a813919a7c2dee9c9a67e6c337a0f14275",
"content_id": "cd1d1aa01c6b7afd61cd5433805b5b5cc45ed7b4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 770,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 27,
"path": "/Week 14 - Intro-To-JavaScript/3/Activities/01-Evr_D3_Select/Unsolved/static/js/index.js",
"repo_name": "Joseamica/Bootcamp.github.io",
"src_encoding": "UTF-8",
"text": "var text1 = d3.select(\".text1\").text();\nconsole.log(text1);\n\nvar text2 = d3.select(\"#text2\").text();\nconsole.log(text2);\n\nd3.select(\".text1\").text(\"Hey, I changed this\");\n\nvar myLink = d3.select(\".my-link\").html();\nconsole.log(myLink);\n\nvar myLinkAnchor = d3.select(\".my-link>a\");\nconsole.log(myLinkAnchor)\n\n//var myLinkAnchorAttribute = d3.select(\".my-link>a\").attr(\"href\");\nvar myLinkAnchorAttribute = myLinkAnchor.attr(\"href\");\nconsole.log(myLinkAnchorAttribute)\n\nmyLinkAnchor.attr(\"href\", \"https://python.org\")\n\nd3.select(\".my-link>a\").attr(\"href\", \"https://nytimes.org\").text(\"Link to ny times\")\n\nd3.selectAll(\"li\").style(\"color\", \"blue\");\nvar li1 = d3.select(\"ul\").append(\"li\");\nli1.text(\"a new item has added\");\n\nd3.selectAll(\"ul\").append(\"li\").attr(\"href\", \"a\")\n"
},
{
"alpha_fraction": 0.5388600826263428,
"alphanum_fraction": 0.5941278338432312,
"avg_line_length": 16.57575798034668,
"blob_id": "0dc85fc50c6465e8da72be599937e7ec61ef6a1e",
"content_id": "3bd06a9a1d84c37e90640915456eac6dfb8d5cce",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 579,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 33,
"path": "/Week 14 - Intro-To-JavaScript/1/Activities/06-Stu_Movie_Scores/Unsolved/static/js/index.js",
"repo_name": "Joseamica/Bootcamp.github.io",
"src_encoding": "UTF-8",
"text": "// Array of movie ratings\nvar movieScores = [\n 4.4,\n 3.3,\n 5.9,\n 8.8,\n 1.2,\n 5.2,\n 7.4,\n 7.5,\n 7.2,\n 9.7,\n 4.2,\n 6.9\n];\n\n// Starting a rating count\nvar sum = 0;\nvar z = 0;\n// Arrays to hold movie scores\nvar goodMovieScores = [];\nvar okMovieScores = [];\nvar badMovieScores = [];\n\nfor (z; z < movieScores.length; z++){\n if (movieScores[z] < 5){\n badMovieScores.push(movieScores[z]);\n } else if (movieScores[z] > 5 && movieScores[z] <= 7.5){\n okMovieScores.push(movieScores[z]);\n } else if (movieScores[z] > 7.6){\n goodMovieScores.push(movieScores[z]);\n }\n}"
},
{
"alpha_fraction": 0.7180579304695129,
"alphanum_fraction": 0.7265757918357849,
"avg_line_length": 25.704545974731445,
"blob_id": "f62c2ba65d0d0ce1ac15dd255fcd922cec035ffa",
"content_id": "22bc7836f06aae9ce414fcef8961359f2d93859e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1174,
"license_type": "no_license",
"max_line_length": 115,
"num_lines": 44,
"path": "/Tareas/Tarea12_WebScrapping/app.py",
"repo_name": "Joseamica/Bootcamp.github.io",
"src_encoding": "UTF-8",
"text": "from flask import Flask, render_template, redirect\n#import mars as m\nimport scrape_mars\n# Import our pymongo library, which lets us connect our Flask app to our Mongo database.\nimport pymongo\nfrom datetime import datetime\n\n\n\n# Create an instance of our Flask app.\napp = Flask(__name__)\n\n# Create connection variable\nconn = 'mongodb://localhost:27017'\n\n# Pass connection to the pymongo instance.\nclient = pymongo.MongoClient(conn)\n\n# Connect to a database. Will create one if not already available.\n\n\n# Set route\napp = Flask(__name__)\napp.config['SEND_FILE_MAX_AGE_DEFAULT'] = 0\n\[email protected]('/')\ndef scrape(name=None):\n return render_template('index.html', name=name)\n\[email protected]_request\ndef add_header(response):\n # response.cache_control.no_store = True\n response.headers['Cache-Control'] = 'no-store, no-cache, must-revalidate, post-check=0, pre-check=0, max-age=0'\n response.headers['Pragma'] = 'no-cache'\n response.headers['Expires'] = '-1'\n return response\n\n\[email protected]('/scrapeMars')\ndef scrapeMars():\n\tmarsData = scrape_mars.scrapeMars()\n\tscrape_mars.storeInDb(marsData)\n\tdata = scrape_mars.getData()\n\treturn render_template('index.html', data=data)"
},
{
"alpha_fraction": 0.7793103456497192,
"alphanum_fraction": 0.7793103456497192,
"avg_line_length": 23.16666603088379,
"blob_id": "c49c3ff484a1824a0aaf8aac750c3a775b432f76",
"content_id": "3ba68c37cfcddef1951f4e644cc0d8e7bebc22b8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 145,
"license_type": "no_license",
"max_line_length": 70,
"num_lines": 6,
"path": "/README.md",
"repo_name": "Joseamica/Bootcamp.github.io",
"src_encoding": "UTF-8",
"text": "# Tec DATA BOOTCAMP\nWelcome to my Tec Bootcamp Repository.\n\nI will constantly upload all the made in class exercices and homework.\n\nFollow me ;)\n"
},
{
"alpha_fraction": 0.8484848737716675,
"alphanum_fraction": 0.8484848737716675,
"avg_line_length": 32,
"blob_id": "ca57d116ef05c4508c756e311467e6c64ed2266a",
"content_id": "4b12848032536f40e4992ce8ee3ec52ce913fedb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 66,
"license_type": "no_license",
"max_line_length": 40,
"num_lines": 2,
"path": "/Tareas/Tarea11_Web/hw/README.md",
"repo_name": "Joseamica/Bootcamp.github.io",
"src_encoding": "UTF-8",
"text": "# latitude_visualization\nThis is a latitude visualisation webpage\n"
},
{
"alpha_fraction": 0.7752808928489685,
"alphanum_fraction": 0.7752808928489685,
"avg_line_length": 89,
"blob_id": "6861191b9b3153e5ba6ae1c2c45d27b9c7f1499c",
"content_id": "7eb7d4bbc3e93d9aa561832ae555005943a47e7c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 89,
"license_type": "no_license",
"max_line_length": 89,
"num_lines": 1,
"path": "/Week 11 - HTML & CSS/2/Justin Bieber – Purpose Available Now_files/visitor-country.js",
"repo_name": "Joseamica/Bootcamp.github.io",
"src_encoding": "UTF-8",
"text": "// Surprise! Currently nothing's here, but we need this so we can call wp_localize_script"
},
{
"alpha_fraction": 0.5753012299537659,
"alphanum_fraction": 0.5813252925872803,
"avg_line_length": 26.5,
"blob_id": "5508fa3d92cc54a7d81a0f02da62896c369417d6",
"content_id": "24a628043c16822282549e52e0d7c0e88a9f48f2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 332,
"license_type": "no_license",
"max_line_length": 193,
"num_lines": 12,
"path": "/Week 4 - Python/Clase_3_Python/Cereal/cereal.py",
"repo_name": "Joseamica/Bootcamp.github.io",
"src_encoding": "UTF-8",
"text": "import pandas as pd\n\n\ndf = pd.read_csv(\"./cereal.csv\")\ndf2 = pd.read_csv(\"./cereal_bonus.csv\", sep=\",\", names=[\"Names\", \"mfr\", \"type\",\"calores\",\"protein\",\"fat\",\"sodium\",\"fiber\",\"carbo\",\"sugars\",\"potass\",\"vitamins\",\"shelf\",\"weight\",\"cups\",\"rating\"])\n\n# sprint(df)\n\n\nfor i in df['fiber']:\n if i >= 5:\n print(df.iloc[[i]])\n\n\n"
},
{
"alpha_fraction": 0.49737611413002014,
"alphanum_fraction": 0.5030882954597473,
"avg_line_length": 37.763275146484375,
"blob_id": "9b6c7cf2c9e932838a810b8c088ba912d5407e8a",
"content_id": "5abdfd4caeaa253c34de197f6d89d1142adebbf7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 43066,
"license_type": "no_license",
"max_line_length": 244,
"num_lines": 1111,
"path": "/Week 11 - HTML & CSS/2/Justin Bieber – Purpose Available Now_files/umg-aal-wp.js",
"repo_name": "Joseamica/Bootcamp.github.io",
"src_encoding": "UTF-8",
"text": "/********************************************\n *\n * umgAALWP\n * JS bridge between 3rd party plugins and umgAAL\n *\n */\n\n//\n// document init\n//\njQuery(document).ready(function() {\n\n if(typeof(umgAAL) != 'undefined') {\n\n //\n // init\n //\n if(parseInt(UMGAALWP.debug_enabled)) {\n UMGAALWP.helpers.debug = true;\n umgAAL.helpers.debug_enabled = true;\n UMGAALWP.helpers.debugOutput('Debug output enabled');\n }else{\n UMGAALWP.helpers.debug = 0;\n umgAAL.helpers.debug_enabled = 0;\n }\n\n //\n // set default values for WP platform\n //\n umgAAL.platform = umgAAL.standardized.PLATFORMS.WORDPRESS;\n umgAAL.user.loginPlatform = 'site'; // assume standard site reg (NB: if AE connect enabled, it's init function will set this to AE instead)\n\n //\n // iterate over all enabled plugins and run any init needed for it\n //\n for(var plugin_identifier in UMGAALWP.plugins) {\n if(UMGAALWP.plugins[plugin_identifier]) {\n // plugin is enabled, run any initialization code - will call a function 'UMGAALWP.plugins.{plugin_idenifier}_init'\n // eg: 'UMGAALWP.plugins.ae_connect_init' or 'UMGAALWP.plugins.umg_ecrm_init'\n UMGAALWP.helpers.executeFunctionByName('UMGAALWP.plugins.' + plugin_identifier + '_init', window);\n }\n }\n\n } else {\n\n // umgAAL object not found - output this error regardless of debug setting to facilitate debugging on prod servers\n console.log('UMGAALWP: umgAAL object not found on page, init functions have not been run');\n\n }\n\n});\n\n//\n// helpers\n//\nUMGAALWP.helpers = {\n debug: false, // default off\n\n debugOutput: function(message, object) {\n \tif(UMGAALWP.helpers.debug) {\n \t\tif(typeof(object) != 'undefined') {\n \t\t\tconsole.log('UMGAALWP: ' + message, object);\n \t\t} else {\n \t\t\tconsole.log('UMGAALWP: ' + message);\n \t\t}\n \t}\n },\n\n // execute a function by name (handles namespaced function names, eg \"My.Namespace.functionName\")\n executeFunctionByName: function(functionName, context /*, args */) {\n \tvar args = Array.prototype.slice.call(arguments, 2);\n var namespaces = functionName.split(\".\");\n var func = namespaces.pop();\n \tfor (var i = 0; i < namespaces.length; i++) {\n \t\tif(typeof context != \"undefined\") {\n \t \t\tcontext = context[namespaces[i]];\n \t\t} else {\n \t\t\t\treturn false;\n \t\t}\n \t}\n \tif(typeof context != \"undefined\" && typeof context[func] != \"undefined\") {\n \t\treturn context[func].apply(context, args);\n \t}\n \treturn false;\n }\n};\n\n\n\n/********************************************\n *\n * Plugin integrations\n *\n */\n\n//\n// AE Connect\n//\n// init function\nUMGAALWP.plugins.ae_connect_init = function() {\n // set user login platform\n umgAAL.user.loginPlatform = 'AE';\n // set user details if available\n if(typeof(AEJSWP) != 'undefined' && typeof(AEJSWP.aeJS) != 'undefined' && typeof(AEJSWP.aeJS.user.data) != 'undefined') {\n umgAAL.user.loginStatus = 'yes';\n if(typeof(AEJSWP.aeJS.user.data.ID) != 'undefined') {\n umgAAL.user.loginPlatformUserID = AEJSWP.aeJS.user.data.ID;\n }\n if(typeof(AEJSWP.aeJS.user.data.Email) != 'undefined') {\n umgAAL.user.email = AEJSWP.aeJS.user.data.Email;\n }\n if(typeof(AEJSWP.aeJS.user.data.VerifiedEmail) != 'undefined') {\n umgAAL.user.email = AEJSWP.aeJS.user.data.VerifiedEmail;\n }\n if(typeof(AEJSWP.aeJS.user.data.BirthDate) != 'undefined') {\n umgAAL.user.birthDate = AEJSWP.aeJS.user.data.BirthDate;\n }\n if(typeof(AEJSWP.aeJS.user.data.BirthDate) != 'undefined') {\n umgAAL.user.zip = AEJSWP.aeJS.user.data.PostCode;\n }\n 
if(typeof(AEJSWP.aeJS.user.data.BirthDate) != 'undefined') {\n umgAAL.user.state = AEJSWP.aeJS.user.data.State;\n }\n if(typeof(AEJSWP.aeJS.user.data.BirthDate) != 'undefined') {\n umgAAL.user.country = AEJSWP.aeJS.user.data.Country;\n }\n } else {\n umgAAL.user.loginStatus = 'no';\n }\n};\n// AE Connect uses a trackingDelegate.AEConnect object to fire events\nvar trackingDelegate = window.trackingDelegate || {};\ntrackingDelegate.AEConnect = {\n is_enabled: function() {\n return UMGAALWP.plugins.ae_connect && typeof(umgAAL) != 'undefined';\n },\n loginModalShown: function(call_to_action) {\n if(trackingDelegate.AEConnect.is_enabled) {\n umgAAL.track.account_login_modal_shown( call_to_action );\n }\n\t},\n registrationModalShown: function(call_to_action) {\n if(trackingDelegate.AEConnect.is_enabled) {\n umgAAL.track.account_registration_modal_shown( call_to_action );\n }\n\t},\n\tlogin: function(data) {\n if(trackingDelegate.AEConnect.is_enabled) {\n UMGAALWP.plugins.ae_connect_init(); // call the init function just before firing event to capture any updated user details\n umgAAL.user.loginProvider = (data.service_name == 'email') ? umgAAL.user.loginProvider = umgAAL.standardized.SOCIAL_PLATFORMS.SITE : data.service_name; // happy coincedence: ae social service names === umgAAL.standardized.SOCIAL_PLATFORMS\n umgAAL.track.login_success( umgAAL.standardized.SOURCES.PLATFORM_LOGIN );\n }\n\t},\n registration: function(data) {\n if(trackingDelegate.AEConnect.is_enabled) {\n UMGAALWP.plugins.ae_connect_init();\n\n umgAAL.track.register_success( umgAAL.standardized.SOURCES.PLATFORM_REGISTRATION );\n }\n\t},\n formErrors: function(data) {\n if(trackingDelegate.AEConnect.is_enabled) {\n UMGAALWP.plugins.ae_connect_init();\n var form_type;\n switch(data.form_type) {\n case 'registration':\n form_type = umgAAL.standardized.FORM_TYPES.REGISTRATION;\n break;\n case 'login':\n form_type = umgAAL.standardized.FORM_TYPES.LOGIN;\n break;\n case 'mailinglist':\n form_type = umgAAL.standardized.FORM_TYPES.COMMUNICATIONS;\n break;\n }\n umgAAL.track.form_errors( form_type, data.form_errors );\n }\n\t},\n accountConnect: function(data) {\n if(trackingDelegate.AEConnect.is_enabled) {\n UMGAALWP.plugins.ae_connect_init();\n var social_provider = ( data.service_name == 'email' ) ? 
umgAAL.standardized.SOCIAL_PLATFORMS.SITE : data.service_name;\n umgAAL.track.account_social_connect_success( umgAAL.standardized.SOURCES.PLATFORM_REGISTRATION, social_provider );\n }\n }\n};\n\n\n//\n// UMG eCRM\n//\ntrackingDelegate.UMGECRM = {\n is_enabled: function() {\n return UMGAALWP.plugins.umg_ecrm && typeof(umgAAL) != 'undefined';\n },\n optin_success: function(data) {\n if(trackingDelegate.UMGECRM.is_enabled) {\n umgAAL.track.comm_optin_success( umgAAL.standardized.SOURCES.PLATFORM_COMMUNICATIONS, data.list_id, data.list_name, data.business_unit );\n }\n }\n};\n//\n// AE Social Follow\ntrackingDelegate.AESocialFollow = {\n is_enabled: function() {\n return UMGAALWP.plugins.ae_social_follow && typeof(umgAAL) != 'undefined';\n },\n followToggle:function(data) {\n if(trackingDelegate.AESocialFollow.is_enabled) {\n\n }\n },\n follow:function(data){\n if(trackingDelegate.AESocialFollow.is_enabled) {\n\n }\n },\n unFollow:function(data){\n if(trackingDelegate.AESocialFollow.is_enabled) {\n\n }\n },\n followLoginClicked:function(data){\n if(trackingDelegate.AESocialFollow.is_enabled) {\n\n }\n },\n socialAccountConnect:function(data){\n if(trackingDelegate.AESocialFollow.is_enabled) {\n data = JSON.parse(data);\n var social_provider;\n switch(data.account_type) {\n case 'spotify':\n social_provider = umgAAL.standardized.SOCIAL_PLATFORMS.SPOTIFY;\n break;\n case 'youtube':\n social_provider = umgAAL.standardized.SOCIAL_PLATFORMS.YOUTUBE;\n break;\n case 'twitter':\n social_provider = umgAAL.standardized.SOCIAL_PLATFORMS.TWITTER;\n break;\n case 'instagram':\n social_provider = umgAAL.standardized.SOCIAL_PLATFORMS.INSTAGRAM;\n break;\n }\n var form_type = umgAAL.standardized.FORM_TYPES.LOGIN;\n umgAAL.track.account_social_connect_success( form_type, social_provider );\n }\n },\n formErrors:function(data){\n if(trackingDelegate.AESocialFollow.is_enabled) {\n\n }\n }\n};\n\n\n// Login for content\ntrackingDelegate.LoginForContent = {\n is_enabled: function() {\n return UMGAALWP.plugins.login_for_content && typeof(umgAAL) != 'undefined';\n },\n contentUnlocking:function(data){\n data = JSON.parse(data);\n if(trackingDelegate.LoginForContent.is_enabled) {\n umgAAL.track.unlock_content_success( data.media_type, data.media_id, data.title, umgAAL.user )\n }\n }\n};\n// Subscribtions\ntrackingDelegate.Subscriptions = {\n is_enabled: function() {\n return UMGAALWP.plugins.subscriptions && typeof(umgAAL) != 'undefined';\n },\n //COMMUNICATION_OPTIN\n communicationOptin:function(data){\n if(trackingDelegate.subscriptions.is_enabled) {\n data = JSON.parse(data);\n umgAAL.track.comm_optin_success( data.source,data.list_id,data.business_unit,data.subscriber_list_name );\n }\n }\n};\n\n// UMG Live\ntrackingDelegate.UMGLive = {\n is_enabled: function() {\n return UMGAALWP.plugins.umg_live && typeof(umgAAL) != 'undefined';\n },\n //COMMUNICATION_OPTIN\n buyTicketsLink:function(data){\n if(trackingDelegate.UMGLive.is_enabled) {\n data = JSON.parse(data);\n umgAAL.track.tour_store_link(data.platform,data.date,data.location,data.tier,data.link);\n }\n },\n externalLink:function(data){\n if(trackingDelegate.UMGLive.is_enabled) {\n data = JSON.parse(data);\n umgAAL.track.outbound_link(data.location);\n }\n }\n};\n\n//BANDs In Town\n(function($) {\n \"use strict\";\n if (UMGAALWP.plugins.bands_in_town && typeof(umgAAL) != 'undefined' )\n {\n $( document ).on( \"click\", \".bit-event-data, .bit-rsvp\", function() {\n umgAAL.track.share('facebook','page','',document.title);\n });\n $( document ).on( \"click\", 
\".bit-track-artist-header\", function() {\n var dest_url = $(this).attr('href');\n umgAAL.track.outbound_link( dest_url );\n });\n $( document ).on( \"click\", \".bit-fb-share\", function() {\n umgAAL.track.share('facebook','page','',document.title);\n });\n $( document ).on( \"click\", \".bit-twitter-share\", function() {\n umgAAL.track.share('twitter','page','',document.title);\n });\n\n $( document ).on( \"click\", \".bit-buy-tix\", function() {\n var date = '', location = '', ticket_tier = 'N/A', dest_url = '', platform = 'TourDates';\n var $table_row = $(this).closest('tr');\n dest_url = $(this).attr('href');\n\n location = $table_row.find('.bit-location').text();\n\n var fmt = new DateFormatter();\n date = fmt.parseDate(new Date().getFullYear() + ' ' + $table_row.find('.bit-date').text(), 'Y M d');\n var month = (\"0\" + (date.getMonth() + 1)).slice(-2)\n var day = (\"0\" + (date.getDate())).slice(-2);\n var year = date.getFullYear();\n if(new Date(date).getTime() < new Date().getTime())\n {\n year = year +1;\n }\n if ( isNaN(new Date(date)) === false)\n {\n date = year+'-'+month+'-'+day;\n }\n else\n {\n date = '';\n }\n umgAAL.track.tour_store_link(platform,date,location,ticket_tier,dest_url);\n });\n }\n})(jQuery);\n//GIGPress\n(function($) {\n \"use strict\";\n if (UMGAALWP.plugins.gigpress && typeof(umgAAL) != 'undefined' )\n {\n $( document ).on( \"click\", \".gigpress-related-item a\", function() {\n umgAAL.track.outbound_link( $(this).attr('href') );\n });\n\n $( document ).on( \"click\", \".gigpress-venue a\", function() {\n umgAAL.track.outbound_link( $(this).attr('href') );\n });\n $( document ).on( \"click\", \".gigpress-address\", function() {\n umgAAL.track.outbound_link( $(this).attr('href') );\n });\n $( document ).on( \"click\", \".gigpress-show-related a\", function() {\n umgAAL.track.outbound_link( $(this).attr('href') );\n });\n $( document ).on( \"click\", \".gigpress-tickets-link\", function() {\n var date = '',month = '',day = '',year = '', location = '', ticket_tier = 'N/A', dest_url = '', platform = 'gigpress';\n dest_url = $(this).attr('href');\n if (UMGAALWP.data.gigpress.shows && typeof(UMGAALWP.data.gigpress.shows) !== 'undefined')\n {\n if (UMGAALWP.data.gigpress.shows.venue_city !== null)\n {\n location = UMGAALWP.data.gigpress.shows.venue_city + ',';\n }\n if (UMGAALWP.data.gigpress.shows.venue_state !== null)\n {\n location = location + UMGAALWP.data.gigpress.shows.venue_state + ',';\n }\n if (UMGAALWP.data.gigpress.shows.venue_country !== null)\n {\n location = location + UMGAALWP.data.gigpress.shows.venue_country + ',';\n }\n date = UMGAALWP.data.gigpress.shows.show_date;\n }\n else\n {\n var fmt = new DateFormatter();\n var $table_row = $(this).closest('tr');\n var $table_row_data = $table_row.prev('tr');\n\n if($table_row.length>0 && $table_row_data.length>0)\n {\n date = fmt.parseDate($table_row_data.find('.gigpress-date').text(), UMGAALWP.data.gigpress.options.date_format);\n\n location = $table_row_data.find('.gigpress-city').text() + ',' + $table_row_data.find('.gigpress-country').text();\n month = (\"0\" + (date.getMonth() + 1)).slice(-2)\n day = (\"0\" + (date.getDate())).slice(-2);\n year = date.getFullYear();\n\n if ( isNaN(new Date(date)) === false)\n {\n date = year+'-'+month+'-'+day;\n }\n else\n {\n date = '';\n }\n }\n }\n umgAAL.track.tour_store_link(platform,date,location,ticket_tier,dest_url);\n });\n }\n})(jQuery);\n\n//New Royalslider\n(function($) {\n \"use strict\";\n if (UMGAALWP.plugins.new_royalslider && typeof(umgAAL) != 
'undefined' )\n {\n $(window).load(function() {\n $(document).ready(function(){\n //Get Slider Title From AAL JSON\n function get_royal_Slider_title(id){\n for (var i = 0; i < UMGAALWP.data.royalslider.length; i++) {\n if (UMGAALWP.data.royalslider[i].id === id)\n {\n return UMGAALWP.data.royalslider[i].name;\n }\n }\n return '';\n }\n //Get loop though all Sliders on the Page\n $('.royalSlider').each(function() {\n var id = $(this).attr('id').replace(\"new-royalslider-\", \"\");\n var royal_Slider_title = get_royal_Slider_title(id);\n umgAAL.track.photo_gallery_view( id, royal_Slider_title);\n });\n });\n });\n }\n})(jQuery);\n\n//Nextgen gallery\n(function($) {\n \"use strict\";\n if (UMGAALWP.plugins.nextgen_gallery && typeof(umgAAL) != 'undefined' )\n {\n $(window).load(function() {\n $(document).ready(function(){\n $('.ngg-galleryoverview').each(function(){\n umgAAL.track.photo_gallery_view( '',window.location.href);\n });\n $('.ngg-navigation a').click(function(){\n umgAAL.track.photo_gallery_load_more( '',window.location.href,'',$(this).data('pageid'));\n });\n });\n });\n }\n})(jQuery);\n\n//ADD THIS\n(function($) {\n \"use strict\";\n var addthisAAL = (window.addthisAAL) ? window.addthisAAL : {};\n if (UMGAALWP.plugins.add_this && typeof(umgAAL) != 'undefined' )\n {\n addthisAAL = {\n init: function () {\n if (typeof(addthis) != 'undefined' ){\n addthis.addEventListener('addthis.ready', addthisAAL.addthisReady);\n addthis.addEventListener('addthis.menu.share', addthisAAL.shareEventHandler);\n }\n },\n addthisReady : function(e){\n\n },\n shareEventHandler : function(e){\n if (e.type == 'addthis.menu.share') {\n umgAAL.track.share(e.data.service.replace(\"_share\", \"\"),'page','',document.title);\n }\n }\n };\n //Cant run till after files are loaded as Add this is loaded in the footer so wont work\n $(window).load(function () {\n addthisAAL.init();\n });\n }\n})(jQuery);\n\n//epoch comments\n(function($) {\n \"use strict\";\n\n var oldXHR;\n function newXHR() {\n var realXHR = new oldXHR();\n realXHR.addEventListener(\"readystatechange\", function() {\n if(realXHR.readyState===4 && realXHR.status===200\n && realXHR.responseURL.indexOf(\"epoch-api\") !== -1 &&\n ( realXHR.responseURL.indexOf(\"submit_comment\") !== -1 ))\n {\n umgAAL.track.comment('page',epoch_vars.post_id,document.title);\n }\n }, false);\n return realXHR;\n }\n $(window).load(function() {\n //If comments on page and epoch tracking enabled track Ajax comment post events\n if(typeof(UMGAALWP.plugins.epoch) !== 'undefined' && UMGAALWP.plugins.epoch && typeof(epoch_vars) !== 'undefined') {\n //Detect Ajax calls to EPOCH\n oldXHR = window.XMLHttpRequest;\n window.XMLHttpRequest = newXHR;\n }\n });\n\n})(jQuery);\n\n//Tracking for You tube and vimeo\n var tag = document.createElement('script');\n\n tag.src = \"//www.youtube.com/player_api\";\n var firstScriptTag = document.getElementsByTagName('script')[0];\n firstScriptTag.parentNode.insertBefore(tag, firstScriptTag);\n\n(function($) {\n \"use strict\";\n var vimeoAAL = (window.vimeoAAL) ? window.vimeoAAL : {};\n var youtubeAAL = (window.youtubeAAL) ? 
window.youtubeAAL : {};\n //var youtubeAAL = {};\n var YTdeferred = $.Deferred();\n window.onYouTubeIframeAPIReady = function() {\n YTdeferred.resolve(window.YT);\n };\n\n youtubeAAL = {\n players : [],\n details : {},\n first : false,\n init: function () {\n youtubeAAL.setIframes();\n $('body').bind(\"DOMSubtreeModified\", youtubeAAL.domChange);\n },\n youtube_parser : function (url){\n var regExp = /^.*((youtu.be\\/)|(v\\/)|(\\/u\\/\\w\\/)|(embed\\/)|(watch\\?))\\??v?=?([^#\\&\\?]*).*/;\n var match = url.match(regExp);\n return (match&&match[7].length===11)? match[7] : false;\n },\n youtubeIframe : function (potentialYouTubeVideo) {\n var potentialYouTubeVideoSrc = potentialYouTubeVideo.src || '';\n if( potentialYouTubeVideoSrc.indexOf( 'youtube.com/embed/' ) > -1 ||\n potentialYouTubeVideoSrc.indexOf( 'youtube.com/v/' ) > -1 ) {\n return true;\n }\n return false;\n },\n setPlayer : function(iframe, ytid, iframe_id){\n var player = new YT.Player($(iframe).attr('id'), {\n events: {\n 'onReady': function(){\n $(iframe).data('aal-tracking', 'true');\n $(iframe).data('aal-tracking-yt-id', ytid);\n },\n //youtubeAAL.onReady,\n 'onStateChange':youtubeAAL.onStateChange,\n }\n });\n youtubeAAL.players[ytid] = player;\n player.details = {\n 'id' : ytid,\n 'title' : '',\n 'paused' : false,\n 'progress25' : false,\n 'progress50' : false,\n 'progress75' : false,\n 'progress90' : false,\n 'progress100': false,\n 'postion_timer' : null,\n 'duration' : 0,\n 'states_array' : [],\n 'current_video_postion' : 0\n };\n },\n onStateChange : function(e){\n var player = e.target;\n var player_data = youtubeAAL.getVideoData(player);\n var video_data = player_data.videoData;\n player.details.duration = player_data.duration;\n\n\n if ((e.data === 1\n && (player.details.states_array[player.details.states_array.length-1] !== 2)\n && player.details.states_array[player.details.states_array.length-1] !== 3)\n || (e.data === 1 && player.details.states_array[player.details.states_array.length-1] === 3\n && (player.details.states_array[player.details.states_array.length-2] === -1 || player.details.states_array.length < 2) )){\n\n umgAAL.track.video('youtube','play',video_data['video_id'], video_data['title']);\n\n }else if (e.data === 2 && player.details.states_array[player.details.states_array.length-1] === 1){\n umgAAL.track.video('youtube','pause',video_data['video_id'], video_data['title']);\n }else if (e.data === 0 && player.details.states_array[player.details.states_array.length-1] === 1){\n umgAAL.track.video('youtube','stop',video_data['video_id'], video_data['title']);\n }\n\n if (e.data === 1){\n player.details.postion_timer = setInterval(function() {\n var new_percent = Math.round((player_data.currentTime/player.details.duration)*100);\n if (player.details.percent_played !== new_percent)\n {\n switch(new_percent) {\n case 25:\n umgAAL.track.video('youtube','25%',video_data['video_id'], video_data['title']);\n break;\n case 50:\n umgAAL.track.video('youtube','50%',video_data['video_id'], video_data['title']);\n break;\n case 75:\n umgAAL.track.video('youtube','75%',video_data['video_id'], video_data['title']);\n break;\n case 90:\n umgAAL.track.video('youtube','90%',video_data['video_id'], video_data['title']);\n break;\n case 100:\n umgAAL.track.video('youtube','100%',video_data['video_id'], video_data['title']);\n break;\n }\n }\n\n if (player.details.current_video_postion===player_data.currentTime){\n clearInterval(player.details.postion_timer);\n }\n 
player.details.current_video_postion=player_data.currentTime;\n player.details.percent_played = new_percent;\n }, 1000);\n }\n\n if (player.details.states_array.length === 5)\n {\n player.details.states_array.shift();\n }\n player.details.states_array.push(e.data);\n\n if (e.data === 0 || e.data === 2)\n {\n player.details.states_array = [];\n }\n\n player.details.current_video_postion = player_data.currentTime;\n\n },\n setIframes : function(){\n $('iframe').each(function(index, iframe) {\n var yt_id = youtubeAAL.youtube_parser(iframe.src);\n if ((yt_id && $(this).data('aal-tracking') === undefined) || (yt_id && ($(this).data('aal-tracking-yt-id') !==yt_id)))\n {\n\n if ($(this).attr('id') === undefined)\n {\n $(this).attr('id', 'player-'+yt_id);\n }\n\n if ($(this).attr('src').indexOf('enablejsapi=1') === -1){\n if ($(this).attr('src').indexOf('?') === -1){\n $(this).attr('src', $(this).attr('src') + '?enablejsapi=1');\n }else\n {\n $(this).attr('src', $(this).attr('src') + '&enablejsapi=1');\n }\n }\n youtubeAAL.setPlayer($(this)[0],yt_id, $(this).attr('id'));\n }\n });\n },\n domChange : function (){\n youtubeAAL.setIframes();\n },\n\n getVideoData: function(videoObj) {\n /**\n * Get YouTube Video datablock from video object\n * - reason for this function is that when other YT.Player are initiating the video then the API functions are not available\n * - such as getDuration()\n */\n for (var prop in videoObj) {\n if(videoObj.hasOwnProperty(prop)) {\n var propValue = videoObj[prop];\n if(propValue && typeof propValue !== 'undefined' && typeof propValue.videoData !== 'undefined'\n && typeof propValue.duration !== 'undefined') {\n return propValue;\n }\n }\n }\n return false;\n }\n };\n\n vimeoAAL = {\n iframes : [],\n players : [],\n details : {},\n\n init: function () {\n //Set iframes to track\n vimeoAAL.setIframes();\n //Look for new iframes\n $('body').bind(\"DOMSubtreeModified\", vimeoAAL.domChange);\n },\n removeURLParameter : function (url, parameter) {\n //prefer to use l.search if you have a location/link object\n var urlparts= url.split('?');\n if (urlparts.length>=2) {\n\n var prefix= encodeURIComponent(parameter)+'=';\n var pars= urlparts[1].split(/[&;]/g);\n\n //reverse iteration as may be destructive\n for (var i= pars.length; i-- > 0;) {\n //idiom for string.startsWith\n if (pars[i].lastIndexOf(prefix, 0) !== -1) {\n pars.splice(i, 1);\n }\n }\n\n url= urlparts[0] + (pars.length > 0 ? '?' 
+ pars.join('&') : \"\");\n return url;\n } else {\n return url;\n }\n },\n setIframes : function(){\n $.each($('iframe'), function (index, iframe) {\n var patt = /https?:\\/\\/(?:www\\.|player\\.)?vimeo.com\\/(?:channels\\/(?:\\w+\\/)?|groups\\/([^\\/]*)\\/videos\\/|album\\/(\\d+)\\/video\\/|video\\/|)(\\d+)(?:$|\\/|\\?)/;\n //See if iframe is vimio\n if(patt.test(iframe.src ) && $(this).data('aal-tracking') === undefined){\n $(this).data('aal-tracking', 'true');\n\n var url = $(this).attr('src');\n\n var details = {\n 'id' : '',\n 'title' : '',\n 'paused':false,\n 'progress25' : false,\n 'progress50' : false,\n 'progress75' : false,\n 'progress90' : false,\n 'progress100' : false,\n 'iframe':iframe\n };\n //console.log('PLAYER SET');\n var aal_vimeo_player = new Vimeo.Player($(this)[0]);\n aal_vimeo_player.on('timeupdate', vimeoAAL.onTimeUpdate);\n aal_vimeo_player.aal_details = details;\n\n aal_vimeo_player.on('play', vimeoAAL.onPlay);\n aal_vimeo_player.on('pause', vimeoAAL.onPause);\n\n aal_vimeo_player.on('error', function(data){\n //console.log(data);\n });\n aal_vimeo_player.getVideoId().then(function(id) {\n aal_vimeo_player.aal_details.id = String(id);\n }).catch(function(error) {\n // an error occurred\n // console.log(error);\n });\n aal_vimeo_player.getVideoTitle().then(function(title) {\n aal_vimeo_player.aal_details.title = title;\n }).catch(function(error) {\n // an error occurred\n //console.log(error);\n });\n aal_vimeo_player.on('seeked', vimeoAAL.onSeek);\n aal_vimeo_player.on('ended', vimeoAAL.onEnded);\n\n\n //});\n }\n\n });\n },\n onPlay: function(e) {\n var player = this;\n if (player.aal_details.paused === false)\n {\n umgAAL.track.video('vimeo','play',player.aal_details.id, player.aal_details.title);\n }\n },\n onPause: function(e) {\n var player = this;\n player.aal_details.paused = true;\n umgAAL.track.video('vimeo','pause',player.aal_details.id, player.aal_details.title);\n },\n // Tracking video progress\n onTimeUpdate: function(e) {\n var percent = Math.floor(e.percent * 100);\n var player = this;\n if (percent > 20 && percent < 30 && player.aal_details.progress25 === false)\n {\n player.aal_details.progress25 = true;\n umgAAL.track.video('vimeo','25%',player.aal_details.id, player.aal_details.title);\n }else if (percent > 45 && percent < 55 && player.aal_details.progress50 === false)\n {\n player.aal_details.progress50 = true;\n umgAAL.track.video('vimeo','50%',player.aal_details.id, player.aal_details.title);\n }else if (percent > 65 && percent < 85 && player.aal_details.progress75 === false)\n {\n player.aal_details.progress75 = true;\n umgAAL.track.video('vimeo','75%',player.aal_details.id, player.aal_details.title);\n }\n else if (percent > 85 && percent < 95 && player.aal_details.progress90 === false)\n {\n player.aal_details.progress90 = true;\n umgAAL.track.video('vimeo','90%',player.aal_details.id, player.aal_details.title);\n }\n else if (percent > 95 && player.aal_details.progress100 === false)\n {\n player.aal_details.progress100 = true;\n umgAAL.track.video('vimeo','100%',player.aal_details.id, player.aal_details.title);\n }\n },\n onSeek: function(e) {\n var player = this;\n var percent = Math.floor(e.percent * 100);\n vimeoAAL.resetPercent(player);\n umgAAL.track.video('vimeo',percent,player.aal_details.id, player.aal_details.title);\n },\n onEnded : function(e) {\n var player = this;\n vimeoAAL.playerResetAll(player);\n umgAAL.track.video('vimeo','',player.aal_details.id, player.aal_details.title);\n },\n playerResetAll : function(player){\n 
player.aal_details.paused = false;\n vimeoAAL.resetPercent(player);\n },\n resetPercent : function(player){\n player.aal_details.progress25 = false;\n player.aal_details.progress50 = false;\n player.aal_details.progress75 = false;\n player.aal_details.progress90 = false;\n player.aal_details.progress100 = false;\n },\n domChange : function (){\n vimeoAAL.setIframes();\n }\n };\n YTdeferred.done(function() {\n jQuery(window).load(function() {\n if (typeof (UMGAALWP.services) !== 'undefined' && (typeof UMGAALWP.services.you_tube) !== 'undefined' && UMGAALWP.services.you_tube)\n {\n youtubeAAL.init();\n }\n });\n });\n\n $(window).load(function() {\n $(document).ready(function(){\n if (typeof (UMGAALWP.services) !== 'undefined' && (typeof UMGAALWP.services.vimeo) !== 'undefined' && UMGAALWP.services.vimeo)\n {\n vimeoAAL.init();\n }\n });\n });\n\n})(jQuery);\n\n\n//Stackla\n(function($) {\n \"use strict\";\n var stacklaAAL = (window.vimeoAAL) ? window.vimeoAAL : {};\n stacklaAAL = {\n stackla_items : [],\n init: function () {\n Stackla.WidgetManager\n .on('load', stacklaAAL.onLoad)\n .on('tileExpand', stacklaAAL.onTileExpand)\n .on('userClick', stacklaAAL.onUserClick)\n .on('shareClick', stacklaAAL.onShareClick)\n .on('moreLoad', stacklaAAL.onLoadMore)\n .on('productActionClick', stacklaAAL.onProductActionClick);\n },\n onLoad : function(e, data){\n var type = 'custom';\n if (data.styleName && data.styleName.indexOf('carousel') !== -1)\n {\n type = 'carousel';\n umgAAL.track.photo_gallery_view( data.widgetId, data.name, '', 0 );\n }\n else if (data.styleName && data.styleName.indexOf('billboard') !== -1)\n {\n type = 'billboard';\n umgAAL.track.photo_gallery_view( data.widgetId, data.name, '', 0 );\n }\n else if (data.styleName && data.styleName.indexOf('feed') !== -1)\n {\n type = 'feed';\n umgAAL.track.photo_gallery_view( data.widgetId, data.name, '', 0 );\n }\n else if (data.styleName && data.styleName.indexOf('slideshow') !== -1)\n {\n type = 'slideshow';\n umgAAL.track.photo_gallery_view( data.widgetId, data.name, '', 0 );\n }\n else if (data.styleName && data.styleName.indexOf('waterfall') !== -1)\n {\n type = 'waterfall';\n umgAAL.track.photo_gallery_view( data.widgetId, data.name, '', 0 );\n }\n var stackla_obj = {id:data.widgetId,type:type, name:data.name};\n stacklaAAL.stackla_items.push(stackla_obj);\n },\n onTileExpand : function(e, data){\n if (data.tileData.media === 'video' && data.tileData.source!=='youtube' && data.tileData.source!=='vimeo'){\n umgAAL.track.video(data.tileData.source,'play',data.widgetId, data.tileData.message);\n }\n else if (data.tileData.media === 'audio' && data.tileData.source!=='soundcloud'){\n umgAAL.track.audio(data.tileData.source, 'play', data.widgetId, data.tileData.message);\n }\n },\n onUserClick : function (e, data){\n if (data.tileData.author_link)\n {\n umgAAL.track.outbound_link( data.tileData.author_link );\n }\n else\n {\n umgAAL.track.outbound_link( data.tileData.original_url );\n }\n },\n onShareClick : function(e, data){\n umgAAL.track.share( data.shareNetwork, data.tileData.media, data.widgetId, data.tileData.message );\n },\n onLoadMore : function (e, data) {\n var stackla_obj = stacklaAAL.getStacklaItemById(data.widgetId);\n if (stackla_obj !== null\n /*\n &&\n (stackla_obj.type === 'slideshow'\n || stackla_obj.type === 'carousel'\n || stackla_obj.type === 'billboard')\n */\n ){\n\n umgAAL.track.photo_gallery_load_more( stackla_obj.id, stackla_obj.name, '', data.page );\n }\n },\n onProductActionClick: function(e, data){\n\n var 
store = '';\n var dest_url = '';\n if (data.productTag)\n {\n if (data.productTag.custom_url)\n {\n dest_url = data.productTag.custom_url;\n }\n if (data.productTag.tag)\n {\n store = data.productTag.tag;\n }\n }\n umgAAL.track.merch_link( store, dest_url )\n },\n getStacklaItemById : function(id){\n for (var i = 0; i < stacklaAAL.stackla_items.length; i++) {\n if (stacklaAAL.stackla_items[i].id === id)\n {\n return stacklaAAL.stackla_items[i];\n }\n }\n return null;\n }\n };\n if (UMGAALWP.plugins.stackla && typeof(umgAAL) != 'undefined' )\n {\n stacklaAAL.init();\n }\n})(jQuery);\n\n//Sound Cloud tracking\n(function($) {\n \"use strict\";\n var soundcloudAAL = (window.soundcloudAAL) ? window.soundcloudAAL : {};\n soundcloudAAL = {\n players : [],\n init: function () {\n soundcloudAAL.setIframes();\n $('body').bind(\"DOMSubtreeModified\", soundcloudAAL.domChange);\n },\n getSoundCloudId : function(src) {\n var exp = new RegExp(/(snd\\.sc|soundcloud\\.com)/);\n return exp.test(src);\n },\n setIframes : function(){\n $.each($('iframe'), function (index, iframe) {\n var sc_id = soundcloudAAL.getSoundCloudId(iframe.src);\n if(sc_id && $(this).data('aal-tracking') === undefined){\n sc_id = '';\n $(this).data('aal-tracking', 'true');\n var widget = SC.Widget(iframe);\n var details = {\n 'id' : '',\n 'title' : '',\n 'duration':0,\n 'paused':false,\n 'progress25' : false,\n 'progress50' : false,\n 'progress75' : false,\n 'progress90' : false,\n 'progress100' : false,\n 'iframe':iframe\n };\n\n widget.aal_details = details;\n widget.bind(SC.Widget.Events.READY, function() {\n widget.getCurrentSound(function(e){\n widget.aal_details.id = e.id;\n widget.aal_details.title = e.title;\n widget.aal_details.duration = e.duration\n });\n widget.bind(SC.Widget.Events.PLAY, function() {\n umgAAL.track.audio('soundcloud', 'play', widget.aal_details.id.toString(), widget.aal_details.title);\n });\n widget.bind(SC.Widget.Events.PAUSE, function() {\n umgAAL.track.audio('soundcloud', 'pause', widget.aal_details.id.toString(), widget.aal_details.title);\n });\n widget.bind(SC.Widget.Events.SEEK, function() {\n umgAAL.track.audio('soundcloud', 'seek', widget.aal_details.id.toString(), widget.aal_details.title);\n });\n //Progress if ever needed as percent\n /*\n widget.bind(SC.Widget.Events.PLAY_PROGRESS, function() {\n console.log('PROGRESS SOUND CLOUD');\n widget.getPosition(function(position){\n //Mins and Seconds if ever needed\n var mins = (position/1000/60 < 10 ? '0' : '') + Math.floor(position/1000/60),\n secs = (position/1000%60 < 10 ? 
'0' : '') + Math.floor((position/1000) %60);\n var percentage = Math.round(position*100/widget.aal_details.duration);\n console.log(percentage);\n });\n });\n */\n widget.bind(SC.Widget.Events.FINISH, function() {\n umgAAL.track.audio('soundcloud', 'stop', widget.aal_details.id, widget.aal_details.title);\n });\n widget.bind(SC.Widget.Events.CLICK_BUY, function() {\n\n });\n });\n soundcloudAAL.players.push(widget);\n }\n });\n },\n domChange : function (){\n soundcloudAAL.setIframes();\n }\n };\n $(window).load(function() {\n $(document).ready(function(){\n if (typeof (UMGAALWP.services) !== 'undefined' && (typeof UMGAALWP.services.soundcloud) !== 'undefined' && UMGAALWP.services.soundcloud)\n {\n soundcloudAAL.init();\n }\n });\n });\n\n})(jQuery);\n\n//Slider Revolution\n(function($) {\n \"use strict\";\n if (UMGAALWP.plugins.revslider && typeof(umgAAL) != 'undefined' )\n {\n $(window).load(function() {\n $(document).ready(function(){\n //Get Slider Title From AAL JSON\n function get_revslider_details(alias){\n for (var i = 0; i < UMGAALWP.data.revslider_slider.length; i++) {\n if (UMGAALWP.data.revslider_slider[i].alias === alias)\n {\n return UMGAALWP.data.revslider_slider[i];\n }\n }\n return null;\n }\n //Get loop though all Sliders on the Page\n $('.rev_slider_wrapper').each(function() {\n var alias = $(this).data('alias');\n var source = $(this).data('source');\n\n if (source === 'gallery' || source === 'flickr' || source === 'instagram')\n {\n var slider_details = get_revslider_details(alias);\n var title = '';\n var id = '';\n if (slider_details !== null)\n {\n title = slider_details.title;\n id = slider_details.id;\n }\n umgAAL.track.photo_gallery_view( id, title);\n }\n });\n });\n });\n }\n})(jQuery);\n"
},
{
"alpha_fraction": 0.6539792418479919,
"alphanum_fraction": 0.6643598675727844,
"avg_line_length": 21.076923370361328,
"blob_id": "a9c527f5cb31e37678b9b57ab2f48ab8cc26ea7e",
"content_id": "0b1c59d4da3e7d584bd0e12795516058338a1776",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 289,
"license_type": "no_license",
"max_line_length": 43,
"num_lines": 13,
"path": "/Week 14 - Intro-To-JavaScript/3/Activities/04-Ins_Event_Listeners/Unsolved/index.js",
"repo_name": "Joseamica/Bootcamp.github.io",
"src_encoding": "UTF-8",
"text": "var button = d3.select(\"#click-me\");\nvar inputField = d3.select(\"#input-field\");\n\nfunction handleClick(){\n console.log(\"button clicked\");\n}\n\nbutton.on(\"click\", handleClick)\n\ninputField.on(\"change\", function() {\n var newText = d3.event.target.value;\n console.log(newText);\n });\n "
},
{
"alpha_fraction": 0.7715399861335754,
"alphanum_fraction": 0.7762182950973511,
"avg_line_length": 51.32653045654297,
"blob_id": "42836cf75aaf61704b7c9939500d2d2aa3e17765",
"content_id": "c8a2df4a762a9713122af69ff88fa57aad775d06",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "SQL",
"length_bytes": 2565,
"license_type": "no_license",
"max_line_length": 176,
"num_lines": 49,
"path": "/Tareas/Tarea9_SQL/queries.sql",
"repo_name": "Joseamica/Bootcamp.github.io",
"src_encoding": "UTF-8",
"text": "--List the following details of each employee: employee number, \n--last name, first name, gender, and salary.\nSELECT employees.emp_no, employees.last_name, employees.first_name, employees.gender, salaries.salary FROM employees\nINNER JOIN salaries on employees.emp_no = salaries.emp_no;\n\n--List employees who were hired in 1986.\nSELECT first_name as \"First Name\", last_name as \"Last Name\", hire_date as \"Hire Date\" FROM employees\nWHERE hire_date > '1986-01-01';\n\n---List the manager of each department with the following information: \n--department number, department name, the manager's employee number, \n--last name, first name, and start and end employment dates.\nSELECT dept_manager.dept_no, departments.dept_name, employees.emp_no, employees.first_name, employees.last_name, dept_manager.from_date, dept_manager.to_date FROM dept_manager \nINNER JOIN departments ON dept_manager.dept_no = departments.dept_no\nINNER JOIN employees ON dept_manager.emp_no = employees.emp_no;\n\n--List the department of each employee with the following information: \n--employee number, last name, first name, and department name.\nSELECT employees.emp_no, employees.last_name, employees.first_name,\ndepartments.dept_name FROM departments\nINNER JOIN dept_emp ON departments.dept_no = dept_emp.dept_no\nINNER JOIN employees ON dept_emp.emp_no = employees.emp_no;\n\n--List all employees whose first name is \"Hercules\" \n--and last names begin with \"B.\"\nSELECT * FROM employees\nWHERE first_name = 'Hercules' AND last_name LIKE 'B%';\n\n---List all employees in the Sales department, including their \n---employee number, last name, first name, and department name.\nSELECT employees.emp_no, departments.dept_name, employees.last_name, employees.first_name from departments\nINNER JOIN dept_emp ON departments.dept_no = dept_emp.dept_no\nINNER JOIN employees ON dept_emp.emp_no = employees.emp_no\nWHERE dept_name = 'Sales';\n\n--List all employees in the Sales and Development departments, including their \n--employee number, last name, first name, and department name.\nSELECT employees.emp_no, employees.last_name, employees.first_name, departments.dept_name from departments\nINNER JOIN dept_emp ON departments.dept_no = dept_emp.dept_no\nINNER JOIN employees ON dept_emp.emp_no = employees.emp_no\nWHERE dept_name = 'Sales' OR dept_name = 'Development';\n\n\n--In descending order, list the frequency count of employee last names, i.e., \n--how many employees share each last name.\nSELECT DISTINCT last_name, COUNT(last_name) AS Frequency FROM employees\nGROUP BY last_name\nORDER BY\nCOUNT(last_name) DESC\n\n"
},
{
"alpha_fraction": 0.6807313561439514,
"alphanum_fraction": 0.6962025165557861,
"avg_line_length": 27.479999542236328,
"blob_id": "15dc28430db9e173b1425a007d4100abbbb3bcce",
"content_id": "85aaca5e277c3c9387f4ec0513f37087c45d5fd7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 711,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 25,
"path": "/Week 14 - Intro-To-JavaScript/3/Activities/06-Stu_Button_Click/Unsolved/static/js/app.js",
"repo_name": "Joseamica/Bootcamp.github.io",
"src_encoding": "UTF-8",
"text": "// Randomly select an episode number for Star Wars\nvar text = d3.select(\".star-wars\")\n .text(Math.floor(Math.random() * 8) + 1);\n\n// Select the upvote and downvote buttons\nvar buttonUp = d3.select(\".upvote\");\nvar buttonDown = d3.select(\".downvote\");\n// Select the counter h1 element\nvar counter = d3.select(\".counter\");\n// Use D3 `.on` to attach a click handler for the upvote\n\n\nbuttonUp.on(\"click\",function(){\n var currentCount = parseInt(counter.text());\n currentCount += 1;\n counter.text(currentCount);\n \n})\n// Use d3 `.on` to attach a click handler for the downvote\nbuttonDown.on(\"click\",function(){\n var currentCount = parseInt(counter.text());\n currentCount -=1;\n counter.text(currentCount);\n \n})"
},
{
"alpha_fraction": 0.677478551864624,
"alphanum_fraction": 0.6848224997520447,
"avg_line_length": 35.33333206176758,
"blob_id": "fc43e5ede0420314516e7e33c7f77532c76064d0",
"content_id": "2d30029016a8e5625d576e430a086826c4069d56",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1634,
"license_type": "no_license",
"max_line_length": 225,
"num_lines": 45,
"path": "/Week 4 - Python/Clase_1_Python/rock_paper_scissors.py",
"repo_name": "Joseamica/Bootcamp.github.io",
"src_encoding": "UTF-8",
"text": "# Incorporate the random library\nimport random\n\n# Print Title\nprint(\"Let's Play Rock Paper Scissors!\")\n\n# Specify the three options\noptions = [\"r\", \"p\", \"s\"]\noptions_full = [\"Rock\", \"Paper\", \"Scissors\"]\n\n\n# Computer Selection\ncomputer_choice = random.choice(options)\n\n\nif computer_choice == \"r\":\n computer_choice = \"Rock\"\nelif computer_choice == \"s\":\n computer_choice = \"Scissors\"\nelif computer_choice == \"p\":\n computer_choice = \"Paper\"\n\n# User Selection\nuser_choice = input(\"Make your Choice: (r)ock, (p)aper, (s)cissors? \")\n\nif user_choice == \"r\":\n user_choice = \"Rock\"\nelif user_choice == \"s\":\n user_choice = \"Scissors\"\nelif user_choice == \"p\":\n user_choice = \"Paper\"\n\n\n# Run Conditionals\nif (user_choice == options_full[0] and computer_choice == options_full[2]) or (user_choice == options_full[1] and computer_choice == options_full[0]) or (user_choice == options_full[2] and computer_choice == options_full[1]):\n print(f\"\\nYou choose {user_choice} and your enemy choose {computer_choice}, You won!\")\n\nelif user_choice == options_full[0] and computer_choice == options_full[0]:\n print(f\"\\nYou choose {user_choice} and your enemy choose {computer_choice}, Its a tie.\")\nelif user_choice == options_full[1] and computer_choice == options_full[1]:\n print(f\"\\nYou choose {user_choice} and your enemy choose {computer_choice}, Its a tie.\")\nelif user_choice == options_full[2] and computer_choice == options_full[2]:\n print(f\"\\nYou choose {user_choice} and your enemy choose {computer_choice}, Its a tie.\")\nelse: \n print(f\"\\nYou choose {user_choice} and your enemy choose {computer_choice}, You lose!\")"
},
{
"alpha_fraction": 0.7419354915618896,
"alphanum_fraction": 0.7419354915618896,
"avg_line_length": 31,
"blob_id": "c89058b32f514361dfd30e4c6bcb636672071cd5",
"content_id": "ea995aee2ba85d3a750f3b33c4534e16efbe9d08",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 31,
"license_type": "no_license",
"max_line_length": 31,
"num_lines": 1,
"path": "/Week 4 - Python/Clase_1_Python/LearnPython/Assignment2/quick_python2.py",
"repo_name": "Joseamica/Bootcamp.github.io",
"src_encoding": "UTF-8",
"text": "print(\"hablo de otro archivo!\")"
},
{
"alpha_fraction": 0.5,
"alphanum_fraction": 0.5,
"avg_line_length": 17,
"blob_id": "d35a9b1925a31098dcab6f859c1f76c77a19a9a7",
"content_id": "16bd9becdd29ba9d7e5b16713d1d2a47f5623892",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 18,
"license_type": "no_license",
"max_line_length": 17,
"num_lines": 1,
"path": "/Week 6 - APIs/Clase_2/03OpenWeather/config.py",
"repo_name": "Joseamica/Bootcamp.github.io",
"src_encoding": "UTF-8",
"text": "api_key = \"752d8c9bd81f78cbe64e5ee3c61b4a72\"\n"
},
{
"alpha_fraction": 0.5809248685836792,
"alphanum_fraction": 0.6416184902191162,
"avg_line_length": 23.785715103149414,
"blob_id": "4a2e02e22bfbece36b124b724cefb52c1a89cbc3",
"content_id": "9f289a8091af3114bf2c133e9997ad23654fc6be",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 346,
"license_type": "no_license",
"max_line_length": 104,
"num_lines": 14,
"path": "/Week 4 - Python/Clase_1_Python/hello-world.py",
"repo_name": "Joseamica/Bootcamp.github.io",
"src_encoding": "UTF-8",
"text": "name = \"Jose Antonio Amieva\"\ncountry = \"Mexico\"\n\nage = 25\nhourly_wage = 1000000\n\nsatisfied = True\n\ndaily_wage = 100000000000\n\nprint(\"My name is \" + name + \", I have \" + str(age) + \" years old. I am actually living in \" + country +\n \" and my wage is \" + str(hourly_wage) + \" per hour.\")\n\nprint(f\"my daily wage is ${daily_wage} and I am satisfied? {satisfied}\")"
},
{
"alpha_fraction": 0.6481999754905701,
"alphanum_fraction": 0.6571999788284302,
"avg_line_length": 19.995798110961914,
"blob_id": "2b8dc36d74e71fa3c82f71dae611c0e81e362be7",
"content_id": "f2fc753bfb4335112ebb4339391c0c8e84551789",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5000,
"license_type": "no_license",
"max_line_length": 211,
"num_lines": 238,
"path": "/Tareas/Tarea12_WebScrapping/mars.py",
"repo_name": "Joseamica/Bootcamp.github.io",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# coding: utf-8\n\n# In[18]:\n\n\n#import dependencies\nimport os\nfrom bs4 import BeautifulSoup as bs\nfrom splinter import Browser\nimport pandas as pd\n\n\n# In[19]:\n\n\nexecutable_path = {'executable_path': '/usr/local/bin/chromedriver'}\nbrowser = Browser('chrome', **executable_path, headless=False)\n\n\n# In[20]:\n\n\n#open chrome and visit url\nurl_nasa = 'https://mars.nasa.gov/news/?page=0&per_page=40&order=publish_date+desc%2Ccreated_at+desc&search=&category=19%2C165%2C184%2C204&blank_scope=Latest'\nbrowser.visit(url_nasa)\n\n\n# In[ ]:\n\n\nhtml = browser.html\nsoup = bs(html, 'html.parser')\n\n\n# In[ ]:\n\n\n#print all titles and news \n\nfor node in soup.find_all('div', class_=\"list_text\"):\n title = node.find('div', class_=\"content_title\", text=True).text\n news = node.find('div', class_=\"article_teaser_body\").text\n print('Title: ' + title)\n print('News: ' + news)\n print('-'*30)\n \n\n\n# In[ ]:\n\n\n\"\"\" #visit the pictures \nurl_pics = 'https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars'\nbrowser.visit(url_pics)\n\n\n# In[ ]:\n\n\nhtml = browser.html\nsoup = bs(html, 'html.parser')\n\n\n# In[ ]:\n\n\n#find all pictures fullsize\nnasa_url = 'https://www.jpl.nasa.gov/spaceimages/images/largesize'\nnasa_urls = []\nfor node in soup.find_all('div', class_=\"img\"):\n try:\n browser.find_by_css('img.thumb').first.click()\n except:\n pass\n #thumb = node.find('img', class_=\"thumb\")\n try:\n browser.find_by_css('a.fancybox-nav.fancybox-next').first.click()\n print('Link: ' + browser.find_by_css('img.fancybox-image')['src'])\n nasa_urls.append(browser.find_by_css('img.fancybox-image')['src'])\n print('')\n\n except :\n print('algo esta mal en 2019') \"\"\"\n\n #print(thumb['src'])\n \n \n\n\n# In[ ]:\n\nurl_pics = 'https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars'\nbrowser.visit(url_pics)\nxxx = browser.find_by_id('full_image')\nxxx.click()\n\n\n# In[ ]:\n\n\n#feature image scrap url\nfeatured_image_url = browser.find_by_css('img.fancybox-image')['src']\nprint(featured_image_url)\n\n\n# In[ ]:\n\n\n#visit twitter from mars \ntwitter_url = 'https://twitter.com/marswxreport?lang=en'\n\n\n# In[ ]:\n\n\n#scrap last tweet from mars twitter\nmars_weather = []\nfor i in range(1):\n browser.visit(twitter_url)\n browser.find_by_css('ol.stream-items').first.click()\n mars_weather.append(browser.find_by_css('p.TweetTextSize').text)\n\n\n# In[ ]:\n\n\n#convert list to string\nmars_weather = ''.join(mars_weather)\nprint(mars_weather)\ntype(mars_weather)\n\n\n# ## Mars Facts\n# **Visit the Mars Facts webpage here and use Pandas to scrape the table containing facts about the planet including Diameter, Mass, etc.**\n# \n# \n# **Use Pandas to convert the data to a HTML table string.**\n\n# In[ ]:\n\n\nmars_facts_url = 'https://space-facts.com/mars/'\n\n\n# In[ ]:\n\n\n#scrap last tweet from mars twitter\nbrowser.visit(mars_facts_url)\n\nmars_facts = pd.read_html(mars_facts_url)\nmars_facts_df = mars_facts[0]\nmars_facts_df = mars_facts_df.set_index(mars_facts_df['Mars - Earth Comparison'])\nmars_facts_df.drop(columns=\"Mars - Earth Comparison\")\n\n\n# In[ ]:\n\n\nmars_facts_df\n\n\n# In[ ]:\n\n\nmars_html_df = mars_facts_df.to_html('mars_facts.html')\n\n\n# ## Mars Hemispheres\n# **Visit the USGS Astrogeology site here to obtain high resolution images for each of Mar's hemispheres.**\n# \n# \n# **You will need to click each of the links to the hemispheres in order to find the image url to the full resolution image.**\n# \n# \n# 
**Save both the image url string for the full resolution hemisphere image, and the Hemisphere title containing the hemisphere name. Use a Python dictionary to store the data using the keys img_url and title.**\n# \n# \n# **Append the dictionary with the image url string and the hemisphere title to a list. This list will contain one dictionary for each hemisphere.**\n# \n# \n# hemisphere_image_urls = [\n# {\"title\": \"Valles Marineris Hemisphere\", \"img_url\": \"...\"},\n# {\"title\": \"Cerberus Hemisphere\", \"img_url\": \"...\"},\n# {\"title\": \"Schiaparelli Hemisphere\", \"img_url\": \"...\"},\n# {\"title\": \"Syrtis Major Hemisphere\", \"img_url\": \"...\"},\n# ]\n# \n\n# In[16]:\n\n\n#mars_\nmars_hemispheres_url = 'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars'\n\n\n# In[24]:\n\n\nbrowser.visit(mars_hemispheres_url)\nhtml = browser.html\nsoup = bs(html, 'html.parser')\n\nast_url = f\"https://astrogeology.usgs.gov\"\n\nhemisphere_image_urls = []\nhemispheresLinks = soup.find_all(\"div\", class_=\"description\")\n\nfor link in hemispheresLinks:\n url = ast_url + link.a[\"href\"]\n browser.visit(url)\n soup = bs(browser.html, 'html.parser')\n image = ast_url + soup.find(\"img\", class_=\"wide-image\")[\"src\"]\n title = soup.find(\"h2\", class_=\"title\").text.replace(\" Enhanced\", \"\")\n hemisphere_image_urls.append( { \"title\" : title, \"img_url\" : image } )\n\n\n# In[25]:\n\n\nhemisphere_image_urls\n\n\n# In[ ]:\n\n\nhemisphere_image_urls\n\n\n# In[32]:\n\n\nfor i in hemispheresLinks:\n print(ast_url + i.a['href'])\n\n\n# In[ ]:\n\n\n\n"
},
{
"alpha_fraction": 0.5945945978164673,
"alphanum_fraction": 0.5945945978164673,
"avg_line_length": 17.5,
"blob_id": "d85070a664e64d5e296662fc98aeb0e8ab7b3e33",
"content_id": "992003a92c12f28309166fceae56a9e74826efb7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 37,
"license_type": "no_license",
"max_line_length": 18,
"num_lines": 2,
"path": "/Week 6 - APIs/Clase_2/10MapWrap/config.py",
"repo_name": "Joseamica/Bootcamp.github.io",
"src_encoding": "UTF-8",
"text": "# Add your API key\napi_key = \"752d8c9bd81f78cbe64e5ee3c61b4a72\"\n"
},
{
"alpha_fraction": 0.49889135360717773,
"alphanum_fraction": 0.5055432319641113,
"avg_line_length": 22.421052932739258,
"blob_id": "720d68f23c356b63921f63ccaf5a0d0db40c07cd",
"content_id": "8ff7cecb1c9abbcb4844c982e3d0798081eebcdb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 451,
"license_type": "no_license",
"max_line_length": 70,
"num_lines": 19,
"path": "/Week 4 - Python/Clase_1_Python/number_chain_loop.py",
"repo_name": "Joseamica/Bootcamp.github.io",
"src_encoding": "UTF-8",
"text": "while True:\n try:\n numbers = int(input(\"How many numbers? \"))\n break\n except ValueError:\n print(\"Tienes que seleccionar un numero. Vuelve a intentarlo\")\n\ni = 0\nanswer = \"\"\nwhile i <= numbers:\n print(i)\n i += 1\n if i == numbers:\n answer = input(\"Would you want to continue? y=yes or n=no: \")\n if answer == \"y\":\n i = 0\n continue\n elif answer == \"n\":\n break\n\n \n"
},
{
"alpha_fraction": 0.5595777034759521,
"alphanum_fraction": 0.5641025900840759,
"avg_line_length": 25.440000534057617,
"blob_id": "b363f256c6b3605100ce3d14a281b1442c20e9f6",
"content_id": "3417dd548eaa61b044ba4ce83c60919d084fa11b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 663,
"license_type": "no_license",
"max_line_length": 68,
"num_lines": 25,
"path": "/Week 14 - Intro-To-JavaScript/2/Activities/04-Stu_Word_Counter/Unsolved/index.js",
"repo_name": "Joseamica/Bootcamp.github.io",
"src_encoding": "UTF-8",
"text": "function wordCount(myString) {\n // Convert string to an array of words\n var stringArray = myString.split(\" \");\n \n // An object to hold word frequency\n var wordFrequency = {};\n \n // Iterate through the array\n stringArray.forEach(function(i){\n // If the word has been seen before...\n if (i in wordFrequency) {\n // Add one to the counter\n wordFrequency[i] += 1;\n }\n else {\n // Set the counter at 1\n wordFrequency[i] = 1;\n } \n })\n \n console.log(wordFrequency);\n return wordFrequency;\n }\n \n wordCount(\"I yam what I yam and always will be what I yam I I I\");\n "
},
{
"alpha_fraction": 0.487650066614151,
"alphanum_fraction": 0.48770296573638916,
"avg_line_length": 47.479488372802734,
"blob_id": "e19c5c58a89ec0a887b8138d0f46762ea635391f",
"content_id": "8281a36762f6eabedf918cb6d9db0b93ebb4ffb1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 18907,
"license_type": "no_license",
"max_line_length": 354,
"num_lines": 390,
"path": "/Week 11 - HTML & CSS/2/Justin Bieber – Purpose Available Now_files/umg-ecrm-frontend.js",
"repo_name": "Joseamica/Bootcamp.github.io",
"src_encoding": "UTF-8",
"text": "jQuery(document).ready(function($) {\n\n if(jQuery('.umgecrm-casl-country-block').length) {\n jQuery('.umgecrm-casl-country-block select.umgecrm-casl-country-selector').change(function() {\n var country_code = jQuery(this).val();\n UMGECRM.check_casl(country_code);\n });\n } else {\n AEJSWP.get_user_country('UMGECRM.check_casl');\n }\n});\n\nif(typeof UMGECRM.sub_data !== 'undefined' && typeof UMGECRM.sub_data.ecrm !== 'undefined' && UMGECRM.sub_data.ecrm == 'umgapi' && UMGECRM.lytics_env !== 'none') {\n if(typeof AEJSWP !== 'undefined') {\n //add waiter that makes AE Connect wait before executing user optins. This is required when we are waiting for Lytics Cookie ID\n AEJSWP.add_wait_for_event('send_user_optins');\n }\n\n /**\n * Lytics Callback Handler\n */\n !function(l,a){a.liosetup=a.liosetup||{},a.liosetup.callback=a.liosetup.callback||[],a.liosetup.addEntityLoadedCallback=function(l,o){if(\"function\"==typeof a.liosetup.callback){var i=[];i.push(a.liosetup.callback),a.liosetup.callback=i}a.lio&&a.lio.loaded?l(a.lio.data):o?a.liosetup.callback.unshift(l):a.liosetup.callback.push(l)}}(document,window);\n\n // Custom Callback Function\n window.liosetup.addEntityLoadedCallback(function(data){\n\n //get lytics cookie uid\n var lytics_uid = window.jstag.getid();\n\n //append the lytics uid to user data object so it can be used when sending optin API call\n AEJSWP.add_data_filter('filter__optin_user_data', function(data) {\n data['lytics_uid'] = lytics_uid;\n\n return data;\n });\n\n AEJSWP.add_data_filter('filter__redirect_login_return_url', function(return_url) {\n return AEJSWP.addURLParameter(return_url, 'lytics-uid', lytics_uid);\n });\n\n AEJSWP.execute_wait_for_event('send_user_optins');\n });\n}\n\n//Check if user is from Canada and amend any fields that require different labels for Canada\nUMGECRM.check_casl = function(country) {\n if(country == 'CA') {\n //user country is Canada so let's use CASL labels\n jQuery('.umgecrm-adhoc-block[data-umgecrm-casl=\"1\"]').each(function() {\n jQuery('.umgecrm-text-container').each(function() {\n if(jQuery('.umgecrm-casl', jQuery(this)).length) {\n jQuery('.umgecrm-casl', jQuery(this)).show();\n jQuery('.umgecrm-non-casl', jQuery(this)).hide();\n }\n });\n });\n }\n};\n\n//Add event listener for AEJS ready\nAEJSWP.add_aejsready_handler(\"UMGECRM.ae_wpaejsready\");\nUMGECRM.ae_wpaejsready = function(aeJS) {\n aeJS.events.onLogin.addHandler(UMGECRM.ae_login_handle);\n aeJS.events.onUser.addHandler(UMGECRM.ae_user_handle);\n};\n\n\n//Function executed when AE Framework logs in user\nUMGECRM.ae_login_handle = function(ae_user) {\n\n //find all adhoc ecrm forms and save their opt-in states to localStorage so they can be executed once AE onUser event fires\n //if AJAX login is not used then onUSer event will fire after page refresh (that's why save states in localStorage)\n jQuery('.umgecrm-adhoc-block[data-umgecrm-submission-type=\"auth-connect\"]').each(function() {\n UMGECRM.set_adhoc_block_states(jQuery(this), ae_user);\n });\n};\n\n//Function executed when AE framework returns a user object to the client\nUMGECRM.ae_user_handle = function(ae_user) {\n UMGECRM.update_adhoc_blocks(ae_user);\n\n //AE user logged in so we can execute opt-ins that are set to 'auth-connect' submission type\n jQuery('.umgecrm-adhoc-block[data-umgecrm-submission-type=\"auth-connect\"]').each(function() {\n var subs = UMGECRM.get_adhoc_block_states(jQuery(this), ae_user);\n if(typeof subs !== 'undefined') {\n UMGECRM.execute_adhoc_block(jQuery(this), ae_user, 
subs);\n UMGECRM.clear_adhoc_block_states(jQuery(this), ae_user);\n }\n });\n\n //Add listeners to submit buttons where adhoc submission type is set to submit-button\n jQuery('.umgecrm-adhoc-block[data-umgecrm-submission-type=\"submit-button\"]').each(function() {\n var block = jQuery(this);\n jQuery('.umgecrm-submit-button button', jQuery(this)).click(function(e) {\n e.preventDefault();\n UMGECRM.execute_adhoc_block(block, ae_user);\n });\n });\n};\n\n//check which adhoc block have been complete and set complete message if they are complete\n//complete states are checked and saved inside of localStorage\nUMGECRM.update_adhoc_blocks = function(ae_user) {\n jQuery('.umgecrm-adhoc-block').each(function() {\n var adhoc_id = jQuery(this).attr('data-adhoc-id');\n\n if(UMGECRM.session_check_adhoc_exists(adhoc_id, ae_user)) {\n jQuery(this).children('div').hide();\n jQuery('.umgecrm-complete-block', jQuery(this)).show();\n }\n });\n};\n\n//run opt-in actions from specified ad-hoc block/shortcode\nUMGECRM.execute_adhoc_block = function(block, ae_user, subs) {\n var adhoc_id = jQuery(block).attr('data-adhoc-id');\n if(!UMGECRM.session_check_adhoc_exists(adhoc_id, ae_user)) {\n UMGECRM.session_set_adhoc(adhoc_id, ae_user);\n\n jQuery.ajax({\n url: UMGECRM.ajaxurl,\n type: 'post',\n data: {\n action: 'execute_adhoc_block',\n user: ae_user,\n subs: subs,\n adhoc_id: adhoc_id\n },\n success: function(response) {\n UMGECRM.update_adhoc_blocks(ae_user);\n //track analytics event for adhoc form\n UMGECRM.trackEvent('optin_success_adhoc', {form_id: adhoc_id, user: ae_user, subs: subs});\n }\n });\n }\n};\n\n/** localStorage session functions **/\nUMGECRM.clear_adhoc_block_states = function(block, ae_user) {\n if(typeof Storage !== \"undefined\") {\n if(typeof ae_user.data !== 'undefined' && typeof ae_user.data['ID'] !== 'undefined') {\n var user_id = ae_user.data['ID'];\n var adhoc_id = jQuery(block).attr('data-adhoc-id');\n return localStorage.removeItem('umgecrm-adhoc-state-' + user_id + '-' + adhoc_id);\n }\n }\n};\nUMGECRM.get_adhoc_block_states = function(block, ae_user) {\n if(typeof Storage !== \"undefined\") {\n if(typeof ae_user.data !== 'undefined' && typeof ae_user.data['ID'] !== 'undefined') {\n var user_id = ae_user.data['ID'];\n var adhoc_id = jQuery(block).attr('data-adhoc-id');\n return JSON.parse(localStorage.getItem('umgecrm-adhoc-state-' + user_id + '-' + adhoc_id));\n }\n }\n};\nUMGECRM.set_adhoc_block_states = function (block, ae_user) {\n if(typeof Storage !== \"undefined\") {\n if(typeof ae_user.data !== 'undefined' && typeof ae_user.data['ID'] !== 'undefined') {\n var adhoc_id = jQuery(block).attr('data-adhoc-id');\n var subs = {};\n jQuery('input[type=\"checkbox\"]', jQuery(block)).each(function() {\n var optin_id = jQuery(this).attr('data-optin-id');\n var checked = jQuery(this).is(':checked');\n subs[optin_id] = checked;\n });\n\n var user_id = ae_user.data['ID'];\n localStorage.setItem('umgecrm-adhoc-state-' + user_id + '-' + adhoc_id, JSON.stringify(subs));\n }\n }\n};\n\n\nUMGECRM.session_check_adhoc_exists = function(adhoc_id, ae_user) {\n var exists = false;\n if(typeof Storage !== \"undefined\") {\n if(typeof ae_user.data !== 'undefined' && typeof ae_user.data['ID'] !== 'undefined') {\n var user_id = ae_user.data['ID'];\n exists = localStorage.getItem('umgecrm-adhoc-' + user_id + '-' + adhoc_id) ? 
true : false;\n }\n }\n return exists;\n};\n\nUMGECRM.session_set_adhoc = function(adhoc_id, ae_user) {\n if(typeof Storage !== \"undefined\") {\n if(typeof ae_user.data !== 'undefined' && typeof ae_user.data['ID'] !== 'undefined') {\n var user_id = ae_user.data['ID'];\n localStorage.setItem('umgecrm-adhoc-' + user_id + '-' + adhoc_id, true);\n }\n }\n};\n\n\n\n/** Analytics Event Tracking **/\n\n//function fired by AE Connect once custform form opt-ins are submitted\n// NB: we check for trackingDelegate.AEConnect first and don't overwrite it if it already exists as it may or may not be on the page already from other plugins\nvar trackingDelegate = window.trackingDelegate || {};\nif(typeof(trackingDelegate.AEConnect) == 'undefined') {\n trackingDelegate.AEConnect = {};\n}\ntrackingDelegate.AEConnect.optinsUpdated = function(data) {\n UMGECRM.trackEvent('optin_success_ae', data);\n};\n\nUMGECRM.trackEvent = function(type, data) {\n switch(type) {\n //track opt-in for ae custom forms\n case 'optin_success_ae':\n switch(UMGECRM.sub_data.ecrm) {\n case 'umgappi':\n for(var sub_index in data.optins) {\n var optin = data.optins[sub_index];\n if(optin.user_choice) {\n var optins = UMGECRM.sub_data['ae'][data.cform_id]['subs'];\n for(var i in optins) {\n if(optins[i].optin_id == optin.optin_id) {\n var sub_data = optins[i];\n var analytics_data = {\n list_id: sub_data['umgapi-form-id'],\n list_name: sub_data['umgapi-optin-ids'],\n business_unit: sub_data['umgapi-form-id']\n };\n AEJSWP.debugOutput('Firing optin_success_ae event with data: ', analytics_data);\n AEJSWP.executeFunctionByName(UMGECRM.analytics_tracking_delegate + '.optin_success', window, analytics_data);\n }\n }\n }\n }\n break;\n case 'exacttarget':\n for(var sub_index in data.optins) {\n var optin = data.optins[sub_index];\n if(optin.user_choice) {\n var optins = UMGECRM.sub_data['ae'][data.cform_id]['subs'];\n for(var i in optins) {\n if(optins[i].optin_id == optin.optin_id) {\n var sub_data = optins[i];\n var analytics_data = {\n list_id: sub_data['et-list-id'],\n list_name: sub_data['et-list-name'],\n business_unit: sub_data['et-client-id']\n };\n AEJSWP.debugOutput('Firing optin_success_ae event with data: ', analytics_data);\n AEJSWP.executeFunctionByName(UMGECRM.analytics_tracking_delegate + '.optin_success', window, analytics_data);\n }\n }\n }\n }\n break;\n case 'viceversa':\n \tfor(var sub_index in data.optins) {\n var optin = data.optins[sub_index];\n if(optin.user_choice) {\n var optins = UMGECRM.sub_data['ae'][data.cform_id]['subs'];\n for(var i in optins) {\n if(optins[i].optin_id == optin.optin_id) {\n var sub_data = optins[i];\n var analytics_data = {\n list_id: sub_data['vv-cf-optin-campaign-id'],\n list_name: null,\n business_unit: null\n };\n AEJSWP.debugOutput('Firing optin_success_ae event with data: ', analytics_data);\n AEJSWP.executeFunctionByName(UMGECRM.analytics_tracking_delegate + '.optin_success', window, analytics_data);\n }\n }\n }\n }\n break;\n case 'neolane':\n for(var sub_index in data.optins) {\n var optin = data.optins[sub_index];\n if(optin.user_choice) {\n var optins = UMGECRM.sub_data['ae'][data.cform_id]['subs'];\n for(var i in optins) {\n if(optins[i].optin_id == optin.optin_id) {\n var sub_data = optins[i];\n var list_id = null;\n var list_name = null;\n var business_unit = null;\n if(typeof sub_data['nl-artist-id'] != 'undefined') {\n list_id = sub_data['nl-artist-id'];\n business_unit = sub_data['nl-source'];\n list_name = sub_data['nl-label'];\n } else if(typeof 
sub_data['nl-cf-optin-artist-id'] != 'undefined') {\n list_id = sub_data['nl-cf-optin-artist-id'];\n business_unit = sub_data['nl-cf-optin-source'];\n list_name = sub_data['label'];\n }\n var analytics_data = {\n list_id: list_id,\n list_name: list_name,\n business_unit: business_unit\n };\n AEJSWP.debugOutput('Firing optin_success_ae event with data: ', analytics_data);\n AEJSWP.executeFunctionByName(UMGECRM.analytics_tracking_delegate + '.optin_success', window, analytics_data);\n }\n }\n }\n }\n break;\n }\n break;\n //track opt-in for adhoc forms\n case 'optin_success_adhoc':\n switch(UMGECRM.sub_data.ecrm) {\n case 'umgapi':\n //loop through the opt-in choices\n for(var sub_id in data.subs) {\n var checked = data.subs[sub_id];\n if(checked) {\n var optins = UMGECRM.sub_data['adhoc'][data.form_id]['subs'];\n if(typeof optins[sub_id] !== 'undefined') {\n var sub_data = optins[sub_id];\n var analytics_data = {\n list_id: sub_data.list_id,\n list_name: sub_data.list_name,\n business_unit: sub_data.business_unit\n };\n AEJSWP.debugOutput('Firing optin_success_adhoc event with data: ', analytics_data);\n AEJSWP.executeFunctionByName(UMGECRM.analytics_tracking_delegate + '.optin_success', window, analytics_data);\n }\n }\n }\n break;\n case 'exacttarget':\n //loop through the opt-in choices\n for(var sub_id in data.subs) {\n var checked = data.subs[sub_id];\n if(checked) {\n var optins = UMGECRM.sub_data['adhoc'][data.form_id]['subs'];\n if(typeof optins[sub_id] !== 'undefined') {\n var sub_data = optins[sub_id];\n var analytics_data = {\n list_id: sub_data.list_id,\n list_name: sub_data.list_name,\n business_unit: sub_data.business_unit\n };\n AEJSWP.debugOutput('Firing optin_success_adhoc event with data: ', analytics_data);\n AEJSWP.executeFunctionByName(UMGECRM.analytics_tracking_delegate + '.optin_success', window, analytics_data);\n }\n }\n }\n break;\n case 'viceversa':\n var send_ecrm = false;\n //check if ecrm call is being made if auto-post enabled or user ticked the opt-in box\n if(UMGECRM.sub_data['adhoc'][data.form_id]['vv-auto-post-enabled']) {\n send_ecrm = true;\n } else if(typeof data.subs['vv-manual'] !== 'undefined') {\n send_ecrm = true;\n }\n\n if(send_ecrm) {\n var analytics_data = {\n list_id: UMGECRM.sub_data['vv-campaign-id'],\n list_name: null,\n business_unit: null\n };\n AEJSWP.debugOutput('Firing optin_success_adhoc event with data: ', analytics_data);\n AEJSWP.executeFunctionByName(UMGECRM.analytics_tracking_delegate + '.optin_success', window, analytics_data);\n }\n break;\n case 'neolane':\n for(var sub_id in data.subs) {\n var checked = data.subs[sub_id];\n if(checked) {\n var optins = UMGECRM.sub_data['adhoc'][data.form_id]['subs'];\n if(typeof optins[sub_id] !== 'undefined') {\n var sub_data = optins[sub_id];\n var analytics_data = {\n list_id: sub_data['nl-artist-id'],\n list_name: sub_data['nl-label'],\n business_unit: sub_data['nl-source']\n };\n AEJSWP.debugOutput('Firing optin_success_adhoc event with data: ', analytics_data);\n AEJSWP.executeFunctionByName(UMGECRM.analytics_tracking_delegate + '.optin_success', window, analytics_data);\n }\n }\n }\n break;\n }\n\n break;\n }\n};\n"
},
{
"alpha_fraction": 0.6380090713500977,
"alphanum_fraction": 0.6877828240394592,
"avg_line_length": 19.090909957885742,
"blob_id": "10790a4f82a5cd90e412906f4cb7e7d10c3a33b1",
"content_id": "b07fd789c2d35f8e8288780bb07322204cdc71d5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 221,
"license_type": "no_license",
"max_line_length": 83,
"num_lines": 11,
"path": "/Week 4 - Python/Clase_3_Python/functions.py",
"repo_name": "Joseamica/Bootcamp.github.io",
"src_encoding": "UTF-8",
"text": "# @TODO: Write a function that returns the arithmetic average for a list of numbers\n\n\n\n\ndef average(list):\n return sum(list) / len(list)\n\nprint(average([10,10,9.5]))\nprint(average([1, 5, 9]))\nprint(average(range(11)))\n"
},
{
"alpha_fraction": 0.7594155669212341,
"alphanum_fraction": 0.765584409236908,
"avg_line_length": 82.24324035644531,
"blob_id": "3cf5024c28ca9edfa4d0c54aedea4624c4abc580",
"content_id": "96c31077ee9534b97621e0b8c31f94dfdd3cfbd4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "HTML",
"length_bytes": 3080,
"license_type": "no_license",
"max_line_length": 234,
"num_lines": 37,
"path": "/Week 11 - HTML & CSS/3/bootstrap/index.html",
"repo_name": "Joseamica/Bootcamp.github.io",
"src_encoding": "UTF-8",
"text": "<!DOCTYPE html>\n<html lang=\"en\">\n<head>\n <meta charset=\"UTF-8\">\n <title>Lorem Grid</title>\n <link rel=\"stylesheet\" href=\"https://stackpath.bootstrapcdn.com/bootstrap/4.3.1/css/bootstrap.min.css\" integrity=\"sha384-ggOyR0iXCbMQv3Xipma34MD+dH/1fQ784/j6cY/iJTQUOhcWr7x9JvoRxT2MZw1T\" crossorigin=\"anonymous\"> <link rel=\"stylesheet\" href=\"style.css\">\n</head>\n<body>\n<div class=\"row \">\n <h1 class=\"col-md-4\" style=\"background-color: aquamarine\">HEADER</h1>\n</div>\n<div class=\"row\" role=\"alert\">\n<p class=\"col-md-3\">Lorem, ipsum dolor sit amet consectetur adipisicing elit. Aut ea similique expedita dolore maiores a! Veniam, consectetur neque in repellat alias deserunt officia rem et mollitia accusantium ipsam! Odio, a!</p>\n<p class=\"col-md-3\">Lorem, ipsum dolor sit amet consectetur adipisicing elit. Aut ea similique expedita dolore maiores a! Veniam, consectetur neque in repellat alias deserunt officia rem et mollitia accusantium ipsam! Odio, a!</p>\n<p class=\"col-md-3\">Lorem, ipsum dolor sit amet consectetur adipisicing elit. Aut ea similique expedita dolore maiores a! Veniam, consectetur neque in repellat alias deserunt officia rem et mollitia accusantium ipsam! Odio, a!</p>\n<p class=\"col-md-3\"></p>\n\n</div>\n\n<div class=\"row\">\n<p class=\"col-md-2\">Lorem, ipsum dolor sit amet consectetur adipisicing elit. Aut ea similique expedita dolore maiores a! Veniam, consectetur neque in repellat alias deserunt officia rem et mollitia accusantium ipsam! Odio, a!</p>\n<p class=\"col-md-2\">Lorem, ipsum dolor sit amet consectetur adipisicing elit. Aut ea similique expedita dolore maiores a! Veniam, consectetur neque in repellat alias deserunt officia rem et mollitia accusantium ipsam! Odio, a!</p>\n<p class=\"col-md-2\">Lorem, ipsum dolor sit amet consectetur adipisicing elit. Aut ea similique expedita dolore maiores a! Veniam, consectetur neque in repellat alias deserunt officia rem et mollitia accusantium ipsam! Odio, a!</p>\n<p class=\"col-md-2\">Lorem, ipsum dolor sit amet consectetur adipisicing elit. Aut ea similique expedita dolore maiores a! Veniam, consectetur neque in repellat alias deserunt officia rem et mollitia accusantium ipsam! Odio, a!</p>\n<p class=\"col-md-2\">Lorem, ipsum dolor sit amet consectetur adipisicing elit. Aut ea similique expedita dolore maiores a! Veniam, consectetur neque in repellat alias deserunt officia rem et mollitia accusantium ipsam! Odio, a!</p>\n<p class=\"col-md-2\">Lorem, ipsum dolor sit amet consectetur adipisicing elit. Aut ea similique expedita dolore maiores a! Veniam, consectetur neque in repellat alias deserunt officia rem et mollitia accusantium ipsam! Odio, a!</p>\n\n\n</div>\n\n<div class=\"row\">\n <p class=\"col-md-6\">Lorem, ipsum dolor sit amet consectetur adipisicing elit. Aut ea similique expedita dolore maiores a! Veniam, consectetur neque in repellat alias deserunt officia rem et mollitia accusantium ipsam! Odio, a!</p>\n <p class=\"col-md-6\">Lorem, ipsum dolor sit amet consectetur adipisicing elit. Aut ea similique expedita dolore maiores a! Veniam, consectetur neque in repellat alias deserunt officia rem et mollitia accusantium ipsam! Odio, a!</p>\n\n</div>\n</body>\n</html>\n"
},
{
"alpha_fraction": 0.6616161465644836,
"alphanum_fraction": 0.6666666865348816,
"avg_line_length": 22.352941513061523,
"blob_id": "caa96693b865a725f0d1dc5c93559a097ed078e9",
"content_id": "f238e35020d9da5d19e3595036f63b4c24599cee",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 396,
"license_type": "no_license",
"max_line_length": 47,
"num_lines": 17,
"path": "/Week 4 - Python/Clase_2_Python/quick_check.py",
"repo_name": "Joseamica/Bootcamp.github.io",
"src_encoding": "UTF-8",
"text": "# Print Hello User!\nprint(\"Hello user!\")\n\n# Take in User Input\nuserName = input(\"What is your name? \")\n\n# Respond Back with User Input\nprint(f\"Hello {userName.title()}!\")\n\n# Take in the User Age\nuserAge = int(input(\"What is your age? \"))\n\n# Respond Back with a statement based on age\nif userAge >= 18:\n print(\"Ah... A well traveled soul are ye.\")\nelse:\n print(\"Awww... you're just a baby!\")"
},
{
"alpha_fraction": 0.5789473652839661,
"alphanum_fraction": 0.5789473652839661,
"avg_line_length": 18,
"blob_id": "ea42adb42a96ada96f445c5c60fe0aef62d96c95",
"content_id": "f5f02402e061fb019b0c526310914c9e8ec37de0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 38,
"license_type": "no_license",
"max_line_length": 19,
"num_lines": 2,
"path": "/Week 6 - APIs/Clase_1/retrieve_articles/config.py",
"repo_name": "Joseamica/Bootcamp.github.io",
"src_encoding": "UTF-8",
"text": "# Add your API key\napi_key = \"9pXDVJII8uuLc0uE\"\n"
},
{
"alpha_fraction": 0.5750988125801086,
"alphanum_fraction": 0.5810276865959167,
"avg_line_length": 18.5,
"blob_id": "ed7fdab89168af36d62b85de40bee518ca842ead",
"content_id": "2f19f8b719c750ab91645542188014a95115a6fc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 506,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 26,
"path": "/Week 4 - Python/Clase_2_Python/Ejercicio_Netflix_CSV/netflixJA.py",
"repo_name": "Joseamica/Bootcamp.github.io",
"src_encoding": "UTF-8",
"text": "import csv\n\nfile = './netflix_ratings.csv'\nf = open(file)\ncsv_f = csv.reader(f)\n\ninputName = input(\"What tv show or movie do you want to search for? \")\nrating = \"\"\nuser_rating = \"\"\na = False\n\nfor row in csv_f:\n if row[0] == inputName: \n rating = row[1]\n user_rating = row[5]\n a = True\n\n\nif a == True: \n print(f\"\\n{inputName} is rated {rating} with a rating of {user_rating}\")\nelse:\n print(f\"\\n{inputName} is not on our database! Ponte Buzo, Caperuso!\")\n\n\n\nprint(f.read())"
},
{
"alpha_fraction": 0.5090909004211426,
"alphanum_fraction": 0.5304812788963318,
"avg_line_length": 18.06122398376465,
"blob_id": "7828a7e95251b1ff448180edebe5437da4a5c8ff",
"content_id": "85447dd87687f74ba6f510c0ab80d59ddc8530eb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 935,
"license_type": "no_license",
"max_line_length": 55,
"num_lines": 49,
"path": "/Week 12 - Web-Scraping-and-Document-Databases/3/Activities/08-Stu_Render_From_Mongo/Unsolved/app.py",
"repo_name": "Joseamica/Bootcamp.github.io",
"src_encoding": "UTF-8",
"text": "from flask import Flask, render_template\nimport pymongo\n\napp = Flask(__name__)\n\n# @TODO: setup mongo connection\nconn = 'mongodb://localhost:27017'\n\nclient = pymongo.MongoClient(conn)\n\n# @TODO: connect to mongo db and collection\ndb = client.store_inventory\ndb.produce.drop()\n\n\ndb.produce.insert_many(\n [\n {\n \"type\": \"apples\",\n \"cost\": .23,\n \"stock\": 333\n },\n {\n \"type\": \"oranges\",\n \"cost\": .30, \n \"stock\": 400\n },\n {\n \"type\": \"mango\",\n \"cost\": .50,\n \"stock\": 100\n }\n\n ]\n)\n\n# Set route\[email protected]('/')\ndef index():\n # Store the entire team collection in a list\n produces = list(db.produce.find())\n print(produces)\n\n # Return the template with the teams list passed in\n return render_template('index.html', p=produces)\n\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n\n"
},
{
"alpha_fraction": 0.4326086938381195,
"alphanum_fraction": 0.45652174949645996,
"avg_line_length": 40.818180084228516,
"blob_id": "7fa5696f7a56fb48576767e4d7a710f156199bc9",
"content_id": "aaba5698a859351c19bd07314b02c297b9b6b617",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 460,
"license_type": "no_license",
"max_line_length": 222,
"num_lines": 11,
"path": "/Week 4 - Python/Clase_3_Python/hobbybook.py",
"repo_name": "Joseamica/Bootcamp.github.io",
"src_encoding": "UTF-8",
"text": "\nmyDict = {\n \"name\":\"Jose Antonio\",\n \"age\":25,\n \"hobbies\":[\"Coding\",\"Gym\",\"xbox\"],\n \"week\":\n {\n 'Monday': '6:00 am',\n 'AllWeek': '7:00 am'\n }\n }\nprint(f\"Hi, I'm {myDict['name']}\\nMy hobbies are {myDict['hobbies'][0]}, {myDict['hobbies'][1]} and {myDict['hobbies'][2]}\\nMonday I wake up {myDict['week']['Monday']} and the rest of the week {myDict['week']['AllWeek']}\")"
},
{
"alpha_fraction": 0.6744186282157898,
"alphanum_fraction": 0.6744186282157898,
"avg_line_length": 20.5,
"blob_id": "6bf63bbe46a4e1fc4a474e7bfeeca10da8ecc418",
"content_id": "7637c207ecbc1b9b06b809644791fff0b36cbe3b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 43,
"license_type": "no_license",
"max_line_length": 24,
"num_lines": 2,
"path": "/Tareas/Tarea6_APIs/api_keys.py",
"repo_name": "Joseamica/Bootcamp.github.io",
"src_encoding": "UTF-8",
"text": "# OpenWeatherMap API Key\napi_key = \"752d8c9bd81f78cbe64e5ee3c61b4a72\"\n"
},
{
"alpha_fraction": 0.6957851648330688,
"alphanum_fraction": 0.7029231786727905,
"avg_line_length": 29.65625,
"blob_id": "14046b2ba12b293f013488ae67903ca928fcc50f",
"content_id": "5626d2f8cbd448f5ab6d6c1d2ee30e2e14bf5021",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2942,
"license_type": "no_license",
"max_line_length": 150,
"num_lines": 96,
"path": "/Tareas/Tarea12_WebScrapping/scrape_mars.py",
"repo_name": "Joseamica/Bootcamp.github.io",
"src_encoding": "UTF-8",
"text": "# Imports\n\nimport pandas as pd\nfrom bs4 import BeautifulSoup as bs\nimport requests\nfrom splinter import Browser\nimport pymongo\nimport re\n\n\nconn = 'mongodb://localhost:27017'\n\n\ndef scrapeMars():\n\n\t# Set up Splinter\n\texecutable_path = {'executable_path': '/usr/local/bin/chromedriver'}\n\twith Browser('chrome', **executable_path, headless=False) as browser:\n\t\t\n\t\t# Soupify the NASA Mars url\n\t\turl = f\"https://mars.nasa.gov/news/\"\n\t\tbrowser.visit(url)\n\t\tsoup = bs(browser.html, 'html.parser')\n\t\tnewsTitle = soup.find(\"div\", class_=\"content_title\").text\n\t\tnewsDescription = soup.find(\"div\", class_=\"article_teaser_body\").text\n\n\n\t\t# Visit the JPL site\n\t\tjplUrl = f\"https://www.jpl.nasa.gov\"\n\t\tmarsImagesUrlParam = \"/spaceimages/?search=&category=Mars\"\n\t\tbrowser.visit(jplUrl + marsImagesUrlParam)\n\t\tsoup = bs(browser.html, 'html.parser')\n\t\tarticle = soup.find(\"article\", class_=\"carousel_item\")\n\t\timage = article[\"style\"].split(\"'\")\n\t\tfeatured_image_url = jplUrl + image[1]\n\n\n\t\t# Mars Weather\n\t\tmarsWeatherTwitterUrl = f\"https://twitter.com/marswxreport?lang=en\"\n\t\tbrowser.visit(marsWeatherTwitterUrl)\n\t\tsoup = bs(browser.html, 'html.parser')\n\t\tmars_weather = soup.find('p', class_=\"TweetTextSize TweetTextSize--normal js-tweet-text tweet-text\").text\n\t\tmars_weather = soup.find('p', text = re.compile('InSight'), attrs = {'class' : 'TweetTextSize TweetTextSize--normal js-tweet-text tweet-text'}).text\n\n\t\t# Mars Facts\n\t\tmarsFactsUrl = f\"https://space-facts.com/mars/\"\n\t\tmarsFacts = pd.read_html(marsFactsUrl)\n\t\tmarsFacts = marsFacts[1]\n\t\tmarsFacts.columns = [\"Description\", \"Value\"]\n\t\tmarsFacts.set_index(\"Description\", inplace = True)\n\n\n\t\t# Mars Hemispheres\n\t\tastrogeologyUrl = f\"https://astrogeology.usgs.gov\"\n\t\tsearchUrl = f\"/search/results?q=hemisphere+enhanced&k1=target&v1=Mars\"\n\t\tmainPageUrl = astrogeologyUrl + searchUrl\n\t\timageList = []\n\t\tbrowser.visit(mainPageUrl)\n\t\tsoup = bs(browser.html, 'html.parser')\n\t\themispheresLinks = soup.find_all(\"div\", class_=\"description\")\n\t\tfor link in hemispheresLinks:\n\t\t\themisphereUrl = astrogeologyUrl + link.a[\"href\"]\n\t\t\tbrowser.visit(hemisphereUrl)\n\t\t\tsoup = bs(browser.html, 'html.parser')\n\t\t\timage = astrogeologyUrl + soup.find(\"img\", class_=\"wide-image\")[\"src\"]\n\t\t\ttitle = soup.find(\"h2\", class_=\"title\").text.replace(\" Enhanced\", \"\")\n\t\t\timageList.append( { \"title\" : title, \"img_url\" : image } )\n\n\t\n\tdata = {\n\t\t\"newsTitle\" : newsTitle,\n\t\t\"newsDescription\" : newsDescription,\n\t\t\"featuredImageUrl\" : featured_image_url,\n\t\t\"marsWeather\" : mars_weather,\n\t\t\"marsFacts\" : marsFacts.to_dict(),\n\t\t\"imageList\" : imageList\n\t}\n\treturn data\n\n\ndef storeInDb(data):\n\tconn = 'mongodb://localhost:27017'\n\tclient = pymongo.MongoClient(conn)\n\tdb = client.Mars\n\tdb.MarsData.drop()\n\tdb.MarsData.insert(data)\n\tclient.close()\n\n\ndef getData():\n conn = 'mongodb://localhost:27017'\n client = pymongo.MongoClient(conn)\n db = client.Mars\n collection = db.MarsData\n data = collection.find_one()\n return data"
},
{
"alpha_fraction": 0.6083915829658508,
"alphanum_fraction": 0.625,
"avg_line_length": 25.022727966308594,
"blob_id": "0710f2cebdc2eae7f7a4600ea8bfa32c38540730",
"content_id": "464db5dbe24b19c22f850a052d640cf5d7562a28",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1144,
"license_type": "no_license",
"max_line_length": 85,
"num_lines": 44,
"path": "/Week 4 - Python/Clase_2_Python/kid_in_candy_store.py",
"repo_name": "Joseamica/Bootcamp.github.io",
"src_encoding": "UTF-8",
"text": "import itertools\n\n# The list of candies to print to the screen\ncandyList = [\"Snickers\", \"Kit Kat\", \"Sour Patch Kids\", \"Juicy Fruit\", \"Swedish Fish\",\n \"Skittles\", \"Hershey Bar\", \"Starbursts\", \"M&Ms\"]\n\n# The amount of candy the user will be allowed to choose\nallowance = 5\n\n# The list used to store all of the candies selected inside of\ncandyCart = []\n\n\n# Print out options\nfor x in range(len(candyList)):\n print(\"[\"+str(x)+\"]\" + candyList[x])\n\n\nfor i in range(allowance):\n uInput = int(input(\"Number? \"))\n if uInput == 0:\n candyCart.append(candyList[0])\n elif uInput == 1:\n candyCart.append(candyList[1])\n elif uInput == 2:\n candyCart.append(candyList[2])\n elif uInput == 3:\n candyCart.append(candyList[3])\n elif uInput == 4:\n candyCart.append(candyList[4])\n elif uInput == 5:\n candyCart.append(candyList[5])\n elif uInput == 6:\n candyCart.append(candyList[6])\n elif uInput == 7:\n candyCart.append(candyList[7])\n elif uInput == 8:\n candyCart.append(candyList[8])\n\n \nprint(candyCart)\n \nfor candies in candyCart:\n print(candies)"
},
{
"alpha_fraction": 0.7304747104644775,
"alphanum_fraction": 0.7304747104644775,
"avg_line_length": 45.64285659790039,
"blob_id": "ed49acb361925909344bb0d0a0da76d2540c7397",
"content_id": "f645a5a45a0ef2960600685b0468f6a841db84e0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 653,
"license_type": "no_license",
"max_line_length": 182,
"num_lines": 14,
"path": "/Week 11 - HTML & CSS/2/Justin Bieber – Purpose Available Now_files/ae-cache.js",
"repo_name": "Joseamica/Bootcamp.github.io",
"src_encoding": "UTF-8",
"text": "// first, add custom aejsready handler\nAEJSWP.add_aejsready_handler(\"myAeJSReadyFunction\");\n \n// custom handler is passed the aeJS object, which can then\n// be used to add settings and custom event handlers\nfunction myAeJSReadyFunction(aeJS) {\n aeJS.events.onLogin.addHandler(myLoginHandler);\n aeJS.events.onUser.addHandler(myLoginHandler);\n}\nfunction myLoginHandler(event) {\n // replace login area with logged in html\n var html = '<a id=\"login\" class=\"logout tracking-event\" data-tracking-event=\"AE|Sign-Out\" onclick=\"AEJSWP.aeJS.logout()\" href=\"#\"><i class=\"icon\">u</i><span>Sign Out</span></a>';\n jQuery('#login-target').html(html);\n}\n"
},
{
"alpha_fraction": 0.6915662884712219,
"alphanum_fraction": 0.6915662884712219,
"avg_line_length": 58.42856979370117,
"blob_id": "f56a77c9b9bfab0ab8c106b810eb008ab2489fc9",
"content_id": "9c713cd432ba037e238821874dd3839d2141ac3e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 415,
"license_type": "no_license",
"max_line_length": 185,
"num_lines": 7,
"path": "/Week 4 - Python/Clase_1_Python/basic_variables_ej_2.py",
"repo_name": "Joseamica/Bootcamp.github.io",
"src_encoding": "UTF-8",
"text": "name = input(str(\"Name? \"))\nneighbor = input(str(\"Neighbor name? \"))\n\nmonths_coding = input(\"How much time do \" + name + \" has been coding? \")\nneighbor_months_coding = input(\"How much time do \" + neighbor + \" has been coding? \")\n\nprint(f\"\\nMy name is {name.title()} and I've been coding for {months_coding} months.\\n\\nMy neighbor name is {neighbor.title()} and has been coding for {neighbor_months_coding} months.\")"
},
{
"alpha_fraction": 0.6170143485069275,
"alphanum_fraction": 0.6258148550987244,
"avg_line_length": 26.159292221069336,
"blob_id": "fa33cb143f0f7413528c71ce8f896d456c094e7f",
"content_id": "bd90b090fb9121e8f99bf260de2bf0c2495176e3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3102,
"license_type": "no_license",
"max_line_length": 109,
"num_lines": 113,
"path": "/Tareas/Tarea10_Adv_SQL/Resources/home.py",
"repo_name": "Joseamica/Bootcamp.github.io",
"src_encoding": "UTF-8",
"text": "from flask import Flask, jsonify\n\napp = Flask(__name__)\n\[email protected](\"/\")\ndef welcome():\n \"\"\"List all available api routes.\"\"\"\n return (\n f\"Available Routes:<br/>\"\n f\"/api/v1.0/precipitation<br/>\"\n f\"/api/v1.0/stations<br/>\"\n f\"/api/v1.0/tobs<br/>\"\n f\"/api/v1.0/<start><br/>\"\n f\"/api/v1.0/<start>/<end>\"\n )\n\[email protected](\"/api/v1.0/precipitation\")\ndef precipitation():\n # Create our session (link) from Python to the DB\n session = Session(engine)\n\n \"\"\"Return a list of all measurments\"\"\"\n # Query all passengers\n results = session.query(Measurement.date, Measurement.prcp).all()\n\n session.close()\n\n # Create a dictionary from the row data and append to a list of all_measurments\n all_measurements = []\n for date, prcp in results:\n measurement_dict = {}\n measurement_dict[\"date\"] = date\n measurement_dict[\"percipitation\"] = prcp\n all_measurements.append(measurement_dict)\n\n return jsonify(all_measurements)\n\[email protected](\"/api/v1.0/stations\")\ndef stations():\n # Create our session (link) from Python to the DB\n session = Session(engine)\n\n \"\"\"Return a list of all stations names\"\"\"\n # Query all passengers\n results = session.query(Measurement.station).all()\n\n session.close()\n \n #return statement\n \n all_stations = []\n for station in results:\n single_station = station\n all_stations.append(station)\n \n return jsonify(all_stations)\n\n\[email protected](\"/api/v1.0/tobs\")\ndef tobs():\n # Create our session (link) from Python to the DB\n session = Session(engine)\n\n # Latest Date\n lastDate = session.query(Measurement.date).order_by(Measurement.date.desc()).first()\n lastDate = str(lastDate)\n lastDate = lastDate[2:]\n lastDate = lastDate[:-3]\n\n lastDate = dt.datetime.strptime(lastDate, '%Y-%m-%d')\n oneYearAgo = lastDate - dt.timedelta(days=365)\n \n lastYear = session.query(Measurement.date, Measurement.tobs).filter(Measurement.date >= oneYearAgo).all()\n\n session.close()\n \n year_temps = []\n for date, temp in lastYear:\n temp_dict = {}\n temp_dict[\"date\"] = date\n temp_dict[\"temperature\"] = temp\n year_temps.append(temp_dict)\n \n return jsonify(year_temps)\n\[email protected](\"/api/v1.0/<start>\")\ndef dateinfo(start):\n \n # Create our session (link) from Python to the DB\n session = Session(engine)\n \n # Start Date\n lastDate = session.query(Measurement.date).order_by(Measurement.date.desc()).first()\n lastDate = str(lastDate)\n lastDate = lastDate[2:]\n lastDate = lastDate[:-3]\n \n result = calc_temps_with_session(start, lastDate, session)\n \n return jsonify(result)\n\[email protected](\"/api/v1.0/<start>/<end>\")\ndef datestartend(start, end):\n \n # Create our session (link) from Python to the DB\n session = Session(engine)\n\n result = calc_temps_with_session(start, end, session)\n \n return jsonify(result)\n \nif __name__ == '__main__':\n app.run(debug=True, use_reloader=False)"
},
{
"alpha_fraction": 0.6468971967697144,
"alphanum_fraction": 0.705747663974762,
"avg_line_length": 30.574626922607422,
"blob_id": "d37ac196804f7398d8c70db80b6b6f7916f61208",
"content_id": "175ee0756418a05181a568df5c381b5453d2742a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 4367,
"license_type": "no_license",
"max_line_length": 280,
"num_lines": 134,
"path": "/Week 11 - HTML & CSS/2/Justin Bieber – Purpose Available Now_files/load.js",
"repo_name": "Joseamica/Bootcamp.github.io",
"src_encoding": "UTF-8",
"text": "\r\n\r\n/* UMGTAG_F4_ADTM V3 */\r\n\r\nvar DDO = {};\r\nDDO.specVersion = \"V3 8/1/2017 16:08\"\r\nDDO.siteType = \"standard\"\r\nDDO.prodURL = \"www.justinbiebermusic.com\"\r\nDDO.prodRSID = \"univmusicjustinbieber\"\r\nDDO.globalRSID = \"univmusicglobal\"\r\nDDO.devRSID = \"univmusicglobal-stage\"\r\nDDO.artistStore = \"justinbieber.shop.bravadousa.com\"\r\nDDO.storeType = \"Bravado\"\r\nDDO.addFilter = \"twitter.com,formbuilder.umgd.net,smarturl.it,bieberforever.com,instagram.com,vine.co,facebook.com,youtube.com,privacypolicy.umusic.com,parentalguide.org,whymusicmatters.org,fahlo.me,shots.com,instagram.com,justinbieber.tumblr.com,pages.umusic-mail.com,defjam.com\"\r\n\r\n \r\nDDO.siteData = {\r\n\"siteDomain\": window.location.host,\r\n\"siteName\":\"Justin Bieber Music\",\r\n\"siteStoreName\":\"Justin Bieber Shop\",\r\n\"siteFormat\":\"Artist\"\r\n}; \r\n\r\nDDO.artistData = {\r\n\"artistName\":\"Justin Bieber\", \r\n\"artistLabel\":\"Def Jam Records\",\r\n\"artistSubLabel\":\"N/A\"\r\n};\r\n\r\nDDO.pageData = {\r\n\"pageURL\": window.location.href, \r\n\"pageRef\": (document.referrer || \"No Referrer\") \r\n};\r\n\r\nDDO.navData = {\r\n\"K7621\":\"Justin Bieber Music:Home\",\r\n\"K7622\":\"Justin Bieber Music:News\",\r\n\"K7623\":\"Justin Bieber Music:Music\",\r\n\"K7631\":\"Justin Bieber Music:Release\",\r\n\"K7638\":\"Justin Bieber Music:News\",\r\n\"K7639\":\"Justin Bieber Shop:Store Home\",\r\n\"K7640\":\"Justin Bieber Shop:Product Detail\",\r\n\"K7641\":\"Justin Bieber Shop:View Cart\",\r\n\"K7642\":\"Justin Bieber Shop:Checkout\",\r\n\"K7643\":\"Justin Bieber Shop:Order Confirm\",\r\n\"K7644\":\"Justin Bieber Shop:Product Category:\" + (document.title || \"\")\r\n,\"default\":\"K7621\"\r\n\r\n};\r\n\r\nDDO.sectionData = {\r\n\"K7621\":\"Home\",\r\n\"K7622\":\"News\",\r\n\"K7623\":\"Music\",\r\n\"K7631\":\"Release\",\r\n\"K7638\":\"News\",\r\n\"K7639\":\"Shop-Home\",\r\n\"K7640\":\"Shop-Product\",\r\n\"K7641\":\"Shop-Cart\",\r\n\"K7642\":\"Shop-Checkout\",\r\n\"K7643\":\"Shop-Confirm\",\r\n\"K7644\":\"Shop-Category\"\r\n,\"default\":\"K7621\"\r\n\r\n};\r\n\r\nDDO.getPageNav = {\r\n\"/\":\"K7621\",\r\n\"/news/\":\"K7622\",\r\n\"/music/\":\"K7623\",\r\n\"/release/\":\"K7631\",\r\n\"news/\":\"K7638\",\r\n\"shop-/store/\":\"K7639\",\r\n\"shop-/product.aspx\":\"K7640\",\r\n\"shop-/cart.aspx\":\"K7641\",\r\n\"shop-/checkout.aspx\":\"K7642\",\r\n\"shop-/confirmed.aspx\":\"K7643\",\r\n\"shop-/dept.aspx\":\"K7644\",\r\n\"default\":\"K7621\"\r\n\r\n};\r\n \r\nDDO.getLinkDetail = {\r\n\"http://smarturl.it/JustinShop?IQid=site\":\"Navigation Click:Shop\",\r\n\"http://www.bieberfever.com/\":\"Navigation Click:FanClub\",\r\n\"http://smarturl.it/justinitunes\":\"Page Interaction:Music\",\r\n\"http://www.facebook.com/JustinBieber#!/JustinBieber/app_130121696309\":\"Page Interaction:Events\",\r\n\"http://twitter.com/justinbieber\":\"Social Click:Twitter\",\r\n\"http://www.facebook.com/JustinBieber\":\"Social Click:Facebook\",\r\n\"http://parentalguide.org/\":\"Page Interaction:Parental Guide\",\r\n\"http://www.whymusicmatters.org/\":\"Page Interaction:WhyMusicMatters\",\r\n\"http://privacypolicy.umusic.com/\":\"Page Interaction:PrivacyPolicy\",\r\n\"http://privacypolicy.umusic.com/terms/\":\"Page Interaction:TermsOfUse\",\r\n\"http://formbuilder.umgd.net/FormSubmission/View/642\":\"Page Interaction:FormBuilder\",\r\n\"javascript:janrain.capture.ui.modal.open();\":\"Page Interaction:SignIn\",\r\n\"https://instagram.com/justinbieber/\":\"Navigation 
Click:Photos\",\r\n\"http://justinbieber.tumblr.com/\":\"Navigation Click:Fan Art\",\r\n\"https://fahlo.me/justinbieber/\":\"Social Click:Fahlo\",\r\n\"https://shots.com/justinbieber\":\"Social Click:Shots\",\r\n\"https://www.youtube.com/user/JustinBieberVEVO\":\"Social Click:YouTube\",\r\n\"https://twitter.com/justinbieber\":\"Social Click:Twitter\",\r\n\"https://www.facebook.com/JustinBieber\":\"Social Click:Facebook\",\r\n\"https://instagram.com/justinbieber\":\"Social Click:Instagram\",\r\n\"http://smarturl.it/iWDYM?IQid=site\":\"Page Interaction:Buy on iTunes\",\r\n\"http://smarturl.it/gWDYM?IQid=site\":\"Page Interaction:Get it on Google Play\",\r\n\"default\":\"noMatch\"\r\n\r\n};\r\n\r\nDDO.getLinkTitle = {\r\n\"default\":\"noMatch\"\r\n\r\n};\r\n\r\nDDO.getImageDetail = {\r\n\"http://d1do9jefdc5amy.cloudfront.net/wp-content/themes/justinbieber2/images/btn-itunes.png\":\"Purchase:Buy on iTunes\",\r\n\" http://d1do9jefdc5amy.cloudfront.net/wp-content/themes/justinbieber2/images/btn-google.png\":\"Purchase:Get it on Google Play\",\r\n\"default\":\"noMatch\"\r\n\r\n}; \r\n\r\nDDO.getLinkText = {\r\n\"K7621\":\"None\",\r\n\"K7622\":\"None\",\r\n\"K7623\":\"None\",\r\n\"K7631\":\"None\",\r\n\"K7638\":\"None\",\r\n\"default\":\"None\"\r\n\r\n};\r\n\r\n\r\n\r\n\r\n//LOAD ADTM\r\ndocument.write(\"<script src='//assets.adobedtm.com/e264f00eb0c37aa53085fd9876f9ec341123f732/satelliteLib-00855dce39693d0411d5f95c946521711ecf8531.js'><\\/script>\");"
},
{
"alpha_fraction": 0.5773584842681885,
"alphanum_fraction": 0.6188679337501526,
"avg_line_length": 41.83333206176758,
"blob_id": "d48a752b52289fd96cebc5c9871dce929643d151",
"content_id": "28b90da4588cb8235e013ca201a624fbe2bc42f0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 265,
"license_type": "no_license",
"max_line_length": 149,
"num_lines": 6,
"path": "/Week 11 - HTML & CSS/2/Justin Bieber – Purpose Available Now_files/init.js",
"repo_name": "Joseamica/Bootcamp.github.io",
"src_encoding": "UTF-8",
"text": "\r\n\r\nvar umg01 = 'new';\r\nif (typeof window.location.host != \"undefined\") {\r\n\tumg01 = window.location.hostname;\r\n}\r\n\r\ndocument.write('<script src=\"https://dmsn0cdst6m8x.cloudfront.net/' + umg01 + '/load.js?s=' + umg01 + '&t=' + (new Date).getTime() + '\"><\\/script>');"
},
{
"alpha_fraction": 0.7291296720504761,
"alphanum_fraction": 0.738898754119873,
"avg_line_length": 34.21875,
"blob_id": "dcc4536d94bb3b71fc24d4fac4b8847ed08dd1f9",
"content_id": "30c42232eafbde6038e63d5c46628465266a5719",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 1126,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 32,
"path": "/Week 14 - Intro-To-JavaScript/1/Activities/02-Evr_Python_to_JavaScript/Unsolved/1-HelloVariableWorld/hello-variable-world.js",
"repo_name": "Joseamica/Bootcamp.github.io",
"src_encoding": "UTF-8",
"text": "// Create a variable called \"name\" that holds a string\nvar name = \"jose\"\n// Create a variable called \"country\" that holds a string\nvar country = \"Mexico\"\n// Create a variable called \"age\" that holds an integer\nvar age = 25\n// Create a variable called \"hourlyWage\" that holds an integer\nvar hourlyWage = 12000\n// Calculate the \"dailyWage\" for the user\nvar dailyWage = hourlyWage * 8\n// Create a variable that holds a number as a string\nvar numberString = \"25\"\n// Create a variable called 'weeklyWage' that converts a string into an integer\nweeklyWage = dailyWage * 7\n// Create a variable called \"satisfied\" that holds a boolean\nsatisfied = true\n// Print out \"Hello <name>!\"\nconsole.log(\"Hello \" + name)\n// Print out what country the user entered\nconsole.log(country)\n// Print out the user's age\nconsole.log(age)\n// Print out the daily wage that was calculated\nconsole.log(dailyWage)\n// Print out the weekly wage that was calculated\nconsole.log(weeklyWage)\n// Using an IF statement to print out whether the users were satisfied\nif (satisfied == true){\n console.log(\"Yes you are satisfied\")\n} else {\n console.log(\"Damn!\")\n}"
},
{
"alpha_fraction": 0.6199150085449219,
"alphanum_fraction": 0.6320582628250122,
"avg_line_length": 28.92727279663086,
"blob_id": "714b9075ee901d89aa2a029aba4cf7fb83e74359",
"content_id": "988314d0ef33451f9d49ea27345f4ad2003979f6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 1647,
"license_type": "no_license",
"max_line_length": 86,
"num_lines": 55,
"path": "/Week 14 - Intro-To-JavaScript/3/Activities/03-Evr_D3_Table/Unsolved/static/js/index.js",
"repo_name": "Joseamica/Bootcamp.github.io",
"src_encoding": "UTF-8",
"text": "// Get a reference to the table body\nvar tbody = d3.select(\"tbody\");\n\n// Console.log the weather data from data.js\n\n\n// Step 1: Loop Through `data` and console.log each weather report object\n// data.forEach(function(v){\n// var row = tbody.append(\"tr\");\n// var weekdays = Object.values(v)[0]\n// var date = Object.values(v)[1]\n// var highTemp = Object.values(v)[2]\n// var lowTemp = Object.values(v)[3]\n\n// row.append(\"td\").text(weekdays);\n// row.append(\"td\").text(date)\n// row.append(\"td\").text(highTemp)\n// row.append(\"td\").text(lowTemp)\n\n// });\n\ndata.forEach((v) =>{\n\n// Step 2: Use d3 to append one table row `tr` for each weather report object\n// Don't worry about adding cells or text yet, just try appending the `tr` elements.\n var row = tbody.append(\"tr\");\n\n// Step 3: Use `Object.entries` to console.log each weather report value\n var weekdays = Object.values(v)[0];\n var date = Object.values(v)[1];\n var highTemp = Object.values(v)[2];\n var lowTemp = Object.values(v)[3];\n// Step 4: Use d3 to append 1 cell per weather report value (weekday, date, high, low)\n\n row.append(\"td\").text(weekdays);\n row.append(\"td\").text(date);\n row.append(\"td\").text(highTemp);\n row.append(\"td\").text(lowTemp);\n\n\n});\n\n\ntable = d3.select(\"table\");\ntable.attr(\"class\", \"table table-striped\");\n// Step 5: Use d3 to update each cell's text with\n// weather report values (weekday, date, high, low)\n\n\n// data.forEach(function(weatherReport){\n// Object.entries(weatherReport).forEach(function([key,value]){\n// var cell = d3.select(\"td\")\n// cell.text(value) \n// })\n// })\n\n"
}
] | 40 |
jaguidini/ML
|
https://github.com/jaguidini/ML
|
368199870343c1fb2956a779a457b03a27c841b2
|
b14b07b60883de13b1213c8e267f882a52ccc7fb
|
ec67e9900464c702bf5a568fb1b1c8482d5557d0
|
refs/heads/master
| 2020-08-21T18:18:01.321199 | 2019-10-19T18:44:36 | 2019-10-19T18:44:36 | 216,216,814 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.7022132873535156,
"alphanum_fraction": 0.7129443287849426,
"avg_line_length": 31.177778244018555,
"blob_id": "a8076b1b761f447b9c30700e0dd972bdcf05b3cc",
"content_id": "f9d8229155255adcffe25f92cb803c1bba6b4acc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1495,
"license_type": "no_license",
"max_line_length": 88,
"num_lines": 45,
"path": "/Exercício 09/randomForest.py",
"repo_name": "jaguidini/ML",
"src_encoding": "UTF-8",
"text": "import pandas as pd\r\nfrom sklearn import svm\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.utils import resample\r\nfrom sklearn.metrics import classification_report, confusion_matrix\r\nfrom sklearn.ensemble import RandomForestClassifier\r\nimport joblib\r\n\r\ndef get_wine_class(value):\r\n if value <= 3:\r\n return 'C'\r\n if value <= 6:\r\n return 'B'\r\n return 'A'\r\n\r\ndfRed = pd.read_csv(\"dados/winequality-red.csv\", delimiter=';')\r\ndfRed['quality'] = dfRed['quality'].map(get_wine_class)\r\n\r\n#Isolar a base de dados com a classe minoritaria\r\nC = dfRed[dfRed['quality']=='C']\r\nB = dfRed[dfRed['quality']=='B']\r\nA = dfRed[dfRed['quality']=='A']\r\n\r\n#print(dfRed['quality'].value_counts())\r\n\r\nupsample_C = resample(C, replace=True, n_samples=1372, random_state=0)\r\nupsample_B = resample(A, replace=True, n_samples=1372, random_state=0)\r\n\r\ndfBalance = pd.concat([upsample_C, B, upsample_B])\r\n\r\nattributes = dfBalance.drop('quality', axis=1)\r\nclasses = dfBalance['quality']\r\n\r\nX_train, X_test, y_train, y_test = train_test_split(attributes, classes, test_size=0.20)\r\nclassifier = RandomForestClassifier()\r\nmodel = classifier.fit(X_train, y_train) \r\nretorno = model.predict(X_test)\r\n\r\n# Acurácia e matriz de contingência\r\nprint(\"Resultado da Avaliação do Modelo\")\r\nprint(confusion_matrix(y_test, retorno))\r\nprint(classification_report(y_test, retorno))\r\n\r\njoblib.dump(classifier, 'models/randomForest.joblib')\r\nclassifier = joblib.load('models/randomForest.joblib')"
},
{
"alpha_fraction": 0.7215967178344727,
"alphanum_fraction": 0.7584441900253296,
"avg_line_length": 30.516128540039062,
"blob_id": "74e022fa6328872472f1d07105c6091502fff984",
"content_id": "6d7960f7ce5d992d601c787f676c8ef8d919d633",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 979,
"license_type": "no_license",
"max_line_length": 92,
"num_lines": 31,
"path": "/Exercício 05/setDiabetesRandomForest.py",
"repo_name": "jaguidini/ML",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nimport pandas as pd\nimport joblib\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import classification_report, confusion_matrix\nfrom sklearn.ensemble import RandomForestClassifier\n\ndiabetes = pd.read_csv(\"dados/diabetes.csv\", delimiter=',')\n\nattributes = diabetes.drop('class', axis=1)\nclasses = diabetes['class']\n\nnew_attributes = pd.get_dummies(attributes);\n\nX_train, X_test, y_train, y_test = train_test_split(new_attributes, classes, test_size=0.30)\n\nclassifier = RandomForestClassifier()\nclassifier.fit(X_train, y_train)\n\ny_pred = classifier.predict(X_test)\n\nprint(\"Resultado da Avaliação do Modelo Random forest classifier\")\nprint(confusion_matrix(y_test, y_pred))\nprint(classification_report(y_test, y_pred))\n\nprint(\"Classificar [6,25,46,59,0,46.2,1.1,21]\")\nparam=[[6,130,46,59,0,46.2,1.1,21]]\nprint(classifier.predict(param))\n\n#Salvar o modelo para uso posterior\njoblib.dump(classifier, 'models/random_forest.joblib')\n"
},
{
"alpha_fraction": 0.4498480260372162,
"alphanum_fraction": 0.5492401123046875,
"avg_line_length": 27.11504364013672,
"blob_id": "2bd47dbd2a4cee2f8f07a73cd9561d2f437286d3",
"content_id": "c65186c8ea6217a74d95d33132ce24946a99becd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3309,
"license_type": "no_license",
"max_line_length": 120,
"num_lines": 113,
"path": "/Exercício 04/PM25.py",
"repo_name": "jaguidini/ML",
"src_encoding": "UTF-8",
"text": "#%%\r\nimport numpy as np\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn.cluster import KMeans\r\nimport pickle\r\nimport collections\r\nimport warnings\r\nfrom scipy import stats\r\n\r\n#%%\r\n# Leitura do csv\r\n# , columns=['peso', 'altura']\r\ndf = pd.read_csv(\"dados/PM251.csv\", sep=',', usecols=['year','month','day','hour','DEWP','TEMP','PRES','Iws','Is','Ir'])\r\ndf.head(5)\r\n\r\n#%%\r\n# Correlação das colunas\r\ndf.corr(method ='pearson') \r\n\r\n#%%\r\n\r\ndef pearsonr_ci(x,y,alpha=0.05):\r\n r, p = stats.pearsonr(x, y)\r\n r_z = np.arctanh(r)\r\n se = 1 / np.sqrt(x.size - 3)\r\n z = stats.norm.ppf(1 - alpha / 2)\r\n lo_z, hi_z = r_z - z * se, r_z + z * se\r\n lo, hi = np.tanh((lo_z, hi_z))\r\n return r, p, lo, hi\r\n\r\n#%%\r\n\r\ndef checa_correlacao(r, p, lo, hi):\r\n if (r > lo) & (r < hi) :\r\n #if (r == base):\r\n val_valida = valida_forca_correlacao(r, lo, hi)\r\n return r, lo, hi, val_valida\r\n return 0, 0, 0, 0\r\n#%%\r\n\r\n#Funcao verifica a força da correlação entre os campos\r\ndef valida_forca_correlacao(r, lo, hi): \r\n neutro = (lo+ ((hi-lo) / 2))\r\n parte = (neutro - lo) / 3\r\n menor = lo\r\n menor2 = menor + parte\r\n menor1 = menor2 + parte\r\n neutro = menor1 + parte\r\n maior1 = neutro + parte\r\n maior2 = maior1 + parte\r\n maior = hi\r\n \r\n if(r >= menor) & (r < menor2): \r\n return \"RELAÇÃO FORTE NEGATIVA\"\r\n elif(r >= menor2) & (r < neutro): \r\n return \"RELAÇÃO FRACA NEGATIVA\"\r\n elif(r <= maior) & (r >= maior2): \r\n return \"RELAÇÃO FORTE POSITIVA\"\r\n elif(r < maior2) & (r >= neutro): \r\n return \"RELAÇÃO FRACA POSITIVA\"\r\n \r\n#%%\r\nc1 = 0\r\nfor i, j in df.iteritems():\r\n c2 = 0\r\n for i2, j2 in df.iteritems():\r\n r, p, lo, hi = pearsonr_ci(df[i], df[i2])\r\n val_r, val_lo, val_hi, val_valida = checa_correlacao(r, p, lo, hi)\r\n if(val_r > 0) & (i != i2):\r\n print(i, \"/\", i2,\" => \", val_r, val_lo, val_hi, val_valida)\r\n c2 += 1 \r\n c1 += 1 \r\n \r\n#%%\r\n\r\n# Normalização\r\n# , columns=['year', 'month','day','hour','DEWP','TEMP','PRES','Iws','Is','Ir'] \r\ndf = pd.read_csv(\"dados/PM251.csv\", sep=',')\r\nnormal = pd.get_dummies(df)\r\nprint(normal.head(5))\r\nfilename = 'dados/PM251_Normal.csv'\r\nnormal.to_csv(path_or_buf=filename)\r\n\r\n#%%\r\n#Gerar os modelos\r\nfilename = 'Models/PM251_Normal.sav'\r\nkmeans = KMeans(n_clusters=4).fit(normal)\r\ncentroids = kmeans.cluster_centers_\r\npickle.dump(kmeans, open(filename, 'wb'))\r\n\r\n#%%\r\n#Carregar os modelos\r\nkmeans = pickle.load(open(filename, 'rb'))\r\nresult = kmeans.predict([\r\n [2010,1,2,0,-16,-4,1020.0,1.79,0,0,0,0,1,0,1,0,0,0],\r\n [2014,1,2,0,-10,-5,120.0,1.05,0,0,0,0,1,0,0,1,0,0]\r\n # [2010,1,2,0,-16,-4,10200,179,0,0,0,0,1,0,1,0,0,0],\r\n # [2011,1,2,1,-15,-4,10200,268,0,0,0,0,1,0,0,0,0,1],\r\n # [2012,1,2,2,-11,-5,10210,357,0,0,0,0,1,0,0,0,0,1],\r\n # [2013,1,2,3,-7,-5,10220,536,1,0,0,0,1,0,0,0,0,1],\r\n # [2014,1,2,0,-10,-5,120,0,1.05,0,0,0,1,0,1,1,1,1],\r\n # [2013,1,2,5,-7,-6,10220,714,3,0,0,0,1,0,1,0,0,0],\r\n # [2012,1,2,6,-7,-6,10230,893,4,0,0,0,1,0,1,1,1,1],\r\n # [2011,1,2,7,-7,-5,10240,1072,0,0,0,0,1,0,1,0,0,0]\r\n])\r\n\r\n# O resultado é uma instância que será inferida.\r\n# O resultado representa o cluster ao qual essa instância pertence.\r\nprint(result)\r\n\r\n\r\n#%%\r\n"
},
{
"alpha_fraction": 0.8225806355476379,
"alphanum_fraction": 0.8225806355476379,
"avg_line_length": 30,
"blob_id": "986159d627092eccf70b2ec50cb1c39e8fef0c94",
"content_id": "73df79fe71fceed74bc336ad1db0010235a88fff",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 62,
"license_type": "no_license",
"max_line_length": 56,
"num_lines": 2,
"path": "/README.md",
"repo_name": "jaguidini/ML",
"src_encoding": "UTF-8",
"text": "# ML\nTrabalhos para entrega da disciplina de Machine Learning\n"
},
{
"alpha_fraction": 0.6682027578353882,
"alphanum_fraction": 0.7603686451911926,
"avg_line_length": 26.25,
"blob_id": "b9c6ef41037b908c686a790532e45ddfed229ec9",
"content_id": "4da326c10570ecc3ddbb31d75232b015f0f3e53f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 217,
"license_type": "no_license",
"max_line_length": 50,
"num_lines": 8,
"path": "/Exercício 05/readDiabetes.py",
"repo_name": "jaguidini/ML",
"src_encoding": "UTF-8",
"text": "import joblib\n\nclassifier = joblib.load('models/diabetes.joblib')\nparam=[[0,100,40,35,168,43.1,2.288,33]]\nprint(\"Predict\")\nprint(classifier.predict(param))\nprint(\"Predict proba\")\nprint(classifier.predict_proba(param))"
},
{
"alpha_fraction": 0.6473214030265808,
"alphanum_fraction": 0.7366071343421936,
"avg_line_length": 26.25,
"blob_id": "17439aa6b0aa01e396a7ef6ddc8692e75cd439ed",
"content_id": "22c30394e827955d70350c9fc048712ec69eb0f4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 224,
"license_type": "no_license",
"max_line_length": 50,
"num_lines": 8,
"path": "/Exercício 06/readOrtopedia.py",
"repo_name": "jaguidini/ML",
"src_encoding": "UTF-8",
"text": "import joblib\r\n\r\nclassifier = joblib.load('models/diabetes.joblib')\r\nparam=[[0,100,40,35,168,43.1,2.288,33]]\r\nprint(\"Predict\")\r\nprint(classifier.predict(param))\r\nprint(\"Predict proba\")\r\nprint(classifier.predict_proba(param))"
},
{
"alpha_fraction": 0.49696969985961914,
"alphanum_fraction": 0.6818181872367859,
"avg_line_length": 34.66666793823242,
"blob_id": "c6d0432c86922f7e0c63a4813e2b98f9741644d8",
"content_id": "d78844035280583db0aaa960fa490ebdd8846411",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 330,
"license_type": "no_license",
"max_line_length": 110,
"num_lines": 9,
"path": "/Exercício 09/readSVM.py",
"repo_name": "jaguidini/ML",
"src_encoding": "UTF-8",
"text": "import joblib\r\n\r\nkernel = \"linear\"\r\nclassifier = joblib.load('models/svm_{0}.joblib'.format(kernel))\r\nparam=[[7.7,0.49,0.26,1.9,0.062,9,31,0.9966,3.39,0.64,9.6],[8,0.59,0.16,1.8,0.065,3,16,0.9962,3.42,0.92,10.5]]\r\nprint(\"Predict\")\r\nprint(classifier.predict(param))\r\nprint(\"Predict proba\")\r\nprint(classifier.predict_proba(param))\r\n"
},
{
"alpha_fraction": 0.721794843673706,
"alphanum_fraction": 0.7346153855323792,
"avg_line_length": 30.200000762939453,
"blob_id": "769d4226099fbd6e87042263cc30b0e0ee1c5d46",
"content_id": "7f8de249437adf09854bbeebae3e4f37a4114531",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 780,
"license_type": "no_license",
"max_line_length": 104,
"num_lines": 25,
"path": "/Exercício 05/setDiabetes.py",
"repo_name": "jaguidini/ML",
"src_encoding": "UTF-8",
"text": "import pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.model_selection import cross_validate, cross_val_score\nfrom sklearn import svm\n\ndiabetes = pd.read_csv(\"dados/diabetes.csv\", delimiter=',')\n\nattributes = diabetes.drop('class', axis=1)\nclasses = diabetes['class']\n\n\nx_train, x_test, y_train, y_test = train_test_split(attributes, classes, test_size=0.20, random_state=0)\n\n#(Vector Machine)\nclassifier = svm.SVC(kernel='linear', C=1).fit(x_train, y_train)\n\nprint(\"cros_val_score\")\nscores = cross_val_score(classifier, x_test, y_test, cv=10)\nprint(scores)\nprint(\"Precisao media:\", scores.mean())\n\nprint(\"cros_validate\")\nscores = cross_validate(classifier, x_test, y_test, cv=10)\nprint(scores)\nprint(\"Precisao media:\", scores['test_score'].mean())\n"
},
{
"alpha_fraction": 0.7458483576774597,
"alphanum_fraction": 0.7537906169891357,
"avg_line_length": 31.731706619262695,
"blob_id": "2fea7115b59db14450ed8485b9fb297afd391ac2",
"content_id": "2a10a8db860201556e98b50b7b79d47d0f09f069",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1387,
"license_type": "no_license",
"max_line_length": 92,
"num_lines": 41,
"path": "/Exercício 06/setOrtopedia.py",
"repo_name": "jaguidini/ML",
"src_encoding": "UTF-8",
"text": "import pandas as pd\r\nimport joblib\r\nfrom sklearn.utils import resample\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.tree import DecisionTreeClassifier\r\nfrom sklearn.metrics import classification_report, confusion_matrix\r\n\r\ndata = pd.read_csv(\"dados/Ortopedia_Coluna.csv\", delimiter=';')\r\n\r\n#Isolar a base de dados com a classe minoritaria\r\nminoritaria = data[data['Fusao_de_Vertebras']==1]\r\nmajoritaria = data[data['Fusao_de_Vertebras']==0]\r\nminoritaria_upsample = resample(minoritaria, replace=True, n_samples=7900, random_state=0)\r\n\r\ndata_balanceado = pd.concat([majoritaria, minoritaria_upsample])\r\n\r\nattributes = data_balanceado.drop('Fusao_de_Vertebras', axis=1)\r\nclasses = data_balanceado['Fusao_de_Vertebras']\r\n\r\nnew_attributes = pd.get_dummies(attributes);\r\n\r\nprint(new_attributes)\r\n\r\nX_train, X_test, y_train, y_test = train_test_split(new_attributes, classes, test_size=0.20)\r\n\r\nclassifier = DecisionTreeClassifier()\r\nclassifier.fit(X_train, y_train)\r\n\r\ny_pred = classifier.predict(X_test)\r\n\r\nprint(\"Resultado da Avaliação do Modelo\")\r\nprint(confusion_matrix(y_test, y_pred))\r\nprint(classification_report(y_test, y_pred))\r\n\r\njoblib.dump(classifier, 'models/ortopedia.joblib')\r\nclassifier = joblib.load('models/ortopedia.joblib')\r\n\r\nprint(\"Predict\")\r\nprint(classifier.predict(X_test))\r\nprint(\"Predict proba\")\r\nprint(classifier.predict_proba(X_test))\r\n\r\n"
},
{
"alpha_fraction": 0.721787691116333,
"alphanum_fraction": 0.7608938813209534,
"avg_line_length": 28.83333396911621,
"blob_id": "88fcde2271e5bf3a8e4dc469f0d1ffe46161ad3e",
"content_id": "e56b78d44d18023a4cf6388a31be8e4b6229e871",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 897,
"license_type": "no_license",
"max_line_length": 92,
"num_lines": 30,
"path": "/Exercício 05/setDiabetesLogistic.py",
"repo_name": "jaguidini/ML",
"src_encoding": "UTF-8",
"text": "import pandas as pd\nimport joblib\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import classification_report, confusion_matrix\n\ndiabetes = pd.read_csv(\"dados/diabetes.csv\", delimiter=',')\n\nattributes = diabetes.drop('class', axis=1)\nclasses = diabetes['class']\n\nnew_attributes = pd.get_dummies(attributes);\n\nX_train, X_test, y_train, y_test = train_test_split(new_attributes, classes, test_size=0.30)\n\nclassifier = LogisticRegression()\nclassifier.fit(X_train, y_train)\n\ny_pred = classifier.predict(X_test)\n\nprint(\"Resultado da Avaliação do Modelo Logistica\")\nprint(confusion_matrix(y_test, y_pred))\nprint(classification_report(y_test, y_pred))\n\nprint(\"Classificar [6,25,46,59,0,46.2,1.1,21]\")\n\nparam=[[6,130,46,59,0,46.2,1.1,21]]\nprint(classifier.predict(param))\n\njoblib.dump(classifier, 'models/logistica.joblib')\n"
}
] | 10 |
tssovi/data-mining-challenge
|
https://github.com/tssovi/data-mining-challenge
|
e50d8cca5b614cbeb3bc835863393b03c48ba790
|
ade8c3051f309bd880562d7261fb692309834140
|
faf255bf386497a36bd15144ee3980cdca6b4793
|
refs/heads/master
| 2022-11-26T04:49:40.493214 | 2020-08-04T08:14:04 | 2020-08-04T08:14:04 | 284,909,590 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5787269473075867,
"alphanum_fraction": 0.5787269473075867,
"avg_line_length": 21.980770111083984,
"blob_id": "cf5796eda4aab4c0e6bb9bba8ca9aada940ff029",
"content_id": "78571eb1c30ed4a4592c04d4144c23d9c55367d4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1194,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 52,
"path": "/file_reader.py",
"repo_name": "tssovi/data-mining-challenge",
"src_encoding": "UTF-8",
"text": "# define a function to conver a text file to string\ndef file_to_text_converter():\n text = open(\"output_file.txt\", \"r\")\n text = text.read()\n\n text = text.replace(\" \", \"\")\n text = text.split(',')\n\n return text\n\n# define a function to check if it is an integer or not\ndef is_int(word):\n try:\n int(word)\n except ValueError:\n return False\n return True\n\n# define a function to check if it is a real number or not\ndef is_real_num(word):\n try: \n float(word)\n except ValueError: \n return False\n return True\n\n# define a function to identify word types\ndef identify_string_type(text):\n text_words = \"\"\n str_type = \"\"\n\n for word in text:\n if is_int(word):\n str_type = 'integer'\n elif is_real_num(word):\n str_type = 'real numbers'\n elif word.isalpha():\n str_type = 'alphabetical strings'\n elif word.isalnum():\n str_type = 'alphanumeric'\n \n if word:\n text_words += word + ' - ' + str_type + '\\n'\n text_words += '\\n-END-\\n'\n return text_words\n\n\ntext = file_to_text_converter()\n\ntext_words = identify_string_type(text)\n\nprint(text_words)"
},
{
"alpha_fraction": 0.6228878498077393,
"alphanum_fraction": 0.6420890688896179,
"avg_line_length": 37.30882263183594,
"blob_id": "d59f303da211c9ec524a1de75bdb2bd60fa7ad67",
"content_id": "a54ff5d25bb389227b84019457f98372ee3626bb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2604,
"license_type": "no_license",
"max_line_length": 86,
"num_lines": 68,
"path": "/file_writer.py",
"repo_name": "tssovi/data-mining-challenge",
"src_encoding": "UTF-8",
"text": "import random\nimport string\nimport os\n\n\n# define a function for creating a string of random alphanumerical characters\ndef generate_random_alphanumerics_string():\n length = random.randint(5, 30)\n output_str = ''\n for _ in range(length):\n output_str += random.choice(string.ascii_lowercase + string.digits)\n return output_str\n\n# define a function for creating a string of random alphabetical characters\ndef generate_random_alphabetical_string():\n length = random.randint(5, 30)\n output_str = ''.join(random.choice(string.ascii_lowercase) for x in range(length))\n return output_str\n\n# define a function for creating a random integers and converting them to a string\ndef generate_random_integer_string():\n output_str = random.randint(0, 10000)\n output_str = '{}'.format(output_str)\n return output_str\n\n# define a function for creating random real number and converting them to a string\ndef generate_random_real_number_string():\n length = random.randint(1, 10)\n output_str = round(random.uniform(0.0, 10000.0), length)\n output_str = '{}'.format(output_str)\n return output_str\n\n# define a function to generate text file with random strings\ndef generate_text_file_with_random_strings():\n file_name = 'output_file.txt'\n open(file_name, 'w')\n file_size = os.stat(file_name).st_size\n\n with open(file_name, 'a') as text_file:\n # run the loop until file size reach 10 MB\n while file_size < 10485760:\n # put our functions into a list\n function_list = [\n generate_random_alphanumerics_string,\n generate_random_alphabetical_string,\n generate_random_integer_string,\n generate_random_real_number_string\n ]\n\n # randomly choose a function to generate string\n dataType = random.choice(function_list)\n\n if dataType == generate_random_alphanumerics_string:\n output_str = generate_random_alphanumerics_string()\n # whitespaces shouldn't be more than 9\n i = random.randint(0, 9)\n output_str = ' ' * i + output_str + ' ' * i\n else:\n output_str = dataType()\n text_file.write(output_str + ', ')\n file_size = os.stat(file_name).st_size\n print('Current file size is: {} MB\\n'.format(file_size / 1000000))\n\n # once loop is done, print final file size and close file\n print('\\nFinal file size is: {} MB\\n'.format(file_size / 1000000))\n text_file.close()\n\ngenerate_text_file_with_random_strings()"
}
] | 2 |
msfchen/deep_learning
|
https://github.com/msfchen/deep_learning
|
976af151638c5849b98e026a79baa814de167ae4
|
598e0dde37dd24d8c20ac851c2b4d6140f8ac207
|
d0aeb0c5133a9db8a3f939865695cdba270491e1
|
refs/heads/master
| 2022-12-30T19:12:35.457929 | 2020-10-14T22:12:53 | 2020-10-14T22:12:53 | 294,375,954 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5623570084571838,
"alphanum_fraction": 0.5697940587997437,
"avg_line_length": 36.9782600402832,
"blob_id": "be8d5975923f66f7deb03ea929002717b430d3e0",
"content_id": "096d84e553448e8768d31ff00485b7b84ef308fb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1748,
"license_type": "no_license",
"max_line_length": 126,
"num_lines": 46,
"path": "/recurrentnn/characternml/cnn.py",
"repo_name": "msfchen/deep_learning",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n### YOUR CODE HERE for part 1e\nimport torch\nimport torch.nn as nn\n\nclass CNN(nn.Module):\n \"\"\" 1-dimensional convolutions.\n The convolutional layer has two hyperparameters:\n the kernel size k (also called window size), and \n the number of filters f (also called number of output features or number of output channels).\n \"\"\"\n def __init__(self, char_embed_dim: int, \n word_embed_dim: int, \n max_word_length: int=21, \n kernel_size: int=5):\n \"\"\" Init CNN Instance.\n @param char_embed_dim (int): character embedding dimension # e_char\n @param word_embed_dim (int): the size of the final word embedding # e_word (set filter number to be equal to e_word)\n @param max_word_length (int): max word length # m_word\n @param kernel_size (int): window size\n \"\"\" \n super(CNN, self).__init__()\n\n self.conv1d = nn.Conv1d(\n in_channels=char_embed_dim,\n out_channels=word_embed_dim, \n kernel_size=kernel_size,\n bias=True)\n\n # MaxPool simply takes the maximum across the second dimension\n self.maxpool = nn.MaxPool1d(max_word_length - kernel_size + 1)\n\n def forward(self, x):\n \"\"\" Take x_reshaped, compute the x_vonv_out.\n @param x (tensor): b_size, e_char, m_word\n \n @returns x_conv_out (tensor): b_size, e_word\n \"\"\" \n x_conv = self.conv1d(x) # => b_size, e_word, max_word_length - kernel_size + 1\n x_conv_out = self.maxpool(torch.relu(x_conv)).squeeze() # => b_size, e_word\n \n return x_conv_out\n\n### END YOUR CODE\n\n"
},
{
"alpha_fraction": 0.7743813395500183,
"alphanum_fraction": 0.7743813395500183,
"avg_line_length": 39.47058868408203,
"blob_id": "7d06de1755def5fad6fd3bd3903175eb151386da",
"content_id": "d036eb0d77f107e3d28565331672fa301a2f4db4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 687,
"license_type": "no_license",
"max_line_length": 105,
"num_lines": 17,
"path": "/README.md",
"repo_name": "msfchen/deep_learning",
"src_encoding": "UTF-8",
"text": "# Deep Learning\n\nThis repository contains some deep learning projects that I did in the past.\n\n## Table Of Contents\n\n### [Basic Neural Networks](https://github.com/msfchen/deep_learning/tree/master/basicnn)\n\n### [Word Vectors](https://github.com/msfchen/deep_learning/tree/master/wordvector)\n\n### [Convolutional Neural Networks](https://github.com/msfchen/deep_learning/tree/master/convolutionalnn)\n\n### [Recurrent Neural Networks](https://github.com/msfchen/deep_learning/tree/master/recurrentnn)\n\n### [Generative Adversarial Networks](https://github.com/msfchen/deep_learning/tree/master/gan)\n\n### [Attention Models](https://github.com/msfchen/deep_learning/tree/master/attentionmodel)"
},
{
"alpha_fraction": 0.7489567399024963,
"alphanum_fraction": 0.7605974078178406,
"avg_line_length": 90.08000183105469,
"blob_id": "bcea806b7169c0b314392cf6eb773dc105f251b4",
"content_id": "5dfc45e65f3d1cd4b25066c21feecfe845206749",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 4553,
"license_type": "no_license",
"max_line_length": 359,
"num_lines": 50,
"path": "/recurrentnn/README.md",
"repo_name": "msfchen/deep_learning",
"src_encoding": "UTF-8",
"text": "# Recurrent Neural Networks\n\n- Recurrent neural networks are designed to model sequence, in which the hidden state of the previous step is an input to the current step. The same set of parameter values are applied repeatedly to every steps of the sequence.\n- Long sequences tend to have vanishing gradient problem that give rise to LSTM and GRU units as well as other fixes, such as gradient clipping and skip connections.\n- More fancy RNN variants: Bidirectional RNNs and Multi-layer RNNs. \n\n## Language Model\n\n* [Predict Next Character with Recurrent Neural Network](https://github.com/msfchen/deep_learning/tree/master/recurrentnn/predictnextchar):\n - convert each sentence to a list of character token_ids, ending with EOS_int.\n - a batch data generator, optionally shuffled.\n - Gated Recurrent Unit (GRU) model using Trax framework; layers: ShiftRight -> Embedding -> n_layers of GRU -> Dense -> LogSoftmax\n - Train: CrossEntropyLoss, Adam optimizer(0.0005); Validation: CrossEntropyLoss, Accuracy; Test Evaluation: Perplexity\n - generating sentence, one predicted next character at a time\n* [Novel Writing with Character-Level RNN](https://github.com/msfchen/deep_learning/tree/master/recurrentnn/textgenbychar)\n* [TV-script Generation with Word-Level RNN](https://github.com/msfchen/deep_learning/tree/master/recurrentnn/tvscriptgeneration)\n\n## Word Tagging\n\n* [Named Entity Recognition](https://github.com/msfchen/deep_learning/tree/master/recurrentnn/ner):\n - explore the pre-processed labelled data (B-, I-, O)\n - a batch data generator, optionally shuffled.\n - Long Short-Term Memory (LSTM) model using Trax framework; layers: Embedding -> LSTM -> Dense -> LogSoftmax\n - Train: CrossEntropyLoss, Adam optimizer(0.01); Validation: CrossEntropyLoss, Accuracy; Test Evaluation: Accuracy (95.4%)\n\n## Neural Machine Translation\n\n* [Character-based Neural Machine Translation](https://github.com/msfchen/deep_learning/tree/master/recurrentnn/characternml):\n - The Spanish to English NMT system uses a character-based 1-D convolutional encoder and a word-level LSTM decoder plus a character-level LSTM decoder that will kick in when the word-level decoder produces an \\<UNK\\> token. 
Character-level decoder generates the target word one character at a time, which can produce rare and out-of-vocabulary target words.\n - Encoder Architecture: convert word to char idxs -> padding and embedding lookup -> MaxPool(ReLU(1-D Conv)) -> Highway Network Layer (with skip-connections) and Dropout\n - Character-level Decoder Architecture: char idxs -> char embeddings -> unidirectional LSTM -> linear layer -> softmax -> sum of char-level cross-entropy loss\n - Greedy decoding algorithm (as opposed to beam search algorithm) is used to generate the sequence of characters.\n\n## Siamese Networks\n\nA Siamese Network, also known as Twin Network, is composed of two identical networks that share the same weights while working in parallel on two different input vectors to compute similarity measures of of the corresponding output vectors.\n\n* [Predict Duplicate Questions](https://github.com/msfchen/deep_learning/tree/master/recurrentnn/predictdupquests):\n - explore the pre-processed is_duplicate labelled question pairs\n - Only use duplicate question pairs to prepare training data so that data generator will produce batches ([q1_1, q1_2, q1_3,...], [q2_1, q2_2, q2_3, ...]) where q1_i and q2_k are duplicate if and only if i = k.\n - tokenize each question => build vocab {token : idx} => convert questions to tensors; split train/valid to 8:2.\n - a batch data generator, optionally shuffled, that returns two lists of vectors of shape (batch_size * max_len)\n - Siamese Network using Trax framework; layers: Embedding -> LSTM -> Mean (average word vectors of each question output) -> Normalize (because cosine similarity = dot product of normalized vectors)\n - Triplet Loss Function with Hard Negative: A (anchor), P (positive), N (negative); Loss(A, P, N) = mean(Loss1 + Loss2); Loss1 = max(-cos(A, P) + mean_neg + alpha, 0); Loss2 = max(-cos(A, P) + closest_neg + alpha, 0)\n - Train: TripletLoss, Adam optimizer(0.01), lr_schedule = trax.lr.warmup_and_rsqrt_decay(400, 0.01); Validation: TripletLoss; Test Evaluation: Accuracy (69.1%)\n\n## Time Series\n\n* [Simple Time Series Prediction](https://github.com/msfchen/deep_learning/tree/master/recurrentnn/timeseries):\n - given a time series [n1, n2, ..., nt]; use input [n1, n2, ..., nt-1] and output [n2, ..., nt] to train a RNN so that it can predict the next item in a given test series."
},
{
"alpha_fraction": 0.7802741527557373,
"alphanum_fraction": 0.7859863042831421,
"avg_line_length": 92.82142639160156,
"blob_id": "e7387a0a9041261dbfd0a66a8f0f7c9737a8485a",
"content_id": "fe8061d604c5c55e5c5ecf7661b1485b72f091c8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2626,
"license_type": "no_license",
"max_line_length": 281,
"num_lines": 28,
"path": "/basicnn/README.md",
"repo_name": "msfchen/deep_learning",
"src_encoding": "UTF-8",
"text": "# Basic Neural Networks\n\n- Basic Neural Networks are composed of an input layer, one or more hidden layers, and an output layer that provides predictions. Layers are fully connected, meaning every node (neuron) in one layer is connected to all the nodes in the next layer.\n- The input of each layer is a linear combination of the output of the previous layer. The activation function of each neuron applies non-linear transformation on the linear combination of inputs.\n- When basic neural networks are applied to problems with hand-crafted features, it does not show particular advantages over other forms of machine learning methods, such as tree-based approaches.\n\n## Multiclass Classification\n\n* [Neural Dependency Parsing](https://github.com/msfchen/deep_learning/tree/master/basicnn/dependencyparser):\n - A dependency parser analyzes the grammatical structure of a sentence, establishing relationships between head words, and words which modify those heads.\n - In a transition-based parser, at every step, the parser applies one of the three transitions: SHIFT, LEFT-ARC, and RIGHT-ARC.\n - We will train a neural netwrok to predict which transition should be applied next, with the goal of maximizing performance on UAS (Unlabeled Attachemnt Score) metric.\n - PyTorch; the feature vector consists of a list of tokens (e.g., the last word in the stack, first word in the buffer, etc.) that is represented as a list of integers, which is then converted into a single concatenated embedding. The training is to minimize cross-entropy loss. \n\n## Binary Classification\n\n* [Predict Sentiment of Tweets](https://github.com/msfchen/deep_learning/tree/master/basicnn/tweetsentiment_dnn):\n - convert each tweet to a list of token_id\n - a batch data generator that provides equal number of positive and negative examples, optionally shuffled.\n - classifier using Trax framework; layers: Embedding -> Mean (average of word embeddings of a tweet) -> Dense -> LogSoftmax\n - Train: CrossEntropyLoss, Adam optimizer(0.01); Validation: CrossEntropyLoss, Accurary; Test Evaluation: Accuracy 99.31%\n \n## Regression\n\n* [Predict Bike Rental Count](https://github.com/msfchen/deep_learning/tree/master/basicnn/bikerental):\n - Structured data requires exploratory data analyses, feature extraction, and feature engineering. The target value is hourly rental count. \n - Data split (from 2-year of historical data): the last 21 days for test, the further prior 60 days for validation, all earlier 643 days for training\n - The simplest form of NN is used: one input layer, one hidden layer, and one output neuron; MSE is used as loss;"
},
{
"alpha_fraction": 0.7773842215538025,
"alphanum_fraction": 0.7817438840866089,
"avg_line_length": 98.21621704101562,
"blob_id": "bd225ffcf542a46ef89aba72159b7ab19a28ba36",
"content_id": "48405648adb29a33b71e6caece754aff5b306925",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 3682,
"license_type": "no_license",
"max_line_length": 425,
"num_lines": 37,
"path": "/wordvector/README.md",
"repo_name": "msfchen/deep_learning",
"src_encoding": "UTF-8",
"text": "# Word Vectors\n\n- The advantage of neural network models is that it can learn latent feature representations from raw natural inputs, such as language tokens or image pixels.\n- Word embeddings capture both syntactic and semantic features of words in a dense vector representation.\n\n\n* [Train Word2Vec with CBOW Model](https://github.com/msfchen/deep_learning/tree/master/wordvector/cbow): \n - In continuous bag of words (CBOW) model, we try to predict the center word given a few context words (the words around the center word).\n - A shallow neural network with one hidden layer is used; input is the average of all the one hot vectors of the context words; output is a softmax layer.\n - embs = (W1.T + W2)/2.0\n\n* [Train Word2Vec with Skip-Gram Model and Negative Sampling](https://github.com/msfchen/deep_learning/tree/master/wordvector/skipgram):\n - The setup of Skip-Gram is largely the same as CBOW, but we essentially swap input and output. The input is now the one hot vector of the center word.\n - The outputs are 2m vectors (m is the context window size), each of which will be turned into probability by softmax. We desire these probability vectors to match the true probabilities of the actual output.\n - Negative sampling is to improve computation efficiency by only sampling several negative examples, instead of looping over the entire vocabulary as required by the objective function.\n\n* [Naive Machine Translation and Locality Sensitive Hashing](https://github.com/msfchen/deep_learning/tree/master/wordvector/translate_lsh):\n - Naive Word Translation\n - train a transformation matrix R that projects English embeddings X to French embeddings Y, by minimizing the the Frobenius norm ||X R -Y||^2\n - use 1-nearest neighbor algorithm to search for an embedding 𝐟 (as a row) in the matrix 𝐘 which is the closest to the transformed vector 𝐞𝐑\n - Find Most Similar Tweets\n - given a new tweet, find the top most similar ones from a tweet corpus \n - a tweet is converted to a vector by the sum of all the word vectors of all the words it contains.\n - LSH provides an efficient way to find approximate K-NN\n\n* [Predict Word Relationships with Word2Vec Embeddings](https://github.com/msfchen/deep_learning/tree/master/wordvector/analogies):\n - predict analogies between words using pre-trained word embeddings GoogleNews-vectors-negative300\n - Compare word embeddings by using a similarity measure (the cosine similarity).\n - Use PCA to reduce the dimensionality of the word embeddings and plot them in two dimensions.\n - homonyms & similarity, synonyms & antonyms, analogies, and biases \n - Gensim word vector visualization \n\n* [Explore Pre-Trained BERT Embeddings](https://github.com/msfchen/deep_learning/tree/master/wordvector/explorebert):\n - In Word2Vec or Fasttext, each word has a fixed representation regardless of context. In BERT, same word may have different representations dependent upon the words around it.\n - BERT input format: begin and end tokens of a sentence; word piece tokenization; a segment ID to specify a sentence\n - The BERT tokenizer uses WordPiece model that creates a fixed-size vocabulary of individual characters, subwords, and words. The tokenizer first checks if the whole word is in the vocabulary. If not, it tries to break the word into the largest possible subwords contained in the vocabulary, and as a last resort will decompose the word into individual characters. 
Therefore, there is no problem of out of vocabulary words.\n - There are multiple ways to extract word vectors from a pre-trained BERT model, such as the last (of the 12) hidden layer, sum of the last four hidden layers, etc."
},
{
"alpha_fraction": 0.7597143054008484,
"alphanum_fraction": 0.7634285688400269,
"avg_line_length": 91.13157653808594,
"blob_id": "6709de66cb46888a3ab4106b90388bff8df73405",
"content_id": "d715ebfe7cc80d762362154ec80cda698163e8ff",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 3500,
"license_type": "no_license",
"max_line_length": 236,
"num_lines": 38,
"path": "/attentionmodel/README.md",
"repo_name": "msfchen/deep_learning",
"src_encoding": "UTF-8",
"text": "# Attention Models\n\n- Given a set of vectors of keys, values, and query, attention is a technique to compute a weighted sum of the values, dependent on the compatibility scores between the query and keys.\n- It is sometimes referred to as that the query attends to or focuses on particular parts of the keys or values, by giving different weights to different parts. \n- Attention Variants:\n\n | By | Variants |\n |-----------------------------------|--------------------------------------|\n | how attention scores are computed | basic dot-product attention, multiplicative attention, additive attention, scaled dot-product attention |\n | query-key relative location | Encoder-Decoder Attention, Causal Self Attention, Bi-directional Self Attention, Causal with Prefix Self Attention |\n | span of the attention | Global vs Local Attention; Soft vs Hard Attention; Locality Sensitive Hashing Attention |\n\n## Encoder-Decoder Attention Models\n\n* [Neural Machine Translation with Seq2Seq Model with Multiplicative Attention](https://github.com/msfchen/deep_learning/tree/master/attentionmodel/translation):\n - The Spanish to English NMT system uses a Bidirectional LSTM Encoder and a Unidirectional LSTM Decoder.\n - At each decoder timestep, the decoder hidden state is the query and all encoder hidden states are values. We get the attention scores using multiplicative attention.\n - We concatenate the attention output with the decoder hidden state and pass it through a linear layer, a Tanh, and a Dropout to attain the combined-output vector, which is used to produce a probability distribution over target words.\n - The loss at that timestep is the softmax cross entropy loss between the probability distribution and the actual target word. \n - Beam Search Decoding is applied. We keep track of the k (beam size) most probable partial translations (hypotheses) on each step. For the highest-scoring hypothesis at end, we backtrack to obtain the full hypothesis.\n\n## Transformer Language Models \n\n* [Article Summarization with Transformer Decoder](https://github.com/msfchen/deep_learning/tree/master/attentionmodel/transf_summarizer):\n - Training data are in the format of [Article][\\<EOS\\>][\\<pad\\>][Summary][\\<EOS\\>].\n - Training examples are batched by grouping similar lengthed examples in buckets, with varying bucket sizes depending on example length.\n - Transformer Decoder; Masked Multi-Head Self-Attention; Causal Attention\n - At inference time, an article is fed into the model and the model generates a summary one word at a time, using greedy decoding algorithm, until \\<EOS\\> token is generated.\n\n* [Question Answering with T5 Model](https://github.com/msfchen/deep_learning/tree/master/attentionmodel/t5_qa):\n - Subword Tokenization\n - BERT\n\n* [Chatbot with Reformer Model](https://github.com/msfchen/deep_learning/tree/master/attentionmodel/reformer_chatbot):\n - Training data are in the format of [Person 1:][message 1][Person 2:][message 2]...[Person 1:][message N][Person 2:][message N], where N is around 5.\n - Training examples are batched by grouping similar lengthed examples in buckets, with varying bucket sizes depending on example length.\n - Build Reformer Language Model; LSH Attention reduces computation cost; Reversible Layers reduce memory cost.\n - At inference time, [Person 1:][message 1][Person 2:] is fed into the model and the model generates subsequent dialogues one word at a time until the given max_len of tokens have been generated."
},
{
"alpha_fraction": 0.5862413048744202,
"alphanum_fraction": 0.590229332447052,
"avg_line_length": 29.33333396911621,
"blob_id": "fbfb5df98128cafeb08fec58d02e97f6b4b9eea3",
"content_id": "61ac9dba129b399ed945a988bdbe92835ee723b4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1003,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 33,
"path": "/recurrentnn/characternml/highway.py",
"repo_name": "msfchen/deep_learning",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n### YOUR CODE HERE for part 1d\nimport torch\nimport torch.nn as nn\n\nclass Highway(nn.Module):\n \"\"\" Highway Networks have a skip-connection controlled by a dynamic gate \"\"\"\n\n def __init__(self, embed_dim: int): \n \"\"\" Init Highway Instance.\n @param embed_dim (int): word embedding dimension\n \"\"\" \n super(Highway, self).__init__()\n \n self.conv_out_proj = nn.Linear(embed_dim, embed_dim, bias=True)\n self.gate = nn.Linear(embed_dim, embed_dim, bias=True)\n\n def forward(self, x_conv_out):\n \"\"\" Take x_conv_out, compute the x_highway.\n @param x_conv_out (matrix): batch_size x embed_dim\n\n @returns scores (matrix): batch_size x embed_dim\n \"\"\" \n x_proj = torch.relu(self.conv_out_proj(x_conv_out))\n x_gate = torch.sigmoid(self.gate(x_conv_out))\n\n x_highway = x_gate * x_proj + (1 - x_gate) * x_conv_out\n\n return x_highway\n\n### END YOUR CODE \n\n"
},
{
"alpha_fraction": 0.6026410460472107,
"alphanum_fraction": 0.6410564184188843,
"avg_line_length": 56.44827651977539,
"blob_id": "1df90d6a00aacf1f2a5091ffaff22fc68d6e643a",
"content_id": "278acc0486f1be219b8b78fb83c01fe2e2311d4c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1666,
"license_type": "no_license",
"max_line_length": 160,
"num_lines": 29,
"path": "/wordvector/cbow/README.md",
"repo_name": "msfchen/deep_learning",
"src_encoding": "UTF-8",
"text": "# Train Word Vectors with CBOW Model\n\n- The Continuous bag of words model\n - In continuous bag of words (CBOW) model, we try to predict the center word given a few context words (the words around the center word).\n - A shallow neural network with one hidden layer h is used; input X is the average of all the one hot vectors of the context words; output is a softmax layer.\n - h = W1X + b1; a = ReLU(h); z = W2a + b2; y = softmax(z)\n\n- Training the Model\n - V = size of vocab; N = dim of hidden layer, and the eventual word vector.\n - def initialize_model(N,V, random_seed=1) returns randomly initialized W1 of (N, V), W2 of (V, N), b1 of (N, 1), b2 of (V, 1)\n - def forward_prop(x, W1, W2, b1, b2) returns z and h, with shape of (V, 1) and (N, 1), respectively.\n - cross-entropy cost function: \n - logprobs = np.multiply(np.log(yhat),y) + np.multiply(np.log(1 - yhat), 1 - y)\n - cost = - 1/batch_size * np.sum(logprobs)\n - cost = np.squeeze(cost)\n - def back_prop(x, yhat, y, h, W1, W2, b1, b2, batch_size) returns gradients of matrices and biases, grad_W1, grad_W2, grad_b1, grad_b2\n - Compute l1 as W2^T (Yhat - Y)\n - def gradient_descent(data, word2Ind, N, V, num_iters, alpha=0.03)\n - W1, W2, b1, b2 = initialize_model(N,V, random_seed=282)\n - mini-batch size = 128\n - in each batch:\n - z, h = forward_prop(x, W1, W2, b1, b2)\n - yhat = softmax(z)\n - cost = compute_cost(y, yhat, batch_size)\n - grad_W1, grad_W2, grad_b1, grad_b2 = back_prop(x, yhat, y, h, W1, W2, b1, b2, batch_size)\n - Update weights and biases by - alpha * grad\n - return W1, W2, b1, b2\n\n- word vectors are defined as (W1.T + W2)/2.0\n"
},
{
"alpha_fraction": 0.7651892304420471,
"alphanum_fraction": 0.7681772708892822,
"avg_line_length": 101.97435760498047,
"blob_id": "bf9e45040c8a4f43e1c99676cdf311976e7e5be5",
"content_id": "f02ba7c3e5cc3e01cca82ca862d065050b761295",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 4028,
"license_type": "no_license",
"max_line_length": 279,
"num_lines": 39,
"path": "/gan/README.md",
"repo_name": "msfchen/deep_learning",
"src_encoding": "UTF-8",
"text": "# Generative Adversarial Networks\n\n- A Generative Adversarial Network (GAN) is composed of two adversarial networks, a discriminator and a generator. \n- The discriminator is a classifier that is trained to classify real or fake data. \n- The generator generates fake data from a random vector (a latent vector in a latent space). As the generator trains, it learns how to map latent vectors to recognizable data that can fool the discriminator.\n\n## Image Generation\n\n* [Hand-written Digits Generation with Deep GAN](https://github.com/msfchen/deep_learning/tree/master/gan/digitsimple):\n - Discriminator: 3 times of dropout(leaky_relu(Linear)) -> Linear \n - Generator: 3 times of dropout(leaky_relu(Linear)) -> tanh(Linear)\n - The losses will be binary cross entropy loss with logits, BCEWithLogitsLoss, which combines a sigmoid activation function and binary cross entropy loss in one function. total_loss = real_loss + fake_loss. For generator loss, the labels are flipped.\n - Training will involve alternating between training the discriminator optimizer and the generator optimizer.\n \n* [Street View House Numbers Generation with Deep Convolutional GAN](https://github.com/msfchen/deep_learning/tree/master/gan/housenumconv):\n - Discriminator: leaky_relu(conv) -> 2 times leaky_relu(BatchNorm(conv) -> flatten -> Linear\n - Generator: Linear -> de-flatten -> 2 times relu(BatchNore(transpose conv)) -> tanh(transpose conv)\n - The losses will be binary cross entropy loss with logits, BCEWithLogitsLoss, which combines a sigmoid activation function and binary cross entropy loss in one function. total_loss = real_loss + fake_loss. For generator loss, the labels are flipped.\n - Training will involve alternating between training the discriminator optimizer and the generator optimizer.\n\n* [Face Image Generation with Deep Convolutional GAN](https://github.com/msfchen/deep_learning/tree/master/gan/facegen):\n - Discriminator: leaky_relu(conv) -> 2 times leaky_relu(BatchNorm(conv) -> flatten -> Linear\n - Generator: Linear -> de-flatten -> 2 times relu(BatchNore(transpose conv)) -> tanh(transpose conv)\n - The losses will be binary cross entropy loss with logits, BCEWithLogitsLoss, which combines a sigmoid activation function and binary cross entropy loss in one function. total_loss = real_loss + fake_loss. For generator loss, the labels are flipped.\n - Training will involve alternating between training the discriminator optimizer and the generator optimizer.\n \n## Image-to-Image Translation\n\n- Given two sets (domains) of unordered and unpaired images, learn to transform images from one domain to another. This is an unsupervised learning, because these images do not come with labels. 
Also, there is no exact correspondences between individual images in those two sets.\n- Examples of domains: summer vs winter, Monet painting vs landscape photos, zebras vs horses, areial photos vs street map \n\n* [Unpaired Image-to-Image Translation using Cycle-Consistent Adversarial Networks](https://github.com/msfchen/deep_learning/tree/master/gan/cyclegan):\n - Goal: translate images from summer scene to winter scene or vice versa\n - Main Idea: two Discriminators, one for each domain; two CycleGenerators, one for each translation; Cycle-consistency loss: x → G(x) → F(G(x)) ≈ x and y → F(y) → G(F(y)) ≈ y\n - Residual Function = the difference between a mapping applied to x and the original input x; In our case, Cycle-consistency loss is a residual function.\n - Discriminator: ReLU(Conv) -> 3 times of ReLU(BatchNorm(Conv)) -> Conv \n - CycleGenerator: 3 times of ReLU(BatchNorm(Conv)) -> n times of (x + BatchNorm(Conv(ReLu(BatchNorm(Conv(x)))))) -> 2 times of ReLu(BatchNorm(ConvTranspose)) -> tanh(ConvTranspose)\n - real_MSE_loss = mean((D_out - 1)\\*\\*2); fake_MSE_loss = mean(D_out\\*\\*2); cycle_consistency_loss = lambda_weight\\*mean(abs(real_im - reconstructed_im))\n - Alternating between training the discriminators and the generators, for a specified number of training iterations. "
},
{
"alpha_fraction": 0.7672584056854248,
"alphanum_fraction": 0.7788461446762085,
"avg_line_length": 108.59459686279297,
"blob_id": "11630fa0250a5262e05056dbc19a2905fdd67ca0",
"content_id": "7fed3bcafcfcf4ad2e2f6ed80d332fbc1a5ed936",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 4056,
"license_type": "no_license",
"max_line_length": 578,
"num_lines": 37,
"path": "/convolutionalnn/README.md",
"repo_name": "msfchen/deep_learning",
"src_encoding": "UTF-8",
"text": "# Convolutional Neural Networks\n\n- Convolutional neural networks (CNNs) make the explicit assumption that the inputs are images with 3 dimensions: width, height, depth. Therefore, the layers of a CNN have neurons arranged in 3 dimensions.\n- There are three main types of layers to build CNN architectures: Convolutional Layer, Pooling Layer, and Fully-Connected Layer (exactly as seen in regular Neural Networks). \n- A [convolutional layer](https://github.com/msfchen/deep_learning/blob/master/convolutionalnn/visualization/conv_visualization.ipynb) contains a set of filters with learnable parameters. The height and width (receptive field) of the filters are smaller than those of the input volume. Filters are slid (convolved) across the width and height of the input and the dot products between the input and filters are computed at every spatial position. The output volume of the convolutional layer is obtained by stacking the activation maps of all filters along the depth dimension. \n- A [pooling layer](https://github.com/msfchen/deep_learning/blob/master/convolutionalnn/visualization/maxpooling_visualization.ipynb) will perform a downsampling operation along the spatial dimensions (width, height).\n\n## Image Classification\n\n* [Dog Breed Identification](https://github.com/msfchen/deep_learning/tree/master/convolutionalnn/dogbreed):\n - Goal: for a given dog image, provide an estimate of the dog's breed; for a given human image, provide the most resembling dog breed. \n - using pre-trained models:\n - use a pre-trained Open Source Computer Vision Haar Cascades classifier to detect human face.\n - use a pre-trained deep CNN, VGG-16, model to predict dogs. VGG-16 is trained to classify 1000 categories of objects, of which category idx 151 ~ 268 (inclusive) are dogs.\n - buld CNN model from scratch:\n - augment image data by transformations, such as resize, center crop, random horizontal flip, and random rotation; data splitted into train, valid, test sets.\n - model architecture: 4 times of Pool(ReLU(Conv)) + flatten + 2 times of Dropout(ReLU(Linear)) + Linear\n - CrossEntropyLoss; Adam(0.0007) Optimizer; test evaluation: accuracy\n - build CNN model by transfer learning from a pre-trained model:\n - load pre-trained VGG-19 model and freeze \"features\" layers parameters; change the output layer size from 1000 to 133 (the number of dog breeds in our training data)\n - CrossEntropyLoss; Adam(0.001) Optimizer for \"classifier\" layers parameters; test evaluation: accuracy\n\n## Image Style Transfer\n\n* [Combine the content of one image with the style of another image](https://github.com/msfchen/deep_learning/tree/master/convolutionalnn/styletransfer):\n - analyses of outputs of each layer of deep CNNs indicate that earlier layers capture lower level features, such as directional edges, colors, and color edges; and later layers capture more complex shapes, such as mouth, eyes, etc. 
\n - load the pre-trained VGG-19 model and freeze its parameters; load and normalize the two images; extract features at each convolutional layer from passed-in images; initialize a target image copied from content image.\n - run a learning process with Adam optimizer to update the target image to minimize the total loss:\n - total_loss = content_weight * content_loss + style_weight * style_loss\n - content_loss = mean((target_features['conv4_2'] - content_features['conv4_2'])**2) where conv4_2 is the 2nd from the last Conv layer\n - style_loss = sum over all layers of layer_style_loss / (d * h * w) where layer_style_loss = style_weights[layer] * mean((target_gram - style_gram)**2)\n\n## Image Compression\n\n* [Convolutional Autoencoder](https://github.com/msfchen/deep_learning/tree/master/convolutionalnn/autoencoder):\n - A compressed representation of images can save storage space and enable more efficient sharing.\n - The encoder portion will be made of convolutional and pooling layers and the decoder will be made of transpose convolutional layers that learn to reconstruct a compressed representation.\n\n"
}
] | 10 |
clawler/portal
|
https://github.com/clawler/portal
|
26d5d67f267aeca3751e73f22d3c576661e8770f
|
0a12bee2e462a0d6d1cb5a7360e06c7349eb8c05
|
c3121090fa1aeb808bc30e378468e8c4dbf19e99
|
refs/heads/master
| 2021-05-27T12:06:01.427885 | 2020-04-08T17:43:33 | 2020-04-08T17:43:33 | 254,267,705 | 0 | 0 | null | 2020-04-09T04:05:32 | 2020-04-08T17:43:37 | 2020-04-08T17:43:34 | null |
[
{
"alpha_fraction": 0.6030951142311096,
"alphanum_fraction": 0.6627218723297119,
"avg_line_length": 24.55813980102539,
"blob_id": "91b8a0ca61ce917654907368b5b5ac3df652e7e3",
"content_id": "64a3b9d34b442542c2f8671da01b8dfadc77b45b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Dockerfile",
"length_bytes": 2197,
"license_type": "no_license",
"max_line_length": 108,
"num_lines": 86,
"path": "/conf/docker/Dockerfile",
"repo_name": "clawler/portal",
"src_encoding": "UTF-8",
"text": "FROM python:3.6-buster\n\nMAINTAINER DesignSafe-CI <[email protected]>\n\nEXPOSE 8000\n\nENV TERM xterm\nUSER root\n\n# install locales for en_us.utf-8\nRUN apt-get update && apt-get install -y \\\n dialog \\\n apt-utils \\\n locales \\\n && rm -rf /var/lib/apt/lists/* \\\n && localedef -i en_US -c -f UTF-8 -A /usr/share/locale/locale.alias en_US.UTF-8\n\nENV LC_ALL en_US.utf-8\nENV LANG en_US.utf-8\n\n\n\nRUN apt-get update && apt-get install -y \\ \n gawk \\\n unzip \\\n wget \\\n git \\\n mysql-devel \\\n vim \\\n nfs-utils \\\n openssl-devel \\\n bzip2-devel \\\n\n\n\n# Install python 3.7\n# CentOS supports 3.6 but not 3. yet. Although we can install it \n# we should wait until everything works with python3.6\n\n# RUN curl --output /tmp/python3.7.4.tgz https://www.python.org/ftp/python/3.7.4/Python-3.7.4.tgz && \\\n# tar xzf /tmp/python3.7.4.tgz -C /opt/ && \\\n# ls /opt && \\\n# cd /opt/Python-3.7.4 && \\\n# ./configure --enable-optimizations && \\\n# make altinstall\n\n# install node 12.x\nRUN curl --silent --location https://rpm.nodesource.com/setup_12.x | bash -\nRUN apt-get -y install nodejs npm\n\n# RUN pip install --upgrade pip && pip install uwsgitop\nRUN pip3 install --upgrade pip && pip3 install uwsgi uwsgitop\n\nRUN mkdir -p /opt/uwsgi && \\\n curl -SLk -o /opt/uwsgi/uwsgi-2.0.15.tar.gz https://projects.unbit.it/downloads/uwsgi-2.0.15.tar.gz && \\\n tar -xvzf /opt/uwsgi/uwsgi-2.0.15.tar.gz -C /opt/uwsgi && \\\n uwsgi --build-plugin /opt/uwsgi/uwsgi-2.0.15/plugins/zabbix && \\\n mkdir -p /usr/lib/uwsgi/plugins && \\\n mv zabbix_plugin.so /usr/lib/uwsgi/plugins/.\n\nRUN groupadd --gid 816877 G-816877 && \\\n useradd --uid 458981 --gid G-816877 -m --shell /bin/bash tg458981 -d /home/tg458981\n\nCOPY . /srv/www/designsafe\n\nRUN chown -R tg458981:G-816877 /srv/www/designsafe\nRUN mkdir /src\nRUN chown -R tg458981:G-816877 /src\n\nUSER tg458981\n\n\nRUN pip3 install --upgrade pip && \\\n pip3 install pip-tools\n\nCOPY requirements.txt /tmp/\n\nRUN pip3 install -r /tmp/requirements.txt\nENV PATH=\"/home/tg458981/.local/bin:${PATH}\"\n\nCOPY package*.json /srv/www/designsafe/\nRUN cd /srv/www/designsafe npm install\n\nRUN echo \"prefix=~/.npm-global\" >> ~/.npmrc\n\nWORKDIR /srv/www/designsafe"
},
{
"alpha_fraction": 0.3736720681190491,
"alphanum_fraction": 0.4683602750301361,
"avg_line_length": 29.94285774230957,
"blob_id": "114bf06ec308032d3a3082e4876caf830579ee2e",
"content_id": "6bb23b920211aa310a2663cfc999de62cc0a1c9e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2165,
"license_type": "no_license",
"max_line_length": 131,
"num_lines": 70,
"path": "/designsafe/apps/api/agave/filemanager/fixtures/publication_fixture.py",
"repo_name": "clawler/portal",
"src_encoding": "UTF-8",
"text": "pub_fixture = {\n \"status\": \"published\",\n \"users\": [\n {\n \"profile\": {\n \"institution\": \"University of Hawaii\"\n },\n \"username\": \"ianrob30\",\n \"first_name\": \"Ian\",\n \"last_name\": \"Robertson\",\n \"_ui\": {\n \"deleted\": False,\n \"order\": 0\n },\n \"email\": \"[email protected]\"\n }\n ],\n \"created\": \"2018-10-10T13:30:26.678200\",\n \"projectId\": \"PRJ-2110\",\n \"project\": {\n \"doi\": \"doi:10.17603/DS2BH7W\",\n \"uuid\": \"6161221773431860760-242ac119-0001-012\",\n \"name\": \"designsafe.project\",\n \"created\": \"2018-10-09T21:10:32.071000-05:00\",\n \"_related\": {},\n \"schemaId\": None,\n \"lastUpdated\": \"2018-10-10T08:19:21.582000-05:00\",\n \"associationIds\": [],\n \"_links\": {\n \"owner\": {\n \"href\": \"https://agave.designsafe-ci.org/profiles/v2/ds_admin\"\n },\n \"self\": {\n \"href\": \"https://agave.designsafe-ci.org/meta/v2/data/6161221773431860760-242ac119-0001-012\"\n },\n \"associationIds\": [],\n \"permissions\": {\n \"href\": \"https://agave.designsafe-ci.org/meta/v2/data/6161221773431860760-242ac119-0001-012/pems\"\n }\n },\n \"value\": {\n \"teamMembers\": [\n \"ianrob30\"\n ],\n \"coPis\": [],\n \"projectType\": \"other\",\n \"description\": \"This project records data collected during and after Hurricane Lane impacted the Hawaiian Island chain.\",\n \"title\": \"Hurricane Lane, Hawaii Islands, August 2018\",\n \"projectId\": \"PRJ-2110\",\n \"ef\": \"None\",\n \"keywords\": \"Hurricane, Hawaii, flooding, landslide\",\n \"associatedProjects\": [],\n \"pi\": \"ianrob30\",\n \"awardNumber\": \"CMMI-1841667\"\n },\n \"owner\": \"ds_admin\",\n \"internalUsername\": None,\n \"piLabel\": \"Robertson, Ian\"\n },\n \"institutions\": [\n {\n \"_ui\": {\n \"deleted\": False,\n \"order\": 0\n },\n \"name\": \"ianrob30\",\n \"label\": \"University of Hawaii\"\n }\n ]\n }"
},
{
"alpha_fraction": 0.48875856399536133,
"alphanum_fraction": 0.49169111251831055,
"avg_line_length": 33.686439514160156,
"blob_id": "029acc0865f6925e674470153821e2b3ef20ec38",
"content_id": "bee8fb073075d77e2ea16f98e51e65eac2b38091",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4092,
"license_type": "no_license",
"max_line_length": 91,
"num_lines": 118,
"path": "/designsafe/apps/api/agave/filemanager/shared_data.py",
"repo_name": "clawler/portal",
"src_encoding": "UTF-8",
"text": "\"\"\"File Manager for legacy Shared with Me data\n\"\"\"\n\nimport logging\nimport json\nimport os\nimport re\nimport six\nimport datetime\nfrom django.conf import settings\nfrom .base import BaseFileManager\nfrom itertools import takewhile\nfrom designsafe.apps.api.agave.filemanager.agave import AgaveFileManager\nfrom designsafe.apps.api.exceptions import ApiException\nfrom elasticsearch import TransportError, ConnectionTimeout\nfrom elasticsearch_dsl.query import Q\nfrom elasticsearch_dsl import Search, Document\nfrom elasticsearch_dsl.connections import connections\nfrom designsafe.apps.data.models.elasticsearch import IndexedFile\nlogger = logging.getLogger(__name__)\n\nclass SharedDataFileManager(AgaveFileManager):\n NAME = 'agave'\n DEFAULT_SYSTEM_ID = 'designsafe.storage.default'\n\n @property\n def requires_auth(self):\n \"\"\"Whetherit should check for an authenticated user.\n\n If this is a public data file manager, it should return False.\n \"\"\"\n return True\n\n @staticmethod\n def listing(system, file_path, user_context=None, offset=None, limit=None):\n file_path = file_path or '/'\n file_path = file_path.strip('/')\n if file_path.strip('/').split('/')[0] != user_context:\n if file_path == '$SHARE':\n q = Q('bool',\n must=[\n Q('term', **{'system._exact': system})\n ]\n )\n else:\n q = Q('bool',\n must=[\n Q('term', **{'path._path': file_path}),\n Q('term', **{'system._exact': system})\n ]\n )\n else:\n q = Q('bool',\n must=[\n Q('term', **{'path._exact': file_path}),\n Q('term', **{'system._exact': system})\n ]\n )\n if user_context is not None:\n username_q = Q('term', **{'permissions.username': user_context})\n world_q = Q('term', **{'permissions.username': 'WORLD'})\n pems_filter = Q('bool')\n pems_filter.should = [username_q, world_q]\n nested_filter = Q('nested')\n nested_filter.path = 'permissions'\n nested_filter.query = pems_filter\n\n if file_path == '$SHARE':\n file_path = '/'\n home_filter = Q('bool', must_not=Q('term', **{'path._path': '/'+user_context}))\n query = Q('bool', must=q, filter=[nested_filter, home_filter])\n else:\n query = Q('bool', must=q)\n \n search = IndexedFile.search()\n search.query = query\n search = search.sort('path._exact', 'name._exact')\n\n try:\n res = search.execute()\n except (TransportError, ConnectionTimeout) as e:\n if getattr(e, 'status_code', 500) == 404:\n raise\n res = search.execute()\n\n if file_path == '/':\n result = {\n 'trail': [{'name': '$SHARE', 'path': '/$SHARE'}],\n 'name': '$SHARE',\n 'path': '/$SHARE',\n 'system': system,\n 'type': 'dir',\n 'children': [],\n 'permissions': 'NONE'\n }\n else:\n file_path_comps = file_path.split('/')\n if file_path_comps != '':\n file_path_comps.insert(0, '')\n\n trail_comps = [{'name': file_path_comps[i] or '/',\n 'system': system,\n 'path': '/'.join(file_path_comps[0:i+1]) or '/',\n } for i in range(0, len(file_path_comps))]\n result = {\n 'trail': trail_comps,\n 'name': os.path.split(file_path)[1],\n 'path': file_path,\n 'system': system,\n 'type': 'dir',\n 'children': [],\n 'permissions': 'READ'\n }\n\n for f in res:\n result['children'].append(f.to_dict())\n\n return result"
},
{
"alpha_fraction": 0.567581295967102,
"alphanum_fraction": 0.5693597793579102,
"avg_line_length": 35.44444274902344,
"blob_id": "56b4aecc37eb4f24149398f4ca9950d6f372772f",
"content_id": "9ca65674bc27b49b8db7674c8535a08724b0b16d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3936,
"license_type": "no_license",
"max_line_length": 117,
"num_lines": 108,
"path": "/designsafe/apps/api/agave/filemanager/publications.py",
"repo_name": "clawler/portal",
"src_encoding": "UTF-8",
"text": "\"\"\"Publication file manager.\n\n.. module:: portal.apps.api.agave.managers.publications\n :synopsis: Manager handling Publications searches.\n\"\"\"\n\n\nimport logging\nimport datetime\nfrom django.conf import settings\nfrom elasticsearch_dsl import Q, Search, Index\nfrom designsafe.libs.elasticsearch.docs.publications import BaseESPublication\nfrom designsafe.libs.elasticsearch.docs.publication_legacy import BaseESPublicationLegacy\nfrom designsafe.apps.api.agave.filemanager.agave import AgaveFileManager\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass PublicationsManager(AgaveFileManager):\n \"\"\"File manager for listing publications.\"\"\"\n\n @property\n def requires_auth(self):\n \"\"\"Whether it should check for an authenticated user.\n\n If this is a public data file manager, it should return False.\n \"\"\"\n return False\n\n def construct_query(self, **kwargs): # pylint: disable=no-self-use\n \"\"\"Construct ES query.\"\"\"\n published_index_name = list(Index(settings.ES_INDEX_PREFIX.format('publications')).get_alias().keys())[0]\n legacy_index_name = list(Index(settings.ES_INDEX_PREFIX.format('publications-legacy')).get_alias().keys())[0]\n\n filter_queries = []\n if kwargs.get('type_filters'):\n for type_filter in kwargs['type_filters']:\n if type_filter == 'nees':\n type_query = Q({'term': {'_index': legacy_index_name}})\n else:\n type_query = Q('term', **{'project.value.projectType._exact': type_filter})\n filter_queries.append(type_query)\n published_query = Q(\n 'bool',\n must=[\n Q('bool', should=[\n Q({'term': {'_index': published_index_name}}),\n Q({'term': {'_index': legacy_index_name}})\n ]),\n Q('bool', should=filter_queries)\n ],\n must_not=[\n Q('term', status='unpublished'),\n Q('term', status='publishing'),\n Q('term', status='saved')\n ]\n )\n\n return published_query\n\n def listing(self, system=None, file_path=None, offset=0, limit=100, **kwargs):\n \"\"\"Wrap the search result in a BaseFile object for serializtion.\"\"\"\n query = self.construct_query(**kwargs)\n listing_search = Search()\n listing_search = listing_search.filter(query).sort(\n '_index',\n {'project._exact': {'order': 'asc', 'unmapped_type': 'keyword'}},\n {'created': {'order': 'desc', 'unmapped_type': 'long'}}\n )\n listing_search = listing_search.extra(from_=offset, size=limit)\n\n res = listing_search.execute()\n children = []\n for hit in res:\n try:\n getattr(hit, 'projectId')\n children.append(BaseESPublication(**hit.to_dict()).to_file())\n except AttributeError:\n children.append(BaseESPublicationLegacy(**hit.to_dict()).to_file())\n\n result = {\n 'trail': [{'name': '$SEARCH', 'path': '/$SEARCH'}],\n 'name': '$SEARCH',\n 'path': '/',\n 'system': system,\n 'type': 'dir',\n 'children': children,\n 'permissions': 'READ'\n }\n return result\n\n def save_publication(\n self,\n publication,\n status='publishing'\n ): # pylint: disable=no-self-use\n \"\"\"Save publication.\"\"\"\n publication['projectId'] = publication['project']['value']['projectId']\n publication['created'] = datetime.datetime.now().isoformat()\n publication['status'] = status\n publication['version'] = 2\n publication['licenses'] = publication.pop('license', [])\n publication['license'] = ''\n\n pub = BaseESPublication(project_id=publication['projectId'], **publication)\n pub.save()\n return pub\n"
},
{
"alpha_fraction": 0.5485336780548096,
"alphanum_fraction": 0.5497727990150452,
"avg_line_length": 33.58571243286133,
"blob_id": "ffb83c3af54fae0f3ed87c4e8303d2e3f687d717",
"content_id": "388e36ab5034d564c3a1a16e2eb324663c7791fe",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 2421,
"license_type": "no_license",
"max_line_length": 105,
"num_lines": 70,
"path": "/designsafe/static/scripts/projects/components/publication-citation/publication-citation.component.js",
"repo_name": "clawler/portal",
"src_encoding": "UTF-8",
"text": "import PublicationCitationTemplate from './publication-citation.component.html';\n\nclass PublicationCitationCtrl {\n constructor($sce, $window) {\n 'ngInject';\n this.$sce = $sce;\n this.$window = $window;\n }\n\n $onInit() {\n this.entity = this.resolve.entity;\n this.publication = this.resolve.publication;\n this.auths = [];\n this.doi = '';\n\n if (this.entity) {\n // entity\n this.auths = angular.copy(this.entity.authors);\n this.doi = this.entity.doi;\n } else if (this.publication.project.value.projectType !== 'other') {\n // exp,hyb,sim,field \n let authIds = [];\n if (this.publication.project.value.coPis) {\n authIds = this.publication.project.value.coPis.concat(this.publication.project.value.pi);\n } else {\n authIds = [this.publication.project.value.pi];\n }\n this.auths = this.publication.authors.filter((author) => authIds.includes(author.name));\n } else {\n // other\n this.auths = angular.copy(this.publication.project.value.teamOrder);\n }\n\n if (!this.entity && !this.publication.project.doi && this.publication.project.value.dois.length){\n this.doi = this.publication.project.value.dois[0];\n } else if (!this.entity && this.publication.project.doi) {\n this.doi = this.publication.project.doi;\n }\n\n let authors = '';\n this.auths.sort((a, b) => {\n return a.order - b.order;\n });\n this.auths.forEach(\n (a) => {\n if (a && a.lname && a.fname && a.authorship) {\n authors += a.lname + ', ' + a.fname + ', ';\n }\n }\n );\n this.citationDate = this.publication.created.split('T')[0].split('-')[0];\n this.citationUrl = 'https://doi.org/' + this.doi.replace(/doi:/, '');\n this.doiCitation = this.doi.replace(/doi:/, '');\n }\n\n downloadCitation() {\n let url = \"https://data.datacite.org/application/vnd.datacite.datacite+xml/\" + this.doiCitation;\n this.$window.open(url);\n }\n}\n\nexport const PublishedCitationComponent = {\n template: PublicationCitationTemplate,\n controllerAs: '$ctrl',\n controller: PublicationCitationCtrl,\n bindings: {\n resolve: '<',\n close: '&'\n }\n};\n"
},
{
"alpha_fraction": 0.6647211313247681,
"alphanum_fraction": 0.6809338331222534,
"avg_line_length": 28.653846740722656,
"blob_id": "7377779c457e0a3ea5c8a968b656d2efbe7b46df",
"content_id": "fa59e196df3a8a3c2dfa6081a14510fbf7ae6aa4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1542,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 52,
"path": "/designsafe/apps/api/agave/filemanager/published_files.py",
"repo_name": "clawler/portal",
"src_encoding": "UTF-8",
"text": "\"\"\"File Manager for published Data\n\"\"\"\n\nimport logging\nimport json\nimport os\nimport re\nimport datetime\nfrom django.conf import settings\nfrom .base import BaseFileManager\nfrom designsafe.apps.api.agave.filemanager.agave import AgaveFileManager\nfrom designsafe.apps.api.exceptions import ApiException\n\n\nlogger = logging.getLogger(__name__)\n\nclass PublishedFileManager(AgaveFileManager):\n NAME = 'published_files'\n DEFAULT_SYSTEM_ID = 'designsafe.storage.published'\n\n @property\n def requires_auth(self):\n \"\"\"Whether it should check for an authenticated user.\n\n If this is a public data file manager, it should return False.\n \"\"\"\n return False\n\n def listing(self, system, file_path='/', offset=0, limit=100, **kwargs):\n return super(PublishedFileManager, self).\\\n listing(system, file_path, offset, limit)\n\n def delete(self, *args, **kwargs):\n raise ApiException('Invalid Action', status=400)\n\n def mkdir(self, *args, **kwargs):\n raise ApiException('Invalid Action', status=400)\n\n def move(self, *args, **kwargs):\n raise ApiException('Invalid Action', status=400)\n\n def rename(self, *args, **kwargs):\n raise ApiException('Invalid Action', status=400)\n\n def share(self, *args, **kwargs):\n raise ApiException('Invalid Action', status=400)\n\n def trash(self, *args, **kwargs):\n raise ApiException('Invalid Action', status=400)\n\n def upload(self, *args, **kwargs):\n raise ApiException('Invalid Action', status=400)\n"
},
{
"alpha_fraction": 0.574462890625,
"alphanum_fraction": 0.576416015625,
"avg_line_length": 33.420169830322266,
"blob_id": "4a5e25127ecbfaec22385beee636a64fee318dda",
"content_id": "b6d7f70a2aae2babef59ceca1e29205456769e37",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 4096,
"license_type": "no_license",
"max_line_length": 102,
"num_lines": 119,
"path": "/designsafe/static/scripts/data-depot/components/data-depot-toolbar/data-depot-toolbar.component.js",
"repo_name": "clawler/portal",
"src_encoding": "UTF-8",
"text": "import dataDepotToolbarTemplate from './data-depot-toolbar.component.html'\n\nclass DataDepotToolbarCtrl {\n constructor($state, $uibModal, Django, DataBrowserService, UserService) {\n 'ngInject';\n this.DataBrowserService = DataBrowserService;\n this.$state = $state;\n this.search = { queryString: '' };\n this.browser = DataBrowserService.state();\n this.UserService = UserService;\n\n this.tests = {};\n\n this.apiParams = DataBrowserService.apiParameters();\n\n }\n\n placeholder() {\n var stateNames = {\n 'myData': 'My Data',\n 'projects.list': 'My Projects',\n 'sharedData': 'Shared Data',\n 'boxData': 'Box',\n 'dropboxData': 'Dropbox',\n 'googledriveData': 'Google Drive',\n 'publicData': 'Published Projects',\n 'communityData': 'Community Data',\n 'projects.view': 'Project View',\n 'projects.view.data': 'Project Data View',\n 'neesPublished': 'NEES Published'\n };\n\n if (stateNames[this.$state.current.name]) {\n return (stateNames[this.$state.current.name]);\n }\n else {\n return ('Data Depot');\n }\n };\n\n details() {\n // preview the last selected file or current listing if none selected\n if (this.browser.selected.length > 0) {\n this.DataBrowserService.preview(this.browser.selected.slice(-1)[0]);\n } else {\n this.DataBrowserService.preview(this.browser.listing);\n }\n }\n download() {\n this.DataBrowserService.download(this.browser.selected);\n }\n preview() {\n this.DataBrowserService.preview(this.browser.selected[0], this.browser.listing);\n }\n previewImages() {\n const images = this.browser.selected.filter(({ path }) => {\n const ext = path.split('.').pop().toLowerCase();\n return ['jpg', 'jpeg', 'png', 'tiff', 'gif'].indexOf(ext) !== -1;\n });\n const folders = this.browser.selected.filter(({ format }) => format === 'folder');\n if (folders.length) {\n Promise.all(folders.map((folder) => folder.fetch())).then((responses) => {\n for (const res of responses) {\n const output = res.children.filter(({ path }) => {\n const ext = path.split('.').pop().toLowerCase();\n return ['jpg', 'jpeg', 'png', 'tiff', 'gif'].indexOf(ext) !== -1;\n });\n images.push(...output);\n }\n this.DataBrowserService.previewImages(images);\n });\n } else {\n this.DataBrowserService.previewImages(images);\n }\n }\n showCitation() {\n this.DataBrowserService.showCitation(this.browser.selected, this.browser.listing);\n }\n viewMetadata() {\n this.DataBrowserService.viewMetadata(this.browser.selected, this.browser.listing);\n }\n viewCategories() {\n this.DataBrowserService.viewCategories(this.browser.selected, this.browser.listing);\n }\n share() {\n this.DataBrowserService.share(this.browser.selected[0]);\n }\n copy() {\n this.DataBrowserService.copy(this.browser.selected);\n }\n move() {\n this.DataBrowserService.move(this.browser.selected, this.browser.listing);\n }\n rename() {\n this.DataBrowserService.rename(this.browser.selected[0]);\n }\n trash() {\n this.DataBrowserService.trash(this.browser.selected);\n }\n rm() {\n this.DataBrowserService.rm(this.browser.selected);\n }\n ddSearch() {\n var state = this.apiParams.searchState;\n this.$state.go(state, {\n 'query_string': this.search.queryString,\n 'systemId': this.browser.listing.system,\n });\n }\n}\n\nDataDepotToolbarCtrl.$inject = ['$state', '$uibModal', 'Django', 'DataBrowserService', 'UserService'] \n\n\nexport const DataDepotToolbarComponent = {\n controller: DataDepotToolbarCtrl,\n controllerAs: '$ctrl',\n template: dataDepotToolbarTemplate\n}\n"
},
{
"alpha_fraction": 0.5215564966201782,
"alphanum_fraction": 0.5257566571235657,
"avg_line_length": 37.7320556640625,
"blob_id": "b1f08f6164d92a22ae154a3243361c6f78809db8",
"content_id": "5958d8f9ff33fbcae3b6fc82237f4187094f44b2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8095,
"license_type": "no_license",
"max_line_length": 99,
"num_lines": 209,
"path": "/designsafe/libs/elasticsearch/docs/publications.py",
"repo_name": "clawler/portal",
"src_encoding": "UTF-8",
"text": "\"\"\"ES publication doc.\n\n.. module:: portal.libs.elasticsearch.docs.files\n :synopsis: Wrapper classes for ES ``files`` doc type.\n\"\"\"\n\nimport logging\nimport os\nimport zipfile\nfrom future.utils import python_2_unicode_compatible\nfrom designsafe.apps.data.models.elasticsearch import IndexedPublication\nfrom designsafe.libs.elasticsearch.docs.base import BaseESResource\nfrom designsafe.libs.elasticsearch.exceptions import DocumentNotFound\nfrom django.contrib.auth import get_user_model\n\n# pylint: disable=invalid-name\nlogger = logging.getLogger(__name__)\n# pylint: enable=invalid-name\n\n\n@python_2_unicode_compatible\nclass BaseESPublication(BaseESResource):\n \"\"\"Wrapper class for Elastic Search indexed publication.\n\n .. rubric:: Rationale\n\n This wrapper class is needed in order to separate concerns.\n Any thing specific to Elastic Search must live in\n :mod:`libs.elasticsearch.docs.base` and any logic needed\n to manipulate data must live here.\n Also, by manipulating data outside a ``DocType`` subclass\n we avoid the use of ``AttrDict`` and ``AttrList``.\n\n \"\"\"\n\n def __init__(self, wrapped_doc=None, project_id=None, **kwargs):\n \"\"\"Elastic Search File representation.\n\n This class directly wraps an Agave indexed file.\n\n \"\"\"\n super(BaseESPublication, self).__init__(wrapped_doc, **kwargs)\n\n if not wrapped_doc:\n self._populate(project_id, **kwargs)\n\n def _populate(self, project_id, **kwargs):\n\n try:\n wrapped_doc = self._index_cls.from_id(project_id)\n self._wrap(wrapped_doc, **kwargs)\n except DocumentNotFound:\n self._wrapped = self._index_cls(\n project_id=project_id,\n **dict(kwargs)\n )\n\n @property\n def _index_cls(self):\n return IndexedPublication\n\n def save(self, using=None, index=None, validate=True,\n **kwargs): # pylint: disable=unused-argument\n \"\"\"Save document.\"\"\"\n self._wrapped.save()\n\n def delete(self):\n \"\"\"Delete.\"\"\"\n self._wrapped.delete()\n\n def to_file(self):\n \"\"\"To file.\"\"\"\n dict_obj = {\n 'agavePath': 'agave://designsafe.storage.published/{}'.format(\n self.project.value.projectId\n ),\n 'children': [],\n 'deleted': False,\n 'format': 'folder',\n 'length': 24731027,\n 'meta': {\n 'title': self.project['value']['title'],\n 'pi': self.project['value']['pi'],\n 'dateOfPublication': self.created,\n 'type': self.project['value']['projectType'],\n 'projectId': self.project['value']['projectId'],\n 'keywords': self.project['value']['keywords'],\n 'description': self.project['value']['description']\n },\n 'name': self.project.value.projectId,\n 'path': '/{}'.format(self.project.value.projectId),\n 'permissions': 'READ',\n 'project': self.project.value.projectId,\n 'system': 'designsafe.storage.published',\n 'systemId': 'designsafe.storage.published',\n 'type': 'dir',\n 'version': getattr(self, 'version', 1)\n }\n if 'dataType' in self.project['value']:\n dict_obj['meta']['dataType'] = self.project['value']['dataType']\n pi = self.project['value']['pi']\n pi_user = [x for x in getattr(self, 'users', []) if x['username'] == pi]\n if pi_user:\n pi_user = pi_user[0]\n dict_obj['meta']['piLabel'] = '{last_name}, {first_name}'.format(\n last_name=pi_user['last_name'], first_name=pi_user['first_name'])\n else:\n try:\n pi_user = get_user_model().objects.get(username=pi)\n dict_obj['meta']['piLabel'] = '{last_name}, {first_name}'.format(\n last_name=pi_user.last_name, first_name=pi_user.first_name)\n except:\n dict_obj['meta']['piLabel'] = '({pi})'.format(pi=pi)\n return dict_obj\n\n def 
related_file_paths(self):\n dict_obj = self._wrapped.to_dict()\n related_objs = []\n if dict_obj['project']['value']['projectType'] == 'experimental':\n related_objs = (\n dict_obj.get('modelConfigs', []) +\n dict_obj.get('analysisList', []) +\n dict_obj.get('sensorLists', []) +\n dict_obj.get('eventsList', []) +\n dict_obj.get('reportsList', [])\n )\n elif dict_obj['project']['value']['projectType'] == 'simulation':\n related_objs = (\n dict_obj.get('models', []) +\n dict_obj.get('inputs', []) +\n dict_obj.get('outputs', []) +\n dict_obj.get('analysiss', []) +\n dict_obj.get('reports', [])\n )\n elif dict_obj['project']['value']['projectType'] == 'hybrid_simulation':\n related_objs = (\n dict_obj.get('global_models', []) +\n dict_obj.get('coordinators', []) +\n dict_obj.get('sim_substructures', []) +\n dict_obj.get('exp_substructures', []) +\n dict_obj.get('coordinator_outputs', []) +\n dict_obj.get('sim_outputs', []) +\n dict_obj.get('exp_outputs', []) +\n dict_obj.get('reports', []) +\n dict_obj.get('analysiss', [])\n )\n elif dict_obj['project']['value']['projectType'] == 'field_recon':\n related_objs = (\n dict_obj.get('collections', []) +\n dict_obj.get('socialscience', []) +\n dict_obj.get('planning', []) +\n dict_obj.get('reports', []) +\n dict_obj.get('geoscience', [])\n )\n\n file_paths = []\n for obj in related_objs:\n for file_dict in obj['fileObjs']:\n file_paths.append(file_dict['path'])\n\n return file_paths\n\n def archive(self):\n archive_name = '{}_archive.zip'.format(self.projectId)\n pub_dir = '/corral-repl/tacc/NHERI/published/'\n arc_dir = os.path.join(pub_dir, 'archives/')\n\n def set_perms(dir, octal, subdir=None):\n try:\n os.chmod(dir, octal)\n if subdir:\n if not os.path.isdir(subdir):\n raise Exception('subdirectory does not exist!')\n for root, dirs, files in os.walk(subdir):\n os.chmod(root, octal)\n for d in dirs:\n os.chmod(os.path.join(root, d), octal)\n for f in files:\n os.chmod(os.path.join(root, f), octal)\n except Exception as e:\n logger.exception(\"Failed to set permissions for {}\".format(dir))\n os.chmod(dir, 0o555)\n\n def create_archive():\n arc_source = os.path.join(pub_dir, self.projectId)\n archive_path = os.path.join(arc_dir, archive_name)\n\n try:\n logger.debug(\"Creating archive for {}\".format(self.projectId))\n\n zf = zipfile.ZipFile(archive_path, mode='w', allowZip64=True)\n for dirs, _, files in os.walk(arc_source):\n for f in files:\n if f == archive_name:\n continue\n zf.write(os.path.join(dirs, f), os.path.join(dirs.replace(pub_dir, ''), f))\n zf.close()\n except Exception as e:\n logger.exception(\"Archive creation failed for {}\".format(arc_source))\n finally:\n set_perms(pub_dir, 0o555, arc_source)\n set_perms(arc_dir, 0o555)\n\n try:\n set_perms(pub_dir, 0o755, os.path.join(pub_dir, self.projectId))\n set_perms(arc_dir, 0o755)\n create_archive()\n except Exception as e:\n logger.exception('Failed to archive publication!')\n"
},
{
"alpha_fraction": 0.6595083475112915,
"alphanum_fraction": 0.6606385707855225,
"avg_line_length": 38.99435043334961,
"blob_id": "a27c21f67b4a8784af42acc08009c63c2924ccd7",
"content_id": "8fa7ee15ad139e5ad4bd3e77714a4be76d9d3041",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7078,
"license_type": "no_license",
"max_line_length": 98,
"num_lines": 177,
"path": "/designsafe/apps/api/agave/filemanager/tests.py",
"repo_name": "clawler/portal",
"src_encoding": "UTF-8",
"text": "import os\nimport json\nimport datetime\nfrom mock import patch, MagicMock\nfrom django.test import TestCase\nfrom django.contrib.auth import get_user_model\nfrom django.conf import settings\nfrom datetime import timedelta\nfrom designsafe.apps.api.agave.filemanager.lookups import FileLookupManager\n\nfrom designsafe.apps.api.agave.filemanager.private_data import PrivateDataFileManager\nfrom designsafe.apps.api.agave.filemanager.community import CommunityFileManager\nfrom designsafe.apps.api.agave.filemanager.published_files import PublishedFileManager\nfrom designsafe.apps.api.agave.filemanager.shared_data import SharedDataFileManager\nfrom designsafe.apps.api.agave.filemanager.publications import PublicationsManager\n\nfrom designsafe.apps.data.models.elasticsearch import IndexedPublication, IndexedPublicationLegacy\n\nfrom designsafe.apps.api.exceptions import ApiException\n\nclass TestLookupManager(TestCase):\n\n def test_lookup_returns_for_shared(self):\n self.assertEqual(FileLookupManager('shared'), SharedDataFileManager)\n def test_lookup_returns_for_private(self):\n self.assertEqual(FileLookupManager('agave'), PrivateDataFileManager)\n def test_lookup_returns_for_publications(self):\n self.assertEqual(FileLookupManager('public'), PublicationsManager)\n def test_lookup_returns_for_published_files(self):\n self.assertEqual(FileLookupManager('published'), PublishedFileManager)\n def test_lookup_returns_for_community(self):\n self.assertEqual(FileLookupManager('community'), CommunityFileManager)\n\nclass TestPrivateDataManager(TestCase):\n def test_requires_auth(self):\n mock_ac = MagicMock()\n fm = PrivateDataFileManager(mock_ac)\n self.assertEqual(fm.requires_auth, True) \n\nclass TestCommunityFileManager(TestCase):\n @patch('designsafe.apps.api.agave.filemanager.agave.BaseFileResource')\n def test_listing(self, mock_afm):\n mock_ac = MagicMock()\n fm = CommunityFileManager(mock_ac)\n fm.listing('test.system', '/')\n mock_afm.listing.assert_called_with(mock_ac, 'test.system', '/', 0, 100)\n def test_requires_auth(self):\n mock_ac = MagicMock()\n fm = CommunityFileManager(mock_ac)\n self.assertEqual(fm.requires_auth, False) \n def test_copy_raises(self):\n mock_ac = MagicMock()\n fm = CommunityFileManager(mock_ac)\n with self.assertRaises(ApiException):\n fm.copy()\n def test_delete_raises(self):\n mock_ac = MagicMock()\n fm = CommunityFileManager(mock_ac)\n with self.assertRaises(ApiException):\n fm.delete()\n def test_mkdir_raises(self):\n mock_ac = MagicMock()\n fm = CommunityFileManager(mock_ac)\n with self.assertRaises(ApiException):\n fm.mkdir()\n def test_move_raises(self):\n mock_ac = MagicMock()\n fm = CommunityFileManager(mock_ac)\n with self.assertRaises(ApiException):\n fm.move()\n def test_rename_raises(self):\n mock_ac = MagicMock()\n fm = CommunityFileManager(mock_ac)\n with self.assertRaises(ApiException):\n fm.rename()\n def test_share_raises(self):\n mock_ac = MagicMock()\n fm = CommunityFileManager(mock_ac)\n with self.assertRaises(ApiException):\n fm.share()\n def test_trash_raises(self):\n mock_ac = MagicMock()\n fm = CommunityFileManager(mock_ac)\n with self.assertRaises(ApiException):\n fm.trash()\n def test_upload_raises(self):\n mock_ac = MagicMock()\n fm = CommunityFileManager(mock_ac)\n with self.assertRaises(ApiException):\n fm.upload()\n\nclass TestPublishedFileManager(TestCase):\n @patch('designsafe.apps.api.agave.filemanager.agave.BaseFileResource')\n def test_listing(self, mock_afm):\n mock_ac = MagicMock()\n fm = 
PublishedFileManager(mock_ac)\n fm.listing('test.system', '/')\n mock_afm.listing.assert_called_with(mock_ac, 'test.system', '/', 0, 100)\n def test_requires_auth(self):\n mock_ac = MagicMock()\n fm = PublishedFileManager(mock_ac)\n self.assertEqual(fm.requires_auth, False) \n def test_delete_raises(self):\n mock_ac = MagicMock()\n fm = PublishedFileManager(mock_ac)\n with self.assertRaises(ApiException):\n fm.delete()\n def test_mkdir_raises(self):\n mock_ac = MagicMock()\n fm = PublishedFileManager(mock_ac)\n with self.assertRaises(ApiException):\n fm.mkdir()\n def test_move_raises(self):\n mock_ac = MagicMock()\n fm = PublishedFileManager(mock_ac)\n with self.assertRaises(ApiException):\n fm.move()\n def test_rename_raises(self):\n mock_ac = MagicMock()\n fm = PublishedFileManager(mock_ac)\n with self.assertRaises(ApiException):\n fm.rename()\n def test_share_raises(self):\n mock_ac = MagicMock()\n fm = PublishedFileManager(mock_ac)\n with self.assertRaises(ApiException):\n fm.share()\n def test_trash_raises(self):\n mock_ac = MagicMock()\n fm = PublishedFileManager(mock_ac)\n with self.assertRaises(ApiException):\n fm.trash()\n def test_upload_raises(self):\n mock_ac = MagicMock()\n fm = PublishedFileManager(mock_ac)\n with self.assertRaises(ApiException):\n fm.upload()\n\nclass TestPublicationsManager(TestCase):\n def test_requires_auth(self):\n mock_ac = MagicMock()\n fm = PublicationsManager(mock_ac)\n self.assertEqual(fm.requires_auth, False) \n\n @patch('designsafe.apps.api.agave.filemanager.publications.BaseESPublicationLegacy')\n @patch('designsafe.apps.api.agave.filemanager.publications.BaseESPublication')\n @patch('designsafe.apps.api.agave.filemanager.publications.Search')\n def test_listing(self, mock_search, mock_pub, mock_leg_pub):\n fm = PublicationsManager(None) \n mock_search().filter().sort().extra().execute.return_value = [\n IndexedPublication(projectId='PRJ-XXX'),\n IndexedPublicationLegacy()\n ]\n\n mock_pub().to_file.return_value = {'type': 'pub'}\n mock_leg_pub().to_file.return_value = {'type': 'leg_pub'}\n\n res = fm.listing(**{'type_filters': []})\n expected_result = {\n 'trail': [{'name': '$SEARCH', 'path': '/$SEARCH'}],\n 'name': '$SEARCH',\n 'path': '/',\n 'system': None,\n 'type': 'dir',\n 'children': [{'type': 'pub'}, {'type': 'leg_pub'}],\n 'permissions': 'READ'\n }\n self.assertEqual(res, expected_result)\n\n @patch('designsafe.apps.api.agave.filemanager.publications.BaseESPublication')\n def test_save(self, mock_pub):\n from designsafe.apps.api.agave.filemanager.fixtures.publication_fixture import pub_fixture\n fm = PublicationsManager(None) \n mock_saved_pub = MagicMock()\n mock_pub.return_value = mock_saved_pub\n pub = fm.save_publication(pub_fixture)\n self.assertEqual(pub, mock_saved_pub)"
},
{
"alpha_fraction": 0.6075000166893005,
"alphanum_fraction": 0.6075000166893005,
"avg_line_length": 24,
"blob_id": "66af42aeefc7b84468b078146261215dea259e58",
"content_id": "87b04d9d4633277a95b6deb0f465ec15512fa195",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 800,
"license_type": "no_license",
"max_line_length": 86,
"num_lines": 32,
"path": "/designsafe/static/scripts/data-depot/components/projects/publication-preview/modals/author-information-modal.component.js",
"repo_name": "clawler/portal",
"src_encoding": "UTF-8",
"text": "import AuthorInformationModalTemplate from './author-information-modal.template.html';\n\nclass AuthorInformationModalCtrl {\n constructor() { }\n\n $onInit() { \n this.author = this.resolve.author;\n this.first = this.author.fname;\n this.last = this.author.lname;\n this.email = this.author.email;\n this.institution = this.author.inst;\n this.username = this.author.name;\n if (this.author.orcid) {\n this.orcid = this.author.orcid;\n }\n }\n\n close() {\n return;\n }\n}\n\nexport const AuthorInformationModalComponent = {\n template: AuthorInformationModalTemplate,\n controller: AuthorInformationModalCtrl,\n controllerAs: '$ctrl',\n bindings: {\n resolve: '<',\n close: '&',\n dismiss: '&'\n },\n};\n"
},
{
"alpha_fraction": 0.7253521084785461,
"alphanum_fraction": 0.7816901206970215,
"avg_line_length": 34.25,
"blob_id": "901ea0d622ed2ce65c6189947fbb1be16921dca9",
"content_id": "16076e253ea3dca12afc0e2123be23ed520c573a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 142,
"license_type": "no_license",
"max_line_length": 55,
"num_lines": 4,
"path": "/bin/run-django.sh",
"repo_name": "clawler/portal",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env bash\nsource scl_source enable\n# run django dev server as designsafe community account\npython manage.py runserver 0.0.0.0:8000 \n"
},
{
"alpha_fraction": 0.567512571811676,
"alphanum_fraction": 0.5683603882789612,
"avg_line_length": 45.07551956176758,
"blob_id": "d4f2c7f103b9e6309c80124dcfc3de5e75ba3372",
"content_id": "0ff2926d198a401ddea5552fd46f5c60d242cc3b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 17693,
"license_type": "no_license",
"max_line_length": 154,
"num_lines": 384,
"path": "/designsafe/static/scripts/data-depot/components/published/published-view.component.js",
"repo_name": "clawler/portal",
"src_encoding": "UTF-8",
"text": "import ExpPublicationTemplate from '../projects/publication-preview/publication-preview.component.html';\nimport SimPublicationTemplate from '../projects/publication-preview/publication-preview-sim.component.html';\nimport HybSimPublicationTemplate from '../projects/publication-preview/publication-preview-hyb-sim.component.html';\nimport FieldReconPublicationTemplate from '../projects/publication-preview/publication-preview-field-recon.component.html';\nimport OtherPublicationTemplate from '../projects/publication-preview/publication-preview-other.component.html';\nimport experimentalData from '../../../projects/components/manage-experiments/experimental-data.json';\nimport { isEqual } from 'underscore';\n\nclass PublishedViewCtrl {\n constructor($stateParams, DataBrowserService, PublishedService, FileListing, $uibModal, $http, djangoUrl, UserService, $q, $anchorScroll, $location){\n 'ngInject';\n this.$stateParams = $stateParams;\n this.DataBrowserService = DataBrowserService;\n this.PublishedService = PublishedService;\n this.FileListing = FileListing;\n this.$uibModal = $uibModal;\n this.$http = $http;\n this.djangoUrl = djangoUrl;\n this.UserService = UserService;\n this.$q = $q;\n this.$anchorScroll = $anchorScroll;\n this.$location = $location;\n }\n\n $onInit() {\n //this.version = this.resolve.version;\n this.readOnly = true;\n this.ui = {\n efs: experimentalData.experimentalFacility,\n equipmentTypes: experimentalData.equipmentTypes,\n experimentTypes: experimentalData.experimentTypes,\n fileNav: true,\n loading: true,\n };\n this.browser = this.DataBrowserService.state();\n this.browser.listings = {};\n var projId = this.$stateParams.filePath.replace(/^\\/+/, '').split('/')[0];\n\n this.getFileObjs = (evt) => {\n evt.files = evt.fileObjs.map((f) => {\n f.system = 'designsafe.storage.published';\n f.path = this.browser.publication.projectId + f.path;\n f.permissions = 'READ';\n return this.FileListing.init(f, {fileMgr: 'published', baseUrl: '/api/public/files'});\n });\n evt.files.forEach((file) => {\n if (!this.browser.listings[evt.uuid]) {\n this.browser.listings[evt.uuid] = { children: [] };\n }\n this.browser.listings[evt.uuid].children.push(file);\n });\n this.browser.listings[evt.uuid].children.forEach((child) => {\n child._entities.push(evt);\n });\n };\n\n if (this.$stateParams.filePath.replace('/', '') === projId) {\n this.ui.fileNav = false;\n }\n\n if (projId) {\n this.PublishedService.getPublished(projId)\n .then((resp) => {\n this.browser.publication = resp.data;\n this.browser.project = resp.data.project;\n this.project = resp.data.project;\n this.fl = {\n showSelect: true,\n showHeader: this.browser.project.value.projectType === 'other',\n showTags: true,\n editTags: false,\n };\n\n if (this.browser.publication.project.value.projectType === 'experimental') {\n if (typeof this.browser.publication.analysisList != 'undefined') {\n this.browser.publication.analysisList.forEach(this.getFileObjs);\n }\n if (typeof this.browser.publication.reportsList != 'undefined') {\n this.browser.publication.reportsList.forEach(this.getFileObjs);\n }\n this.browser.publication.modelConfigs.forEach(this.getFileObjs);\n this.browser.publication.sensorLists.forEach(this.getFileObjs);\n this.browser.publication.eventsList.forEach(this.getFileObjs);\n } else if (this.browser.publication.project.value.projectType === 'simulation') {\n if (typeof this.browser.publication.analysiss != 'undefined') {\n this.browser.publication.analysiss.forEach(this.getFileObjs);\n }\n if (typeof 
this.browser.publication.reports != 'undefined') {\n this.browser.publication.reports.forEach(this.getFileObjs);\n }\n this.browser.publication.models.forEach(this.getFileObjs);\n this.browser.publication.inputs.forEach(this.getFileObjs);\n this.browser.publication.outputs.forEach(this.getFileObjs);\n } else if (this.browser.publication.project.value.projectType === 'hybrid_simulation') {\n if (typeof this.browser.publication.analysiss != 'undefined') {\n this.browser.publication.analysiss.forEach(this.getFileObjs);\n }\n if (typeof this.browser.publication.reports != 'undefined') {\n this.browser.publication.reports.forEach(this.getFileObjs);\n }\n\n this.browser.publication.hybrid_simulations.forEach(this.getFileObjs);\n this.browser.publication.global_models.forEach(this.getFileObjs);\n this.browser.publication.coordinators.forEach(this.getFileObjs);\n this.browser.publication.coordinator_outputs.forEach(this.getFileObjs);\n this.browser.publication.exp_substructures.forEach(this.getFileObjs);\n this.browser.publication.exp_outputs.forEach(this.getFileObjs);\n this.browser.publication.sim_substructures.forEach(this.getFileObjs);\n this.browser.publication.sim_outputs.forEach(this.getFileObjs);\n } else if (this.browser.publication.project.value.projectType === 'field_recon') {\n if (typeof this.browser.publication.analysiss != 'undefined') {\n this.browser.publication.analysiss.forEach(this.getFileObjs);\n }\n if (typeof this.browser.publication.reports != 'undefined') {\n this.browser.publication.reports.forEach(this.getFileObjs);\n }\n if (typeof this.browser.publication.collections != 'undefined') {\n this.browser.publication.collections.forEach(this.getFileObjs);\n }\n if (typeof this.browser.publication.planning != 'undefined') {\n this.browser.publication.planning.forEach(this.getFileObjs);\n }\n if (typeof this.browser.publication.geoscience != 'undefined') {\n this.browser.publication.geoscience.forEach(this.getFileObjs);\n }\n if (typeof this.browser.publication.socialscience != 'undefined') {\n this.browser.publication.socialscience.forEach(this.getFileObjs);\n }\n }\n \n //add metadata to header\n this.PublishedService.updateHeaderMetadata(projId, resp);\n this.version = this.browser.publication.version || 1;\n this.type = this.browser.publication.project.value.projectType;\n this.ui.loading = false;\n \n // // Generate text for PI\n // this.piDisplay = this.browser.publication.authors.find((author) => author.name === this.browser.project.value.pi);\n // // Generate CoPI list\n // this.coPIDisplay = this.project.value.coPis.map((coPi) => this.browser.publication.authors.find((author) => author.name === coPi));\n }).then( () => {\n this.prepProject();\n });\n }\n }\n prepProject() {\n if (this.project.value.projectType === 'experimental'){\n this.browser.project.analysis_set = this.browser.publication.analysisList;\n this.browser.project.modelconfig_set = this.browser.publication.modelConfigs;\n this.browser.project.sensorlist_set = this.browser.publication.sensorLists;\n this.browser.project.event_set = this.browser.publication.eventsList;\n this.browser.project.report_set = this.browser.publication.reportsList;\n this.browser.project.experiment_set = this.browser.publication.experimentsList;\n this.expDOIList = this.browser.project.experiment_set.map(({ doi, uuid }) => {\n return { value: doi, uuid, hash: `anchor-${uuid}` };\n });\n \n }\n if (this.project.value.projectType === 'simulation'){\n this.browser.project.simulation_set = this.browser.publication.simulations;\n 
this.browser.project.model_set = this.browser.publication.models;\n this.browser.project.input_set = this.browser.publication.inputs;\n this.browser.project.output_set = this.browser.publication.outputs;\n this.browser.project.analysis_set = this.browser.publication.analysiss;\n this.browser.project.report_set = this.browser.publication.reports;\n this.simDOIList = this.browser.project.simulation_set.map(({ doi, uuid }) => {\n return { value: doi, uuid, hash: `anchor-${uuid}` };\n });\n }\n if (this.project.value.projectType === 'hybrid_simulation'){\n this.browser.project.hybridsimlation_set = this.browser.publication.hybrid_simulations;\n this.browser.project.globalmodel_set = this.browser.publication.global_models;\n this.browser.project.coordinator_set = this.browser.publication.coordinators;\n this.browser.project.simsubstructure_set = this.browser.publication.sim_substructures;\n this.browser.project.expsubstructure_set = this.browser.publication.exp_substructures;\n this.browser.project.coordinatoroutput_set = this.browser.publication.coordintaor_outputs;\n this.browser.project.simoutput_set = this.browser.publication.sim_outputs;\n this.browser.project.expoutput_set = this.browser.publication.exp_outputs;\n this.browser.project.analysis_set = this.browser.publication.analysiss;\n this.browser.project.report_set = this.browser.publication.reports;\n this.hsDOIList = this.browser.project.hybridsimlation_set.map(({ doi, uuid }) => ({\n value: doi,\n uuid,\n hash: `details-${uuid}`,\n }));\n }\n if (this.project.value.projectType === 'field_recon'){\n this.browser.project.mission_set = this.browser.publication.missions;\n this.browser.project.collection_set = this.browser.publication.collections;\n this.browser.project.socialscience_set = this.browser.publication.socialscience;\n this.browser.project.planning_set = this.browser.publication.planning;\n this.browser.project.geoscience_set = this.browser.publication.geoscience;\n this.browser.project.analysis_set = this.browser.publication.analysiss;\n this.browser.project.report_set = this.browser.publication.reports;\n this.primaryEnts = [].concat(\n this.browser.publication.missions || [],\n this.browser.publication.reports || []\n );\n this.secondaryEnts = [].concat(\n this.browser.publication.socialscience || [],\n this.browser.publication.planning || [],\n this.browser.publication.geoscience || [],\n this.browser.publication.collections || []\n );\n this.orderedPrimary = this.ordered(this.browser.project, this.primaryEnts);\n this.orderedSecondary = {};\n this.orderedPrimary.forEach((primEnt) => {\n if (primEnt.name === 'designsafe.project.field_recon.mission') {\n this.orderedSecondary[primEnt.uuid] = this.ordered(primEnt, this.secondaryEnts);\n }\n });\n this.frDOIList = this.orderedPrimary.map(({ doi, uuid, name }) => ({\n type: name.split('.').pop(),\n value: doi,\n uuid,\n hash: `anchor-${uuid}`\n }));\n }\n }\n\n ordered(parent, entities) {\n let order = (ent) => {\n if (ent._ui && ent._ui.orders && ent._ui.orders.length) {\n return ent._ui.orders.find(order => order.parent === parent.uuid);\n }\n return 0;\n };\n entities.sort((a,b) => {\n if (typeof order(a) === 'undefined' || typeof order(b) === 'undefined') {\n return -1;\n }\n return (order(a).value > order(b).value) ? 
1 : -1;\n });\n\n return entities;\n }\n\n getEF(str) {\n let efs = this.ui.efs[this.browser.project.value.projectType];\n let ef = efs.find((ef) => {\n return ef.name === str;\n });\n return ef.label;\n }\n\n getET(exp) {\n let ets = this.ui.experimentTypes[exp.value.experimentalFacility];\n let et = ets.find((x) => {\n return x.name === exp.value.experimentType;\n });\n return et.label;\n }\n\n getEQ(exp) {\n let eqts = this.ui.equipmentTypes[exp.value.experimentalFacility];\n let eqt = eqts.find((x) => {\n return x.name === exp.value.equipmentType;\n });\n return eqt.label;\n }\n\n download() {\n var body = {\n action: 'download'\n };\n var system = this.$stateParams.systemId;\n var projectId = this.project.value.projectId;\n \n var url = this.djangoUrl.reverse('designsafe_api:public_files_media', ['published', system, `archives/${projectId}_archive.zip`]);\n\n this.$http.put(url, body).then(function (resp) {\n var postit = resp.data.href;\n\n // Is there a better way of doing this?\n var link = document.createElement('a');\n link.style.display = 'none';\n link.setAttribute('href', postit);\n link.setAttribute('download', \"null\");\n document.body.appendChild(link);\n link.click();\n document.body.removeChild(link);\n });\n }\n\n matchingGroup(exp, model) {\n if (!exp) {\n // if the category is related to the project level\n if (model.associationIds.indexOf(this.projectId) > -1 && !model.value.experiments.length) {\n return true;\n }\n return false;\n } else {\n // if the category is related to the experiment level\n // match appropriate data to corresponding experiment\n if(model.associationIds.indexOf(exp.uuid) > -1) {\n return true;\n }\n return false;\n }\n }\n\n showAuthor(author) {\n this.UserService.get(author.name).then((res) => {\n if (res.orcid_id) {\n author.orcid = res.orcid_id;\n }\n this.$uibModal.open({\n component: 'authorInformationModal',\n resolve: {\n author\n },\n size: 'author'\n });\n });\n }\n\n treeDiagram() {\n this.$uibModal.open({\n component: 'projectTree',\n resolve: {\n project: () => {return this.browser.project; },\n readOnly: () => {return true;},\n },\n size: 'lg'\n });\n }\n\n showCitation(entity) {\n this.$uibModal.open({\n component: 'publishedCitationModal',\n resolve: {\n publication: () => { return this.browser.publication; },\n entity: () => { return entity; },\n }\n });\n }\n\n goToHash(hash) {\n this.$location.hash(hash);\n this.$anchorScroll.yOffset = 64;\n return setTimeout(() => this.$anchorScroll(), 750);\n }\n\n relatedWorkEmpty() {\n const relatedWork = this.browser.project.value.associatedProjects.slice();\n const emptyArray = relatedWork.length === 0;\n const emptyListing = isEqual(relatedWork.shift(),{ order: 0, title: '' });\n return emptyArray || emptyListing;\n }\n\n rmEmpty(arr) {\n return arr.filter(Boolean);\n }\n}\n\nexport const ExpPublishedViewComponent = {\n template: ExpPublicationTemplate,\n controller: PublishedViewCtrl,\n controllerAs: '$ctrl',\n};\n\nexport const SimPublishedViewComponent = {\n template: SimPublicationTemplate,\n controller: PublishedViewCtrl,\n controllerAs: '$ctrl',\n};\n\nexport const HybSimPublishedViewComponent = {\n template: HybSimPublicationTemplate,\n controller: PublishedViewCtrl,\n controllerAs: '$ctrl',\n};\n\nexport const FieldReconPublishedViewComponent = {\n template: FieldReconPublicationTemplate,\n controller: PublishedViewCtrl,\n controllerAs: '$ctrl',\n};\n\nexport const OtherPublishedViewComponent = {\n template: OtherPublicationTemplate,\n controller: PublishedViewCtrl,\n 
controllerAs: '$ctrl',\n};\n"
},
{
"alpha_fraction": 0.6935483813285828,
"alphanum_fraction": 0.7076612710952759,
"avg_line_length": 34.42856979370117,
"blob_id": "9652677c4677007e8891ad48d54f3dcfc93e5783",
"content_id": "f913d99690235332b50a5f9a8d56b6adb4d4f908",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 496,
"license_type": "no_license",
"max_line_length": 85,
"num_lines": 14,
"path": "/designsafe/tests.py",
"repo_name": "clawler/portal",
"src_encoding": "UTF-8",
"text": "from django.test import TestCase\nfrom mock import patch\nfrom .views import redirect_old_nees\n\nclass RedirectTest(TestCase):\n \"\"\"\n Assert that the redirect function is called \n using a query string made with part of the NEES ID\n \"\"\"\n @patch('django.shortcuts.redirect')\n def test_redirect_old_nees(self, mock_redirect):\n mock_redirect.return_value = True\n redirect_old_nees(685)\n self.assertTrue(mock_redirect.called_with('/search/?query_string=NEES 0685'))\n"
},
{
"alpha_fraction": 0.6198904514312744,
"alphanum_fraction": 0.6201011538505554,
"avg_line_length": 31.731035232543945,
"blob_id": "5f72609f8252b1ffeeb8c5d6d3667d761fe9b48e",
"content_id": "2f5b2d9277fe5a1e9e6d1c3ab4619aeeaee8173c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 4746,
"license_type": "no_license",
"max_line_length": 124,
"num_lines": 145,
"path": "/designsafe/static/scripts/data-depot/components/projects/project-view/project-view.component.js",
"repo_name": "clawler/portal",
"src_encoding": "UTF-8",
"text": "import ProjectViewTemplate from './project-view.component.html';\n\nclass ProjectViewCtrl {\n\n constructor(ProjectEntitiesService, ProjectService, DataBrowserService, FileListing, $state, $q, $uibModal) {\n 'ngInject';\n\n this.ProjectEntitiesService = ProjectEntitiesService;\n this.ProjectService = ProjectService;\n this.DataBrowserService = DataBrowserService;\n this.FileListing = FileListing;\n this.browser = this.DataBrowserService.state();\n this.$state = $state;\n this.$q = $q;\n this.$uibModal = $uibModal;\n }\n\n $onInit() {\n this.projectId = this.ProjectService.resolveParams.projectId;\n this.filePath = this.ProjectService.resolveParams.filePath;\n this.data = this.ProjectService.resolveParams.data;\n this.loading = true;\n this.fl = {\n showSelect: true,\n showHeader: true,\n showTags: true,\n editTags: false,\n };\n\n if (typeof this.browser.listings != 'undefined') {\n delete this.browser.listings;\n }\n\n if (this.data && this.data.listing.path == this.filePath) {\n this.browser = this.data;\n this.loading = false;\n } else {\n this.$q.all([\n this.ProjectService.get({ uuid: this.projectId }),\n this.DataBrowserService.browse(\n { system: 'project-' + this.projectId, path: this.filePath },\n { query_string: this.$state.params.query_string }\n ),\n this.ProjectEntitiesService.listEntities({ uuid: this.projectId, name: 'all' })\n ]).then(([project, listing, entities]) => {\n this.browser.project = project;\n this.browser.project.appendEntitiesRel(entities);\n this.browser.listing = listing;\n this.browser.listing.href = this.$state.href('projects.view.data', {\n projectId: this.projectId,\n filePath: this.browser.listing.path,\n projectTitle: this.browser.project.value.projectTitle,\n });\n this.browser.listing.children.forEach((child) => {\n child.href = this.$state.href('projects.view.data', {\n projectId: this.projectId,\n filePath: child.path,\n projectTitle: this.browser.project.value.projectTitle,\n });\n child.setEntities(this.projectId, entities);\n });\n this.loading = false;\n });\n }\n }\n\n isSingle(val) {\n // we will have older projects with a single award number as a string\n if (val.length) {\n if (typeof val[0] === 'string') {\n return true;\n }\n }\n return false;\n }\n\n editProject($event) {\n if ($event) {\n $event.preventDefault();\n }\n this.ProjectService.editProject(this.browser.project);\n }\n\n manageProjectType($event) {\n if ($event) {\n $event.preventDefault();\n }\n this.$uibModal.open({\n component: 'manageProjectType',\n resolve: {\n options: () => { return { 'project': this.browser.project, 'warning': false }; },\n },\n size: 'lg',\n });\n }\n\n workingDirectory() {\n this.$state.go('projects.view.data', { projectId: this.projectId }).then(() => {\n });\n }\n\n curationDirectory() {\n if (this.browser.project.value.projectType === 'None') {\n this.manageProjectType();\n } else {\n this.$state.go('projects.curation', { projectId: this.projectId, data: this.browser, filePath: this.filePath});\n }\n }\n\n publicationPreview() {\n if (this.browser.project.value.projectType === 'experimental') {\n this.$state.go('projects.preview', { projectId: this.browser.project.uuid, data: this.browser}).then(() => {\n this.checkState();\n });\n } else if (this.browser.project.value.projectType === 'simulation') {\n this.$state.go('projects.previewSim', { projectId: this.browser.project.uuid, data: this.browser}).then(() => {\n this.checkState();\n });\n } else if (this.browser.project.value.projectType === 'hybrid_simulation') {\n 
this.$state.go('projects.previewHybSim', { projectId: this.browser.project.uuid, data: this.browser}).then(() => {\n this.checkState();\n });\n } else if (this.browser.project.value.projectType === 'other') {\n this.$state.go('projects.previewOther', { projectId: this.browser.project.uuid, data: this.browser}).then(() => {\n this.checkState();\n });\n } else if (this.browser.project.value.projectType === 'field_recon') {\n this.$state.go('projects.previewFieldRecon', { projectId: this.browser.project.uuid, data: this.browser}).then(() => {\n this.checkState();\n });\n } else {\n this.manageProjectType();\n }\n }\n}\n\nexport const ProjectViewComponent = {\n controller: ProjectViewCtrl,\n controllerAs: '$ctrl',\n template: ProjectViewTemplate,\n bindings: {\n resolve: '<',\n projectId: '<'\n }\n};\n"
},
{
"alpha_fraction": 0.7394822239875793,
"alphanum_fraction": 0.7394822239875793,
"avg_line_length": 23.719999313354492,
"blob_id": "04ab7250dcb1eee3bb35c2fd6e9caae643b08a63",
"content_id": "c3e6a46541004098714d370db864bd6e149293cd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 618,
"license_type": "no_license",
"max_line_length": 73,
"num_lines": 25,
"path": "/designsafe/apps/api/agave/filemanager/private_data.py",
"repo_name": "clawler/portal",
"src_encoding": "UTF-8",
"text": "\"\"\"File Manager for community Data\n\"\"\"\n\nimport logging\nimport json\nimport os\nimport re\nimport datetime\nfrom django.conf import settings\nfrom .base import BaseFileManager\nfrom designsafe.apps.api.agave.filemanager.agave import AgaveFileManager\nfrom designsafe.apps.api.exceptions import ApiException\n\nlogger = logging.getLogger(__name__)\n\nclass PrivateDataFileManager(AgaveFileManager):\n NAME = 'my_data'\n\n @property\n def requires_auth(self):\n \"\"\"Whether it should check for an authenticated user.\n\n If this is a public data file manager, it should return False.\n \"\"\"\n return True\n"
},
{
"alpha_fraction": 0.6425490975379944,
"alphanum_fraction": 0.6473406553268433,
"avg_line_length": 36.818180084228516,
"blob_id": "ea72e90146206f93da185f0a4b9fb1555c3dbd43",
"content_id": "303c5969e6c8b39132151ae1cd651cbdd385b123",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2087,
"license_type": "no_license",
"max_line_length": 100,
"num_lines": 55,
"path": "/designsafe/apps/api/agave/tests.py",
"repo_name": "clawler/portal",
"src_encoding": "UTF-8",
"text": "import os\nimport json\nimport datetime\nfrom mock import patch, MagicMock\nfrom django.test import TestCase\nfrom django.contrib.auth import get_user_model\nfrom django.conf import settings\nfrom datetime import timedelta\nfrom designsafe.apps.api.agave.filemanager.lookups import FileLookupManager\nfrom django.contrib.auth.models import User\n\nfrom designsafe.apps.api.agave import to_camel_case\nfrom designsafe.apps.api.exceptions import ApiException\n\nclass MiscTests(TestCase):\n\n def test_to_camel_case(self):\n\n # test cases, first is expected output, second is input\n cases = (\n ('camelCase', 'camelCase'),\n ('_camelCase', '_camelCase'),\n ('snakeCase', 'snake_case'),\n ('_snakeCase', '_snake_case'),\n ('snakeCaseCase', 'snake_case_case'),\n )\n\n for case in cases:\n self.assertEqual(case[0], to_camel_case(case[1]))\n\n\nclass TestListingViews_auth(TestCase):\n fixtures = ['user-data.json', 'agave-oauth-token-data.json']\n @patch('designsafe.apps.api.agave.filemanager.lookups.FileLookupManager')\n def test_file_listing_view_private(self, mock_lookup):\n self.client.force_login(get_user_model().objects.get(username=\"ds_user\"))\n mock_lookup()().listing.return_value = {'resp': 'data'}\n mock_lookup()().requires_auth = True\n resp = self.client.get('/api/agave/files/listing/agave/designsafe.storage.default/ds_user')\n mock_lookup.assert_called_with('agave')\n mock_lookup()().listing.assert_called_with(\n system='designsafe.storage.default', \n file_path='ds_user',\n offset=0, \n limit=100)\n self.client.logout()\n\n mock_lookup()().requires_auth = False\n resp = self.client.get('/api/public/files/listing/community/designsafe.storage.community//')\n mock_lookup.assert_called_with('community')\n mock_lookup()().listing.assert_called_with(\n system='designsafe.storage.community', \n file_path='/',\n offset=0, \n limit=100)\n\n \n\n"
},
{
"alpha_fraction": 0.4529058039188385,
"alphanum_fraction": 0.4539078176021576,
"avg_line_length": 25.210525512695312,
"blob_id": "a154ca22f6e345c01137e2e5446e48d78c65332b",
"content_id": "ac15fbe6c6127de23be8f999aacef3908979538a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 998,
"license_type": "no_license",
"max_line_length": 109,
"num_lines": 38,
"path": "/designsafe/static/scripts/data-depot/components/file-metadata/file-metadata.controller.js",
"repo_name": "clawler/portal",
"src_encoding": "UTF-8",
"text": "\n\nclass FileMetadataComponentCtrl {\n\n constructor() {\n\n }\n\n $onInit() {\n this.show = false;\n this.loading = true;\n this.file.getAssociatedMetadata().then( (resp)=>{\n //\n try {\n this.metadata = resp[1];\n Object.keys(this.metadata).filter((key)=>{\n if (key.startsWith('_') || this.metadata[key] === '') {\n delete this.metadata[key];\n }\n });\n // Convert the object to an Array for the template\n this.metadata = Object.keys(this.metadata).map( (key)=> {return [key, this.metadata[key]];});\n } catch(err) {\n this.metadata = null;\n }\n }, (err)=>{\n this.errorMessage = err.message;\n }).finally( ()=> {\n this.loading = false;\n });\n }\n\n toggle() {\n this.show = !this.show;\n }\n\n}\n\n\nexport default FileMetadataComponentCtrl;\n"
},
{
"alpha_fraction": 0.6626262664794922,
"alphanum_fraction": 0.680134654045105,
"avg_line_length": 27.55769157409668,
"blob_id": "6101ab33741d17749ac2deae05fa2e892783dc0c",
"content_id": "9ab3f06446cb7ce72fe27f4becd27ab85efa92ff",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1485,
"license_type": "no_license",
"max_line_length": 73,
"num_lines": 52,
"path": "/designsafe/apps/api/agave/filemanager/community.py",
"repo_name": "clawler/portal",
"src_encoding": "UTF-8",
"text": "\"\"\"File Manager for community Data\n\"\"\"\n\nimport logging\nimport json\nimport os\nimport re\nimport datetime\nfrom django.conf import settings\nfrom .base import BaseFileManager\nfrom future.utils import python_2_unicode_compatible\nfrom designsafe.apps.api.agave.filemanager.agave import AgaveFileManager\nfrom designsafe.apps.api.exceptions import ApiException\n\nlogger = logging.getLogger(__name__)\n\n@python_2_unicode_compatible\nclass CommunityFileManager(AgaveFileManager):\n NAME = 'community'\n DEFAULT_SYSTEM_ID = 'designsafe.storage.community'\n\n @property\n def requires_auth(self):\n \"\"\"Whether it should check for an authenticated user.\n\n If this is a public data file manager, it should return False.\n \"\"\"\n return False\n \n def copy(self, *args, **kwargs):\n raise ApiException('Invalid action.', 400)\n\n def delete(self, *args, **kwargs):\n raise ApiException('Invalid action.', 400)\n\n def mkdir(self, *args, **kwargs):\n raise ApiException('Invalid action.', 400)\n\n def move(self, *args, **kwargs):\n raise ApiException('Invalid action.', 400)\n\n def rename(self, *args, **kwargs):\n raise ApiException('Invalid action.', 400)\n\n def share(self, *args, **kwargs):\n raise ApiException('Invalid action.', 400)\n\n def trash(self, *args, **kwargs):\n raise ApiException('Invalid action.', 400)\n\n def upload(self, *args, **kwargs):\n raise ApiException('Invalid action.', 400)\n"
}
] | 18 |
bonifaido/pys60-sudoku
|
https://github.com/bonifaido/pys60-sudoku
|
7ddc84d067f6e2051969afcfa96a8ee7d193cb30
|
a1a9321ef15699ab798c9308d385151f00c62b24
|
460d55b9d2bca62a88cbd2afa8f154692d9b44e7
|
refs/heads/master
| 2016-09-05T19:06:23.700338 | 2011-07-12T17:28:53 | 2011-07-12T17:28:53 | 112,222 | 3 | 3 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6264263987541199,
"alphanum_fraction": 0.6498498320579529,
"avg_line_length": 28.73214340209961,
"blob_id": "e0d779d8001b9848c8ea8a1a43ac0ad9972c85d3",
"content_id": "8ef75faaba42602ba81ef64891585d2937f701f8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 9990,
"license_type": "no_license",
"max_line_length": 122,
"num_lines": 336,
"path": "/pys60_sudoku.py",
"repo_name": "bonifaido/pys60-sudoku",
"src_encoding": "UTF-8",
"text": "#\n#\tpys60_sudoku\n# A simple sudoku game for S60 based phones.\n#\n#\tCopyright (C) 2008-2009 Nandor Istvan Kracser\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>.\n#\n\n\n__all__ = [\"Board\", \"Game\"]\n\nimport appuifw\nfrom graphics import *\nimport key_codes\nimport e32\nimport copy # csak a sudoku tabla masolasaert\nimport random\n\n\nclass Board:\n\tboardlist = []\n\tpartialboardlist = []\n\t\n\tdef __init__(self):\n\t\tself.boardlist = []\n\t\tself.partialboardlist = []\n\n\tdef generate(self, numFilled=(9*9)):\n\t\t# cleaning cleaning cleaning\n\t\tself.boardlist = []\n\t\tself.partialboardlist = []\n\t\tslots = []\n\t\tfillOrder = []\n\n\t\trandom.seed()\n\n\t\t# setup board\n\t\trow = [0,0,0,0,0,0,0,0,0]\n\t\tfor i in range(0, 9):\n\t\t\tself.boardlist.append(row[:])\n\n\t\tfor j in range(0, 9):\n\t\t\tfor i in range(0, 9):\n\t\t\t\tslots.append((i,j))\n\n\t\tself.search(slots, 0)\n\t\t\n\t\twhile len(slots) > 0:\n\t\t\ti = random.randint(0, len(slots)-1)\n\t\t\tfillOrder.append(slots[i])\n\t\t\tdel slots[i]\n\n\t\t# setup board\n\t\tfor i in range(0, 9):\n\t\t\tself.partialboardlist.append(row[:])\n\n\t\tfor i in range(0, numFilled):\n\t\t\tj = fillOrder[i]\n\t\t\tself.partialboardlist[j[0]][j[1]] = self.boardlist[j[0]][j[1]]\n\n\tdef search(self, slots, index):\n\t\tnums = []\n\t\tfillOrder = []\n\n\t\tif len(slots) == index:\n\t\t\treturn self.check()\n\n\t\tfor i in range(1, 10):\n\t\t\tnums.append(i)\n\n\t\twhile len(nums) > 0:\n\t\t\ti = random.randint(0, len(nums)-1)\n\t\t\tfillOrder.append(nums[i])\n\t\t\tdel nums[i]\n\n\t\tfor i in fillOrder:\n\t\t\tx = slots[index][0]\n\t\t\ty = slots[index][1]\n\t\t\tself.boardlist[x][y] = i\n\t\t\tif (self.check()):\n\t\t\t\tif self.search(slots, index+1):\n\t\t\t\t\treturn True\n\t\t\tself.boardlist[x][y] = 0\n\t\treturn False\n\n\tdef check(self):\n\t\tfor i in range(0, 9):\n\t\t\tif (not self.checkRow(i)) or (not self.checkCol(i)) or (not self.checkSquare(i)):\n\t\t\t\treturn False\n\t\treturn True\n\t\n\tdef checkRow(self, row):\n\t\tfound = []\n\t\tfor i in range(0, 9):\n\t\t\tif not self.boardlist[i][row] == 0:\n\t\t\t\tif self.boardlist[i][row] in found:\n\t\t\t\t\treturn False\n\t\t\t\tfound.append(self.boardlist[i][row])\n\t\treturn True\n\n\tdef checkCol(self, col):\n\t\tfound = []\n\t\tfor j in range(0, 9):\n\t\t\tif not self.boardlist[col][j] == 0:\n\t\t\t\tif self.boardlist[col][j] in found:\n\t\t\t\t\treturn False\n\t\t\t\tfound.append(self.boardlist[col][j])\n\t\treturn True\n\n\tdef checkSquare(self, square):\n\t\tfound = []\n\t\txoffset = (3*(square % 3))\n\t\tyoffset = int(square / 3) * 3\n\t\tfor j in range(0, 3):\n\t\t\tfor i in range(0, 3):\n\t\t\t\tif not self.boardlist[xoffset+i][yoffset+j] == 0:\n\t\t\t\t\tif self.boardlist[xoffset+i][yoffset+j] in found:\n\t\t\t\t\t\treturn False\n\t\t\t\t\tfound.append(self.boardlist[xoffset+i][yoffset+j])\n\t\treturn True\n\n\tdef getList(self):\n\t\trow = 
[0,0,0,0,0,0,0,0,0]\n\t\tfor i in range(0, 9):\n\t\t\tself.boardlist.append(row[:])\n\t\treturn self.boardlist\n\n\tdef printBoard(self):\n\t\tfor j in range(0, 9):\n\t\t\tfor i in range(0, 9):\n\t\t\t\tif self.boardlist[i][j] == 0:\n\t\t\t\t\tprint '.',\n\t\t\t\telse:\n\t\t\t\t\tprint self.boardlist[i][j],\n\t\t\tprint\n\n\tdef printPartialBoard(self):\n\t\tfor j in range(0, 9):\n\t\t\tfor i in range(0, 9):\n\t\t\t\tif self.partialboardlist[i][j] == 0:\n\t\t\t\t\tprint '.',\n\t\t\t\telse:\n\t\t\t\t\tprint self.partialboardlist[i][j],\n\t\t\tprint\n\n\tdef _checkRow(self, row):\n\t\tfound = []\n\t\tfor i in range(0, 9):\n\t\t\tif self.boardlist[i][row] == 0:\n\t\t\t\treturn False\n\t\t\tif self.boardlist[i][row] in found:\n\t\t\t\treturn False\n\t\t\tfound.append(self.boardlist[i][row])\n\t\treturn True\n\n\tdef _checkCol(self, col):\n\t\tfound = []\n\t\tfor j in range(0, 9):\n\t\t\tif self.boardlist[col][j] == 0:\n\t\t\t\treturn False\n\t\t\tif self.boardlist[col][j] in found:\n\t\t\t\treturn False\n\t\t\tfound.append(self.boardlist[col][j])\n\t\treturn True\n\n\tdef _checkSquare(self, square):\n\t\tfound = []\n\t\txoffset = (3*(square % 3))\n\t\tyoffset = int(square / 3) * 3\n\t\tfor j in range(0, 3):\n\t\t\tfor i in range(0, 3):\n\t\t\t\tif self.boardlist[xoffset+i][yoffset+j] == 0:\n\t\t\t\t\treturn False\n\t\t\t\tif self.boardlist[xoffset+i][yoffset+j] in found:\n\t\t\t\t\treturn False\n\t\t\t\tfound.append(self.boardlist[xoffset+i][yoffset+j])\n\t\treturn True\n\n\tdef _check(self):\n\t\tself.boardlist = copy.deepcopy(self.partialboardlist)\n\t\tfor i in range(0, 9):\n\t\t\tif (not self._checkRow(i)) or (not self._checkCol(i)) or (not self._checkSquare(i)):\n\t\t\t\treturn False\n\t\treturn True\n\t\t\n\nclass Game (object):\n\n\tdef __init__(self):\n\t\tself.canvas = appuifw.Canvas(redraw_callback=self.redraw, event_callback=self.paint_table)\n\t\tself.img = Image.new(self.canvas.size)\n\t\tself.w_unit = self.canvas.size[0] / 9\n\t\tself.h_unit = self.canvas.size[1] / 9\n\t\tself.fontsize = 20\n\t\tself.border_color = (0,100,255)\n\t\tself.row = 0 # a keret kiindulasi pozicioja, es kesobb az aktualis pozicioja\n\t\tself.coll = 0\n\t\tself.blankcells = 51\n\t\tself.b = Board()\n\t\tself.b.generate(9*9 - self.blankcells)\n\t\t# meglegyen az eredeti peldany, az osszehasonlitasok miatt\n\t\tself.ref_list = copy.deepcopy(self.b.partialboardlist)\n\t\tappuifw.app.title = u\"Sudoku\"\n\t\tappuifw.app.screen = \"normal\"\n\t\tself.menu = [\n\t\t\t\t(u\"New\", self.generateboard),\n\t\t\t\t(u\"Check\", self.check),\n\t\t\t\t(u\"Difficulty\", self.change_difficulty),\n\t\t\t\t(u\"About\", self.about)\n\t\t\t\t\t]\n\t\tappuifw.app.menu = self.menu\n\t\tappuifw.app.body = self.canvas\n\t\tappuifw.app.exit_key_handler = self.quit\n\t\tself.app_lock = e32.Ao_lock()\n\t\tself.app_lock.wait()\n\n\n\tdef paint_table(self,event=None):\n\t\tself.img.clear()\n\n\t\tif event:\n\t\t\tif event['keycode']==key_codes.EKey1 and self.ref_list[self.coll][self.row] == 0:\n\t\t\t\tself.b.partialboardlist[self.coll][self.row] = 1\n\t\t\telif event['keycode']==key_codes.EKey2 and self.ref_list[self.coll][self.row] == 0:\n\t\t\t\tself.b.partialboardlist[self.coll][self.row] = 2\n\t\t\telif event['keycode']==key_codes.EKey3 and self.ref_list[self.coll][self.row] == 0:\n\t\t\t\tself.b.partialboardlist[self.coll][self.row] = 3\n\t\t\telif event['keycode']==key_codes.EKey4 and self.ref_list[self.coll][self.row] == 0:\n\t\t\t\tself.b.partialboardlist[self.coll][self.row] = 4\n\t\t\telif event['keycode']==key_codes.EKey5 
and self.ref_list[self.coll][self.row] == 0:\n\t\t\t\tself.b.partialboardlist[self.coll][self.row] = 5\n\t\t\telif event['keycode']==key_codes.EKey6 and self.ref_list[self.coll][self.row] == 0:\n\t\t\t\tself.b.partialboardlist[self.coll][self.row] = 6\n\t\t\telif event['keycode']==key_codes.EKey7 and self.ref_list[self.coll][self.row] == 0:\n\t\t\t\tself.b.partialboardlist[self.coll][self.row] = 7\n\t\t\telif event['keycode']==key_codes.EKey8 and self.ref_list[self.coll][self.row] == 0:\n\t\t\t\tself.b.partialboardlist[self.coll][self.row] = 8\n\t\t\telif event['keycode']==key_codes.EKey9 and self.ref_list[self.coll][self.row] == 0:\n\t\t\t\tself.b.partialboardlist[self.coll][self.row] = 9\n\t\t\telif event['keycode']==key_codes.EKeyUpArrow:\n\t\t\t\tif (self.coll -\t1) < 0:\n\t\t\t\t\tself.coll = 8\n\t\t\t\telse:\n\t\t\t\t\tself.coll -= 1\n\t\t\telif event['keycode']==key_codes.EKeyDownArrow:\n\t\t\t\tif (self.coll + 1) > 8:\n\t\t\t\t\tself.coll = 0\n\t\t\t\telse:\n\t\t\t\t\tself.coll += 1\n\t\t\telif event['keycode']==key_codes.EKeyLeftArrow:\n\t\t\t\tif (self.row - 1) < 0:\n\t\t\t\t\tself.row = 8\n\t\t\t\telse:\n\t\t\t\t\tself.row -= 1\n\t\t\telif event['keycode']==key_codes.EKeyRightArrow:\n\t\t\t\tif (self.row + 1) > 8:\n\t\t\t\t\tself.row = 0\n\t\t\t\telse:\n\t\t\t\t\tself.row += 1\n\t\t\n\t\t# alkoto keretek felrajzolasa, 4 vastagabb keret is van\n\t\tfor i in range(1,9):\n\t\t\t_width = 1\n\t\t\tif i%3 == 0:\n\t\t\t\t_width = 3\n\t\t\tself.img.line(((i*self.w_unit, 0), (i*self.w_unit, self.img.size[1])), width=_width, outline=(0,0,0))\n\t\t\tself.img.line(((0, i*self.h_unit), (self.img.size[0], i*self.h_unit)), width=_width, outline=(0,0,0))\n\t\t\n\t\t# az aktualis cella korulrajzolasa\n\t\tself.img.line(((self.row*self.w_unit,self.coll*self.h_unit),((self.row+1)*self.w_unit,self.coll*self.h_unit)), \\\n\t\t\twidth=3, outline=self.border_color) # felso vonal\n\t\tself.img.line(((self.row*self.w_unit,(self.coll+1)*self.h_unit),((self.row+1)*self.w_unit,(self.coll+1)*self.h_unit)), \\\n\t\t\twidth=3, outline=self.border_color) # also vonal\n\t\tself.img.line(((self.row*self.w_unit,self.coll*self.h_unit),(self.row*self.w_unit,(self.coll+1)*self.h_unit)), \\\n\t\t\twidth=3, outline=self.border_color) # bal vonal\n\t\tself.img.line((((self.row+1)*self.w_unit,(self.coll+1)*self.h_unit),((self.row+1)*self.w_unit,self.coll*self.h_unit)), \\\n\t\t\twidth=3, outline=self.border_color) # jobb vonal\n\n\t\tfor i in range(0,9):\n\t\t\tfor j in range(0,9):\n\t\t\t\tif not self.b.partialboardlist[i][j] == 0:\n\t\t\t\t\t_color = (0,0,0)\n\t\t\t\t\tif not self.ref_list[i][j] == 0:\n\t\t\t\t\t\t_color = (128,128,128)\n\t\t\t\t\t# font maggassagat es szelesseget is belekalkulaljuk\n\t\t\t\t\tself.img.text( (j*self.w_unit + self.w_unit/2 - self.fontsize/4, i*self.h_unit + self.h_unit/2 + self.fontsize/2), \\\n\t\t\t\t\t\tunicode(str(self.b.partialboardlist[i][j])), \\\n\t\t\t\t\t\t_color, font=(None,self.fontsize,FONT_BOLD | FONT_ANTIALIAS), )\n\n\t\tself.canvas.blit(self.img)\n\t\n\t# a canvas redraw_callbackje, at kell venni a parametert (redraw terulet)\n\tdef redraw(self,param):\n\t\tself.canvas.blit(self.img)\n\n\t# menu functions\n\tdef generateboard(self):\n\t\tself.b.generate(9*9 - self.blankcells)\n\t\tself.ref_list = copy.deepcopy(self.b.partialboardlist)\n\t\tself.paint_table()\n\n\tdef check(self):\n\t\tif self.b._check():\n\t\t\tappuifw.note(u\"Success\",\"conf\")\n\t\t\tif appuifw.query(u'Next game?', 
'query'):\n\t\t\t\tself.generateboard()\n\t\telse:\n\t\t\tappuifw.note(u\"Fail\",\"error\")\n\n\tdef change_difficulty(self):\n\t\tself.blankcells = appuifw.query(u\"How many blank cells do you want? (1-80)\",\"number\")\n\t\twhile self.blankcells > 80 or self.blankcells < 1:\n\t\t\tself.blankcells = appuifw.query(u\"Please choose between 1 and 80!.\",\"number\")\n\t\tself.generateboard()\n\n\tdef about(self):\n\t\tappuifw.note(u\"Visit http://github.com/bonifaido/\",\"info\")\n\n\tdef quit(self):\n\t\tself.app_lock.signal()\n\n\nif __name__ == '__main__':\n\tGame()\n"
}
] | 1 |
lumanight/python-design-project
|
https://github.com/lumanight/python-design-project
|
24b2e15ede3bf32588f5b4e922fdf2fd278d5145
|
84f08db6c7f9012821873423e46246822bb098e9
|
bda2ff4923bcb26ff2fca6c1b856e800c6ac6b2a
|
refs/heads/master
| 2020-09-09T16:17:41.697059 | 2019-11-13T15:51:39 | 2019-11-13T15:51:39 | 221,494,231 | 1 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5863950848579407,
"alphanum_fraction": 0.6818423867225647,
"avg_line_length": 15.068181991577148,
"blob_id": "1ab1e3fd14108edcee4d99ca1966acc85a4525e2",
"content_id": "cbeb2e60fe42486323565abf1de5bc1f7dbde85c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7512,
"license_type": "no_license",
"max_line_length": 27,
"num_lines": 440,
"path": "/yin22343432.py",
"repo_name": "lumanight/python-design-project",
"src_encoding": "UTF-8",
"text": "import turtle\r\nbob = turtle.Turtle()\r\nturtle.bgcolor(\"dark blue\")\r\n\r\nbob.color(\"blue\")\r\nbob.begin_fill()\r\nbob.width(12)\r\nbob.forward(100)\r\nbob.color(\"orange\")\r\nbob.circle(200)\r\nbob.left(100)\r\nbob.forward(30)\r\nbob.right(30)\r\nbob.left(30)\r\nbob.forward(50)\r\nbob.forward(50)\r\nbob.right(17)\r\nbob.forward(17)\r\nbob.forward(30)\r\nbob.color(\"grey\")\r\nbob.forward(17)\r\nbob.circle(45)\r\nbob.forward(30)\r\nbob.forward(17)\r\nbob.forward(30)\r\nbob.forward(17)\r\nbob.forward(30)\r\nbob.right(90)\r\nbob.forward(30)\r\nbob.color(\"red\")\r\nbob.circle(45)\r\nbob.forward(17)\r\nbob.color(\"yellow\")\r\nbob.color(\"dark green\")\r\nbob.forward(109)\r\nbob.right(78)\r\nbob.forward(120)\r\nbob.end_fill()\r\n\r\nbob.color(\"blue\")\r\nbob.begin_fill()\r\nbob.width(12)\r\nbob.forward(100)\r\nbob.color(\"orange\")\r\nbob.circle(200)\r\nbob.left(100)\r\nbob.forward(30)\r\nbob.right(30)\r\nbob.left(30)\r\nbob.forward(50)\r\nbob.forward(50)\r\nbob.right(17)\r\nbob.forward(17)\r\nbob.forward(30)\r\nbob.color(\"grey\")\r\nbob.forward(17)\r\nbob.circle(45)\r\nbob.forward(30)\r\nbob.forward(17)\r\nbob.forward(30)\r\nbob.forward(17)\r\nbob.forward(30)\r\nbob.right(90)\r\nbob.forward(30)\r\nbob.color(\"red\")\r\nbob.circle(45)\r\nbob.forward(17)\r\nbob.color(\"yellow\")\r\nbob.color(\"dark green\")\r\nbob.forward(109)\r\nbob.right(78)\r\nbob.forward(120)\r\nbob.end_fill()\r\n\r\nbob.color(\"blue\")\r\nbob.begin_fill()\r\nbob.width(12)\r\nbob.forward(100)\r\nbob.color(\"orange\")\r\nbob.circle(200)\r\nbob.left(100)\r\nbob.forward(30)\r\nbob.right(30)\r\nbob.left(30)\r\nbob.forward(50)\r\nbob.forward(50)\r\nbob.right(17)\r\nbob.forward(17)\r\nbob.forward(30)\r\nbob.color(\"grey\")\r\nbob.forward(17)\r\nbob.circle(45)\r\nbob.forward(30)\r\nbob.forward(17)\r\nbob.forward(30)\r\nbob.forward(17)\r\nbob.forward(30)\r\nbob.right(90)\r\nbob.forward(30)\r\nbob.color(\"red\")\r\nbob.circle(45)\r\nbob.forward(17)\r\nbob.color(\"yellow\")\r\nbob.color(\"dark green\")\r\nbob.forward(109)\r\nbob.right(78)\r\nbob.forward(120)\r\nbob.end_fill()\r\n\r\nbob.color(\"blue\")\r\nbob.begin_fill()\r\nbob.width(12)\r\nbob.forward(100)\r\nbob.color(\"orange\")\r\nbob.circle(200)\r\nbob.left(100)\r\nbob.forward(30)\r\nbob.right(30)\r\nbob.left(30)\r\nbob.forward(50)\r\nbob.forward(50)\r\nbob.right(17)\r\nbob.forward(17)\r\nbob.forward(30)\r\nbob.color(\"grey\")\r\nbob.forward(17)\r\nbob.circle(45)\r\nbob.forward(30)\r\nbob.forward(17)\r\nbob.forward(30)\r\nbob.forward(17)\r\nbob.forward(30)\r\nbob.right(90)\r\nbob.forward(30)\r\nbob.color(\"red\")\r\nbob.circle(45)\r\nbob.forward(17)\r\nbob.color(\"yellow\")\r\nbob.color(\"dark green\")\r\nbob.forward(109)\r\nbob.right(78)\r\nbob.forward(120)\r\nbob.end_fill()\r\nbob.color(\"blue\")\r\nbob.begin_fill()\r\nbob.width(12)\r\nbob.forward(100)\r\nbob.color(\"orange\")\r\nbob.circle(200)\r\nbob.left(100)\r\nbob.forward(30)\r\nbob.right(30)\r\nbob.left(30)\r\nbob.forward(50)\r\nbob.forward(50)\r\nbob.right(17)\r\nbob.forward(17)\r\nbob.forward(30)\r\nbob.color(\"grey\")\r\nbob.forward(17)\r\nbob.circle(45)\r\nbob.forward(30)\r\nbob.forward(17)\r\nbob.forward(30)\r\nbob.forward(17)\r\nbob.forward(30)\r\nbob.right(90)\r\nbob.forward(30)\r\nbob.color(\"red\")\r\nbob.circle(45)\r\nbob.forward(17)\r\nbob.color(\"yellow\")\r\nbob.color(\"dark 
green\")\r\nbob.forward(109)\r\nbob.right(78)\r\nbob.forward(120)\r\nbob.end_fill()\r\nbob.color(\"blue\")\r\nbob.begin_fill()\r\nbob.width(12)\r\nbob.forward(100)\r\nbob.color(\"orange\")\r\nbob.circle(200)\r\nbob.left(100)\r\nbob.forward(30)\r\nbob.right(30)\r\nbob.left(30)\r\nbob.forward(50)\r\nbob.forward(50)\r\nbob.right(17)\r\nbob.forward(17)\r\nbob.forward(30)\r\nbob.color(\"grey\")\r\nbob.forward(17)\r\nbob.circle(45)\r\nbob.forward(30)\r\nbob.forward(17)\r\nbob.forward(30)\r\nbob.forward(17)\r\nbob.forward(30)\r\nbob.right(90)\r\nbob.forward(30)\r\nbob.color(\"red\")\r\nbob.circle(45)\r\nbob.forward(17)\r\nbob.color(\"yellow\")\r\nbob.color(\"dark green\")\r\nbob.forward(109)\r\nbob.right(78)\r\nbob.forward(120)\r\nbob.end_fill()\r\nbob.color(\"blue\")\r\nbob.begin_fill()\r\nbob.width(12)\r\nbob.forward(100)\r\nbob.color(\"orange\")\r\nbob.circle(200)\r\nbob.left(100)\r\nbob.forward(30)\r\nbob.right(30)\r\nbob.left(30)\r\nbob.forward(50)\r\nbob.forward(50)\r\nbob.right(17)\r\nbob.forward(17)\r\nbob.forward(30)\r\nbob.color(\"grey\")\r\nbob.forward(17)\r\nbob.circle(45)\r\nbob.forward(30)\r\nbob.forward(17)\r\nbob.forward(30)\r\nbob.forward(17)\r\nbob.forward(30)\r\nbob.right(90)\r\nbob.forward(30)\r\nbob.color(\"red\")\r\nbob.circle(45)\r\nbob.forward(17)\r\nbob.color(\"yellow\")\r\nbob.color(\"dark green\")\r\nbob.forward(109)\r\nbob.right(78)\r\nbob.forward(120)\r\nbob.end_fill()\r\n\r\nbob.color(\"blue\")\r\nbob.begin_fill()\r\nbob.width(12)\r\nbob.forward(100)\r\nbob.color(\"orange\")\r\nbob.circle(200)\r\nbob.left(100)\r\nbob.forward(30)\r\nbob.right(30)\r\nbob.left(30)\r\nbob.forward(50)\r\nbob.forward(50)\r\nbob.right(17)\r\nbob.forward(17)\r\nbob.forward(30)\r\nbob.color(\"grey\")\r\nbob.forward(17)\r\nbob.circle(45)\r\nbob.forward(30)\r\nbob.forward(17)\r\nbob.forward(30)\r\nbob.forward(17)\r\nbob.forward(30)\r\nbob.right(90)\r\nbob.forward(30)\r\nbob.color(\"red\")\r\nbob.circle(45)\r\nbob.forward(17)\r\nbob.color(\"yellow\")\r\nbob.color(\"dark green\")\r\nbob.forward(109)\r\nbob.right(78)\r\nbob.forward(120)\r\nbob.end_fill()\r\nbob.color(\"blue\")\r\nbob.begin_fill()\r\nbob.width(12)\r\nbob.forward(100)\r\nbob.color(\"orange\")\r\nbob.circle(200)\r\nbob.left(100)\r\nbob.forward(30)\r\nbob.right(30)\r\nbob.left(30)\r\nbob.forward(50)\r\nbob.forward(50)\r\nbob.right(17)\r\nbob.forward(17)\r\nbob.forward(30)\r\nbob.color(\"grey\")\r\nbob.forward(17)\r\nbob.circle(45)\r\nbob.forward(30)\r\nbob.forward(17)\r\nbob.forward(30)\r\nbob.forward(17)\r\nbob.forward(30)\r\nbob.right(90)\r\nbob.forward(30)\r\nbob.color(\"red\")\r\nbob.circle(45)\r\nbob.forward(17)\r\nbob.color(\"yellow\")\r\nbob.color(\"dark green\")\r\nbob.forward(109)\r\nbob.right(78)\r\nbob.forward(120)\r\nbob.end_fill()\r\nbob.color(\"blue\")\r\nbob.begin_fill()\r\nbob.width(12)\r\nbob.forward(100)\r\nbob.color(\"orange\")\r\nbob.circle(200)\r\nbob.left(100)\r\nbob.forward(30)\r\nbob.right(30)\r\nbob.left(30)\r\nbob.forward(50)\r\nbob.forward(50)\r\nbob.right(17)\r\nbob.forward(17)\r\nbob.forward(30)\r\nbob.color(\"grey\")\r\nbob.forward(17)\r\nbob.circle(45)\r\nbob.forward(30)\r\nbob.forward(17)\r\nbob.forward(30)\r\nbob.forward(17)\r\nbob.forward(30)\r\nbob.right(90)\r\nbob.forward(30)\r\nbob.color(\"red\")\r\nbob.circle(45)\r\nbob.forward(17)\r\nbob.color(\"yellow\")\r\nbob.color(\"dark green\")\r\nbob.forward(109)\r\nbob.right(78)\r\nbob.forward(120)\r\nbob.end_fill()\r\nbob.color(\"blue\")\r\nbob.begin_fill()\r\nbob.width(12)\r\nbob.forward(100)\r\nbob.color(\"orange\r\n 
\")\r\nbob.circle(200)\r\nbob.left(100)\r\nbob.forward(30)\r\nbob.right(30)\r\nbob.left(30)\r\nbob.forward(50)\r\nbob.forward(50)\r\nbob.right(17)\r\nbob.forward(17)\r\nbob.forward(30)\r\nbob.color(\"grey\")\r\nbob.forward(17)\r\nbob.circle(45)\r\nbob.forward(30)\r\nbob.forward(17)\r\nbob.forward(30)\r\nbob.forward(17)\r\nbob.forward(30)\r\nbob.right(90)\r\nbob.forward(30)\r\nbob.color(\"white\")\r\nbob.circle(45)\r\nbob.forward(17)\r\nbob.color(\"yellow\")\r\nbob.color(\"red\")\r\nbob.forward(109)\r\nbob.right(78)\r\nbob.forward(120)\r\nbob.end_fill()\r\nbob.color(\"grey\")\r\nbob.forward(17)\r\nbob.circle(45)\r\nbob.forward(30)\r\nbob.forward(17)\r\nbob.forward(30)\r\nbob.forward(17)\r\nbob.forward(30)\r\nbob.right(90)\r\nbob.forward(30)\r\nbob.color(\"white\")\r\nbob.circle(45)\r\nbob.forward(17)\r\nbob.color(\"yellow\")\r\nbob.color(\"red\")\r\nbob.forward(109)\r\nbob.right(78)\r\nbob.forward(120)\r\nbob.end_fill()\r\nbob.color(\"grey\")\r\nbob.forward(17)\r\nbob.circle(45)\r\nbob.forward(30)\r\nbob.forward(17)\r\nbob.forward(30)\r\nbob.forward(17)\r\nbob.forward(30)\r\nbob.right(90)\r\nbob.forward(30)\r\nbob.color(\"white\")\r\nbob.circle(45)\r\nbob.forward(17)\r\nbob.color(\"yellow\")\r\nbob.color(\"red\")\r\nbob.forward(109)\r\nbob.right(78)\r\nbob.forward(120)\r\nbob.end_fill()\r\nbob.color(\"light blue\")\r\nbob.forward(17)\r\nbob.circle(45)\r\nbob.forward(30)\r\nbob.forward(17)\r\nbob.forward(30)\r\nbob.forward(17)\r\nbob.forward(30)\r\nbob.right(90)\r\nbob.forward(30)\r\nbob.color(\"orange\")\r\nbob.circle(45)\r\nbob.forward(17)\r\nbob.color(\"yellow\")\r\nbob.color(\"red\")\r\nbob.forward(109)\r\nbob.right(78)\r\nbob.forward(120)\r\nbob.end_fill()\r\n\r\n"
}
] | 1 |
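The turtle script that closes the record above repeats the same block of forward/right/color commands many times over. A minimal sketch, not taken from the repository, of how such a repeated block could be written once and looped; the helper name `draw_segment`, the trimmed command list, and the repeat count are assumptions made only for illustration.

```python
# Illustrative only: one pass of the repeated drawing block, expressed as a
# function and repeated with a loop instead of copy-pasted commands.
import turtle

def draw_segment(t):
    t.color("blue")
    t.begin_fill()
    t.width(12)
    t.forward(100)
    t.color("orange")
    t.circle(200)
    t.left(100)
    t.forward(30)
    t.color("dark green")
    t.forward(109)
    t.right(78)
    t.forward(120)
    t.end_fill()

bob = turtle.Turtle()
for _ in range(3):   # assumed repeat count
    draw_segment(bob)
turtle.done()
```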
ickoice/tinder-bot-python
|
https://github.com/ickoice/tinder-bot-python
|
f3281a39967d8ed7c1b9b1e5202457bd1f29a992
|
8f24df668704e2f3ef55f82b30808a58ed1543de
|
13d6f80e6a5c21bba9b5bb45e29575b7f836612b
|
refs/heads/master
| 2023-01-04T04:50:52.281349 | 2020-10-28T14:51:42 | 2020-10-28T14:51:42 | 308,044,188 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5418422818183899,
"alphanum_fraction": 0.5570940971374512,
"avg_line_length": 34.34042739868164,
"blob_id": "f4c286c03652549cee4cec1935e1797aaaf973fa",
"content_id": "9d53fdcf06db78c340c9ea57101b7fd1ade7e21d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4983,
"license_type": "no_license",
"max_line_length": 125,
"num_lines": 141,
"path": "/tinder_bot.py",
"repo_name": "ickoice/tinder-bot-python",
"src_encoding": "UTF-8",
"text": "from selenium import webdriver\nfrom time import sleep\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support import expected_conditions as EC\n\nfrom secrets import username, password\n\n\nclass TinderBot():\n def __init__(self):\n self.driver = webdriver.Chrome()\n\n def login(self):\n self.driver.get('https://tinder.com')\n\n sleep(2)\n\n # Turn off popup cookies\n cookies_popup_btn = WebDriverWait(self.driver, 20).until(EC.presence_of_element_located(\n (By.XPATH, '//*[@id=\"content\"]/div/div[2]/div/div/div[1]/button')))\n cookies_popup_btn.click()\n\n # Click login button\n login_btn = self.driver.find_element_by_xpath(\n '//*[@id=\"content\"]/div/div[1]/div/main/div[1]/div/div/header/div[1]/div[2]/div/button')\n login_btn.click()\n\n sleep(2)\n\n # print(self.driver.window_handles)\n\n # click login with facebook\n # fb_btn = self.driver.find_element_by_xpath('//*[@id=\"modal-manager\"]/div/div/div[1]/div/div[3]/span/div[2]/button')\n # print(fb_btn)\n # fb_btn.click()\n\n WebDriverWait(self.driver, 20).until(EC.presence_of_element_located(\n (By.XPATH, \"//button[@type = 'button' and @aria-label = 'Log in with Facebook']//span\"))).click()\n\n # switch to login popup\n base_window = self.driver.window_handles[0]\n self.driver.switch_to_window(self.driver.window_handles[1])\n\n email_in = self.driver.find_element_by_xpath('//*[@id=\"email\"]')\n email_in.send_keys(username)\n\n pw_in = self.driver.find_element_by_xpath('//*[@id=\"pass\"]')\n pw_in.send_keys(password)\n\n login_btn = self.driver.find_element_by_xpath('//*[@id=\"u_0_0\"]')\n login_btn.click()\n\n sleep(2)\n\n self.driver.switch_to_window(base_window)\n\n popup_1 = WebDriverWait(self.driver, 20).until(EC.presence_of_element_located(\n (By.XPATH, '//*[@id=\"modal-manager\"]/div/div/div/div/div[3]/button[1]')))\n popup_1.click()\n\n popup_2 = WebDriverWait(self.driver, 20).until(EC.presence_of_element_located(\n (By.XPATH, '//*[@id=\"modal-manager\"]/div/div/div/div/div[3]/button[1]')))\n popup_2.click()\n\n # sleep(10)\n recieved_likes_popup = WebDriverWait(self.driver, 20).until(EC.presence_of_element_located(\n (By.XPATH, '//*[@id=\"modal-manager\"]/div/div/div/div[3]/button[2]')))\n if recieved_likes_popup:\n recieved_likes_popup.click()\n\n def like(self):\n like_btn = self.driver.find_element_by_xpath(\n '//*[@id=\"content\"]/div/div[1]/div/main/div[1]/div/div/div[1]/div/div[2]/div[4]/button')\n like_btn.click()\n\n def dislike(self):\n dislike_btn = self.driver.find_element_by_xpath(\n '//*[@id=\"content\"]/div/div[1]/div/main/div[1]/div/div/div[1]/div/div[2]/div[2]/button')\n dislike_btn.click()\n\n def auto_swipe(self):\n from random import random\n left_count, right_count = 0, 0\n while True:\n sleep(0.5)\n try:\n rand = random()\n if rand < .73:\n self.like()\n right_count = right_count + 1\n print('{}th right swipe'.format(right_count))\n else:\n self.dislike()\n left_count = left_count + 1\n print('{}th left swipe'.format(left_count))\n except Exception:\n try:\n self.close_popup()\n except Exception:\n self.close_match()\n\n def close_popup(self):\n popup_3 = self.driver.find_element_by_xpath(\n '//*[@id=\"modal-manager\"]/div/div/div[2]/button[2]')\n popup_3.click()\n\n def close_match(self):\n match_popup = self.driver.find_element_by_xpath(\n '//*[@id=\"modal-manager-canvas\"]/div/div/div[1]/div/div[3]/a')\n match_popup.click()\n\n def message_all(self):\n \tMESSAGE = 'heyy'\n while True:\n matches = 
self.driver.find_elements_by_class_name('matchListItem')\n print(matches)\n if len(matches) < 2:\n break\n matches[1].click()\n sleep(4)\n msg_box = self.driver.find_element_by_xpath(\n '//*[@id=\"chat-text-area\"]')\n msg_box.send_keys(MESSAGE)\n\n sleep(2)\n send_btn = WebDriverWait(self.driver, 2).until(EC.presence_of_element_located(\n (By.XPATH, '/html/body/div[1]/div/div[1]/div/main/div[1]/div/div/div/div[1]/div/div/div[3]/form/button[2]')))\n send_btn.click()\n sleep(2)\n matches_tab = self.driver.find_element_by_xpath(\n '//*[@id=\"match-tab\"]')\n matches_tab.click()\n sleep(1)\n\n\nbot = TinderBot()\nbot.login()\nsleep(5)\n# bot.auto_swipe()\n# bot.message_all()\n"
},
{
"alpha_fraction": 0.6527546048164368,
"alphanum_fraction": 0.6644407510757446,
"avg_line_length": 34.235294342041016,
"blob_id": "dcf9d1f4c0ae3418634619ebb0a8e07a0426b571",
"content_id": "e6ca3ac1acd78b24bae94d0bdc1660690a2d0a7b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 599,
"license_type": "no_license",
"max_line_length": 136,
"num_lines": 17,
"path": "/README.md",
"repo_name": "ickoice/tinder-bot-python",
"src_encoding": "UTF-8",
"text": "# To run:\n - download chromedriver, unzip, move to `/usr/local/bin` (mac os / linux)\n - `pip install selenium`\n - open `tinder_bot.py` using any text editor -> uncomment `# bot.auto_swipe()` or `# bot.message_all()` according to your initial need\n - edit `secrets.py` with your email and password\n - run the command `python tinder_bot.py`\n\n# Uses:\n\n - `Auto swipe` - run in this mode to launch the auto-swiper\n * 70% swipe right\n * 30% swipe left\n - `Message all` - run in this mode to message all matches\n * Set the message on `line 114 - MESSAGE = 'heyy'`\n\n\n# Have fun - use it well!\n"
}
] | 2 |
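The tinder_bot.py in the record above swipes right on roughly 73% of profiles by comparing `random()` against a fixed threshold inside `auto_swipe`. Below is a minimal sketch of that weighted-choice loop with the Selenium calls replaced by a stub so it runs standalone; the `swipe()` helper, iteration count, and print format are assumptions, and only the 0.73 threshold mirrors the repository code.

```python
# Sketch of the weighted swipe decision from auto_swipe, without Selenium.
from random import random

def swipe(direction):
    print("swiping", direction)   # stand-in for bot.like() / bot.dislike()

right_count = left_count = 0
for _ in range(10):               # the real bot loops until interrupted
    if random() < 0.73:           # ~73% chance of a right swipe
        swipe("right")
        right_count += 1
    else:
        swipe("left")
        left_count += 1
print(right_count, "right /", left_count, "left")
```

Worth noting when reusing the original script: newer Selenium 4.x releases drop `driver.find_element_by_xpath(...)` in favour of `driver.find_element(By.XPATH, ...)` and `driver.switch_to_window(...)` in favour of `driver.switch_to.window(...)`, so the record's code as written targets the older Selenium 3.x API.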
pelluch/gmm_t3
|
https://github.com/pelluch/gmm_t3
|
5eb2692a838e8258212526518cfaf63cc5ead5f8
|
1f88f74dc53cbda9c9c726641882cb755d205995
|
91ddfd9bfe646716e3003c1ec477bade5c018891
|
refs/heads/master
| 2020-06-10T23:00:09.298062 | 2016-12-01T18:48:38 | 2016-12-01T18:48:38 | 75,848,849 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5901065468788147,
"alphanum_fraction": 0.6074581146240234,
"avg_line_length": 32.68717956542969,
"blob_id": "2ff13aecb4d01e2d1a2efcb53143d35562340862",
"content_id": "0ef1966530bc0bde8f2fbb55efc70afa2016f0b4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6576,
"license_type": "no_license",
"max_line_length": 151,
"num_lines": 195,
"path": "/main.py",
"repo_name": "pelluch/gmm_t3",
"src_encoding": "UTF-8",
"text": "# coding: utf-8\n\nfrom scipy import random\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\nimport sklearn\nfrom sklearn.cluster import KMeans\nfrom scipy import linalg\nfrom scipy.misc import logsumexp\nfrom matplotlib.patches import Ellipse\nfrom sklearn import mixture\n\n\n\nclass GMM:\n def __init__(self, n_components, initial_weights, initial_means, initial_sigmas):\n self.n_components = n_components\n self.means = initial_means\n self.weights = initial_weights\n self.sigmas = initial_sigmas\n\n\n\n# Esto da el valor mínimo para un float sin ser 0\neps = np.finfo(float).eps\n# Valor para terminar la iteración\nthreshold = 0.001\n\ndef log_multivariable_normal(X, mean, sigma, n_components):\n x = X.T\n # Evitar problemas de dimensiones\n sigma = np.atleast_2d(sigma)\n\n lhs = -(n_components / 2) * np.log(2 * np.pi) - 0.5 * np.log(np.linalg.det(sigma))\n rhs = -0.5 * (x.T - mean).T * np.dot(linalg.inv(sigma), (x.T - mean).T)\n if rhs.ndim == 2:\n rhs = np.sum(rhs, axis=0)\n return lhs + rhs\n\n\n# Calculado según ecuaciones de https://www.cs.ubc.ca/~murphyk/Papers/learncg.pdf\ndef calculate_new_sigmas(X, gammas, means, n_components):\n num_dims = X.shape[1]\n # Nuevo arreglo de sigmas\n sigmas = np.empty((n_components, num_dims, num_dims))\n for k in range(n_components):\n post_resps = gammas[:, k]\n mean = means[k]\n diff = X - mean\n # El último factor se agrega para evitar indeterminación\n avg_sigma = np.dot(post_resps * diff.T, diff) / (post_resps.sum() + 10 * eps)\n sigmas[k] = avg_sigma\n return sigmas\n\n\n\ndef e_step(gmm, X):\n probs_mat = np.zeros((X.shape[0], gmm.n_components))\n # Calculamos las responsabilidades\n for i in range(gmm.n_components):\n probs_mat[:, i] = log_multivariable_normal(X, gmm.means[i], gmm.sigmas[i], gmm.n_components) + np.log(gmm.weights[i])\n\n lpr = logsumexp(probs_mat, axis=1)\n gammas = np.exp(probs_mat - lpr[:, np.newaxis])\n return lpr, gammas\n\n\ndef m_step(gmm, X, gammas):\n # Los nuevos pesos son las sumas de las responsabilidades ponderado por la suma de todos los componentes\n weights = gammas.sum(axis=0)\n # Al actualizar, agregamos un mínimo para evitar valores indeterminados\n gmm.weights = (weights / (weights.sum() + 10 * eps) + eps)\n\n weighted_gammas = np.dot(gammas.T, X)\n # Nuevamente evitamos valores indeterminados\n inverse_weights = 1.0 / (weights[:, np.newaxis] + 10 * eps)\n\n gmm.means = weighted_gammas * inverse_weights\n gmm.sigmas = calculate_new_sigmas(X, gammas, gmm.means, gmm.n_components)\n\n\n\ndef plot_2d(X, means, sigmas, fig_number, title):\n fig = plt.figure(fig_number)\n ax = fig.add_subplot(111, aspect='equal')\n ax.set_xlim(-10, 10)\n ax.set_ylim(-10, 10)\n ax.scatter(X[:, 0], X[:, 1], c='yellow')\n ax.scatter(means[:, 0], means[:, 1], s=40)\n for idx, mean in enumerate(means):\n covar_matrix = sigmas[idx]\n w, v = np.linalg.eig(covar_matrix)\n width = 2 * np.sqrt(w[0] * 5.991)\n height = 2 * np.sqrt(w[1] * 5.991)\n largest_vector = v[0]\n if np.linalg.norm(v[1]) > np.linalg.norm(v[0]):\n largest_vector = v[1]\n v1_y = largest_vector[1]\n v1_x = largest_vector[0]\n angle = np.arctan(v1_x / v1_y) * 180 / np.pi\n e = Ellipse(mean, width=height, height=width, angle=angle)\n e.set_clip_box(ax.bbox)\n e.set_alpha(0.6)\n e.set_facecolor(random.rand(3))\n ax.add_artist(e)\n plt.title(title)\n\n\ndef fit_gmm(X, n_components):\n kmeans = KMeans(n_clusters=n_components, random_state=0).fit(X)\n # Primero, inicializamos ocupando k-means\n means = kmeans.cluster_centers_\n # 
Pesos aleatorios uniformes\n weights = np.tile(1.0 / n_components, n_components)\n predictions = kmeans.predict(X)\n\n # Para las covarianzas inicializamos con covarianza intra-clusters\n covs = []\n for cluster in range(n_components):\n cluster_data = X[predictions == cluster]\n covs.append(np.cov(cluster_data.T))\n covs = np.array(covs)\n weights = np.array(weights)\n mixture = GMM(n_components, weights, means, covs)\n lpr = np.finfo(float).min\n \n num_iters = 100\n # Max 100 iteraciones\n for i in range(num_iters):\n logs, gammas = e_step(mixture, X)\n m_step(mixture, X, gammas)\n mean_logs = logs.mean()\n # int('Delta for iteration ' + str(i) + ': ' + str(np.abs(mean_logs - lpr)))\n if np.abs(mean_logs - lpr) < threshold:\n num_iters = (i + 1)\n break\n\n lpr = mean_logs\n \n print('delta after ' + str(num_iters) + ' iterations: ' + str(lpr - mean_logs))\n # Ploteamos 2D para visualizar resultados\n if X.shape[1] == 2: \n plot_2d(X, mixture.means, mixture.sigmas, 0, 'Calculado')\n\n print('Calculated means: ')\n print(np.sort(mixture.means, axis=0))\n # print('Calculated covars: ')\n # print(mixture.sigmas)\n # print('Calculated weights: ')\n # print(mixture.weights)\n gmm = sklearn.mixture.GaussianMixture(n_components, verbose=0, tol=threshold)\n gmm.fit(X)\n print('Sklearn means: ')\n print(np.sort(gmm.means_, axis=0))\n # print('Sklearn covars: ')\n # print(gmm.covariances_)\n # print('Sklearn weights: ')\n # print(gmm.weights_)\n\n if X.shape[1] == 2:\n plot_2d(X, gmm.means_, gmm.covariances_, 1, 'Sklearn')\n print('--------------')\n print('Plotting 2D cases')\n print('--------------')\n plt.show()\n \n\n# Generamos los datos con normal multivariable\ndef generate_data(num_dims, n_components):\n X = np.empty([0, num_dims])\n all_means = []\n for i in range(n_components):\n # Con esto aseguramos de generar matriz positiva semi definida\n cov = random.rand(num_dims, num_dims)\n cov = np.dot(cov, cov.transpose())\n means = []\n for j in range(num_dims):\n # Medias aleatorias\n means.append(random.uniform(-5, 5))\n new_data = np.random.multivariate_normal(means, cov, random.randint(100, 400))\n X = np.concatenate((X, new_data))\n all_means.append(means)\n return X\n\n\nrandom_params = [(2, 7), (3, 5)]\nplt.close(\"all\")\n\nfor params in random_params:\n print('------------------------------')\n print('Results for ' + str(params[0]) + \" dimensions - \" + str(params[1]) + \" components\")\n print('------------------------------')\n X = generate_data(params[0], params[1])\n fit_gmm(X, params[1])\n\n"
}
] | 1 |
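The main.py in the record above fits a Gaussian mixture with EM and, in `e_step`, normalizes the per-component log densities with `logsumexp` to obtain responsibilities. A minimal sketch of just that normalization step on a toy matrix, assuming NumPy/SciPy; the sample log-probability values are invented for illustration. (The repository imports `logsumexp` from `scipy.misc`, which newer SciPy versions have moved to `scipy.special`.)

```python
# Sketch of the E-step normalization used in e_step(): each row holds
# log p(x_n | k) + log w_k for one sample; logsumexp gives the per-sample
# log-likelihood, and exponentiating the difference gives responsibilities.
import numpy as np
from scipy.special import logsumexp

log_probs = np.array([[-1.2, -0.7],     # toy values: 3 samples x 2 components
                      [-0.3, -2.5],
                      [-1.0, -1.0]])

lpr = logsumexp(log_probs, axis=1)               # log-likelihood per sample
gammas = np.exp(log_probs - lpr[:, np.newaxis])  # responsibilities

print(gammas.sum(axis=1))   # each row sums to 1
```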
danilnagy/dmc_stack
|
https://github.com/danilnagy/dmc_stack
|
90f7943f4b2f196fc103f030b78d3d3d3cf9fbcf
|
d2cf7472a9140483745a153006f9b67b821cc8ef
|
4f3c89ae4a39def9efc72f181c3b88595a5398a2
|
refs/heads/master
| 2021-01-17T05:44:05.360233 | 2015-08-17T15:01:18 | 2015-08-17T15:01:18 | 40,030,579 | 0 | 1 | null | 2015-08-01T00:55:57 | 2015-08-01T01:01:05 | 2015-09-21T00:07:10 |
JavaScript
|
[
{
"alpha_fraction": 0.7397590279579163,
"alphanum_fraction": 0.7710843086242676,
"avg_line_length": 28.64285659790039,
"blob_id": "0df2f77fc310a48e0a844ee562722f50e2f3ceda",
"content_id": "a62d9402f5c67a16163c15aee6faddfb6dd08162",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 415,
"license_type": "no_license",
"max_line_length": 92,
"num_lines": 14,
"path": "/README.md",
"repo_name": "danilnagy/dmc_stack",
"src_encoding": "UTF-8",
"text": "# dmc_stack\nFull web stack for DMC 2015 class @ Columbia University GSAPP\n\n# getting started\n\n1. install python\n2. install flask library for python\n3. run app.py in main folder\n4. log on to http://localhost:5000 in browser\n\n# editing\n\n- app.py contains backend code for the web server as well as all data manipulation\n- index.html file in templates folder contains dynamic leaflet and D3 code for visualization\n"
},
{
"alpha_fraction": 0.6671479940414429,
"alphanum_fraction": 0.6888086795806885,
"avg_line_length": 24.66666603088379,
"blob_id": "b42aac2a6af51a3ac6b7a387868ab3813ed582ac",
"content_id": "125c850191b15930752878460a02a760dfdb3c70",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1385,
"license_type": "no_license",
"max_line_length": 132,
"num_lines": 54,
"path": "/app.py",
"repo_name": "danilnagy/dmc_stack",
"src_encoding": "UTF-8",
"text": "from flask import Flask\nfrom flask import render_template\nimport json\n\nimport pyorient\n\napp = Flask(__name__)\n\[email protected](\"/\")\ndef index():\n return render_template(\"index.html\")\n\[email protected](\"/listings\")\ndef getData():\n\n\t#ORIENTDB IMPLEMENTATION\n\tclient = pyorient.OrientDB(\"localhost\", 2424)\n\tsession_id = client.connect(\"root\", \"password\")\n\n\tdb_name = \"property_test\"\n\n\tif client.db_exists( db_name, pyorient.STORAGE_TYPE_MEMORY ):\n\t\tclient.db_open( db_name, \"admin\", \"admin\" )\n\telse:\n\t\tprint \"database does not exist!\"\n\t\tsys.exit()\n\n\trecordsDict = {\"type\":\"FeatureCollection\",\"features\":[]}\n\trecords = client.command('SELECT FROM Listing WHERE [latitude,longitude,$spatial] NEAR [41.177407, 80.290192, {\"maxDistance\": 2}]')\n\n\tfor record in records:\n\t\trecordDict = {\"type\":\"Feature\",\"properties\":{},\"geometry\":{\"type\":\"Point\"}}\n\t\trecordDict[\"id\"] = record._rid\n\t\trecordDict[\"properties\"][\"name\"] = record.title\n\t\trecordDict[\"properties\"][\"price\"] = record.price\n\t\trecordDict[\"geometry\"][\"coordinates\"] = [record.longitude, record.latitude]\n\n\t\trecordsDict[\"features\"].append(recordDict)\n\n\tclient.db_close()\n\n\n\t# #DUMMY DATA IMPLEMENTATION\n\t# with open(\"static/data.txt\", 'r') as f:\n\t# \trecordsDict = json.loads(f.read())\n\n\tprint \"acquired!\"\n\n\t#pass GeoJSON data back to D3\n\treturn json.dumps(recordsDict)\n\n\nif __name__ == \"__main__\":\n app.run(host='0.0.0.0',port=5000,debug=True)"
}
] | 2 |
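The app.py in the record above exposes a `/listings` route that builds a GeoJSON FeatureCollection from OrientDB records and returns it with `json.dumps` for the Leaflet/D3 front end. Here is a minimal sketch of that route with the database query replaced by one in-memory feature so it runs standalone; the sample id, coordinates, and price are assumptions, and the sketch uses Python 3 syntax while the record's code (bare `print` statements) targets Python 2.

```python
# Sketch of the /listings pattern: build a FeatureCollection dict and
# return it as JSON; the OrientDB lookup is replaced by a dummy record.
import json
from flask import Flask

app = Flask(__name__)

@app.route("/listings")
def get_listings():
    records = {"type": "FeatureCollection", "features": []}
    records["features"].append({
        "type": "Feature",
        "id": "#12:0",                                    # placeholder record id
        "properties": {"name": "sample listing", "price": 1200},
        "geometry": {"type": "Point", "coordinates": [-73.96, 40.81]},
    })
    return json.dumps(records)

if __name__ == "__main__":
    app.run(host="0.0.0.0", port=5000, debug=True)
```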
oTeun/solo.to-checker
|
https://github.com/oTeun/solo.to-checker
|
9ae8cd599e25a0ca10d186617daab2c1442858b7
|
acc0b99f803e0bdc2e87e374a3c2218ee9b145c3
|
f2763d10cd2708bd9af53241a2b373a8a3ac8876
|
refs/heads/main
| 2023-08-15T01:21:15.991210 | 2021-09-19T16:57:30 | 2021-09-19T16:57:30 | 376,253,881 | 2 | 3 | null | 2021-06-12T09:49:29 | 2021-06-13T12:55:35 | 2021-06-13T12:55:33 |
Python
|
[
{
"alpha_fraction": 0.7709163427352905,
"alphanum_fraction": 0.7709163427352905,
"avg_line_length": 54.77777862548828,
"blob_id": "0bdba6d4d41745df2c4198fbcf5de4a7c5fd94bf",
"content_id": "a15697b8f61b7a0202ae8f385e32e586390a3758",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 502,
"license_type": "no_license",
"max_line_length": 186,
"num_lines": 9,
"path": "/README.md",
"repo_name": "oTeun/solo.to-checker",
"src_encoding": "UTF-8",
"text": "# (DEPRACATED) Solo.to name checker\nA program that checks available names on solo.to\nTHIS PROGRAM NO LONGER FUNCTIONS\n\n## READ\n- Currently, solo.to does not have any ratelimits / IP bans. I still recommend running this on a VPN. Depending on how many names you check, this program might be spamming their servers.\n- Anything that goes wrong is your responsibility, not mine.\n- Note: some usernames may appear available, but are blocked. There is no way to check this\n- Now get your names on solo.to :)\n"
},
{
"alpha_fraction": 0.6660988330841064,
"alphanum_fraction": 0.6737648844718933,
"avg_line_length": 27.634145736694336,
"blob_id": "fc67d821f289ff2eeba12621b939fc67f3854bd8",
"content_id": "cca998781a94ecb2c901d25896405dae00e8545b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1174,
"license_type": "no_license",
"max_line_length": 96,
"num_lines": 41,
"path": "/main.py",
"repo_name": "oTeun/solo.to-checker",
"src_encoding": "UTF-8",
"text": "import requests\nfrom threading import Thread\nfrom colorama import Fore\nimport logging\nfrom time import sleep\nfrom os import system\n\nlogging.basicConfig(level=logging.INFO, format=\"%(message)s\")\n\nsystem(\"\")\n\nnames = open('usernames.txt', 'r').read().splitlines() # read names from file\n\n\ndef check(name):\n r = requests.get(f'https://solo.to/{name}')\n if r.status_code == 404:\n logging.info(f\"{Fore.GREEN}[AVAILABLE] {name}\")\n with open('available.txt', 'a') as f:\n f.write(name + '\\n')\n elif r.status_code == 200:\n logging.info(f\"{Fore.RED}[UNAVAILABLE] {name}\")\n else:\n logging.info(f\"{Fore.RED}[ERROR] Received status code {r.status_code} on {name}\")\n\n\nprint(f\"{Fore.GREEN}Started check for {len(names)} usernames on dsc.bio\")\n\nthreads = []\nfor name in names:\n threads.append(Thread(target=check, args=[name]))\nfor t in threads:\n t.start()\n sleep(0.02)\n # I sleep a little because otherwise it starts returning server errors\n # Basically if you dont sleep you end up ddossing their servers cuz their protection is shit\nfor t in threads:\n t.join()\n\nprint(f\"{Fore.GREEN}Done checking.\")\nprint(Fore.RESET)\n"
}
] | 2 |
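The main.py in the record above starts one thread per username and sleeps 20 ms between thread starts so the checker does not flood the server. A minimal sketch of that throttled check pattern follows, assuming the requests library; the sample usernames are invented, and per the record's README the service no longer behaves this way, so this is illustrative only.

```python
# Sketch of the throttled, threaded availability check from main.py:
# a 404 on the profile URL is treated as an available name.
from threading import Thread
from time import sleep
import requests

def check(name):
    r = requests.get(f"https://solo.to/{name}")
    status = "AVAILABLE" if r.status_code == 404 else "UNAVAILABLE"
    print(f"[{status}] {name} ({r.status_code})")

names = ["example1", "example2"]          # assumed sample input
threads = [Thread(target=check, args=(n,)) for n in names]
for t in threads:
    t.start()
    sleep(0.02)                           # throttle thread start-up, as in the repo
for t in threads:
    t.join()
```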
hoergems/robots
|
https://github.com/hoergems/robots
|
a5ff51df774371532b6d2c062f53b7ab51e3c978
|
062971aa7ad816776ebdb415ded7476771cee6b6
|
868bde0d9fb1adc48ceb7bf4ea7bac662cfa6c64
|
refs/heads/master
| 2020-05-29T15:41:59.959269 | 2016-09-25T19:11:25 | 2016-09-25T19:11:25 | 61,373,369 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5186650156974792,
"alphanum_fraction": 0.5367851853370667,
"avg_line_length": 39.09708786010742,
"blob_id": "9fe3e1895f32e391437441bc5a5983a80e97e8a3",
"content_id": "3ed1f7215b942b69dc2799094c167e30fbd9395a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 24779,
"license_type": "no_license",
"max_line_length": 157,
"num_lines": 618,
"path": "/src/build_model2.py",
"repo_name": "hoergems/robots",
"src_encoding": "UTF-8",
"text": "import os\nimport sys\nimport argparse\nfrom sympy import *\nimport sympybotics\nfrom urdf_parser_py import urdf\n\nclass ModelParser:\n def __init__(self, file, header_src, imple_src):\n self.build_model(file)\n print \"Calculate link jacobians\"\n linkJacobians, baseToCOMTransformations = self.calcLinkJacobians2()\n g_ = symbols(\"g_\")\n g = Matrix([0.0, 0.0, g_]).T\n print \"Calculate inertia matrix\"\n M = self.calcManipulatorInertiaMatrix(linkJacobians)\n print \"Calculate coriolis matrix\"\n C = self.calcCentrifugalMatrix(M)\n print \"Calculate normal forces\"\n N = self.calcNormalForces(g, baseToCOMTransformations)\n numLinks = len(self.link_names) \n print \"invert inertia matrix\"\n M_inv = self.inertia_inverse(M, symbolic=True)\n print \"get dynamic model\" \n f = self.get_dynamic_model(M, M_inv, C, N, self.q, self.qdot, self.rho, self.zeta)\n print \"Calculate partial derivatives\" \n A, B, V = self.partial_derivatives2(f)\n print \"Calc first order derivatives of observation function\"\n H, W = self.calc_observation_derivatives(baseToCOMTransformations)\n print \"cleaning cpp code...\" \n \n self.clean_cpp_code(header_src, imple_src)\n self.gen_cpp_code2(f, \"F0\", header_src, imple_src)\n #self.gen_cpp_code2(A, \"A0\", header_src, imple_src)\n #self.gen_cpp_code2(B, \"B0\", header_src, imple_src)\n #self.gen_cpp_code2(V, \"V0\", header_src, imple_src)\n self.gen_cpp_code2(M, \"M0\", header_src, imple_src)\n self.gen_cpp_code2(H, \"H0\", header_src, imple_src)\n self.gen_cpp_code2(W, \"W0\", header_src, imple_src)\n print \"done\"\n \n def calcNormalForces(self, g, baseToCOMTransformations): \n Ocs = [] \n for i in xrange(len(baseToCOMTransformations)):\n\tprint baseToCOMTransformations[i]\n\tOc = Matrix([baseToCOMTransformations[i][0, 3],\n\t baseToCOMTransformations[i][1, 3],\n\t baseToCOMTransformations[i][2, 3]])\n\tOcs.append(Oc)\n \n V = 0.0\n link_masses = self.link_masses[1:len(self.link_masses)-1] \n for i in xrange(len(link_masses)):\n el = g[2] * Ocs[i][2] * link_masses[i] \n print \"el \" + str(el)\n V = V + el\n V = trigsimp(V) \n q = [self.q[i] for i in xrange(len(self.q) - 1)]\n VDiff = trigsimp(Matrix([V]).jacobian(q).T)\n return VDiff\n print VDiff \n print VDiff.shape\n N = Matrix([[trigsimp(diff(V, self.q[i]).doit())] for i in xrange(len(self.q) - 1)])\n print N;sleep\n qdot = self.qdot[0:len(self.qdot) - 1]\n '''\n The joint friction forces\n '''\n K = N + Matrix([[self.viscous[i] * qdot[i]] for i in xrange(len(qdot))]) \n return K \n \n def calcCentrifugalMatrix(self, M): \n C = zeros(M.shape[0], M.shape[1])\n q = self.q[0:len(self.q) - 1]\n qdot = self.qdot[0:len(self.qdot) - 1] \n for i in xrange(M.shape[0]):\n for j in xrange(M.shape[1]):\t\n\tfor k in xrange(M.shape[0]):\n\t C[i, j] += (trigsimp(diff(M[i, j], q[k])) + \n\t trigsimp(diff(M[i, k], q[j])) + \n\t trigsimp(diff(M[k, j], q[i]))) * qdot[k]\n\tC[i, j] *= 0.5\n '''for k in xrange(M.shape[0]):\n for j in xrange(M.shape[1]):\n\tfor i in xrange(M.shape[0]):\n\t C[k, j] += (trigsimp(diff(M[k, j], q[i])) +\n\t trigsimp(diff(M[k, i], q[j])) +\n\t trigsimp(diff(M[i, j], q[k]))) * qdot[i]\n\tC[k,j] *= 0.5''' \n return C\n \n def calcManipulatorInertiaMatrix(self, linkJacobians): \n Ms = []\n for i in xrange(len(self.joint_origins) - 1):\n M = eye(6)\n for j in xrange(3):\n\tM[j, j] = self.link_masses[i + 1]\n M[3, 3] = self.Is[i+1][0]\n M[4, 4] = self.Is[i+1][1]\n M[5, 5] = self.Is[i+1][2]\n Ms.append(M)\n M = zeros(len(linkJacobians), len(linkJacobians))\n for i in 
xrange(len(linkJacobians)):\n M += trigsimp(linkJacobians[i].T * Ms[i] * linkJacobians[i])\n return trigsimp(M)\n\n def getJointRotation(self, jointNumber): \n jointAxis = self.joint_axis[jointNumber]\n print \"jointNumber \" + str(jointNumber)\n print \"ja \" + str(jointAxis)\n rotation = 0\n if (jointAxis[0] == 1.0):\t \n\t rotation = self.rotateX(self.q[jointNumber])\n elif (jointAxis[1] == 1.0):\t \n\t rotation = self.rotateY(self.q[jointNumber])\n elif (jointAxis[2] == 1.0):\n\t \n\t rotation = self.rotateZ(self.q[jointNumber])\n \n return rotation\n \n def calcLinkJacobians2(self): \n baseToJointTransformations = []\n baseToCOMTransformations = []\n currentTrans = self.translateXYZ(0.0, 0.0, 0.0) \n \n for i in xrange(len(self.joint_origins) - 1):\n jointRotation = 0\n\tif (i == 0):\n\t jointRotation = self.rotateZ(0)\n\telse:\t \n\t jointRotation = self.getJointRotation(i-1)\t\n\ttrans = self.translateXYZ(self.joint_origins[i][0], self.joint_origins[i][1], self.joint_origins[i][2])\t\n\trotX = self.rotateX(self.joint_origins[i][3])\n\t#rotY = self.rotateY(self.joint_origins[i][4])\n\t#rotZ = self.rotateZ(self.joint_origins[i][5])\t\n\tcurrentTrans *= jointRotation\t\n\tcurrentTrans *= trans\t\n\tcurrentTrans *= rotX\n\t#currentTrans *= rotY\n\t#currentTrans *= rotZ\t\n\tcurrentTrans = trigsimp(currentTrans)\t\n\tbaseToJointTransformations.append(currentTrans) \n for i in xrange(len(self.joint_origins) - 1):\n jointRotation = self.getJointRotation(i) \n inertialPose = self.inertial_poses[i+1]\n inertialTranslation = self.translateXYZ(inertialPose[0], inertialPose[1], inertialPose[2]) \n trans = baseToJointTransformations[i] * jointRotation * inertialTranslation\n trans = trigsimp(trans)\n baseToCOMTransformations.append(trans) \n linkJacobians = []\n for i in xrange(len(self.joint_origins) - 1):\n\tJ = zeros(6, len(self.joint_origins) - 1)\n\tOc = trigsimp(Matrix([baseToCOMTransformations[i].col(3)[k] for k in xrange(3)]).T)\n\tfor j in xrange(i + 1):\n\t Oj = trigsimp(Matrix([baseToJointTransformations[j].col(3)[k] for k in xrange(3)]).T)\t \n\t Zj = trigsimp(Matrix([baseToJointTransformations[j].col(2)[k] for k in xrange(3)]).T)\n\t upper = Zj.cross(Oc - Oj)\n\t lower = Zj\n\t for k in xrange(3):\n\t J[k, j] = trigsimp(upper[k])\n\t J[k+3, j] = trigsimp(lower[k])\n\tlinkJacobians.append(J)\t\n return linkJacobians, baseToCOMTransformations\n \n def calcLinkJacobians(self):\n baseToJointTransformations = []\n currentTrans = 0\n for i in xrange(len(self.joint_origins) - 1):\n dh = 0\n if (i == 0):\n\tcurrentTrans = self.dh(0, self.joint_origins[i][2], 0, self.joint_origins[i][3])\t\n else:\n\tcurrentTrans = currentTrans * self.dh(self.q[i - 1], 0, self.joint_origins[i][0], self.joint_origins[i][3])\n baseToJointTransformations.append(currentTrans)\n baseToCOMTransformations = [] \n for i in xrange(len(self.joint_origins) - 1): \n trans = baseToJointTransformations[i] * self.dh(self.q[i], 0, self.joint_origins[i + 1][0] / 2.0, 0)\n baseToCOMTransformations.append(trans)\n linkJacobians = []\n for i in xrange(len(self.joint_origins) - 1):\n\tJ = zeros(6, len(self.joint_origins) - 1)\n\tOc = trigsimp(Matrix([baseToCOMTransformations[i].col(3)[k] for k in xrange(3)]).T)\n\tfor j in xrange(i + 1):\n\t Oj = trigsimp(Matrix([baseToJointTransformations[j].col(3)[k] for k in xrange(3)]).T)\t \n\t Zj = trigsimp(Matrix([baseToJointTransformations[j].col(2)[k] for k in xrange(3)]).T)\n\t upper = Zj.cross(Oc - Oj)\n\t lower = Zj\n\t for k in xrange(3):\n\t J[k, j] = trigsimp(upper[k])\n\t J[k+3, 
j] = trigsimp(lower[k])\n\tlinkJacobians.append(J)\n return linkJacobians, baseToCOMTransformations\n\n def translateXYZ(self, x, y, z):\n return Matrix([[1.0, 0.0, 0.0, x],\n\t\t [0.0, 1.0, 0.0, y],\n\t\t [0.0, 0.0, 1.0, z],\n\t\t [0.0, 0.0, 0.0, 1.0]])\n\n def rotateX(self, theta):\n return Matrix([[1.0, 0.0, 0.0, 0.0],\n\t\t [0.0, cos(theta), -sin(theta), 0.0],\n\t\t [0.0, sin(theta), cos(theta), 0.0],\n\t\t [0.0, 0.0, 0.0, 1.0]])\n \n def rotateY(self, theta):\n return Matrix([[cos(theta), 0.0, sin(theta), 0.0],\n\t\t [0.0, 1.0, 0.0, 0.0],\n\t\t [-sin(theta), 0.0, cos(theta), 0.0],\n\t\t [0.0, 0.0, 0.0, 1.0]])\n \n def rotateZ(self, theta):\n return Matrix([[cos(theta), -sin(theta), 0.0, 0.0],\n\t\t [sin(theta), cos(theta), 0.0, 0.0],\n\t\t [0.0, 0.0, 1.0, 0.0],\n\t\t [0.0, 0.0, 0.0, 1.0]])\n \n \n def dh(self, theta, d, a, alpha):\n return Matrix([[cos(theta), -sin(theta) * cos(alpha), sin(theta) * sin(alpha), a * cos(theta)],\n [sin(theta), cos(theta) * cos(alpha), -cos(theta) * sin(alpha), a * sin(theta)],\n [0.0, sin(alpha), cos(alpha), d],\n [0.0, 0.0, 0.0, 1.0]])\n \n def inertia_inverse(self, M, symbolic=False):\n if symbolic:\n M_inv = Matrix.zeros(M.shape[0], M.shape[1])\n for i in xrange(M.shape[0]):\n for j in xrange(M.shape[1]):\n strr = \"M_inv_(\" + str(i) + \", \" + str(j) + \")\" \n s = Symbol(strr)\n M_inv[i, j] = s\n return M_inv\n else:\n return M.inv()\n \n def calc_observation_derivatives(self, baseToCOMTransformations):\n ee_transformation = baseToCOMTransformations[-1] * self.transformation(self.joint_origins[1][0] / 2, 0.0, 0.0)\n\tg_funct = [trigsimp(ee_transformation[0, 3]), \n\t trigsimp(ee_transformation[1, 3]), \n\t trigsimp(ee_transformation[2, 3])]\t\n\tfor i in xrange(len(self.qdot) - 1):\n\t g_funct.append(self.qdot[i])\n\tg_funct = Matrix(g_funct)\n\tg_funct = g_funct\n\tetas = [symbols(\"eta_[\" + str(i) + \"]\") for i in xrange(g_funct.shape[0])]\t\n\t\n\tfor i in xrange(len(etas)):\n\t g_funct[i, 0] = g_funct[i, 0] + etas[i]\n\tx = [self.q[i] for i in xrange(len(self.q) - 1)]\n\tx.extend([self.qdot[i] for i in xrange(len(self.qdot) - 1)])\n\tH = trigsimp(g_funct.jacobian([x[i] for i in xrange(len(x))]))\n\tW = trigsimp(g_funct.jacobian([etas[i] for i in xrange(len(etas))]))\t\n\tH = simplify(H)\n\tH = nsimplify(H, tolerance=1e-4) \n\treturn H, W\n \n def transformation(self, x, y, z):\n t = Matrix([[1.0, 0.0, 0.0, x],\n\t\t[0.0, 1.0, 0.0, y],\n\t\t[0.0, 0.0, 1.0, z],\n\t\t[0.0, 0.0, 0.0, 1.0]])\n return t\n \n def partial_derivatives2(self, f): \n A1 = f.jacobian([self.q[i] for i in xrange(len(self.q) - 1)])\n A2 = f.jacobian([self.qdot[i] for i in xrange(len(self.qdot) - 1)])\n B = f.jacobian([self.rho[i] for i in xrange(len(self.rho) - 1)])\n C = f.jacobian([self.zeta[i] for i in xrange(len(self.zeta) - 1)])\n A = A1.row_join(A2) \n return A, B, C \n \n def get_dynamic_model(self, M, M_inv, C, N, thetas, dot_thetas, rs, zetas): \n #print \"time to invert: \" + str(time.time() - t0)\n Dotthetas = Matrix([[dot_thetas[i]] for i in xrange(len(dot_thetas) - 1)]) \n Rs = Matrix([[rs[i]] for i in xrange(len(rs) - 1)]) \n Zetas = Matrix([[zetas[i]] for i in xrange(len(zetas) - 1)]) \n print \"Constructing 2nd-order ODE\"\n m_upper = Matrix([[Dotthetas[i]] for i in xrange(len(Dotthetas))])\n m_lower = 0\n '''if self.simplifying:\n m_lower = trigsimp(-M_inv * trigsimp(C * Dotthetas + N) + M_inv * Rs) \n else:'''\n m_lower = M_inv * ((Rs + Zetas) - C * Dotthetas - N)\n h = m_upper.col_join(m_lower) \n return h\n\n def parse_urdf(self, file):\n\t r = 
urdf.Robot.from_xml_string(file)\n\t #robot = Robot(xml_file)\n\t self.link_names = [link.name for link in r.links]\n\t self.joint_names = [r.joints[i].name for i in xrange(len(r.joints))]\n\t self.joint_types = [joint.type for joint in r.joints]\n\t self.joint_origins = [Matrix([[joint.origin.xyz[0]],\n\t\t\t\t\t[joint.origin.xyz[1]],\n\t\t\t\t\t[joint.origin.xyz[2]],\n\t\t\t\t\t[joint.origin.rpy[0]],\n\t\t\t\t\t[joint.origin.rpy[1]],\n\t\t\t\t\t[joint.origin.rpy[2]]]) for joint in r.joints]\n\t for i in xrange(len(self.joint_origins)):\n\t self.joint_origins[i][3] = nsimplify(self.joint_origins[i][3], [pi], tolerance=0.001)\n\t self.joint_origins[i][4] = nsimplify(self.joint_origins[i][4], [pi], tolerance=0.001)\n\t self.joint_origins[i][5] = nsimplify(self.joint_origins[i][5], [pi], tolerance=0.001)\n\t \n\t self.joint_axis = [Matrix([[joint.axis[0]],\n\t\t\t\t [joint.axis[1]],\n\t\t\t\t [joint.axis[2]]]) for joint in r.joints]\n\t self.viscous = [symbols(\"viscous_[\" + str(i) + \"]\") for i in xrange(len(r.joints) - 1)]\n\t print \"===================\"\n\t \n\t \n\t self.q = []\n\t self.qdot = []\n\t self.qstar = []\n\t self.qdotstar = []\n\t self.rho = []\n\t self.rhostar = []\n\t self.zeta = []\n\t self.zetastar = []\n\t for i in xrange(len(self.joint_names)): \n\t \n\t symb_string_q = \"x[\" + str(i) + \"]\"\n\t symb_string_q_dot = \"x[\" + str(i + len(self.joint_names) - 1) + \"]\"\n\t symb_string_q_star = \"xstar[\" + str(i) + \"]\"\n\t symb_string_q_dot_star = \"xstar[\" + str(i + len(self.joint_names) - 1) + \"]\"\n\t symb_string_r = \"rho[\" + str(i) + \"]\"\n\t symb_string_r_star = \"rhostar[\" + str(i) + \"]\" \n\t symb_zeta = \"zeta[\" + str(i) + \"]\"\n\t symb_zeta_star = \"zetastar[\" + str(i) + \"]\" \n\t\t \n\t\t \n\t self.q.append(symbols(symb_string_q))\n\t self.qdot.append(symbols(symb_string_q_dot))\n\t self.rho.append(symbols(symb_string_r))\n\t self.qstar.append(symbols(symb_string_q_star))\n\t self.qdotstar.append(symbols(symb_string_q_dot_star))\n\t self.rhostar.append(symbols(symb_string_r_star))\n\t self.zeta.append(symbols(symb_zeta))\n\t self.zetastar.append(symbols(symb_zeta_star)) \n\t \n\t self.inertial_poses = []\n\t self.link_masses = []\n\t self.link_inertias = []\n\t self.link_dimensions = []\n\t self.Is = []\n\t for link in r.links:\n\t if link.inertial != None:\t\t \n\t\t self.inertial_poses.append([link.inertial.origin.xyz[0], \n\t\t\t\t\t link.inertial.origin.xyz[1],\n\t\t\t\t\t link.inertial.origin.xyz[2],\n\t\t\t\t\t link.inertial.origin.rpy[0],\n\t\t\t\t\t link.inertial.origin.rpy[1],\n\t\t\t\t\t link.inertial.origin.rpy[2]])\n\t\t self.link_masses.append(link.inertial.mass)\n\t\t self.link_inertias.append(Matrix(link.inertial.inertia.to_matrix()))\n\t\t self.Is.append([self.link_inertias[-1][i, i] for i in xrange(3)])\n\t else:\n\t\t self.inertial_poses.append([0.0, 0.0, 0.0, 0.0, 0.0, 0.0])\n\t\t self.link_masses.append(0.0)\n\t\t self.link_inertias.append(Matrix([[0.0 for j in xrange(3)] for i in xrange(3)]))\n\t\t self.Is.append([0.0 for i in xrange(3)])\n\n\n def build_model(self, modelFile):\n if (modelFile == None):\n print \"Model is none\"\n return\n print(\"Parsing model \" + modelFile)\n self.parse_urdf(modelFile)\n \n def gen_cpp_code2(self, Matr, name, header_src, imple_src): \n lines = list(open(imple_src, 'r'))\n lines_header = list(open(header_src, 'r'))\n temp_lines = []\n if Matr.shape[1] != 1:\n temp_lines.append(\"MatrixXd m(\" + str(Matr.shape[0]) + \", \" + str(Matr.shape[1]) + \"); \\n\")\n else:\n 
temp_lines.append(\"VectorXd m(\" + str(Matr.shape[0]) + \"); \\n\") \n for i in xrange(Matr.shape[0]):\n for j in xrange(Matr.shape[1]):\n temp_lines.append(\"m(\" + str(i) + \", \" + str(j) + \") = \" + str(ccode(Matr[i, j])) + \"; \\n\")\n temp_lines.append(\"return m; \\n\")\n idx1 = -1\n idx2 = -1\n breaking = False \n for i in xrange(len(lines)):\n if \"Integrate::get\" + name + \"(const state_type &x, const state_type &rho, const state_type &zeta) const{\" in lines[i]: \n idx1 = i + 1 \n breaking = True\n elif \"}\" in lines[i]:\n idx2 = i - 1\n if breaking:\n break \n if idx1 == -1: \n temp_lines.insert(0, \"MatrixXd Integrate::get\" + name + \"(const state_type &x, const state_type &rho, const state_type &zeta) const{ \\n\") \n temp_lines.append(\"\\n\")\n temp_lines.append(\"} \\n \\n\") \n lines[len(lines) - 2:len(lines) - 1] = temp_lines \n \n temp_lines_header = []\n idx = -1\n for i in xrange(len(lines_header)):\n if \"private:\" in lines_header[i]: \n idx = i\n temp_lines_header.append(\"MatrixXd get\" + str(name) + \"(const state_type &x, const state_type &rho, const state_type &zeta) const; \\n\")\n lines_header[idx+1:idx+1] = temp_lines_header\n \n else: \n del lines[idx1:idx2]\n idx = -1\n for i in xrange(len(lines)):\n if \"Integrate::get\" + name in lines[i]:\n idx = i \n lines[idx:idx] = temp_lines \n os.remove(imple_src)\n os.remove(header_src)\n with open(imple_src, 'a+') as f:\n for line in lines:\n f.write(line)\n with open(header_src, 'a+') as f:\n for line in lines_header:\n f.write(line)\n \n def clean_cpp_code2(self, header_src, imple_src):\n lines = list(open(imple_src, 'r'))\n lines_header = list(open(header_src, 'r'))\n tmp_lines = []\n idx_pairs = []\n \n idx1 = -1\n idx2 = -1\n breaking = False\n for i in xrange(len(lines)):\n\t if (\"MatrixXd Integrate::getA\" in lines[i] or\n\t \"MatrixXd Integrate::getB\" in lines[i] or\n\t \"MatrixXd Integrate::getV\" in lines[i] or\n\t \"MatrixXd Integrate::getF\" in lines[i] or\n\t \"MatrixXd Integrate::getM\" in lines[i] or\n\t \"MatrixXd Integrate::getC\" in lines[i] or\n\t \"MatrixXd Integrate::getN\" in lines[i] or\n\t\t\"MatrixXd Integrate::getH\" in lines[i] or\n \"MatrixXd Integrate::getW\" in lines[i]): \n idx1 = i \n breaking = True\n if \"}\" in lines[i] and breaking:\n idx_pairs.append((idx1, i))\n idx1 = -1\n breaking = False \n for i in xrange(len(lines)):\n app = True\n for j in xrange(len(idx_pairs)):\n if i >= idx_pairs[j][0] and i <= idx_pairs[j][1]:\n app = False\n break \n if app:\n tmp_lines.append(lines[i])\n os.remove(imple_src) \n with open(imple_src, 'a+') as f:\n for line in tmp_lines:\n f.write(line)\n \n tmp_lines = []\n idxs = []\n for i in xrange(len(lines_header)):\n\t if (\"MatrixXd Integrate::getA\" in lines[i] or\n\t \"MatrixXd Integrate::getB\" in lines[i] or\n\t \"MatrixXd Integrate::getV\" in lines[i] or\n\t \"MatrixXd Integrate::getF\" in lines[i] or\n\t \"MatrixXd Integrate::getM\" in lines[i] or\n\t \"MatrixXd Integrate::getC\" in lines[i] or\n\t \"MatrixXd Integrate::getN\" in lines[i] or\n\t\t\"MatrixXd Integrate::getH\" in lines[i] or\n \"MatrixXd Integrate::getW\" in lines[i]): \n idxs.append(i)\n for i in xrange(len(lines_header)):\n app = True\n for j in xrange(len(idxs)):\n if i == idxs[j]:\n app = False\n if app:\n tmp_lines.append(lines_header[i])\n \n os.remove(header_src) \n with open(header_src, 'a+') as f:\n for line in tmp_lines:\n f.write(line)\n \n lines = list(open(imple_src, 'r'))\n tmp_lines = []\n idx1 = -1\n idx2 = -1\n breaking = False\n for i in 
xrange(len(lines)):\n if \"void Integrate::setupSteadyStates() const {\" in lines[i]:\n idx1 = i + 1\n breaking = True\n elif \"std::pair<Integrate::AB_funct, std::pair<Integrate::AB_funct, Integrate::AB_funct>> Integrate::getClosestSteadyStateFunctions\" in lines[i]:\n idx2 = i - 3 \n if breaking:\n break \n del lines[idx1:idx2] \n os.remove(imple_src) \n with open(imple_src, 'a+') as f:\n for line in lines: \n f.write(line)\n \n \n def clean_cpp_code(self, header_src, imple_src):\n lines = list(open(imple_src, 'r'))\n lines_header = list(open(header_src, 'r'))\n tmp_lines = []\n idx_pairs = []\n \n idx1 = -1\n idx2 = -1\n breaking = False\n for i in xrange(len(lines)):\n\t if (\"MatrixXd Integrate::getA\" in lines[i] or \n \"MatrixXd Integrate::getB\" in lines[i] or \n \"MatrixXd Integrate::getV\" in lines[i] or\n \"MatrixXd Integrate::getF\" in lines[i] or\n \"MatrixXd Integrate::getM\" in lines[i] or\n \"MatrixXd Integrate::getC\" in lines[i] or\n \"MatrixXd Integrate::getN\" in lines[i] or\n \"MatrixXd Integrate::getH\" in lines[i] or\n \"MatrixXd Integrate::getW\" in lines[i] or\n \"MatrixXd Integrate::getSec\" in lines[i] or\n \"MatrixXd Integrate::getFirst\" in lines[i] or\n \"MatrixXd Integrate::getEEJacobian\" in lines[i] or\n \"MatrixXd Integrate::getMInv\" in lines[i]): \n idx1 = i \n breaking = True\n if \"}\" in lines[i] and breaking:\n idx_pairs.append((idx1, i))\n idx1 = -1\n breaking = False \n for i in xrange(len(lines)):\n app = True\n for j in xrange(len(idx_pairs)):\n if i >= idx_pairs[j][0] and i <= idx_pairs[j][1]:\n app = False\n break \n if app:\n tmp_lines.append(lines[i])\n os.remove(imple_src) \n with open(imple_src, 'a+') as f:\n for line in tmp_lines:\n f.write(line)\n \n tmp_lines = []\n idxs = []\n for i in xrange(len(lines_header)):\n\t if (\"MatrixXd getA\" in lines_header[i] or \n \"MatrixXd getB\" in lines_header[i] or \n \"MatrixXd getV\" in lines_header[i] or\n \"MatrixXd getF\" in lines_header[i] or\n \"MatrixXd getM\" in lines_header[i] or\n \"MatrixXd getC\" in lines_header[i] or\n \"MatrixXd getN\" in lines_header[i] or\n \"MatrixXd getH\" in lines_header[i] or\n \"MatrixXd getW\" in lines_header[i] or\n \"MatrixXd getSec\" in lines_header[i] or\n \"MatrixXd getFirst\" in lines_header[i] or\n \"MatrixXd getEEJacobian\" in lines_header[i] or\n \"MatrixXd getMInv\" in lines[i]): \n idxs.append(i)\n for i in xrange(len(lines_header)):\n app = True\n for j in xrange(len(idxs)):\n if i == idxs[j]:\n app = False\n if app:\n tmp_lines.append(lines_header[i])\n \n os.remove(header_src) \n with open(header_src, 'a+') as f:\n for line in tmp_lines:\n f.write(line)\n \n lines = list(open(imple_src, 'r'))\n tmp_lines = []\n idx1 = -1\n idx2 = -1\n breaking = False\n for i in xrange(len(lines)):\n if \"void Integrate::setupSteadyStates() const {\" in lines[i]:\n idx1 = i + 1\n breaking = True\n elif \"std::pair<Integrate::AB_funct, std::pair<Integrate::AB_funct, Integrate::AB_funct>> Integrate::getClosestSteadyStateFunctions\" in lines[i]:\n idx2 = i - 3 \n if breaking:\n break \n del lines[idx1:idx2] \n os.remove(imple_src) \n with open(imple_src, 'a+') as f:\n for line in lines: \n f.write(line)\n \n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='Dynamic model generator.')\n parser.add_argument('file', type=argparse.FileType('r'), nargs='?', default=None, help='File to load. 
Use - for stdin')\n parser.add_argument(\"-he\", \"--header\", help=\"Path to the robot header file\")\n parser.add_argument(\"-s\", \"--src\", help=\"Path to the robot source file\")\n \n args = parser.parse_args() \n if (args.file == None):\n print \"File is none\"\n sys.exit(0)\n if (args.header == None):\n print \"No header provided\"\n sys.exit(0)\n if (args.src == None):\n print \"No source provided\"\n sys.exit(0)\n xml_string = args.file.read()\n ModelParser(xml_string, args.header, args.src)"
},
{
"alpha_fraction": 0.6821191906929016,
"alphanum_fraction": 0.6830849647521973,
"avg_line_length": 25.35636329650879,
"blob_id": "22673a314a677ff609ce97c05b843bc2d4ba4a55",
"content_id": "54f126dc85b8e7e0be5cefbbf0f5cfe98bf009c4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 7248,
"license_type": "no_license",
"max_line_length": 109,
"num_lines": 275,
"path": "/src/robot.cpp",
"repo_name": "hoergems/robots",
"src_encoding": "UTF-8",
"text": "#include <robot_headers/robot.hpp>\n\nusing std::cout;\nusing std::endl;\n\nnamespace frapu\n{\n\nRobot::Robot(std::string robotFile, std::string configFile):\n InterfaceBase(),\n robot_file_(robotFile),\n constraints_enforced_(true),\n propagator_(nullptr),\n viewer_(nullptr),\n goal_position_(),\n goal_radius_(),\n process_distribution_(nullptr),\n observation_distribution_(nullptr),\n environmentInfo_(nullptr),\n stateSpace_(nullptr),\n actionSpace_(nullptr),\n observationSpace_(nullptr),\n serializer_(nullptr), \n goal_(nullptr),\n randomEngine_()\n{\n#ifdef USE_OPENRAVE\n viewer_ = std::make_shared<frapu::ViewerInterface>();\n#endif \n}\n\nvoid Robot::setRandomEngine(std::default_random_engine &randomEngine) {\n randomEngine_ = randomEngine;\n}\n\nbool Robot::propagateState(const frapu::RobotStateSharedPtr& state,\n const frapu::ActionSharedPtr& action,\n const std::vector<double> controlError,\n double duration,\n double simulationStepSize,\n frapu::RobotStateSharedPtr& result)\n{\n boost::this_thread::interruption_point();\n result = nullptr;\n std::vector<double> stateVec = static_cast<const frapu::VectorState*>(state.get())->asVector();\n std::vector<double> controlVec = static_cast<const frapu::VectorAction*>(action.get())->asVector();\n std::vector<double> resultVec;\n propagator_->propagateState(stateVec, controlVec, controlError, duration, simulationStepSize, resultVec);\n result = std::make_shared<frapu::VectorState>(resultVec);\n if (constraints_enforced_) {\t\n stateSpace_->enforceStateLimits(result);\n }\n\n return true;\n}\n\nbool Robot::propagateState(const frapu::RobotStateSharedPtr& state,\n const frapu::ActionSharedPtr& action,\n double duration,\n double simulationStepSize,\n frapu::RobotStateSharedPtr& result)\n{\n\n boost::this_thread::interruption_point();\n std::vector<double> controlVec = static_cast<const frapu::VectorAction*>(action.get())->asVector();\n std::vector<double> controlError(controlVec.size());\n Eigen::MatrixXd sample = process_distribution_->samples(1);\n //cout << \"sample: \" << sample << endl;\n for (size_t i = 0; i < controlError.size(); i++) {\n controlError[i] = sample(i, 0);\n }\n\n return propagateState(state, action, controlError, duration, simulationStepSize, result);\n}\n\nbool Robot::isValid(const frapu::RobotStateSharedPtr& state) const\n{\n std::vector<frapu::CollisionObjectSharedPtr> collisionObjects;\n std::vector<frapu::ObstacleSharedPtr> obstacles;\n environmentInfo_->scene->getObstacles(obstacles);\n createRobotCollisionObjects(state, collisionObjects);\n \n for (size_t i = 0; i < obstacles.size(); i++) {\n if (!obstacles[i]->getTerrain()->isTraversable()) {\n if (obstacles[i]->inCollision(collisionObjects)) {\n return false;\n }\n }\n }\n\n return true;\n}\n\nvoid Robot::setControlDuration(double control_duration)\n{\n control_duration_ = control_duration;\n}\n\ndouble Robot::getControlDuration() const\n{\n return control_duration_;\n}\n\nfrapu::SerializerSharedPtr Robot::getSerializer() const\n{\n return serializer_;\n}\n\nvoid Robot::updateRobot(const frapu::RobotStateSharedPtr& state)\n{\n\n}\n\nstd::shared_ptr<Eigen::Distribution<double>> Robot::getProcessDistribution() const\n{\n return process_distribution_;\n}\n\nstd::shared_ptr<Eigen::Distribution<double>> Robot::getObservationDistribution() const\n{\n return observation_distribution_;\n}\n\ndouble Robot::calcLikelihood(const frapu::RobotStateSharedPtr& state,\n const frapu::ObservationSharedPtr& observation) const\n{\n frapu::ObservationSharedPtr 
observationState;\n transformToObservationSpace(state, observationState);\n std::vector<double> stateVec =\n static_cast<frapu::VectorObservation*>(observationState.get())->asVector();\n std::vector<double> observationVec =\n static_cast<frapu::VectorObservation*>(observation.get())->asVector();\n double pdf = observation_distribution_->calcPdf(observationVec, stateVec);\n return pdf;\n}\n\nfrapu::StateSpaceSharedPtr Robot::getStateSpace() const\n{\n return stateSpace_;\n}\n\nfrapu::ObservationSpaceSharedPtr Robot::getObservationSpace() const\n{\n return observationSpace_;\n}\n\nfrapu::ActionSpaceSharedPtr Robot::getActionSpace() const\n{\n if (!actionSpace_) {\n assert(false && \"ACTION SPACE IS NULL\");\n }\n return actionSpace_;\n}\n\nvoid Robot::setGoalArea(std::vector<double>& goal_position, double& goal_radius)\n{\n goal_position_.clear();\n for (size_t i = 0; i < goal_position.size(); i++) {\n goal_position_.push_back(goal_position[i]);\n }\n\n goal_radius_ = goal_radius;\n}\n\nvoid Robot::getGoalArea(std::vector<double> &goalArea) const {\n goalArea.clear();\n for (size_t i = 0; i < goal_position_.size(); i++) { \n\tgoalArea.push_back(goal_position_[i]);\t\n }\n \n goalArea.push_back(goal_radius_);\n}\n\nbool Robot::checkSelfCollision(std::vector<std::shared_ptr<fcl::CollisionObject>>& collision_objects) const\n{\n return false;\n}\n\nbool Robot::checkSelfCollision(const frapu::RobotStateSharedPtr& state) const\n{\n return false;\n}\n\nbool Robot::constraintsEnforced()\n{\n return constraints_enforced_;\n}\n\nvoid Robot::enforceConstraints(bool enforce)\n{\n constraints_enforced_ = enforce;\n}\n\nvoid Robot::setNewtonModel()\n{\n\n}\n\nvoid Robot::setGravityConstant(double gravity_constant)\n{\n\n}\n\nvoid Robot::setEnvironmentInfo(frapu::EnvironmentInfoSharedPtr& environmentInfo)\n{ \n if (!environmentInfo) {\n\tfrapu::ERROR(\"Robot: setSenvironmentInfo: environmentInfo is null!!!\");\n }\n environmentInfo_ = environmentInfo;\n}\n\nstd::vector<frapu::RobotStateSharedPtr> Robot::loadGoalStatesFromFile(std::string &filename) const {\n return serializer_->loadGoalStatesFromFile(filename);\n}\n\nvoid Robot::makeGoal() {\n \n}\n \nfrapu::GoalSharedPtr Robot::getGoal() const {\n return goal_;\n}\n\nstd::vector<frapu::RobotStateSharedPtr> Robot::getGoalStates() const\n{\n return goalStates_;\n}\n\nvoid Robot::setGoalStates(std::vector<frapu::RobotStateSharedPtr> &goalStates) {\n goalStates_ = goalStates;\n}\n\nvoid Robot::resetViewer(std::string model_file, std::string environment_file)\n{\n#ifdef USE_OPENRAVE\n viewer_->resetViewer(model_file, environment_file);\n#endif\n}\n\nvoid Robot::setupViewer(std::string model_file, std::string environment_file)\n{\n#ifdef USE_OPENRAVE\n viewer_->setupViewer(model_file, environment_file);\n#endif\n}\n\nvoid Robot::addBox(std::string name, std::vector<double> dims)\n{\n#ifdef USE_OPENRAVE\n viewer_->addObstacle(name, dims);\n#endif\n}\n\nvoid Robot::removeBox(std::string name)\n{\n#ifdef USE_OPENRAVE\n viewer_->removeObstacle(name);\n#endif\n}\n\nvoid Robot::getCameraImage(std::vector<uint8_t>& image, int width, int height)\n{\n#ifdef USE_OPENRAVE\n viewer_->getCameraImage(image, width, height);\n#endif\n}\n\nvoid Robot::setParticlePlotLimit(unsigned int particle_plot_limit)\n{\n#ifdef USE_OPENRAVE\n viewer_->setParticlePlotLimit(particle_plot_limit);\n#endif\n}\n\n}\n"
},
{
"alpha_fraction": 0.7537037134170532,
"alphanum_fraction": 0.7537037134170532,
"avg_line_length": 26.457626342773438,
"blob_id": "908ad3b334748064322546a11efc26d7b8cb7650",
"content_id": "44a9f4ac1260fc4bd2f8523c366fe2e00269b540",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1620,
"license_type": "no_license",
"max_line_length": 114,
"num_lines": 59,
"path": "/src/ActionSpace.cpp",
"repo_name": "hoergems/robots",
"src_encoding": "UTF-8",
"text": "#include <robot_headers/ActionSpace.hpp>\n#include <memory>\n\nusing std::cout;\nusing std::endl;\n\nnamespace frapu\n{\nActionSpace::ActionSpace(const ActionSpaceInfo &actionSpaceInfo):\n numDimensions_(), \n actionNormalizer_(nullptr),\n actionSpaceInfo_(actionSpaceInfo)\n{\n if (actionSpaceInfo_.normalized) {\n actionNormalizer_ = std::unique_ptr<standardNormalize>(new standardNormalize());\n //actionNormalizer_ = std::make_unique<shared::standardNormalize>(lowerActionLimits_, upperActionLimits_);\n } else {\n actionNormalizer_ = std::unique_ptr<nullNormalize>(new nullNormalize());\n //actionNormalizer_ = std::make_unique<shared::nullNormalize>(lowerActionLimits_, upperActionLimits_);\n }\n\n}\n\nconst ActionSpaceInfo ActionSpace::getInfo() const {\n return actionSpaceInfo_;\n}\n\nvoid ActionSpace::setNumDimensions(unsigned int& numDimensions)\n{\n numDimensions_ = numDimensions;\n}\n\nunsigned int ActionSpace::getNumDimensions() const\n{\n return numDimensions_;\n}\n\nvoid ActionSpace::setActionLimits(frapu::ActionLimitsSharedPtr &actionLimits)\n{\n actionLimits_ = actionLimits;\n actionNormalizer_->setActionLimits(actionLimits);\n}\n\nActionLimitsSharedPtr ActionSpace::getActionLimits() const\n{\n return actionLimits_;\n}\n\nvoid ActionSpace::normalizeAction(const ActionSharedPtr& action, ActionSharedPtr &normalizedAction)\n{\n actionNormalizer_->operator()(action, normalizedAction);\n}\n\nvoid ActionSpace::denormalizeAction(const ActionSharedPtr& action, ActionSharedPtr &denormalizedAction)\n{\n actionNormalizer_->denormalizeAction(action, denormalizedAction); \n}\n\n}\n"
},
{
"alpha_fraction": 0.7516025900840759,
"alphanum_fraction": 0.7532051205635071,
"avg_line_length": 20.517240524291992,
"blob_id": "db98a6b37b28c11b7ed9c808bc09da31a3ec6a8d",
"content_id": "7280f35484e5cfe96cc56c94da36c82a6f793376",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 624,
"license_type": "no_license",
"max_line_length": 85,
"num_lines": 29,
"path": "/src/ObservationSpace.cpp",
"repo_name": "hoergems/robots",
"src_encoding": "UTF-8",
"text": "#include <robot_headers/ObservationSpace.hpp>\n#include <iostream>\n\nusing std::cout;\nusing std::endl;\n\nnamespace frapu\n{\n\nObservationSpace::ObservationSpace(const ObservationSpaceInfo &observationSpaceInfo):\n dimension_(1), \n observationSpaceInfo_(observationSpaceInfo)\n{\n cout << \"made observation space\" << endl;\n}\n\nvoid ObservationSpace::setDimension(unsigned int dimension) {\n dimension_ = dimension;\n}\n\nunsigned int ObservationSpace::getDimension() const { \n return dimension_;\n}\n\nconst ObservationSpaceInfo ObservationSpace::getObservationSpaceInfo() const {\n return observationSpaceInfo_;\n}\n\n}\n"
},
{
"alpha_fraction": 0.42566192150115967,
"alphanum_fraction": 0.47148674726486206,
"avg_line_length": 25.54054069519043,
"blob_id": "a33e92d710ea384d304f3232b59912cad625f831",
"content_id": "783e2e676a53a3636bb4b567a29a9b7fa856b816",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 982,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 37,
"path": "/src/AUV/AUVPropagator.cpp",
"repo_name": "hoergems/robots",
"src_encoding": "UTF-8",
"text": "#include <robot_headers/AUV/AUVPropagator.hpp>\n\nnamespace frapu\n{\n\nAUVPropagator::AUVPropagator():\n Propagator() \n{\n\n}\n\nbool AUVPropagator::propagateState(const std::vector<double>& currentState,\n const std::vector<double>& control,\n const std::vector<double>& control_error,\n const double& duration,\n const double& simulation_step_size,\n std::vector<double>& result)\n{\n result = currentState;\n if (control[0] == 1.0) {\n result[1] += 0.01;\n } else if (control[0] == 2.0) {\n result[0] += 0.01;\n result[1] += 0.01;\n } else if (control[0] == 3.0) {\n result[0] += 0.01;\n } else if (control[0] == 4.0) {\n result[0] += 0.01;\n result[1] -= 0.01;\n } else if (control[0] == 5.0) {\n result[1] -= 0.01;\n }\n\n result[0] += control_error[0];\n}\n\n}\n"
},
{
"alpha_fraction": 0.7096773982048035,
"alphanum_fraction": 0.7096773982048035,
"avg_line_length": 9.44444465637207,
"blob_id": "ca474b4841ad17690e3e193d6728dd99cb69d43b",
"content_id": "8d5db4200f85e0a45e973ae3c8cb06c889032be7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 93,
"license_type": "no_license",
"max_line_length": 39,
"num_lines": 9,
"path": "/src/propagator.cpp",
"repo_name": "hoergems/robots",
"src_encoding": "UTF-8",
"text": "#include <robot_headers/propagator.hpp>\n\nnamespace frapu {\n\nPropagator::Propagator() {\n\t\n}\n\n}"
},
{
"alpha_fraction": 0.604742705821991,
"alphanum_fraction": 0.6200807094573975,
"avg_line_length": 33.40972137451172,
"blob_id": "0c9a65a9a23000d0975266455e8e988a5e53eb92",
"content_id": "26a9cf30b079b55e70da0c2115a8ad04bfecb92e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 9910,
"license_type": "no_license",
"max_line_length": 142,
"num_lines": 288,
"path": "/src/AUV/auv.cpp",
"repo_name": "hoergems/robots",
"src_encoding": "UTF-8",
"text": "#include <robot_headers/AUV/auv.hpp>\n\nnamespace frapu\n{\nAUV::AUV(std::string robotFile, std::string configFile):\n Robot(robotFile, configFile),\n dim_x_(0.0),\n dim_y_(0.0),\n dim_z_(0.0),\n initialState_()\n{\n\n serializer_ = std::make_shared<frapu::AUVSerializer>();\n propagator_ = std::make_shared<frapu::AUVPropagator>();\n dim_x_ = 0.005;\n dim_y_ = 0.005;\n dim_z_ = 0.005;\n\n //make the state limits\n lowerStateLimits_.clear();\n upperStateLimits_.clear();\n\n lowerStateLimits_.push_back(-1.0);\n lowerStateLimits_.push_back(-1.0);\n\n upperStateLimits_.push_back(1.0);\n upperStateLimits_.push_back(1.0);\n\n //make the control limits\n lowerControlLimits_.clear();\n upperControlLimits_.clear();\n\n lowerControlLimits_.push_back(1.0);\n upperControlLimits_.push_back(5.0);\n std::ifstream inputFile(configFile);\n initialState_ = static_cast<frapu::AUVSerializer*>(serializer_.get())->loadInitalState(inputFile);\n}\n\nstd::string AUV::getName() const\n{\n std::string name = \"AUV\";\n return name;\n}\n\nfrapu::HeuristicFunctionSharedPtr AUV::makeHeuristicFunction() const\n{\n return nullptr;\n}\n\nfrapu::RobotStateSharedPtr AUV::sampleInitialState() const\n{\n return initialState_;\n}\n\nvoid AUV::createRobotCollisionObjects(const frapu::RobotStateSharedPtr state,\n std::vector<frapu::CollisionObjectSharedPtr>& collision_objects) const\n{\n std::vector<double> stateVec = static_cast<const frapu::VectorState*>(state.get())->asVector();\n double x = stateVec[0];\n double y = stateVec[1];\n fcl::Vec3f trans_vec(x, y, 0.001);\n fcl::Matrix3f rot_matrix(1.0, 0.0, 0.0,\n 0.0, 1.0, 0.0,\n 0.0, 0.0, 1.0);\n fcl::Transform3f trans(rot_matrix, trans_vec);\n fcl::AABB link_aabb(fcl::Vec3f(-0.01,\n -0.01,\n -0.01),\n fcl::Vec3f(0.01,\n 0.01,\n 0.01));\n fcl::Box* box = new fcl::Box();\n fcl::Transform3f box_tf;\n fcl::constructBox(link_aabb, trans, *box, box_tf);\n frapu::CollisionObjectSharedPtr coll_obj = std::make_shared<fcl::CollisionObject>(boost::shared_ptr<fcl::CollisionGeometry>(box), box_tf);\n collision_objects.push_back(coll_obj);\n}\n\nbool AUV::makeStateSpace()\n{\n stateSpace_ = std::make_shared<frapu::VectorStateSpace>(2);\n frapu::StateLimitsSharedPtr stateLimits =\n std::make_shared<frapu::VectorStateLimits>(lowerStateLimits_, upperStateLimits_);\n stateSpace_->setStateLimits(stateLimits);\n}\n\nbool AUV::makeActionSpace(const frapu::ActionSpaceInfo& actionSpaceInfo)\n{\n actionSpace_ = std::make_shared<frapu::DiscreteVectorActionSpace>(actionSpaceInfo);\n unsigned int numDimensions = 1;\n actionSpace_->setNumDimensions(numDimensions);\n frapu::ActionLimitsSharedPtr actionLimits =\n std::make_shared<frapu::VectorActionLimits>(lowerControlLimits_, upperControlLimits_);\n actionSpace_->setActionLimits(actionLimits);\n}\n\nbool AUV::makeObservationSpace(const frapu::ObservationSpaceInfo& observationSpaceInfo)\n{\n observationSpace_ = std::make_shared<frapu::DiscreteObservationSpace>(observationSpaceInfo);\n observationSpace_->setDimension(2);\n std::vector<std::vector<double>> observations;\n\n // Get the observations using a serializer\n\n static_cast<frapu::DiscreteObservationSpace*>(observationSpace_.get())->addObservations(observations);\n return true;\n}\n\nbool AUV::getObservation(const frapu::RobotStateSharedPtr& state,\n std::vector<double>& observationError,\n frapu::ObservationSharedPtr& observation) const\n{\n return getObservation(state, observation);\n}\n\nbool AUV::getObservation(const frapu::RobotStateSharedPtr& state,\n frapu::ObservationSharedPtr& 
observation) const\n{\n std::vector<double> stateVec = static_cast<const frapu::VectorState*>(state.get())->asVector();\n std::vector<frapu::ObstacleSharedPtr> obstacles;\n environmentInfo_->scene->getObstacles(obstacles);\n std::vector<double> observationVec;\n std::vector<frapu::CollisionObjectSharedPtr> collisionObjects;\n createRobotCollisionObjects(state, collisionObjects);\n for (auto & obstacle : obstacles) {\n if (obstacle->getTerrain()->isObservable()) {\n if (obstacle->inCollision(collisionObjects)) {\n observationVec = stateVec;\n return true;\n }\n }\n }\n\n observationVec.clear();\n observationVec.push_back(-100);\n observationVec.push_back(-100);\n observation = std::make_shared<frapu::VectorObservation>(observationVec);\n return true;\n}\n\ndouble AUV::calcLikelihood(const frapu::RobotStateSharedPtr& state,\n const frapu::ObservationSharedPtr& observation) const\n{\n frapu::ObservationSharedPtr observationState;\n transformToObservationSpace(state, observationState);\n std::vector<double> observationVec =\n static_cast<frapu::VectorObservation*>(observationState.get())->asVector();\n std::vector<double> stateVec =\n static_cast<frapu::VectorState*>(state.get())->asVector();\n bool isSame = true;\n for (size_t i = 0; i < observationVec.size(); i++) {\n if (stateVec[i] != observationVec[i]) {\n isSame = false;\n }\n }\n\n if (isSame) {\n return 1.0;\n }\n\n return 0.0;\n}\n\nvoid AUV::transformToObservationSpace(const frapu::RobotStateSharedPtr& state,\n frapu::ObservationSharedPtr& res) const\n{\n getObservation(state, res);\n}\n\nint AUV::getDOF() const\n{\n return 2;\n}\n\nvoid AUV::makeNextStateAfterCollision(const frapu::RobotStateSharedPtr& previousState,\n const frapu::RobotStateSharedPtr& collidingState,\n frapu::RobotStateSharedPtr& nextState)\n{\n nextState = previousState;\n}\n\nvoid AUV::makeGoal()\n{\n goal_ = std::make_shared<frapu::SphereGoal>(goal_position_, goal_radius_);\n}\n\nbool AUV::isTerminal(const frapu::RobotStateSharedPtr& state) const\n{\n std::vector<double> stateVec = static_cast<const frapu::VectorState*>(state.get())->asVector();\n return static_cast<frapu::SphereGoal*>(goal_.get())->isSatisfied(stateVec);\n /**double dist = distanceGoal(state);\n if (dist < goal_radius_) {\n return true;\n }\n\n return false;*/\n}\n\ndouble AUV::distanceGoal(const frapu::RobotStateSharedPtr& state) const\n{\n\n std::vector<double> stateVec = static_cast<const frapu::VectorState*>(state.get())->asVector();\n return static_cast<frapu::SphereGoal*>(goal_.get())->distanceCenter(stateVec);\n}\n\nvoid AUV::setGravityConstant(double gravity_constant)\n{\n\n}\n\nvoid AUV::getLinearObservationDynamics(const frapu::RobotStateSharedPtr& state,\n Eigen::MatrixXd& H,\n Eigen::MatrixXd& W) const\n{\n\n}\n\nvoid AUV::getLinearProcessMatrices(const frapu::RobotStateSharedPtr& state,\n const frapu::ActionSharedPtr& control,\n double& duration,\n std::vector<Eigen::MatrixXd>& matrices) const\n{\n\n}\n\nvoid AUV::makeProcessDistribution(Eigen::MatrixXd& mean,\n Eigen::MatrixXd& covariance_matrix)\n{\n process_distribution_ = std::make_shared<Eigen::WeightedDiscreteDistribution<double>>();\n std::vector<std::pair<std::vector<double>, double>> elements;\n std::vector<double> elem0( { -0.01, 0.0});\n std::vector<double> elem1( {0.0, 0.0});\n std::vector<double> elem2( {0.01, 0.0});\n elements.push_back(std::make_pair(elem0, 0.1));\n elements.push_back(std::make_pair(elem1, 0.8));\n elements.push_back(std::make_pair(elem2, 0.1));\n 
static_cast<Eigen::WeightedDiscreteDistribution<double> *>(process_distribution_.get())->setElements(elements);\n}\n\nvoid AUV::makeObservationDistribution(Eigen::MatrixXd& mean,\n Eigen::MatrixXd& covariance_matrix)\n{\n observation_distribution_ =\n std::make_shared<Eigen::WeightedDiscreteDistribution<double>>();\n}\n\nvoid AUV::updateRobot(const frapu::RobotStateSharedPtr& state)\n{\n cout << \"UPDATE\" << endl;\n}\n\nvoid AUV::updateViewer(const frapu::RobotStateSharedPtr& state,\n std::vector<frapu::RobotStateSharedPtr>& particles,\n std::vector<std::vector<double>>& particleColors)\n{\n#ifdef USE_OPENRAVE\n std::vector<double> stateVec = static_cast<frapu::VectorState*>(state.get())->asVector();\n std::vector<std::string> names;\n std::vector<std::vector<double>> dims;\n std::vector<std::vector<double>> colors;\n std::string name = \"auv\";\n names.push_back(name);\n std::vector<double> main_dims( {stateVec[0], stateVec[1], 0.001, dim_x_, dim_y_, dim_z_, 0.0});\n dims.push_back(main_dims);\n std::vector<double> main_color( {1.0, 0.0, 0.0, 0.5});\n colors.push_back(main_color);\n for (size_t i = 0; i < particles.size(); i++) {\n std::string p_name = \"particle_auv\" + std::to_string(i);\n names.push_back(p_name);\n std::vector<double> particle = static_cast<const frapu::VectorState*>(particles[i].get())->asVector();\n std::vector<double> p_dims( {particle[0],\n particle[1],\n 0.001,\n dim_x_,\n dim_y_,\n dim_z_,\n 0.0\n });\n dims.push_back(p_dims);\n //std::vector<double> c({0.0, 1.0, 0.0, 0.5});\n colors.push_back(particleColors[i]);\n }\n\n viewer_->addBoxes(names, dims, colors);\n#endif\n}\n\n}\n"
},
{
"alpha_fraction": 0.6856999397277832,
"alphanum_fraction": 0.6898861527442932,
"avg_line_length": 36.559749603271484,
"blob_id": "ce2b38a66540502cee35f6deb05c60879ed3cc1f",
"content_id": "21345bb8566ae79e131c498f4b683ee680c29cb0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 5972,
"license_type": "no_license",
"max_line_length": 113,
"num_lines": 159,
"path": "/src/Manipulator/ManipulatorRobotLinear.cpp",
"repo_name": "hoergems/robots",
"src_encoding": "UTF-8",
"text": "#include <robot_headers/Manipulator/ManipulatorRobotLinear.hpp>\n\n\nnamespace frapu\n{\nManipulatorRobotLinear::ManipulatorRobotLinear(std::string robotFile, std::string configFile):\n frapu::ManipulatorRobot(robotFile, configFile)\n{\n /**std::vector<double> lowerStateLimits(lowerStateLimits_.size() / 2);\n std::vector<double> upperStateLimits(upperStateLimits_.size() / 2);\n for (size_t i = 0; i < lowerStateLimits.size(); i++) {\n lowerStateLimits[i] = lowerStateLimits_[i];\n upperStateLimits[i] = upperStateLimits_[i];\n }\n\n lowerStateLimits_ = lowerStateLimits;\n upperStateLimits_ = upperStateLimits;*/\n lowerStateLimits_ = std::vector<double>(lowerStateLimits_.size() / 2);\n upperStateLimits_ = std::vector<double>(upperStateLimits_.size() / 2);\n lowerControlLimits_ = std::vector<double>(lowerStateLimits_.size());\n upperControlLimits_ = std::vector<double>(upperStateLimits_.size());\n for (size_t i = 0; i < lowerStateLimits_.size(); i++) {\n lowerStateLimits_[i] = -3.14;\n upperStateLimits_[i] = 3.14;\n lowerControlLimits_[i] = -0.1;\n upperControlLimits_[i] = 0.1;\n }\n \n propagator_ = std::make_shared<frapu::ManipulatorPropagatorLinear>();\n\n}\n\nstd::string ManipulatorRobotLinear::getName() const {\n std::string name = \"ManipulatorLinear\";\n return name;\n}\n\nvoid ManipulatorRobotLinear::getLinearProcessMatrices(const frapu::RobotStateSharedPtr& state,\n const frapu::ActionSharedPtr& control,\n double& duration,\n std::vector<Eigen::MatrixXd>& matrices) const\n{\n //A_B_V_H_W\n matrices = std::vector<Eigen::MatrixXd>(5);\n unsigned int stateSize = static_cast<frapu::VectorState*>(state.get())->asVector().size();\n for (size_t i = 0; i < 5; i++) {\n matrices[i] = Eigen::MatrixXd::Identity(stateSize, stateSize);\n }\n}\n\nvoid ManipulatorRobotLinear::makeNextStateAfterCollision(const frapu::RobotStateSharedPtr& previousState,\n const frapu::RobotStateSharedPtr& collidingState,\n frapu::RobotStateSharedPtr& nextState)\n{\n nextState = previousState;\n}\n\nbool ManipulatorRobotLinear::getObservation(const frapu::RobotStateSharedPtr& state,\n frapu::ObservationSharedPtr& observation) const\n{\n std::vector<double> stateVec = static_cast<frapu::VectorState*>(state.get())->asVector();\n observation = std::make_shared<frapu::VectorObservation>(stateVec);\n}\n\nbool ManipulatorRobotLinear::getObservation(const frapu::RobotStateSharedPtr& state,\n std::vector<double>& observationError,\n frapu::ObservationSharedPtr& observation) const\n{\n std::vector<double> stateVec = static_cast<frapu::VectorState*>(state.get())->asVector();\n std::vector<double> observationVec(stateVec.size());\n for (size_t i = 0; i < stateVec.size(); i++) {\n observationVec[i] = stateVec[i] + observationError[i];\n }\n\n observation = std::make_shared<frapu::VectorObservation>(observationVec);\n}\n\nbool ManipulatorRobotLinear::makeObservationSpace(const frapu::ObservationSpaceInfo& observationSpaceInfo)\n{\n observationSpace_ = std::make_shared<frapu::ContinuousObservationSpace>(observationSpaceInfo);\n std::vector<double> lowerLimits;\n std::vector<double> upperLimits;\n unsigned int observationSpaceDimension = lowerStateLimits_.size();\n cout << \"obs dim \" << observationSpaceDimension << endl;\n observationSpace_->setDimension(observationSpaceDimension);\n static_cast<frapu::ContinuousObservationSpace*>(observationSpace_.get())->setLimits(lowerStateLimits_,\n upperStateLimits_);\n\n\n}\n\nvoid ManipulatorRobotLinear::transformToObservationSpace(const frapu::RobotStateSharedPtr& state,\n 
frapu::ObservationSharedPtr& res) const\n{\n std::vector<double> stateVec = static_cast<frapu::VectorState*>(state.get())->asVector();\n std::vector<double> observationVec = stateVec;\n res = std::make_shared<frapu::VectorObservation>(observationVec);\n}\n\nvoid ManipulatorRobotLinear::getLinearObservationDynamics(const frapu::RobotStateSharedPtr& state,\n Eigen::MatrixXd& H,\n Eigen::MatrixXd& W) const\n{\n unsigned int numDimensions =\n static_cast<frapu::VectorStateSpace*>(stateSpace_.get())->getNumDimensions();\n H = Eigen::MatrixXd::Identity(numDimensions, numDimensions);\n W = Eigen::MatrixXd::Identity(numDimensions, numDimensions);\n \n}\n\nvoid ManipulatorRobotLinear::updateViewer(const frapu::RobotStateSharedPtr& state,\n std::vector<frapu::RobotStateSharedPtr>& particles,\n std::vector<std::vector<double>>& particleColors)\n{\n#ifdef USE_OPENRAVE\n std::vector<double> stateVec = static_cast<frapu::VectorState*>(state.get())->asVector();\n std::vector<double> joint_values;\n std::vector<double> joint_velocities;\n std::vector<std::vector<double>> particle_joint_values;\n for (size_t i = 0; i < stateVec.size(); i++) {\n joint_values.push_back(stateVec[i]);\n joint_velocities.push_back(0);\n }\n\n for (size_t i = 0; i < particles.size(); i++) {\n std::vector<double> particle;\n std::vector<double> particleVec = static_cast<const frapu::VectorState*>(particles[i].get())->asVector();\n for (size_t j = 0; j < stateVec.size(); j++) {\n particle.push_back(particleVec[j]);\n }\n particle_joint_values.push_back(particle);\n\n }\n\n viewer_->updateRobotValues(joint_values,\n joint_velocities,\n particle_joint_values,\n particleColors,\n nullptr);\n#endif\n\n}\n\nfrapu::RobotStateSharedPtr ManipulatorRobotLinear::sampleInitialState() const\n{\n std::vector<double> initStateVec(lowerStateLimits_.size(), 0);\n frapu::RobotStateSharedPtr initState(new frapu::VectorState(initStateVec));\n return initState;\n}\n\nvoid ManipulatorRobotLinear::setNewtonModel() {\n \n}\n\nvoid ManipulatorRobotLinear::setGravityConstant(double gravity_constant) {\n \n}\n\n}\n"
},
{
"alpha_fraction": 0.5436784625053406,
"alphanum_fraction": 0.5849846601486206,
"avg_line_length": 33.61835861206055,
"blob_id": "6bf9ee4d7c3fb670afa5a71578bd3cb4f835c16e",
"content_id": "3312fb27233919b6947981b8ac370613429fd922",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 7166,
"license_type": "no_license",
"max_line_length": 193,
"num_lines": 207,
"path": "/src/Manipulator/Kinematics.cpp",
"repo_name": "hoergems/robots",
"src_encoding": "UTF-8",
"text": "#include <robot_headers/Manipulator/Kinematics.hpp>\n#include <signal.h>\n\nusing std::cout;\nusing std::endl;\n\n\nnamespace frapu\n{\n\nKinematics::Kinematics():\n links_(),\n joint_origins_(),\n joint_axis_()\n{\n}\n\nvoid Kinematics::setJointOrigins(std::vector<std::vector<double>>& joint_origins)\n{\n for (auto & o : joint_origins) {\n joint_origins_.push_back(o);\n }\n}\n\n/**void Kinematics::setJointAxis(std::vector<std::vector<int>> &axis) {\n for (auto &o: axis) {\n for (auto &k: o) {\n cout << k << \", \";\n }\n cout << endl;\n joint_axis_.push_back(o);\n }\n}*/\n\nvoid Kinematics::setLinkDimensions(std::vector<std::vector<double>>& link_dimensions)\n{\n for (auto & k : link_dimensions) {\n links_.push_back(k);\n }\n}\n\nvoid Kinematics::getPositionOfLinkN(const std::vector<double>& joint_angles, const int& n, std::vector<double>& position) const\n{\n std::pair<fcl::Vec3f, fcl::Matrix3f> link_n_pose = getPoseOfLinkN(joint_angles, n);\n position.push_back(link_n_pose.first[0]);\n position.push_back(link_n_pose.first[1]);\n position.push_back(link_n_pose.first[2]);\n}\n\n/* Gets the end effector position for a given set of joint angles */\nvoid Kinematics::getEndEffectorPosition(const std::vector<double>& joint_angles, std::vector<double>& end_effector_position) const\n{\n int n = joint_angles.size();\n std::pair<fcl::Vec3f, fcl::Matrix3f> ee_pose = getPoseOfLinkN(joint_angles, n);\n end_effector_position.push_back(ee_pose.first[0]);\n end_effector_position.push_back(ee_pose.first[1]);\n end_effector_position.push_back(ee_pose.first[2]); \n}\n\nEigen::MatrixXd Kinematics::getEndEffectorPose(const std::vector<double>& joint_angles, bool& eigen)\n{\n Eigen::MatrixXd m(4, 4);\n Eigen::MatrixXd res = Eigen::MatrixXd::Identity(4, 4);\n res(0, 3) = joint_origins_[0][0];\n res(1, 3) = joint_origins_[0][1];\n res(2, 3) = joint_origins_[0][2];\n for (size_t i = 0; i < joint_angles.size() + 1; i++) {\n if (i == joint_angles.size()) {\n res = getPoseOfLinkN(0.0, res, i);\n } else {\n res = getPoseOfLinkN(joint_angles[i], res, i);\n }\n\n }\n\n return res;\n}\n\nEigen::MatrixXd Kinematics::getPoseOfLinkN(const double& joint_angle,\n Eigen::MatrixXd& current_transform,\n size_t& n) const\n{\n Eigen::MatrixXd new_trans;\n if (n == 0) {\n new_trans = getTransformationMatr(joint_angle, 0.0, 0.0, 0.0);\n } else if (n == joint_origins_.size()) {\n new_trans = getTransformationMatr(0.0, 0.0, links_[n - 1][0], 0.0);\n } else {\n new_trans = getTransformationMatrRot(0.0, 0.0, links_[n - 1][0], joint_origins_[n][3], joint_angle);\n }\n\n return current_transform * new_trans;\n}\n\nstd::pair<fcl::Vec3f, fcl::Matrix3f> Kinematics::getPoseOfLinkN(const std::vector<double>& joint_angles, const int& n) const\n{ \n Eigen::MatrixXd res = Eigen::MatrixXd::Identity(4, 4);\n Eigen::MatrixXd init_trans(4, 4);\n init_trans << 1.0, 0.0, 0.0, joint_origins_[0][0],\n 0.0, 1.0, 0.0, joint_origins_[0][1],\n 0.0, 0.0, 1.0, joint_origins_[0][2],\n 0.0, 0.0, 0.0, 1.0;\n std::vector<Eigen::MatrixXd> transformations;\n transformations.push_back(init_trans);\n\n for (unsigned int i = 0; i < n; i++) {\n transformations.push_back(getTransformationMatr(joint_angles[i], 0.0, links_[i][0], joint_origins_[i + 1][3]));\n }\n\n /**if (n != 0) {\n transformations.push_back(getTransformationMatr(joint_angles[n], 0.0, 0.0, 0.0));\n } else {\n transformations.push_back(getTransformationMatr(joint_angles[0], 0.0, 0.0, 0.0));\n }*/\n if (n < joint_angles.size()) {\n\ttransformations.push_back(getTransformationMatr(joint_angles[n], 
0.0, 0.0, 0.0));\n }\n\n for (int i = transformations.size() - 1; i >= 0; i--) {\n res = transformations[i] * res;\n\n }\n \n fcl::Vec3f r_vec = fcl::Vec3f(res(0, 3), res(1, 3), res(2, 3));\n fcl::Matrix3f r_matr = fcl::Matrix3f(res(0, 0), res(0, 1), res(0, 2),\n res(1, 0), res(1, 1), res(1, 2),\n res(2, 0), res(2, 1), res(2, 2));\n auto p = std::make_pair(r_vec, r_matr);\n return p;\n}\n\nstd::pair<fcl::Vec3f, fcl::Matrix3f> Kinematics::getEndEffectorPose(const std::vector<double>& joint_angles) const\n{\n int n = joint_angles.size();\n return getPoseOfLinkN(joint_angles, n);\n}\n\nvoid Kinematics::getEEJacobian(const std::vector<double>& joint_angles, Eigen::MatrixXd& jacobian)\n{\n auto ee_pose = getEndEffectorPose(joint_angles);\n Eigen::Vector3d o_e;\n o_e << ee_pose.first[0], ee_pose.first[1], ee_pose.first[2];\n std::vector<std::pair<fcl::Vec3f, fcl::Matrix3f>> link_poses;\n for (size_t i = 0; i < joint_angles.size(); i++) {\n auto pose = getPoseOfLinkN(joint_angles, i);\n Eigen::VectorXd column_vector(6);\n Eigen::Vector3d o_i;\n o_i << pose.first[0], pose.first[1], pose.first[2];\n Eigen::Vector3d z_i;\n z_i << pose.second(0, 2), pose.second(1, 2), pose.second(2, 2);\n Eigen::Vector3d upper = z_i.cross(o_e - o_i);\n column_vector << upper, z_i;\n jacobian.col(i) = column_vector;\n }\n}\n\nEigen::MatrixXd Kinematics::transform(double x, double y, double z, double roll, double pitch, double yaw) const\n{\n Eigen::MatrixXd trans(4, 4);\n trans << 1.0, 0.0, 0.0, x,\n 0.0, 1.0, 0.0, y,\n 0.0, 0.0, 1.0, z,\n 0.0, 0.0, 0.0, 1.0;\n\n Eigen::MatrixXd ro(4, 4);\n ro << 1.0, 0.0, 0.0, 0.0,\n 0.0, cos(roll), -sin(roll), 0.0,\n 0.0, sin(roll), cos(roll), 0.0,\n 0.0, 0.0, 0.0, 1.0;\n\n Eigen::MatrixXd pi(4, 4);\n pi << cos(pitch), 0.0, sin(pitch), 0.0,\n 0.0, 1.0, 0.0, 0.0,\n -sin(pitch), 0.0, cos(pitch), 0.0,\n 0.0, 0.0, 0.0, 1.0;\n\n Eigen::MatrixXd ya(4, 4);\n ya << cos(yaw), -sin(yaw), 0.0, 0.0,\n sin(yaw), cos(yaw), 0.0, 0.0,\n 0.0, 0.0, 1.0, 0.0,\n 0.0, 0.0, 0.0, 1.0;\n\n Eigen::MatrixXd res = ro * pi * ya * trans;\n return res;\n}\n\nEigen::MatrixXd Kinematics::getTransformationMatr(double sigma_n, double d_n, double a_n, double alpha_n) const\n{\n Eigen::MatrixXd b(4, 4);\n b << cos(sigma_n), -sin(sigma_n) * cos(alpha_n), sin(sigma_n) * sin(alpha_n), a_n* cos(sigma_n),\n sin(sigma_n), cos(sigma_n) * cos(alpha_n), -cos(sigma_n) * sin(alpha_n), a_n* sin(sigma_n),\n 0.0, sin(alpha_n), cos(alpha_n), d_n,\n 0.0, 0.0, 0.0, 1.0;\n return b;\n}\n\nEigen::MatrixXd Kinematics::getTransformationMatrRot(double sigma_n, double d_n, double a_n, double alpha_n, double theta_n) const\n{\n Eigen::MatrixXd b(4, 4);\n b << -sin(sigma_n)*sin(theta_n)*cos(alpha_n) + cos(sigma_n)*cos(theta_n), -sin(sigma_n)*cos(alpha_n)*cos(theta_n) - sin(theta_n)*cos(sigma_n), sin(alpha_n)*sin(sigma_n), a_n* cos(sigma_n),\n sin(sigma_n)*cos(theta_n) + sin(theta_n)*cos(alpha_n)*cos(sigma_n), -sin(sigma_n)*sin(theta_n) + cos(alpha_n)*cos(sigma_n)*cos(theta_n), -sin(alpha_n)*cos(sigma_n), a_n* sin(sigma_n),\n sin(alpha_n)*sin(theta_n), sin(alpha_n)*cos(theta_n), cos(alpha_n), d_n,\n 0.0, 0.0, 0.0, 1.0;\n return b;\n}\n\n}\n"
},
{
"alpha_fraction": 0.5990674495697021,
"alphanum_fraction": 0.6059072017669678,
"avg_line_length": 38.853939056396484,
"blob_id": "12562dc05752019b69f2b96cb887f1f2712a5b9a",
"content_id": "ac9cd8443b14d851de18cddf68a1de05fb958c24",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 55119,
"license_type": "no_license",
"max_line_length": 146,
"num_lines": 1383,
"path": "/src/Manipulator/ManipulatorRobot.cpp",
"repo_name": "hoergems/robots",
"src_encoding": "UTF-8",
"text": "#include <robot_headers/Manipulator/ManipulatorRobot.hpp>\n\nusing std::cout;\nusing std::endl;\n\nnamespace frapu\n{\n\n/**template<class T>\nstruct VecToList {\n static PyObject* convert(const std::vector<T>& vec) {\n boost::python::list* l = new boost::python::list();\n for (size_t i = 0; i < vec.size(); i++) {\n (*l).append(vec[i]);\n }\n\n return l->ptr();\n }\n};*/\n\nbool ManipulatorRobot::initJoints(TiXmlElement* robot_xml)\n{\n for (TiXmlElement* joint_xml = robot_xml->FirstChildElement(\"joint\"); joint_xml; joint_xml = joint_xml->NextSiblingElement(\"joint\")) {\n // Joint Names\n std::string joint_name(joint_xml->Attribute(\"name\"));\n joint_names_.push_back(joint_name);\n cout << \"joint name \" << joint_name << endl;\n\n // Joint origin\n std::vector<double> origin = process_origin_(joint_xml);\n joint_origins_.push_back(origin);\n\n // Joint axes\n std::vector<int> joint_axis;\n TiXmlElement* axis_xml = joint_xml->FirstChildElement(\"axis\");\n if (axis_xml) {\n const char* xyz_str = axis_xml->Attribute(\"xyz\");\n std::vector<std::string> pieces;\n boost::split(pieces, xyz_str, boost::is_any_of(\" \"));\n for (unsigned int i = 0; i < pieces.size(); ++i) {\n if (pieces[i] != \"\") {\n try {\n joint_axis.push_back(boost::lexical_cast<int>(pieces[i].c_str()));\n } catch (boost::bad_lexical_cast& e) {\n\n }\n }\n }\n\n joint_axes_.push_back(joint_axis);\n } else {\n std::vector<int> ax( {0, 0, 0});\n joint_axes_.push_back(ax);\n }\n\n // Joint limits\n TiXmlElement* limit_xml = joint_xml->FirstChildElement(\"limit\");\n double torque_limit = 0.0;\n double lower_limit = 0.0;\n double upper_limit = 0.0;\n double velocity_limit = 0.0;\n if (limit_xml) {\n try {\n std::string effort_str(limit_xml->Attribute(\"effort\"));\n torque_limit = boost::lexical_cast<double>(effort_str.c_str());\n\n std::string lower_str(limit_xml->Attribute(\"lower\"));\n lower_limit = boost::lexical_cast<double>(lower_str.c_str());\n\n std::string upper_str(limit_xml->Attribute(\"upper\"));\n upper_limit = boost::lexical_cast<double>(upper_str.c_str());\n\n std::string vel_str(limit_xml->Attribute(\"velocity\"));\n velocity_limit = boost::lexical_cast<double>(vel_str.c_str());\n } catch (boost::bad_lexical_cast& e) {\n\n }\n }\n\n joint_torque_limits_.push_back(torque_limit);\n lowerStateLimits_.push_back(lower_limit);\n upperStateLimits_.push_back(upper_limit);\n lower_joint_limits_.push_back(lower_limit);\n upper_joint_limits_.push_back(upper_limit);\n joint_velocity_limits_.push_back(velocity_limit);\n\n // Joint dynamics\n TiXmlElement* dyn_xml = joint_xml->FirstChildElement(\"dynamics\");\n double damping = 0.0;\n if (dyn_xml) {\n std::string damping_str(dyn_xml->Attribute(\"damping\"));\n damping = boost::lexical_cast<double>(damping_str.c_str());\n }\n\n joint_dampings_.push_back(damping);\n\n // Joint types\n std::string joint_type(joint_xml->Attribute(\"type\"));\n joint_types_.push_back(joint_type);\n if (joint_type == \"revolute\") {\n active_joints_.push_back(joint_name);\n active_joint_origins_.push_back(origin);\n active_joint_axes_.push_back(joint_axis);\n active_joint_torque_limits_.push_back(torque_limit);\n\n active_lower_joint_limits_.push_back(lower_limit);\n active_upper_joint_limits_.push_back(upper_limit);\n active_joint_velocity_limits_.push_back(velocity_limit);\n }\n }\n}\n\nbool ManipulatorRobot::initLinks(TiXmlElement* robot_xml)\n{\n for (TiXmlElement* link_xml = robot_xml->FirstChildElement(\"link\"); link_xml; link_xml = link_xml->NextSiblingElement(\"link\")) {\n\n 
frapu::Link link;\n link.active = false;\n //Link names\n std::string link_name(link_xml->Attribute(\"name\"));\n link_names_.push_back(link_name);\n link.name = link_name;\n\n //Link dimensions\n std::vector<double> link_dimension;\n TiXmlElement* coll_xml = link_xml->FirstChildElement(\"collision\");\n if (coll_xml) {\n active_link_names_.push_back(link_name);\n TiXmlElement* geom_xml = coll_xml->FirstChildElement(\"geometry\");\n TiXmlElement* dim_xml = geom_xml->FirstChildElement(\"box\");\n const char* xyz_str = dim_xml->Attribute(\"size\");\n std::vector<std::string> pieces;\n boost::split(pieces, xyz_str, boost::is_any_of(\" \"));\n for (unsigned int i = 0; i < pieces.size(); ++i) {\n if (pieces[i] != \"\") {\n try {\n link_dimension.push_back(boost::lexical_cast<double>(pieces[i].c_str()));\n } catch (boost::bad_lexical_cast& e) {\n\n }\n }\n }\n if (link_dimension.size() != 3) {\n std::vector<double> ld( {0.0, 0.0, 0.0});\n link_dimensions_.push_back(ld);\n for (auto & k : ld) {\n link.link_dimensions.push_back(k);\n }\n } else {\n link_dimensions_.push_back(link_dimension);\n active_link_dimensions_.push_back(link_dimension);\n for (auto & k : link_dimension) {\n link.link_dimensions.push_back(k);\n }\n }\n\n std::vector<double> link_origin = process_origin_(coll_xml);\n link_origins_.push_back(link_origin);\n for (auto & k : link_origin) {\n link.origin.push_back(k);\n }\n } else {\n std::vector<double> ld( {0.0, 0.0, 0.0});\n link_dimensions_.push_back(ld);\n for (auto & k : ld) {\n link.link_dimensions.push_back(k);\n }\n\n }\n\n //Link inertia\n TiXmlElement* ine = link_xml->FirstChildElement(\"inertial\");\n\n if (ine) {\n\n // Link masses\n active_links_.push_back(link_name);\n link.active = true;\n\n TiXmlElement* mass_xml = ine->FirstChildElement(\"mass\");\n double mass = 0.0;\n if (mass_xml) {\n try {\n if (mass_xml->Attribute(\"value\")) {\n mass = boost::lexical_cast<double>(mass_xml->Attribute(\"value\"));\n }\n } catch (boost::bad_lexical_cast& e) {\n\n }\n }\n link_masses_.push_back(mass);\n link.mass = mass;\n\n //Inertia origins\n std::vector<double> inertia_origin = process_origin_(ine);\n link_inertia_origins_.push_back(inertia_origin);\n for (auto & k : inertia_origin) {\n link.inertia_origin.push_back(k);\n }\n\n //Inertia matrix\n std::vector<double> inertia_vals;\n double ixx = 0.0;\n double ixy = 0.0;\n double ixz = 0.0;\n double iyy = 0.0;\n double iyz = 0.0;\n double izz = 0.0;\n TiXmlElement* matr_xml = ine->FirstChildElement(\"inertia\");\n if (matr_xml) {\n try {\n if (matr_xml->Attribute(\"ixx\")) {\n ixx = boost::lexical_cast<double>(matr_xml->Attribute(\"ixx\"));\n }\n if (matr_xml->Attribute(\"ixy\")) {\n ixy = boost::lexical_cast<double>(matr_xml->Attribute(\"ixy\"));\n }\n if (matr_xml->Attribute(\"ixz\")) {\n ixz = boost::lexical_cast<double>(matr_xml->Attribute(\"ixz\"));\n }\n if (matr_xml->Attribute(\"iyy\")) {\n iyy = boost::lexical_cast<double>(matr_xml->Attribute(\"iyy\"));\n }\n if (matr_xml->Attribute(\"iyz\")) {\n iyz = boost::lexical_cast<double>(matr_xml->Attribute(\"iyz\"));\n }\n if (matr_xml->Attribute(\"izz\")) {\n izz = boost::lexical_cast<double>(matr_xml->Attribute(\"izz\"));\n }\n } catch (boost::bad_lexical_cast& e) {\n\n }\n }\n\n inertia_vals.push_back(ixx);\n inertia_vals.push_back(ixy);\n inertia_vals.push_back(ixz);\n inertia_vals.push_back(iyy);\n inertia_vals.push_back(iyz);\n inertia_vals.push_back(izz);\n link.inertials.push_back(ixx);\n link.inertials.push_back(ixy);\n link.inertials.push_back(ixz);\n 
link.inertials.push_back(iyy);\n link.inertials.push_back(iyz);\n link.inertials.push_back(izz);\n\n link_inertia_matrices_.push_back(inertia_vals);\n } else {\n link_masses_.push_back(0.0);\n link.mass = 0.0;\n\n std::vector<double> origin( {0.0, 0.0, 0.0, 0.0, 0.0, 0.0});\n link_inertia_origins_.push_back(origin);\n for (auto & k : origin) {\n link.inertia_origin.push_back(k);\n }\n\n std::vector<double> inert( {0.0, 0.0, 0.0, 0.0, 0.0, 0.0});\n link_inertia_matrices_.push_back(inert);\n for (auto & k : inert) {\n link.inertials.push_back(k);\n }\n }\n\n links_.push_back(link);\n }\n\n\n\n return true;\n}\n\nstd::vector<double> ManipulatorRobot::process_origin_(TiXmlElement* xml)\n{\n TiXmlElement* origin_xml = xml->FirstChildElement(\"origin\");\n std::vector<double> origin;\n if (origin_xml) {\n if (origin_xml->Attribute(\"xyz\")) {\n const char* xyz_str = origin_xml->Attribute(\"xyz\");\n const char* rpy_str = origin_xml->Attribute(\"rpy\");\n std::vector<std::string> pieces;\n boost::split(pieces, xyz_str, boost::is_any_of(\" \"));\n for (unsigned int i = 0; i < pieces.size(); ++i) {\n if (pieces[i] != \"\") {\n try {\n origin.push_back(boost::lexical_cast<double>(pieces[i].c_str()));\n } catch (boost::bad_lexical_cast& e) {\n\n }\n }\n }\n\n pieces.clear();\n boost::split(pieces, rpy_str, boost::is_any_of(\" \"));\n for (unsigned int i = 0; i < pieces.size(); ++i) {\n if (pieces[i] != \"\") {\n try {\n origin.push_back(boost::lexical_cast<double>(pieces[i].c_str()));\n } catch (boost::bad_lexical_cast& e) {\n\n }\n }\n }\n }\n }\n if (origin.size() == 6) {\n return origin;\n } else {\n std::vector<double> orig( {0.0, 0.0, 0.0, 0.0, 0.0, 0.0});\n return orig;\n }\n}\n\nManipulatorRobot::ManipulatorRobot(std::string robotFile, std::string configFile):\n Robot(robotFile, configFile),\n links_(),\n joints_(),\n link_names_(),\n active_link_names_(),\n joint_names_(),\n joint_origins_(),\n link_origins_(),\n active_joint_origins_(),\n active_links_(),\n active_joints_(),\n joint_axes_(),\n active_joint_axes_(),\n joint_torque_limits_(),\n lower_joint_limits_(),\n upper_joint_limits_(),\n joint_velocity_limits_(),\n active_joint_velocity_limits_(),\n active_joint_torque_limits_(),\n active_lower_joint_limits_(),\n active_upper_joint_limits_(),\n link_masses_(),\n link_inertia_origins_(),\n kinematics_(new Kinematics()),\n rbdl_interface_(nullptr),\n initialState_(nullptr),\n rrtOptions()\n{\n\n serializer_ = std::make_shared<frapu::ManipulatorSerializer>();\n propagator_ = std::make_shared<frapu::ManipulatorPropagator>();\n TiXmlDocument xml_doc;\n xml_doc.LoadFile(robotFile);\n TiXmlElement* robot_xml = xml_doc.FirstChildElement(\"robot\");\n initLinks(robot_xml);\n initJoints(robot_xml);\n kinematics_->setJointOrigins(joint_origins_);\n kinematics_->setLinkDimensions(active_link_dimensions_);\n static_cast<frapu::ManipulatorPropagator*>(propagator_.get())->getIntegrator()->setJointDamping(joint_dampings_);\n //static_cast<frapu::ManipulatorPropagator*>(propagator_.get())->getIntegrator()->setVelocityLimits(lowerVelocityLimits, upperVelocityLimits);\n lowerStateLimits_ = active_lower_joint_limits_;\n upperStateLimits_ = active_upper_joint_limits_;\n for (size_t i = 0; i < active_joint_velocity_limits_.size(); i++) {\n lowerStateLimits_.push_back(-active_joint_velocity_limits_[i]);\n upperStateLimits_.push_back(active_joint_velocity_limits_[i]);\n }\n\n for (size_t i = 0; i < active_joint_torque_limits_.size(); i++) {\n lowerControlLimits_.push_back(-active_joint_torque_limits_[i]);\n 
upperControlLimits_.push_back(active_joint_torque_limits_[i]);\n }\n\n\n std::ifstream infile(configFile);\n initialState_ = static_cast<frapu::ManipulatorSerializer*>(serializer_.get())->loadInitalState(infile);\n rrtOptions.continuousCollision = static_cast<frapu::ManipulatorSerializer*>(serializer_.get())->loadContinuousCollision(infile);\n rrtOptions.planningVelocity = static_cast<frapu::ManipulatorSerializer*>(serializer_.get())->loadPlanningVelocity(infile);\n}\n\nstd::string ManipulatorRobot::getName() const\n{\n std::string name = \"Manipulator\";\n return name;\n}\n\nfrapu::HeuristicFunctionSharedPtr ManipulatorRobot::makeHeuristicFunction() const\n{\n frapu::HeuristicFunctionSharedPtr heuristicFunction = std::make_shared<RRTHeuristicFunction>();\n auto terminalFunction = std::bind(&ManipulatorRobot::isTerminal, this, std::placeholders::_1);\n heuristicFunction->setTerminalFunction(terminalFunction);\n return heuristicFunction;\n}\n\n/**void ManipulatorRobot::setupHeuristic(frapu::RewardModelSharedPtr& rewardModel)\n{\n frapu::PathPlannerSharedPtr pathPlanner = std::make_shared<frapu::StandardPathPlanner>(control_duration_,\n rrtOptions.continuousCollision,\n rrtOptions.planningVelocity,\n 1.0,\n false,\n false);\n frapu::StandardPathPlanner* standardPathPlanner = static_cast<frapu::StandardPathPlanner*>(pathPlanner.get());\n\n /**\n * This is very bad!!!!!\n */\n/** frapu::RobotSharedPtr rob(this);\n pathPlanner->setup(environmentInfo_->scene, rob);\n standardPathPlanner->setupPlanner(\"RRTConnect\");\n std::vector<frapu::RobotStateSharedPtr> goalStates = getGoalStates();\n if (goalStates.size() == 0) {\n cout << \"Error. No goal states available\" << endl;\n }\n ompl::base::GoalPtr goal_region = frapu::makeRobotGoalRegion(standardPathPlanner->getSpaceInformation(),\n rob,\n goalStates);\n standardPathPlanner->setGoal(goal_region);\n auto terminalFunction = std::bind(&ManipulatorRobot::isTerminal, this, std::placeholders::_1);\n ompl::base::MotionValidatorPtr motionValidator = standardPathPlanner->getMotionValidator();\n std::shared_ptr<frapu::MotionValidator> motionValidatorSharedPtr = std::static_pointer_cast<frapu::MotionValidator>(motionValidator);\n frapu::CollisionCheckerSharedPtr collisionChecker = motionValidatorSharedPtr;\n heuristic_ = std::make_shared<frapu::RRTHeuristic>(pathPlanner, collisionChecker, environmentInfo_, terminalFunction);\n}*/\n\nvoid ManipulatorRobot::makeGoal()\n{\n goal_ = std::make_shared<frapu::SphereGoal>(goal_position_, goal_radius_);\n}\n\nfrapu::RobotStateSharedPtr ManipulatorRobot::sampleInitialState() const\n{\n return initialState_;\n}\n\nvoid ManipulatorRobot::setNewtonModel()\n{\n rbdl_interface_ = std::make_shared<RBDLInterface>();\n rbdl_interface_->load_model(robot_file_);\n static_cast<frapu::ManipulatorPropagator*>(propagator_.get())->getIntegrator()->setRBDLInterface(rbdl_interface_);\n rbdl_interface_->setViscous(joint_dampings_);\n rbdl_interface_->setPositionConstraints(lower_joint_limits_, upper_joint_limits_);\n}\n\nvoid ManipulatorRobot::quatFromRPY(double& roll, double& pitch, double& yaw, std::vector<double>& quat)\n{\n double phi, the, psi;\n\n phi = roll / 2.0;\n the = pitch / 2.0;\n psi = yaw / 2.0;\n double x = sin(phi) * cos(the) * cos(psi) - cos(phi) * sin(the) * sin(psi);\n double y = cos(phi) * sin(the) * cos(psi) + sin(phi) * cos(the) * sin(psi);\n double z = cos(phi) * cos(the) * sin(psi) - sin(phi) * sin(the) * cos(psi);\n double w = cos(phi) * cos(the) * cos(psi) + sin(phi) * sin(the) * sin(psi);\n\n double s = 
sqrt(x * x +\n y * y +\n z * z +\n w * w);\n\n if (s == 0.0) {\n x = 0.0;\n y = 0.0;\n z = 0.0;\n w = 1.0;\n } else {\n x /= s;\n y /= s;\n z /= s;\n w /= s;\n }\n\n quat.push_back(x);\n quat.push_back(y);\n quat.push_back(z);\n quat.push_back(w);\n}\n\nstd::vector<std::shared_ptr<fcl::CollisionObject>>\n ManipulatorRobot::createEndEffectorCollisionObjectPy(const std::vector<double>& joint_angles)\n{\n std::vector<std::shared_ptr<fcl::CollisionObject>> collision_objects;\n createEndEffectorCollisionObject(joint_angles, collision_objects);\n return collision_objects;\n}\n\nbool ManipulatorRobot::makeStateSpace()\n{\n unsigned int dimensions = lowerStateLimits_.size();\n cout << \"state dim \" << dimensions << endl;\n stateSpace_ = std::make_shared<frapu::VectorStateSpace>(dimensions);\n frapu::StateLimitsSharedPtr stateLimits =\n std::make_shared<frapu::ManipulatorStateLimits>(lowerStateLimits_, upperStateLimits_);\n frapu::printVector<double>(lowerStateLimits_, \"lowerLimits\");\n frapu::printVector<double>(upperStateLimits_, \"upperLimits\");\n stateSpace_->setStateLimits(stateLimits);\n std::shared_ptr<frapu::Integrate> integrator =\n static_cast<frapu::ManipulatorPropagator*>(propagator_.get())->getIntegrator();\n if (!integrator) {\n frapu::ERROR(\"No integrator!\");\n sleep(10000);\n }\n integrator->setStateSpaceDimension(dimensions);\n}\n\nbool ManipulatorRobot::makeActionSpace(const frapu::ActionSpaceInfo& actionSpaceInfo)\n{\n if (actionSpaceInfo.type == \"continuous\") {\n actionSpace_ = std::make_shared<frapu::ContinuousVectorActionSpace>(actionSpaceInfo);\n } else {\n actionSpace_ = std::make_shared<frapu::DiscreteVectorActionSpace>(actionSpaceInfo);\n }\n\n unsigned int numDimensions = active_joints_.size();\n actionSpace_->setNumDimensions(numDimensions);\n frapu::ActionLimitsSharedPtr actionLimits =\n std::make_shared<frapu::VectorActionLimits>(lowerControlLimits_, upperControlLimits_);\n actionSpace_->setActionLimits(actionLimits);\n static_cast<frapu::ManipulatorPropagator*>(propagator_.get())->getIntegrator()->setControlSpaceDimension(numDimensions);\n}\n\nbool ManipulatorRobot::makeObservationSpace(const frapu::ObservationSpaceInfo& observationSpaceInfo)\n{\n observationSpace_ = std::make_shared<frapu::ContinuousObservationSpace>(observationSpaceInfo);\n std::vector<double> lowerLimits;\n std::vector<double> upperLimits;\n if (observationSpace_->getObservationSpaceInfo().observationType == \"linear\") {\n observationSpace_->setDimension(getStateSpaceDimension());\n static_cast<frapu::ContinuousObservationSpace*>(observationSpace_.get())->setLimits(lowerStateLimits_,\n upperStateLimits_);\n\n } else {\n observationSpace_->setDimension(3 + getStateSpaceDimension() / 2);\n std::vector<double> r_state(getStateSpaceDimension() / 2, 0.0);\n std::vector<double> end_effector_position;\n getEndEffectorPosition(r_state, end_effector_position);\n double radius = 0.0;\n for (size_t i = 0; i < end_effector_position.size(); i++) {\n radius += std::pow(joint_origins_[0][i] - end_effector_position[i], 2);\n }\n\n radius = sqrt(radius);\n std::vector<double> lowerObservationLimits;\n std::vector<double> upperObservationLimits;\n for (size_t i = 0; i < 3; i++) {\n lowerObservationLimits.push_back(-radius);\n upperObservationLimits.push_back(radius);\n }\n\n for (size_t i = 0; i < lowerLimits.size() / 2; i++) {\n lowerObservationLimits.push_back(lowerStateLimits_[i + lowerStateLimits_.size() / 2]);\n upperObservationLimits.push_back(upperStateLimits_[i + upperStateLimits_.size() / 2]);\n 
}\n\n static_cast<frapu::ContinuousObservationSpace*>(observationSpace_.get())->setLimits(lowerObservationLimits,\n upperObservationLimits);\n }\n}\n\nbool ManipulatorRobot::getObservation(const frapu::RobotStateSharedPtr& state,\n frapu::ObservationSharedPtr& observation) const\n{\n std::vector<double> stateVec = static_cast<frapu::VectorState*>(state.get())->asVector();\n std::vector<double> observationVec;\n if (observationSpace_->getObservationSpaceInfo().observationType == \"linear\") {\n Eigen::MatrixXd sample = observation_distribution_->samples(1);\n for (size_t i = 0; i < stateVec.size(); i++) {\n observationVec.push_back(stateVec[i] + sample(i, 0));\n }\n } else {\n std::vector<double> end_effector_position;\n getEndEffectorPosition(stateVec, end_effector_position);\n unsigned int observationSpaceDimension = observationSpace_->getDimension();\n Eigen::MatrixXd sample = observation_distribution_->samples(1);\n observationVec = std::vector<double>(observationSpaceDimension);\n for (size_t i = 0; i < 3; i++) {\n observationVec[i] = end_effector_position[i] + sample(i, 0);\n }\n\n unsigned int stateSizeHalf = stateVec.size() / 2;\n for (size_t i = 0; i < stateSizeHalf; i++) {\n observationVec[i + 3] = stateVec[i + stateSizeHalf] + sample(i + 3, 0);\n }\n }\n\n observation = std::make_shared<frapu::VectorObservation>(observationVec);\n return true;\n}\n\nbool ManipulatorRobot::getObservation(const frapu::RobotStateSharedPtr& state,\n std::vector<double>& observationError,\n frapu::ObservationSharedPtr& observation) const\n{\n transformToObservationSpace(state, observation);\n std::vector<double> observationVec = static_cast<frapu::VectorObservation*>(observation.get())->asVector();\n for (size_t i = 0; i < observationSpace_->getDimension(); i++) {\n observationVec[i] += observationError[i];\n }\n\n observation = std::make_shared<frapu::VectorObservation>(observationVec);\n}\n\nvoid ManipulatorRobot::transformToObservationSpace(const frapu::RobotStateSharedPtr& state,\n frapu::ObservationSharedPtr& res) const\n{\n std::vector<double> stateVec = static_cast<frapu::VectorState*>(state.get())->asVector();\n std::vector<double> observationVec;\n if (observationSpace_->getObservationSpaceInfo().observationType == \"linear\") {\n observationVec = stateVec;\n } else {\n std::vector<double> end_effector_position;\n unsigned int observationSpaceDimension = observationSpace_->getDimension();\n observationVec = std::vector<double>(observationSpaceDimension);\n getEndEffectorPosition(stateVec, end_effector_position);\n for (size_t i = 0; i < 3; i++) {\n observationVec[i] = end_effector_position[i];\n }\n\n unsigned int stateSizeHalf = stateVec.size() / 2;\n for (size_t i = 0; i < stateSizeHalf; i++) {\n observationVec[i + 3] = stateVec[i + stateSizeHalf];\n }\n }\n\n res = std::make_shared<frapu::VectorObservation>(observationVec);\n}\n\nvoid ManipulatorRobot::initCollisionObjects()\n{\n // Init the link collision objects\n std::vector<fcl::AABB> link_aabbs;\n for (size_t i = 0; i < active_link_dimensions_.size(); i++) {\n /**link_aabbs.push_back(fcl::AABB(fcl::Vec3f(0.0,\n -active_link_dimensions_[i][1] / 2.0,\n -active_link_dimensions_[i][2] / 2.0),\n fcl::Vec3f(active_link_dimensions_[i][0],\n active_link_dimensions_[i][1] / 2.0,\n active_link_dimensions_[i][2] / 2.0)));*/\n link_aabbs.push_back(fcl::AABB(fcl::Vec3f(-active_link_dimensions_[i][0] / 2.0,\n -active_link_dimensions_[i][1] / 2.0,\n -active_link_dimensions_[i][2] / 2.0),\n fcl::Vec3f(active_link_dimensions_[i][0] / 2.0,\n 
active_link_dimensions_[i][1] / 2.0,\n active_link_dimensions_[i][2] / 2.0)));\n }\n for (size_t i = 0; i < active_link_dimensions_.size(); i++) {\n fcl::Box* box = new fcl::Box();\n fcl::Transform3f box_tf;\n fcl::Transform3f trans;\n fcl::constructBox(link_aabbs[i], trans, *box, box_tf);\n collision_objects_.push_back(std::make_shared<fcl::CollisionObject>(boost::shared_ptr<fcl::CollisionGeometry>(box), box_tf));\n }\n\n // Init the end-effector collision object\n fcl::AABB aabb(fcl::Vec3f(0.0,\n -active_link_dimensions_[active_link_dimensions_.size() - 1][1] / 2.0,\n -active_link_dimensions_[active_link_dimensions_.size() - 1][2] / 2.0),\n fcl::Vec3f(0.001,\n active_link_dimensions_[active_link_dimensions_.size() - 1][1] / 2.0,\n active_link_dimensions_[active_link_dimensions_.size() - 1][2] / 2.0));\n fcl::Box* box = new fcl::Box();\n fcl::Transform3f box_tf;\n fcl::Transform3f trans;\n fcl::constructBox(aabb, trans, *box, box_tf);\n collision_objects_.push_back(std::make_shared<fcl::CollisionObject>(boost::shared_ptr<fcl::CollisionGeometry>(box), box_tf));\n}\n\nvoid ManipulatorRobot::createRobotCollisionObjects(const frapu::RobotStateSharedPtr state,\n std::vector<frapu::CollisionObjectSharedPtr>& collision_objects) const\n{\n std::vector<double> stateVec = static_cast<const frapu::VectorState*>(state.get())->asVector();\n unsigned int len = stateVec.size();\n unsigned int actionSpaceDimension = actionSpace_->getNumDimensions();\n if (stateVec.size() > actionSpaceDimension) {\n len = stateVec.size() / 2;\n }\n Eigen::MatrixXd res = Eigen::MatrixXd::Identity(4, 4);\n res(0, 3) = joint_origins_[0][0];\n res(1, 3) = joint_origins_[0][1];\n res(2, 3) = joint_origins_[0][2];\n\n for (size_t i = 0; i < len; i++) {\n res = kinematics_->getPoseOfLinkN(stateVec[i], res, i);\n fcl::Matrix3f trans_matrix(res(0, 0), res(0, 1), res(0, 2),\n res(1, 0), res(1, 1), res(1, 2),\n res(2, 0), res(2, 1), res(2, 2));\n fcl::Vec3f trans_vec(res(0, 3), res(1, 3), res(2, 3));\n fcl::Transform3f trans(trans_matrix, trans_vec);\n fcl::AABB link_aabb(fcl::Vec3f(0.0,\n -active_link_dimensions_[i][1] / 2.0,\n -active_link_dimensions_[i][2] / 2.0),\n fcl::Vec3f(active_link_dimensions_[i][0],\n active_link_dimensions_[i][1] / 2.0,\n active_link_dimensions_[i][2] / 2.0));\n fcl::Box* box = new fcl::Box();\n fcl::Transform3f box_tf;\n fcl::constructBox(link_aabb, trans, *box, box_tf);\n std::shared_ptr<fcl::CollisionObject> coll_obj =\n std::make_shared<fcl::CollisionObject>(boost::shared_ptr<fcl::CollisionGeometry>(box), box_tf);\n collision_objects.push_back(coll_obj);\n }\n}\n\nbool ManipulatorRobot::checkSelfCollision(std::vector<std::shared_ptr<fcl::CollisionObject>>& collision_objects) const\n{\n for (size_t i = 0; i < collision_objects.size(); i++) {\n if (i + 2 < collision_objects.size()) {\n for (size_t j = i + 2; j < collision_objects.size(); j++) {\n fcl::CollisionRequest request;\n fcl::CollisionResult result;\n fcl::collide(collision_objects[i].get(),\n collision_objects[j].get(),\n request,\n result);\n if (result.isCollision()) {\n return true;\n }\n }\n }\n }\n\n return false;\n}\n\nvoid ManipulatorRobot::getLinearProcessMatrices(const frapu::RobotStateSharedPtr& state,\n const frapu::ActionSharedPtr& control,\n double& duration,\n std::vector<Eigen::MatrixXd>& matrices) const\n{\n std::vector<double> stateVec = static_cast<frapu::VectorState*>(state.get())->asVector();\n std::vector<double> controlVec = static_cast<frapu::VectorAction*>(control.get())->asVector();\n 
static_cast<frapu::ManipulatorPropagator*>(propagator_.get())->getIntegrator()->getProcessMatrices2(stateVec,\n controlVec,\n duration,\n observationSpace_->getObservationSpaceInfo().observationType,\n matrices);\n}\n\nvoid ManipulatorRobot::getLinearObservationDynamics(const frapu::RobotStateSharedPtr& state,\n Eigen::MatrixXd& H,\n Eigen::MatrixXd& W) const\n{\n std::vector<double> stateVec = static_cast<frapu::VectorState*>(state.get())->asVector();\n frapu::ManipulatorPropagator* p = static_cast<frapu::ManipulatorPropagator*>(propagator_.get());\n p->getIntegrator()->getLinearObservationDynamics(stateVec,\n observationSpace_->getObservationSpaceInfo().observationType,\n H,\n W);\n}\n\nbool ManipulatorRobot::checkSelfCollision(const frapu::RobotStateSharedPtr& state) const\n{\n std::vector<std::shared_ptr<fcl::CollisionObject>> robot_collision_objects;\n createRobotCollisionObjects(state, robot_collision_objects);\n return checkSelfCollision(robot_collision_objects);\n}\n\nvoid ManipulatorRobot::createEndEffectorCollisionObject(const std::vector<double>& joint_angles,\n std::vector<std::shared_ptr<fcl::CollisionObject>>& collision_objects)\n{\n const std::pair<fcl::Vec3f, fcl::Matrix3f> pose_ee = kinematics_->getPoseOfLinkN(joint_angles, active_link_dimensions_.size());\n fcl::Transform3f trans(pose_ee.second, pose_ee.first);\n fcl::Transform3f trans_res = trans * fcl::Transform3f(collision_objects_[collision_objects_.size() - 1]->getAABB().center());\n collision_objects_[collision_objects_.size() - 1]->setTransform(trans_res);\n collision_objects.push_back(collision_objects_[collision_objects_.size() - 1]);\n}\n\nvoid ManipulatorRobot::getPositionOfLinkN(const std::vector<double>& joint_angles, const int& n, std::vector<double>& position)\n{\n kinematics_->getPositionOfLinkN(joint_angles, n, position);\n}\n\nvoid ManipulatorRobot::getEndEffectorPosition(const std::vector<double>& joint_angles, std::vector<double>& end_effector_position) const\n{\n if (joint_angles.size() > getDOF()) {\n std::vector<double> ja(getDOF());\n for (size_t i = 0; i < getDOF(); i++) {\n ja[i] = joint_angles[i];\n }\n\n const std::vector<double> ja2(ja);\n kinematics_->getEndEffectorPosition(ja2, end_effector_position);\n } else {\n kinematics_->getEndEffectorPosition(joint_angles, end_effector_position);\n }\n\n}\n\nvoid ManipulatorRobot::updateViewer(const frapu::RobotStateSharedPtr& state,\n std::vector<frapu::RobotStateSharedPtr>& particles,\n std::vector<std::vector<double>>& particleColors)\n{\n#ifdef USE_OPENRAVE\n std::vector<double> stateVec = static_cast<frapu::VectorState*>(state.get())->asVector();\n std::vector<double> joint_values;\n std::vector<double> joint_velocities;\n std::vector<std::vector<double>> particle_joint_values;\n //std::vector<std::vector<double>> particle_joint_colors;\n for (size_t i = 0; i < stateVec.size() / 2; i++) {\n joint_values.push_back(stateVec[i]);\n joint_velocities.push_back(stateVec[i + stateVec.size() / 2]);\n }\n\n for (size_t i = 0; i < particles.size(); i++) {\n std::vector<double> particle;\n std::vector<double> particleVec = static_cast<const frapu::VectorState*>(particles[i].get())->asVector();\n for (size_t j = 0; j < stateVec.size() / 2; j++) {\n particle.push_back(particleVec[j]);\n }\n particle_joint_values.push_back(particle);\n\n }\n\n viewer_->updateRobotValues(joint_values,\n joint_velocities,\n particle_joint_values,\n particleColors,\n nullptr);\n\n#endif\n}\n\n\n/****************************************\n * Viewer functions\n */\n#ifdef 
USE_OPENRAVE\nvoid ManipulatorRobot::addPermanentViewerParticles(const std::vector<std::vector<double>>& particle_joint_values,\n const std::vector<std::vector<double>>& particle_colors)\n{\n assert(particle_joint_values.size() == particle_colors.size() &&\n \"Number of particles must be the same as number of colours!\");\n viewer_->addPermanentParticles(particle_joint_values,\n particle_colors);\n}\n\nvoid ManipulatorRobot::removePermanentViewerParticles()\n{\n viewer_->removePermanentParticles();\n}\n\nvoid ManipulatorRobot::updateViewerValues(const std::vector<double>& current_joint_values,\n const std::vector<double>& current_joint_velocities,\n const std::vector<std::vector<double>>& particle_joint_values,\n const std::vector<std::vector<double>>& particle_colors)\n{\n assert(particle_joint_values.size() == particle_colors.size() &&\n \"Number of particles must be the same as number of colours!\");\n // particle_color = {r, g, b, a}\n viewer_->updateRobotValues(current_joint_values,\n current_joint_velocities,\n particle_joint_values,\n particle_colors,\n nullptr);\n}\n\nvoid ManipulatorRobot::setViewerSize(int x, int y)\n{\n viewer_->setViewerSize(x, y);\n}\n\nvoid ManipulatorRobot::setViewerBackgroundColor(double r, double g, double b)\n{\n viewer_->setBackgroundColor(r, g, b);\n}\n\nvoid ManipulatorRobot::setViewerCameraTransform(std::vector<double>& rot, std::vector<double>& trans)\n{\n viewer_->setCameraTransform(rot, trans);\n}\n#endif\n\n\nbool ManipulatorRobot::propagate_linear(std::vector<double>& current_state,\n std::vector<double>& control_input,\n std::vector<double>& control_error,\n double duration,\n std::vector<double>& result)\n{\n return static_cast<frapu::ManipulatorPropagator*>(propagator_.get())->propagate_linear(current_state,\n control_input,\n control_error,\n duration,\n result);\n}\n\nvoid ManipulatorRobot::setGravityConstant(double gravity_constant)\n{\n std::shared_ptr<frapu::Integrator> integrator =\n static_cast<frapu::ManipulatorPropagator*>(propagator_.get())->getIntegrator();\n frapu::Integrate* integrate = static_cast<frapu::Integrate*>(integrator.get());\n if (integrate) {\n integrate->setGravityConstant(gravity_constant);\n }\n\n if (rbdl_interface_) {\n rbdl_interface_->setGravity(gravity_constant);\n }\n}\n\nvoid ManipulatorRobot::setExternalForce(double f_x,\n double f_y,\n double f_z,\n double f_roll,\n double f_pitch,\n double f_yaw)\n{\n std::shared_ptr<frapu::Integrator> integrator =\n static_cast<frapu::ManipulatorPropagator*>(propagator_.get())->getIntegrator();\n frapu::Integrate* integrate = static_cast<frapu::Integrate*>(integrator.get());\n if (integrate) {\n integrate->setExternalForce(f_x, f_y, f_z, f_roll, f_pitch, f_yaw);\n }\n}\n\nvoid ManipulatorRobot::setAccelerationLimit(double accelerationLimit)\n{\n std::shared_ptr<frapu::Integrator> integrator =\n static_cast<frapu::ManipulatorPropagator*>(propagator_.get())->getIntegrator();\n frapu::Integrate* integrate = static_cast<frapu::Integrate*>(integrator.get());\n if (integrate) {\n integrate->setAccelerationLimit(accelerationLimit);\n }\n}\n\nvoid ManipulatorRobot::getEndEffectorJacobian(const std::vector<double>& joint_angles,\n std::vector<std::vector<double>>& ee_jacobian)\n{\n\n //std::vector<double> state;\n std::vector<double> state2;\n //for (auto &k: joint_angles) {\n // state.push_back(k);\n //}\n\n if (joint_angles.size() > getStateSpaceDimension() / 2) {\n for (size_t i = 0; i < joint_angles.size() / 2; i++) {\n state2.push_back(joint_angles[i]);\n }\n } else {\n for 
(size_t i = 0; i < joint_angles.size(); i++) {\n state2.push_back(joint_angles[i]);\n }\n }\n\n MatrixXd jacobian(6, getStateSpaceDimension() / 2);\n kinematics_->getEEJacobian(state2, jacobian);\n for (size_t i = 0; i < jacobian.rows(); i++) {\n std::vector<double> row;\n for (size_t j = 0; j < jacobian.cols(); j++) {\n row.push_back(jacobian(i, j));\n }\n ee_jacobian.push_back(row);\n }\n}\n\nvoid ManipulatorRobot::getEndEffectorVelocity(std::vector<double>& state,\n std::vector<double>& ee_velocity)\n{\n MatrixXd j(6, getStateSpaceDimension() / 2);\n kinematics_->getEEJacobian(state, j);\n\n MatrixXd vel(state.size() / 2, 1);\n for (size_t i = 0; i < state.size() / 2; i++) {\n vel(i, 0) = state[i + state.size() / 2];\n }\n\n MatrixXd res = j * vel;\n ee_velocity.clear();\n for (size_t i = 0; i < 6; i++) {\n ee_velocity.push_back(res(i, 0));\n }\n}\n\nbool ManipulatorRobot::propagate_first_order(std::vector<double>& current_state,\n std::vector<double>& control_input,\n std::vector<double>& control_error,\n std::vector<double>& nominal_state,\n std::vector<double>& nominal_control,\n double simulation_step_size,\n double duration,\n std::vector<double>& result)\n{\n std::vector<double> current_joint_values;\n std::vector<double> current_joint_velocities;\n\n for (size_t i = 0; i < current_state.size() / 2; i++) {\n current_joint_values.push_back(current_state[i]);\n current_joint_velocities.push_back(current_state[i + current_state.size() / 2]);\n }\n\n return static_cast<frapu::ManipulatorPropagator*>(propagator_.get())->propagate_nonlinear_first_order(current_joint_values,\n current_joint_velocities,\n control_input,\n control_error,\n nominal_state,\n nominal_control,\n simulation_step_size,\n duration,\n result);\n}\n\nbool ManipulatorRobot::propagate_second_order(std::vector<double>& current_state,\n std::vector<double>& control_input,\n std::vector<double>& control_error,\n std::vector<double>& nominal_state,\n std::vector<double>& nominal_control,\n double simulation_step_size,\n double duration,\n std::vector<double>& result)\n{\n std::vector<double> current_joint_values;\n std::vector<double> current_joint_velocities;\n\n for (size_t i = 0; i < current_state.size() / 2; i++) {\n current_joint_values.push_back(current_state[i]);\n current_joint_velocities.push_back(current_state[i + current_state.size() / 2]);\n }\n\n return static_cast<frapu::ManipulatorPropagator*>(propagator_.get())->propagate_nonlinear_second_order(current_joint_values,\n current_joint_velocities,\n control_input,\n control_error,\n nominal_state,\n nominal_control,\n simulation_step_size,\n duration,\n result);\n}\n\nunsigned int ManipulatorRobot::get_link_index(std::string& link_name)\n{\n for (size_t i = 0; i < link_names_.size(); i++) {\n if (link_name == link_names_[i]) {\n return i;\n }\n }\n\n return 0;\n}\n\nvoid ManipulatorRobot::getLinkNames(std::vector<std::string>& link_names)\n{\n for (auto & name : link_names_) {\n link_names.push_back(name);\n }\n}\n\nvoid ManipulatorRobot::getJointNames(std::vector<std::string>& joint_names)\n{\n for (auto & name : joint_names_) {\n joint_names.push_back(name);\n }\n}\n\nvoid ManipulatorRobot::getLinkMasses(std::vector<std::string>& link, std::vector<double>& link_masses)\n{\n int index = 0;\n for (size_t i = 0; i < link.size(); i++) {\n index = get_link_index(link[i]);\n link_masses.push_back(link_masses_[index]);\n }\n}\n\nvoid ManipulatorRobot::getLinkInertias(std::vector<std::string>& link, std::vector<std::vector<double>>& inertias)\n{\n double index = 
0;\n for (size_t i = 0; i < link.size(); i++) {\n index = get_link_index(link[i]);\n inertias.push_back(link_inertia_matrices_[index]);\n }\n}\n\nvoid ManipulatorRobot::getActiveLinkDimensions(std::vector<std::vector<double>>& dimensions)\n{\n for (auto & k : active_link_dimensions_) {\n dimensions.push_back(k);\n }\n}\n\nvoid ManipulatorRobot::getLinkDimension(std::vector<std::string>& link, std::vector<std::vector<double>>& dimension)\n{\n int index = 0;\n for (size_t i = 0; i < link.size(); i++) {\n index = get_link_index(link[i]);\n dimension.push_back(link_dimensions_[index]);\n }\n}\n\nvoid ManipulatorRobot::getLinkPose(std::vector<std::string>& link, std::vector<std::vector<double>>& pose)\n{\n int index = 0;\n for (size_t i = 0; i < link.size(); i++) {\n index = get_link_index(link[i]);\n pose.push_back(link_origins_[index]);\n }\n}\n\nvoid ManipulatorRobot::getLinkInertialPose(std::vector<std::string>& link, std::vector<std::vector<double>>& pose)\n{\n int index = 0;\n for (size_t i = 0; i < link.size(); i++) {\n index = get_link_index(link[i]);\n pose.push_back(link_inertia_origins_[index]);\n }\n}\n\nvoid ManipulatorRobot::getActiveJoints(std::vector<std::string>& joints) const\n{\n for (auto & joint : active_joints_) {\n joints.push_back(joint);\n }\n}\n\nvoid ManipulatorRobot::getJointLowerPositionLimits(std::vector<std::string>& joints, std::vector<double>& joint_limits) const\n{\n int index = 0;\n for (size_t i = 0; i < joints.size(); i++) {\n for (size_t j = 0; j < joint_names_.size(); j++) {\n if (joints[i] == joint_names_[j]) {\n joint_limits.push_back(lower_joint_limits_[j]);\n }\n }\n }\n}\n\nvoid ManipulatorRobot::getJointUpperPositionLimits(std::vector<std::string>& joints, std::vector<double>& joint_limits) const\n{\n int index = 0;\n for (size_t i = 0; i < joints.size(); i++) {\n for (size_t j = 0; j < joint_names_.size(); j++) {\n if (joints[i] == joint_names_[j]) {\n joint_limits.push_back(upper_joint_limits_[j]);\n }\n }\n }\n}\n\nvoid ManipulatorRobot::getJointVelocityLimits(std::vector<std::string>& joints, std::vector<double>& joint_limits) const\n{\n int index = 0;\n for (size_t i = 0; i < joints.size(); i++) {\n for (size_t j = 0; j < joint_names_.size(); j++) {\n if (joints[i] == joint_names_[j]) {\n joint_limits.push_back(joint_velocity_limits_[j]);\n }\n }\n }\n}\n\nvoid ManipulatorRobot::getJointTorqueLimits(std::vector<std::string>& joints, std::vector<double>& joint_limits) const\n{\n int index = 0;\n for (size_t i = 0; i < joints.size(); i++) {\n for (size_t j = 0; j < joint_names_.size(); j++) {\n if (joints[i] == joint_names_[j]) {\n joint_limits.push_back(joint_torque_limits_[j]);\n }\n }\n }\n}\n\nvoid ManipulatorRobot::getJointDamping(std::vector<std::string>& joints, std::vector<double>& damping)\n{\n int index = 0;\n for (size_t i = 0; i < joints.size(); i++) {\n for (size_t j = 0; j < joint_names_.size(); j++) {\n if (joints[i] == joint_names_[j]) {\n damping.push_back(joint_dampings_[j]);\n }\n }\n }\n}\n\nvoid ManipulatorRobot::getJointType(std::vector<std::string>& joint, std::vector<std::string>& type)\n{\n int index = 0;\n for (size_t i = 0; i < joint.size(); i++) {\n for (size_t j = 0; j < joint_names_.size(); j++) {\n if (joint[i] == joint_names_[j]) {\n type.push_back(joint_types_[j]);\n }\n }\n }\n}\n\nvoid ManipulatorRobot::getJointOrigin(std::vector<std::string>& joints, std::vector<std::vector<double>>& origins)\n{\n for (size_t i = 0; i < joints.size(); i++) {\n for (size_t j = 0; j < joint_names_.size(); j++) {\n if (joints[i] == 
joint_names_[j]) {\n origins.push_back(joint_origins_[j]);\n }\n }\n }\n}\n\nvoid ManipulatorRobot::getJointAxis(std::vector<std::string>& joints, std::vector<std::vector<int>>& axis)\n{\n for (size_t i = 0; i < joints.size(); i++) {\n for (size_t j = 0; j < joint_names_.size(); j++) {\n if (joints[i] == joint_names_[j]) {\n axis.push_back(joint_axes_[j]);\n }\n }\n }\n}\n\nint ManipulatorRobot::getStateSpaceDimension() const\n{\n return stateSpace_->getNumDimensions();\n}\n\n/**int ManipulatorRobot::getControlSpaceDimension() const\n{\n return active_joints_.size();\n}*/\n\nint ManipulatorRobot::getDOF() const\n{\n return active_joints_.size();\n}\n\nbool ManipulatorRobot::isTerminal(const frapu::RobotStateSharedPtr& state) const\n{\n std::vector<double> stateVec = static_cast<const frapu::VectorState*>(state.get())->asVector();\n std::vector<double> end_effector_position;\n getEndEffectorPosition(stateVec, end_effector_position);\n bool terminal = static_cast<frapu::SphereGoal*>(goal_.get())->isSatisfied(end_effector_position);\n return static_cast<frapu::SphereGoal*>(goal_.get())->isSatisfied(end_effector_position);\n}\n\ndouble ManipulatorRobot::distanceGoal(const frapu::RobotStateSharedPtr& state) const\n{\n std::vector<double> stateVec = static_cast<const frapu::VectorState*>(state.get())->asVector();\n assert(goal_position_.size() != 0 && \"ManipulatorRobot: No goal area set. Cannot calculate distance!\");\n std::vector<double> end_effector_position;\n getEndEffectorPosition(stateVec, end_effector_position);\n return static_cast<frapu::SphereGoal*>(goal_.get())->distanceCenter(end_effector_position);\n /**double dist = 0.0;\n for (size_t i = 0; i < end_effector_position.size(); i++) {\n dist += std::pow(end_effector_position[i] - goal_position_[i], 2);\n }\n\n return std::sqrt(dist);*/\n\n}\n\nvoid ManipulatorRobot::makeNextStateAfterCollision(const frapu::RobotStateSharedPtr& previousState,\n const frapu::RobotStateSharedPtr& collidingState,\n frapu::RobotStateSharedPtr& nextState)\n{\n std::vector<double> previousStateVec = static_cast<frapu::VectorState*>(previousState.get())->asVector();\n std::vector<double> nextStateVec = previousStateVec;\n for (size_t i = nextStateVec.size() / 2; i < nextStateVec.size(); i++) {\n nextStateVec[i] = 0.0;\n }\n\n nextState = std::make_shared<frapu::VectorState>(nextStateVec);\n}\n\nvoid ManipulatorRobot::makeProcessDistribution(Eigen::MatrixXd& mean,\n Eigen::MatrixXd& covariance_matrix)\n{\n process_distribution_ = std::make_shared<Eigen::EigenMultivariateNormal<double>>(mean, covariance_matrix, false);\n}\n\nvoid ManipulatorRobot::makeObservationDistribution(Eigen::MatrixXd& mean,\n Eigen::MatrixXd& covariance_matrix)\n{\n observation_distribution_ = std::make_shared<Eigen::EigenMultivariateNormal<double>>(mean, covariance_matrix, false);\n}\n\n/**BOOST_PYTHON_MODULE(librobots)\n{\n using namespace boost::python;\n\n void (RobotWrapper::*enforceConstraintsB)(bool) = &RobotWrapper::enforceConstraints;\n //bool (RobotWrapper::*enforceConstraintsS)(std::vector<double> &) = &RobotWrapper::enforceConstraints;\n\n boost::python::type_info info = boost::python::type_id<std::vector<double>>();\n const boost::python::converter::registration* reg_double = boost::python::converter::registry::query(info);\n if (reg_double == NULL || (*reg_double).m_to_python == NULL) {\n class_<std::vector<double> > (\"v_double\")\n .def(vector_indexing_suite<std::vector<double> >());\n }\n\n info = boost::python::type_id<std::vector<int>>();\n const 
boost::python::converter::registration* reg_int = boost::python::converter::registry::query(info);\n if (reg_int == NULL || (*reg_int).m_to_python == NULL) {\n class_<std::vector<int> > (\"v_int\")\n .def(vector_indexing_suite<std::vector<int> >());\n }\n\n info = boost::python::type_id<std::vector<std::vector<double>>>();\n const boost::python::converter::registration* reg_v2double = boost::python::converter::registry::query(info);\n if (reg_v2double == NULL || (*reg_v2double).m_to_python == NULL) {\n class_<std::vector<std::vector<double> > > (\"v2_double\")\n .def(vector_indexing_suite<std::vector<std::vector<double> > >());\n }\n\n info = boost::python::type_id<std::vector<std::vector<int>>>();\n const boost::python::converter::registration* reg_v2int = boost::python::converter::registry::query(info);\n if (reg_v2int == NULL || (*reg_v2int).m_to_python == NULL) {\n class_<std::vector<std::vector<int> > > (\"v2_int\")\n .def(vector_indexing_suite<std::vector<std::vector<int> > >());\n }\n\n info = boost::python::type_id<std::vector<std::string>>();\n const boost::python::converter::registration* reg_vstring = boost::python::converter::registry::query(info);\n if (reg_vstring == NULL || (*reg_vstring).m_to_python == NULL) {\n class_<std::vector<std::string> > (\"v_string\")\n .def(vector_indexing_suite<std::vector<std::string> >());\n }\n\n class_<fcl::OBB>(\"OBB\");\n class_<fcl::CollisionObject>(\"CollisionObject\", init<const boost::shared_ptr<fcl::CollisionGeometry>, const fcl::Transform3f>());\n to_python_converter<std::vector<fcl::OBB, std::allocator<fcl::OBB> >, VecToList<fcl::OBB> >();\n to_python_converter<std::vector<fcl::CollisionObject, std::allocator<fcl::CollisionObject> >, VecToList<fcl::CollisionObject> >();\n to_python_converter < std::vector<std::shared_ptr<fcl::CollisionObject>, std::allocator<std::shared_ptr<fcl::CollisionObject>> >,\n VecToList<std::shared_ptr<fcl::CollisionObject>> > ();\n register_ptr_to_python<std::shared_ptr<fcl::CollisionObject>>();\n\n class_<Robot, std::shared_ptr<Robot>, boost::noncopyable>(\"Robot\", no_init);\n\n class_<RobotWrapper, boost::noncopyable>(\"Robot\", init<std::string>())\n .def(\"getDOF\", &RobotWrapper::getDOF)\n .def(\"getStateSpaceDimension\", &RobotWrapper::getStateSpaceDimension)\n .def(\"getControlSpaceDimension\", &RobotWrapper::getControlSpaceDimension)\n .def(\"enforceConstraints\", enforceConstraintsB)\n ;\n\n void (ManipulatorRobot::*enforceConstraintsMB)(bool) = &RobotWrapper::enforceConstraints;\n\n class_<ManipulatorRobot, boost::shared_ptr<ManipulatorRobot>, bases<Robot>>(\"ManipulatorRobot\", init<std::string>())\n .def(\"getLinkNames\", &ManipulatorRobot::getLinkNames)\n .def(\"getLinkDimension\", &ManipulatorRobot::getLinkDimension)\n .def(\"getActiveLinkDimensions\", &ManipulatorRobot::getActiveLinkDimensions)\n .def(\"getLinkMasses\", &ManipulatorRobot::getLinkMasses)\n .def(\"getLinkPose\", &ManipulatorRobot::getLinkPose)\n .def(\"getLinkInertialPose\", &ManipulatorRobot::getLinkInertialPose)\n .def(\"getLinkInertias\", &ManipulatorRobot::getLinkInertias)\n .def(\"getJointNames\", &ManipulatorRobot::getJointNames)\n .def(\"getActiveJoints\", &ManipulatorRobot::getActiveJoints)\n .def(\"getJointType\", &ManipulatorRobot::getJointType)\n .def(\"getJointDamping\", &ManipulatorRobot::getJointDamping)\n .def(\"getJointOrigin\", &ManipulatorRobot::getJointOrigin)\n .def(\"getJointAxis\", &ManipulatorRobot::getJointAxis)\n .def(\"propagate\", &ManipulatorRobot::propagateState)\n .def(\"propagate_first_order\", 
&ManipulatorRobot::propagate_first_order)\n .def(\"propagate_second_order\", &ManipulatorRobot::propagate_second_order)\n //.def(\"createRobotCollisionStructures\", &Robot::createRobotCollisionStructuresPy)\n .def(\"createRobotCollisionObjects\", &ManipulatorRobot::createRobotCollisionObjectsPy)\n .def(\"createEndEffectorCollisionObject\", &ManipulatorRobot::createEndEffectorCollisionObjectPy)\n .def(\"getEndEffectorPosition\", &ManipulatorRobot::getEndEffectorPosition)\n .def(\"getStateSpaceDimension\", &ManipulatorRobot::getStateSpaceDimension)\n .def(\"getControlSpaceDimension\", &ManipulatorRobot::getControlSpaceDimension)\n .def(\"getDOF\", &ManipulatorRobot::getDOF)\n .def(\"getJointLowerPositionLimits\", &ManipulatorRobot::getJointLowerPositionLimits)\n .def(\"getJointUpperPositionLimits\", &ManipulatorRobot::getJointUpperPositionLimits)\n .def(\"getJointVelocityLimits\", &ManipulatorRobot::getJointVelocityLimits)\n .def(\"getJointTorqueLimits\", &ManipulatorRobot::getJointTorqueLimits)\n .def(\"enforceConstraints\", enforceConstraintsMB)\n .def(\"constraintsEnforced\", &ManipulatorRobot::constraintsEnforced)\n .def(\"setGravityConstant\", &ManipulatorRobot::setGravityConstant)\n .def(\"setExternalForce\", &ManipulatorRobot::setExternalForce)\n .def(\"setAccelerationLimit\", &ManipulatorRobot::setAccelerationLimit)\n .def(\"getEndEffectorVelocity\", &ManipulatorRobot::getEndEffectorVelocity)\n .def(\"getProcessMatrices\", &ManipulatorRobot::getProcessMatrices)\n .def(\"getEndEffectorJacobian\", &ManipulatorRobot::getEndEffectorJacobian)\n .def(\"setNewtonModel\", &ManipulatorRobot::setNewtonModel)\n .def(\"checkSelfCollision\", &ManipulatorRobot::checkSelfCollisionPy)\n#ifdef USE_OPENRAVE\n .def(\"setupViewer\", &ManipulatorRobot::setupViewer)\n .def(\"updateViewerValues\", &ManipulatorRobot::updateViewerValues)\n .def(\"setViewerSize\", &ManipulatorRobot::setViewerSize)\n .def(\"setViewerBackgroundColor\", &ManipulatorRobot::setViewerBackgroundColor)\n .def(\"setViewerCameraTransform\", &ManipulatorRobot::setViewerCameraTransform)\n .def(\"addPermanentViewerParticles\", &ManipulatorRobot::addPermanentViewerParticles)\n .def(\"removePermanentViewerParticles\", &ManipulatorRobot::removePermanentViewerParticles)\n#endif\n //.def(\"setup\", &Integrate::setup)\n ;\n\n def(\"makeManipulatorRobot\", &makeManipulatorRobot);\n}*/\n\n}\n\n"
},
{
"alpha_fraction": 0.7462844848632812,
"alphanum_fraction": 0.747346043586731,
"avg_line_length": 33.88888931274414,
"blob_id": "e8f4d5c94441ace66152fa3576538e21c4812c20",
"content_id": "d871805fff4ff4a5d8ff163b536bf0394f16b8f8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 942,
"license_type": "no_license",
"max_line_length": 110,
"num_lines": 27,
"path": "/src/ContinuousVectorActionSpace.cpp",
"repo_name": "hoergems/robots",
"src_encoding": "UTF-8",
"text": "#include <robot_headers/ActionSpace.hpp>\n\nnamespace frapu\n{\nContinuousVectorActionSpace::ContinuousVectorActionSpace(const ActionSpaceInfo &actionSpaceInfo):\n ContinuousActionSpace(actionSpaceInfo)\n{\n\n}\n\nActionSharedPtr ContinuousVectorActionSpace::sampleUniform(std::default_random_engine* randGen) const\n{\n std::vector<double> lowerActionLimits;\n std::vector<double> upperActionLimits;\n static_cast<VectorActionLimits*>(actionLimits_.get())->getRawLimits(lowerActionLimits, upperActionLimits);\n std::vector<double> randomActionVec(lowerActionLimits.size());\n for (size_t i = 0; i < lowerActionLimits.size(); i++) {\n std::uniform_real_distribution<double> uniform_dist(lowerActionLimits[i], upperActionLimits[i]);\n double rand_num = uniform_dist(*randGen);\n randomActionVec[i] = rand_num;\n }\n \n ActionSharedPtr action = std::make_shared<VectorAction>(randomActionVec);\n return action;\n}\n\n}\n"
},
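A minimal, self-contained sketch of the per-dimension uniform sampling pattern used in ContinuousVectorActionSpace::sampleUniform above, with the frapu wrapper types (ActionSharedPtr, VectorActionLimits) replaced by plain std::vector; the helper name sampleUniformVector is illustrative only, not part of the library.

```cpp
#include <iostream>
#include <random>
#include <vector>

// Draw one value per dimension from U(lower[i], upper[i]), mirroring the
// sampling loop in ContinuousVectorActionSpace::sampleUniform (frapu types
// replaced by plain vectors for this sketch).
std::vector<double> sampleUniformVector(const std::vector<double>& lower,
                                        const std::vector<double>& upper,
                                        std::default_random_engine& randGen)
{
    std::vector<double> sample(lower.size());
    for (std::size_t i = 0; i < lower.size(); ++i) {
        std::uniform_real_distribution<double> dist(lower[i], upper[i]);
        sample[i] = dist(randGen);
    }
    return sample;
}

int main()
{
    std::default_random_engine randGen(42);
    std::vector<double> lower{-1.0, -2.0};
    std::vector<double> upper{1.0, 2.0};
    for (double v : sampleUniformVector(lower, upper, randGen)) {
        std::cout << v << " ";
    }
    std::cout << std::endl;
    return 0;
}
```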
{
"alpha_fraction": 0.7711598873138428,
"alphanum_fraction": 0.7711598873138428,
"avg_line_length": 26.7391300201416,
"blob_id": "b7f6329e8aead3094d5731adcb52e5e59a224c1f",
"content_id": "dabef2d3c981a7762a8bfee4b7ae5aa502e552d7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 638,
"license_type": "no_license",
"max_line_length": 118,
"num_lines": 23,
"path": "/src/ContinuousObservationSpace.cpp",
"repo_name": "hoergems/robots",
"src_encoding": "UTF-8",
"text": "#include <robot_headers/ContinuousObservationSpace.hpp>\n\nnamespace frapu\n{\nContinuousObservationSpace::ContinuousObservationSpace(const ObservationSpaceInfo& observationSpaceInfo):\n ObservationSpace(observationSpaceInfo),\n lowerLimits_(),\n upperLimits_()\n{\n\n}\n\nvoid ContinuousObservationSpace::setLimits(std::vector<double> &lowerLimits, std::vector<double> &upperLimits) {\n lowerLimits_ = lowerLimits;\n upperLimits_ = upperLimits;\n}\n\nvoid ContinuousObservationSpace::getLimits(std::vector<double> &lowerLimits, std::vector<double> &upperLimits) const {\n lowerLimits = lowerLimits_;\n upperLimits = upperLimits_;\n}\n\n}\n"
},
{
"alpha_fraction": 0.7305593490600586,
"alphanum_fraction": 0.7326057553291321,
"avg_line_length": 26.660377502441406,
"blob_id": "80d11342b1967bff27cb8c5dbd9f8f1f5951ac4a",
"content_id": "f3932249e63eef9bac39ddbe0b9f12a025fcb83d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1466,
"license_type": "no_license",
"max_line_length": 110,
"num_lines": 53,
"path": "/src/DiscreteObservationSpace.cpp",
"repo_name": "hoergems/robots",
"src_encoding": "UTF-8",
"text": "#include <robot_headers/DiscreteObservationSpace.hpp>\n\nnamespace frapu\n{\nDiscreteObservationSpace::DiscreteObservationSpace(const ObservationSpaceInfo& observationSpaceInfo):\n ObservationSpace(observationSpaceInfo),\n observationMap_() \n{\n\n}\n\nvoid DiscreteObservationSpace::addObservations(const std::vector<std::vector<double>> &observationStates) {\n std::size_t hashValue;\n for (auto &observationState: observationStates) {\n\thashValue = calcHashValue_(observationState);\t\n\tif (observationMap_.count(hashValue) == 0) {\n\t observationMap_[hashValue] = observationState;\t \n\t}\n }\n}\n\nvoid DiscreteObservationSpace::removeObservations(const std::vector<std::vector<double>> &observationStates) {\n std::size_t hashValue;\n for (auto &observationState: observationStates) {\n\thashValue = calcHashValue_(observationState);\n\tobservationMap_.erase(hashValue);\t\n }\n}\n\nbool DiscreteObservationSpace::observationExists(std::vector<double> &observation) const {\n std::size_t hashValue = calcHashValue_(observation);\n if (observationMap_.count(hashValue) > 0) {\n\treturn true;\n }\n \n return false;\n}\n\nunsigned int DiscreteObservationSpace::getNumObservations()\n{\n return observationMap_.size();\n}\n\nsize_t DiscreteObservationSpace::calcHashValue_(const std::vector<double> &observation) const{\n std::size_t hashValue = 0;\n for (auto &k: observation) {\n\tfrapu::hash_combine(hashValue, k);\n }\n \n return hashValue; \n}\n\n}\n"
},
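DiscreteObservationSpace above keys observations in an unordered map by a combined hash of their components via frapu::hash_combine. Below is a minimal sketch of that pattern; the hash_combine helper is assumed to follow the common boost-style recipe, since the frapu implementation is not shown in this file.

```cpp
#include <cstddef>
#include <functional>
#include <iostream>
#include <unordered_map>
#include <vector>

// Boost-style hash_combine; assumed to match what frapu::hash_combine does,
// as its implementation is not part of the file above.
template <typename T>
void hash_combine(std::size_t& seed, const T& value)
{
    std::hash<T> hasher;
    seed ^= hasher(value) + 0x9e3779b9 + (seed << 6) + (seed >> 2);
}

// Combine all components of an observation into a single key, mirroring
// DiscreteObservationSpace::calcHashValue_.
std::size_t hashObservation(const std::vector<double>& observation)
{
    std::size_t seed = 0;
    for (double component : observation) {
        hash_combine(seed, component);
    }
    return seed;
}

int main()
{
    // The map plays the role of observationMap_: hash value -> observation.
    std::unordered_map<std::size_t, std::vector<double>> observationMap;
    std::vector<double> obs{0.1, 0.5, -1.0};
    observationMap[hashObservation(obs)] = obs;

    std::cout << "exists: " << (observationMap.count(hashObservation(obs)) > 0)
              << ", size: " << observationMap.size() << std::endl;
    return 0;
}
```

Note that std::hash<double> only matches bit-identical values, so observations are typically discretized to fixed bins before hashing; the class above leaves that step to the caller.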
{
"alpha_fraction": 0.4502420723438263,
"alphanum_fraction": 0.48413124680519104,
"avg_line_length": 31.61403465270996,
"blob_id": "019ede6f771a238d010e59de6d67375ef01c10cf",
"content_id": "72459c189757ac50479d1a16894fa466826d4879",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1859,
"license_type": "no_license",
"max_line_length": 107,
"num_lines": 57,
"path": "/src/Airplane/AirplaneIntegrator.cpp",
"repo_name": "hoergems/robots",
"src_encoding": "UTF-8",
"text": "#include <robot_headers/Airplane/AirplaneIntegrator.hpp>\n\nnamespace frapu\n{\n\nAirplaneIntegrator::AirplaneIntegrator():\n cl_(0.0),\n cd_(0.0),\n k_(0.0),\n g_(0.0)\n{\n\n}\n\nvoid AirplaneIntegrator::do_integration(const state_type& x,\n const state_type& control,\n const state_type& control_error,\n const state_type& int_times,\n state_type& result)\n{\n double t0 = int_times[0];\n double te = int_times[1];\n double step_size = int_times[2];\n tauDest_ = control[0] + control_error[0];\n alphaDest_ = control[1] + control_error[1];\n betaDest_ = control[2] + control_error[2];\n std::vector<double> xNonConst = x;\n size_t k = integrate_const(adams_bashforth<2, state_type>() ,\n std::bind(&AirplaneIntegrator::ode , this , pl::_1 , pl::_2 , pl::_3),\n xNonConst , t0 , te , step_size);\n result = xNonConst;\n}\n\nvoid AirplaneIntegrator::ode(const state_type& x , state_type& dxdt , double t) const\n{\n dxdt.clear();\n dxdt.resize(9);\n dxdt[0] = x[3] * cos(x[7]) * cos(x[6]); // x\n dxdt[1] = x[3] * cos(x[7]) * sin(x[6]); // y\n dxdt[2] = x[3] * sin(x[7]); // z\n dxdt[3] = x[8] * cos(x[5]) - cd_ * k_ * std::pow(x[3], 2) - g_ * sin(x[7]); // v\n dxdt[4] = alphaDest_ - x[4]; // alpha\n dxdt[5] = betaDest_ - x[5]; // beta\n dxdt[6] = x[3] * (sin(x[4]) / cos(x[7])) * ((x[8] * sin(x[5]) / x[3]) + cl_ * k_ * x[3]); // theta\n dxdt[7] = cos(x[4]) * ((x[8] * sin(x[5]) / x[3]) + cl_ * k_ * x[3]) - g_ * (cos(x[7]) / x[3]); // omega\n dxdt[8] = tauDest_ - x[8]; // tau\n}\n\nvoid AirplaneIntegrator::setup(double cl, double cd, double k, double g)\n{\n cl_ = cl;\n cd_ = cd;\n k_ = k;\n g_ = g;\n}\n\n}\n"
},
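Both AirplaneIntegrator and the manipulator Integrate class drive boost::numeric::odeint's integrate_const with a fixed-step Adams-Bashforth stepper. The sketch below shows the same call shape on a toy one-dimensional system (dx/dt = -x); the airplane's nine-state dynamics and its control terms are deliberately not reproduced here.

```cpp
#include <boost/numeric/odeint.hpp>
#include <cmath>
#include <iostream>
#include <vector>

using state_type = std::vector<double>;

// Toy system dx/dt = -x, standing in for AirplaneIntegrator::ode.
void ode(const state_type& x, state_type& dxdt, double /*t*/)
{
    dxdt.resize(x.size());
    dxdt[0] = -x[0];
}

int main()
{
    using namespace boost::numeric::odeint;

    state_type x{1.0};        // initial state
    double t0 = 0.0;          // start time
    double te = 1.0;          // end time
    double step_size = 0.01;  // fixed step, as in do_integration

    // Same call shape as in the integrators above: fixed-step multistep
    // integration with a 2-step Adams-Bashforth scheme.
    integrate_const(adams_bashforth<2, state_type>(), ode, x, t0, te, step_size);

    std::cout << "x(te) = " << x[0] << " (exact: " << std::exp(-te) << ")" << std::endl;
    return 0;
}
```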
{
"alpha_fraction": 0.4809422791004181,
"alphanum_fraction": 0.5051590204238892,
"avg_line_length": 30.491098403930664,
"blob_id": "bded65a5e0009f9797d0c09d0e8a83fdcba7e7ce",
"content_id": "769d659a3a7d6812a1efb6c3461b7421710a97b4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 21225,
"license_type": "no_license",
"max_line_length": 106,
"num_lines": 674,
"path": "/src/Manipulator/ManipulatorIntegrator.cpp",
"repo_name": "hoergems/robots",
"src_encoding": "UTF-8",
"text": "#include <robot_headers/Manipulator/ManipulatorIntegrator.hpp>\n\nusing namespace boost::numeric::odeint;\nusing std::endl;\nusing std::cout;\n\nnamespace frapu\n{\n\n/**template<class T>\n struct VecToList\n {\n static PyObject* convert(const std::vector<T>& vec)\n {\n boost::python::list* l = new boost::python::list();\n for(size_t i = 0; i < vec.size(); i++)\n (*l).append(vec[i]);\n\n return l->ptr();\n }\n };*/\n\nIntegrate::Integrate() :\n Integrator(),\n steady_states_setup_(false),\n g_(0.0),\n f_x_(0.0),\n f_y_(0.0),\n f_z_(0.0),\n f_roll_(0.0),\n f_pitch_(0.0),\n f_yaw_(0.0),\n viscous_(),\n acceleration_limit_(10000.0),\n xstar(),\n rbdl_interface_(nullptr),\n stateSpaceDimension_(1),\n controlSpaceDimension_(1),\n A_(nullptr),\n B_(nullptr),\n V_(nullptr)\n{\n setupSteadyStates();\n rho_vec_ = VectorXd(3);\n vel_ = VectorXd(3);\n}\n\nvoid Integrate::forwardDynamics(double* vals, Eigen::VectorXd& res)\n{\n Eigen::VectorXd forwardDynamicsResult(stateSpaceDimension_ / 2);\n rbdl_interface_->forwardDynamics(vals, forwardDynamicsResult);\n for (size_t i = 0; i < stateSpaceDimension_ / 2; i++) {\n res[i] = vals[i + stateSpaceDimension_ / 2];\n res[i + stateSpaceDimension_ / 2] = forwardDynamicsResult[i];\n }\n}\n\nvoid Integrate::setGravityConstant(double g)\n{\n cout << \"SET GRAVITY CONSTANT INTEGRATOR\" << endl;\n g_ = g;\n}\n\nvoid Integrate::setStateSpaceDimension(size_t& stateSpaceDimension)\n{\n stateSpaceDimension_ = stateSpaceDimension;\n}\n\nvoid Integrate::setControlSpaceDimension(size_t& controlSpaceDimension)\n{\n controlSpaceDimension_ = controlSpaceDimension;\n A_ = std::make_shared<Eigen::MatrixXd>(Eigen::MatrixXd::Zero(stateSpaceDimension_, \n stateSpaceDimension_));\n B_ = std::make_shared<Eigen::MatrixXd>(Eigen::MatrixXd::Zero(stateSpaceDimension_, \n controlSpaceDimension_));\n V_ = std::make_shared<Eigen::MatrixXd>(Eigen::MatrixXd::Zero(stateSpaceDimension_, \n controlSpaceDimension_));\n\n AFunct_ = std::function<void (double*)>(frapu::mixedDerivatives(std::bind(&Integrate::forwardDynamics,\n this,\n std::placeholders::_1,\n std::placeholders::_2),\n A_,\n 0));\n BFunct_ = std::function<void (double*)>(frapu::mixedDerivatives(std::bind(&Integrate::forwardDynamics,\n this,\n std::placeholders::_1,\n std::placeholders::_2),\n B_,\n stateSpaceDimension_));\n VFunct_ = std::function<void (double*)>(frapu::mixedDerivatives(std::bind(&Integrate::forwardDynamics,\n this,\n std::placeholders::_1,\n std::placeholders::_2),\n V_,\n stateSpaceDimension_ + controlSpaceDimension_));\n\n}\n\nvoid Integrate::setRBDLInterface(std::shared_ptr<RBDLInterface>& rbdl_interface)\n{\n rbdl_interface_ = rbdl_interface;\n}\n\nstd::shared_ptr<RBDLInterface> Integrate::getRBDLInterface()\n{\n return rbdl_interface_;\n}\n\nvoid Integrate::setExternalForce(double& f_x, double& f_y, double& f_z,\n double& f_roll, double& f_pitch, double& f_yaw)\n{\n f_x_ = f_x;\n f_y_ = f_y;\n f_z_ = f_z;\n f_roll_ = f_roll;\n f_pitch_ = f_pitch;\n f_yaw_ = f_yaw;\n\n}\n\nvoid Integrate::setAccelerationLimit(double& accelerationLimit)\n{\n acceleration_limit_ = accelerationLimit;\n}\n\nvoid Integrate::setJointDamping(std::vector<double>& viscous)\n{\n cout << \"SET VISCOUS INTEGRATOR\" << endl;\n viscous_.clear();\n for (auto & k : viscous) {\n viscous_.push_back(k);\n }\n}\n\ndouble Integrate::factorial_(int num) const\n{\n double factor = 1;\n for (int i = 1; i < num + 1; i++) {\n factor = factor * i;\n }\n return factor;\n}\n\nMatrixXd Integrate::power_series_(const MatrixXd& m, double t, int 
depth) const\n{\n MatrixXd A_t = -t * m;\n MatrixXd A_i(A_t);\n MatrixXd term = MatrixXd::Identity(m.rows(), m.cols());\n for (size_t i = 1; i < depth + 1; i++) {\n term += A_i / factorial_(i + 1);\n A_i *= A_t;\n }\n return t * term;\n}\n\nvoid Integrate::calc_inverse_inertia_matrix(MatrixXd& M) const\n{\n M_inv_ = M.inverse();\n}\n\nstd::vector<double> Integrate::getResult()\n{\n return result_;\n}\n\nvoid Integrate::getProcessMatrices2(const std::vector<double>& x,\n std::vector<double>& rho, double t_e,\n const std::string& observationType,\n std::vector<MatrixXd>& matrices) const\n{\n double vals[x.size() + 2 * rho.size()];\n for (size_t i = 0 ; i < x.size(); i++) {\n vals[i] = x[i];\n }\n\n for (size_t i = 0; i < rho.size(); i++) {\n vals[i + x.size()] = rho[i];\n vals[i + x.size() + rho.size()] = 0.0;\n }\n\n AFunct_(vals);\n BFunct_(vals);\n VFunct_(vals);\n \n cout << \"A_: \" << endl;\n cout << A_ << endl;\n \n Eigen::MatrixXd A(*(A_.get()));\n Eigen::MatrixXd B(*(B_.get()));\n Eigen::MatrixXd V(*(V_.get()));\n\n MatrixXd A_matrx1 = (t_e * A).exp();\n MatrixXd integral = power_series_(A, t_e, 20);\n MatrixXd B_matrx = A_matrx1 * integral * B;\n //MatrixXd V_matrx = A_matrx1 * integral * V;\n matrices.push_back(A_matrx1);\n matrices.push_back(B_matrx);\n matrices.push_back(V); \n}\n\nvoid Integrate::getProcessMatrices(const std::vector<double>& x,\n std::vector<double>& rho, double t_e,\n const std::string& observationType,\n std::vector<MatrixXd>& matrices) const\n{\n /**std::vector<double> zeta_nil;\n MatrixXd M = getM0(x, rho, zeta_nil);\n calc_inverse_inertia_matrix(M);\n MatrixXd AMatrix = getA0(x, rho, zeta_nil);\n MatrixXd BMatrix = getB0(x, rho, zeta_nil);\n MatrixXd VMatrix = getV0(x, rho, zeta_nil);\n MatrixXd A_matrx1 = (t_e * AMatrix).exp();\n MatrixXd integral = power_series_(AMatrix, t_e, 20);\n MatrixXd B_matrx = A_matrx1 * integral * BMatrix;\n MatrixXd V_matrx = A_matrx1 * integral * VMatrix;\n matrices.push_back(A_matrx1);\n matrices.push_back(B_matrx);\n matrices.push_back(V_matrx);\n if (observationType == \"linear\") {\n Eigen::MatrixXd H = Eigen::MatrixXd::Identity(x.size(), x.size());\n Eigen::MatrixXd W = Eigen::MatrixXd::Identity(x.size(), x.size());\n matrices.push_back(H);\n matrices.push_back(W);\n }\n else {\n Eigen::MatrixXd H = getH0(x, rho, zeta_nil);\n Eigen::MatrixXd W = getW0(x, rho, zeta_nil);\n matrices.push_back(H);\n matrices.push_back(W);\n }*/\n}\n\nvoid Integrate::getLinearObservationDynamics(const std::vector<double>& state,\n const std::string& observationType, Eigen::MatrixXd& H,\n Eigen::MatrixXd& W) const\n{\n if (observationType == \"linear\") {\n H = Eigen::MatrixXd::Identity(state.size(), state.size());\n W = Eigen::MatrixXd::Identity(state.size(), state.size());\n } else {\n std::vector<double> rho_zero;\n std::vector<double> zeta_zero;\n H = getH0(state, rho_zero, zeta_zero);\n W = getW0(state, rho_zero, zeta_zero);\n }\n}\n\nstd::vector<double> Integrate::getProcessMatricesVec(std::vector<double>& x,\n std::vector<double>& rho, double t_e) const\n{\n std::vector < MatrixXd > matrices;\n getProcessMatrices(x, rho, t_e, \"\", matrices);\n std::vector<double> res;\n\n for (size_t i = 0; i < matrices[0].size(); i++) {\n res.push_back(matrices[0](i));\n }\n\n for (size_t i = 0; i < matrices[1].size(); i++) {\n res.push_back(matrices[1](i));\n }\n\n for (size_t i = 0; i < matrices[2].size(); i++) {\n res.push_back(matrices[2](i));\n }\n\n return res;\n}\n\nstd::vector<double> Integrate::getProcessMatricesSteadyStatesVec(\n 
std::vector<double>& x, double t_e) const\n{\n std::vector<double> rho_nil;\n std::vector<double> zeta_nil;\n std::pair<int, std::vector<double>> closest_steady_state =\n getClosestSteadyState(x);\n for (size_t i = 0; i < closest_steady_state.second.size(); i++) {\n if (closest_steady_state.second[i] == -1) {\n closest_steady_state.second[i] = x[i];\n }\n }\n\n std::pair<AB_funct, std::pair<AB_funct, AB_funct>> ab_functions =\n getClosestSteadyStateFunctions(closest_steady_state_.first);\n auto A = ab_functions.first;\n auto B = ab_functions.second.first;\n auto V = ab_functions.second.second;\n MatrixXd AMatrix = (this->*A)(closest_steady_state.second, rho_nil,\n zeta_nil);\n MatrixXd BMatrix = (this->*B)(closest_steady_state.second, rho_nil,\n zeta_nil);\n MatrixXd VMatrix = (this->*V)(closest_steady_state.second, rho_nil,\n zeta_nil);\n MatrixXd A_matrx1 = (t_e * AMatrix).exp();\n MatrixXd integral = power_series_(AMatrix, t_e, 20);\n MatrixXd B_matrx = A_matrx1 * integral * BMatrix;\n\n MatrixXd B_matrx_temp = MatrixXd::Identity(B_matrx.rows(),\n B_matrx.cols() * 2);\n MatrixXd V_matrx_temp = MatrixXd::Identity(VMatrix.rows(),\n VMatrix.cols() * 2);\n\n for (size_t i = 0; i < B_matrx.rows(); i++) {\n for (size_t j = 0; j < B_matrx.cols(); j++) {\n B_matrx_temp(i, j) = B_matrx(i, j);\n V_matrx_temp(i, j) = VMatrix(i, j);\n }\n }\n\n std::vector<double> res;\n for (size_t i = 0; i < A_matrx1.size(); i++) {\n res.push_back(A_matrx1(i));\n }\n\n for (size_t i = 0; i < B_matrx_temp.size(); i++) {\n res.push_back(B_matrx_temp(i));\n }\n\n for (size_t i = 0; i < V_matrx_temp.size(); i++) {\n res.push_back(V_matrx_temp(i));\n }\n return res;\n}\n\nvoid Integrate::do_integration_first_order(std::vector<double>& x,\n std::vector<double>& control, std::vector<double>& control_error,\n std::vector<double>& nominal_state,\n std::vector<double>& nominal_control, std::vector<double>& int_times,\n std::vector<double>& result)\n{\n xstar.clear();\n zetastar.clear();\n for (size_t i = 0; i < x.size(); i++) {\n xstar.push_back(nominal_state[i]);\n zetastar.push_back(0.0);\n }\n\n rhostar.clear();\n for (size_t i = 0; i < control.size(); i++) {\n rhostar.push_back(nominal_control[i]);\n }\n double t0 = int_times[0];\n double te = int_times[1];\n double step_size = int_times[2];\n rho_ = control;\n zeta_ = control_error;\n\n std::vector<double> state;\n for (size_t i = 0; i < x.size(); i++) {\n state.push_back(x[i] - nominal_state[i]);\n }\n\n size_t k = integrate_const(adams_bashforth<5, state_type>(),\n std::bind(&Integrate::ode_first_order, this, pl::_1, pl::_2,\n pl::_3), state, t0, te, step_size);\n result = state;\n}\n\nvoid Integrate::do_integration_second_order(std::vector<double>& x,\n std::vector<double>& control, std::vector<double>& control_error,\n std::vector<double>& nominal_state,\n std::vector<double>& nominal_control, std::vector<double>& int_times,\n std::vector<double>& result)\n{\n xstar.clear();\n zetastar.clear();\n for (size_t i = 0; i < x.size(); i++) {\n xstar.push_back(nominal_state[i]);\n zetastar.push_back(0.0);\n }\n\n rhostar.clear();\n for (size_t i = 0; i < control.size(); i++) {\n rhostar.push_back(nominal_control[i]);\n }\n double t0 = int_times[0];\n double te = int_times[1];\n double step_size = int_times[2];\n rho_ = control;\n zeta_ = control_error;\n\n std::vector<double> state;\n for (size_t i = 0; i < x.size(); i++) {\n state.push_back(x[i] - nominal_state[i]);\n }\n\n size_t k = integrate_const(adams_bashforth<1, state_type>(),\n 
std::bind(&Integrate::ode_second_order, this, pl::_1, pl::_2,\n pl::_3), state, t0, te, step_size);\n result = state;\n\n}\n\nvoid Integrate::do_integration(const std::vector<double>& x,\n const std::vector<double>& control,\n const std::vector<double>& control_error,\n const std::vector<double>& int_times, std::vector<double>& result)\n{\n double t0 = int_times[0];\n double te = int_times[1];\n double step_size = int_times[2];\n rho_ = control;\n zeta_ = control_error;\n std::vector<double> xNonconst = x;\n size_t k = integrate_const(adams_bashforth<2, state_type>(),\n std::bind(&Integrate::ode, this, pl::_1, pl::_2, pl::_3), xNonconst,\n t0, te, step_size);\n result = xNonconst;\n}\n\nvoid Integrate::do_integration_delta(std::vector<double>& x,\n std::vector<double>& control, std::vector<double>& control_error,\n std::vector<double>& int_times, std::vector<double>& result) const\n{\n double t0 = int_times[0];\n double te = int_times[1];\n double step_size = int_times[2];\n rho_ = control;\n zeta_ = control_error;\n size_t k = integrate_adaptive(adams_bashforth_moulton<1, state_type>(),\n std::bind(&Integrate::odeDelta, this, pl::_1, pl::_2, pl::_3), x,\n t0, te, step_size);\n result = x;\n}\n\nvoid Integrate::setupSteadyStates() const\n{\n\n}\n\nstd::pair < Integrate::AB_funct,\n std::pair<Integrate::AB_funct, Integrate::AB_funct >> Integrate::getClosestSteadyStateFunctions(\n int& idx) const\n{\n return std::make_pair(a_map_.find(idx)->second,\n std::make_pair(b_map_.find(idx)->second, v_map_.find(idx)->second));\n}\n\nstd::pair<int, std::vector<double>> Integrate::getClosestSteadyState(\n const state_type& x) const\n{\n int min_idx = 0;\n double dist = 0.0;\n double min_dist = 10000000.0;\n double steady_state_val = 0.0;\n for (size_t i = 0; i < steady_states_.size(); i++) {\n dist = 0.0;\n for (size_t j = 0; j < steady_states_[i].size(); j++) {\n if (steady_states_[i][j] == -1) {\n steady_state_val = x[j];\n } else {\n steady_state_val = steady_states_[i][j];\n }\n\n dist += std::pow(x[j] - steady_state_val, 2);\n }\n\n dist = std::sqrt(dist);\n if (dist < min_dist) {\n min_dist = dist;\n min_idx = i;\n }\n }\n return std::make_pair(min_idx, steady_states_[min_idx]);\n}\n\nvoid Integrate::ode_first_order(const state_type& x, state_type& dxdt,\n double t) const\n{\n /**std::vector<double> x_true;\n for (size_t i = 0; i < x.size(); i++) {\n x_true.push_back(x[i] + xstar[i]);\n }\n MatrixXd M = getM0(x_true, rho_, zeta_);\n calc_inverse_inertia_matrix(M);\n MatrixXd res = getFirst0(x_true, rho_, zeta_);\n dxdt.clear();\n for (size_t i = 0; i < res.size(); i++) {\n dxdt.push_back(res(i));\n }*/\n}\n\nvoid Integrate::ode_second_order(const state_type& x, state_type& dxdt,\n double t) const\n{\n /**std::vector<double> x_true;\n for (size_t i = 0; i < x.size(); i++) {\n x_true.push_back(x[i] + xstar[i]);\n }\n MatrixXd M = getM0(x_true, rho_, zeta_);\n calc_inverse_inertia_matrix(M);\n MatrixXd res = getSec0(x_true, rho_, zeta_);\n dxdt.clear();\n for (size_t i = 0; i < res.size(); i++) {\n dxdt.push_back(res(i));\n }*/\n\n}\n\nvoid Integrate::ode(const state_type& x, state_type& dxdt, double t) const\n{\n dxdt.clear();\n if (rbdl_interface_) {\n VectorXd res = VectorXd::Zero(x.size() / 2);\n std::vector<double> q(x.size() / 2);\n std::vector<double> qdot(x.size() / 2);\n std::vector<double> rho(x.size() / 2);\n unsigned int s = x.size() / 2;\n for (size_t i = 0; i < s; i++) {\n q[i] = x[i];\n qdot[i] = x[i + s];\n dxdt.push_back(x[i + s]);\n rho[i] = rho_[i] + zeta_[i];\n }\n\n 
rbdl_interface_->forward_dynamics(q, qdot, rho, res);\n for (size_t i = 0; i < x.size() / 2; i++) {\n dxdt.push_back(res(i));\n }\n\n //return;\n }\n\n dxdt.clear();\n MatrixXd M = getM0(x, rho_, zeta_);\n calc_inverse_inertia_matrix(M);\n MatrixXd res = getF0(x, rho_, zeta_);\n cout << \"res: \" << res << endl;\n sleep(100);\n dxdt.resize(x.size());\n for (size_t i = 0; i < x.size(); i++) {\n dxdt[i] = res(i, 0);\n }\n}\n\nvoid Integrate::odeDelta(const state_type& x, state_type& dxdt, double t) const\n{\n /**dxdt.clear();\n MatrixXd M = getM0(x, rho_, zeta_);\n calc_inverse_inertia_matrix(M);\n Eigen::Map<Eigen::VectorXd> e_vec(x.data(), x.size());\n MatrixXd res = M_inv_ * e_vec;\n dxdt = std::vector<double>(2 * res.size(), 0.0);\n for (size_t i = 0; i < res.size() / 2; ++i) {\n dxdt[i + res.size() / 2] = res[i];\n }*/\n\n}\n\n/**BOOST_PYTHON_MODULE(libintegrate) {\n using namespace boost::python;\n\n class_<std::vector<double> > (\"v_double\")\n .def(vector_indexing_suite<std::vector<double> >());\n\n class_<Integrate>(\"Integrate\", init<>())\n .def(\"doIntegration\", &Integrate::do_integration)\n .def(\"getResult\", &Integrate::getResult)\n .def(\"getProcessMatricesSteadyStates\", &Integrate::getProcessMatricesSteadyStatesVec)\n .def(\"getProcessMatrices\", &Integrate::getProcessMatricesVec)\n .def(\"setGravityConstant\", &Integrate::setGravityConstant)\n ;\n }*/\n\nMatrixXd Integrate::getF0(const state_type& x, const state_type& rho,\n const state_type& zeta) const\n{\n VectorXd m(4);\n m(0, 0) = x[2];\n m(1, 0) = x[3];\n m(2, 0) = M_inv_(0, 0)\n * (g_ * (0.5 * cos(x[1]) + 1.5) * cos(x[0]) + rho[0]\n + 0.5 * x[2] * x[3] * (0.5 * cos(x[1]) + 1.0) * sin(x[1])\n - x[3]\n * (-0.5 * x[2] * (0.5 * cos(x[1]) + 1.0) * sin(x[1])\n + 0.01 * x[3] * sin(2 * x[0])) + zeta[0])\n + M_inv_(0, 1)\n * (-0.5 * g_ * sin(x[0]) * sin(x[1]) + rho[1]\n - 0.01 * x[2] * x[3] * sin(2 * x[0])\n - x[2]\n * (-0.5 * x[2] * (0.5 * cos(x[1]) + 1.0)\n * sin(x[1])\n + 0.01 * x[3] * sin(2 * x[0]))\n + zeta[1]);\n m(3, 0) = M_inv_(1, 0)\n * (g_ * (0.5 * cos(x[1]) + 1.5) * cos(x[0]) + rho[0]\n + 0.5 * x[2] * x[3] * (0.5 * cos(x[1]) + 1.0) * sin(x[1])\n - x[3]\n * (-0.5 * x[2] * (0.5 * cos(x[1]) + 1.0) * sin(x[1])\n + 0.01 * x[3] * sin(2 * x[0])) + zeta[0])\n + M_inv_(1, 1)\n * (-0.5 * g_ * sin(x[0]) * sin(x[1]) + rho[1]\n - 0.01 * x[2] * x[3] * sin(2 * x[0])\n - x[2]\n * (-0.5 * x[2] * (0.5 * cos(x[1]) + 1.0)\n * sin(x[1])\n + 0.01 * x[3] * sin(2 * x[0]))\n + zeta[1]);\n return m;\n\n}\nMatrixXd Integrate::getM0(const state_type& x, const state_type& rho,\n const state_type& zeta) const\n{\n MatrixXd m(2, 2);\n m(0, 0) = -0.25 * pow(sin(x[1]), 2) + 1.0 * cos(x[1]) + 1.62;\n m(0, 1) = 0;\n m(1, 0) = 0;\n m(1, 1) = 0.02 * pow(sin(x[0]), 2) + 0.28;\n return m;\n\n}\nMatrixXd Integrate::getH0(const state_type& x, const state_type& rho,\n const state_type& zeta) const\n{\n MatrixXd m(5, 4);\n m(0, 0) = -(cos(x[1]) + 1) * sin(x[0]);\n m(0, 1) = -sin(x[1]) * cos(x[0]);\n m(0, 2) = 0;\n m(0, 3) = 0;\n m(1, 0) = 0;\n m(1, 1) = cos(x[1]);\n m(1, 2) = 0;\n m(1, 3) = 0;\n m(2, 0) = -(cos(x[1]) + 1) * cos(x[0]);\n m(2, 1) = sin(x[0]) * sin(x[1]);\n m(2, 2) = 0;\n m(2, 3) = 0;\n m(3, 0) = 0;\n m(3, 1) = 0;\n m(3, 2) = 1;\n m(3, 3) = 0;\n m(4, 0) = 0;\n m(4, 1) = 0;\n m(4, 2) = 0;\n m(4, 3) = 1;\n return m;\n\n}\nMatrixXd Integrate::getW0(const state_type& x, const state_type& rho,\n const state_type& zeta) const\n{\n MatrixXd m(5, 5);\n m(0, 0) = 1;\n m(0, 1) = 0;\n m(0, 2) = 0;\n m(0, 3) = 0;\n m(0, 4) = 0;\n m(1, 0) = 0;\n m(1, 1) 
= 1;\n m(1, 2) = 0;\n m(1, 3) = 0;\n m(1, 4) = 0;\n m(2, 0) = 0;\n m(2, 1) = 0;\n m(2, 2) = 1;\n m(2, 3) = 0;\n m(2, 4) = 0;\n m(3, 0) = 0;\n m(3, 1) = 0;\n m(3, 2) = 0;\n m(3, 3) = 1;\n m(3, 4) = 0;\n m(4, 0) = 0;\n m(4, 1) = 0;\n m(4, 2) = 0;\n m(4, 3) = 0;\n m(4, 4) = 1;\n return m;\n\n}\n\n}\n"
},
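getProcessMatrices2 and getProcessMatricesSteadyStatesVec above discretize the linearized dynamics as Ad = exp(t_e * A) and Bd = exp(t_e * A) * power_series_(A, t_e, 20) * B, where power_series_ is a truncated series for the integral of exp(-A*tau) over [0, t_e]; Bd therefore equals the zero-order-hold input matrix (integral of exp(A*s) ds) * B. A small Eigen sketch of that recipe follows, on an illustrative 2x2 system whose matrices and step size are made up for the example.

```cpp
#include <Eigen/Dense>
#include <unsupported/Eigen/MatrixFunctions>  // for MatrixXd::exp()
#include <iostream>

// Truncated power series for int_0^t exp(-A*tau) dtau, mirroring
// Integrate::power_series_: t * sum_{i>=0} (-t*A)^i / (i+1)!.
Eigen::MatrixXd powerSeries(const Eigen::MatrixXd& A, double t, int depth)
{
    Eigen::MatrixXd A_t = -t * A;
    Eigen::MatrixXd A_i = A_t;
    Eigen::MatrixXd term = Eigen::MatrixXd::Identity(A.rows(), A.cols());
    double factorial = 1.0;
    for (int i = 1; i <= depth; ++i) {
        factorial *= static_cast<double>(i + 1);  // running (i+1)!
        term += A_i / factorial;
        A_i *= A_t;
    }
    return t * term;
}

int main()
{
    // Toy 2x2 continuous-time system xdot = A x + B u (values illustrative).
    Eigen::MatrixXd A(2, 2), B(2, 1);
    A << 0.0, 1.0,
         0.0, -0.5;
    B << 0.0,
         1.0;

    double t_e = 0.1;  // discretization step

    // Same recipe as getProcessMatrices2:
    // Ad = exp(t_e A), Bd = exp(t_e A) * int_0^{t_e} exp(-A tau) dtau * B.
    Eigen::MatrixXd Ad = (t_e * A).exp();
    Eigen::MatrixXd Bd = Ad * powerSeries(A, t_e, 20) * B;

    std::cout << "Ad =\n" << Ad << "\nBd =\n" << Bd << std::endl;
    return 0;
}
```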
{
"alpha_fraction": 0.6543859839439392,
"alphanum_fraction": 0.6581871509552002,
"avg_line_length": 35.774192810058594,
"blob_id": "58c2162d8054ac162447df0fc02e9eee60e14327",
"content_id": "d7e5d2e2fc099ae9c08bc46d11cf0075259f4713",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 3420,
"license_type": "no_license",
"max_line_length": 144,
"num_lines": 93,
"path": "/src/DiscreteVectorActionSpace.cpp",
"repo_name": "hoergems/robots",
"src_encoding": "UTF-8",
"text": "#include <robot_headers/ActionSpace.hpp>\n\nnamespace frapu\n{\nDiscreteVectorActionSpace::DiscreteVectorActionSpace(const ActionSpaceInfo& actionSpaceInfo):\n DiscreteActionSpace(actionSpaceInfo),\n actionSpaceInfo_(actionSpaceInfo),\n allActionsOrdered_()\n{\n\n}\n\nvoid DiscreteVectorActionSpace::setActionLimits(frapu::ActionLimitsSharedPtr& actionLimits)\n{\n if (getNumDimensions() == 0) {\n frapu::ERROR(\"DiscreteActionSpace: setActionLimits(): number of dimensions not set. Can't set action limits\");\n }\n ActionSpace::setActionLimits(actionLimits);\n makeAllActionsInOrder(actionSpaceInfo_.numStepsPerDimension);\n}\n\nvoid DiscreteVectorActionSpace::makeAllActionsInOrder(const unsigned int& numStepsPerDimension)\n{\n std::vector<double> lowerLimits;\n std::vector<double> upperLimits;\n unsigned int numDimensions = getNumDimensions();\n ActionLimitsSharedPtr actionLimits = getActionLimits();\n if (!actionLimits) {\n frapu::ERROR(\"action limits is null\");\n }\n \n allActionsOrdered_ = std::vector<frapu::ActionSharedPtr>(std::pow(numStepsPerDimension, numDimensions));\n\n static_cast<VectorActionLimits*>(actionLimits.get())->getRawLimits(lowerLimits, upperLimits);\n for (long code = 0; code < std::pow(numStepsPerDimension, numDimensions); code++) {\n std::vector<double> ks;\n std::vector<double> ss;\n for (size_t i = 0; i < lowerLimits.size(); i++) {\n ks.push_back((upperLimits[i] - lowerLimits[i]) / (numStepsPerDimension - 1));\n }\n\n double j = code;\n double j_old = code;\n double s = 0;\n for (size_t i = lowerLimits.size() - 1; i != (size_t) - 0; i--) {\n double s;\n j = j_old / std::pow(numStepsPerDimension, i);\n modf(j, &s);\n ss.push_back(s);\n if (i != 1) {\n j = (int)(j_old) % (int)std::pow(numStepsPerDimension, i);\n j_old = j;\n }\n }\n\n ss.push_back((int)j_old % numStepsPerDimension);\n std::vector<double> actionValues;\n for (size_t i = 0; i < lowerLimits.size(); i++) {\n actionValues.push_back(lowerLimits[i] + ss[i] * ks[i]);\n }\n\n frapu::ActionSharedPtr action(new frapu::DiscreteVectorAction(actionValues));\n\tstatic_cast<frapu::DiscreteVectorAction*>(action.get())->setBinNumber(code);\n allActionsOrdered_[code] = action; \n }\n}\n\nActionSharedPtr DiscreteVectorActionSpace::sampleUniform(std::default_random_engine* randGen) const\n{\n unsigned int randNum = std::uniform_int_distribution<unsigned int>(0, allActionsOrdered_.size() - 1)(*randGen);\n return allActionsOrdered_[randNum];\n}\n\nstd::vector<frapu::ActionSharedPtr> DiscreteVectorActionSpace::getAllActionsInOrder() const\n{\n if (allActionsOrdered_.size() == 0) {\n frapu::ERROR(\"DiscreteVectorActionSpace: getAllActionsInOrder(): number of actions is 0. Have you forgotten to set the action limits?\");\n }\n return allActionsOrdered_;\n}\n\nfrapu::ActionSharedPtr DiscreteVectorActionSpace::getAction(unsigned int& index) const\n{\n if (index > allActionsOrdered_.size()) {\n frapu::ERROR(\"DiscreteVectorActionSpace: getAction(): action with index \" +\n std::to_string(index) + \" requested, but largest index is \" +\n std::to_string(allActionsOrdered_.size()));\n }\n\n return allActionsOrdered_[index];\n}\n\n}\n"
},
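makeAllActionsInOrder above enumerates numSteps^numDimensions grid actions by decoding each linear code into one step index per dimension. The sketch below performs the same enumeration with straightforward div/mod decoding; the frapu action types are dropped, and the digit ordering chosen here (dimension 0 as the most significant digit) is one reasonable convention that may differ in detail from the code above.

```cpp
#include <cmath>
#include <cstdio>
#include <vector>

// Enumerate all grid actions over [lower_i, upper_i] with `steps` values per
// dimension, decoding each linear code into per-dimension bin indices.
std::vector<std::vector<double>> enumerateActions(const std::vector<double>& lower,
                                                  const std::vector<double>& upper,
                                                  unsigned int steps)
{
    const std::size_t dims = lower.size();
    const long numActions = static_cast<long>(std::pow(steps, dims));
    std::vector<std::vector<double>> actions(numActions);

    for (long code = 0; code < numActions; ++code) {
        std::vector<double> action(dims);
        long remainder = code;
        for (std::size_t i = 0; i < dims; ++i) {
            // Radix of this "digit": steps^(remaining dimensions).
            long radix = static_cast<long>(std::pow(steps, dims - 1 - i));
            long binIndex = remainder / radix;
            remainder %= radix;
            double stepSize = (upper[i] - lower[i]) / (steps - 1);
            action[i] = lower[i] + binIndex * stepSize;
        }
        actions[code] = action;
    }
    return actions;
}

int main()
{
    std::vector<double> lower{-1.0, 0.0};
    std::vector<double> upper{1.0, 2.0};
    for (const auto& a : enumerateActions(lower, upper, 3)) {
        std::printf("%5.2f %5.2f\n", a[0], a[1]);
    }
    return 0;
}
```

Either digit ordering works, as long as getAction(index) and whatever planner consumes the action space agree on the same code-to-action mapping.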
{
"alpha_fraction": 0.7601351141929626,
"alphanum_fraction": 0.7601351141929626,
"avg_line_length": 18.733333587646484,
"blob_id": "141b7cd9dcf7fe2345cde973625ccd3dd185fc3e",
"content_id": "6ba029ead0d9b52784da5b612ab361196c0101cc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 296,
"license_type": "no_license",
"max_line_length": 85,
"num_lines": 15,
"path": "/src/ContinuousActionSpace.cpp",
"repo_name": "hoergems/robots",
"src_encoding": "UTF-8",
"text": "#include <robot_headers/ActionSpace.hpp>\n\nnamespace frapu\n{\nContinuousActionSpace::ContinuousActionSpace(const ActionSpaceInfo &actionSpaceInfo):\n ActionSpace(actionSpaceInfo)\n{\n\n}\n\nstd::string ContinuousActionSpace::getType() const {\n std::string type = \"continuous\";\n return type;\n}\n}\n"
},
{
"alpha_fraction": 0.42052242159843445,
"alphanum_fraction": 0.4416999816894531,
"avg_line_length": 42.692169189453125,
"blob_id": "d49730a296428713d023d131d9bb223988dd4cbb",
"content_id": "f0c85fbe1720529c50957e822733a6e859cbc475",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 48542,
"license_type": "no_license",
"max_line_length": 157,
"num_lines": 1111,
"path": "/src/build_model.py",
"repo_name": "hoergems/robots",
"src_encoding": "UTF-8",
"text": "#from librobot import *\nfrom sympy import *\nimport numpy as np\nimport time\nimport os\nimport sys\nimport argparse\nfrom sympy.printing import print_ccode\nfrom sympy.abc import x\nimport mpmath as mp\n\nfrom scipy.integrate import ode, odeint\nfrom sympy.integrals.trigonometry import trigintegrate\nfrom gi.overrides.keysyms import R10\nfrom urdf_parser_py import urdf\n\nclass Test:\n def __init__(self, model, simplifying, buildcpp, lin_steady_states, xml_file, header_file, source_file):\n t_start = time.time() \n self.simplifying = simplifying\n self.parse_urdf(model, xml_file) \n g_symb = symbols(\"g_\")\n g = Matrix([[0],\n [0],\n [g_symb]])\n\t\n\tprint \"Calc first order derivatives of observation function\" \n\tH, W = self.calc_observation_derivatives()\n \n \"\"\"\n F is a 6 dimensional external force vector (fx, fy, fz, mx, my, mz), consisting of \n pull f and twist m\n \"\"\"\n f_x, f_y, f_z, f_roll, f_pitch, f_yaw = symbols(\"f_x_ f_y_ f_z_ f_roll_ f_pitch_ f_yaw_\")\n F = Matrix([[f_x],\n [f_y],\n [f_z],\n [f_roll],\n [f_pitch],\n [f_yaw]])\n print F.shape\n F = Matrix([0.0, 0.0, 0.0, 0.0, 0.0, 0.0]) \n print F.shape \n \"\"\"\n Get the Jacobians of the links expressed in the robot's base frame\n \"\"\"\n #ee_jacobian = self.get_end_effector_jacobian(self.joint_origins, self.joint_axis, self.q)\n \n print \"Calculating link Jacobian matrices\"\n #Jvs, Ocs = self.get_link_jacobians_new(self.joint_origins, self.inertial_poses, self.joint_axis, self.q) \n Jvs, Ocs = self.get_link_jacobians_old(self.joint_origins, self.inertial_poses, self.joint_axis, self.q)\n \n M_is = self.construct_link_inertia_matrices(self.link_masses, self.Is) \n print \"Calculating inertia matrix\"\n M = self.calc_inertia_matrix(Jvs, M_is)\n \n print \"Inverting inertia matrix\"\n M_inv = self.inertia_inverse(M, symbolic=True)\n #M_inv_s = self.inertial_inverse(M, symbolic=False)\n \n if self.simplifying:\n print \"Simplifying inertia matrix\"\n M = trigsimp(M)\n M = nsimplify(M, tolerance=1e-7) \n print \"Calculating coriolis matrix\"\n C = self.calc_coriolis_matrix(self.q, self.qdot, M)\n if self.simplifying:\n print \"Simplify coriolis matrix\"\n C = trigsimp(C) \n print \"Calculating normal forces\" \n \n N = self.calc_generalized_forces(self.q,\n self.qdot, \n Ocs, \n self.link_masses, \n g,\n self.viscous, \n F) \n if self.simplifying: \n print \"Simplify general forces forces vector\" \n N = trigsimp(N)\n t0 = time.time()\n \n f = self.get_dynamic_model(M, M_inv, C, N, self.q, self.qdot, self.rho, self.zeta) \n \n print \"Build taylor approximation\" \n #steady_states = self.get_steady_states()\n print \"Calculate partial derivatives\" \n A, B, V = self.partial_derivatives2(f) \n #A, B, V = self.partial_derivatives(M_inv, C, N) \n \n print \"Calculate second order Taylor approximation\"\n #First = self.partial_derivatives_first_order(f)\n #Sec = self.partial_derivatives_second_order(f, First) \n \n print \"Clean cpp code\"\n header_src = header_file\n imple_src = source_file\n \n self.clean_cpp_code(header_src, imple_src)\n \n print \"Gen cpp code\"\n if lin_steady_states:\n self.gen_cpp_code_steady_states(steady_states, header_src, imple_src) \n print \"Steady states code generated\"\n print \"Generate cpp code for linearized model...\" \n if lin_steady_states: \n for i in xrange(len(steady_states)):\n A, B, V = self.substitude_steady_states2(A, B, V, steady_states[i])\n self.gen_cpp_code2(A, \"A\" + str(i), header_src, imple_src)\n self.gen_cpp_code2(B, \"B\" + str(i), header_src, 
imple_src)\n self.gen_cpp_code2(V, \"V\" + str(i), header_src, imple_src) \n else:\n self.gen_cpp_code2(A, \"A0\", header_src, imple_src)\n self.gen_cpp_code2(B, \"B0\", header_src, imple_src)\n self.gen_cpp_code2(V, \"V0\", header_src, imple_src)\n self.gen_cpp_code2(M, \"M0\", header_src, imple_src)\n self.gen_cpp_code2(f, \"F0\", header_src, imple_src)\n self.gen_cpp_code2(H, \"H0\", header_src, imple_src)\n self.gen_cpp_code2(W, \"W0\", header_src, imple_src)\n #self.gen_cpp_code2(First, \"First0\", header_src, imple_src)\n #self.gen_cpp_code2(Sec, \"Sec0\", header_src, imple_src)\n #self.gen_cpp_code2(C, \"C0\", header_src, imple_src)\n #self.gen_cpp_code2(N, \"N0\", header_src, imple_src)\n #self.gen_cpp_code2(ee_jacobian, \"EEJacobian\", header_src, imple_src)\n print \"Generating dynamic model took \" + str(time.time() - t_start) + \" seconds\" \n if buildcpp:\n print \"Build c++ code...\"\n cmd = \"cd src/build && cmake -DCMAKE_BUILD_TYPE=Release .. && make -j8\" \n os.system(cmd)\n print \"Done\"\n \n def inertia_inverse(self, M, symbolic=False):\n if symbolic:\n M_inv = Matrix.zeros(M.shape[0], M.shape[1])\n for i in xrange(M.shape[0]):\n for j in xrange(M.shape[1]):\n strr = \"M_inv_(\" + str(i) + \", \" + str(j) + \")\" \n s = Symbol(strr)\n M_inv[i, j] = s\n return M_inv\n else:\n return M.inv()\n \n def get_steady_states(self):\n steady_states = [] \n if len(self.q) == 3:\n if self.joint_origins[0][3] != 0.0:\n ss1 = dict()\n ss2 = dict()\n ss1[self.q[0]] = -np.pi / 2.0\n ss1[self.q[1]] = 0.0\n ss1[self.qdot[0]] = 0.0\n ss1[self.qdot[1]] = 0.0 \n \n ss2[self.q[0]] = np.pi / 2.0 \n ss2[self.q[1]] = 0.0 \n ss2[self.qdot[0]] = 0.0\n ss2[self.qdot[1]] = 0.0 \n steady_states.append(ss1)\n steady_states.append(ss2) \n print \"return 0\"\n return steady_states\n \n if self.joint_origins[1][3] != 0.0: \n ss1 = dict()\n ss2 = dict()\n ss1[self.q[0]] = self.q[0]\n ss1[self.q[1]] = -np.pi / 2.0 \n ss1[self.qdot[0]] = 0.0\n ss1[self.qdot[1]] = 0.0 \n \n ss2[self.q[0]] = self.q[0] \n ss2[self.q[1]] = np.pi / 2.0 \n ss2[self.qdot[0]] = 0.0\n ss2[self.qdot[1]] = 0.0 \n steady_states.append(ss1)\n steady_states.append(ss2) \n print \"return 1\"\n return steady_states\n else:\n ss = dict()\n ss[self.q[0]] = self.q[0] \n ss[self.q[1]] = self.q[1] \n ss[self.qdot[0]] = 0.0\n ss[self.qdot[1]] = 0.0\n print \"return 2\"\n steady_states.append(ss) \n return steady_states\n else:\n if self.joint_origins[0][3] != 0.0:\n ss1 = dict()\n ss2 = dict()\n ss1[self.q[0]] = self.q[0] \n ss1[self.q[1]] = -np.pi / 2.0\n ss1[self.q[2]] = 0.0\n ss1[self.qdot[0]] = 0.0\n ss1[self.qdot[1]] = 0.0\n ss1[self.qdot[2]] = 0.0\n \n ss2[self.q[0]] = self.q[0] \n ss2[self.q[1]] = np.pi / 2.0\n ss2[self.q[2]] = 0.0\n ss2[self.qdot[0]] = 0.0\n ss2[self.qdot[1]] = 0.0\n ss2[self.qdot[2]] = 0.0\n print \"return 3\"\n steady_states.append(ss1)\n steady_states.append(ss2)\n return steady_states\n else:\n ss = dict() \n ss[self.q[0]] = self.q[0]\n ss[self.q[1]] = self.q[1]\n ss[self.q[2]] = self.q[2] \n ss[self.qdot[0]] = 0.0\n ss[self.qdot[1]] = 0.0\n ss[self.qdot[2]] = 0.0\n print \"return 3\"\n steady_states.append(ss)\n return steady_states\n \n print \"simplifying fs...\"\n for i in xrange(len(f)):\n print i\n print f[i, 0]\n f[i, 0] = trigsimp(f[i, 0])\n equations = []\n variables = [] \n for i in xrange(len(f)):\n equations.append(f[i, 0])\n for i in xrange(len(self.q) - 1):\n variables.append(self.q[i])\n for i in xrange(len(self.q) - 1):\n variables.append(self.qdot[i])\n print \"solve...\"\n steady_states = 
solve(equations, variables)\n \n print steady_states\n \n \n return steady_states[0]\n \n def calc_observation_derivatives(self):\t\n\tg = self.dh(0.0, self.joint_origins[0][2], 0.0, 0.0)\t\n\tfor i in xrange(len(self.q) - 1):\n\t g = g * self.dh(self.q[i], 0.0, self.joint_origins[i + 1][0], self.joint_origins[i + 1][3])\n\t g = trigsimp(g)\n\t\n\tg_funct = [g[0, 3], g[1, 3], g[2, 3]]\n\tfor i in xrange(len(self.qdot) - 1):\n\t g_funct.append(self.qdot[i])\n\tg_funct = Matrix(g_funct)\n\t#g_funct = g_funct.T\n\tetas = [symbols(\"eta_[\" + str(i) + \"]\") for i in xrange(g_funct.shape[0])]\t\n\t\n\tfor i in xrange(len(etas)):\n\t g_funct[i] = g_funct[i] + etas[i]\n\tx = [self.q[i] for i in xrange(len(self.q) - 1)]\n\tx.extend([self.qdot[i] for i in xrange(len(self.qdot) - 1)])\n\tH = g_funct.jacobian([x[i] for i in xrange(len(x))])\n\tW = g_funct.jacobian([etas[i] for i in xrange(len(etas))])\n\tH = simplify(H)\n\tH = nsimplify(H, tolerance=1e-4) \n\treturn H, W\n \n def parse_urdf(self, xml_file, file):\n\tr = urdf.Robot.from_xml_string(file)\n #robot = Robot(xml_file)\n self.link_names = [link.name for link in r.links]\n self.joint_names = [r.joints[i].name for i in xrange(len(r.joints))]\n self.joint_types = [joint.type for joint in r.joints]\n self.joint_origins = [Matrix([[joint.origin.xyz[0]],\n\t\t\t\t [joint.origin.xyz[1]],\n\t\t\t\t [joint.origin.xyz[2]],\n\t\t\t\t [joint.origin.rpy[0]],\n\t\t\t\t [joint.origin.rpy[1]],\n\t\t\t\t [joint.origin.rpy[2]]]) for joint in r.joints]\n\tself.joint_axis = [Matrix([[joint.axis[0]],\n\t\t\t [joint.axis[1]],\n\t\t\t [joint.axis[2]]]) for joint in r.joints]\n\tself.viscous = [symbols(\"viscous_[\" + str(i) + \"]\") for i in xrange(len(r.joints) - 1)]\n print \"===================\"\n \n \n self.q = []\n self.qdot = []\n self.qstar = []\n self.qdotstar = []\n self.rho = []\n self.rhostar = []\n self.zeta = []\n self.zetastar = []\n for i in xrange(len(self.joint_names)): \n \n symb_string_q = \"x[\" + str(i) + \"]\"\n symb_string_q_dot = \"x[\" + str(i + len(self.joint_names) - 1) + \"]\"\n symb_string_q_star = \"xstar[\" + str(i) + \"]\"\n symb_string_q_dot_star = \"xstar[\" + str(i + len(self.joint_names) - 1) + \"]\"\n symb_string_r = \"rho[\" + str(i) + \"]\"\n symb_string_r_star = \"rhostar[\" + str(i) + \"]\" \n symb_zeta = \"zeta[\" + str(i) + \"]\"\n symb_zeta_star = \"zetastar[\" + str(i) + \"]\" \n \n \n self.q.append(symbols(symb_string_q))\n self.qdot.append(symbols(symb_string_q_dot))\n self.rho.append(symbols(symb_string_r))\n self.qstar.append(symbols(symb_string_q_star))\n self.qdotstar.append(symbols(symb_string_q_dot_star))\n self.rhostar.append(symbols(symb_string_r_star))\n self.zeta.append(symbols(symb_zeta))\n self.zetastar.append(symbols(symb_zeta_star)) \n \n self.inertial_poses = []\n self.link_masses = []\n self.link_inertias = []\n self.Is = []\n for link in r.links:\n\t if link.inertial != None:\n\t\tself.inertial_poses.append([link.inertial.origin.xyz[0], \n\t\t\t link.inertial.origin.xyz[1],\n\t\t\t link.inertial.origin.xyz[2],\n\t\t\t link.inertial.origin.rpy[0],\n\t\t\t link.inertial.origin.rpy[1],\n\t\t\t link.inertial.origin.rpy[2]])\n\t\tself.link_masses.append(link.inertial.mass)\n\t\tself.link_inertias.append(Matrix(link.inertial.inertia.to_matrix()))\n\t\tself.Is.append([self.link_inertias[-1][i, i] for i in xrange(3)])\n\t else:\n\t\tself.inertial_poses.append([0.0, 0.0, 0.0, 0.0, 0.0, 0.0])\n\t\tself.link_masses.append(0.0)\n\t\tself.link_inertias.append(Matrix([[0.0 for j in xrange(3)] for i in 
xrange(3)]))\n\t\tself.Is.append([0.0 for i in xrange(3)])\n \n def gen_cpp_code_steady_states(self, steady_states, header_src, imple_src):\n lines = list(open(imple_src, 'r'))\n temp_lines = []\n \n idx1 = -1\n idx2 = -1\n breaking = False\n for i in xrange(len(lines)):\n if \"void Integrate::setupSteadyStates() const {\" in lines[i]:\n idx1 = i + 1 \n breaking = True\n elif \"std::pair<Integrate::AB_funct, std::pair<Integrate::AB_funct, Integrate::AB_funct>> Integrate::getClosestSteadyStateFunctions\" in lines[i]:\n idx2 = i - 3 \n if breaking:\n break\n for i in xrange(len(steady_states)):\n line = \"std::vector<double> steady_state_\" + str(i) + \"({\"\n for j in xrange(len(self.q) - 1): \n if steady_states[i][self.q[j]] != self.q[j]:\n line += str(steady_states[i][self.q[j]]) + \", \"\n else:\n line += \"-1, \"\n for j in xrange(len(self.qdot) - 1):\n if steady_states[i][self.qdot[j]] == 0.0:\n line += \"0.0\"\n else:\n line += \"-1\"\n if not j == len(self.q) - 2:\n line += \", \"\n line += \"}); \\n\"\n line += \"steady_states_.push_back(steady_state_\" + str(i) +\"); \\n\"\n line += \"a_map_.insert(std::make_pair(\" + str(i) + \", &Integrate::getA\" + str(i) + \")); \\n\"\n line += \"b_map_.insert(std::make_pair(\" + str(i) + \", &Integrate::getB\" + str(i) + \")); \\n\"\n line += \"v_map_.insert(std::make_pair(\" + str(i) + \", &Integrate::getV\" + str(i) + \")); \\n\"\n temp_lines.append(line) \n temp_lines.append(\"steady_states_setup_ = true; \\n\") \n del lines[idx1:idx2]\n idx = -1\n for i in xrange(len(lines)):\n if \"void Integrate::setupSteadyStates() const {\" in lines[i]:\n idx = i \n lines[idx+1:idx+1] = temp_lines\n os.remove(imple_src) \n with open(imple_src, 'a+') as f:\n for line in lines:\n f.write(line)\n \n def clean_cpp_code(self, header_src, imple_src):\n lines = list(open(imple_src, 'r'))\n lines_header = list(open(header_src, 'r'))\n tmp_lines = []\n idx_pairs = []\n \n idx1 = -1\n idx2 = -1\n breaking = False\n for i in xrange(len(lines)):\n\t if (\"MatrixXd Integrate::getA\" in lines[i] or \n \"MatrixXd Integrate::getB\" in lines[i] or \n \"MatrixXd Integrate::getV\" in lines[i] or\n \"MatrixXd Integrate::getF\" in lines[i] or\n \"MatrixXd Integrate::getM\" in lines[i] or\n \"MatrixXd Integrate::getC\" in lines[i] or\n \"MatrixXd Integrate::getN\" in lines[i] or\n \"MatrixXd Integrate::getH\" in lines[i] or\n \"MatrixXd Integrate::getW\" in lines[i] or\n \"MatrixXd Integrate::getSec\" in lines[i] or\n \"MatrixXd Integrate::getFirst\" in lines[i] or\n \"MatrixXd Integrate::getEEJacobian\" in lines[i] or\n \"MatrixXd Integrate::getMInv\" in lines[i]): \n idx1 = i \n breaking = True\n if \"}\" in lines[i] and breaking:\n idx_pairs.append((idx1, i))\n idx1 = -1\n breaking = False \n for i in xrange(len(lines)):\n app = True\n for j in xrange(len(idx_pairs)):\n if i >= idx_pairs[j][0] and i <= idx_pairs[j][1]:\n app = False\n break \n if app:\n tmp_lines.append(lines[i])\n os.remove(imple_src) \n with open(imple_src, 'a+') as f:\n for line in tmp_lines:\n f.write(line)\n \n tmp_lines = []\n idxs = []\n for i in xrange(len(lines_header)):\n\t if (\"MatrixXd getA\" in lines_header[i] or \n \"MatrixXd getB\" in lines_header[i] or \n \"MatrixXd getV\" in lines_header[i] or\n \"MatrixXd getF\" in lines_header[i] or\n \"MatrixXd getM\" in lines_header[i] or\n \"MatrixXd getC\" in lines_header[i] or\n \"MatrixXd getN\" in lines_header[i] or\n \"MatrixXd getH\" in lines_header[i] or\n \"MatrixXd getW\" in lines_header[i] or\n \"MatrixXd getSec\" in lines_header[i] or\n 
\"MatrixXd getFirst\" in lines_header[i] or\n \"MatrixXd getEEJacobian\" in lines_header[i] or\n \"MatrixXd getMInv\" in lines[i]): \n idxs.append(i)\n for i in xrange(len(lines_header)):\n app = True\n for j in xrange(len(idxs)):\n if i == idxs[j]:\n app = False\n if app:\n tmp_lines.append(lines_header[i])\n \n os.remove(header_src) \n with open(header_src, 'a+') as f:\n for line in tmp_lines:\n f.write(line)\n \n lines = list(open(imple_src, 'r'))\n tmp_lines = []\n idx1 = -1\n idx2 = -1\n breaking = False\n for i in xrange(len(lines)):\n if \"void Integrate::setupSteadyStates() const {\" in lines[i]:\n idx1 = i + 1\n breaking = True\n elif \"std::pair<Integrate::AB_funct, std::pair<Integrate::AB_funct, Integrate::AB_funct>> Integrate::getClosestSteadyStateFunctions\" in lines[i]:\n idx2 = i - 3 \n if breaking:\n break \n del lines[idx1:idx2] \n os.remove(imple_src) \n with open(imple_src, 'a+') as f:\n for line in lines: \n f.write(line)\n \n \n def gen_cpp_code2(self, Matr, name, header_src, imple_src): \n lines = list(open(imple_src, 'r'))\n lines_header = list(open(header_src, 'r'))\n temp_lines = []\n if Matr.shape[1] != 1:\n temp_lines.append(\"MatrixXd m(\" + str(Matr.shape[0]) + \", \" + str(Matr.shape[1]) + \"); \\n\")\n else:\n temp_lines.append(\"VectorXd m(\" + str(Matr.shape[0]) + \"); \\n\") \n for i in xrange(Matr.shape[0]):\n for j in xrange(Matr.shape[1]):\n temp_lines.append(\"m(\" + str(i) + \", \" + str(j) + \") = \" + str(ccode(Matr[i, j])) + \"; \\n\")\n temp_lines.append(\"return m; \\n\")\n idx1 = -1\n idx2 = -1\n breaking = False \n for i in xrange(len(lines)):\n if \"Integrate::get\" + name + \"(const state_type &x, const state_type &rho, const state_type &zeta) const{\" in lines[i]: \n idx1 = i + 1 \n breaking = True\n elif \"}\" in lines[i]:\n idx2 = i - 1\n if breaking:\n break \n if idx1 == -1: \n temp_lines.insert(0, \"MatrixXd Integrate::get\" + name + \"(const state_type &x, const state_type &rho, const state_type &zeta) const{ \\n\") \n temp_lines.append(\"\\n\")\n temp_lines.append(\"} \\n \\n\") \n lines[len(lines) - 2:len(lines) - 1] = temp_lines \n \n temp_lines_header = []\n idx = -1\n for i in xrange(len(lines_header)):\n if \"private:\" in lines_header[i]: \n idx = i\n temp_lines_header.append(\"MatrixXd get\" + str(name) + \"(const state_type &x, const state_type &rho, const state_type &zeta) const; \\n\")\n lines_header[idx+1:idx+1] = temp_lines_header\n \n else: \n del lines[idx1:idx2]\n idx = -1\n for i in xrange(len(lines)):\n if \"Integrate::get\" + name in lines[i]:\n idx = i \n lines[idx:idx] = temp_lines \n os.remove(imple_src)\n os.remove(header_src)\n with open(imple_src, 'a+') as f:\n for line in lines:\n f.write(line)\n with open(header_src, 'a+') as f:\n for line in lines_header:\n f.write(line) \n \n def get_dynamic_model(self, M, M_inv, C, N, thetas, dot_thetas, rs, zetas): \n #print \"time to invert: \" + str(time.time() - t0) \n Thetas = Matrix([[thetas[i]] for i in xrange(len(thetas) - 1)])\n Dotthetas = Matrix([[dot_thetas[i]] for i in xrange(len(dot_thetas) - 1)])\n Rs = Matrix([[rs[i]] for i in xrange(len(rs) - 1)])\n Zetas = Matrix([[zetas[i]] for i in xrange(len(zetas) - 1)])\n print \"Constructing 2nd-order ODE\"\n m_upper = Matrix([[dot_thetas[i]] for i in xrange(len(dot_thetas) - 1)])\n m_lower = 0\n '''if self.simplifying:\n m_lower = trigsimp(-M_inv * trigsimp(C * Dotthetas + N) + M_inv * Rs) \n else:'''\n m_lower = M_inv * ((Rs + Zetas) - C * Dotthetas - N)\n h = m_upper.col_join(m_lower) \n return h\n \n def 
substitude_steady_states2(self, A, B, V, steady_states): \n for i in xrange(len(self.rho)):\n A = A.subs(self.rho[i], 0)\n B = B.subs(self.rho[i], 0)\n V = V.subs(self.rho[i], 0)\n for i in xrange(len(self.zeta)):\n A = A.subs(self.zeta[i], 0)\n B = B.subs(self.zeta[i], 0)\n V = V.subs(self.zeta[i], 0) \n for i in xrange(len(steady_states.keys())):\n A = A.subs(steady_states.keys()[i], steady_states[steady_states.keys()[i]]) \n B = B.subs(steady_states.keys()[i], steady_states[steady_states.keys()[i]])\n V = V.subs(steady_states.keys()[i], steady_states[steady_states.keys()[i]])\n \n return A, B, V \n \n A = zeros(len(self.q) - 1)\n print \"Claculate A_low...\"\n A_low = A1 + A2 + A3 \n A = A.col_join(A_low) \n \n B = eye(len(self.q) - 1)\n B = B.col_join(B1)\n A = A.row_join(B) \n \n B = zeros(len(self.q) - 1)\n B = B.col_join(C1)\n return f, A, B \n \n def partial_derivatives_first_order(self, f):\n vars = [self.q[i] for i in xrange(len(self.q) - 1)] \n vars.extend([self.qdot[i] for i in xrange(len(self.qdot) - 1)])\n vars.extend([self.rho[i] for i in xrange(len(self.rho) - 1)])\n vars.extend([self.zeta[i] for i in xrange(len(self.zeta) - 1)])\n stars = [self.qstar[i] for i in xrange(len(self.qstar) - 1)]\n stars.extend([self.qdotstar[i] for i in xrange(len(self.qdotstar) - 1)])\n stars.extend([self.rhostar[i] for i in xrange(len(self.rhostar) - 1)])\n stars.extend([self.zetastar[i] for i in xrange(len(self.zetastar) - 1)])\n \n A1 = f.jacobian([self.q[i] for i in xrange(len(self.q) - 1)])\n A2 = f.jacobian([self.qdot[i] for i in xrange(len(self.qdot) - 1)])\n B = f.jacobian([self.rho[i] for i in xrange(len(self.rho) - 1)])\n V = f.jacobian([self.zeta[i] for i in xrange(len(self.zeta) - 1)])\n f_temp = f\n for i in xrange(len(self.qdotstar)):\n A1 = A1.subs(self.q[i], self.qstar[i])\n A2 = A2.subs(self.q[i], self.qstar[i])\n B = B.subs(self.q[i], self.qstar[i])\n V = V.subs(self.q[i], self.qstar[i])\n f_temp = f_temp.subs(self.q[i], self.qstar[i])\n \n A1 = A1.subs(self.qdot[i], self.qdotstar[i])\n A2 = A2.subs(self.qdot[i], self.qdotstar[i])\n B = B.subs(self.qdot[i], self.qdotstar[i])\n V = V.subs(self.qdot[i], self.qdotstar[i])\n f_temp = f_temp.subs(self.qdot[i], self.qdotstar[i])\n \n A1 = A1.subs(self.rho[i], self.rhostar[i])\n A2 = A2.subs(self.rho[i], self.rhostar[i])\n B = B.subs(self.rho[i], self.rhostar[i])\n V = V.subs(self.rho[i], self.rhostar[i])\n f_temp = f_temp.subs(self.rho[i], self.rhostar[i])\n \n A1 = A1.subs(self.zeta[i], self.zetastar[i])\n A2 = A2.subs(self.zeta[i], self.zetastar[i])\n B = B.subs(self.zeta[i], self.zetastar[i])\n V = V.subs(self.zeta[i], self.zetastar[i])\n f_temp = f_temp.subs(self.zeta[i], self.zetastar[i])\n \n q_matr = Matrix([[self.q[i]] for i in xrange(len(self.q) - 1)])\n qdot_matr = Matrix([[self.qdot[i]] for i in xrange(len(self.q) - 1)])\n rho_matr = Matrix([[self.rho[i]] for i in xrange(len(self.q) - 1)])\n zeta_matr = Matrix([[self.zeta[i]] for i in xrange(len(self.q) - 1)])\n \n qstar_matr = Matrix([[self.qstar[i]] for i in xrange(len(self.q) - 1)])\n qdotstar_matr = Matrix([[self.qdotstar[i]] for i in xrange(len(self.q) - 1)])\n rhostar_matr = Matrix([[self.rhostar[i]] for i in xrange(len(self.q) - 1)])\n zetastar_matr = Matrix([[self.zetastar[i]] for i in xrange(len(self.q) - 1)])\n m = A1 * (q_matr - qstar_matr) + A2 * (qdot_matr - qdotstar_matr) + B * (rho_matr - rhostar_matr) + V * (zeta_matr - zetastar_matr)\n return m\n \n \n \n \n diff_fs = []\n matr_elems = []\n for k in xrange(len(f)):\n sum1 = 0.0 \n diff_f_first = [] \n 
for i in xrange(len(vars)):\n diff_f = trigsimp(diff(f[k], vars[i])) \n for l in xrange(len(vars)):\n diff_f = diff_f.subs(vars[l], stars[l])\n \n sum1 += diff_f * (vars[i] - stars[i]) \n \n \n #matr_elems.append(sum1 + (1.0 / factorial(2)) * sum2)\n matr_elems.append(sum1)\n m = Matrix(matr_elems)\n '''for i in xrange(len(self.zeta)):\n m = m.subs(self.zeta[i], 0.0) \n m = m.subs(self.zetastar[i], 0.0) '''\n return m \n \n def partial_derivatives_second_order(self, f, First):\n vars = [self.q[i] for i in xrange(len(self.q) - 1)] \n vars.extend([self.qdot[i] for i in xrange(len(self.qdot) - 1)])\n vars.extend([self.rho[i] for i in xrange(len(self.rho) - 1)])\n vars.extend([self.zeta[i] for i in xrange(len(self.zeta) - 1)])\n stars = [self.qstar[i] for i in xrange(len(self.qstar) - 1)]\n stars.extend([self.qdotstar[i] for i in xrange(len(self.qdotstar) - 1)])\n stars.extend([self.rhostar[i] for i in xrange(len(self.rhostar) - 1)])\n stars.extend([self.zetastar[i] for i in xrange(len(self.zetastar) - 1)])\n \n \n diff_fs = []\n matr_elems = []\n for k in xrange(len(f)):\n #sum1 = 0.0\n sum2 = 0.0\n diff_f_first = [] \n for i in xrange(len(vars)):\n for j in xrange(len(vars)): \n diff_f = trigsimp(diff(f[k], vars[i], vars[j]))\n for l in xrange(len(vars)):\n diff_f = diff_f.subs(vars[l], stars[l]) \n for n in xrange(len(self.zetastar)):\n diff_f = diff_f.subs(self.zetastar[n], 0.0) \n \n sum2 += diff_f * (vars[i] - stars[i]) * (vars[j] - stars[j])\n \n \n #matr_elems.append(sum1 + (1.0 / factorial(2)) * sum2)\n matr_elems.append(First[k] + (1.0 / factorial(2)) * sum2)\n m = Matrix(matr_elems)\n '''for i in xrange(len(self.zeta)):\n m = m.subs(self.zeta[i], 0.0) \n m = m.subs(self.zetastar[i], 0.0) '''\n return m \n \n def partial_derivatives2(self, f):\n A1 = f.jacobian([self.q[i] for i in xrange(len(self.q) - 1)])\n A2 = f.jacobian([self.qdot[i] for i in xrange(len(self.qdot) - 1)])\n B = f.jacobian([self.rho[i] for i in xrange(len(self.rho) - 1)])\n C = f.jacobian([self.zeta[i] for i in xrange(len(self.zeta) - 1)])\n A = A1.row_join(A2) \n return A, B, C\n #sleep\n \n def partial_derivatives(self, M_inv, C, N): \n r = Matrix([[self.rho[i]] for i in xrange(len(self.rho) - 1)])\n x1 = Matrix([[self.q[i]] for i in xrange(len(self.q) - 1)])\n x2 = Matrix([[self.qdot[i]] for i in xrange(len(self.qdot) - 1)])\n z = Matrix([[self.zeta[i]] for i in xrange(len(self.zeta) - 1)]) \n A1 = M_inv * r\n A2 = M_inv * z\n A3 = M_inv * (-C * x2)\n A4 = M_inv * (-N)\n \n A1_x1 = A1.jacobian([self.q[i] for i in xrange(len(self.q) - 1)])\n A2_x1 = A2.jacobian([self.q[i] for i in xrange(len(self.q) - 1)])\n A3_x1 = A3.jacobian([self.q[i] for i in xrange(len(self.q) - 1)])\n A4_x1 = A4.jacobian([self.q[i] for i in xrange(len(self.q) - 1)]) \n \n A3_x2 = A3.jacobian([self.qdot[i] for i in xrange(len(self.qdot) - 1)])\n A4_x2 = A4.jacobian([self.qdot[i] for i in xrange(len(self.qdot) - 1)])\n \n A1_r = A1.jacobian([self.rho[i] for i in xrange(len(self.rho) - 1)])\n A2_z = A2.jacobian([self.zeta[i] for i in xrange(len(self.rho) - 1)])\n \n A = zeros(len(self.q) - 1).col_join(A1_x1 + A2_x1 + A3_x1 + A4_x1)\n B = eye(len(self.q) - 1).col_join(A3_x2 + A4_x2)\n A = A.row_join(B)\n \n B = zeros(len(self.q) - 1).col_join(A1_r)\n C = zeros(len(self.q) - 1).col_join(A2_z)\n \n for i in xrange(len(self.zeta)):\n A = A.subs(self.zeta[i], 0.0)\n B = B.subs(self.zeta[i], 0.0)\n C = C.subs(self.zeta[i], 0.0) \n print A\n print B\n return A, B, C\n \n def calc_generalized_forces(self, \n thetas, \n dot_thetas, \n Ocs, \n ms, \n 
g,\n viscous, \n F): \n V = 0.0 \n for i in xrange(len(Ocs)): \n el = ms[i + 1] * g.transpose() * Ocs[i] \n V += el[0] \n N = 0\n if self.simplifying: \n N = Matrix([[trigsimp(diff(V, thetas[i]))] for i in xrange(len(thetas) - 1)]) \n else:\n N = Matrix([[diff(V, thetas[i])] for i in xrange(len(thetas) - 1)]) \n '''\n The joint friction forces\n ''' \n K = N + Matrix([[viscous[i] * dot_thetas[i]] for i in xrange(len(dot_thetas) - 1)]) \n #K = K - ee_jacobian.transpose() * F\n return K \n \n def calc_coriolis_matrix(self, thetas, dot_thetas, M): \n C = Matrix([[0.0 for m in xrange(len(thetas) - 1)] for n in xrange(len(thetas) - 1)])\n for i in xrange(len(thetas) - 1):\n for j in xrange(len(thetas) - 1):\n val = 0.0\n for k in xrange(len(thetas) - 1): \n print (i, j, k)\n if self.simplifying: \n val += trigsimp(self.calc_christoffel_symbol(i, j, k, thetas, M) * dot_thetas[k])\n else: \n val += self.calc_christoffel_symbol(i, j, k, thetas, M) * dot_thetas[k] \n C[i, j] = val \n return C \n \n def calc_christoffel_symbol(self, i, j, k, thetas, M):\n t_i_j_k = 0.0\n if self.simplifying:\n t_i_j_k = 0.5 * (trigsimp(diff(M[i, j], thetas[k])) + \n trigsimp(diff(M[i, k], thetas[j])) -\n trigsimp(diff(M[k, j], thetas[i])))\n else:\n t_i_j_k = 0.5 * (diff(M[i, j], thetas[k]) + \n diff(M[i, k], thetas[j]) -\n diff(M[k, j], thetas[i]))\n return t_i_j_k\n \n def calc_inertia_matrix(self, Jvs, M_is): \n res = Matrix([[0.0 for n in xrange(len(Jvs))] for m in xrange(len(Jvs))])\n for i in xrange(len(Jvs)):\n if self.simplifying:\n res += trigsimp(Jvs[i].transpose() * M_is[i] * Jvs[i])\n else:\n res += Jvs[i].transpose() * M_is[i] * Jvs[i] \n return res\n \n def construct_link_inertia_matrices(self, ms, Is):\n M_is = [Matrix([[ms[i], 0.0, 0.0, 0.0, 0.0, 0.0],\n [0.0, ms[i], 0.0, 0.0, 0.0, 0.0],\n [0.0, 0.0, ms[i], 0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0, Is[i][0], 0.0, 0.0],\n [0.0, 0.0, 0.0, 0.0, Is[i][1], 0.0],\n [0.0, 0.0, 0.0, 0.0, 0.0, Is[i][2]]]) for i in xrange(len(ms))]\n return M_is[1:len(M_is)-1]\n \n def get_link_jacobians_old(self, joint_origins, com_coordinates, axis, thetas): \n \"\"\"\n Vectors from the center of mass to the next joint origin \n \"\"\" \n com_coordinates = [Matrix([[com_coordinates[i][j]] for j in xrange(len(com_coordinates[i]))]) \n for i in xrange(len(com_coordinates))]\n m_to_joint_vectors = [Matrix([[joint_origins[i][0]],\n [joint_origins[i][1]],\n [joint_origins[i][2]]]) - \n Matrix([[com_coordinates[i][0]],\n [com_coordinates[i][1]],\n [com_coordinates[i][2]]]) for i in xrange(1, len(joint_origins))] \n \n \"\"\"\n Transformation matrix from the center of masses to the next joint origins\n \"\"\"\n trans_matrices2 = [self.transform(m_to_joint_vectors[i][0], \n m_to_joint_vectors[i][1], \n m_to_joint_vectors[i][2], \n 0.0, \n 0.0, \n 0.0) for i in xrange(len(m_to_joint_vectors))]\n \n \"\"\"\n Transformations from the link origins to the center of masses\n \"\"\" \n dhcs = [self.transform(com_coordinates[i + 1][0], \n com_coordinates[i + 1][1], \n com_coordinates[i + 1][2], \n joint_origins[i][3] + axis[i][0] * thetas[i], \n joint_origins[i][4] + axis[i][1] * thetas[i], \n joint_origins[i][5] + axis[i][2] * thetas[i]) for i in xrange(len(joint_origins) -1)]\n \n \n \"\"\"\n O and z of the first joint\n \"\"\"\n \n \"\"\"\n Os => Origins of the joints w.r.t. 
to the base frame\n Ocs => Origins of the center of masses w.r.t to the base frame\n \"\"\"\n Os = [Matrix([[joint_origins[0][0]],\n [joint_origins[0][1]],\n [joint_origins[0][2]]])] \n zs = [Matrix([[axis[0][0]],\n [axis[0][1]],\n [axis[0][2]]])]\n Ocs = []\n zcs = []\n I = Matrix([[1.0, 0.0, 0.0, joint_origins[0][0]],\n [0.0, 1.0, 0.0, joint_origins[0][1]],\n [0.0, 0.0, 1.0, joint_origins[0][2]],\n [0.0, 0.0, 0.0, 1.0]])\n res = I\n for i in xrange(len(thetas) - 1):\n res *= dhcs[i] \n res = nsimplify(res, tolerance=1e-4) \n col3 = res.col(2)\n col4 = res.col(3) \n z = Matrix([col3[j] for j in xrange(3)])\n #z = nsimplify(z, tolerance=1e-4)\n O = Matrix([col4[j] for j in xrange(3)])\n if self.simplifying:\n Ocs.append(trigsimp(O))\n else:\n Ocs.append(O)\n zcs.append(z)\n res = res * trans_matrices2[i] \n col3 = res.col(2)\n col4 = res.col(3) \n z = Matrix([col3[j] for j in xrange(3)])\n O = Matrix([col4[j] for j in xrange(3)])\n if self.simplifying:\n Os.append(trigsimp(O))\n else:\n Os.append(O)\n zs.append(z) \n #print [nsimplify(zcs[i], tolerance=1e-4) for i in xrange(len(zcs))] \n \n Jvs = []\n for i in xrange(len(thetas) - 1):\n Jv = Matrix([[0.0 for m in xrange(len(thetas) - 1)] for n in xrange(6)])\n for k in xrange(i + 1): \n r1 = 0.0\n if self.simplifying:\n r1 = trigsimp(Matrix(zcs[i].cross(Ocs[i] - Os[k])))\n else:\n r1 = Matrix(zcs[i].cross(Ocs[i] - Os[k])) \n for t in xrange(3):\n Jv[t, k] = r1[t, 0]\n Jv[t + 3, k] = zcs[i][t, 0]\n if self.simplifying:\n Jvs.append(trigsimp(Jv))\n else:\n Jvs.append(Jv)\n Jvs_new = []\n Ocs_new = [] \n if self.simplifying:\n for i in xrange(len(Jvs)): \n try:\n jv_s = nsimplify(jv_s, [pi])\n Jvs_new.append(jv_s)\n except:\n Jvs_new.append(Jvs[i])\n for i in xrange(len(Ocs)):\n oc_s = Ocs[i]\n try:\n oc_s = nsimplify(oc_s, [pi])\n Ocs_new.append(oc_s) \n except: \n Ocs_new.append(Ocs[i]) \n \n return Jvs_new, Ocs \n \n \n def get_link_jacobians_new(self, joint_origins, com_coordinates, axis, thetas): \n \"\"\"\n Vectors from the center of mass to the next joint origin \n \"\"\"\n com_coordinates = [Matrix([[com_coordinates[i][j]] for j in xrange(len(com_coordinates[i]))]) \n for i in xrange(len(com_coordinates))]\n m_to_joint_vectors = [Matrix([[joint_origins[i][0]],\n [joint_origins[i][1]],\n [joint_origins[i][2]]]) - \n Matrix([[com_coordinates[i][0]],\n [com_coordinates[i][1]],\n [com_coordinates[i][2]]]) for i in xrange(1, len(joint_origins))]\n \n \n \"\"\"\n Os => Origins of the joints w.r.t. 
to the base frame\n Ocs => Origins of the center of masses w.r.t to the base frame\n \"\"\"\n Os = [] \n zs = []\n Ocs = []\n zcs = []\n \n \n #res = self.dh(0.0, joint_origins[0][2], 0.0, joint_origins[0][3])\n res = self.dh(0.0, joint_origins[0][2], 0.0, 0.0)\n col3 = res.col(2) \n col4 = res.col(3)\n z = Matrix([col3[j] for j in xrange(3)]) \n zs.append(z)\n O = Matrix([col4[j] for j in xrange(3)])\n Os.append(trigsimp(O))\n for i in xrange(len(thetas) - 1):\n # Transform to the next joint frame\n res_temp = res * self.dh(thetas[i], 0.0, com_coordinates[i + 1][0], 0.0)\n col3 = res_temp.col(2) \n col4 = res_temp.col(3) \n \n zcs.append(Matrix([col3[j] for j in xrange(3)]))\n Oc = Matrix([col4[j] for j in xrange(3)])\n if self.simplifying:\n Ocs.append(trigsimp(Oc))\n else:\n Ocs.append(Oc)\n \n res = res_temp * self.dh(0.0, 0.0, m_to_joint_vectors[i][0], joint_origins[i + 1][3])\n col3 = res.col(2) \n col4 = res.col(3)\n z = Matrix([col3[j] for j in xrange(3)]) \n zs.append(trigsimp(z))\n O = Matrix([col4[j] for j in xrange(3)])\n Os.append(trigsimp(O))\n \n Jvs = []\n for i in xrange(len(thetas) - 1):\n Jv = Matrix([[0.0 for m in xrange(len(thetas) - 1)] for n in xrange(6)])\n for k in xrange(i + 1): \n r1 = 0.0\n if self.simplifying:\n r1 = trigsimp(Matrix(zs[k].cross(Ocs[i] - Os[k])))\n else:\n r1 = Matrix(zs[k].cross(Ocs[i] - Os[k])) \n for t in xrange(3):\n Jv[t, k] = r1[t, 0]\n Jv[t + 3, k] = zs[k][t, 0]\n if self.simplifying:\n Jvs.append(trigsimp(Jv))\n else:\n Jvs.append(Jv)\n print len(Jvs)\n Jvs_new = []\n Ocs_new = [] \n if self.simplifying:\n for i in xrange(len(Jvs)): \n try:\n jv_s = nsimplify(jv_s, [pi])\n Jvs_new.append(jv_s)\n except:\n Jvs_new.append(Jvs[i])\n for i in xrange(len(Ocs)):\n oc_s = Ocs[i]\n try:\n oc_s = nsimplify(oc_s, [pi])\n Ocs_new.append(oc_s) \n except: \n Ocs_new.append(Ocs[i]) \n return Jvs_new, Ocs\n \n def dh(self, theta, d, a, alpha):\n return Matrix([[cos(theta), -sin(theta) * cos(alpha), sin(theta) * sin(alpha), a * cos(theta)],\n [sin(theta), cos(theta) * cos(alpha), -cos(theta) * sin(alpha), a * sin(theta)],\n [0.0, sin(alpha), cos(alpha), d],\n [0.0, 0.0, 0.0, 1.0]])\n \n def get_end_effector_jacobian(self, joint_origins, axis, thetas): \n I = Matrix([[1.0, 0.0, 0.0, joint_origins[0][0]],\n [0.0, 1.0, 0.0, joint_origins[0][1]],\n [0.0, 0.0, 1.0, joint_origins[0][2]],\n [0.0, 0.0, 0.0, 1.0]])\n \n Os = [Matrix([[joint_origins[0][0]],\n [joint_origins[0][1]],\n [joint_origins[0][2]]])] \n zs = [Matrix([[axis[0][0]],\n [axis[0][1]],\n [axis[0][2]]])]\n res = I\n \n for i in xrange(0, len(thetas) - 1):\n \"\"\"\n Rotation about the joint angle\n \"\"\"\n t1 = self.transform(0.0,\n 0.0,\n 0.0,\n axis[i][0] * thetas[i],\n axis[i][1] * thetas[i],\n axis[i][2] * thetas[i])\n \n \n \"\"\"\n Translation and rotation to the next joint angle\n \"\"\"\n t2 = self.transform(joint_origins[i+1][0], \n joint_origins[i+1][1],\n joint_origins[i+1][2],\n joint_origins[i+1][3], \n joint_origins[i+1][4],\n joint_origins[i+1][5])\n t = t1 * t2\n \n res *= t\n \n \n col3 = res.col(2)\n col4 = res.col(3) \n zs.append(trigsimp(Matrix([col3[j] for j in xrange(3)])))\n Os.append(trigsimp(Matrix([col4[j] for j in xrange(3)])))\n \n \n Jv = Matrix([[0.0 for m in xrange(len(thetas) - 1)] for n in xrange(6)])\n for i in xrange(len(thetas) - 1):\n r1 = 0.0\n if self.simplifying:\n r1 = trigsimp(Matrix(zs[i].cross(Os[-1] - Os[i])))\n else:\n r1 = Matrix(zs[i].cross(Oc[-1] - Os[i])) \n for t in xrange(3):\n Jv[t, i] = r1[t, 0]\n Jv[t + 3, i] = zs[i][t, 0]\n Jv_s = 
nsimplify(Jv, tolerance=1e-4) \n return Jv_s\n \n def transform(self, x, y, z, r, p, ya, verbose=False):\n trans = Matrix([[1.0, 0.0, 0.0, x],\n [0.0, 1.0, 0.0, y],\n [0.0, 0.0, 1.0, z],\n [0.0, 0.0, 0.0, 1.0]])\n roll = Matrix([[1.0, 0.0, 0.0, 0.0],\n [0.0, cos(r), -sin(r), 0.0],\n [0.0, sin(r), cos(r), 0.0],\n [0.0, 0.0, 0.0, 1.0]])\n pitch = Matrix([[cos(p), 0.0, sin(p), 0.0],\n [0.0, 1.0, 0.0, 0.0],\n [-sin(p), 0.0, cos(p), 0.0],\n [0.0, 0.0, 0.0, 1.0]])\n yaw = Matrix([[cos(ya), -sin(ya), 0.0, 0.0],\n [sin(ya), cos(ya), 0.0, 0.0],\n [0.0, 0.0, 1.0, 0.0],\n [0.0, 0.0, 0.0, 1.0]])\n \n \n res = roll * pitch * yaw * trans\n if (verbose == True):\n print \"roll: \" + str(roll)\n print \"pitch: \" + str(pitch)\n print \"yaw: \" + str(yaw)\n print \"res: \" + str(res) \n return res\n \nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='Dynamic model generator.')\n parser.add_argument(\"-s\", \"--simplifying\", \n help=\"Simplify the generated dynamic model\", \n action=\"store_true\")\n parser.add_argument(\"-b\", \"--buildcpp\", \n help=\"Compile the c++ code after generating it\", \n action=\"store_true\")\n parser.add_argument(\"-ss\", \"--steady_states\",\n help=\"Linearize about steady states\",\n action=\"store_true\")\n parser.add_argument(\"-he\", \"--header\", help=\"Path to the robot header file\")\n parser.add_argument(\"-src\", \"--source\", help=\"Path to the robot source file\")\n parser.add_argument(\"-m\", \"--model\", help=\"Path to the robot model file\")\n parser.add_argument('file', type=argparse.FileType('r'), nargs='?', default=None, help='File to load. Use - for stdin')\n args = parser.parse_args()\n xml_file = args.file.read()\n Test(args.model, args.simplifying, args.buildcpp, args.steady_states, xml_file, args.header, args.source)\n"
},
{
"alpha_fraction": 0.6173995733261108,
"alphanum_fraction": 0.6202497482299805,
"avg_line_length": 38.60752868652344,
"blob_id": "599d31ead14aeb4822e452a891747d14fc4079d3",
"content_id": "ef2e70fb3f2d5b42873e79e4eff8b94c56a5c8df",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "CMake",
"length_bytes": 7368,
"license_type": "no_license",
"max_line_length": 121,
"num_lines": 186,
"path": "/CMakeLists.txt",
"repo_name": "hoergems/robots",
"src_encoding": "UTF-8",
"text": "cmake_minimum_required (VERSION 2.6)\n\nproject (robots)\n\nif (NOT CMAKE_BUILD_TYPE)\n set(CMAKE_BUILD_TYPE Release)\nendif()\nset(CMAKE_CXX_FLAGS_DEBUG \"-g -std=c++11 -Og\")\nset(CMAKE_CXX_FLAGS_RELEASE \"-std=c++11 -O3 -DEIGEN_NO_DEBUG -flto\")\nSET(BOOST_MIN_VERSION \"1.54.0\")\nSET(PYTHON_MIN_VERSION \"2.7.0\")\ninclude(FindPkgConfig)\ninclude(GNUInstallDirs)\n\n\nset(USE_OPENRAVE True)\nset(INCLUDE_DIR \"${CMAKE_CURRENT_SOURCE_DIR}/src/include\")\n\nmessage(\"LIBDIR: ${CMAKE_INSTALL_LIBDIR}\")\n\n############################# LOAD FRAPU_CORE #############################\nif(PKG_CONFIG_FOUND)\n pkg_check_modules(FRAPU_CORE frapu_core)\n if(NOT FRAPU_CORE_FOUND)\n message(FATAL_ERROR \"FRAPU_CORE could not be found\")\n endif()\n include_directories(${FRAPU_CORE_INCLUDE_DIRS})\n link_directories(${FRAPU_CORE_LIBRARY_DIRS}) \n message(\"-- FRAPU_CORE LIB DIRS ${FRAPU_CORE_LIBRARY_DIRS}\") \nendif()\n\n############################# LOAD ROBOT_HEADERS #############################\nif(PKG_CONFIG_FOUND)\n pkg_check_modules(ROBOT_HEADERS robot_headers)\n if(NOT ROBOT_HEADERS_FOUND)\n message(FATAL_ERROR \"ROBOT_HEADERS could not be found\")\n endif()\n include_directories(${ROBOT_HEADERS_INCLUDE_DIRS})\n link_directories(${ROBOT_HEADERS_LIBRARY_DIRS}) \n message(\"-- ROBOT_HEADERS LIB DIRS ${ROBOT_HEADERS_LIBRARY_DIRS}\") \nendif()\n \n############################# LOAD OPENRAVE #############################\nif(PKG_CONFIG_FOUND)\n pkg_check_modules(OPENRAVE openrave0.9-core)\n if(NOT OPENRAVE_FOUND)\n message(\"-- OpenRAVE 0.9 could not be found. Compliling without viewer support\")\n set(USE_OPENRAVE False) \n endif()\n include_directories(${OPENRAVE_INCLUDE_DIRS})\n link_directories(${OPENRAVE_LIBRARY_DIRS}) \n endif() \n \nif(PKG_CONFIG_FOUND) \n\tpkg_check_modules(VIEWER viewer_interface)\n\tif(NOT VIEWER_FOUND)\n\t message(\"-- viewer_interface could not be found. 
compiling without viewer support\")\n\t set(USE_OPENRAVE False)\t \n\tendif()\t\n\tinclude_directories(${VIEWER_INCLUDE_DIRS})\n\tlink_directories(${VIEWER_LIBRARY_DIRS})\nendif()\n\nif(USE_OPENRAVE)\nadd_definitions(-DUSE_OPENRAVE)\nendif() \n\n############################# LOAD PYTHON #############################\n#if(PKG_CONFIG_FOUND) \n# pkg_check_modules(PYTHON python-2.7)\n# if(NOT PYTHON_FOUND)\n# message(FATAL_ERROR \"Python could not be found\")\n# endif()\n# include_directories(${PYTHON_INCLUDE_DIRS})\n# link_directories(${PYTHON_LIBRARY_DIRS}) \n#endif()\n\n############################# LOAD BOOST #############################\nfind_package(Boost ${BOOST_MIN_VERSION} REQUIRED COMPONENTS system thread timer)\nif (NOT Boost_FOUND)\n set(Boost_INCLUDE_DIRS \"${BOOST_INCLUDE_DIRS}\")\n IF ( NOT Boost_INCLUDE_DIRS )\n message(FATAL_ERROR \"Please point the environment variable BOOST_INCLUDE_DIRS to the include directory of Boost\")\n ENDIF()\nendif ()\ninclude_directories(${Boost_INCLUDE_DIRS})\nlink_directories(${Boost_LIBRARY_DIRS})\n\n############################# LOAD FCL #############################\nif(PKG_CONFIG_FOUND)\n pkg_check_modules(FCL fcl)\n if(NOT FCL_FOUND)\n message(FATAL_ERROR \"FCL could not be found\")\n endif()\n include_directories(${FCL_INCLUDE_DIRS})\n link_directories(${FCL_LIBRARY_DIRS}) \n message(\"-- FCL LIB DIRS ${FCL_LIBRARY_DIRS}\") \nendif()\n\n############################# LOAD RBDL #############################\nif(PKG_CONFIG_FOUND) \n pkg_check_modules(RBDL rbdl)\n if(NOT RBDL_FOUND)\n message(FATAL_ERROR \"RBDL could not be found\")\n endif()\n include_directories(${RBDL_INCLUDE_DIRS})\n link_directories(${RBDL_LIBRARY_DIRS})\n message(\"RBDL include: ${RBDL_INCLUDE_DIRS}\") \nendif()\n\n############################# LOAD RBDL_INTERFACE #############################\nif(PKG_CONFIG_FOUND) \n pkg_check_modules(RBDL_INTERFACE rbdl_interface)\n if(NOT RBDL_FOUND)\n message(FATAL_ERROR \"RBDL_INTERFACE could not be found\")\n endif()\n include_directories(${RBDL_INTERFACE_INCLUDE_DIRS})\n link_directories(${RBDL_INTERFACE_LIBRARY_DIRS}) \n message(\"RBDL include: ${RBDL_INTERFACE_INCLUDE_DIRS}\") \nendif()\n\n############################# LOAD TINYXML #############################\nif(PKG_CONFIG_FOUND) \n pkg_check_modules(TINYXML tinyxml)\n if(NOT TINYXML_FOUND)\n message(FATAL_ERROR \"TINYXML could not be found\")\n endif()\n include_directories(${TINYXML_INCLUDE_DIRS}) \n link_directories(${TINYXML_LIBRARY_DIRS}) \nendif()\n\n\n############################# LOAD EIGEN #############################\nif(PKG_CONFIG_FOUND)\n pkg_check_modules(EIGEN eigen3)\n if(NOT EIGEN_FOUND)\n message(FATAL_ERROR \"EIGEN could not be found\")\n endif()\n include_directories(${EIGEN_INCLUDE_DIRS})\n link_directories(${EIGEN_LIBRARY_DIRS}) \nendif()\n\nadd_library(robots SHARED \n ${CMAKE_CURRENT_SOURCE_DIR}/src/robot.cpp\n ${CMAKE_CURRENT_SOURCE_DIR}/src/Manipulator/ManipulatorRobot.cpp\n ${CMAKE_CURRENT_SOURCE_DIR}/src/Manipulator/ManipulatorRobotLinear.cpp\n ${CMAKE_CURRENT_SOURCE_DIR}/src/Manipulator/Kinematics.cpp\n ${CMAKE_CURRENT_SOURCE_DIR}/src/Manipulator/ManipulatorPropagator.cpp\n ${CMAKE_CURRENT_SOURCE_DIR}/src/Manipulator/ManipulatorPropagatorLinear.cpp\n ${CMAKE_CURRENT_SOURCE_DIR}/src/Manipulator/ManipulatorIntegrator.cpp\n ${CMAKE_CURRENT_SOURCE_DIR}/src/Dubin/DubinRobot.cpp\n ${CMAKE_CURRENT_SOURCE_DIR}/src/Dubin/DubinPropagator.cpp\n ${CMAKE_CURRENT_SOURCE_DIR}/src/AUV/auv.cpp\n ${CMAKE_CURRENT_SOURCE_DIR}/src/AUV/AUVPropagator.cpp\n 
${CMAKE_CURRENT_SOURCE_DIR}/src/Airplane/AirplanePropagator.cpp\n ${CMAKE_CURRENT_SOURCE_DIR}/src/Airplane/AirplaneIntegrator.cpp\n ${CMAKE_CURRENT_SOURCE_DIR}/src/Homecare/Homecare.cpp\n ${CMAKE_CURRENT_SOURCE_DIR}/src/propagator.cpp\n ${CMAKE_CURRENT_SOURCE_DIR}/src/ActionSpace.cpp\n ${CMAKE_CURRENT_SOURCE_DIR}/src/DiscreteActionSpace.cpp\n ${CMAKE_CURRENT_SOURCE_DIR}/src/DiscreteVectorActionSpace.cpp\n ${CMAKE_CURRENT_SOURCE_DIR}/src/ContinuousActionSpace.cpp\n ${CMAKE_CURRENT_SOURCE_DIR}/src/ContinuousVectorActionSpace.cpp\n ${CMAKE_CURRENT_SOURCE_DIR}/src/ObservationSpace.cpp\n ${CMAKE_CURRENT_SOURCE_DIR}/src/DiscreteObservationSpace.cpp\n ${CMAKE_CURRENT_SOURCE_DIR}/src/ContinuousObservationSpace.cpp)\n \ntarget_link_libraries (robots\n ${Boost_LIBRARIES}\n ${PYTHON_LIBRARIES}\n ${TINYXML_LIBRARIES}\n ${FCL_LIBRARIES}\n ${RBDL_INTERFACE_LIBRARIES}\n ${VIEWER_LIBRARIES})\n \nset(pkg_conf_file_in \"${CMAKE_CURRENT_SOURCE_DIR}/robots.pc.in\")\nset(pkg_conf_file_out \"${CMAKE_CURRENT_BINARY_DIR}/robots.pc\")\nconfigure_file(\"${pkg_conf_file_in}\" \"${pkg_conf_file_out}\" @ONLY)\n\ninstall(DIRECTORY ${INCLUDE_DIR}/ DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/robots\n FILES_MATCHING PATTERN \"*.h\" PATTERN \"*.hxx\" PATTERN \"*.hpp\"\n PATTERN \".DS_Store\" EXCLUDE\n)\n\ninstall(TARGETS robots DESTINATION ${CMAKE_INSTALL_LIBDIR})\ninstall(FILES \"${pkg_conf_file_out}\" DESTINATION ${CMAKE_INSTALL_LIBDIR}/pkgconfig/ COMPONENT pkgconfig)\n\n"
},
{
"alpha_fraction": 0.7534722089767456,
"alphanum_fraction": 0.7534722089767456,
"avg_line_length": 18.200000762939453,
"blob_id": "0bcd6164d800683f9ab9e4d744b8c085a1270126",
"content_id": "8ab201b166287da5cbbc54213bf8bcc62e49397d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 288,
"license_type": "no_license",
"max_line_length": 81,
"num_lines": 15,
"path": "/src/DiscreteActionSpace.cpp",
"repo_name": "hoergems/robots",
"src_encoding": "UTF-8",
"text": "#include <robot_headers/ActionSpace.hpp>\n\nnamespace frapu\n{\nDiscreteActionSpace::DiscreteActionSpace(const ActionSpaceInfo &actionSpaceInfo):\n ActionSpace(actionSpaceInfo)\n{\n\n}\n\nstd::string DiscreteActionSpace::getType() const {\n std::string type = \"discrete\";\n return type;\n}\n}\n"
},
{
"alpha_fraction": 0.6626580953598022,
"alphanum_fraction": 0.6657682061195374,
"avg_line_length": 27.70833396911621,
"blob_id": "5ddfd56e980f8711184a5b29d5e0e446ed496454",
"content_id": "a195c43beb702ba93ffbb1d19257cd0ec17ffafb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 4823,
"license_type": "no_license",
"max_line_length": 106,
"num_lines": 168,
"path": "/src/Homecare/Homecare.cpp",
"repo_name": "hoergems/robots",
"src_encoding": "UTF-8",
"text": "#include <robot_headers/Homecare/Homecare.hpp>\n\nnamespace frapu\n{\nHomecare::Homecare(std::string robotFile, std::string configFile):\n Robot(robotFile, configFile),\n lowerStateLimits_(),\n upperStateLimits_(),\n lowerControlLimits_(),\n upperControlLimits_()\n{\n lowerStateLimits_.push_back(-1.0);\n lowerStateLimits_.push_back(-1.0);\n upperStateLimits_.push_back(1.0);\n upperStateLimits_.push_back(1.0);\n\n lowerControlLimits_.push_back(1.0);\n upperControlLimits_.push_back(5.0);\n}\n\nstd::string Homecare::getName() const {\n std::string name = \"Homecare\";\n return name;\n}\n\nbool Homecare::propagateState(const frapu::RobotStateSharedPtr& state,\n const frapu::ActionSharedPtr& action,\n double duration,\n double simulationStepSize,\n frapu::RobotStateSharedPtr& result)\n{\n//TODO\n}\n\nbool Homecare::propagateState(const frapu::RobotStateSharedPtr& state,\n const frapu::ActionSharedPtr& action,\n const std::vector<double> controlError,\n double duration,\n double simulationStepSize,\n frapu::RobotStateSharedPtr& result)\n{\n//TODO\n}\n\n\nbool Homecare::makeStateSpace()\n{\n stateSpace_ = std::make_shared<frapu::VectorStateSpace>(4);\n frapu::StateLimitsSharedPtr stateLimits =\n std::make_shared<frapu::VectorStateLimits>(lowerStateLimits_, upperStateLimits_);\n stateSpace_->setStateLimits(stateLimits);\n}\n\nbool Homecare::makeActionSpace(const frapu::ActionSpaceInfo& actionSpaceInfo)\n{\n actionSpace_ = std::make_shared<frapu::DiscreteVectorActionSpace>(actionSpaceInfo);\n unsigned int numDimensions = 1;\n actionSpace_->setNumDimensions(numDimensions);\n frapu::ActionLimitsSharedPtr actionLimits =\n std::make_shared<frapu::VectorActionLimits>(lowerControlLimits_, upperControlLimits_);\n actionSpace_->setActionLimits(actionLimits);\n return true;\n}\n\nbool Homecare::makeObservationSpace(const frapu::ObservationSpaceInfo& observationSpaceInfo)\n{\n observationSpace_ = std::make_shared<frapu::DiscreteObservationSpace>(observationSpaceInfo);\n observationSpace_->setDimension(6);\n std::vector<std::vector<double>> observations;\n\n // Get the observations using a serializer\n\n static_cast<frapu::DiscreteObservationSpace*>(observationSpace_.get())->addObservations(observations);\n return true;\n}\n\nbool Homecare::getObservation(const frapu::RobotStateSharedPtr& state,\n frapu::ObservationSharedPtr& observation) const\n{\n //TODO\n}\n\nbool Homecare::getObservation(const frapu::RobotStateSharedPtr& state,\n std::vector<double>& observationError,\n frapu::ObservationSharedPtr& observation) const\n{\n //TODO\n}\n\nvoid Homecare::createRobotCollisionObjects(const frapu::RobotStateSharedPtr state,\n std::vector<frapu::CollisionObjectSharedPtr>& collision_objects) const\n{\n //TODO\n}\n\nint Homecare::getDOF() const\n{\n //TODO\n}\n\nvoid Homecare::makeNextStateAfterCollision(const frapu::RobotStateSharedPtr& previousState,\n const frapu::RobotStateSharedPtr& collidingState,\n frapu::RobotStateSharedPtr& nextState)\n{\n //TODO\n}\n\nvoid Homecare::getLinearProcessMatrices(const frapu::RobotStateSharedPtr& state,\n const frapu::ActionSharedPtr& control,\n double& duration,\n std::vector<Eigen::MatrixXd>& matrices) const\n{\n//TODO\n}\n\nvoid Homecare::getLinearObservationDynamics(const frapu::RobotStateSharedPtr& state,\n Eigen::MatrixXd& H,\n Eigen::MatrixXd& W) const\n{\n//TODO\n}\n\nbool Homecare::isTerminal(const frapu::RobotStateSharedPtr& state) const\n{\n//TODO\n}\n\ndouble Homecare::distanceGoal(const frapu::RobotStateSharedPtr& state) const\n{\n//TODO\n}\n\nvoid 
Homecare::makeProcessDistribution(Eigen::MatrixXd& mean,\n Eigen::MatrixXd& covariance_matrix)\n{\n//TODO\n}\n\nvoid Homecare::makeObservationDistribution(Eigen::MatrixXd& mean,\n Eigen::MatrixXd& covariance_matrix)\n{\n//TODO\n}\n\nvoid Homecare::transformToObservationSpace(const frapu::RobotStateSharedPtr& state,\n frapu::ObservationSharedPtr& res) const\n{\n\n}\n\nvoid Homecare::updateViewer(const frapu::RobotStateSharedPtr& state,\n std::vector<frapu::RobotStateSharedPtr>& particles,\n std::vector<std::vector<double>>& particleColors)\n{\n//TODO\n}\n\nfrapu::RobotStateSharedPtr Homecare::sampleInitialState() const\n{\n//TODO\n}\n\nfrapu::HeuristicFunctionSharedPtr Homecare::makeHeuristicFunction() const\n{\n//TODO\n}\n\n}\n"
},
{
"alpha_fraction": 0.5969974994659424,
"alphanum_fraction": 0.6028357148170471,
"avg_line_length": 30.387434005737305,
"blob_id": "da636898f06fbdcdb2078093616c38dbf15c7f7e",
"content_id": "d23ea0c570a1d84aac85aa31da0432aa1eafa688",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 5995,
"license_type": "no_license",
"max_line_length": 109,
"num_lines": 191,
"path": "/src/Manipulator/ManipulatorPropagator.cpp",
"repo_name": "hoergems/robots",
"src_encoding": "UTF-8",
"text": "#include <robot_headers/Manipulator/ManipulatorPropagator.hpp>\n#include <iostream>\n\nusing std::cout;\nusing std::endl;\n\nnamespace frapu\n{\n\nManipulatorPropagator::ManipulatorPropagator():\n frapu::Propagator(),\n integrator_(new Integrate())\n{\n}\n\n\nstd::shared_ptr<Integrate> ManipulatorPropagator::getIntegrator()\n{\n return integrator_;\n}\n\nbool ManipulatorPropagator::propagate_linear(const std::vector<double>& current_joint_values,\n const std::vector<double>& control,\n const std::vector<double>& control_error,\n const double duration,\n std::vector<double>& result)\n{\n\n std::vector<double> c;\n bool allZeros = true;\n\n for (size_t i = 0; i < control.size(); i++) {\n if (control[i] != 0) {\n allZeros = false;\n c.push_back(1.0);\n } else {\n // Add no uncertainty if joint input is 0\n c.push_back(0.0);\n }\n }\n\n if (allZeros) {\n return false;\n }\n\n for (size_t i = 0; i < control.size(); i++) {\n result.push_back(current_joint_values[i] +\n duration * control[i] +\n c[i] * control_error[i]);\n }\n for (size_t i = 0; i < control.size(); i++) {\n result.push_back(0.0);\n }\n return true;\n\n}\n\nbool ManipulatorPropagator::propagate_nonlinear_first_order(const std::vector<double>& current_joint_values,\n const std::vector<double>& current_joint_velocities,\n std::vector<double>& control,\n std::vector<double>& control_error_vec,\n std::vector<double>& nominal_state,\n std::vector<double>& nominal_control,\n const double simulation_step_size,\n const double duration,\n std::vector<double>& result)\n{\n std::vector<double> state;\n\n for (size_t i = 0; i < current_joint_values.size(); i++) {\n state.push_back(current_joint_values[i]);\n }\n for (size_t i = 0; i < current_joint_values.size(); i++) {\n state.push_back(current_joint_velocities[i]);\n }\n\n std::vector<double> integration_result;\n std::vector<double> inte_times( {0.0, duration, simulation_step_size});\n integrator_->do_integration_first_order(state,\n control,\n control_error_vec,\n nominal_state,\n nominal_control,\n inte_times,\n integration_result);\n\n std::vector<double> newJointValues;\n std::vector<double> newJointVelocities;\n\n for (size_t i = 0; i < integration_result.size() / 2; i++) {\n newJointValues.push_back(integration_result[i]);\n }\n\n for (size_t i = integration_result.size() / 2; i < integration_result.size(); i++) {\n newJointVelocities.push_back(integration_result[i]);\n }\n\n for (size_t i = 0; i < newJointValues.size(); i++) {\n result.push_back(newJointValues[i]);\n }\n\n for (size_t i = 0; i < newJointVelocities.size(); i++) {\n result.push_back(newJointVelocities[i]);\n }\n\n return true;\n\n}\n\nbool ManipulatorPropagator::propagate_nonlinear_second_order(const std::vector<double>& current_joint_values,\n const std::vector<double>& current_joint_velocities,\n std::vector<double>& control,\n std::vector<double>& control_error_vec,\n std::vector<double>& nominal_state,\n std::vector<double>& nominal_control,\n const double simulation_step_size,\n const double duration,\n std::vector<double>& result)\n{\n std::vector<double> state;\n\n for (size_t i = 0; i < current_joint_values.size(); i++) {\n state.push_back(current_joint_values[i]);\n }\n for (size_t i = 0; i < current_joint_values.size(); i++) {\n state.push_back(current_joint_velocities[i]);\n }\n\n std::vector<double> integration_result;\n std::vector<double> inte_times( {0.0, duration, simulation_step_size});\n integrator_->do_integration_second_order(state,\n control,\n control_error_vec,\n nominal_state,\n 
nominal_control,\n inte_times,\n integration_result);\n\n std::vector<double> newJointValues;\n std::vector<double> newJointVelocities;\n\n for (size_t i = 0; i < integration_result.size() / 2; i++) {\n newJointValues.push_back(integration_result[i]);\n }\n\n for (size_t i = integration_result.size() / 2; i < integration_result.size(); i++) {\n newJointVelocities.push_back(integration_result[i]);\n }\n\n for (size_t i = 0; i < newJointValues.size(); i++) {\n result.push_back(newJointValues[i]);\n }\n\n for (size_t i = 0; i < newJointVelocities.size(); i++) {\n result.push_back(newJointVelocities[i]);\n }\n\n return true;\n\n}\n\nbool ManipulatorPropagator::propagateState(const std::vector<double>& currentState,\n const std::vector<double>& control,\n const std::vector<double>& control_error,\n const double& duration,\n const double& simulation_step_size,\n std::vector<double>& result)\n{\n std::vector<double> state = currentState;\n std::vector<double> integration_result;\n std::vector<double> inte_times( {0.0, duration, simulation_step_size});\n integrator_->do_integration(state, control, control_error, inte_times, integration_result);\n result = integration_result;\n return true;\n}\n\n/**BOOST_PYTHON_MEMBER_FUNCTION_OVERLOADS(propagate_nonlinear_overload, propagateState, 6, 6);\nBOOST_PYTHON_MEMBER_FUNCTION_OVERLOADS(propagate_linear_overload, propagate_linear, 5, 5);\n\nBOOST_PYTHON_MODULE(libpropagator)\n{\n using namespace boost::python;\n\n class_<ManipulatorPropagator>(\"ManipulatorPropagator\", init<>())\n .def(\"propagateState\", &Propagator::propagateState, propagate_nonlinear_overload())\n .def(\"propagateLinear\", &ManipulatorPropagator::propagate_linear, propagate_linear_overload()) \n ;\n}*/\n\n\n}\n"
},
{
"alpha_fraction": 0.5955346822738647,
"alphanum_fraction": 0.6216050386428833,
"avg_line_length": 36.76606750488281,
"blob_id": "38d1e621b09bb58815626b12380a95d4ae357028",
"content_id": "a8fb408b90d8b111b00fd575f20bd51d4982ba66",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 14691,
"license_type": "no_license",
"max_line_length": 159,
"num_lines": 389,
"path": "/src/Dubin/DubinRobot.cpp",
"repo_name": "hoergems/robots",
"src_encoding": "UTF-8",
"text": "#include <robot_headers/Dubin/DubinRobot.hpp>\n\nnamespace frapu\n{\n\nDubinRobot::DubinRobot(std::string robotFile, std::string configFile):\n Robot(robotFile, configFile),\n dim_x_(0.0),\n dim_y_(0.0),\n dim_z_(0.0),\n d_(0.0),\n beacons_(),\n initialState_(nullptr)\n{\n //Dimensions\n dim_x_ = 0.06;\n dim_y_ = 0.035;\n dim_z_ = 0.005;\n\n //Distance between axels\n d_ = 0.11;\n\n serializer_ = std::make_shared<frapu::DubinSerializer>();\n propagator_ = std::make_shared<frapu::DubinPropagator>();\n static_cast<frapu::DubinPropagator*>(propagator_.get())->setD(d_);\n\n //make the state limits\n lowerStateLimits_.clear();\n upperStateLimits_.clear();\n\n lowerStateLimits_.push_back(-1.0);\n lowerStateLimits_.push_back(-1.0);\n lowerStateLimits_.push_back(-3.14);\n lowerStateLimits_.push_back(-0.2);\n\n upperStateLimits_.push_back(1.0);\n upperStateLimits_.push_back(1.0);\n upperStateLimits_.push_back(3.14);\n upperStateLimits_.push_back(0.2);\n\n //make the control limits\n lowerControlLimits_.clear();\n upperControlLimits_.clear();\n\n lowerControlLimits_.push_back(0.0);\n lowerControlLimits_.push_back(-1.0);\n\n upperControlLimits_.push_back(1.0);\n upperControlLimits_.push_back(1.0);\n\n // put the beacons in the evironment\n Beacon b0(-0.7, 0.7);\n Beacon b1(0.7, -0.7);\n beacons_ = std::vector<Beacon>( {b0, b1});\n\n std::ifstream input(configFile);\n initialState_ = static_cast<frapu::DubinSerializer*>(serializer_.get())->loadInitalState(input);\n}\n\nstd::string DubinRobot::getName() const\n{\n std::string name = \"Dubin\";\n return name;\n}\n\nfrapu::HeuristicFunctionSharedPtr DubinRobot::makeHeuristicFunction() const\n{\n frapu::HeuristicFunctionSharedPtr heuristicFunction = std::make_shared<RRTHeuristicFunction>();\n auto terminalFunction = std::bind(&DubinRobot::isTerminal, this, std::placeholders::_1);\n heuristicFunction->setTerminalFunction(terminalFunction);\n return heuristicFunction;\n}\n\nfrapu::RobotStateSharedPtr DubinRobot::sampleInitialState() const\n{\n return initialState_;\n}\n\nvoid DubinRobot::createRobotCollisionObjects(const frapu::RobotStateSharedPtr state,\n std::vector<frapu::CollisionObjectSharedPtr>& collision_objects) const\n{\n std::vector<double> stateVec = static_cast<const frapu::VectorState*>(state.get())->asVector();\n double x = stateVec[0];\n double y = stateVec[1];\n double theta = stateVec[2];\n\n fcl::Matrix3f rot_matrix(cos(theta), -sin(theta), 0.0,\n sin(theta), cos(theta), 0.0,\n 0.0, 0.0, 1.0);\n fcl::Vec3f trans_vec(x, y, 0.01 + dim_z_ / 2.0);\n fcl::Transform3f trans(rot_matrix, trans_vec);\n fcl::AABB link_aabb(fcl::Vec3f(-dim_x_ / 2.0,\n -dim_y_ / 2.0,\n -dim_z_ / 2.0),\n fcl::Vec3f(dim_x_ / 2.0,\n dim_y_ / 2.0,\n dim_z_ / 2.0));\n fcl::Box* box = new fcl::Box();\n fcl::Transform3f box_tf;\n fcl::constructBox(link_aabb, trans, *box, box_tf);\n std::shared_ptr<fcl::CollisionObject> coll_obj =\n std::make_shared<fcl::CollisionObject>(boost::shared_ptr<fcl::CollisionGeometry>(box), box_tf);\n collision_objects.push_back(coll_obj);\n}\n\nbool DubinRobot::makeStateSpace()\n{\n stateSpace_ = std::make_shared<frapu::VectorStateSpace>(4);\n frapu::StateLimitsSharedPtr stateLimits =\n std::make_shared<frapu::VectorStateLimits>(lowerStateLimits_, upperStateLimits_);\n stateSpace_->setStateLimits(stateLimits);\n}\n\nvoid DubinRobot::makeGoal()\n{\n goal_ = std::make_shared<frapu::SphereGoal>(goal_position_, goal_radius_);\n}\n\nbool DubinRobot::makeActionSpace(const frapu::ActionSpaceInfo& actionSpaceInfo)\n{\n if (actionSpaceInfo.type == 
\"continuous\") {\n actionSpace_ = std::make_shared<frapu::ContinuousVectorActionSpace>(actionSpaceInfo);\n } else {\n actionSpace_ = std::make_shared<frapu::DiscreteVectorActionSpace>(actionSpaceInfo);\n }\n\n unsigned int numDimensions = 2;\n actionSpace_->setNumDimensions(numDimensions);\n\n frapu::ActionLimitsSharedPtr actionLimits =\n std::make_shared<frapu::VectorActionLimits>(lowerControlLimits_, upperControlLimits_);\n actionSpace_->setActionLimits(actionLimits);\n}\n\nbool DubinRobot::makeObservationSpace(const frapu::ObservationSpaceInfo& observationSpaceInfo)\n{\n observationSpace_ = std::make_shared<frapu::ContinuousObservationSpace>(observationSpaceInfo);\n std::vector<double> lowerLimits;\n std::vector<double> upperLimits;\n if (observationSpaceInfo.observationType == \"linear\") {\n observationSpace_->setDimension(4);\n static_cast<frapu::ContinuousObservationSpace*>(observationSpace_.get())->setLimits(lowerStateLimits_,\n upperStateLimits_);\n } else {\n observationSpace_->setDimension(3);\n lowerLimits = std::vector<double>( {0.0, 0.0, -1.2});\n upperLimits = std::vector<double>( {1.0, 1.0, 1.2});\n static_cast<frapu::ContinuousObservationSpace*>(observationSpace_.get())->setLimits(lowerLimits,\n upperLimits);\n }\n}\n\nbool DubinRobot::getObservation(const frapu::RobotStateSharedPtr& state,\n std::vector<double>& observationError,\n frapu::ObservationSharedPtr& observation) const\n{\n transformToObservationSpace(state, observation);\n std::vector<double> observationVec =\n static_cast<frapu::VectorObservation*>(observation.get())->asVector();\n for (size_t i = 0; i < observationError.size(); i++) {\n observationVec[i] += observationError[i];\n }\n\n observation = std::make_shared<frapu::VectorObservation>(observationVec);\n}\n\nbool DubinRobot::getObservation(const frapu::RobotStateSharedPtr& state,\n frapu::ObservationSharedPtr& observation) const\n{\n std::vector<double> stateVec = static_cast<frapu::VectorState*>(state.get())->asVector();\n std::vector<double> observationVec;\n if (observationSpace_->getObservationSpaceInfo().observationType == \"linear\") {\n observationVec = std::vector<double>(stateVec.size());\n Eigen::MatrixXd sample = observation_distribution_->samples(1);\n for (size_t i = 0; i < stateVec.size(); i++) {\n observationVec[i] = stateVec[i] + sample(i, 0);\n }\n } else {\n observationVec = std::vector<double>(3);\n unsigned int observationSpaceDimension = observationSpace_->getDimension();\n Eigen::MatrixXd sample = observation_distribution_->samples(1);\n\n observationVec[0] = sample(0, 0) + 1.0 / (std::pow(stateVec[0] - beacons_[0].x_, 2) + std::pow(stateVec[1] - beacons_[0].y_, 2) + 1.0);\n observationVec[1] = sample(1, 0) + 1.0 / (std::pow(stateVec[0] - beacons_[1].x_, 2) + std::pow(stateVec[1] - beacons_[1].y_, 2) + 1.0);\n observationVec[2] = stateVec[3] + sample(2, 0);\n\n }\n\n observation = std::make_shared<frapu::VectorObservation>(observationVec);\n return true;\n}\n\nvoid DubinRobot::transformToObservationSpace(const frapu::RobotStateSharedPtr& state,\n frapu::ObservationSharedPtr& res) const\n{\n std::vector<double> stateVec = static_cast<frapu::VectorState*>(state.get())->asVector();\n std::vector<double> observationVec;\n if (observationSpace_->getObservationSpaceInfo().observationType == \"linear\") {\n observationVec = stateVec;\n } else {\n observationVec = std::vector<double>(3);\n observationVec[0] = 1.0 / (std::pow(stateVec[0] - beacons_[0].x_, 2) + std::pow(stateVec[1] - beacons_[0].y_, 2) + 1.0);\n observationVec[1] = 1.0 / 
(std::pow(stateVec[0] - beacons_[1].x_, 2) + std::pow(stateVec[1] - beacons_[1].y_, 2) + 1.0);\n observationVec[2] = stateVec[3];\n }\n\n res = std::make_shared<frapu::VectorObservation>(observationVec);\n}\n\nint DubinRobot::getDOF() const\n{\n return 4;\n}\n\nvoid DubinRobot::makeNextStateAfterCollision(const frapu::RobotStateSharedPtr& previousState,\n const frapu::RobotStateSharedPtr& collidingState,\n frapu::RobotStateSharedPtr& nextState)\n{\n std::vector<double> previousStateVec = static_cast<frapu::VectorState*>(previousState.get())->asVector();\n std::vector<double> nextStateVec = previousStateVec;\n nextStateVec[3] = 0.0;\n nextState = std::make_shared<frapu::VectorState>(nextStateVec);\n}\n\nbool DubinRobot::isTerminal(const frapu::RobotStateSharedPtr& state) const\n{\n std::vector<double> stateVec = static_cast<const frapu::VectorState*>(state.get())->asVector();\n if (stateVec.size() > 4) {\n frapu::ERROR(\"state vec size \" + std::to_string(stateVec.size()));\n }\n\n std::vector<double> sVec(3);\n sVec[0] = stateVec[0];\n sVec[1] = stateVec[1];\n sVec[2] = 0.0;\n\n return static_cast<frapu::SphereGoal*>(goal_.get())->isSatisfied(sVec);\n}\n\n\ndouble DubinRobot::distanceGoal(const frapu::RobotStateSharedPtr& state) const\n{\n std::vector<double> stateVec = static_cast<const frapu::VectorState*>(state.get())->asVector();\n std::vector<double> sVec(3);\n sVec[0] = stateVec[0];\n sVec[1] = stateVec[1];\n sVec[2] = 0.0;\n return static_cast<frapu::SphereGoal*>(goal_.get())->distanceCenter(sVec);\n /**assert(goal_position_.size() != 0 && \"DubinRobot: No goal area set. Cannot calculate distance!\");\n double x = stateVec[0];\n double y = stateVec[1];\n\n double dist = std::pow(goal_position_[0] - x, 2);\n dist += std::pow(goal_position_[1] - y, 2);\n return std::sqrt(dist);*/\n}\n\nvoid DubinRobot::setGravityConstant(double gravity_constant)\n{\n\n}\n\nvoid DubinRobot::getLinearObservationMatrix(const std::vector<double>& state, Eigen::MatrixXd& H) const\n{\n H = Eigen::MatrixXd(3, 4);\n\n H(0, 0) = 1.0 * (2 * beacons_[0].x_ - 2 * state[0]) / std::pow(std::pow(-beacons_[0].x_ + state[0], 2) + std::pow(-beacons_[0].y_ + state[1], 2) + 1.0, 2);\n H(0, 1) = 1.0 * (2 * beacons_[0].y_ - 2 * state[1]) / std::pow(std::pow(-beacons_[0].x_ + state[0], 2) + std::pow(-beacons_[0].y_ + state[1], 2) + 1.0, 2);\n H(0, 2) = 0.0;\n H(0, 3) = 0.0;\n H(1, 0) = 1.0 * (2 * beacons_[1].x_ - 2 * state[0]) / std::pow(std::pow(-beacons_[1].x_ + state[0], 2) + std::pow(-beacons_[1].y_ + state[1], 2) + 1.0, 2);\n H(1, 1) = 1.0 * (2 * beacons_[1].y_ - 2 * state[1]) / std::pow(std::pow(-beacons_[1].x_ + state[0], 2) + std::pow(-beacons_[1].y_ + state[1], 2) + 1.0, 2);\n H(1, 2) = 0.0;\n H(1, 3) = 0.0;\n H(2, 0) = 0.0;\n H(2, 1) = 0.0;\n H(2, 2) = 0.0;\n H(2, 3) = 1.0;\n}\n\nvoid DubinRobot::getLinearObservationDynamics(const frapu::RobotStateSharedPtr& state,\n Eigen::MatrixXd& H,\n Eigen::MatrixXd& W) const\n{\n std::vector<double> stateVec = static_cast<frapu::VectorState*>(state.get())->asVector();\n if (observationSpace_->getObservationSpaceInfo().observationType == \"linear\") {\n H = Eigen::MatrixXd::Identity(4, 4);\n W = Eigen::MatrixXd::Identity(4, 4);\n } else {\n getLinearObservationMatrix(stateVec, H);\n W = Eigen::MatrixXd::Identity(3, 3);\n }\n}\n\nvoid DubinRobot::getLinearProcessMatrices(const frapu::RobotStateSharedPtr& state,\n const frapu::ActionSharedPtr& control,\n double& duration,\n std::vector<Eigen::MatrixXd>& matrices) const\n{\n std::vector<double> stateVec = 
static_cast<frapu::VectorState*>(state.get())->asVector();\n std::vector<double> controlVec = static_cast<frapu::VectorAction*>(control.get())->asVector();\n Eigen::MatrixXd A(4, 4);\n A << 1, 0, -duration* stateVec[3]*sin(stateVec[2]), duration* cos(stateVec[2]),\n 0, 1, duration* stateVec[3]*cos(stateVec[2]), duration* sin(stateVec[2]),\n 0, 0, 1, duration* tan(controlVec[1]) / d_,\n 0, 0, 0, 1;\n\n Eigen::MatrixXd B(4, 2);\n B << 0, 0,\n 0, 0,\n 0, duration* stateVec[3]*(std::pow(tan(controlVec[1]), 2) + 1) / d_,\n duration, 0;\n\n Eigen::MatrixXd V(4, 2);\n V << 0, 0,\n 0, 0,\n 0, duration* stateVec[3]*(std::pow(tan(controlVec[1]), 2) + 1) / d_,\n duration, 0;\n\n matrices.push_back(A);\n matrices.push_back(B);\n matrices.push_back(V);\n if (observationSpace_->getObservationSpaceInfo().observationType == \"linear\") {\n Eigen::MatrixXd H = Eigen::MatrixXd::Identity(4, 4);\n Eigen::MatrixXd W = Eigen::MatrixXd::Identity(4, 4);\n matrices.push_back(H);\n matrices.push_back(W);\n } else {\n Eigen::MatrixXd H;\n getLinearObservationMatrix(stateVec, H);\n Eigen::MatrixXd W = Eigen::MatrixXd::Identity(3, 3);\n matrices.push_back(H);\n matrices.push_back(W);\n }\n\n}\n\nvoid DubinRobot::makeProcessDistribution(Eigen::MatrixXd& mean,\n Eigen::MatrixXd& covariance_matrix)\n{\n process_distribution_ =\n std::make_shared<Eigen::EigenMultivariateNormal<double>>(mean, covariance_matrix, false); \n}\n\nvoid DubinRobot::makeObservationDistribution(Eigen::MatrixXd& mean,\n Eigen::MatrixXd& covariance_matrix)\n{\n observation_distribution_ =\n std::make_shared<Eigen::EigenMultivariateNormal<double>>(mean, covariance_matrix, false);\n}\n\nvoid DubinRobot::updateViewer(const frapu::RobotStateSharedPtr& state,\n std::vector<frapu::RobotStateSharedPtr>& particles,\n std::vector<std::vector<double>>& particleColors)\n{\n#ifdef USE_OPENRAVE\n std::vector<double> stateVec = static_cast<frapu::VectorState*>(state.get())->asVector();\n std::vector<std::string> names;\n std::vector<std::vector<double>> dims;\n std::vector<std::vector<double>> colors;\n std::string name = \"dubin\";\n names.push_back(name);\n std::vector<double> main_dims( {stateVec[0], stateVec[1], 0.025, dim_x_, dim_y_, dim_z_, stateVec[2]});\n dims.push_back(main_dims);\n std::vector<double> main_color( {1.0, 0.0, 0.0, 0.5});\n colors.push_back(main_color);\n for (size_t i = 0; i < particles.size(); i++) {\n std::string p_name = \"particle_dubin\" + std::to_string(i);\n names.push_back(p_name);\n std::vector<double> particle = static_cast<const frapu::VectorState*>(particles[i].get())->asVector();\n std::vector<double> p_dims( {particle[0],\n particle[1],\n 0.025,\n dim_x_,\n dim_y_,\n dim_z_,\n particle[2]\n });\n dims.push_back(p_dims);\n colors.push_back(particleColors[i]);\n }\n\n viewer_->addBoxes(names, dims, colors);\n\n\n#endif\n}\n\n}\n"
},
{
"alpha_fraction": 0.5968841314315796,
"alphanum_fraction": 0.602726399898529,
"avg_line_length": 33.233333587646484,
"blob_id": "a819fe33af73e79c1a0a82092244ce8f7814b12b",
"content_id": "77519e89d006b95a054df6d9f155a838c5e4c399",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1027,
"license_type": "no_license",
"max_line_length": 96,
"num_lines": 30,
"path": "/src/Airplane/AirplanePropagator.cpp",
"repo_name": "hoergems/robots",
"src_encoding": "UTF-8",
"text": "#include <robot_headers/Airplane/AirplanePropagator.hpp>\n\nnamespace frapu\n{\nAirplanePropagator::AirplanePropagator():\n integrator_(std::make_shared<AirplaneIntegrator>())\n{\n\n}\n\nbool AirplanePropagator::propagateState(const std::vector<double>& currentState,\n const std::vector<double>& control,\n const std::vector<double>& control_error,\n const double& duration,\n const double& simulation_step_size,\n std::vector<double>& result)\n{\n std::vector<double> currentStateNonConst = currentState;\n std::vector<double> intTimes(3);\n intTimes[0] = 0.0;\n intTimes[1] = duration;\n intTimes[2] = simulation_step_size;\n integrator_->do_integration(currentStateNonConst, control, control_error, intTimes, result);\n}\n\nstd::shared_ptr<AirplaneIntegrator> AirplanePropagator::getIntegrator() const\n{\n return integrator_;\n}\n}\n"
},
{
"alpha_fraction": 0.6791604161262512,
"alphanum_fraction": 0.6806596517562866,
"avg_line_length": 26.79166603088379,
"blob_id": "cc8dc7999598de6d793e4df24ee1c40b330cc925",
"content_id": "7171194f1e8b8b8e7dc395105bd3a1043cfd2c46",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 667,
"license_type": "no_license",
"max_line_length": 89,
"num_lines": 24,
"path": "/src/Manipulator/ManipulatorPropagatorLinear.cpp",
"repo_name": "hoergems/robots",
"src_encoding": "UTF-8",
"text": "#include <robot_headers/Manipulator/ManipulatorPropagatorLinear.hpp>\n\nnamespace frapu\n{\nManipulatorPropagatorLinear::ManipulatorPropagatorLinear():\n frapu::Propagator()\n{\n}\n\nbool ManipulatorPropagatorLinear::propagateState(const std::vector<double>& currentState,\n const std::vector<double>& control,\n const std::vector<double>& control_error,\n const double& duration,\n const double& simulation_step_size,\n std::vector<double>& result)\n{\n result = std::vector<double>(currentState.size());\n for (size_t i = 0; i < currentState.size(); i++) {\n result[i] = currentState[i] + control[i] + control_error[i];\n }\n\n}\n\n}\n"
},
{
"alpha_fraction": 0.5653809905052185,
"alphanum_fraction": 0.5776105523109436,
"avg_line_length": 29.371429443359375,
"blob_id": "cc26596a3dd1a892478b265c844d8fd3d356aa1a",
"content_id": "30fdb1ef8335a4a756bce39e73b4fff64dc054f6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1063,
"license_type": "no_license",
"max_line_length": 109,
"num_lines": 35,
"path": "/src/Dubin/DubinPropagator.cpp",
"repo_name": "hoergems/robots",
"src_encoding": "UTF-8",
"text": "#include <robot_headers/Dubin/DubinPropagator.hpp>\n\nnamespace frapu\n{\n\nusing std::cout;\nusing std::endl;\n\nDubinPropagator::DubinPropagator():\n Propagator(),\n d_()\n{\n\n}\n\nvoid DubinPropagator::setD(double& d)\n{\n d_ = d;\n}\n\nbool DubinPropagator::propagateState(const std::vector<double>& currentState,\n const std::vector<double>& control,\n const std::vector<double>& control_error,\n const double& duration,\n const double& simulation_step_size,\n std::vector<double>& result)\n{\n result.clear();\n result.push_back(currentState[0] + duration * currentState[3] * cos(currentState[2]));\n result.push_back(currentState[1] + duration * currentState[3] * sin(currentState[2]));\n result.push_back(currentState[2] + duration * currentState[3] * tan(control[1] + control_error[1]) / d_);\n result.push_back(currentState[3] + duration * (control[0] + control_error[0]));\n}\n\n}\n"
}
] | 26 |
biben-hub/api-request-csv-create
|
https://github.com/biben-hub/api-request-csv-create
|
cbad3bc89791dd955376f047ceeacce2933a29e5
|
8603c034cc0b892d6a003a27f8480c26ca9420bc
|
15357d4c703f705027a7f7fbd713de1a1c07fb98
|
refs/heads/main
| 2023-02-28T11:06:54.180238 | 2021-02-01T12:50:44 | 2021-02-01T12:50:44 | 332,875,605 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.7835051417350769,
"alphanum_fraction": 0.7835051417350769,
"avg_line_length": 47.5,
"blob_id": "edd9c6a1edbcbb69f77a7edd123c4ad954be6592",
"content_id": "639032c4e7be67bf55cefde92277efc3d5047274",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 97,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 2,
"path": "/README.md",
"repo_name": "biben-hub/api-request-csv-create",
"src_encoding": "UTF-8",
"text": "# api-request-csv-create\nrequests an api to manipulate data, store in csv and more all in python\n"
},
{
"alpha_fraction": 0.6377079486846924,
"alphanum_fraction": 0.6598891019821167,
"avg_line_length": 26.769229888916016,
"blob_id": "fe4a043f18475eb45c6751fb7c9c148bcaf8bc50",
"content_id": "0aa70176f9879e9ace7260d84fc58c7ba818df87",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1090,
"license_type": "no_license",
"max_line_length": 105,
"num_lines": 39,
"path": "/rebuild.py",
"repo_name": "biben-hub/api-request-csv-create",
"src_encoding": "UTF-8",
"text": "import json\nimport requests\n\n# d'abord on stock l'url avec son endpoint et ses authorization qu'on prend chez le fournissuer de l' API\nurl = \"https://api.sncf.com/v1/coverage/sncf/stop_areas\"\nheaders = {\"Authorization\" : \"88829ffa-7c9c-4051-9990-9fdf9564a298\"}\n\n#on se crée une fonction qui va nous appeler l'api\n#récupérer les data de l'api et les ecrire dans un json\n#nous dire que la connexion est ok en nous renvoyant un message\ndef connex_rw_data_json_api():\n response = requests.get(url, headers = headers)\n with open('stop_areas_file.json', mode=\"w\") as file:\n json.dump(response.text, file)\n \n print(response, \"\\n Le fichier à bien été crée\")\n\n\nconnex_rw_data_json_api()\n\n#fonction pour lire les données de mon json\ndef read_links():\n \n with open('stop_areas_file.json') as fichier_areas:\n data = json.load(fichier_areas)\n\n links = data[\"links\"]\n list_hrefs = []\n print(type(data))\n print(data)\n \n# links = data['links']\n# #list_hrefs = []\n \n# print(type(links))\n\n# read_links()\n\nread_links()"
},
{
"alpha_fraction": 0.6166253089904785,
"alphanum_fraction": 0.6269644498825073,
"avg_line_length": 24.73404312133789,
"blob_id": "0259bc43b9e4984cb7cdb554c480a3ea1e2d60f4",
"content_id": "e808c5879d15e86f9f5c0cfb39668776843d41c0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2423,
"license_type": "no_license",
"max_line_length": 141,
"num_lines": 94,
"path": "/exercice_api.py",
"repo_name": "biben-hub/api-request-csv-create",
"src_encoding": "UTF-8",
"text": "import json\nimport pprint\nimport requests\nimport csv\nimport logging\n\nlogging.basicConfig(filename='logFile.log', level=logging.DEBUG)\nlogging.info(\"Starting info\")\n\n#appel pour connexion à l'API +\nurl = \"https://api.sncf.com/v1/coverage/sncf/stop_areas\"\nheaders = {\"Authorization\" : \"88829ffa-7c9c-4051-9990-9fdf9564a298\"}\nresponse = requests.get(url, headers = headers)\ndata = json.loads(response.text)\n\nprint(response)\nprint(type(data))\n\nareas = data[\"stop_areas\"] #entrée dans l'API par le endpoint stop_areas (le endpoint est la clé d'entrée de communication aux data de 'lAPI)\nprint(type(areas))\n\narea = areas[2]\n\nlist_ids = []\n\nfor loop_area in areas:\n if type(loop_area) == dict:\n if\"id\" in loop_area.keys():\n local_id = loop_area[\"id\"]\n list_ids.append(local_id)\n else:\n print(\"Missing key id\")\n else:\n print(f\"Unexpected format {type(loop_area)}\")\n\nprint(len(list_ids))\n\n# print(type(area),area)\n# print(area.keys())\nprint(area[\"id\"])\n\nlist_name = []\n\nfor loop_area in areas:\n if type(loop_area) == dict:\n if \"name\" in loop_area.keys():\n local_name = loop_area[\"name\"]\n list_name.append(local_name)\n else:\n print(\"Missing key name\")\n else:\n print(f\"unexpected format {type(loop_area)}\")\n\nprint(area[\"name\"])\n\nlist_timezone = []\n\nfor loop_area in areas:\n if type(loop_area) == dict:\n if \"timezone\" in loop_area.keys():\n local_timezone = loop_area[\"timezone\"]\n list_timezone.append(local_timezone)\n else:\n print(\"Missing key name\")\n else:\n print(f\"Unexpected format {type(loop_area)}\")\n\nprint(area[\"timezone\"])\n\nlist_label = []\n\nfor loop_area in areas:\n if type(loop_area) == dict:\n if \"label\" in loop_area.keys():\n local_label = loop_area[\"label\"]\n list_label.append(local_label)\n else:\n print(\"Missing key name\")\n else:\n print(f\"Unexeptected format {type(loop_area)}\")\n\nprint(area[\"label\"])\n\ndata = set(zip(list_ids, list_name, list_timezone, list_label))\n\nwith open(\"APIStropAreas.csv\", \"w\") as file:\n head = [\"ids\", \"name\", \"time\", \"label\"]\n fileWriter = csv.writer(file, delimiter = \";\" )\n fileWriter.writerow(i for i in head)\n\n for row in data:\n fileWriter.writerow(row)\n\n#soit on fait beautify soit on créer l'architecture de l'api dynamiquement"
},
{
"alpha_fraction": 0.6458333134651184,
"alphanum_fraction": 0.6614583134651184,
"avg_line_length": 15,
"blob_id": "4738dd0c48da1e6b88a923b9682ae1aa89caaeec",
"content_id": "6d9f7365900dfc4c1018e700943e2e81ab3a2d4f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 386,
"license_type": "no_license",
"max_line_length": 60,
"num_lines": 24,
"path": "/read_json.py",
"repo_name": "biben-hub/api-request-csv-create",
"src_encoding": "UTF-8",
"text": "import json\nimport pprint\nimport csv\n\n\n'''\nMéthode 1\n\nf = open('stop_areas.json', \"r\")\n\ndata = json.load(f)\nfor i in data['emp_details']:\n print(i)\n\nf.close()\n'''\n\n# Méthode 2\nwith open('stop_areas.json', \"r\") as read_file:\n data = json.load(read_file)\n json.dumps(data, sort_keys=True, indent=4)\n\npp = pprint.PrettyPrinter(indent=4, width=80, compact=False)\npp.pprint(data)\n"
},
{
"alpha_fraction": 0.630234956741333,
"alphanum_fraction": 0.630234956741333,
"avg_line_length": 26.22222137451172,
"blob_id": "aebd0871cdf6409e467e5ad8be3547c2eda9d3fd",
"content_id": "0b8fc9e4292c59f5f4ac2be6f444a14805380b5a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 983,
"license_type": "no_license",
"max_line_length": 74,
"num_lines": 36,
"path": "/test_unitaire.py",
"repo_name": "biben-hub/api-request-csv-create",
"src_encoding": "UTF-8",
"text": "import json\nimport requests\nimport csv\nimport pprint\nimport os.path\nimport tempfile\nimport unittest\n\n\n#integration d'un test unitaire module écriture json\nclass RmTestCase(unittest.TestCase):\n\n tmpfilepath = os.path.join(tempfile.gettempdir(), \"tmp-testfile\")\n\n def setUp(self):\n with open(self.tmpfilepath, \"wb\") as f:\n f.write(\"Delete me!\")\n \n def test_rm(self):\n #supprimer ce fichier\n rm(self.tmpfilepath)\n #tester qu'il a bien été supprimé\n self.assertFalse(os.path.isfile(self.tmpfilepath),\n \"Faile to remove the file.\")\n\n\n#-----------------------end of test----------------------\n def read_json(): # read and saves json\n\n response = requests.get(url, headers=headers) #pop up for password\n # raw_data = json.loads(response.text) #dict\n with open('stop_areas_maria.json', mode=\"w\") as file:\n json.dump(response.text, file)\n# returns nothing, saves json\n\nprint(read_json())"
}
] | 5 |
OohmdoO/shared-autonomy-for-teleoperating-baxter-robot
|
https://github.com/OohmdoO/shared-autonomy-for-teleoperating-baxter-robot
|
f63d180415811a94f9794c6b70b66e630817a7f5
|
4b439be4d4f178dc731fc6b9a1c1731c2e8b0867
|
f9805eb7a1ff3ca6b76137882328f324e50c5d37
|
refs/heads/master
| 2023-01-09T23:29:52.881239 | 2020-10-29T22:35:48 | 2020-10-29T22:35:48 | null | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.7623777985572815,
"alphanum_fraction": 0.7867329120635986,
"avg_line_length": 85.65277862548828,
"blob_id": "6e95b8b977159d0fe03768823ff48bf61cc1c8e3",
"content_id": "ce099884c0b4825674da0d2b06081e2f31e61a70",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 6299,
"license_type": "no_license",
"max_line_length": 880,
"num_lines": 72,
"path": "/README.md",
"repo_name": "OohmdoO/shared-autonomy-for-teleoperating-baxter-robot",
"src_encoding": "UTF-8",
"text": "# Teleoperating-robot-project\n\nFor the Teleoperating Nursing Robot Baxter to detect the cups to hold we designed a program to detect aurco markers which are placed on cups.\n\n- Run the print markers file to print your tags.\n\n- Run calibration.py file to create a calibrated file ex: test.yaml.\n\n- Run Aruco.py to detect any Aruco marker/Checkboard live.\n\n# Result images\n\n \n\n---\n\n# Baxter-robot teleoperation:\nFor the Baxter robot installation you can follow the link: https://github.com/RethinkRobotics/baxter\n \nAccording to our Hiro Lab @wpi, we used our custom baxter robot for problem application. The folder \"baxter robot teleoperation functions\" consists of our implemented package for teleoperating the robot, autonomous, semi-autonomous methods. The following is the simulation setup for our project:\n\n\n\n\n## Implementation:\n\n### Baxter Camer sensors:\n\nThe Baxter robot in the HiRo lab has a intel RealSense RGB-D camera located on its head. This camera provides a top-down view of the robot’s workspace. This view is used for teleoperation and tasks that require images processing. To provide this capability, an open-source simulated real-sense was implemented in the Gazebo simulation. This simulated camera continuously publishes RGB, infrared, and depth images to separate rostopics. \n\nImplementing this camera required making minor changes to the source code to make it compatible with recent version of Gazebo. The following image shows the detection of aruco marker using realsense rgbd camera:\n\n\n\n### Autonomous function: \n\nThe autonomous function used the end effectors pose as the start point and the given pose (the cup’s pose) as the goal pose. The autonomous function moves the end effector long the shortest path the the goal pose and increments the end effector in steps of a specified length. The incremental poses are calculating by divided the total path length and rotation by the number of steps need to traverse the path. The methods described in the previous section are used to calculated and send the joint angles for the poses along the path. This simple autonomous function was implemented to show how shared autonomy can enhance even the most basic autonomous functions. To complete the act of pouring water, functions were created to raise the cup a specified distance and to pour the cup a specified angle. Once the autonomous function reaches its goal these to functions are called.\n \n### Teleoperation:\n\nA method of controlling the end effector’s pose via teleoperation was developed so that it could be integrated into the shared autonomy functions and so that it could be compared to the shared autonomy functions. The developed teleoperation method is designed to be used with a flight-stick style USB gamepad. The game-pad inputs were interpreted by launching a ROS ”joy node” which listens for USB gamepad messages and publishes them to a rostopic. Then, a ”joystick” class was implemented to subscribe to this topic and save the button and axis status’s as variables. The joystick class was implemented with a flag to show if a new message had been received, meaning the state of the gamepad had changed. Using the ”joystick” class and the Baxter control methods, and end effector pose-based teleoperation function was implemented.\n\nIn this function, any time the joystick receives an input, the end effector’s pose is changed based on that input. The translation of the end effector is based on three of the axis from the gamepad, one for each axis. 
The rotation of the end effector is based on two axis and one set of buttons on the gamepad, one for each euler angle. Each cycle, the variable position of the each of the gamepad’s axes is multiplied by a constant and added to the respective component of the end effector’s pose. The new pose is then converted to joint angles and published so the robot can move to the new pose.\n \n### Semi-Autonmous functions:\n\nUsing the methods developed in the autonomous and teleoperation functions, three unique methods of shared autonomy were developed. These methods all blended the autonomous\nand teleoperation approach. All of the shared autonomy functions conclude by calling the lift and pouring functions. The following methods are implemented in sem-autonomous functions:\n\n#### Take Control method: \n- The user to take control of the end effectors movements without any input from the autonomous functions. This method also allows \n- The user can pause the robot by putting the robot into teleoperation mode and providing no gamepad input.\n- This is useful for user providing time to think about the robots path and actions.\n\n#### Meshed method:\n- The user when applies input the end effector is teleoperated, when stops the autonomy resumes\n- Additional features are added to this method, which allows smooth transition from autonomy to teleoperation that helps user to more easily making adjustments to the end effector’s path.\n\n#### Cone method:\n- This method is based on the ”Meshed” method, but it limits the distance that the user can displace the end effector from the autonomous path proportionally. \n- The user here is provided with less control when the end effector is near the object, this is assumed to reduce the amount of unwanted collisions with the objects being manipulated.\n\n## Package:\nThe pacakge is in following format: \n```\nbaxter robot teleoperation functions \n├── cups # urdf files for the simulation setup\n│── ebolabot # the baxter robot simulation \n│── src \n| |── image processing # It consists of teleoperating, autonomous and semi-autonomous functions with opencv bridge to detect to auruco markers.\n|── grasp.py # function to grasp the objects\n```\n\n\n"
},
{
"alpha_fraction": 0.60326087474823,
"alphanum_fraction": 0.6206521987915039,
"avg_line_length": 33.11111068725586,
"blob_id": "e884515af52ceabc53df5324d4dcfe8b8b3a09fe",
"content_id": "204f40f19c4424ce314fad92dc558eeb812d2bcd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Dockerfile",
"length_bytes": 920,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 27,
"path": "/Dockerfile",
"repo_name": "OohmdoO/shared-autonomy-for-teleoperating-baxter-robot",
"src_encoding": "UTF-8",
"text": "############################ ROS ############################ \nFROM ros:melodic-ros-core-bionic\n\nRUN sudo apt-get upgrade\n\n# install ros packages\nRUN sudo apt-get update && sudo apt-get install -y \\\n ros-melodic-ros-base=1.4.1-0* \\\n && rm -rf /var/lib/apt/lists/*\n\n###################### Kinect Driver ########################\n# https://github.com/OpenKinect/libfreenect2/blob/master/README.md#linux\nRUN git clone https://github.com/OpenKinect/libfreenect2.git && cd libfreenect2\nRUN sudo apt-get install build-essential cmake pkg-config\n# RUN sudo apt-get install libusb-1.0-0-dev\n# RUN sudo apt-get install libturbojpeg0-dev\nRUN sudo apt-get install libglfw3-dev\nRUN mkdir build && cd build\nRUN cmake .. -DCMAKE_INSTALL_PREFIX=$HOME/freenect2\nRUN make\nRUN make install\nRUN sudo cp ../platform/linux/udev/90-kinect2.rules /etc/udev/rules.d/\n\n\n\n###################### Code ##########################\n# COPY src/ ~/"
},
{
"alpha_fraction": 0.6972281336784363,
"alphanum_fraction": 0.7292110919952393,
"avg_line_length": 28.375,
"blob_id": "e94112105d5da46083a27537a75a61dbb4e023e1",
"content_id": "73bf9906c96fd058a631955498a557e6e78d5af3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 469,
"license_type": "no_license",
"max_line_length": 74,
"num_lines": 16,
"path": "/print_markers.py",
"repo_name": "OohmdoO/shared-autonomy-for-teleoperating-baxter-robot",
"src_encoding": "UTF-8",
"text": "# print_markers.py By: Trevor Rizzo\n# prints all markers in dictonary and stores them in \"markers\" folder\nimport cv2\nimport cv2.aruco as aruco\n\nmarkerImage = None\n\ndictionary = aruco.Dictionary_get(aruco.DICT_6X6_100)\ndictSize = 100\n# Print Markers\nfor index in range(dictSize):\n markerImage = aruco.drawMarker(dictionary, index, 200, markerImage, 1)\n\n fileName = './markers/marker' + str(index) + '.png'\n cv2.imwrite(fileName, markerImage)\n # print(markerImage)"
},
{
"alpha_fraction": 0.5425214767456055,
"alphanum_fraction": 0.5680716037750244,
"avg_line_length": 33.371795654296875,
"blob_id": "5110b4bd3e32d0cc2a660f816051bd282b5a34c9",
"content_id": "1c1a7e67b5995d73cc23eedb5833c7e14c78af0d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5362,
"license_type": "no_license",
"max_line_length": 140,
"num_lines": 156,
"path": "/aruco.py",
"repo_name": "OohmdoO/shared-autonomy-for-teleoperating-baxter-robot",
"src_encoding": "UTF-8",
"text": "# Sources:\n# https://github.com/njanirudh\n\nimport cv2\nimport cv2.aruco as aruco\nimport numpy as np\nimport math\n# import tag\n# import endEffector\n\n#Colors\nblue = (255, 0, 0)\ngreen = (0, 255, 0)\nred = (0, 0, 255)\n\nEndEffectorID = 1\nEndEffectPresent = False\n\nBoundBoxSize = 0.40\n\ncap = cv2.VideoCapture(0)\n\ndef getDistance(point1, point2):\n return int(math.sqrt(((point1[0]-point2[0])**2)+((point1[1]-point2[1])**2))) #distance between two points\n#Source\n''' Import Calibration Data Source: https://github.com/njanirudh/Aruco_Tracker/blob/master/extract_calibration.py'''\n# File storage in OpenCV\ncv_file = cv2.FileStorage(\"E:\\WPI\\Semester-2(Spring)\\Motion planning\\project\\nursing-motion-planning-master\\test.py\", cv2.FILE_STORAGE_READ)\n\n# Note : we also have to specify the type\n# to retrieve otherwise we only get a 'None'\n# FileNode object back instead of a matrix\ncamera_matrix = cv_file.getNode(\"camera_matrix\").mat()\ndist_matrix = cv_file.getNode(\"dist_coeff\").mat()\n\ncv_file.release()\n\n''' Aruco Tracking '''\n# set dictionary size depending on the aruco marker selected\naruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_100)\n\n# font for displaying text (below)\nfont = cv2.FONT_HERSHEY_SIMPLEX\n\nwhile (True):\n ret, frame = cap.read()\n\n # operations on the frame\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n # detector parameters can be set here (List of detection parameters[3])\n parameters = aruco.DetectorParameters_create()\n parameters.adaptiveThreshConstant = 10\n\n # lists of ids and the corners belonging to each id\n corners, ids, rejectedImgPoints = aruco.detectMarkers(gray, aruco_dict, parameters=parameters)\n\n # check if the ids list is not empty\n # if no check is added the code will crash\n if np.all(ids != None):\n # draw a square around the markers\n aruco.drawDetectedMarkers(frame, corners)\n # indentify poses\n rvecs, tvecs, _objPoints = aruco.estimatePoseSingleMarkers(corners, 0.053, camera_matrix, dist_matrix)\n\n #check for end effector\n EndEffectPresent = False\n EndEffectorCenter = 0\n EndEffectorRadius = 0\n for i in range(0, ids.size):\n if ids[i][0] == EndEffectorID:\n #Set Flag\n EndEffectPresent = True\n #Find Center\n sumX = 0\n sumY = 0\n for point in corners[i][0]:\n sumX += point[0]\n sumY += point[1]\n EndEffectorCenter = (int(sumX/4), int(sumY/4))\n #Set Radius\n corner1 = corners[i][0][0]\n EndEffectorRadius = getDistance(EndEffectorCenter, corner1)\n #Draw Circle\n cv2.circle(frame, EndEffectorCenter, EndEffectorRadius, blue, thickness=2)\n # Draw axis\n aruco.drawAxis(frame, camera_matrix, dist_matrix, rvecs[i], tvecs[i], 0.053) \n #Remove from ids and corners\n ids = np.delete(ids, i, 0)\n corners = np.delete(corners, i, 0)\n #exit loop\n break\n \n # Locate all lther markers\n for i in range(0, ids.size):\n box = []\n sumX = 0\n sumY = 0\n #find center of marker\n for point in corners[i][0]:\n sumX += point[0]\n sumY += point[1]\n center = (int(sumX/4), int(sumY/4))\n #extend bounding square\n for point in corners[i][0]:\n newX = point[0] + ((point[0] + center[0])*BoundBoxSize)\n newY = point[1] + ((point[1] + center[1])*BoundBoxSize)\n point_to_center = getDistance(point, center)\n newX = point[0]\n newY = point[1]\n box = box + [[newX, newY]]\n box = np.array([box])\n box = np.int32(box)\n #find bound radius\n radius = 0\n for tagCorner in box[0]:\n toCenter = getDistance(center, tagCorner)\n if toCenter > radius:\n radius = toCenter \n #Check for end effector\n color = green\n if 
(EndEffectPresent):\n distance = getDistance(center, EndEffectorCenter)\n if (distance < (radius + EndEffectorRadius)):\n color = red\n else:\n color = green\n else:\n color = green\n #print square/Circle\n # cv2.polylines(frame, box, True, color, thickness=2)\n cv2.circle(frame, center, radius, color, thickness=2)\n\n # Draw axis\n aruco.drawAxis(frame, camera_matrix, dist_matrix, rvecs[i], tvecs[i], 0.053) \n\n # code to show ids of the marker found\n strg = ''\n for i in range(0, ids.size):\n strg += str(ids[i][0])+', '\n\n cv2.putText(frame, \"Id: \" + strg, (0,64), font, 1, (0,255,0),2,cv2.LINE_AA)\n\n\n else:\n # code to show 'No Ids' when no markers are found\n cv2.putText(frame, \"No Ids\", (0,64), font, 1, (0,255,0),2,cv2.LINE_AA)\n\n # display the resulting frame\n cv2.imshow('frame',frame)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n# When everything done, release the capture\ncap.release()\ncv2.destroyAllWindows()\n"
}
] | 4 |
Chaos-Monkey-Island/uzen
|
https://github.com/Chaos-Monkey-Island/uzen
|
7457c5537cdf322ba9493f131d2fd0a92e2b30e2
|
99ec71c16a0b33c2553a02ad725569370fa9bb5a
|
b31be80021db14e8c459550ccaa29580ea59c474
|
refs/heads/master
| 2023-04-16T07:00:43.704230 | 2021-01-02T03:32:44 | 2021-01-02T03:32:44 | null | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.7766798138618469,
"alphanum_fraction": 0.7766798138618469,
"avg_line_length": 28.764705657958984,
"blob_id": "709593b30d05ecf691707f7d85ddf9e57c9e2c4d",
"content_id": "671a5503ad5c5f2cf6d8d601b2e8df8ec4441101",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 506,
"license_type": "permissive",
"max_line_length": 65,
"num_lines": 17,
"path": "/uzen/api/endpoints/domain.py",
"repo_name": "Chaos-Monkey-Island/uzen",
"src_encoding": "UTF-8",
"text": "from fastapi import APIRouter\n\nfrom uzen.factories.domain import DomainInformationFactory\nfrom uzen.schemas.domain import DomainInformation\n\nrouter = APIRouter()\n\n\[email protected](\n \"/{hostname}\",\n response_model=DomainInformation,\n response_description=\"Returns information of a domain\",\n summary=\"Get domain information\",\n description=\"Get information related to a domain\",\n)\nasync def get(hostname: str) -> DomainInformation:\n return await DomainInformationFactory.from_hostname(hostname)\n"
},
{
"alpha_fraction": 0.7354910969734192,
"alphanum_fraction": 0.7354910969734192,
"avg_line_length": 29.89655113220215,
"blob_id": "a790e961aef5c9d659facdc1003e56d5db4094e7",
"content_id": "296452d41efb4a53226699f5751c45cfeb923ca4",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 896,
"license_type": "permissive",
"max_line_length": 95,
"num_lines": 29,
"path": "/uzen/api/endpoints/yara.py",
"repo_name": "Chaos-Monkey-Island/uzen",
"src_encoding": "UTF-8",
"text": "from typing import List, Optional\n\nfrom fastapi import APIRouter, Depends\n\nfrom uzen.api.dependencies.snapshots import SearchFilters\nfrom uzen.schemas.yara import ScanPayload, ScanResult\nfrom uzen.services.yara_scanner import YaraScanner\n\nrouter = APIRouter()\n\n\[email protected](\n \"/scan\",\n response_model=List[ScanResult],\n response_description=\"Returns a list of matched snapshots\",\n summary=\"Perform YARA scans against snapshtos\",\n description=\"Perform YARA scans against snapshtos (which can be narrowed down by filters)\",\n)\nasync def scan(\n payload: ScanPayload,\n size: Optional[int] = None,\n offset: Optional[int] = None,\n filters: SearchFilters = Depends(),\n) -> List[ScanResult]:\n yara_scanner = YaraScanner(payload.source)\n results = await yara_scanner.scan_snapshots(\n payload.target, vars(filters), size=size, offset=offset\n )\n return results\n"
},
{
"alpha_fraction": 0.5677586793899536,
"alphanum_fraction": 0.6078246235847473,
"avg_line_length": 31.63846206665039,
"blob_id": "3f6fa4b5617179ee94074f70ac27a390b61ba340",
"content_id": "7d5042a0b93f98c965e62dce985f00a87510da0b",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4243,
"license_type": "permissive",
"max_line_length": 102,
"num_lines": 130,
"path": "/tests/apis/test_yara.py",
"repo_name": "Chaos-Monkey-Island/uzen",
"src_encoding": "UTF-8",
"text": "import json\n\nimport pytest\n\nfrom uzen.models.screenshots import Screenshot\nfrom uzen.models.scripts import Script\nfrom uzen.models.snapshots import Snapshot\nfrom uzen.schemas.utils import SnapshotResult\n\n\[email protected]\[email protected](\"snapshots_setup\")\nasync def test_yara_scan(client):\n # it matches with all snapshots\n payload = {\"source\": 'rule foo: bar {strings: $a = \"foo\" condition: $a}'}\n response = await client.post(\"/api/yara/scan\", data=json.dumps(payload))\n assert response.status_code == 200\n\n snapshots = response.json()\n assert len(snapshots) == await Snapshot.all().count()\n\n\[email protected]\[email protected](\"snapshots_setup\")\[email protected](\"size\", [1, 5, 10])\nasync def test_yara_scan_with_size(client, size):\n payload = {\n \"source\": 'rule foo: bar {strings: $a = \"foo\" condition: $a}',\n }\n params = {\"size\": size}\n response = await client.post(\n \"/api/yara/scan\", data=json.dumps(payload), params=params\n )\n assert response.status_code == 200\n\n snapshots = response.json()\n assert len(snapshots) == size\n\n\[email protected]\[email protected](\"snapshots_setup\")\nasync def test_yara_scan_with_target(client):\n # it should return all snapshots because every snapshot has \"whois\" which contains \"foo\"\n payload = {\n \"source\": 'rule foo: bar {strings: $a = \"foo\" condition: $a}',\n \"target\": \"whois\",\n }\n response = await client.post(\"/api/yara/scan\", data=json.dumps(payload))\n assert response.status_code == 200\n\n snapshots = response.json()\n assert len(snapshots) == await Snapshot.all().count()\n\n # it should return an empty list because there is no snapshot which has \"certificate\"\n payload = {\n \"source\": 'rule foo: bar {strings: $a = \"foo\" condition: $a}',\n \"target\": \"certificate\",\n }\n response = await client.post(\"/api/yara/scan\", data=json.dumps(payload))\n assert response.status_code == 200\n\n snapshots = response.json()\n assert len(snapshots) == 0\n\n\[email protected]\nasync def test_yara_scan_with_invalid_input(client):\n payload = {\"source\": \"boo\"}\n response = await client.post(\"/api/yara/scan\", data=json.dumps(payload))\n assert response.status_code == 422\n\n\nasync def mock_take_snapshot(*args, **kwargs):\n screenshot = Screenshot()\n screenshot.data = \"\"\n\n return SnapshotResult(\n snapshot=Snapshot(\n url=\"https://www.w3.org/\",\n submitted_url=\"https://www.w3.org\",\n status=200,\n hostname=\"example.com\",\n ip_address=\"1.1.1.1\",\n asn=\"AS15133 MCI Communications Services, Inc. d/b/a Verizon Business\",\n server=\"ECS (sjc/4E5D)\",\n content_type=\"text/html; charset=UTF-8\",\n content_length=1256,\n headers={},\n body='<html><body><script type=\"text/javascript\" src=\"/2008/site/js/main\"></body></html>',\n sha256=\"fbc1a9f858ea9e177916964bd88c3d37b91a1e84412765e29950777f265c4b75\",\n screenshot=Screenshot(data=\"\"),\n whois=\"foo\",\n request={},\n ),\n screenshot=screenshot,\n scripts=[\n Script(\n url=\"https://www.w3.org/2008/site/js/main\",\n content=\"foo\",\n sha256=\"dummy\",\n )\n ],\n )\n\n\nasync def mock_take_snapshot_without_script(*args, **kwargs):\n screenshot = Screenshot()\n screenshot.data = \"\"\n\n return SnapshotResult(\n snapshot=Snapshot(\n url=\"https://www.w3.org/\",\n submitted_url=\"https://www.w3.org\",\n status=200,\n hostname=\"example.com\",\n ip_address=\"1.1.1.1\",\n asn=\"AS15133 MCI Communications Services, Inc. 
d/b/a Verizon Business\",\n server=\"ECS (sjc/4E5D)\",\n content_type=\"text/html; charset=UTF-8\",\n content_length=1256,\n headers={},\n body=\"<html><body></body></html>\",\n sha256=\"fbc1a9f858ea9e177916964bd88c3d37b91a1e84412765e29950777f265c4b75\",\n screenshot=Screenshot(data=\"\"),\n whois=\"foo\",\n request={},\n ),\n screenshot=screenshot,\n scripts=[],\n )\n"
},
{
"alpha_fraction": 0.6622583866119385,
"alphanum_fraction": 0.6851475238800049,
"avg_line_length": 30.70967674255371,
"blob_id": "59a917f196e5b9ca757165b211cc7dc4ecc0ccf5",
"content_id": "15a226b02fe36a30d37fdc2e2ade3567c8305573",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1966,
"license_type": "permissive",
"max_line_length": 122,
"num_lines": 62,
"path": "/tests/factories/test_scripts.py",
"repo_name": "Chaos-Monkey-Island/uzen",
"src_encoding": "UTF-8",
"text": "import pathlib\n\nimport pytest\nimport respx\nfrom httpx import Response\n\nfrom tests.utils import make_snapshot\nfrom uzen.factories.scripts import ScriptFactory, get_script_sources\n\n\[email protected]\[email protected]\nasync def test_build_from_snapshot():\n snapshot = make_snapshot()\n snapshot.body = '<html><body><script type=\"text/javascript\" src=\"https://www.w3.org/2008/site/js/main\"></body></html>'\n respx.get(\"https://www.w3.org/2008/site/js/main\").mock(\n Response(status_code=200, content=\"foo\")\n )\n\n scripts = await ScriptFactory.from_snapshot(snapshot)\n assert len(scripts) == 1\n\n script = scripts[0]\n assert script.url == \"https://www.w3.org/2008/site/js/main\"\n assert \"foo\" in script.content\n\n\[email protected]\[email protected]\nasync def test_build_from_snapshot_with_relative_src():\n snapshot = make_snapshot()\n snapshot.url = \"https://www.w3.org\"\n snapshot.body = '<html><body><script type=\"text/javascript\" src=\"/2008/site/js/main\"></body></html>'\n respx.get(\"https://www.w3.org/2008/site/js/main\").mock(\n Response(status_code=200, content=\"foo\")\n )\n\n scripts = await ScriptFactory.from_snapshot(snapshot)\n assert len(scripts) == 1\n\n script = scripts[0]\n assert script.url == \"https://www.w3.org/2008/site/js/main\"\n assert \"foo\" in script.content\n\n\[email protected]\nasync def test_build_from_snapshot_with_no_src():\n snapshot = make_snapshot()\n snapshot.body = '<html><body><script type=\"text/javascript\"></body></html>'\n\n scripts = await ScriptFactory.from_snapshot(snapshot)\n assert len(scripts) == 0\n\n\ndef test_get_script_sources():\n path = pathlib.Path(__file__).parent / \"../fixtures/test.html\"\n fixture = open(path).read()\n\n sources = get_script_sources(url=\"http://example.com/test.php\", body=fixture)\n assert len(sources) == 2\n assert \"http://example.com/vendor/jquery-3.2.1.min.js\" in sources\n assert \"http://example.com/js/main.js\" in sources\n"
},
{
"alpha_fraction": 0.7118644118309021,
"alphanum_fraction": 0.7118644118309021,
"avg_line_length": 30.769229888916016,
"blob_id": "71355e476fef3edd3d78795909ddcee78b55dce7",
"content_id": "7fe8a51df489beb1355992618139a7926c4eb67b",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 826,
"license_type": "permissive",
"max_line_length": 81,
"num_lines": 26,
"path": "/uzen/models/matches.py",
"repo_name": "Chaos-Monkey-Island/uzen",
"src_encoding": "UTF-8",
"text": "from tortoise import fields\n\nfrom uzen.models.base import AbstractBaseModel\nfrom uzen.models.mixins import TimestampMixin\nfrom uzen.schemas.matches import Match as MatchModel\n\n\nclass Match(TimestampMixin, AbstractBaseModel):\n matches = fields.JSONField()\n\n snapshot: fields.ForeignKeyRelation[\"Snapshot\"] = fields.ForeignKeyField(\n \"models.Snapshot\", on_delete=fields.CASCADE\n )\n rule: fields.ForeignKeyRelation[\"Rule\"] = fields.ForeignKeyField(\n \"models.Rule\", on_delete=fields.CASCADE\n )\n script: fields.ForeignKeyNullableRelation[\"Script\"] = fields.ForeignKeyField(\n \"models.Script\", null=True, on_delete=fields.CASCADE\n )\n\n def to_model(self) -> MatchModel:\n return MatchModel.from_orm(self)\n\n class Meta:\n table = \"matches\"\n ordering = [\"-created_at\"]\n"
},
{
"alpha_fraction": 0.6534653306007385,
"alphanum_fraction": 0.6647807359695435,
"avg_line_length": 22.566667556762695,
"blob_id": "99f8f8d87b35e2766650cbfbf4cf8958c82d1d5c",
"content_id": "21002746b70cfabb26fc5e51a9b4cecd00b607ec",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 707,
"license_type": "permissive",
"max_line_length": 79,
"num_lines": 30,
"path": "/tests/apis/test_ip_address.py",
"repo_name": "Chaos-Monkey-Island/uzen",
"src_encoding": "UTF-8",
"text": "import pytest\nimport vcr\n\nfrom uzen.services.whois import Whois\n\n\ndef mock_whois(hostname: str):\n return \"foo\"\n\n\[email protected]\[email protected]_cassette(\n \"tests/fixtures/vcr_cassettes/ip_address.yaml\", ignore_hosts=[\"testserver\"]\n)\nasync def test_get(client, monkeypatch):\n monkeypatch.setattr(Whois, \"whois\", mock_whois)\n\n ip_address = \"1.1.1.1\"\n response = await client.get(f\"/api/ip_address/{ip_address}\")\n assert response.status_code == 200\n\n json = response.json()\n ip_address_ = json.get(\"ipAddress\", \"\")\n assert ip_address_ == ip_address\n\n snapshots = json.get(\"snapshots\", [])\n assert len(snapshots) == 0\n\n whois = json.get(\"whois\", \"\")\n assert whois == whois\n"
},
{
"alpha_fraction": 0.685543954372406,
"alphanum_fraction": 0.685543954372406,
"avg_line_length": 24.80769157409668,
"blob_id": "d559222091989df97ad5c0b1982e14f4fa4b3c3d",
"content_id": "909ff99e4614256ba4696089e6c534225507391a",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 671,
"license_type": "permissive",
"max_line_length": 68,
"num_lines": 26,
"path": "/uzen/schemas/dns_records.py",
"repo_name": "Chaos-Monkey-Island/uzen",
"src_encoding": "UTF-8",
"text": "from fastapi_utils.api_model import APIModel\nfrom pydantic import Field\n\nfrom uzen.schemas.base import AbstractBaseModel\nfrom uzen.schemas.mixins import TimestampMixin\n\n\nclass BaseDnsRecord(APIModel):\n \"\"\"Base Pydantic model for DnsRecord\n\n Note that this model doesn't have \"id\" and \"created_at\" fields.\n \"\"\"\n\n type: str = Field(\n ..., title=\"Type\", description=\"A type of the DNS record\",\n )\n value: str = Field(\n ..., title=\"Value\", description=\"A value of the DNS record\",\n )\n\n class Config:\n orm_mode = True\n\n\nclass DnsRecord(BaseDnsRecord, AbstractBaseModel, TimestampMixin):\n \"\"\"Full Pydantic model for DnsRecord\"\"\"\n"
},
{
"alpha_fraction": 0.721030056476593,
"alphanum_fraction": 0.725321888923645,
"avg_line_length": 24.88888931274414,
"blob_id": "d8997fb7af2ccb7d1c0845da630221f3a1f3babc",
"content_id": "1f1d3d8d8bc075dffbeffabad44eb24083a1ea03",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 466,
"license_type": "permissive",
"max_line_length": 79,
"num_lines": 18,
"path": "/uzen/schemas/screenshots.py",
"repo_name": "Chaos-Monkey-Island/uzen",
"src_encoding": "UTF-8",
"text": "from uuid import UUID\n\nfrom fastapi_utils.api_model import APIModel\nfrom pydantic import Field\n\nfrom uzen.schemas.base import AbstractBaseModel\n\n\nclass BaseScreenshot(APIModel):\n data: str = Field(..., title=\"Data\", description=\"Base64 encoded png data\")\n\n\nclass Screenshot(BaseScreenshot, AbstractBaseModel):\n \"\"\"Full Pydantic model for Screenshot\"\"\"\n\n snapshot_id: UUID = Field(\n ..., title=\"Snapshot ID\", description=\"An ID of the snaphsot\"\n )\n"
},
{
"alpha_fraction": 0.5350961685180664,
"alphanum_fraction": 0.6004807949066162,
"avg_line_length": 27.88888931274414,
"blob_id": "7475d7c38149dc9ecae828a45512a0327397bb1c",
"content_id": "bf437681f55050ac2b65bc077851e2f2f62fc85c",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2080,
"license_type": "permissive",
"max_line_length": 86,
"num_lines": 72,
"path": "/tests/utils.py",
"repo_name": "Chaos-Monkey-Island/uzen",
"src_encoding": "UTF-8",
"text": "import datetime\nimport uuid\nfrom uuid import UUID\n\nfrom uzen.models.rules import Rule\nfrom uzen.models.screenshots import Screenshot\nfrom uzen.models.snapshots import Snapshot\nfrom uzen.schemas.utils import SnapshotResult\n\n\ndef make_snapshot() -> Snapshot:\n screenshot = Screenshot()\n screenshot.data = \"\"\n\n return Snapshot(\n id=uuid.uuid4(),\n url=f\"http://example.com/\",\n submitted_url=f\"http://example.com\",\n status=200,\n hostname=\"example.com\",\n ip_address=\"1.1.1.1\",\n asn=\"AS15133 MCI Communications Services, Inc. d/b/a Verizon Business\",\n server=\"ECS (sjc/4E5D)\",\n content_type=\"text/html; charset=UTF-8\",\n content_length=1256,\n headers={},\n body=\"foo bar\",\n sha256=\"fbc1a9f858ea9e177916964bd88c3d37b91a1e84412765e29950777f265c4b75\",\n screenshot=screenshot,\n whois=\"foo\",\n request={},\n created_at=datetime.datetime.now(),\n )\n\n\nasync def make_snapshot_result() -> SnapshotResult:\n screenshot = Screenshot()\n screenshot.data = \"\"\n\n return SnapshotResult(\n snapshot=Snapshot(\n id=uuid.uuid4(),\n url=f\"http://example.com/\",\n submitted_url=f\"http://example.com\",\n status=200,\n hostname=\"example.com\",\n ip_address=\"1.1.1.1\",\n asn=\"AS15133 MCI Communications Services, Inc. d/b/a Verizon Business\",\n server=\"ECS (sjc/4E5D)\",\n content_type=\"text/html; charset=UTF-8\",\n content_length=1256,\n headers={},\n body=\"foo bar\",\n sha256=\"fbc1a9f858ea9e177916964bd88c3d37b91a1e84412765e29950777f265c4b75\",\n screenshot=\"yoyo\",\n whois=\"foo\",\n request={},\n created_at=datetime.datetime.now(),\n ),\n screenshot=screenshot,\n scripts=[],\n )\n\n\nasync def first_rule_id() -> UUID:\n rule = await Rule.all().first()\n return rule.id\n\n\nasync def first_snapshot_id() -> UUID:\n snapshot = await Snapshot.all().first()\n return snapshot.id\n"
},
{
"alpha_fraction": 0.7142857313156128,
"alphanum_fraction": 0.7647058963775635,
"avg_line_length": 16,
"blob_id": "36b561d0722068353432174e83bb3503873c5d8c",
"content_id": "e06d5c136429fa737de25c4b904cd7f02962916d",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Dockerfile",
"length_bytes": 119,
"license_type": "permissive",
"max_line_length": 53,
"num_lines": 7,
"path": "/mysql/Dockerfile",
"repo_name": "Chaos-Monkey-Island/uzen",
"src_encoding": "UTF-8",
"text": "FROM mysql:5.7\n\nEXPOSE 3306\n\nENV MYSQL_ALLOW_EMPTY_PASSWORD yes\n\nCOPY ./initdb.d/init.sql /docker-entrypoint-initdb.d/\n"
},
{
"alpha_fraction": 0.7356608510017395,
"alphanum_fraction": 0.7880299091339111,
"avg_line_length": 56.28571319580078,
"blob_id": "e5f71bfa30e7c761abd247f8bae12a8d34a0e4f8",
"content_id": "4069e81fc6cf2a9ad69935ee839b18d70cf98ec6",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 401,
"license_type": "permissive",
"max_line_length": 68,
"num_lines": 7,
"path": "/uzen/models/__init__.py",
"repo_name": "Chaos-Monkey-Island/uzen",
"src_encoding": "UTF-8",
"text": "from uzen.models.classifications import Classification # noqa: F401\nfrom uzen.models.dns_records import DnsRecord # noqa: F401\nfrom uzen.models.matches import Match # noqa: F401\nfrom uzen.models.rules import Rule # noqa: F401\nfrom uzen.models.screenshots import Screenshot # noqa: F401\nfrom uzen.models.scripts import Script # noqa: F401\nfrom uzen.models.snapshots import Snapshot # noqa: F401\n"
},
{
"alpha_fraction": 0.6553936004638672,
"alphanum_fraction": 0.6915451884269714,
"avg_line_length": 19.66265106201172,
"blob_id": "b22f72cdd0d33904e2b5f3e0596fdd58a490c464",
"content_id": "a0c609341b842c43829b202fba6699bab27a3683",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Dockerfile",
"length_bytes": 1715,
"license_type": "permissive",
"max_line_length": 121,
"num_lines": 83,
"path": "/Dockerfile",
"repo_name": "Chaos-Monkey-Island/uzen",
"src_encoding": "UTF-8",
"text": "# build env\nFROM node:14-alpine as build\n\nCOPY ./frontend /frontend\nWORKDIR /frontend\nRUN npm install && npm run build && rm -rf node_modules\n\n# prod env\nFROM python:3.8-slim-buster\n\nRUN apt-get update \\\n && apt-get install -y \\\n # Install dependencies for puppeteer\n # Ref. https://github.com/puppeteer/puppeteer/blob/master/docs/troubleshooting.md#chrome-headless-doesnt-launch-on-unix\n fonts-liberation \\\n libappindicator3-1 \\\n libasound2 \\\n libatk-bridge2.0-0 \\\n libatk1.0-0 \\\n libc6 \\\n libcairo2 \\\n libcups2 \\\n libdbus-1-3 \\\n libexpat1 \\\n libfontconfig1 \\\n libgbm1 \\\n libgcc1 \\\n libglib2.0-0 \\\n libgtk-3-0 \\\n libnspr4 \\\n libnss3 \\\n libpango-1.0-0 \\\n libpangocairo-1.0-0 \\\n libstdc++6 \\\n libx11-6 \\\n libx11-xcb1 \\\n libxcb1 \\\n libxcomposite1 \\\n libxcursor1 \\\n libxdamage1 \\\n libxext6 \\\n libxfixes3 \\\n libxi6 \\\n libxrandr2 \\\n libxrender1 \\\n libxss1 \\\n libxtst6 \\\n lsb-release \\\n wget \\\n xdg-utils \\\n # Install dependencies for YARA\n # Ref. https://yara.readthedocs.io/en/latest/gettingstarted.html\n automake \\\n libtool \\\n make \\\n gcc \\\n pkg-config \\\n # Install dependencies for Uzen\n dnsutils \\\n procps \\\n && apt-get clean \\\n && rm -rf /var/lib/apt/lists/*\n\nWORKDIR /app\n\nCOPY pyproject.toml /app\nCOPY poetry.lock /app\nCOPY .env.sample /app/.env\nCOPY uzen /app/uzen\nCOPY --from=build /frontend /app/frontend\n\nRUN pip3 install poetry && poetry config virtualenvs.create false && poetry install --no-dev\n\nENV PLAYWRIGHT_BROWSERS_PATH /app/playwright\n\nRUN mkdir -p /app/playwright && python -m playwright install\nRUN rm -rf /app/playwright/webkit-* && rm -rf /app/playwright/firefox-*\n\nENV PORT 8000\n\nEXPOSE $PORT\n\nCMD uvicorn --host 0.0.0.0 --port $PORT uzen:app\n"
},
{
"alpha_fraction": 0.6514925360679626,
"alphanum_fraction": 0.6673134565353394,
"avg_line_length": 30.904762268066406,
"blob_id": "8433b6708ae951e91abbf4e99391425022019f4b",
"content_id": "587ec6a23dbb65a9c4b40b81e502c96a133695b2",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6700,
"license_type": "permissive",
"max_line_length": 85,
"num_lines": 210,
"path": "/tests/apis/test_snapshots.py",
"repo_name": "Chaos-Monkey-Island/uzen",
"src_encoding": "UTF-8",
"text": "import json\n\nimport pytest\n\nfrom tests.utils import first_snapshot_id, make_snapshot_result\nfrom uzen.models.snapshots import Snapshot\nfrom uzen.services.browser import Browser\n\n\[email protected]\[email protected](\"snapshots_setup\")\nasync def test_snapshot_search(client):\n count = await Snapshot.all().count()\n response = await client.get(\"/api/snapshots/search\")\n json = response.json()\n snapshots = json.get(\"results\")\n assert len(snapshots) == count\n\n response = await client.get(\n \"/api/snapshots/search\", params={\"hostname\": \"example.com\"}\n )\n json = response.json()\n snapshots = json.get(\"results\")\n assert len(snapshots) == count\n\n response = await client.get(\"/api/snapshots/search\", params={\"server\": \"ECS\"})\n json = response.json()\n snapshots = json.get(\"results\")\n assert len(snapshots) == count\n\n response = await client.get(\n \"/api/snapshots/search\", params={\"from_at\": \"1970-01-01T15:53:00+05:00\"}\n )\n json = response.json()\n snapshots = json.get(\"results\")\n assert len(snapshots) == count\n\n response = await client.get(\n \"/api/snapshots/search\", params={\"from_at\": \"1970-01-01\"}\n )\n json = response.json()\n snapshots = json.get(\"results\")\n assert len(snapshots) == count\n\n response = await client.get(\n \"/api/snapshots/search\", params={\"to_at\": \"3000-01-01T15:53:00+05:00\"}\n )\n json = response.json()\n snapshots = json.get(\"results\")\n assert len(snapshots) == count\n\n # it doesn't match any snapshot\n response = await client.get(\"/api/snapshots/search\", params={\"server\": \"Tomcat\"})\n json = response.json()\n snapshots = json.get(\"results\")\n assert len(snapshots) == 0\n\n # it doesn't match any snapshot\n response = await client.get(\"/api/snapshots/search\", params={\"status\": 404})\n json = response.json()\n snapshots = json.get(\"results\")\n assert len(snapshots) == 0\n\n\[email protected]\[email protected](\"snapshots_setup\")\nasync def test_snapshot_list_with_size(client):\n payload = {\"size\": 1}\n response = await client.get(\"/api/snapshots/search\", params=payload)\n json = response.json()\n snapshots = json.get(\"results\")\n assert len(snapshots) == 1\n first = snapshots[0]\n assert first.get(\"url\") == \"http://example10.com/\"\n\n\[email protected]\[email protected](\"snapshots_setup\")\nasync def test_snapshot_list_with_offset_and_size(client):\n payload = {\"offset\": 0, \"size\": 1}\n response = await client.get(\"/api/snapshots/search\", params=payload)\n json = response.json()\n snapshots = json.get(\"results\")\n assert len(snapshots) == 1\n\n offset = 0\n size = 10\n payload = {\"offset\": offset, \"size\": size}\n response = await client.get(\"/api/snapshots/search\", params=payload)\n json = response.json()\n snapshots = json.get(\"results\")\n assert len(snapshots) == size - offset\n first = snapshots[0]\n assert first.get(\"url\") == f\"http://example{size - offset}.com/\"\n\n offset = 5\n size = 100000\n payload = {\"offset\": offset, \"size\": size}\n response = await client.get(\"/api/snapshots/search\", params=payload)\n json = response.json()\n snapshots = json.get(\"results\")\n assert len(snapshots) == await Snapshot.all().count() - offset\n first = snapshots[0]\n assert first.get(\"url\") == f\"http://example{offset}.com/\"\n\n\[email protected]\nasync def test_snapshot_post_without_url(client):\n payload = {}\n response = await client.post(\"/api/snapshots/\", data=json.dumps(payload))\n assert response.status_code == 422\n\n\[email 
protected]\nasync def test_snapshot_post_with_invalid_url(client):\n payload = {\"url\": \"foo\"}\n response = await client.post(\"/api/snapshots/\", data=json.dumps(payload))\n assert response.status_code == 422\n\n\ndef mock_take_snapshot(*args, **kwargs):\n return make_snapshot_result()\n\n\[email protected]\nasync def test_snapshot_post(client, monkeypatch):\n monkeypatch.setattr(Browser, \"take_snapshot\", mock_take_snapshot)\n\n payload = {\"url\": \"http://example.com\"}\n response = await client.post(\"/api/snapshots/\", data=json.dumps(payload))\n\n assert response.status_code == 201\n\n snapshot = response.json()\n assert snapshot.get(\"url\") == \"http://example.com/\"\n assert snapshot.get(\"body\") == \"foo bar\"\n\n snapshot = await Snapshot.get(id=snapshot.get(\"id\"))\n await snapshot.fetch_related(\"_scripts\")\n assert len(snapshot.scripts) == 0\n\n\[email protected]\[email protected](\"snapshots_setup\")\nasync def test_snapshot_get(client):\n id_ = await first_snapshot_id()\n response = await client.get(f\"/api/snapshots/{id_}\")\n assert response.status_code == 200\n assert response.json().get(\"screenshot\") is None\n\n id_ = await first_snapshot_id()\n response = await client.get(\n f\"/api/snapshots/{id_}\", params={\"include_screenshot\": True}\n )\n assert response.status_code == 200\n assert response.json().get(\"screenshot\") is not None\n json = response.json()\n assert json.get(\"screenshot\", {}).get(\"data\") == \"\"\n\n\[email protected]\[email protected](\"dns_records_setup\")\nasync def test_snapshot_get_with_dns_records(client):\n id_ = await first_snapshot_id()\n response = await client.get(f\"/api/snapshots/{id_}\")\n assert response.status_code == 200\n\n snapshot = response.json()\n assert len(snapshot.get(\"dnsRecords\")) == 1\n assert len(snapshot.get(\"scripts\")) == 0\n assert len(snapshot.get(\"classifications\")) == 0\n\n\[email protected]\[email protected](\"classifications_setup\")\nasync def test_snapshot_get_with_classifications(client):\n id_ = await first_snapshot_id()\n response = await client.get(f\"/api/snapshots/{id_}\")\n assert response.status_code == 200\n\n snapshot = response.json()\n assert len(snapshot.get(\"classifications\")) == 1\n assert len(snapshot.get(\"dnsRecords\")) == 0\n assert len(snapshot.get(\"scripts\")) == 0\n\n\[email protected]\[email protected](\"scripts_setup\")\nasync def test_snapshot_get_with_scripts(client):\n id_ = await first_snapshot_id()\n response = await client.get(f\"/api/snapshots/{id_}\")\n assert response.status_code == 200\n\n snapshot = response.json()\n assert len(snapshot.get(\"scripts\")) == 1\n assert len(snapshot.get(\"classifications\")) == 0\n assert len(snapshot.get(\"dnsRecords\")) == 0\n\n\[email protected]\[email protected](\"snapshots_setup\")\nasync def test_count(client):\n count = await Snapshot.all().count()\n\n response = await client.get(f\"/api/snapshots/count\")\n assert response.status_code == 200\n\n json = response.json()\n count_ = json.get(\"count\")\n assert count == count_\n"
},
{
"alpha_fraction": 0.6588447690010071,
"alphanum_fraction": 0.666064977645874,
"avg_line_length": 21.15999984741211,
"blob_id": "d7867673c560a995d44d1ded1957b754af3544bb",
"content_id": "7592065eba555f09d3154f4ee209778825d4b06c",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 554,
"license_type": "permissive",
"max_line_length": 58,
"num_lines": 25,
"path": "/tests/apis/test_domain.py",
"repo_name": "Chaos-Monkey-Island/uzen",
"src_encoding": "UTF-8",
"text": "import pytest\n\nfrom uzen.services.whois import Whois\n\n\ndef mock_whois(hostname: str):\n return \"foo\"\n\n\[email protected]\nasync def test_get(client, monkeypatch):\n monkeypatch.setattr(Whois, \"whois\", mock_whois)\n\n hostname = \"example.com\"\n response = await client.get(f\"/api/domain/{hostname}\")\n assert response.status_code == 200\n\n json = response.json()\n assert json.get(\"hostname\") == hostname\n\n snapshots = json.get(\"snapshots\", [])\n assert len(snapshots) == 0\n\n whois = json.get(\"whois\", \"\")\n assert whois == whois\n"
},
{
"alpha_fraction": 0.6341072916984558,
"alphanum_fraction": 0.6397117972373962,
"avg_line_length": 22.129629135131836,
"blob_id": "edbf8ce0d643ae84db852ffcf983c862a32d9e17",
"content_id": "5787890dd2aec9d0f64d3711fd0df6dc692c0522",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1249,
"license_type": "permissive",
"max_line_length": 78,
"num_lines": 54,
"path": "/uzen/services/utils.py",
"repo_name": "Chaos-Monkey-Island/uzen",
"src_encoding": "UTF-8",
"text": "\"\"\"Helper utilities and decorators.\"\"\"\nimport hashlib\nimport socket\nfrom typing import Optional\nfrom urllib.parse import urlparse\n\nfrom uzen.services.rdap import RDAP\n\n\ndef get_hostname_from_url(url: str) -> Optional[str]:\n \"\"\"Get a hostname from a URL\n\n Arguments:\n url {str} -- URL\n\n Returns:\n Optional[str] -- A hostname, returns None if an invalid input is given\n \"\"\"\n parsed = urlparse(url)\n if parsed.hostname == \"\":\n return None\n return parsed.hostname\n\n\ndef get_ip_address_by_hostname(hostname: str) -> Optional[str]:\n \"\"\"Get an IP address by a hostname\n\n Arguments:\n hostname {str} -- Hostname\n\n Returns:\n Optional[str] -- An IP address, returns None if an error occurs\n \"\"\"\n try:\n return socket.gethostbyname(hostname)\n except OSError:\n return None\n\n\ndef get_asn_by_ip_address(ip_address: str) -> Optional[str]:\n \"\"\"Get ASN by an IP address\n\n Arguments:\n ip_address {str} -- IP address\n\n Returns:\n Optional[str] -- ASN as a string, returns None if an error occurs\n \"\"\"\n res = RDAP.lookup(ip_address)\n return res.get(\"asn\")\n\n\ndef calculate_sha256(s: str) -> str:\n return hashlib.sha256(s.encode(\"utf-8\")).hexdigest()\n"
},
{
"alpha_fraction": 0.5979999899864197,
"alphanum_fraction": 0.6389999985694885,
"avg_line_length": 24.64102554321289,
"blob_id": "5c94fc0596e565bb342eedc7fe6d661d8d586dbb",
"content_id": "c6684dce55690218f2f101b2a8c17e2467a527b2",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1000,
"license_type": "permissive",
"max_line_length": 69,
"num_lines": 39,
"path": "/tests/services/test_utils.py",
"repo_name": "Chaos-Monkey-Island/uzen",
"src_encoding": "UTF-8",
"text": "import socket\n\nimport pytest\nimport vcr\n\nfrom uzen.services.utils import (\n get_asn_by_ip_address,\n get_hostname_from_url,\n get_ip_address_by_hostname,\n)\n\n\[email protected]_cassette(\"tests/fixtures/vcr_cassettes/ip_address.yaml\")\ndef test_get_asn_by_ip_address():\n asn = get_asn_by_ip_address(\"1.1.1.1\")\n assert asn == \"AS13335\"\n\n\[email protected](\n \"hostname,expected\",\n [\n pytest.param(\"http://example.com\", \"example.com\"),\n pytest.param(\"http://1.1.1.1\", \"1.1.1.1\"),\n pytest.param(\"http://127.0.0.1:8080\", \"127.0.0.1\"),\n pytest.param(\"example.com\", None),\n ],\n)\ndef test_get_hostname_from_url(hostname, expected):\n assert get_hostname_from_url(hostname) == expected\n\n\ndef test_get_ip_address_by_hostname(monkeypatch):\n def mockreturn(arg):\n if arg == \"one.one.one.one\":\n return \"1.1.1.1\"\n\n monkeypatch.setattr(socket, \"gethostbyname\", mockreturn)\n\n assert get_ip_address_by_hostname(\"one.one.one.one\") == \"1.1.1.1\"\n"
},
{
"alpha_fraction": 0.6812705397605896,
"alphanum_fraction": 0.6856517195701599,
"avg_line_length": 28.45161247253418,
"blob_id": "59f5935f26da1fe1e2398e4ff9eb9ed6d46a5dc2",
"content_id": "b899cb9c6273c4bc30f3c8a14d4cf5ec5a5dacb7",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "TypeScript",
"length_bytes": 913,
"license_type": "permissive",
"max_line_length": 63,
"num_lines": 31,
"path": "/frontend/src/components/mixins/highlight.ts",
"repo_name": "Chaos-Monkey-Island/uzen",
"src_encoding": "UTF-8",
"text": "import hljs from \"highlight.js/lib/core\";\nimport javascript from \"highlight.js/lib/languages/javascript\";\nimport json from \"highlight.js/lib/languages/json\";\nimport xml from \"highlight.js/lib/languages/xml\";\nimport Vue from \"vue\";\nimport { Mixin } from \"vue-mixin-decorator\";\n\nimport yara from \"@/hljs/yara\";\n// register highlight languages\nhljs.registerLanguage(\"javascript\", javascript);\nhljs.registerLanguage(\"json\", json);\nhljs.registerLanguage(\"xml\", xml);\nhljs.registerLanguage(\"yara\", yara);\n\n@Mixin\nexport class HighlightMixin extends Vue {\n highlightCodeBlocks() {\n if (this.$el.textContent === \"\") {\n // do nothing when $el is empty\n return;\n }\n\n this.$el.querySelectorAll(\"pre code\").forEach((block) => {\n hljs.highlightBlock(block);\n const parent = block.parentElement;\n if (parent !== null) {\n parent.style.backgroundColor = \"#282b2e\";\n }\n });\n }\n}\n"
},
{
"alpha_fraction": 0.7099236845970154,
"alphanum_fraction": 0.7099236845970154,
"avg_line_length": 28.11111068725586,
"blob_id": "ee7432c55525bd40cba08762eb2756c3090795e6",
"content_id": "1420633a1e1c246c89bcf8a5976c93f2031c31c1",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 262,
"license_type": "permissive",
"max_line_length": 63,
"num_lines": 9,
"path": "/uzen/services/searchers/utils.py",
"repo_name": "Chaos-Monkey-Island/uzen",
"src_encoding": "UTF-8",
"text": "from datetime import date, datetime\nfrom typing import Union, cast\n\n\ndef convert_to_datetime(d: Union[datetime, date]) -> datetime:\n if isinstance(d, datetime):\n return cast(datetime, d)\n\n return datetime.combine(cast(date, d), datetime.min.time())\n"
},
{
"alpha_fraction": 0.699999988079071,
"alphanum_fraction": 0.699999988079071,
"avg_line_length": 21.272727966308594,
"blob_id": "60bae910e29bf918af257e405578186555f85129",
"content_id": "92d8f8e4c2a7265f1397fb395f0e8c948b335b8f",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "TypeScript",
"length_bytes": 490,
"license_type": "permissive",
"max_line_length": 51,
"num_lines": 22,
"path": "/frontend/src/main.ts",
"repo_name": "Chaos-Monkey-Island/uzen",
"src_encoding": "UTF-8",
"text": "import \"@fortawesome/fontawesome-free/css/all.css\";\nimport \"@fortawesome/fontawesome-free/js/all.js\";\nimport \"@mdi/font/css/materialdesignicons.css\";\nimport \"buefy/dist/buefy.css\";\n\nimport Buefy from \"buefy\";\nimport Vue from \"vue\";\n\nimport App from \"@/App.vue\";\nimport router from \"@/router\";\nimport { truncate } from \"@/utils/truncate\";\n\nVue.use(Buefy);\n\nVue.config.productionTip = false;\n\nVue.filter(\"truncate\", truncate);\n\nnew Vue({\n router,\n render: (h) => h(App),\n}).$mount(\"#app\");\n"
},
{
"alpha_fraction": 0.6028537750244141,
"alphanum_fraction": 0.6300039887428284,
"avg_line_length": 27.508474349975586,
"blob_id": "9a27a5cff9fd588a179ac5e69a939a5c845a2c54",
"content_id": "9d4d138a65fcd7d41ab17587fea815a51c5353b2",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5046,
"license_type": "permissive",
"max_line_length": 86,
"num_lines": 177,
"path": "/tests/conftest.py",
"repo_name": "Chaos-Monkey-Island/uzen",
"src_encoding": "UTF-8",
"text": "import datetime\nfrom typing import List\nfrom uuid import UUID\n\nimport httpx\nimport pytest\nfrom starlette.config import environ\nfrom tortoise import Tortoise\nfrom tortoise.backends.base.config_generator import generate_config\nfrom tortoise.exceptions import DBConnectionError\n\nfrom uzen import create_app\nfrom uzen.core import settings\nfrom uzen.models.classifications import Classification\nfrom uzen.models.dns_records import DnsRecord\nfrom uzen.models.matches import Match\nfrom uzen.models.rules import Rule\nfrom uzen.models.screenshots import Screenshot\nfrom uzen.models.scripts import Script\nfrom uzen.models.snapshots import Snapshot\n\n\[email protected]\nasync def client():\n app = create_app()\n async with httpx.AsyncClient(app=app, base_url=\"http://testserver\") as client:\n yield client\n\n\ndef get_db_config(app_label: str, db_url: str, modules: List[str]) -> dict:\n return generate_config(\n db_url,\n app_modules={app_label: modules},\n testing=True,\n connection_label=app_label,\n )\n\n\[email protected](autouse=True)\nasync def tortoise_db():\n db_url = environ.get(\"TORTOISE_TEST_DB\", \"sqlite://:memory:\")\n config = get_db_config(\n app_label=\"models\", db_url=db_url, modules=settings.APP_MODELS,\n )\n try:\n await Tortoise.init(config)\n await Tortoise._drop_databases()\n except DBConnectionError:\n pass\n\n await Tortoise.init(config, _create_db=True)\n await Tortoise.generate_schemas()\n\n yield\n\n await Tortoise.close_connections()\n\n\[email protected]\nasync def snapshots_setup(client):\n for i in range(1, 11):\n snapshot = Snapshot(\n url=f\"http://example{i}.com/\",\n submitted_url=f\"http://example{i}.com\",\n status=200,\n hostname=\"example.com\",\n ip_address=\"1.1.1.1\",\n asn=\"AS15133 MCI Communications Services, Inc. 
d/b/a Verizon Business\",\n server=\"ECS (sjc/4E5D)\",\n content_type=\"text/html; charset=UTF-8\",\n content_length=1256,\n headers={},\n body=\"foo bar\",\n sha256=\"fbc1a9f858ea9e177916964bd88c3d37b91a1e84412765e29950777f265c4b75\",\n whois=\"foo\",\n request={},\n created_at=datetime.datetime.now(),\n )\n await snapshot.save()\n\n screenshot = Screenshot()\n screenshot.data = \"\"\n\n screenshot.snapshot_id = snapshot.id\n await screenshot.save()\n\n\[email protected]\nasync def scripts_setup(client, snapshots_setup):\n snapshot_ids = await Snapshot().all().values_list(\"id\", flat=True)\n for id_ in snapshot_ids:\n script = Script(\n snapshot_id=id_,\n url=f\"http://example{id_}.com/test.js\",\n content=\"foo bar\",\n sha256=\"fbc1a9f858ea9e177916964bd88c3d37b91a1e84412765e29950777f265c4b75\",\n created_at=datetime.datetime.now(),\n )\n await script.save()\n\n\[email protected]\nasync def dns_records_setup(client, snapshots_setup):\n snapshot_ids = await Snapshot().all().values_list(\"id\", flat=True)\n for id_ in snapshot_ids:\n record = DnsRecord(\n snapshot_id=id_,\n value=f\"1.1.1.1\",\n type=\"A\",\n created_at=datetime.datetime.now(),\n )\n await record.save()\n\n\[email protected]\nasync def classifications_setup(client, snapshots_setup):\n snapshot_ids = await Snapshot().all().values_list(\"id\", flat=True)\n for id_ in snapshot_ids:\n classification = Classification(\n snapshot_id=id_,\n name=\"test\",\n malicious=True,\n created_at=datetime.datetime.now(),\n )\n await classification.save()\n\n\[email protected]\nasync def rules_setup(client):\n for i in range(1, 6):\n rule = Rule(\n name=f\"test{i}\",\n target=\"body\",\n source='rule foo: bar {strings: $a = \"lmn\" condition: $a}',\n created_at=datetime.datetime.now(),\n )\n await rule.save()\n\n\[email protected]\nasync def matches_setup(client, snapshots_setup, rules_setup):\n snapshot_ids = await Snapshot().all().values_list(\"id\", flat=True)\n rules_ids = await Rule().all().values_list(\"id\", flat=True)\n zipped = zip(snapshot_ids, rules_ids)\n\n for (snapshot_id, rule_id) in list(zipped):\n match = Match(\n snapshot_id=snapshot_id,\n rule_id=rule_id,\n matches=\"[]\",\n created_at=datetime.datetime.now(),\n )\n await match.save()\n\n\[email protected]\nasync def first_rule_id(client, rules_setup) -> UUID:\n rule = await Rule.all().first()\n return rule.id\n\n\[email protected]\nasync def first_snapshot_id(client, snapshots_setup) -> UUID:\n snapshot = await Snapshot.all().first()\n return snapshot.id\n\n\[email protected]\ndef patch_datetime_now(monkeypatch):\n FAKE_TIME = datetime.datetime(2020, 12, 25, 17, 5, 55)\n\n class mydatetime:\n @classmethod\n def now(cls):\n return FAKE_TIME\n\n monkeypatch.setattr(datetime, \"datetime\", mydatetime)\n"
},
{
"alpha_fraction": 0.5494912266731262,
"alphanum_fraction": 0.5568917393684387,
"avg_line_length": 31.75757598876953,
"blob_id": "a0438bf6f4fb8543066e488c13bb7e1e6ba4fb69",
"content_id": "8a29eaf6abd1b67885e623a51c229ea3e4bf190c",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3243,
"license_type": "permissive",
"max_line_length": 87,
"num_lines": 99,
"path": "/uzen/services/urlscan.py",
"repo_name": "Chaos-Monkey-Island/uzen",
"src_encoding": "UTF-8",
"text": "import base64\nimport datetime\nfrom typing import cast\n\nimport httpx\n\nfrom uzen.models.screenshots import Screenshot\nfrom uzen.models.snapshots import Snapshot\nfrom uzen.schemas.utils import SnapshotResult\n\n\nclass URLScan:\n HOST = \"urlscan.io\"\n BASE_URL = f\"https://{HOST}\"\n\n def __init__(self, uuid: str):\n self.uuid = uuid\n\n async def body(self) -> str:\n url = f\"{self.BASE_URL}/dom/{self.uuid}/\"\n async with httpx.AsyncClient() as client:\n r = await client.get(url)\n r.raise_for_status()\n return r.text\n\n async def screenshot(self) -> str:\n url = f\"{self.BASE_URL}/screenshots/{self.uuid}.png\"\n async with httpx.AsyncClient() as client:\n r = await client.get(url)\n r.raise_for_status()\n return str(base64.b64encode(r.content), \"utf-8\")\n\n async def result(self) -> dict:\n url = f\"{self.BASE_URL}/api/v1/result/{self.uuid}/\"\n async with httpx.AsyncClient() as client:\n r = await client.get(url)\n r.raise_for_status()\n return cast(dict, r.json())\n\n @classmethod\n async def import_as_snapshot(cls, uuid: str) -> SnapshotResult:\n \"\"\"Import urlscan.io scan as a snapshot\n\n Arguments:\n uuid {str} -- Scan ID\n\n Returns:\n Snapshot -- Snapshot ORM instance\n \"\"\"\n instance = cls(uuid)\n result = await instance.result()\n\n requests = result.get(\"data\", {}).get(\"requests\", [])\n response = {}\n for request in requests:\n tmp = request.get(\"response\", {}).get(\"response\", {})\n if tmp.get(\"status\") == 200:\n response = tmp\n break\n\n url = result.get(\"page\", {}).get(\"url\")\n submitted_url = result.get(\"task\", {}).get(\"url\")\n hostname = result.get(\"page\", {}).get(\"domain\")\n ip_address = result.get(\"page\", {}).get(\"ip\")\n asn = result.get(\"page\", {}).get(\"asn\")\n asnname = result.get(\"page\", {}).get(\"asnname\")\n\n headers = response.get(\"headers\", {})\n server = result.get(\"page\", {}).get(\"server\")\n content_type = headers.get(\"Content-Type\") or headers.get(\"content-type\")\n content_length = headers.get(\"Content-Length\") or headers.get(\"content-length\")\n\n body = await instance.body()\n sha256 = result.get(\"lists\", {}).get(\"hashes\", [])[0]\n time = cast(str, result.get(\"task\", {}).get(\"time\"))\n created_at = datetime.datetime.strptime(time, \"%Y-%m-%dT%H:%M:%S.%fZ\")\n\n snapshot = Snapshot(\n url=url,\n submitted_url=submitted_url,\n status=200,\n hostname=hostname,\n ip_address=ip_address,\n asn=f\"{asn} {asnname}\",\n server=server,\n content_type=content_type,\n content_length=content_length,\n headers=headers,\n body=body,\n sha256=sha256,\n created_at=created_at,\n request={\"urlscan.io\": uuid},\n )\n\n data = await instance.screenshot()\n screenshot = Screenshot()\n screenshot.data = data\n\n return SnapshotResult(screenshot=screenshot, snapshot=snapshot, scripts=[])\n"
},
{
"alpha_fraction": 0.6774030327796936,
"alphanum_fraction": 0.6792579889297485,
"avg_line_length": 34.2976188659668,
"blob_id": "0fd5b3fe00b64653dc44b9e790afde718e1606b9",
"content_id": "8d17c7b94e58a70cee24ff3c896722407efa955d",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5930,
"license_type": "permissive",
"max_line_length": 93,
"num_lines": 168,
"path": "/uzen/schemas/snapshots.py",
"repo_name": "Chaos-Monkey-Island/uzen",
"src_encoding": "UTF-8",
"text": "import datetime\nfrom typing import List, Optional, Union, cast\nfrom uuid import UUID\n\nfrom fastapi_utils.api_model import APIModel\nfrom pydantic import AnyHttpUrl, Field, IPvAnyAddress, validator\n\nfrom uzen.schemas.base import AbstractBaseModel\nfrom uzen.schemas.classifications import BaseClassification, Classification\nfrom uzen.schemas.common import Source, Target\nfrom uzen.schemas.dns_records import BaseDnsRecord, DnsRecord\nfrom uzen.schemas.mixins import TimestampMixin\nfrom uzen.schemas.screenshots import BaseScreenshot, Screenshot\nfrom uzen.schemas.scripts import BaseScript, Script\nfrom uzen.schemas.search import BaseSearchResults\nfrom uzen.services.utils import get_hostname_from_url, get_ip_address_by_hostname\n\n# Declare rules related schemas here to prevent circular reference\n\n\ndef remove_sharp_and_question_from_tail(v: str) -> str:\n return v.rstrip(\"#|?\")\n\n\nclass BaseRule(Source, Target):\n \"\"\"Base Pydantic model for Rule\n\n Note that this model doesn't have \"id\" and \"created_at\" fields.\n \"\"\"\n\n name: str = Field(..., title=\"Name\", description=\"A name of the YARA rule\")\n\n\nclass Rule(BaseRule, AbstractBaseModel, TimestampMixin):\n \"\"\"Full Pydantic model for Rule\"\"\"\n\n updated_at: datetime.datetime\n snapshots: List[\"Snapshot\"] = Field(\n ...,\n title=\"Snapshots\",\n description=\"A list of matched snapshots. It contains only the latest 10 snapshots.\",\n )\n\n\nclass BasicAttributes(APIModel):\n url: AnyHttpUrl = Field(..., title=\"URL\", description=\"A URL of the snapshot\")\n submitted_url: AnyHttpUrl = Field(\n ..., title=\"Submitted URL\", description=\"A submitted URL of the snapshot\"\n )\n hostname: str = Field(..., title=\"Hostname\", description=\"Hostname\")\n ip_address: IPvAnyAddress = Field(..., title=\"IP address\", description=\"IP address\")\n asn: str = Field(..., title=\"ASN\", description=\"AS number\")\n server: Optional[str] = Field(None, title=\"Server\", description=\"Server header\")\n content_type: Optional[str] = Field(\n None, title=\"Content type\", description=\"Content type\"\n )\n status: int = Field(..., title=\"Status\", description=\"Status code\")\n content_length: Optional[int] = Field(\n None, title=\"Content length\", description=\"Content length\"\n )\n body: str = Field(..., title=\"Body\", description=\"HTTP response body\")\n sha256: str = Field(\n ..., title=\"SHA256\", description=\"SHA256 hash of HTTP response body\"\n )\n\n @validator(\n \"url\", pre=True,\n )\n def normalize_url(cls, v: str):\n return remove_sharp_and_question_from_tail(v)\n\n @validator(\n \"submitted_url\", pre=True,\n )\n def normalize_submitted_url(cls, v: str):\n return remove_sharp_and_question_from_tail(v)\n\n\nclass BaseSnapshot(BasicAttributes):\n \"\"\"Base Pydantic model of Snapshot\n\n Note that this model doesn't have \"id\" and \"created_at\" fields.\n \"\"\"\n\n headers: dict = Field(..., title=\"Headers\", description=\"HTTP response headers\")\n whois: Optional[str] = Field(None, title=\"Whois\", description=\"Whois record\")\n certificate: Optional[str] = Field(\n None, title=\"Certiricate\", description=\"Certificate record\"\n )\n request: dict = Field(..., title=\"Request\", description=\"Meta data of HTTP request\")\n processing: bool = Field(\n ...,\n title=\"Processing\",\n description=\"A boolean flag to show a status of background tasks\",\n )\n\n scripts: List[Union[Script, BaseScript]] = Field(\n ..., title=\"Scripts\", description=\"A list of scripts\"\n )\n dns_records: 
List[Union[DnsRecord, BaseDnsRecord]] = Field(\n ..., title=\"DNS records\", description=\"A list of DNS records\"\n )\n classifications: List[Union[Classification, BaseClassification]] = Field(\n ..., title=\"Classifications\", description=\"A list of classifications\"\n )\n rules: List[Rule] = Field(..., title=\"Rules\", description=\"A list of matched rules\")\n\n screenshot: Optional[Union[Screenshot, BaseScreenshot]] = Field(\n None, title=\"Screenshot\", description=\"Screenshot\"\n )\n\n\nclass Snapshot(BaseSnapshot, AbstractBaseModel, TimestampMixin):\n \"\"\"Pydantic model of Snapshot\"\"\"\n\n\nclass SimplifiedSnapshot(BasicAttributes, AbstractBaseModel, TimestampMixin):\n \"\"\"Simplified version of Pydantic model of Snapshot\"\"\"\n\n @classmethod\n def field_keys(cls) -> List[str]:\n return list(cls.__fields__.keys())\n\n\nclass SearchResults(BaseSearchResults):\n results: Union[List[SimplifiedSnapshot], List[UUID]]\n\n\nclass CountResponse(APIModel):\n count: int = Field(\n None,\n title=\"A number of snapshots\",\n description=\"A number of snapshots matched with filters\",\n )\n\n\nclass CreateSnapshotPayload(APIModel):\n url: AnyHttpUrl = Field(..., title=\"URL\", description=\"A URL to take a snapshot\")\n user_agent: Optional[str] = Field(\n None, title=\"User agent\", description=\"Specific user agent to use\"\n )\n timeout: Optional[int] = Field(\n None, title=\"Timeout\", description=\"Maximum time to wait for in seconds\"\n )\n ignore_https_errors: Optional[bool] = Field(\n None, title=\"Ignore HTTPS erros\", description=\"Whether to ignore HTTPS errors\"\n )\n accept_language: Optional[str] = Field(\n None, title=\"Accept language\", description=\"Accept-Language HTTP header\"\n )\n referer: Optional[str] = Field(\n None, title=\"Referer\", description=\"Referer HTTP header\"\n )\n host: Optional[str] = Field(\n None, title=\"Host\", description=\"Host HTTP header (it only works with HTTPX)\"\n )\n\n @validator(\"url\")\n def hostname_must_resolvable(cls, v):\n hostname = cast(str, get_hostname_from_url(v))\n ip_address = get_ip_address_by_hostname(hostname)\n if ip_address is None:\n raise ValueError(f\"Cannot resolve hostname: {hostname}.\")\n return v\n\n\n# Update foward references\nRule.update_forward_refs()\n"
},
{
"alpha_fraction": 0.7103347778320312,
"alphanum_fraction": 0.7234352231025696,
"avg_line_length": 33.349998474121094,
"blob_id": "95bafb2bf62178708c27d879c45729221120d088",
"content_id": "52afac22f17cb6fc25e9a160583b53cfe790ec13",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 687,
"license_type": "permissive",
"max_line_length": 87,
"num_lines": 20,
"path": "/uzen/schemas/scripts.py",
"repo_name": "Chaos-Monkey-Island/uzen",
"src_encoding": "UTF-8",
"text": "from fastapi_utils.api_model import APIModel\nfrom pydantic import AnyHttpUrl, Field\n\nfrom uzen.schemas.base import AbstractBaseModel\nfrom uzen.schemas.mixins import TimestampMixin\n\n\nclass BaseScript(APIModel):\n \"\"\"Base Pydantic model for Script\n\n Note that this model doesn't have \"id\" and \"created_at\" fields.\n \"\"\"\n\n url: AnyHttpUrl = Field(..., title=\"URL\", description=\"A URL of the script\")\n content: str = Field(..., title=\"Content\", description=\"A content of the script\")\n sha256: str = Field(..., title=\"SHA256\", description=\"A SHA256 hash of the script\")\n\n\nclass Script(BaseScript, AbstractBaseModel, TimestampMixin):\n \"\"\"Full Pydantic model for Snapshot\"\"\"\n"
},
{
"alpha_fraction": 0.5594082474708557,
"alphanum_fraction": 0.5709662437438965,
"avg_line_length": 33.8870964050293,
"blob_id": "6938af4f10bf6824cd0b51411502b1550782fc41",
"content_id": "13947a978449504bb58dd7e85cc2af7746792580",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4326,
"license_type": "permissive",
"max_line_length": 136,
"num_lines": 124,
"path": "/uzen/services/fake_browser.py",
"repo_name": "Chaos-Monkey-Island/uzen",
"src_encoding": "UTF-8",
"text": "from typing import List, Optional, cast\n\nimport httpx\n\nfrom uzen.models.screenshots import Screenshot\nfrom uzen.models.scripts import Script\nfrom uzen.models.snapshots import Snapshot\nfrom uzen.schemas.utils import SnapshotResult\nfrom uzen.services.certificate import Certificate\nfrom uzen.services.utils import (\n calculate_sha256,\n get_asn_by_ip_address,\n get_hostname_from_url,\n get_ip_address_by_hostname,\n)\nfrom uzen.services.whois import Whois\nfrom uzen.tasks.scripts import ScriptTask\n\nDEFAULT_UA = \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36\"\nDEFAULT_AL = \"en-US\"\nDEFAULT_REFERER = \"\"\n\n\nclass FakeBrowser:\n @staticmethod\n async def take_snapshot(\n url: str,\n accept_language: Optional[str] = None,\n host: Optional[str] = None,\n ignore_https_errors: bool = False,\n referer: Optional[str] = None,\n timeout: Optional[int] = None,\n user_agent: Optional[str] = None,\n ) -> SnapshotResult:\n \"\"\"Take a snapshot of a website by httpx\n\n Arguments:\n url {str} -- A URL of a website\n\n Keyword Arguments:\n accept_language {Optional[str]} -- Accept-language header to use (default: {None})\n host {Optional[str]} -- Host header to use (default: {None})\n ignore_https_errors {bool} -- Whether to ignore HTTPS errors (default: {False})\n referer {Optional[str]} -- Referer header to use (default: {None})\n timeout {Optional[int]} -- Maximum time to wait for in seconds (default: {None})\n user_agent {Optional[str]} -- User-agent header to use (default: {None})\n\n Returns:\n SnapshotResult\n \"\"\"\n submitted_url: str = url\n verify = not ignore_https_errors\n\n try:\n # default timeout = 30 seconds\n timeout = int(timeout / 1000) if timeout is not None else 30\n\n headers = {\n \"user-agent\": user_agent or DEFAULT_UA,\n \"accept-language\": accept_language or DEFAULT_AL,\n \"referer\": referer or DEFAULT_REFERER,\n }\n if host is not None:\n headers[\"host\"] = host\n\n async with httpx.AsyncClient(verify=verify) as client:\n res = await client.get(\n url, headers=headers, timeout=timeout, allow_redirects=True,\n )\n\n request = {\n \"accept_language\": accept_language,\n \"browser\": \"httpx\",\n \"host\": host,\n \"ignore_https_errors\": ignore_https_errors,\n \"referer\": referer,\n \"timeout\": timeout,\n \"user_agent\": user_agent,\n }\n\n url = str(res.url)\n status = res.status_code\n body = res.text\n sha256 = calculate_sha256(body)\n headers = {k.lower(): v for (k, v) in res.headers.items()}\n except httpx.HTTPError as e:\n raise (e)\n\n server = headers.get(\"server\")\n content_type = headers.get(\"content-type\")\n content_length = headers.get(\"content-length\")\n\n hostname = cast(str, get_hostname_from_url(url))\n certificate = Certificate.load_and_dump_from_url(url)\n ip_address = cast(str, get_ip_address_by_hostname(hostname))\n asn = get_asn_by_ip_address(ip_address) or \"\"\n whois = Whois.whois(hostname)\n\n snapshot = Snapshot(\n url=url,\n submitted_url=submitted_url,\n status=status,\n body=body,\n sha256=sha256,\n headers=headers,\n hostname=hostname,\n ip_address=ip_address,\n asn=asn,\n server=server,\n content_length=content_length,\n content_type=content_type,\n whois=whois,\n certificate=certificate,\n request=request,\n )\n screenshot = Screenshot()\n screenshot.data = \"\"\n\n # get scripts\n scripts = cast(\n List[Script], await ScriptTask.process(snapshot, insert_to_db=False)\n )\n\n return SnapshotResult(screenshot=screenshot, snapshot=snapshot, 
scripts=scripts)\n"
},
{
"alpha_fraction": 0.7644787430763245,
"alphanum_fraction": 0.7644787430763245,
"avg_line_length": 27.77777862548828,
"blob_id": "1bd51bdcf06266b973ddb38b5016532d25abe6b7",
"content_id": "a97f39f836281688e405c208193c22385e6df05e",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 518,
"license_type": "permissive",
"max_line_length": 87,
"num_lines": 18,
"path": "/uzen/schemas/search.py",
"repo_name": "Chaos-Monkey-Island/uzen",
"src_encoding": "UTF-8",
"text": "from dataclasses import dataclass\nfrom typing import List, Type, Union\nfrom uuid import UUID\n\nfrom fastapi_utils.api_model import APIModel\nfrom pydantic import BaseModel, Field\nfrom tortoise.models import Model\n\n\nclass BaseSearchResults(APIModel):\n total: int = Field(..., title=\"total\", description=\"Total count of search results\")\n\n\n# TODO: Use Pydantic model instead of dataclass\n@dataclass\nclass SearchResults:\n total: int\n results: Union[List[Type[Model]], List[Type[BaseModel]], List[dict], List[UUID]]\n"
},
{
"alpha_fraction": 0.6066176295280457,
"alphanum_fraction": 0.6076680421829224,
"avg_line_length": 25.082191467285156,
"blob_id": "a81fc12e08d64b16f4e51b9a325729b530b57894",
"content_id": "49e2f2c2284855d385f381ab2aabbed3ca108987",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1904,
"license_type": "permissive",
"max_line_length": 88,
"num_lines": 73,
"path": "/uzen/factories/dns_records.py",
"repo_name": "Chaos-Monkey-Island/uzen",
"src_encoding": "UTF-8",
"text": "from functools import partial\nfrom typing import List\n\nimport aiometer\nfrom dns.asyncresolver import Resolver\nfrom dns.exception import DNSException\n\nfrom uzen.models.dns_records import DnsRecord\nfrom uzen.models.snapshots import Snapshot\nfrom uzen.schemas.dns_records import BaseDnsRecord\n\nTYPES: List[str] = [\"A\", \"AAAA\", \"CNAME\", \"MX\", \"NS\", \"PTR\", \"TXT\"]\n\n\nasync def resolve(\n resolver: Resolver,\n hostname: str,\n rdtype=\"A\",\n rdclass=\"IN\",\n tcp=False,\n source=None,\n raise_on_no_answer=True,\n source_port=0,\n lifetime=None,\n) -> List[BaseDnsRecord]:\n try:\n answer = await resolver.resolve(\n hostname,\n rdtype,\n rdclass,\n tcp,\n source,\n raise_on_no_answer,\n source_port,\n lifetime,\n True,\n )\n return [BaseDnsRecord(type=rdtype, value=str(rr)) for rr in answer]\n except DNSException:\n return []\n\n\nasync def query(hostname: str) -> List[BaseDnsRecord]:\n \"\"\"Quqery DNS records\n\n Arguments:\n hostname {str} -- A hostname to query\n\n Returns:\n List[BaseDnsRecord] -- A list of DNS records\n \"\"\"\n resolver = Resolver()\n tasks = [partial(resolve, resolver, hostname, record_type) for record_type in TYPES]\n results = await aiometer.run_all(tasks)\n return sum(results, [])\n\n\nclass DnsRecordFactory:\n @staticmethod\n async def from_snapshot(snapshot: Snapshot) -> List[DnsRecord]:\n return [\n DnsRecord(\n type=record.type,\n value=record.value,\n # insert a dummy ID if a snapshot doesn't have ID\n snapshot_id=snapshot.id or -1,\n )\n for record in await query(snapshot.hostname)\n ]\n\n @staticmethod\n async def from_hostname(hostname: str) -> List[BaseDnsRecord]:\n return await query(hostname)\n"
},
{
"alpha_fraction": 0.6612903475761414,
"alphanum_fraction": 0.6646859049797058,
"avg_line_length": 24.06382942199707,
"blob_id": "c98fee8f4d1533bc6abd867647a2596852eac1f1",
"content_id": "0bf63a0335e6a2b4cfc39f0429b1e9475ec5e1e8",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1178,
"license_type": "permissive",
"max_line_length": 67,
"num_lines": 47,
"path": "/tests/tasks/test_matches.py",
"repo_name": "Chaos-Monkey-Island/uzen",
"src_encoding": "UTF-8",
"text": "import pytest\n\nfrom tests.utils import first_snapshot_id\nfrom uzen.models.matches import Match\nfrom uzen.models.rules import Rule\nfrom uzen.models.snapshots import Snapshot\nfrom uzen.tasks.matches import MatchinbgTask\n\n\[email protected]\[email protected](\"snapshots_setup\")\nasync def test_matching_taskl(client):\n rule = Rule(\n name=\"test\",\n target=\"body\",\n source='rule foo: bar {strings: $a = \"foo\" condition: $a}',\n )\n await rule.save()\n\n id_ = await first_snapshot_id()\n snapshot = await Snapshot.get(id=id_)\n\n assert await Match.all().count() == 0\n\n await MatchinbgTask.process(snapshot)\n\n assert await Match.all().count() == 1\n\n\[email protected]\[email protected](\"snapshots_setup\")\nasync def test_matching_task_with_zero_matches(client):\n rule = Rule(\n name=\"test\",\n target=\"whois\",\n source='rule foo: bar {strings: $a = \"bar\" condition: $a}',\n )\n await rule.save()\n\n id_ = await first_snapshot_id()\n snapshot = await Snapshot.get(id=id_)\n\n assert await Match.all().count() == 0\n\n await MatchinbgTask.process(snapshot)\n\n assert await Match.all().count() == 0\n"
},
{
"alpha_fraction": 0.6393244862556458,
"alphanum_fraction": 0.6393244862556458,
"avg_line_length": 30.884614944458008,
"blob_id": "c0a1957b0006b89d3f04853e1410afc1d12fb094",
"content_id": "55184af79fb181121bfce3182e4fe0fd8abd411c",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 829,
"license_type": "permissive",
"max_line_length": 71,
"num_lines": 26,
"path": "/uzen/factories/ip_address.py",
"repo_name": "Chaos-Monkey-Island/uzen",
"src_encoding": "UTF-8",
"text": "from uzen.models.snapshots import Snapshot\nfrom uzen.schemas.ip_address import IPAddressInformation\nfrom uzen.services.rdap import RDAP\nfrom uzen.services.whois import Whois\n\n\nclass IPAddressInformationFactory:\n @staticmethod\n async def from_ip_address(ip_address: str) -> IPAddressInformation:\n res = RDAP.lookup(ip_address)\n whois = Whois.whois(ip_address)\n snapshots = await Snapshot.find_by_ip_address(ip_address)\n\n ip_address = ip_address\n asn = res.get(\"asn\", \"\")\n country = res.get(\"country\", \"\")\n description = res.get(\"description\", \"\")\n\n return IPAddressInformation(\n asn=asn,\n country=country,\n description=description,\n ip_address=ip_address,\n snapshots=snapshots,\n whois=whois,\n )\n"
},
{
"alpha_fraction": 0.7116863131523132,
"alphanum_fraction": 0.7162993550300598,
"avg_line_length": 30.467741012573242,
"blob_id": "7769b93917ba53010351a8851a6ec0250f1e1f69",
"content_id": "34b1b7b29fc29707d83c0dfe779e81877a10f8ca",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3902,
"license_type": "permissive",
"max_line_length": 86,
"num_lines": 124,
"path": "/uzen/api/endpoints/snapshots.py",
"repo_name": "Chaos-Monkey-Island/uzen",
"src_encoding": "UTF-8",
"text": "from typing import List, Optional, cast\nfrom uuid import UUID\n\nfrom fastapi import APIRouter, BackgroundTasks, Depends, HTTPException\nfrom tortoise.exceptions import DoesNotExist\n\nfrom uzen.api.dependencies.snapshots import SearchFilters\nfrom uzen.core.exceptions import TakeSnapshotError\nfrom uzen.models.snapshots import Snapshot\nfrom uzen.schemas.snapshots import (\n CreateSnapshotPayload,\n SearchResults,\n SimplifiedSnapshot,\n)\nfrom uzen.schemas.snapshots import Snapshot as SnapshotModel\nfrom uzen.schemas.utils import CountResponse\nfrom uzen.services.searchers.snapshots import SnapshotSearcher\nfrom uzen.services.snapshot import save_snapshot, take_snapshot\nfrom uzen.tasks.enrichment import EnrichmentTasks\nfrom uzen.tasks.matches import MatchinbgTask\nfrom uzen.tasks.snapshots import UpdateProcessingTask\n\nrouter = APIRouter()\n\n\[email protected](\n \"/count\",\n response_model=CountResponse,\n response_description=\"Returns a count of snapshots\",\n summary=\"Count snapshots\",\n description=\"Get a count of snapshots\",\n status_code=200,\n)\nasync def count() -> CountResponse:\n count = await Snapshot.count()\n return CountResponse(count=count)\n\n\[email protected](\n \"/search\",\n response_model=SearchResults,\n response_description=\"Returns a list of matched snapshots\",\n summary=\"Search snapshots\",\n description=\"Searcn snapshtos with filters\",\n)\nasync def search(\n size: Optional[int] = None,\n offset: Optional[int] = None,\n filters: SearchFilters = Depends(),\n) -> SearchResults:\n results = await SnapshotSearcher.search(vars(filters), size=size, offset=offset)\n snapshots = cast(List[SimplifiedSnapshot], results.results)\n return SearchResults(results=snapshots, total=results.total)\n\n\[email protected](\n \"/{snapshot_id}\",\n response_model=SnapshotModel,\n response_description=\"Returns a snapshot\",\n summary=\"Get a snapshot\",\n description=\"Get a snapshot which has a given id\",\n)\nasync def get(snapshot_id: UUID, include_screenshot: bool = False) -> SnapshotModel:\n try:\n snapshot: Snapshot = await Snapshot.get_by_id(snapshot_id, include_screenshot)\n except DoesNotExist:\n raise HTTPException(\n status_code=404, detail=f\"Snapshot:{snapshot_id} is not found\"\n )\n\n model = cast(SnapshotModel, snapshot.to_model())\n return model\n\n\[email protected](\n \"/\",\n response_model=SnapshotModel,\n response_description=\"Returns a created snapshot\",\n summary=\"Create a snapshot\",\n description=\"Create a snapshot of a website by using puppeteer\",\n status_code=201,\n)\nasync def create(\n payload: CreateSnapshotPayload, background_tasks: BackgroundTasks\n) -> SnapshotModel:\n try:\n result = await take_snapshot(\n url=payload.url,\n accept_language=payload.accept_language,\n host=payload.host,\n ignore_https_errors=payload.ignore_https_errors,\n referer=payload.referer,\n timeout=payload.timeout,\n user_agent=payload.user_agent,\n )\n except TakeSnapshotError as e:\n raise HTTPException(status_code=500, detail=str(e))\n\n snapshot = await save_snapshot(result)\n\n background_tasks.add_task(EnrichmentTasks.process, snapshot)\n background_tasks.add_task(MatchinbgTask.process, snapshot)\n background_tasks.add_task(UpdateProcessingTask.process, snapshot)\n\n model = cast(SnapshotModel, snapshot.to_model())\n return model\n\n\[email protected](\n \"/{snapshot_id}\",\n response_description=\"Returns an empty JSON\",\n summary=\"Delete a snapshot\",\n description=\"Delete a snapshot which has a given ID\",\n status_code=204,\n)\nasync 
def delete(snapshot_id: UUID) -> dict:\n try:\n await Snapshot.delete_by_id(snapshot_id)\n except DoesNotExist:\n raise HTTPException(\n status_code=404, detail=f\"Snapshot:{snapshot_id} is not found\"\n )\n\n return {}\n"
},
{
"alpha_fraction": 0.6076086759567261,
"alphanum_fraction": 0.6119565367698669,
"avg_line_length": 29.66666603088379,
"blob_id": "217179848bb9fb5eba238c17021620855123af5c",
"content_id": "2fd0f87705d96b101eeb86823ebe7b89eeaa9f4a",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 920,
"license_type": "permissive",
"max_line_length": 98,
"num_lines": 30,
"path": "/uzen/services/certificate.py",
"repo_name": "Chaos-Monkey-Island/uzen",
"src_encoding": "UTF-8",
"text": "import ssl\nfrom typing import Optional\nfrom urllib.parse import urlparse\n\nfrom OpenSSL import crypto\n\n\nclass Certificate:\n @staticmethod\n def load_and_dump_from_url(url: str) -> Optional[str]:\n \"\"\"Load and dump a certficate as a string from a URL\n\n Arguments:\n url {str} -- A URL of a website\n\n Returns:\n Optional[str] -- A certificate as a string, returns None if it is not an HTTPS website\n \"\"\"\n parsed = urlparse(url)\n if parsed.scheme != \"https\":\n return None\n\n hostname = parsed.netloc\n try:\n cert_pem = ssl.get_server_certificate((hostname, 443))\n cert = crypto.load_certificate(crypto.FILETYPE_PEM, cert_pem)\n dump = crypto.dump_certificate(crypto.FILETYPE_TEXT, cert)\n return dump.decode(encoding=\"utf-8\")\n except (ssl.SSLError, ValueError):\n return None\n"
},
{
"alpha_fraction": 0.6723940372467041,
"alphanum_fraction": 0.6723940372467041,
"avg_line_length": 33.91999816894531,
"blob_id": "f4f7b7d44d531dbf5ddde383fb9d1d933417b775",
"content_id": "28fb8bbf4ceaadd4d576173b9ba7d138723d8b8e",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 873,
"license_type": "permissive",
"max_line_length": 88,
"num_lines": 25,
"path": "/uzen/schemas/ip_address.py",
"repo_name": "Chaos-Monkey-Island/uzen",
"src_encoding": "UTF-8",
"text": "from typing import List, Optional\n\nfrom fastapi_utils.api_model import APIModel\nfrom pydantic import Field, IPvAnyAddress\n\nfrom uzen.schemas.snapshots import Snapshot\n\n\nclass IPAddressInformation(APIModel):\n \"\"\"Pydantic model for IP informaiton\"\"\"\n\n ip_address: IPvAnyAddress = Field(\n ..., title=\"IP address\", description=\"An IP address\"\n )\n country: str = Field(..., title=\"Country\", description=\"A country of an IP address\")\n asn: str = Field(..., title=\"ASN\", description=\"An ASN of an IP address\")\n description: str = Field(\n ..., title=\"Description\", description=\"A dectiption of an IP address\"\n )\n whois: Optional[str] = Field(\n None, title=\"Whois\", description=\"A whois record of an IP address\"\n )\n snapshots: List[Snapshot] = Field(\n ..., title=\"Snapshots\", description=\"A list of related snapshots\"\n )\n"
},
{
"alpha_fraction": 0.6825284361839294,
"alphanum_fraction": 0.6878551244735718,
"avg_line_length": 27.73469352722168,
"blob_id": "1e1a082a54548af8a046e755413604e2110243b6",
"content_id": "6bf1b5e9a282293fcf3499ce31aa44d68cbf16e6",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2816,
"license_type": "permissive",
"max_line_length": 83,
"num_lines": 98,
"path": "/uzen/api/endpoints/rules.py",
"repo_name": "Chaos-Monkey-Island/uzen",
"src_encoding": "UTF-8",
"text": "from typing import Optional\nfrom uuid import UUID\n\nfrom fastapi import APIRouter, Depends, HTTPException\nfrom tortoise.exceptions import DoesNotExist\n\nfrom uzen.api.dependencies.rules import SearchFilters\nfrom uzen.models.rules import Rule\nfrom uzen.schemas.rules import CreateRulePayload\nfrom uzen.schemas.rules import Rule as RuleModel\nfrom uzen.schemas.rules import SearchResults, UpdateRulePayload\nfrom uzen.services.searchers.rules import RuleSearcher\n\nrouter = APIRouter()\n\n\[email protected](\n \"/search\",\n response_model=SearchResults,\n response_description=\"Returns a list of matched rules\",\n summary=\"Search rules\",\n description=\"Searcn rules with filters\",\n)\nasync def search(\n size: Optional[int] = None,\n offset: Optional[int] = None,\n filters: SearchFilters = Depends(),\n) -> SearchResults:\n return await RuleSearcher.search(vars(filters), size=size, offset=offset)\n\n\[email protected](\n \"/{rule_id}\",\n response_model=RuleModel,\n response_description=\"Returns a rule\",\n summary=\"Get a rule\",\n description=\"Get a rule which has a given id\",\n)\nasync def get(rule_id: UUID) -> RuleModel:\n try:\n rule = await Rule.get_by_id(rule_id)\n except DoesNotExist:\n raise HTTPException(status_code=404, detail=f\"Rule:{rule_id} is not found\")\n\n return rule.to_model()\n\n\[email protected](\n \"/{rule_id}\",\n response_model=RuleModel,\n response_description=\"Returns a rule\",\n summary=\"Update a rule\",\n description=\"Update a rule which has a given id\",\n)\nasync def put(rule_id: UUID, payload: UpdateRulePayload) -> RuleModel:\n try:\n rule = await Rule.get(id=rule_id)\n if payload.name is not None:\n rule.name = payload.name\n if payload.target is not None:\n rule.target = payload.target\n if payload.source is not None:\n rule.source = payload.source\n await rule.save()\n except DoesNotExist:\n raise HTTPException(status_code=404, detail=f\"Rule:{rule_id} is not found\")\n\n return rule.to_model()\n\n\[email protected](\n \"/\",\n response_model=RuleModel,\n response_description=\"Returns a created rule\",\n summary=\"Create a rule\",\n description=\"Create a rule\",\n status_code=201,\n)\nasync def create(payload: CreateRulePayload) -> RuleModel:\n rule = Rule(name=payload.name, target=payload.target, source=payload.source)\n await rule.save()\n return rule.to_model()\n\n\[email protected](\n \"/{rule_id}\",\n response_description=\"Returns an empty JSON\",\n summary=\"Delete a rule\",\n description=\"Delete a rule which has a given ID\",\n status_code=204,\n)\nasync def delete(rule_id: UUID) -> dict:\n try:\n await Rule.delete_by_id(rule_id)\n except DoesNotExist:\n raise HTTPException(status_code=404, detail=f\"Rule:{rule_id} is not found\")\n\n return {}\n"
},
{
"alpha_fraction": 0.5469798445701599,
"alphanum_fraction": 0.5469798445701599,
"avg_line_length": 16.829059600830078,
"blob_id": "8eb5044b286f0811f7e22de2da147a626389abc8",
"content_id": "d5305fef615c6b6856551e55cad810f9c3ca2324",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "TypeScript",
"length_bytes": 2086,
"license_type": "permissive",
"max_line_length": 46,
"num_lines": 117,
"path": "/frontend/src/router/index.ts",
"repo_name": "Chaos-Monkey-Island/uzen",
"src_encoding": "UTF-8",
"text": "import Vue from \"vue\";\nimport VueRouter from \"vue-router\";\n\nimport Bulk from \"@/views/Bulk.vue\";\nimport Domain from \"@/views/Domain.vue\";\nimport EditRule from \"@/views/EditRule.vue\";\nimport Home from \"@/views/Home.vue\";\nimport Import from \"@/views/Import.vue\";\nimport IPAddress from \"@/views/IPAddress.vue\";\nimport Matches from \"@/views/Matches.vue\";\nimport Rule from \"@/views/Rule.vue\";\nimport Rules from \"@/views/Rules.vue\";\nimport Snapshot from \"@/views/Snapshot.vue\";\nimport Snapshots from \"@/views/Snapshots.vue\";\nimport Yara from \"@/views/Yara.vue\";\n\nVue.use(VueRouter);\n\nconst routes = [\n {\n path: \"/\",\n name: \"Home\",\n component: Home,\n meta: {\n title: \"Uzen\",\n },\n },\n {\n path: \"/snapshots\",\n name: \"Snapshots\",\n component: Snapshots,\n meta: {\n title: \"Snapshots - Uzen\",\n },\n },\n {\n path: \"/snapshots/:id\",\n name: \"Snapshot\",\n component: Snapshot,\n props: true,\n },\n {\n path: \"/yara\",\n name: \"Yara\",\n component: Yara,\n meta: {\n title: \"YARA - Uzen\",\n },\n },\n {\n path: \"/import\",\n name: \"Import\",\n component: Import,\n meta: {\n title: \"Import - Uzen\",\n },\n },\n {\n path: \"/rules\",\n name: \"Rules\",\n component: Rules,\n meta: {\n title: \"Rules - Uzen\",\n },\n },\n {\n path: \"/rules/:id\",\n name: \"Rule\",\n component: Rule,\n },\n {\n path: \"/rules/edit/:id\",\n name: \"EditRule\",\n component: EditRule,\n meta: {\n title: \"Edit a rule - Uzen\",\n },\n },\n {\n path: \"/matches\",\n name: \"Matches\",\n component: Matches,\n meta: {\n title: \"Matches - Uzen\",\n },\n },\n {\n path: \"/ip_address/:ipAddress\",\n name: \"IP address\",\n component: IPAddress,\n },\n {\n path: \"/domain/:hostname\",\n name: \"Domain\",\n component: Domain,\n },\n {\n path: \"/bulk\",\n name: \"Bulk\",\n component: Bulk,\n meta: {\n title: \"Bulk - Uzen\",\n },\n },\n];\n\nconst router = new VueRouter({\n routes,\n});\n\nrouter.beforeEach((to, _from, next) => {\n document.title = to.meta.title || \"\";\n\n next();\n});\n\nexport default router;\n"
},
{
"alpha_fraction": 0.8181818127632141,
"alphanum_fraction": 0.8181818127632141,
"avg_line_length": 21,
"blob_id": "8747d018412b4de103604172cbea736bb1fc31a4",
"content_id": "0872dad31e40e406c6c58046c76ce767177bb512",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "SQL",
"length_bytes": 22,
"license_type": "permissive",
"max_line_length": 21,
"num_lines": 1,
"path": "/mysql/initdb.d/init.sql",
"repo_name": "Chaos-Monkey-Island/uzen",
"src_encoding": "UTF-8",
"text": "CREATE DATABASE uzen;\n"
},
{
"alpha_fraction": 0.6809079051017761,
"alphanum_fraction": 0.6835781335830688,
"avg_line_length": 23.96666717529297,
"blob_id": "4d75a4c5794becd56c6cc0a88d4c07abd9106dc7",
"content_id": "36c091ce715f80a1bd72de0a3beca6550a100529",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 749,
"license_type": "permissive",
"max_line_length": 72,
"num_lines": 30,
"path": "/tests/services/test_rule_matcher.py",
"repo_name": "Chaos-Monkey-Island/uzen",
"src_encoding": "UTF-8",
"text": "import datetime\nimport uuid\n\nimport pytest\n\nfrom uzen.models.rules import Rule\nfrom uzen.models.snapshots import Snapshot\nfrom uzen.services.rule_matcher import RuleMatcher\n\n\[email protected]\nasync def rule_setup(client):\n rule = Rule(\n id=uuid.uuid4(),\n name=\"test\",\n target=\"script\",\n source='rule foo: bar {strings: $a = \"foo\" condition: $a}',\n created_at=datetime.datetime.now(),\n )\n await rule.save()\n\n\[email protected]\[email protected](\"rule_setup\")\[email protected](\"scripts_setup\")\nasync def test_scan():\n snapshot = await Snapshot.all().first().prefetch_related(\"_scripts\")\n matcher = RuleMatcher(snapshot)\n results = await matcher.scan()\n assert len(results) == 1\n"
},
{
"alpha_fraction": 0.6878727674484253,
"alphanum_fraction": 0.6955410242080688,
"avg_line_length": 31.009090423583984,
"blob_id": "23bfa10d6c97392579c2a58622f9ec507788d826",
"content_id": "6a4acc72aad3b4df0ff3ca4abb2dda2021bcfb09",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3521,
"license_type": "permissive",
"max_line_length": 87,
"num_lines": 110,
"path": "/tests/services/test_browser.py",
"repo_name": "Chaos-Monkey-Island/uzen",
"src_encoding": "UTF-8",
"text": "import playwright\nimport pytest\nfrom playwright import Error, async_playwright\n\nfrom uzen.core import settings\nfrom uzen.services.browser import Browser, launch_browser\nfrom uzen.services.certificate import Certificate\nfrom uzen.services.rdap import RDAP\nfrom uzen.services.whois import Whois\n\n\ndef mock_lookup(ip_address: str):\n return {\"asn\": \"AS15133\"}\n\n\ndef mock_whois(hostname: str):\n return \"foo\"\n\n\ndef mock_load_and_dump_from_url(url: str):\n return \"Certificate:\"\n\n\[email protected]\nasync def test_take_snapshot(monkeypatch):\n monkeypatch.setattr(RDAP, \"lookup\", mock_lookup)\n monkeypatch.setattr(Whois, \"whois\", mock_whois)\n monkeypatch.setattr(\n Certificate, \"load_and_dump_from_url\", mock_load_and_dump_from_url\n )\n\n result = await Browser.take_snapshot(\"http://example.com\")\n snapshot = result.snapshot\n assert snapshot.url == \"http://example.com/\"\n assert snapshot.submitted_url == \"http://example.com\"\n\n assert snapshot.hostname == \"example.com\"\n assert snapshot.status == 200\n assert snapshot.content_type == \"text/html; charset=UTF-8\"\n assert snapshot.asn == \"AS15133\"\n assert snapshot.whois == \"foo\"\n\n\[email protected]\nasync def test_take_snapshot_with_scripts(monkeypatch):\n monkeypatch.setattr(RDAP, \"lookup\", mock_lookup)\n monkeypatch.setattr(Whois, \"whois\", mock_whois)\n monkeypatch.setattr(\n Certificate, \"load_and_dump_from_url\", mock_load_and_dump_from_url\n )\n\n result = await Browser.take_snapshot(\"https://github.com/\")\n assert len(result.scripts) > 0\n\n\[email protected]\nasync def test_take_snapshot_with_options(monkeypatch):\n monkeypatch.setattr(RDAP, \"lookup\", mock_lookup)\n monkeypatch.setattr(Whois, \"whois\", mock_whois)\n\n result = await Browser.take_snapshot(\"http://example.com\", timeout=10000)\n snapshot = result.snapshot\n assert snapshot.url == \"http://example.com/\"\n\n result = await Browser.take_snapshot(\"http://example.com\", user_agent=\"foo\")\n snapshot = result.snapshot\n assert snapshot.url == \"http://example.com/\"\n\n result = await Browser.take_snapshot(\"http://example.com\", accept_language=\"ja-JP\")\n snapshot = result.snapshot\n assert snapshot.url == \"http://example.com/\"\n\n result = await Browser.take_snapshot(\n \"http://example.com\", timeout=10000, user_agent=\"foo\"\n )\n snapshot = result.snapshot\n assert snapshot.url == \"http://example.com/\"\n\n\[email protected]\nasync def test_take_snapshot_with_bad_ssl(monkeypatch):\n monkeypatch.setattr(RDAP, \"lookup\", mock_lookup)\n monkeypatch.setattr(Whois, \"whois\", mock_whois)\n\n with pytest.raises(Error):\n result = await Browser.take_snapshot(\"https://expired.badssl.com\")\n\n result = await Browser.take_snapshot(\n \"https://expired.badssl.com\", ignore_https_errors=True\n )\n snapshot = result.snapshot\n assert snapshot.url == \"https://expired.badssl.com/\"\n\n\[email protected]\[email protected](10, method=\"thread\")\nasync def test_launch_browser(monkeypatch):\n monkeypatch.setattr(\n \"uzen.core.settings.BROWSER_WS_ENDPOINT\", \"wss://chrome.browserless.io\"\n )\n assert settings.BROWSER_WS_ENDPOINT == \"wss://chrome.browserless.io\"\n\n try:\n async with async_playwright() as p:\n browser = await launch_browser(p)\n assert isinstance(browser, playwright.browser.Browser)\n assert browser.wsEndpoint == \"wss://chrome.browserless.io\"\n await browser.close()\n except Exception:\n pass\n"
},
{
"alpha_fraction": 0.6010470986366272,
"alphanum_fraction": 0.6062827110290527,
"avg_line_length": 30.83333396911621,
"blob_id": "c339aed748eccd5d61c9c100053eaa87ccf18c75",
"content_id": "54aaa01caf00024b1697810dde3c3b42a2283487",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2865,
"license_type": "permissive",
"max_line_length": 89,
"num_lines": 90,
"path": "/uzen/services/searchers/snapshots.py",
"repo_name": "Chaos-Monkey-Island/uzen",
"src_encoding": "UTF-8",
"text": "from typing import List, cast\nfrom uuid import UUID\n\nfrom tortoise.query_utils import Q\n\nfrom uzen.models.snapshots import Snapshot\nfrom uzen.schemas.snapshots import SearchResults, SimplifiedSnapshot\nfrom uzen.services.searchers import AbstractSearcher\nfrom uzen.services.searchers.utils import convert_to_datetime\n\n\nclass SnapshotSearcher(AbstractSearcher):\n @classmethod\n async def search(\n cls, filters: dict, size=None, offset=None, id_only=False,\n ) -> SearchResults:\n \"\"\"Search snapshots\n\n Arguments:\n filters {dict} -- Filters for snapshot search\n\n Keyword Arguments:\n size {[int]} -- Nmber of results returned (default: {None})\n offset {[int]} -- Offset of the first result for pagination (default: {None})\n id_only {bool} -- Whether to return only a list of ids (default: {False})\n\n Returns:\n SearchResults -- A list of simlified snapshots and total count\n \"\"\"\n queries = []\n\n url = filters.get(\"url\")\n if url is not None:\n queries.append(Q(url=url))\n\n status = filters.get(\"status\")\n if status is not None:\n queries.append(Q(status=status))\n\n hostname = filters.get(\"hostname\")\n if hostname is not None:\n queries.append(Q(hostname=hostname))\n\n ip_address = filters.get(\"ip_address\")\n if ip_address is not None:\n queries.append(Q(ip_address=ip_address))\n\n asn = filters.get(\"asn\")\n if asn is not None:\n queries.append(Q(asn=asn))\n\n server = filters.get(\"server\")\n if server is not None:\n queries.append(Q(server__contains=server))\n\n content_type = filters.get(\"content_type\")\n if content_type is not None:\n queries.append(Q(content_type__contains=content_type))\n\n sha256 = filters.get(\"sha256\")\n if sha256 is not None:\n queries.append(Q(sha256=sha256))\n\n from_at = filters.get(\"from_at\")\n if from_at is not None:\n queries.append(Q(created_at__gt=convert_to_datetime(from_at)))\n\n to_at = filters.get(\"to_at\")\n if to_at is not None:\n queries.append(Q(created_at__lt=convert_to_datetime(to_at)))\n\n query = Q(*queries)\n\n # Run search\n instance = cls(\n model=Snapshot, query=query, values=SimplifiedSnapshot.field_keys()\n )\n\n results = await instance._search(size=size, offset=offset, id_only=id_only)\n\n if id_only:\n return SearchResults(\n results=cast(List[UUID], results.results), total=results.total\n )\n\n results_ = cast(List[dict], results.results)\n return SearchResults(\n results=[SimplifiedSnapshot(**result) for result in results_],\n total=results.total,\n )\n"
},
{
"alpha_fraction": 0.6986111402511597,
"alphanum_fraction": 0.6986111402511597,
"avg_line_length": 31.727272033691406,
"blob_id": "c90917d2149724561e78c4f854f83253b8256ce2",
"content_id": "724a90e6cc6c509bec82e7876cc16aca3c741d84",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 720,
"license_type": "permissive",
"max_line_length": 74,
"num_lines": 22,
"path": "/uzen/schemas/domain.py",
"repo_name": "Chaos-Monkey-Island/uzen",
"src_encoding": "UTF-8",
"text": "from typing import List, Optional\n\nfrom fastapi_utils.api_model import APIModel\nfrom pydantic import Field\n\nfrom uzen.schemas.dns_records import BaseDnsRecord\nfrom uzen.schemas.snapshots import Snapshot\n\n\nclass DomainInformation(APIModel):\n \"\"\"Pydantic model for domain informaiton\"\"\"\n\n hostname: str = Field(..., title=\"Hostname\", description=\"A hostname\")\n dns_records: List[BaseDnsRecord] = Field(\n ..., title=\"DNS records\", description=\"A list of DNS records\"\n )\n whois: Optional[str] = Field(\n None, title=\"Whois\", description=\"A whois record of an IP address\"\n )\n snapshots: List[Snapshot] = Field(\n ..., title=\"Snapshots\", description=\"A list of related snapshots\"\n )\n"
},
{
"alpha_fraction": 0.6920199394226074,
"alphanum_fraction": 0.7013715505599976,
"avg_line_length": 31.73469352722168,
"blob_id": "ab61b8981f2f4dd7879b6e92bd8ea926be828860",
"content_id": "8c520ce2f8a22ea51d8dd2539a00406040d1e85f",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1604,
"license_type": "permissive",
"max_line_length": 61,
"num_lines": 49,
"path": "/tests/apis/test_cascade_delete.py",
"repo_name": "Chaos-Monkey-Island/uzen",
"src_encoding": "UTF-8",
"text": "import pytest\n\nfrom tests.utils import first_rule_id, first_snapshot_id\nfrom uzen.models.matches import Match\nfrom uzen.models.rules import Rule\nfrom uzen.models.scripts import Script\nfrom uzen.models.snapshots import Snapshot\n\n\[email protected]\[email protected](\"scripts_setup\")\nasync def test_delete_snapshot_with_scripts(client):\n id_ = await first_snapshot_id()\n snapshot_count = await Snapshot.all().count()\n script_count = await Script.all().count()\n\n response = await client.delete(f\"/api/snapshots/{id_}\")\n assert response.status_code == 204\n\n assert await Snapshot.all().count() == snapshot_count - 1\n assert await Script.all().count() == script_count - 1\n\n\[email protected]\[email protected](\"matches_setup\")\nasync def test_delete_snapshot_with_matches(client):\n id_ = await first_snapshot_id()\n snapshot_count = await Snapshot.all().count()\n match_count = await Match.all().count()\n\n response = await client.delete(f\"/api/snapshots/{id_}\")\n assert response.status_code == 204\n\n assert await Snapshot.all().count() == snapshot_count - 1\n assert await Match.all().count() == match_count - 1\n\n\[email protected]\[email protected](\"matches_setup\")\nasync def test_delete_rule_with_matches(client):\n id_ = await first_rule_id()\n rule_count = await Rule.all().count()\n match_count = await Match.all().count()\n\n response = await client.delete(f\"/api/rules/{id_}\")\n assert response.status_code == 204\n\n assert await Rule.all().count() == rule_count - 1\n assert await Match.all().count() == match_count - 1\n"
},
{
"alpha_fraction": 0.6940132975578308,
"alphanum_fraction": 0.6940132975578308,
"avg_line_length": 27.1875,
"blob_id": "3a575c57791d285587b23d74360b87517ec39f02",
"content_id": "7157af631705d00945a7ed431db2dc34d78d84bc",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 451,
"license_type": "permissive",
"max_line_length": 47,
"num_lines": 16,
"path": "/uzen/tasks/snapshots.py",
"repo_name": "Chaos-Monkey-Island/uzen",
"src_encoding": "UTF-8",
"text": "from uzen.models.snapshots import Snapshot\nfrom uzen.tasks import AbstractTask\n\n\nclass UpdateProcessingTask(AbstractTask):\n def __init__(self, snapshot: Snapshot):\n self.snapshot = snapshot\n\n async def _process(self):\n self.snapshot.processing = False\n await self.snapshot.save()\n\n @classmethod\n async def process(cls, snapshot: Snapshot):\n instance = cls(snapshot)\n return await instance.safe_process()\n"
},
{
"alpha_fraction": 0.5958677530288696,
"alphanum_fraction": 0.6983470916748047,
"avg_line_length": 32.61111068725586,
"blob_id": "e31cdc6752799536558ac7ccb3e2f784d4bd9272",
"content_id": "c92f5a9765092cba109f9a420550c7800d271133",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1210,
"license_type": "permissive",
"max_line_length": 92,
"num_lines": 36,
"path": "/tests/services/test_urlscan.py",
"repo_name": "Chaos-Monkey-Island/uzen",
"src_encoding": "UTF-8",
"text": "import datetime\nimport pathlib\n\nimport pytest\nimport respx\nfrom httpx import Response\n\nfrom uzen.services.urlscan import URLScan\n\npath = pathlib.Path(__file__).parent / \"../fixtures/urlscan.json\"\nfixture = open(path).read()\n\n\[email protected]\[email protected]\nasync def test_urlscan_import():\n respx.get(\n \"https://urlscan.io/api/v1/result/e6d69372-b402-487a-9825-7e25cc15ce41/\",\n ).mock(Response(status_code=200, content=fixture))\n respx.get(\"https://urlscan.io/dom/e6d69372-b402-487a-9825-7e25cc15ce41/\").mock(\n Response(status_code=200, content=\"foo\")\n )\n respx.get(\n \"https://urlscan.io/screenshots/e6d69372-b402-487a-9825-7e25cc15ce41.png\",\n ).mock(Response(status_code=200, content=\"foo\"))\n\n result = await URLScan.import_as_snapshot(\"e6d69372-b402-487a-9825-7e25cc15ce41\")\n snapshot = result.snapshot\n assert snapshot.url == \"https://nnpub.org/\"\n assert snapshot.ip_address == \"162.215.240.128\"\n assert (\n snapshot.server\n == \"Apache/2.4.41 (cPanel) OpenSSL/1.1.1d mod_bwlimited/1.4 Phusion_Passenger/5.3.7\"\n )\n assert snapshot.content_type == \"text/html; charset=utf-8\"\n assert isinstance(snapshot.created_at, datetime.datetime)\n"
},
{
"alpha_fraction": 0.6510695219039917,
"alphanum_fraction": 0.6542780995368958,
"avg_line_length": 30.96581268310547,
"blob_id": "da4b82a31076bda8bd7a3f1a57544bcd49873197",
"content_id": "c92dfa7b804c8f620117b4ce53e9f7a6af557124",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3740,
"license_type": "permissive",
"max_line_length": 87,
"num_lines": 117,
"path": "/uzen/models/snapshots.py",
"repo_name": "Chaos-Monkey-Island/uzen",
"src_encoding": "UTF-8",
"text": "from __future__ import annotations\n\nfrom typing import List, Optional\nfrom uuid import UUID\n\nfrom tortoise import fields\nfrom tortoise.exceptions import NoValuesFetched\n\nfrom uzen.models.base import AbstractBaseModel\nfrom uzen.models.mixins import TimestampMixin\nfrom uzen.schemas.classifications import Classification\nfrom uzen.schemas.dns_records import DnsRecord\nfrom uzen.schemas.rules import Rule\nfrom uzen.schemas.screenshots import Screenshot\nfrom uzen.schemas.scripts import Script\nfrom uzen.schemas.snapshots import Snapshot as SnapshotModel\n\n\nclass Snapshot(TimestampMixin, AbstractBaseModel):\n \"\"\"An ORM class for snapshots table\"\"\"\n\n url = fields.TextField()\n submitted_url = fields.TextField()\n status = fields.IntField()\n hostname = fields.TextField()\n ip_address = fields.CharField(max_length=255)\n asn = fields.TextField()\n server = fields.TextField(null=True)\n content_type = fields.TextField(null=True)\n content_length = fields.IntField(null=True)\n body = fields.TextField()\n sha256 = fields.CharField(max_length=64)\n headers = fields.JSONField()\n whois = fields.TextField(null=True)\n certificate = fields.TextField(null=True)\n request = fields.JSONField()\n processing = fields.BooleanField(default=True)\n\n _screenshot: fields.OneToOneRelation[\"Screenshot\"]\n\n _scripts: fields.ReverseRelation[\"Script\"]\n _dns_records: fields.ReverseRelation[\"DnsRecord\"]\n _classifications: fields.ReverseRelation[\"Classification\"]\n\n _rules: fields.ManyToManyRelation[\"Rule\"] = fields.ManyToManyField(\n \"models.Rule\",\n related_name=\"_snapshots\",\n through=\"matches\",\n forward_key=\"rule_id\",\n backward_key=\"snapshot_id\",\n )\n\n @property\n def screenshot(self) -> Optional[Screenshot]:\n if self._screenshot is not None:\n return self._screenshot.to_model()\n\n return None\n\n @property\n def rules(self) -> List[Rule]:\n try:\n return [rule.to_model() for rule in self._rules]\n except NoValuesFetched:\n return []\n\n @property\n def scripts(self) -> List[Script]:\n try:\n return [script.to_model() for script in self._scripts]\n except NoValuesFetched:\n return []\n\n @property\n def dns_records(self) -> List[DnsRecord]:\n try:\n return [record.to_model() for record in self._dns_records]\n except NoValuesFetched:\n return []\n\n @property\n def classifications(self) -> List[Classification]:\n try:\n return [\n classification.to_model() for classification in self._classifications\n ]\n except NoValuesFetched:\n return []\n\n def to_model(self) -> SnapshotModel:\n return SnapshotModel.from_orm(self)\n\n def to_dict(self) -> dict:\n model = self.to_model()\n return model.dict()\n\n @classmethod\n async def get_by_id(cls, id_: UUID, include_screenshot: bool = False) -> Snapshot:\n if include_screenshot:\n return await cls.get(id=id_).prefetch_related(\n \"_screenshot\", \"_scripts\", \"_dns_records\", \"_classifications\", \"_rules\"\n )\n return await cls.get(id=id_).prefetch_related(\n \"_scripts\", \"_dns_records\", \"_classifications\", \"_rules\"\n )\n\n @classmethod\n async def find_by_ip_address(cls, ip_address: str, size=20) -> List[Snapshot]:\n return await cls.filter(ip_address=ip_address).limit(size)\n\n @classmethod\n async def find_by_hostname(cls, hostname: str, size=20) -> List[Snapshot]:\n return await cls.filter(hostname=hostname).limit(size)\n\n class Meta:\n table = \"snapshots\"\n ordering = [\"-created_at\"]\n"
},
{
"alpha_fraction": 0.750902533531189,
"alphanum_fraction": 0.756317675113678,
"avg_line_length": 30.657142639160156,
"blob_id": "746a8f28222380509b82cde56336d7927fbcc282",
"content_id": "f0978d8df936ef7f9977e7bf458c08d1537ff07d",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1108,
"license_type": "permissive",
"max_line_length": 88,
"num_lines": 35,
"path": "/uzen/api/endpoints/urlscan.py",
"repo_name": "Chaos-Monkey-Island/uzen",
"src_encoding": "UTF-8",
"text": "from typing import cast\n\nimport httpx\nfrom fastapi import APIRouter, BackgroundTasks, HTTPException\n\nfrom uzen.schemas.snapshots import Snapshot\nfrom uzen.services.snapshot import save_snapshot\nfrom uzen.services.urlscan import URLScan\nfrom uzen.tasks.matches import MatchinbgTask\nfrom uzen.tasks.snapshots import UpdateProcessingTask\n\nrouter = APIRouter()\n\n\[email protected](\n \"/{uuid}\",\n response_model=Snapshot,\n response_description=\"Returns an imported snapshot\",\n status_code=201,\n summary=\"Import data from urlscan.io\",\n description=\"Import scan data from urlscan.io as a snapshot\",\n)\nasync def import_from_urlscan(uuid: str, background_tasks: BackgroundTasks) -> Snapshot:\n try:\n result = await URLScan.import_as_snapshot(uuid)\n except httpx.HTTPError:\n raise HTTPException(status_code=404, detail=f\"{uuid} is not found\")\n\n snapshot = await save_snapshot(result)\n\n background_tasks.add_task(MatchinbgTask.process, snapshot)\n background_tasks.add_task(UpdateProcessingTask.process, snapshot)\n\n model = cast(Snapshot, snapshot.to_model())\n return model\n"
},
{
"alpha_fraction": 0.5898305177688599,
"alphanum_fraction": 0.7118644118309021,
"avg_line_length": 12.409090995788574,
"blob_id": "fadc5b1c33ccdc86d57c8cfe7c6927f8cc4745b8",
"content_id": "4d7f90e5a588c66f5c05c8daa8641fe66c023272",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 295,
"license_type": "permissive",
"max_line_length": 40,
"num_lines": 22,
"path": "/migrations/versions/7512c0c3ef65_restart_alembic_migrations.py",
"repo_name": "Chaos-Monkey-Island/uzen",
"src_encoding": "UTF-8",
"text": "\"\"\"restart alembic migrations\n\nRevision ID: 7512c0c3ef65\nRevises: \nCreate Date: 2020-05-24 18:10:12.975412\n\n\"\"\"\n\n\n# revision identifiers, used by Alembic.\nrevision = \"7512c0c3ef65\"\ndown_revision = None\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n pass\n\n\ndef downgrade():\n pass\n"
},
{
"alpha_fraction": 0.75,
"alphanum_fraction": 0.75,
"avg_line_length": 27,
"blob_id": "3a1195dfbfab7e12cc88d92dc14a99f2e95205e2",
"content_id": "567f7eb54d65b54c128fce006599ffb0ad66e8be",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 672,
"license_type": "permissive",
"max_line_length": 78,
"num_lines": 24,
"path": "/uzen/api/endpoints/matches.py",
"repo_name": "Chaos-Monkey-Island/uzen",
"src_encoding": "UTF-8",
"text": "from typing import Optional\n\nfrom fastapi import APIRouter, Depends\n\nfrom uzen.api.dependencies.matches import SearchFilters\nfrom uzen.schemas.matches import SearchResults\nfrom uzen.services.searchers.matches import MatchSearcher\n\nrouter = APIRouter()\n\n\[email protected](\n \"/search\",\n response_model=SearchResults,\n response_description=\"Returns a list of matches\",\n summary=\"Search matches\",\n description=\"Searcn matches with filters\",\n)\nasync def search(\n size: Optional[int] = None,\n offset: Optional[int] = None,\n filters: SearchFilters = Depends(),\n) -> SearchResults:\n return await MatchSearcher.search(vars(filters), size=size, offset=offset)\n"
},
{
"alpha_fraction": 0.6887941360473633,
"alphanum_fraction": 0.6961023211479187,
"avg_line_length": 26.830509185791016,
"blob_id": "c001ddc1d17b2544fbee3fb75df99c59244a89ef",
"content_id": "1d2380ef0cb55d78c5d345ccb3569a82733ef63b",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1642,
"license_type": "permissive",
"max_line_length": 79,
"num_lines": 59,
"path": "/tests/apis/test_screenshots.py",
"repo_name": "Chaos-Monkey-Island/uzen",
"src_encoding": "UTF-8",
"text": "import pytest\n\nfrom uzen.models.screenshots import Screenshot\nfrom uzen.models.snapshots import Snapshot\nfrom uzen.services.browser import Browser\n\n\[email protected]\[email protected](\"snapshots_setup\")\nasync def test_screenshots(client):\n first = await Snapshot.all().first()\n snapshot_id = first.id\n\n response = await client.get(f\"/api/screenshots/{snapshot_id}\")\n assert response.status_code == 200\n\n json = response.json()\n assert json.get(\"data\") == \"\"\n\n\[email protected]\[email protected](\"snapshots_setup\")\nasync def test_screenshots_in_png_format(client):\n first = await Snapshot.all().first()\n snapshot_id = first.id\n\n response = await client.get(\n f\"/api/screenshots/{snapshot_id}\", params={\"output_format\": \"png\"}\n )\n assert response.status_code == 200\n assert response.headers.get(\"content-type\") == \"image/png\"\n\n\nasync def mock_preview(hostname: str):\n s = Screenshot()\n s.data = \"\"\n return s\n\n\[email protected]\nasync def test_preview(client, monkeypatch):\n monkeypatch.setattr(Browser, \"preview\", mock_preview)\n\n response = await client.get(\"/api/screenshots/preview/example.com\")\n assert response.status_code == 200\n\n json = response.json()\n assert json.get(\"data\") == \"\"\n\n\[email protected]\nasync def test_preview_in_png_format(client, monkeypatch):\n monkeypatch.setattr(Browser, \"preview\", mock_preview)\n\n response = await client.get(\n \"/api/screenshots/preview/example.com\", params={\"output_format\": \"png\"}\n )\n assert response.status_code == 200\n assert response.headers.get(\"content-type\") == \"image/png\"\n"
},
{
"alpha_fraction": 0.5567651391029358,
"alphanum_fraction": 0.5567651391029358,
"avg_line_length": 20.433332443237305,
"blob_id": "d0b28ecfeae1cac8150f0e0fc862fa127821c8cc",
"content_id": "207ff08a55059e5b68e2c721e354e8a4f0f63336",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 643,
"license_type": "permissive",
"max_line_length": 88,
"num_lines": 30,
"path": "/uzen/services/whois.py",
"repo_name": "Chaos-Monkey-Island/uzen",
"src_encoding": "UTF-8",
"text": "import socket\nfrom functools import lru_cache\nfrom typing import Optional\n\nimport whois\n\n\nclass Whois:\n @staticmethod\n @lru_cache()\n def whois(hostname: str) -> Optional[str]:\n \"\"\"Perform Whois lookup\n\n Arguments:\n hostname {str} -- Hostname\n\n Returns:\n Optional[str] -- Whois response as a string, returns None if an error occurs\n \"\"\"\n try:\n w = whois.whois(hostname)\n except (\n whois.parser.PywhoisError,\n socket.timeout,\n ConnectionError,\n TimeoutError,\n ):\n return None\n\n return w.text\n"
},
{
"alpha_fraction": 0.7256757020950317,
"alphanum_fraction": 0.7297297120094299,
"avg_line_length": 29.83333396911621,
"blob_id": "c40eab0b510102d95326db3bd4bd5208f8ecb57f",
"content_id": "f85500d20850cc2cf5d0791672542b3650adcb42",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 740,
"license_type": "permissive",
"max_line_length": 78,
"num_lines": 24,
"path": "/uzen/models/classifications.py",
"repo_name": "Chaos-Monkey-Island/uzen",
"src_encoding": "UTF-8",
"text": "from tortoise import fields\n\nfrom uzen.models.base import AbstractBaseModel\nfrom uzen.models.mixins import TimestampMixin\nfrom uzen.schemas.classifications import Classification as ClassificationModel\n\n\nclass Classification(TimestampMixin, AbstractBaseModel):\n name = fields.CharField(max_length=100)\n malicious = fields.BooleanField()\n note = fields.TextField(null=True)\n\n snapshot: fields.ForeignKeyRelation[\"Snapshot\"] = fields.ForeignKeyField(\n \"models.Snapshot\",\n related_name=\"_classifications\",\n to_field=\"id\",\n on_delete=fields.CASCADE,\n )\n\n def to_model(self) -> ClassificationModel:\n return ClassificationModel.from_orm(self)\n\n class Meta:\n table = \"classifications\"\n"
},
{
"alpha_fraction": 0.772020697593689,
"alphanum_fraction": 0.772020697593689,
"avg_line_length": 33.05882263183594,
"blob_id": "b733ed0266144e3dc9b5a7b400d9ae4c8a28f584",
"content_id": "11324f3130417710db457f84516a16bcf69a56b3",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 579,
"license_type": "permissive",
"max_line_length": 76,
"num_lines": 17,
"path": "/uzen/tasks/classifications.py",
"repo_name": "Chaos-Monkey-Island/uzen",
"src_encoding": "UTF-8",
"text": "from typing import List\n\nfrom loguru import logger\n\nfrom uzen.factories.classifications import ClassificationFactory\nfrom uzen.models.classifications import Classification\nfrom uzen.tasks import EnrichmentTask\n\n\nclass ClassificationTask(EnrichmentTask):\n async def _process(self) -> List[Classification]:\n logger.debug(f\"Fetch classifications of {self.snapshot.url}\")\n classifications = ClassificationFactory.from_snapshot(self.snapshot)\n if self.insert_to_db:\n await Classification.bulk_create(classifications)\n\n return classifications\n"
},
{
"alpha_fraction": 0.6429504156112671,
"alphanum_fraction": 0.6468668580055237,
"avg_line_length": 22.9375,
"blob_id": "7e1cf3db0b639994475ac992032c65b2906e09cb",
"content_id": "fd9b5d32dfc706fcbbdd61214644686575c99bc5",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "TypeScript",
"length_bytes": 1532,
"license_type": "permissive",
"max_line_length": 56,
"num_lines": 64,
"path": "/frontend/src/components/mixins/search_form.ts",
"repo_name": "Chaos-Monkey-Island/uzen",
"src_encoding": "UTF-8",
"text": "import dayjs from \"dayjs\";\nimport relativeTime from \"dayjs/plugin/relativeTime\";\nimport timezone from \"dayjs/plugin/timezone\";\nimport utc from \"dayjs/plugin/utc\";\nimport Vue from \"vue\";\nimport { Mixin } from \"vue-mixin-decorator\";\n\ndayjs.extend(relativeTime);\ndayjs.extend(timezone);\ndayjs.extend(utc);\n\n@Mixin\nexport class SearchFormMixin extends Vue {\n DEFAULT_PAGE_SIZE = 10;\n DEFAULT_OFFSET = 0;\n\n count: number | undefined = undefined;\n totalCount = 0;\n size = this.DEFAULT_PAGE_SIZE;\n offset = this.DEFAULT_OFFSET;\n oldestCreatedAt: string | undefined = undefined;\n\n hasCount(): boolean {\n return this.count !== undefined;\n }\n\n hasLoadMore() {\n const count = this.count || 0;\n const total = this.totalCount || 0;\n\n return count < total;\n }\n\n normalizeFilterValue(\n value: string | number | Date\n ): string | number | undefined {\n if (value instanceof Date) {\n return value.toISOString();\n }\n if (typeof value === \"string\") {\n // returns undefined if a value is an empty string\n return value === \"\" ? undefined : value;\n }\n return value;\n }\n\n datetimeFormatter(datetime: Date): string {\n return dayjs(datetime).local().format();\n }\n\n nowDatetime(): string {\n return dayjs().toISOString();\n }\n\n minDatetime(\n a: string | number | undefined,\n b: string | number | undefined\n ): string {\n const c = a === undefined ? dayjs() : dayjs(a);\n const d = b === undefined ? dayjs() : dayjs(b);\n\n return c > d ? d.toISOString() : c.toISOString();\n }\n}\n"
},
{
"alpha_fraction": 0.5931315422058105,
"alphanum_fraction": 0.6047729849815369,
"avg_line_length": 39.904762268066406,
"blob_id": "d37d2585f78eedff88bd38b79588952808566a61",
"content_id": "4fe7854392e4e38aa50df471a9a88515f42e6634",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1718,
"license_type": "permissive",
"max_line_length": 86,
"num_lines": 42,
"path": "/uzen/api/dependencies/snapshots.py",
"repo_name": "Chaos-Monkey-Island/uzen",
"src_encoding": "UTF-8",
"text": "from datetime import date, datetime\nfrom typing import Optional, Union\n\nfrom fastapi import Query\n\n\nclass SearchFilters:\n def __init__(\n self,\n asn: Optional[str] = Query(None, title=\"AS number\"),\n content_type: Optional[str] = Query(None, title=\"Content type\"),\n contentType: Optional[str] = Query(None, description=\"Alias of content_type\"),\n hostname: Optional[str] = Query(None, title=\"Hostname\"),\n ip_address: Optional[str] = Query(None, title=\"IP address\"),\n ipAddress: Optional[str] = Query(None, description=\"Alias of ip_address\"),\n server: Optional[str] = Query(None, title=\"Server\"),\n sha256: Optional[str] = Query(None, title=\"SHA256\"),\n status: Optional[int] = Query(None, title=\"Status\"),\n url: Optional[str] = Query(None, title=\"URL\"),\n from_at: Optional[Union[datetime, date]] = Query(\n None, title=\"From at\", description=\"Datetime or date in ISO 8601 format\"\n ),\n fromAt: Optional[Union[datetime, date]] = Query(\n None, description=\"Alias of from_at\"\n ),\n to_at: Optional[Union[datetime, date]] = Query(\n None, title=\"To at\", description=\"Datetime or date in ISO 8601 format\"\n ),\n toAt: Optional[Union[datetime, date]] = Query(\n None, description=\"Alias of to_at\"\n ),\n ):\n self.asn = asn\n self.content_type = content_type or contentType\n self.hostname = hostname\n self.ip_address = ip_address or ipAddress\n self.server = server\n self.sha256 = sha256\n self.status = status\n self.url = url\n self.from_at = from_at or fromAt\n self.to_at = to_at or toAt\n"
},
{
"alpha_fraction": 0.7090012431144714,
"alphanum_fraction": 0.7090012431144714,
"avg_line_length": 27.964284896850586,
"blob_id": "7e73774612f5eb63fbf0587b22f66017f97b0ec0",
"content_id": "1b6752cd05961bc665a26b0cb046c191c80e0256",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 811,
"license_type": "permissive",
"max_line_length": 77,
"num_lines": 28,
"path": "/uzen/schemas/classifications.py",
"repo_name": "Chaos-Monkey-Island/uzen",
"src_encoding": "UTF-8",
"text": "from typing import Optional\n\nfrom fastapi_utils.api_model import APIModel\nfrom pydantic import Field\n\nfrom uzen.schemas.base import AbstractBaseModel\nfrom uzen.schemas.mixins import TimestampMixin\n\n\nclass BaseClassification(APIModel):\n \"\"\"Base Pydantic model for Classification\n\n Note that this model doesn't have \"id\" and \"created_at\" fields.\n \"\"\"\n\n name: str = Field(\n ..., title=\"Name\", description=\"A name of the classification\",\n )\n malicious: bool = Field(\n ..., title=\"Malicious\", description=\"A result of the classification\",\n )\n note: Optional[str] = Field(\n None, title=\"Note\", description=\"A note of the classification\",\n )\n\n\nclass Classification(BaseClassification, AbstractBaseModel, TimestampMixin):\n \"\"\"Full Pydantic model for Classification\"\"\"\n"
},
{
"alpha_fraction": 0.5432299971580505,
"alphanum_fraction": 0.5432299971580505,
"avg_line_length": 25.65217399597168,
"blob_id": "c0c875eee4b311c116ea1745cc39364c9a0adfd8",
"content_id": "da44cd39bdf88f4309658c04dadc97741c2e53f8",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 613,
"license_type": "permissive",
"max_line_length": 84,
"num_lines": 23,
"path": "/uzen/api/dependencies/rules.py",
"repo_name": "Chaos-Monkey-Island/uzen",
"src_encoding": "UTF-8",
"text": "from typing import Optional\n\nfrom fastapi import Query\n\n\nclass SearchFilters:\n def __init__(\n self,\n name: Optional[str] = Query(\n None, title=\"Name\", description=\"A name of the rule\"\n ),\n target: Optional[str] = Query(\n None,\n title=\"Target\",\n description=\"A target of the rule (body, certificate, script or whois)\",\n ),\n source: Optional[str] = Query(\n None, title=\"Source\", description=\"A source of the rule\"\n ),\n ):\n self.name = name\n self.target = target\n self.source = source\n"
},
{
"alpha_fraction": 0.7668161392211914,
"alphanum_fraction": 0.7668161392211914,
"avg_line_length": 23.108108520507812,
"blob_id": "4185188f6196a2c24ffd73fdcb31f087d9e99de3",
"content_id": "7092671cdbbcd036e9520280ca1fea26a9e66334",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 892,
"license_type": "permissive",
"max_line_length": 84,
"num_lines": 37,
"path": "/uzen/schemas/utils.py",
"repo_name": "Chaos-Monkey-Island/uzen",
"src_encoding": "UTF-8",
"text": "from dataclasses import dataclass\nfrom typing import List, Type, Union\nfrom uuid import UUID\n\nfrom pydantic import BaseModel, Field\nfrom tortoise.models import Model\n\nfrom uzen.models.classifications import Classification\nfrom uzen.models.dns_records import DnsRecord\nfrom uzen.models.screenshots import Screenshot\nfrom uzen.models.scripts import Script\nfrom uzen.models.snapshots import Snapshot\n\n\n@dataclass\nclass SnapshotResult:\n snapshot: Snapshot\n screenshot: Screenshot\n scripts: List[Script]\n\n\n@dataclass\nclass EnrichmentResults:\n classifications: List[Classification]\n dns_records: List[DnsRecord]\n\n\n@dataclass\nclass SearchResults:\n results: Union[List[Type[Model]], List[Type[BaseModel]], List[dict], List[UUID]]\n total: int\n\n\nclass CountResponse(BaseModel):\n count: int = Field(\n ..., title=\"Count\", description=\"Total count of existing items\",\n )\n"
},
{
"alpha_fraction": 0.616156280040741,
"alphanum_fraction": 0.6177402138710022,
"avg_line_length": 27.696969985961914,
"blob_id": "262f3c7eff1f17f62a608459066606855945a43d",
"content_id": "7a818f5cfc16203216eb160a15b9e9f8b8e9b98d",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1894,
"license_type": "permissive",
"max_line_length": 86,
"num_lines": 66,
"path": "/uzen/services/searchers/__init__.py",
"repo_name": "Chaos-Monkey-Island/uzen",
"src_encoding": "UTF-8",
"text": "from __future__ import annotations\n\nfrom abc import ABC, abstractmethod\nfrom typing import List, Optional, Type\n\nfrom tortoise.models import Model, QuerySet\nfrom tortoise.query_utils import Q\n\nfrom uzen.schemas.search import SearchResults\n\n\nclass AbstractSearcher(ABC):\n def __init__(\n self,\n model: Type[Model],\n query: Q,\n values: Optional[List[str]] = None,\n prefetch_related: List[str] = [],\n ):\n self.model = model\n self.query = query\n self.values = values\n self.prefetch_related = prefetch_related\n\n async def _total(self) -> int:\n return await self.model.filter(self.query).count()\n\n def build_queryset(\n self, size: Optional[int] = None, offset: Optional[int] = None, id_only=False\n ) -> QuerySet[Type[Model]]:\n size = 100 if size is None else size\n\n queryset = self.model.filter(self.query).limit(size)\n if offset is not None:\n queryset = queryset.offset(offset)\n\n if id_only:\n return queryset.values_list(\"id\", flat=True)\n\n if self.values is not None:\n return queryset.values(*self.values)\n\n return queryset.prefetch_related(*self.prefetch_related)\n\n async def _search(\n self, size: Optional[int] = None, offset: Optional[int] = None, id_only=False,\n ) -> SearchResults:\n total = await self._total()\n queryset = self.build_queryset(size=size, offset=offset, id_only=id_only)\n results = await queryset\n return SearchResults(results=results, total=total)\n\n @classmethod\n @abstractmethod\n async def search(\n cls,\n filters: dict,\n size: Optional[int] = None,\n offset: Optional[int] = None,\n id_only=False,\n ):\n \"\"\"Search a table.\n\n Override this method in child classes.\n \"\"\"\n raise NotImplementedError()\n"
},
{
"alpha_fraction": 0.7317554354667664,
"alphanum_fraction": 0.7317554354667664,
"avg_line_length": 36.55555725097656,
"blob_id": "f276c28248db69735551e99bcf5b86d762421350",
"content_id": "a238c5a5ed0fe7bff947a57fc2cedf94fe331e40",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1014,
"license_type": "permissive",
"max_line_length": 85,
"num_lines": 27,
"path": "/uzen/core/settings.py",
"repo_name": "Chaos-Monkey-Island/uzen",
"src_encoding": "UTF-8",
"text": "import sys\n\nfrom starlette.config import Config\nfrom starlette.datastructures import CommaSeparatedStrings, Secret\n\nconfig = Config(\".env\")\n\nPROJECT_NAME: str = config(\"PROJECT_NAME\", default=\"uzen\")\n\nDEBUG: bool = config(\"DEBUG\", cast=bool, default=False)\nTESTING: bool = config(\"TESTING\", cast=bool, default=False)\n\nLOG_FILE = config(\"LOG_FILE\", default=sys.stderr)\nLOG_LEVEL: str = config(\"LOG_LEVEL\", cast=str, default=\"DEBUG\")\nLOG_BACKTRACE: bool = config(\"LOG_BACKTRACE\", cast=bool, default=True)\n\nDATABASE_URL: str = config(\"DATABASE_URL\", cast=str, default=\"sqlite://:memory:\")\nAPP_MODELS = config(\"APP_MODELS\", cast=CommaSeparatedStrings, default=\"uzen.models\",)\n\nGOOGLE_SAFE_BROWSING_API_KEY: str = config(\n \"GOOGLE_SAFE_BROWSING_API_KEY\", cast=Secret, default=\"\"\n)\n\nBROWSER_WS_ENDPOINT: str = config(\"BROWSER_WS_ENDPOINT\", cast=str, default=\"\")\nBROWSER_WAIT_UNTIL: str = config(\"BROWSER_WAIT_UNTIL\", cast=str, default=\"load\")\n\nHTTPX_FALLBACK: bool = config(\"HTTPX_FALLBACK\", cast=bool, default=True)\n"
},
{
"alpha_fraction": 0.6522842645645142,
"alphanum_fraction": 0.6522842645645142,
"avg_line_length": 19.736841201782227,
"blob_id": "d0e9853400b705924505a1633f1c2d9a136b3319",
"content_id": "b7ab4a1fd006aacab26d247eb28e5102a74e5078",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 394,
"license_type": "permissive",
"max_line_length": 51,
"num_lines": 19,
"path": "/uzen/models/base.py",
"repo_name": "Chaos-Monkey-Island/uzen",
"src_encoding": "UTF-8",
"text": "from uuid import UUID\n\nfrom tortoise import fields\nfrom tortoise.models import Model\n\n\nclass AbstractBaseModel(Model):\n id = fields.UUIDField(pk=True)\n\n @classmethod\n async def delete_by_id(cls, id_: UUID) -> None:\n await cls.get(id=id_).delete()\n\n @classmethod\n async def count(cls) -> int:\n return await cls.all().count()\n\n class Meta:\n abstract = True\n"
},
{
"alpha_fraction": 0.7765567898750305,
"alphanum_fraction": 0.7765567898750305,
"avg_line_length": 31.117647171020508,
"blob_id": "b5c277cfeb95fe024e97bf42bd826d07a26c8d27",
"content_id": "4f158fb92f363aff59b9109003f9cd22c63dd81f",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 546,
"license_type": "permissive",
"max_line_length": 72,
"num_lines": 17,
"path": "/uzen/api/endpoints/ip_address.py",
"repo_name": "Chaos-Monkey-Island/uzen",
"src_encoding": "UTF-8",
"text": "from fastapi import APIRouter\n\nfrom uzen.factories.ip_address import IPAddressInformationFactory\nfrom uzen.schemas.ip_address import IPAddressInformation\n\nrouter = APIRouter()\n\n\[email protected](\n \"/{ip_address}\",\n response_model=IPAddressInformation,\n response_description=\"Returns information of an IP address\",\n summary=\"Get IP information\",\n description=\"Get an information related to an IP address\",\n)\nasync def get(ip_address: str) -> IPAddressInformation:\n return await IPAddressInformationFactory.from_ip_address(ip_address)\n"
},
{
"alpha_fraction": 0.7291242480278015,
"alphanum_fraction": 0.7291242480278015,
"avg_line_length": 27.882352828979492,
"blob_id": "a3f7fb98b028b36a2ba033ae350472253a93aa46",
"content_id": "6d29261967c41983b7e5e2328611ca3705ef7709",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 491,
"license_type": "permissive",
"max_line_length": 66,
"num_lines": 17,
"path": "/uzen/tasks/scripts.py",
"repo_name": "Chaos-Monkey-Island/uzen",
"src_encoding": "UTF-8",
"text": "from typing import List\n\nfrom loguru import logger\n\nfrom uzen.factories.scripts import ScriptFactory\nfrom uzen.models.scripts import Script\nfrom uzen.tasks import EnrichmentTask\n\n\nclass ScriptTask(EnrichmentTask):\n async def _process(self) -> List[Script]:\n logger.debug(f\"Fetch scripts from {self.snapshot.url}\")\n scripts = await ScriptFactory.from_snapshot(self.snapshot)\n if self.insert_to_db:\n await Script.bulk_create(scripts)\n\n return scripts\n"
},
{
"alpha_fraction": 0.6833756566047668,
"alphanum_fraction": 0.6833756566047668,
"avg_line_length": 27.14285659790039,
"blob_id": "659f7eb1bc9eadb3b1aa86c3083f4c34ca98c980",
"content_id": "852a0a9c387e9c044372af3a4022d93a3cd2dd70",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1576,
"license_type": "permissive",
"max_line_length": 68,
"num_lines": 56,
"path": "/uzen/schemas/matches.py",
"repo_name": "Chaos-Monkey-Island/uzen",
"src_encoding": "UTF-8",
"text": "from typing import List, Optional, Union\nfrom uuid import UUID\n\nfrom fastapi_utils.api_model import APIModel\nfrom pydantic import Field\n\nfrom uzen.schemas.base import AbstractBaseModel\nfrom uzen.schemas.mixins import TimestampMixin\nfrom uzen.schemas.rules import Rule\nfrom uzen.schemas.scripts import Script\nfrom uzen.schemas.search import BaseSearchResults\nfrom uzen.schemas.snapshots import Snapshot\nfrom uzen.schemas.yara import YaraMatch\n\n\nclass BaseMatch(APIModel):\n \"\"\"Base Pydantic model for Match\n\n Note that this model doesn't have \"id\" and \"created_at\" fields.\n \"\"\"\n\n matches: List[YaraMatch] = Field(\n ..., title=\"Matches\", description=\"A list of YARA mastches\",\n )\n snapshot: Snapshot = Field(\n ..., title=\"Snapshot\", description=\"A matched snapshot\",\n )\n rule: Rule = Field(\n ..., title=\"Rule\", description=\"A matched rule\",\n )\n script: Optional[Script] = Field(\n None, title=\"Script\", description=\"A matched script\",\n )\n\n class Config:\n orm_mode = True\n\n\nclass Match(BaseMatch, AbstractBaseModel, TimestampMixin):\n \"\"\"Full Pydantic model for Match\"\"\"\n\n\nclass MatchResult(APIModel):\n rule_id: UUID = Field(\n ..., title=\"Matches\", description=\"An ID of the rule\",\n )\n script_id: Optional[UUID] = Field(\n None, title=\"Matches\", description=\"An ID of the script\",\n )\n matches: List[YaraMatch] = Field(\n ..., title=\"Matches\", description=\"A list of YARA mastches\",\n )\n\n\nclass SearchResults(BaseSearchResults):\n results: Union[List[Match], List[UUID]]\n"
},
{
"alpha_fraction": 0.6757678985595703,
"alphanum_fraction": 0.6801559925079346,
"avg_line_length": 29.16176414489746,
"blob_id": "8cac38e06b317ab9bd44cd778164b602337d7ebd",
"content_id": "89635c7c85e03be0ac0b506d56275fd30d0871ab",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2051,
"license_type": "permissive",
"max_line_length": 88,
"num_lines": 68,
"path": "/uzen/api/endpoints/screenshots.py",
"repo_name": "Chaos-Monkey-Island/uzen",
"src_encoding": "UTF-8",
"text": "from typing import Optional, Union, cast\nfrom uuid import UUID\n\nfrom fastapi import APIRouter, HTTPException\nfrom fastapi.responses import Response\nfrom tortoise.exceptions import DoesNotExist\n\nfrom uzen.models.screenshots import Screenshot\nfrom uzen.schemas.screenshots import BaseScreenshot\nfrom uzen.schemas.screenshots import Screenshot as ScreenshotModel\nfrom uzen.services.browser import Browser\n\nrouter = APIRouter()\n\n\[email protected](\n \"/{snapshot_id}\",\n response_model=ScreenshotModel,\n responses={\n 200: {\n \"content\": {\"image/png\": {}},\n \"description\": \"Returns a sreenshot or an image.\",\n }\n },\n response_description=\"Returns a screenshot\",\n summary=\"Get a screenshot\",\n description=\"Get a screenshot which related to a snapshot\",\n)\nasync def get_by_snapshot_id(\n snapshot_id: UUID, output_format: Optional[str] = None\n) -> Union[ScreenshotModel, Response]:\n try:\n screenshot = await Screenshot.get_by_snapshot_id(snapshot_id)\n except DoesNotExist:\n raise HTTPException(\n status_code=404, detail=f\"Screenshot related to {snapshot_id} is not found\",\n )\n\n if output_format == \"png\":\n return Response(content=screenshot.png, media_type=\"image/png\")\n\n model = cast(ScreenshotModel, screenshot.to_model())\n return model\n\n\[email protected](\n \"/preview/{hostname}\",\n response_model=BaseScreenshot,\n responses={\n 200: {\n \"content\": {\"image/png\": {}},\n \"description\": \"Returns a sreenshot or an image.\",\n }\n },\n response_description=\"Returns a screenshot\",\n summary=\"Get a screenshot\",\n description=\"Get a screenshot for previewing\",\n)\nasync def perview(\n hostname: str, output_format: Optional[str] = None\n) -> Union[BaseScreenshot, Response]:\n screenshot: Screenshot = await Browser.preview(hostname)\n\n if output_format == \"png\":\n return Response(content=screenshot.png, media_type=\"image/png\")\n\n model = cast(BaseScreenshot, screenshot.to_model())\n return model\n"
},
{
"alpha_fraction": 0.5753012299537659,
"alphanum_fraction": 0.625,
"avg_line_length": 30.619047164916992,
"blob_id": "fb8bfb8d47942f923ad0fb38659423f1d60ec8a0",
"content_id": "0eed66d95dd9ecbc2388b54f0e47e7d7e72b88ed",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "TypeScript",
"length_bytes": 664,
"license_type": "permissive",
"max_line_length": 68,
"num_lines": 21,
"path": "/frontend/tests/unit/components/mixins/search_form.spec.ts",
"repo_name": "Chaos-Monkey-Island/uzen",
"src_encoding": "UTF-8",
"text": "import { SearchFormMixin } from \"@/components/mixins\";\n\ndescribe(\"SearchFormMixin\", () => {\n const subject = new SearchFormMixin();\n\n describe(\"#normalizeFilterValue\", () => {\n it(\"return a string of a data\", () => {\n const date: Date = new Date(\"December 17, 1995 00:00:00 GMT\");\n const str = subject.normalizeFilterValue(date);\n expect(str).toEqual(\"1995-12-17T00:00:00.000Z\");\n });\n\n it(\"return a value without any modification\", () => {\n const str = subject.normalizeFilterValue(\"foo\");\n expect(str).toEqual(\"foo\");\n\n const number = subject.normalizeFilterValue(42);\n expect(number).toEqual(42);\n });\n });\n});\n"
},
{
"alpha_fraction": 0.738095223903656,
"alphanum_fraction": 0.738095223903656,
"avg_line_length": 13,
"blob_id": "0c1e9584d65b9c754d6a98c8f8e80450b050ac06",
"content_id": "e4e7d04d9bdf0a18f3f3c8e27fc5a10b668e17be",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 84,
"license_type": "permissive",
"max_line_length": 35,
"num_lines": 6,
"path": "/uzen/core/exceptions.py",
"repo_name": "Chaos-Monkey-Island/uzen",
"src_encoding": "UTF-8",
"text": "class UzenError(Exception):\n pass\n\n\nclass TakeSnapshotError(UzenError):\n pass\n"
},
{
"alpha_fraction": 0.7068702578544617,
"alphanum_fraction": 0.7083969712257385,
"avg_line_length": 27.478260040283203,
"blob_id": "89e620f817b31beb3c98ec1741ec2ae313196190",
"content_id": "87ed6c702835fa59ed13e97eb52eb1955e9e413c",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 655,
"license_type": "permissive",
"max_line_length": 77,
"num_lines": 23,
"path": "/uzen/models/dns_records.py",
"repo_name": "Chaos-Monkey-Island/uzen",
"src_encoding": "UTF-8",
"text": "from tortoise import fields\n\nfrom uzen.models.base import AbstractBaseModel\nfrom uzen.models.mixins import TimestampMixin\nfrom uzen.schemas.dns_records import DnsRecord as DnsRecordModel\n\n\nclass DnsRecord(TimestampMixin, AbstractBaseModel):\n type = fields.CharField(max_length=5)\n value = fields.TextField()\n\n snapshot: fields.ForeignKeyRelation[\"Snapshot\"] = fields.ForeignKeyField(\n \"models.Snapshot\",\n related_name=\"_dns_records\",\n to_field=\"id\",\n on_delete=fields.CASCADE,\n )\n\n def to_model(self) -> DnsRecordModel:\n return DnsRecordModel.from_orm(self)\n\n class Meta:\n table = \"dns_records\"\n"
},
{
"alpha_fraction": 0.7460317611694336,
"alphanum_fraction": 0.761904776096344,
"avg_line_length": 27,
"blob_id": "00683d958a09ea0c2bcc5fe9ae7b97d6bb87e1ed",
"content_id": "0dd84869adc361941efa7ca4f3c04b1e023420f6",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 504,
"license_type": "permissive",
"max_line_length": 78,
"num_lines": 18,
"path": "/tests/factories/test_ip_address.py",
"repo_name": "Chaos-Monkey-Island/uzen",
"src_encoding": "UTF-8",
"text": "import pytest\nimport vcr\n\nfrom uzen.factories.ip_address import IPAddressInformationFactory\nfrom uzen.services.whois import Whois\n\n\ndef mock_whois(hostname: str):\n return \"foo\"\n\n\[email protected]\[email protected]_cassette(\"tests/fixtures/vcr_cassettes/ip_address.yaml\")\nasync def test_build_from_ip_address(monkeypatch):\n monkeypatch.setattr(Whois, \"whois\", mock_whois)\n\n information = await IPAddressInformationFactory.from_ip_address(\"1.1.1.1\")\n assert str(information.ip_address) == \"1.1.1.1\"\n"
},
{
"alpha_fraction": 0.6471054553985596,
"alphanum_fraction": 0.6471054553985596,
"avg_line_length": 30.524999618530273,
"blob_id": "70cf00c625a762c88a1c883fdc824750a713b695",
"content_id": "ada3a8c5ce1277653013d071efe87b3fda1f14c3",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1261,
"license_type": "permissive",
"max_line_length": 88,
"num_lines": 40,
"path": "/uzen/tasks/matches.py",
"repo_name": "Chaos-Monkey-Island/uzen",
"src_encoding": "UTF-8",
"text": "from typing import List\n\nfrom loguru import logger\n\nfrom uzen.models.matches import Match\nfrom uzen.models.snapshots import Snapshot\nfrom uzen.schemas.matches import MatchResult\nfrom uzen.services.rule_matcher import RuleMatcher\nfrom uzen.tasks import AbstractTask\n\n\nclass MatchinbgTask(AbstractTask):\n def __init__(self, snapshot: Snapshot):\n self.snapshot = snapshot\n\n async def _process(self):\n logger.debug(\"Start matching job...\")\n\n snapshot_ = await Snapshot.get(id=self.snapshot.id).prefetch_related(\"_scripts\")\n matcher = RuleMatcher(snapshot_)\n results: List[MatchResult] = await matcher.scan()\n\n matches = [\n Match(\n snapshot_id=self.snapshot.id,\n rule_id=result.rule_id,\n script_id=result.script_id,\n matches=[match.dict() for match in result.matches],\n )\n for result in results\n ]\n await Match.bulk_create(matches)\n\n logger.debug(f\"Snapshot {self.snapshot.id} matches with {len(matches)} rule(s)\")\n logger.debug(\"Matching job is finished\")\n\n @classmethod\n async def process(cls, snapshot: Snapshot):\n instance = cls(snapshot)\n return await instance._process()\n"
},
{
"alpha_fraction": 0.7525083422660828,
"alphanum_fraction": 0.7525083422660828,
"avg_line_length": 38.86666488647461,
"blob_id": "6df3c6cbbce71d6bc036b484f02b2154462bb23c",
"content_id": "7dc5cd7b6545cf4811624698b2e8139d2a854571",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 598,
"license_type": "permissive",
"max_line_length": 84,
"num_lines": 15,
"path": "/uzen/factories/domain.py",
"repo_name": "Chaos-Monkey-Island/uzen",
"src_encoding": "UTF-8",
"text": "from uzen.factories.dns_records import DnsRecordFactory\nfrom uzen.models.snapshots import Snapshot\nfrom uzen.schemas.domain import DomainInformation\nfrom uzen.services.whois import Whois\n\n\nclass DomainInformationFactory:\n @staticmethod\n async def from_hostname(hostname: str) -> DomainInformation:\n whois = Whois.whois(hostname)\n records = await DnsRecordFactory.from_hostname(hostname)\n snapshots = await Snapshot.find_by_hostname(hostname)\n return DomainInformation(\n hostname=hostname, whois=whois, dns_records=records, snapshots=snapshots\n )\n"
},
{
"alpha_fraction": 0.5898773670196533,
"alphanum_fraction": 0.5937907695770264,
"avg_line_length": 26.575538635253906,
"blob_id": "a7a0d4de2123d42d74f75290b3a811c6d0f2be35",
"content_id": "f2815152ee0889175da66fa878c26798c3525766",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3833,
"license_type": "permissive",
"max_line_length": 76,
"num_lines": 139,
"path": "/uzen/factories/scripts.py",
"repo_name": "Chaos-Monkey-Island/uzen",
"src_encoding": "UTF-8",
"text": "import dataclasses\nfrom functools import partial\nfrom typing import Dict, List, Optional\nfrom urllib.parse import urlparse\n\nimport aiometer\nimport httpx\nfrom bs4 import BeautifulSoup\n\nfrom uzen.models.scripts import Script\nfrom uzen.models.snapshots import Snapshot\nfrom uzen.services.utils import calculate_sha256\n\nMAX_AT_ONCE = 10\n\n\ndef extract_base_path(path: str) -> str:\n \"\"\"Extract base path from a path (or remove a filename from the path)\n\n Arguments:\n path {str} -- A path e.g. /foo/bar/index.html\n\n Returns:\n str -- A base path (e.g. /foo/bar/)\n \"\"\"\n parts = path.split(\"/\")\n return \"/\".join(parts[:-1])\n\n\ndef normalize_source(url: str, source: str) -> str:\n \"\"\"Convert a URL to an absolute URL\n\n Arguments:\n url {str} -- A URL\n source {str} -- Source of a script\n\n Returns:\n str -- An absolute URL\n \"\"\"\n if source.startswith(\"http://\") or source.startswith(\"https://\"):\n return source\n\n parsed = urlparse(url)\n base_path = extract_base_path(parsed.path)\n base_url = f\"{parsed.scheme}://{parsed.netloc}{base_path}\"\n\n if source.startswith(\"/\"):\n return f\"{base_url}{source}\"\n\n return f\"{base_url}/{source}\"\n\n\ndef get_script_sources(url: str, body: str) -> List[str]:\n \"\"\"Get script sources\n\n Arguments:\n url {str} -- A URL\n body {str} -- An HTTP response body\n\n Returns:\n List[str] -- A list of script sources\n \"\"\"\n html = BeautifulSoup(body, \"html.parser\")\n\n sources: List[str] = []\n for script in html.find_all(\"script\"):\n source = script.attrs.get(\"src\")\n if source is not None:\n sources.append(normalize_source(url, source))\n\n return list(set(sources))\n\n\[email protected]\nclass ScriptContent:\n source: str\n content: str\n\n\nasync def get_script_content(\n client, source: str, headers: Dict\n) -> Optional[ScriptContent]:\n \"\"\"Get script contents\n\n Arguments:\n source {str} -- A source of a script (an absolute URL)\n\n Returns:\n Optional[ScriptContent] -- A fetched result\n \"\"\"\n try:\n res = await client.get(source, headers=headers)\n res.raise_for_status()\n return ScriptContent(source=source, content=res.text)\n except httpx.HTTPError:\n return None\n\n\nclass ScriptFactory:\n @staticmethod\n async def from_snapshot(snapshot: Snapshot) -> List[Script]:\n sources = get_script_sources(url=snapshot.url, body=snapshot.body)\n scripts = []\n\n # Use the same settings as the original request\n headers = {\n \"accept_language\": snapshot.request.get(\"accept_language\"),\n \"host\": snapshot.request.get(\"host\"),\n \"user_agent\": snapshot.request.get(\"user_agent\"),\n }\n # Remove none value\n headers = {k: v for k, v in headers.items() if v is not None}\n\n ignore_https_errors = snapshot.request.get(\"ignore_https_errors\")\n verify = not ignore_https_errors\n\n async with httpx.AsyncClient(verify=verify) as client:\n # Get sources\n tasks = [\n partial(get_script_content, client, source, headers)\n for source in sources\n ]\n if len(tasks) <= 0:\n return []\n\n results = await aiometer.run_all(tasks, max_at_once=MAX_AT_ONCE)\n for result in results:\n if result is None:\n continue\n\n script = Script(\n url=result.source,\n content=result.content,\n sha256=calculate_sha256(result.content),\n # insert a dummy ID if a snapshot doesn't have ID\n snapshot_id=snapshot.id or -1,\n )\n scripts.append(script)\n return scripts\n"
},
{
"alpha_fraction": 0.6486956477165222,
"alphanum_fraction": 0.6591304540634155,
"avg_line_length": 28.237287521362305,
"blob_id": "8fc062d750b432d47f3fdf2bad81d7244b0c67e5",
"content_id": "e91df76005c5b6d1cd1fe4f1d33cd2bf5e939de7",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1725,
"license_type": "permissive",
"max_line_length": 81,
"num_lines": 59,
"path": "/uzen/models/screenshots.py",
"repo_name": "Chaos-Monkey-Island/uzen",
"src_encoding": "UTF-8",
"text": "import base64\nimport os\nimport zlib\nfrom typing import Union\nfrom uuid import UUID\n\nfrom tortoise import fields\n\nfrom uzen.models.base import AbstractBaseModel\nfrom uzen.schemas.screenshots import BaseScreenshot\nfrom uzen.schemas.screenshots import Screenshot as ScreenshotModel\n\n\ndef not_found_png() -> bytes:\n current_path = os.path.abspath(os.path.dirname(__file__))\n path = os.path.join(current_path, \"../../frontend/dist/images/not-found.png\")\n with open(path, \"rb\") as f:\n return f.read()\n\n\nclass Screenshot(AbstractBaseModel):\n _data: str = fields.TextField(source_field=\"data\")\n\n snapshot: fields.OneToOneRelation[\"Snapshot\"] = fields.OneToOneField(\n \"models.Snapshot\", related_name=\"_screenshot\", on_delete=fields.CASCADE\n )\n\n @property\n def data(self) -> str:\n try:\n b64decoded = base64.b64decode(self._data.encode())\n decompressed = zlib.decompress(b64decoded)\n return decompressed.decode()\n except zlib.error:\n return self._data\n\n @data.setter\n def data(self, data: str):\n compressed = zlib.compress(data.encode())\n self._data = base64.b64encode(compressed).decode()\n\n @property\n def png(self) -> bytes:\n if self.data != \"\":\n return base64.b64decode(self.data)\n return not_found_png()\n\n @classmethod\n async def get_by_snapshot_id(cls, id_: UUID) -> \"Screenshot\":\n return await cls.get(snapshot_id=id_)\n\n def to_model(self) -> Union[ScreenshotModel, BaseScreenshot]:\n if self.snapshot_id is not None:\n return ScreenshotModel.from_orm(self)\n\n return BaseScreenshot.from_orm(self)\n\n class Meta:\n table = \"screenshots\"\n"
},
{
"alpha_fraction": 0.6464537382125854,
"alphanum_fraction": 0.6464537382125854,
"avg_line_length": 26.567163467407227,
"blob_id": "c1a01eb7aedff8c3ba72fd3df9ce444c3d80bda2",
"content_id": "349c0b83c70b85940cee8e96db2f0c79e338a6f4",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1847,
"license_type": "permissive",
"max_line_length": 93,
"num_lines": 67,
"path": "/uzen/schemas/yara.py",
"repo_name": "Chaos-Monkey-Island/uzen",
"src_encoding": "UTF-8",
"text": "from typing import List, Optional\nfrom uuid import UUID\n\nfrom fastapi_utils.api_model import APIModel\nfrom pydantic import Field\n\nfrom uzen.schemas.common import Source, Target\nfrom uzen.schemas.snapshots import SimplifiedSnapshot\n\n\nclass ScanPayload(Source, Target, APIModel):\n pass\n\n\nclass YaraMatchString(APIModel):\n offset: int\n string_identifier: str\n string_data: str\n\n\nclass YaraMatch(APIModel):\n rule: str = Field(..., title=\"Rule\", description=\"A name of the rule\")\n namespace: str = Field(\n ...,\n title=\"Namespace\",\n description=\"A namespace associated to the matching rule\",\n )\n tags: List[str] = Field(\n [],\n title=\"Tags\",\n description=\"An array of strings containig the tags associated to the matching rule\",\n )\n meta: dict = Field(\n {},\n title=\"Meta\",\n description=\"A dictionary containing metadata associated to the matching rule\",\n )\n strings: List[YaraMatchString] = Field(\n [],\n title=\"Strings\",\n description=\"A list of tuples containing information about the matching strings\",\n )\n\n\nclass YaraResult(APIModel):\n snapshot_id: UUID = Field(\n ..., title=\"Snapshot ID\", description=\"An ID of the snapshot\"\n )\n script_id: Optional[UUID] = Field(\n ..., title=\"Script ID\", description=\"An ID of the script\"\n )\n target: str = Field(..., title=\"Target\", description=\"The target to scan\")\n matches: List[YaraMatch] = Field(\n [], title=\"YARA matches\", description=\"A list of YARA matches\"\n )\n\n\nclass ScanResult(SimplifiedSnapshot):\n \"\"\"Simplified version of Pydantic model of Snapshot\"\"\"\n\n yara_result: YaraResult\n\n @classmethod\n def field_keys(cls) -> List[str]:\n keys = list(cls.__fields__.keys())\n keys.remove(\"yara_result\")\n return keys\n"
},
{
"alpha_fraction": 0.7469879388809204,
"alphanum_fraction": 0.7469879388809204,
"avg_line_length": 40.5,
"blob_id": "7f21361ad1a1d16d7ab4dbc3222dd3e2328f73d0",
"content_id": "2a900d4f6424869bf93b42ce529a81d496f6ddcb",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 249,
"license_type": "permissive",
"max_line_length": 85,
"num_lines": 6,
"path": "/tests/services/test_certificate.py",
"repo_name": "Chaos-Monkey-Island/uzen",
"src_encoding": "UTF-8",
"text": "from uzen.services.certificate import Certificate\n\n\ndef test_load_and_dump_from_url():\n assert \"example.com\" in Certificate.load_and_dump_from_url(\"https://example.com\")\n assert Certificate.load_and_dump_from_url(\"http://example.com\") is None\n"
},
{
"alpha_fraction": 0.7798165082931519,
"alphanum_fraction": 0.7798165082931519,
"avg_line_length": 20.799999237060547,
"blob_id": "9839e19aed9563c3d2238ecbfc9476fbe0001cf1",
"content_id": "b456e0dcd9b5edd50a5b9ae41b39ac2b1467a991",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 109,
"license_type": "permissive",
"max_line_length": 56,
"num_lines": 5,
"path": "/uzen/models/mixins.py",
"repo_name": "Chaos-Monkey-Island/uzen",
"src_encoding": "UTF-8",
"text": "from tortoise import fields\n\n\nclass TimestampMixin:\n created_at = fields.DatetimeField(auto_now_add=True)\n"
},
{
"alpha_fraction": 0.6701268553733826,
"alphanum_fraction": 0.6747404932975769,
"avg_line_length": 28.89655113220215,
"blob_id": "512b969de29e5df94e210a7857ae2d7c294cab86",
"content_id": "3f4f107cb84759597e07aa2429e7478b1768252b",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 867,
"license_type": "permissive",
"max_line_length": 77,
"num_lines": 29,
"path": "/scripts/check.py",
"repo_name": "Chaos-Monkey-Island/uzen",
"src_encoding": "UTF-8",
"text": "\"\"\"A script for checking general settings\"\"\"\nimport sqlite3\nimport sys\n\nfrom pyppeteer import __chromium_revision__, __pyppeteer_home__\nfrom pyppeteer.chromium_downloader import check_chromium, chromium_executable\n\nsys.path = [\"\", \"..\"] + sys.path[1:] # noqa # isort:skip\n\nfrom uzen.core import settings # noqa # isort:skip\n\n\ndef check():\n print(\"[pyppeteer settings]\")\n print(f\"Chromium version: {__chromium_revision__}\")\n print(f\"Home drectory: {__pyppeteer_home__}\")\n downloaded = \"downloaded\" if check_chromium() else \"not downloaded\"\n print(f\"Executable: {chromium_executable()} ({downloaded})\")\n\n print()\n\n print(\"[DB settings]\")\n print(f\"SQLite version: {sqlite3.sqlite_version}\")\n print(f\"SQLite Python library version: {sqlite3.version}\")\n print(f\"File path: {settings.DATABASE_URL}\")\n\n\nif __name__ == \"__main__\":\n check()\n"
},
{
"alpha_fraction": 0.5930831432342529,
"alphanum_fraction": 0.6077998280525208,
"avg_line_length": 26.73469352722168,
"blob_id": "7a5ce0992bcc331585020658d6377825742c26eb",
"content_id": "2cc05fec62af8813e4b8d8931c04a2e94f005c92",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1359,
"license_type": "permissive",
"max_line_length": 63,
"num_lines": 49,
"path": "/tests/schemas/test_snapshots.py",
"repo_name": "Chaos-Monkey-Island/uzen",
"src_encoding": "UTF-8",
"text": "import pytest\n\nfrom uzen.schemas.snapshots import (\n BasicAttributes,\n CreateSnapshotPayload,\n remove_sharp_and_question_from_tail,\n)\n\n\ndef test_create_snapsnot_payload():\n payload = CreateSnapshotPayload(url=\"http://example.com\")\n assert payload.url == \"http://example.com\"\n\n with pytest.raises(ValueError):\n CreateSnapshotPayload(url=\"http://nope.example.com\")\n\n\ndef test_basic_attributes():\n basic = BasicAttributes(\n url=\"http://example.com#\",\n submitted_url=\"http://example.com#\",\n hostname=\"example.com\",\n ip_address=\"1.1.1.1\",\n asn=\"\",\n status=200,\n body=\"\",\n sha256=\"\",\n )\n assert basic.url == \"http://example.com\"\n assert basic.submitted_url == \"http://example.com\"\n\n basic = BasicAttributes(\n url=\"http://example.com?\",\n submitted_url=\"http://example.com?\",\n hostname=\"example.com\",\n ip_address=\"1.1.1.1\",\n asn=\"\",\n status=200,\n body=\"\",\n sha256=\"\",\n )\n assert basic.url == \"http://example.com\"\n assert basic.submitted_url == \"http://example.com\"\n\n\ndef test_remove_sharp_and_question_from_tail():\n assert remove_sharp_and_question_from_tail(\"foo#\") == \"foo\"\n assert remove_sharp_and_question_from_tail(\"foo?\") == \"foo\"\n assert remove_sharp_and_question_from_tail(\"foo\") == \"foo\"\n"
},
{
"alpha_fraction": 0.6337164640426636,
"alphanum_fraction": 0.634482741355896,
"avg_line_length": 25.632652282714844,
"blob_id": "cfbea8dd4ca416d747a27f11305b676ed38f2e49",
"content_id": "08d53eb259b082d7a1dee9982f0bd6f18cc61b06",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1305,
"license_type": "permissive",
"max_line_length": 80,
"num_lines": 49,
"path": "/uzen/factories/classifications.py",
"repo_name": "Chaos-Monkey-Island/uzen",
"src_encoding": "UTF-8",
"text": "from typing import List, Optional\n\nfrom pysafebrowsing import SafeBrowsing\nfrom pysafebrowsing.api import SafeBrowsingInvalidApiKey, SafeBrowsingWeirdError\n\nfrom uzen.core import settings\nfrom uzen.models.classifications import Classification\nfrom uzen.models.snapshots import Snapshot\n\n\ndef google_safe_brwosing_lookup(url: str) -> Optional[dict]:\n \"\"\"Lookup a url on GSB\n\n Arguments:\n url {str} -- A URL to lookup\n\n Returns:\n Optional[dict] -- A lookup result\n \"\"\"\n key = str(settings.GOOGLE_SAFE_BROWSING_API_KEY)\n if key == \"\":\n return None\n\n try:\n s = SafeBrowsing(key)\n return s.lookup_url(url)\n except (SafeBrowsingInvalidApiKey, SafeBrowsingWeirdError):\n pass\n\n return None\n\n\nclass ClassificationFactory:\n @staticmethod\n def from_snapshot(snapshot: Snapshot) -> List[Classification]:\n classifications = []\n\n res = google_safe_brwosing_lookup(snapshot.url)\n if res is not None:\n malicious = bool(res.get(\"malicious\"))\n classifications.append(\n Classification(\n name=\"Google Safe Browsing\",\n malicious=malicious,\n snapshot_id=snapshot.id or -1,\n )\n )\n\n return classifications\n"
},
{
"alpha_fraction": 0.6082772016525269,
"alphanum_fraction": 0.6082772016525269,
"avg_line_length": 25.64102554321289,
"blob_id": "febff11b5fa72046119a2a981d1a733487749a42",
"content_id": "836d9949b521bcc4422a153e9bb7a87877117069",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1039,
"license_type": "permissive",
"max_line_length": 88,
"num_lines": 39,
"path": "/uzen/schemas/common.py",
"repo_name": "Chaos-Monkey-Island/uzen",
"src_encoding": "UTF-8",
"text": "import yara\nfrom fastapi_utils.api_model import APIModel\nfrom pydantic import Field, validator\n\n\nclass CountResponse(APIModel):\n count: int = Field(\n ...,\n title=\"A number of matched items\",\n description=\"A number of matched items with filters\",\n )\n\n\nclass Target(APIModel):\n target: str = Field(\n \"body\",\n title=\"Target\",\n description=\"A target field to scan (body, certificate, script or whois)\",\n )\n\n @validator(\"target\")\n def target_types(cls, v):\n if v not in [\"body\", \"certificate\", \"script\", \"whois\"]:\n raise ValueError(\"Target must be any of body, certificate, script or whois\")\n return v\n\n\nclass Source(APIModel):\n source: str = Field(\n ..., title=\"YARA rule\", description=\"String containing the rules code\",\n )\n\n @validator(\"source\")\n def source_compilable(cls, v):\n try:\n yara.compile(source=v)\n except yara.Error as e:\n raise ValueError(f\"YARA compile error: {str(e)}\")\n return v\n"
},
{
"alpha_fraction": 0.5769149661064148,
"alphanum_fraction": 0.5792361497879028,
"avg_line_length": 33.59123992919922,
"blob_id": "76d1d718551595d386952f5996abb179bbb9996c",
"content_id": "68375516fe15196eb50b880119fa1a0f6807fdbe",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4739,
"license_type": "permissive",
"max_line_length": 86,
"num_lines": 137,
"path": "/uzen/services/yara_scanner.py",
"repo_name": "Chaos-Monkey-Island/uzen",
"src_encoding": "UTF-8",
"text": "import itertools\nfrom functools import partial\nfrom typing import Dict, List, Optional, cast\nfrom uuid import UUID\n\nimport aiometer\nimport yara\n\nfrom uzen.models.scripts import Script\nfrom uzen.models.snapshots import Snapshot\nfrom uzen.schemas.yara import ScanResult, YaraMatch, YaraResult\nfrom uzen.services.matches_converter import MatchesConverter\nfrom uzen.services.searchers.snapshots import SnapshotSearcher\n\nCHUNK_SIZE = 100\nMAX_AT_ONCE = 10\n\n\nclass YaraScanner:\n def __init__(self, source: str):\n self.rule: yara.Rules = yara.compile(source=source)\n\n async def partial_scan_for_scripts(self, ids: List[UUID]) -> List[YaraResult]:\n scripts = await Script.filter(snapshot_id__in=ids).values(\n \"id\", \"snapshot_id\", \"content\"\n )\n matched_results = []\n for script in scripts:\n snapshot_id = script.get(\"snapshot_id\")\n content = script.get(\"content\")\n matches = self.match(data=content)\n if len(matches) > 0:\n result = YaraResult(\n snapshot_id=snapshot_id,\n script_id=script.get(\"id\"),\n target=\"script\",\n matches=matches,\n )\n matched_results.append(result)\n\n return matched_results\n\n async def partial_scan(self, target: str, ids: List[UUID]) -> List[YaraResult]:\n \"\"\"Scan a list of snapshots with a YARA rule\n\n Arguments:\n target {str} -- A target of a snapshot's attribute\n ids {List[int]} -- A list of ids of snapshots\n\n Returns:\n List[int] -- A list of ids which are matched with a YARA rule\n \"\"\"\n if target == \"script\":\n return await self.partial_scan_for_scripts(ids)\n\n snapshots = await Snapshot.filter(id__in=ids).values(\"id\", target)\n matched_results = []\n for snapshot in snapshots:\n snapshot_id = snapshot.get(\"id\")\n data = snapshot.get(target, \"\")\n matches = self.match(data=data)\n if len(matches) > 0:\n result = YaraResult(\n snapshot_id=snapshot_id,\n script_id=None,\n target=target,\n matches=matches,\n )\n matched_results.append(result)\n\n return matched_results\n\n async def scan_snapshots(\n self,\n target: str = \"body\",\n filters: dict = {},\n size: Optional[int] = None,\n offset: Optional[int] = None,\n ) -> List[ScanResult]:\n \"\"\"Scan snapshots data with a YARA rule\n\n Keyword Arguments:\n target {str} -- A target of a snapshot's attribute (default: {\"body\"})\n filters {dict} -- Filters for snapshot search (default: {{}})\n\n Returns:\n List[SearchResultModel] -- A list of simlified snapshot models\n \"\"\"\n # get snapshots ids based on filters\n search_results = await SnapshotSearcher.search(\n filters, id_only=True, size=size, offset=offset\n )\n snapshot_ids = cast(List[UUID], search_results.results)\n if len(snapshot_ids) == 0:\n return []\n\n # split ids into chunks\n chunks = [\n snapshot_ids[i : i + CHUNK_SIZE]\n for i in range(0, len(snapshot_ids), CHUNK_SIZE)\n ]\n # make scan tasks\n tasks = [partial(self.partial_scan, target, chunk) for chunk in chunks]\n results = await aiometer.run_all(tasks, max_at_once=MAX_AT_ONCE)\n flatten_results = list(itertools.chain.from_iterable(results))\n\n matched_ids = [result.snapshot_id for result in flatten_results]\n snapshots: List[dict] = (\n await Snapshot.filter(id__in=matched_ids).values(*ScanResult.field_keys())\n )\n\n table = self._build_snapshot_table(snapshots)\n for result in flatten_results:\n snapshot = table.get(str(result.snapshot_id))\n if snapshot is not None:\n snapshot[\"yara_result\"] = result\n\n return [ScanResult(**snapshot) for snapshot in snapshots]\n\n def _build_snapshot_table(self, snapshots: List[dict]) -> 
Dict[str, dict]:\n table = {}\n for snapshot in snapshots:\n id_ = str(snapshot.get(\"id\"))\n table[id_] = snapshot\n return table\n\n def match(self, data: Optional[str]) -> List[YaraMatch]:\n \"\"\"Scan a data with a YARA rule\n\n Arguments:\n data {Optional[str]} -- Data to scan\n\n Returns:\n List[yara.Match] -- YARA matches\n \"\"\"\n data = \"\" if data is None else data\n return MatchesConverter.convert(self.rule.match(data=data, timeout=60))\n"
},
{
"alpha_fraction": 0.5923076868057251,
"alphanum_fraction": 0.5984615087509155,
"avg_line_length": 36.14285659790039,
"blob_id": "d3c3fe1f51f3aec0c6e7ac0907b3d25757dd4786",
"content_id": "fff6adf0780f284b421b5fbc3aaf05158df5296f",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1300,
"license_type": "permissive",
"max_line_length": 85,
"num_lines": 35,
"path": "/uzen/api/dependencies/matches.py",
"repo_name": "Chaos-Monkey-Island/uzen",
"src_encoding": "UTF-8",
"text": "from datetime import date, datetime\nfrom typing import Optional, Union\nfrom uuid import UUID\n\nfrom fastapi import Query\n\n\nclass SearchFilters:\n def __init__(\n self,\n rule_id: Optional[UUID] = Query(\n None, title=\"Rule ID\", description=\"An ID of the rule\"\n ),\n ruleId: Optional[UUID] = Query(None, description=\"Alias of rule_id\"),\n snapshot_id: Optional[UUID] = Query(\n None, title=\"Snapshot ID\", description=\"An ID of the snapshot\"\n ),\n snapshotId: Optional[UUID] = Query(None, description=\"Alias of snapshot_id\"),\n from_at: Optional[Union[datetime, date]] = Query(\n None, title=\"From at\", description=\"Datetime or date in ISO 8601 format\"\n ),\n fromAt: Optional[Union[datetime, date]] = Query(\n None, description=\"Alias of from_at\"\n ),\n to_at: Optional[Union[datetime, date]] = Query(\n None, title=\"To at\", description=\"Datetime or date in ISO 8601 format\"\n ),\n toAt: Optional[Union[datetime, date]] = Query(\n None, description=\"Alias of to_at\"\n ),\n ):\n self.rule_id = rule_id or ruleId\n self.snapshot_id = snapshot_id or snapshotId\n self.from_at = from_at or fromAt\n self.to_at = to_at or toAt\n"
},
{
"alpha_fraction": 0.6264222264289856,
"alphanum_fraction": 0.6264222264289856,
"avg_line_length": 28.849056243896484,
"blob_id": "b1b2864ba4301ebc13991104f514f4b873332e9e",
"content_id": "5b2c7ee48898d1c014fed60fd91801791a3d397c",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1582,
"license_type": "permissive",
"max_line_length": 106,
"num_lines": 53,
"path": "/uzen/tasks/__init__.py",
"repo_name": "Chaos-Monkey-Island/uzen",
"src_encoding": "UTF-8",
"text": "from abc import ABC, abstractmethod\nfrom typing import List, Union\n\nfrom loguru import logger\n\nfrom uzen.models.classifications import Classification\nfrom uzen.models.dns_records import DnsRecord\nfrom uzen.models.scripts import Script\nfrom uzen.models.snapshots import Snapshot\n\n\nclass AbstractTask(ABC):\n @abstractmethod\n async def _process(self):\n raise NotImplementedError()\n\n async def safe_process(self):\n try:\n return await self._process()\n except Exception as e:\n logger.error(\n f\"Failed to process {self.__class__.__name__} task. Error: {e}\"\n )\n\n\nclass EnrichmentTask(AbstractTask):\n def __init__(self, snapshot: Snapshot, insert_to_db: bool = True):\n self.snapshot = snapshot\n self.insert_to_db = insert_to_db\n\n async def _process(\n self,\n ) -> Union[List[Script], List[DnsRecord], List[Classification]]:\n raise NotImplementedError()\n\n async def safe_process(\n self,\n ) -> Union[List[Script], List[DnsRecord], List[Classification]]:\n try:\n return await self._process()\n except Exception as e:\n logger.error(\n f\"Failed to process {self.__class__.__name__} task. URL: {self.snapshot.url} / Error: {e}\"\n )\n\n return []\n\n @classmethod\n async def process(\n cls, snapshot: Snapshot, insert_to_db: bool = True\n ) -> Union[List[Script], List[DnsRecord], List[Classification]]:\n instance = cls(snapshot, insert_to_db)\n return await instance.safe_process()\n"
},
{
"alpha_fraction": 0.7797468304634094,
"alphanum_fraction": 0.7797468304634094,
"avg_line_length": 29.384614944458008,
"blob_id": "a26dd356a6e563e9dd70995a3f6ce919238a32d9",
"content_id": "2a52a3c48b227fbdc88ba90f5046da519236667a",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "TypeScript",
"length_bytes": 395,
"license_type": "permissive",
"max_line_length": 61,
"num_lines": 13,
"path": "/frontend/src/components/mixins/index.ts",
"repo_name": "Chaos-Monkey-Island/uzen",
"src_encoding": "UTF-8",
"text": "import { ErrorDialogMixin } from \"./error_dialog\";\nimport { HighlightMixin } from \"./highlight\";\nimport { SearchFormMixin } from \"./search_form\";\n\nexport interface SearchFormComponentMixin\n extends SearchFormMixin,\n ErrorDialogMixin {}\n\nexport interface HighlightComponentMixin\n extends HighlightMixin,\n ErrorDialogMixin {}\n\nexport { ErrorDialogMixin, HighlightMixin, SearchFormMixin };\n"
},
{
"alpha_fraction": 0.75,
"alphanum_fraction": 0.75,
"avg_line_length": 17.66666603088379,
"blob_id": "0986cfc984f7a23000e7c6bbe8601c8dc3f63c95",
"content_id": "fa0b620c41fb42608cb6b785d3942149d07923b9",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 168,
"license_type": "permissive",
"max_line_length": 48,
"num_lines": 9,
"path": "/uzen/schemas/base.py",
"repo_name": "Chaos-Monkey-Island/uzen",
"src_encoding": "UTF-8",
"text": "from uuid import UUID\n\nfrom fastapi_utils.api_model import APIModel\n\n\nclass AbstractBaseModel(APIModel):\n \"\"\"Full Pydantic model for Classification\"\"\"\n\n id: UUID\n"
},
{
"alpha_fraction": 0.7079018950462341,
"alphanum_fraction": 0.710354208946228,
"avg_line_length": 18.11458396911621,
"blob_id": "bfd53656c331dd2334c83b985f6c53da38d6b3d9",
"content_id": "b0be00d65714a861daedfc93be26568577f888fc",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "TypeScript",
"length_bytes": 3670,
"license_type": "permissive",
"max_line_length": 70,
"num_lines": 192,
"path": "/frontend/src/types.ts",
"repo_name": "Chaos-Monkey-Island/uzen",
"src_encoding": "UTF-8",
"text": "export interface Dict {\n [key: string]: string | number;\n}\n\nexport interface Screenshot {\n id: string | undefined;\n data: string;\n}\n\nexport interface Snapshot {\n id: string | undefined;\n url: string;\n submittedUrl: string;\n status: number;\n hostname: string;\n ipAddress: string;\n asn: string;\n server: string;\n contentType: string;\n contentLength: number;\n body: string;\n sha256: string;\n headers: Dict;\n whois: string | undefined;\n certificate: string | undefined;\n processing: boolean;\n createdAt: string | undefined;\n\n screenshot: Screenshot;\n\n scripts: Script[];\n dnsRecords: DnsRecord[];\n classifications: Classification[];\n rules: Rule[];\n}\n\nexport interface ValidationError {\n loc: string[];\n msg: string;\n type: string;\n}\n\nexport interface ErrorData {\n detail: string | ValidationError[];\n}\n\nexport interface SnapshotFilters {\n asn: string | undefined;\n contentType: string | undefined;\n hostname: string | undefined;\n ipAddress: string | undefined;\n server: string | undefined;\n sha256: string | undefined;\n status: number | undefined;\n url: string | undefined;\n fromAt: Date | undefined;\n toAt: Date | undefined;\n}\n\nexport type LinkType = \"ip_address\" | \"domain\";\n\nexport interface Link {\n name: string;\n type: string;\n baseURL: string;\n favicon: string;\n href(hostname: string): string;\n}\n\nexport interface Script {\n id: string | undefined;\n url: string;\n content: string;\n sha256: string;\n createdAt: string | undefined;\n}\n\nexport interface DnsRecord {\n id: string | undefined;\n type: string;\n value: string;\n createdAt: string | undefined;\n}\n\nexport interface Classification {\n id: string | undefined;\n name: string;\n malicious: boolean;\n note: string | undefined;\n createdAt: string | undefined;\n}\n\nexport interface YaraMatchString {\n offset: number;\n stringIdentifier: string;\n stringData: string;\n}\n\nexport interface YaraMatch {\n meta: Dict;\n namespace: string;\n rule: string;\n strings: YaraMatchString[];\n tags: string[];\n}\n\nexport interface YaraResult {\n snapshotId: string;\n scriptId: string | undefined;\n target: string;\n matches: YaraMatch[];\n}\n\nexport interface SnapshotWithYaraResult extends Snapshot {\n yaraResult: YaraResult | undefined;\n}\n\nexport interface Oneshot {\n matched: boolean;\n matches: YaraMatch[];\n snapshot: Snapshot;\n}\n\nexport type TargetTypes = \"body\" | \"whois\" | \"certificate\" | \"script\";\n\nexport interface Rule {\n id: string;\n name: string;\n target: TargetTypes;\n source: string;\n snapshots: Snapshot[];\n createdAt: string;\n updatedAt: string;\n}\n\nexport interface RuleFilters {\n name: string | undefined;\n target: TargetTypes | undefined;\n source: string | undefined;\n}\n\nexport interface Match {\n id: string;\n snapshot: Snapshot;\n rule: Rule;\n script: Script | undefined;\n matches: YaraMatch[];\n createdAt: string;\n}\n\nexport interface MatchFilters {\n snapshotId: string | undefined;\n ruleId: string | undefined;\n fromAt: Date | undefined;\n toAt: Date | undefined;\n}\n\ninterface SearchResults {\n total: number;\n}\n\nexport interface SnapshotSearchResults extends SearchResults {\n results: Snapshot[];\n}\n\nexport interface MatchSearchResults extends SearchResults {\n results: Match[];\n}\n\nexport interface RuleSearchResults extends SearchResults {\n results: Rule[];\n}\n\nexport interface CountResponse {\n count: number;\n}\n\nexport interface IPAddressInformation {\n ipAddress: string;\n asn: string;\n country: string;\n 
description: string;\n whois: string | undefined;\n snapshots: Snapshot[];\n}\n\nexport interface DomainInformation {\n hostname: string;\n whois: string | undefined;\n dnsRecords: DnsRecord[];\n snapshots: Snapshot[];\n}\n"
},
{
"alpha_fraction": 0.6703196167945862,
"alphanum_fraction": 0.673059344291687,
"avg_line_length": 27.076923370361328,
"blob_id": "1ffc5d74c846a72452390dc942b238fac56ff7e7",
"content_id": "64ce266a2b8511ffa33c80cc7c5d26e87507f74d",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1095,
"license_type": "permissive",
"max_line_length": 88,
"num_lines": 39,
"path": "/uzen/schemas/rules.py",
"repo_name": "Chaos-Monkey-Island/uzen",
"src_encoding": "UTF-8",
"text": "from typing import List, Optional, Union\nfrom uuid import UUID\n\nfrom fastapi_utils.api_model import APIModel\nfrom pydantic import Field, validator\n\nfrom uzen.schemas.search import BaseSearchResults\nfrom uzen.schemas.snapshots import BaseRule, Rule # noqa: F401\n\n\nclass CreateRulePayload(BaseRule):\n pass\n\n\nclass UpdateRulePayload(APIModel):\n name: Optional[str] = Field(\n None, title=\"Name\", description=\"A name of the YARA rule\"\n )\n source: Optional[str] = Field(\n None, title=\"YARA rule\", description=\"String containing the rules code\",\n )\n target: Optional[str] = Field(\n None,\n title=\"Target\",\n description=\"A target field to scan (body, certificate, script or whois)\",\n )\n\n @validator(\"target\")\n def target_types(cls, v):\n if v not in [\"body\", \"certificate\", \"script\", \"whois\", None]:\n raise ValueError(\"Target must be any of body, certificate, script or whois\")\n return v\n\n class Config:\n orm_mode = False\n\n\nclass SearchResults(BaseSearchResults):\n results: Union[List[Rule], List[UUID]]\n"
},
{
"alpha_fraction": 0.600910484790802,
"alphanum_fraction": 0.6043247580528259,
"avg_line_length": 31.950000762939453,
"blob_id": "6a8bc150689bee779fb3ee55961e1a7dae1baa60",
"content_id": "bd812268a69bb6b941e9401250cafe46d4179af1",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2636,
"license_type": "permissive",
"max_line_length": 86,
"num_lines": 80,
"path": "/uzen/services/rule_matcher.py",
"repo_name": "Chaos-Monkey-Island/uzen",
"src_encoding": "UTF-8",
"text": "import itertools\nfrom functools import partial\nfrom typing import List, cast\nfrom uuid import UUID\n\nimport aiometer\n\nfrom uzen.models.rules import Rule\nfrom uzen.models.snapshots import Snapshot\nfrom uzen.schemas.matches import MatchResult\nfrom uzen.schemas.scripts import Script\nfrom uzen.services.searchers.rules import RuleSearcher\nfrom uzen.services.yara_scanner import YaraScanner\n\nCHUNK_SIZE = 100\nMAX_AT_ONCE = 10\n\n\nclass RuleMatcher:\n def __init__(self, snapshot: Snapshot):\n self.snapshot = snapshot\n\n def _extract_data_from_snapshot(self, target: str = \"body\") -> str:\n if target == \"body\":\n return self.snapshot.body\n\n if target == \"whois\":\n return self.snapshot.whois\n\n if target == \"certificate\":\n return self.snapshot.certificate\n\n return \"\"\n\n def _partial_scan_for_script(\n self, rule: Rule, scanner: YaraScanner\n ) -> List[MatchResult]:\n results = []\n for script in cast(List[Script], self.snapshot.scripts):\n data = script.content\n matches = scanner.match(data)\n if len(matches) > 0:\n results.append(\n MatchResult(rule_id=rule.id, script_id=script.id, matches=matches)\n )\n return results\n\n async def partial_scan(self, ids: List[UUID]) -> List[MatchResult]:\n results: List[MatchResult] = []\n rules: List[Rule] = await Rule.filter(id__in=ids)\n for rule in rules:\n scanner = YaraScanner(rule.source)\n\n if rule.target == \"script\":\n results.extend(\n self._partial_scan_for_script(scanner=scanner, rule=rule)\n )\n else:\n data = self._extract_data_from_snapshot(rule.target)\n matches = scanner.match(data)\n if len(matches) > 0:\n results.append(MatchResult(rule_id=rule.id, matches=matches))\n\n return results\n\n async def scan(self) -> List[MatchResult]:\n search_results = await RuleSearcher.search({}, id_only=True)\n rule_ids = cast(List[UUID], search_results.results)\n if len(rule_ids) == 0:\n return []\n\n # split ids into chunks\n chunks = [\n rule_ids[i : i + CHUNK_SIZE] for i in range(0, len(rule_ids), CHUNK_SIZE)\n ]\n # make scan tasks\n tasks = [partial(self.partial_scan, chunk) for chunk in chunks]\n results = await aiometer.run_all(tasks, max_at_once=MAX_AT_ONCE)\n flatten_results = list(itertools.chain.from_iterable(results))\n return flatten_results\n"
},
{
"alpha_fraction": 0.7414448857307434,
"alphanum_fraction": 0.7414448857307434,
"avg_line_length": 29.941177368164062,
"blob_id": "d3e7fd61b14fc0f25925e65205811fe46bf3b2d6",
"content_id": "5c32769ef2204c5c4ec1034b1ba85bb363da329d",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 526,
"license_type": "permissive",
"max_line_length": 72,
"num_lines": 17,
"path": "/uzen/tasks/dns_records.py",
"repo_name": "Chaos-Monkey-Island/uzen",
"src_encoding": "UTF-8",
"text": "from typing import List\n\nfrom loguru import logger\n\nfrom uzen.factories.dns_records import DnsRecordFactory\nfrom uzen.models.dns_records import DnsRecord\nfrom uzen.tasks import EnrichmentTask\n\n\nclass DnsRecordTask(EnrichmentTask):\n async def _process(self) -> List[DnsRecord]:\n logger.debug(f\"Fetch DNS records from {self.snapshot.hostname}\")\n records = await DnsRecordFactory.from_snapshot(self.snapshot)\n if self.insert_to_db:\n await DnsRecord.bulk_create(records)\n\n return records\n"
},
{
"alpha_fraction": 0.6342270970344543,
"alphanum_fraction": 0.6411784291267395,
"avg_line_length": 29.209999084472656,
"blob_id": "6c0d21071bf448ca877a60ada97e4e87e2005f68",
"content_id": "0ad36b369735777ee57bb39b0c8ebeda21962fb9",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3021,
"license_type": "permissive",
"max_line_length": 79,
"num_lines": 100,
"path": "/tests/apis/test_rules.py",
"repo_name": "Chaos-Monkey-Island/uzen",
"src_encoding": "UTF-8",
"text": "import json\n\nimport pytest\n\nfrom tests.utils import first_rule_id\nfrom uzen.models.rules import Rule\n\n\[email protected]\nasync def test_create_rule_with_invalid_target(client):\n payload = {\"name\": \"test\", \"target\": \"foo\", \"source\": \"foo\"}\n response = await client.post(\"/api/rules/\", data=json.dumps(payload))\n assert response.status_code == 422\n\n\[email protected]\nasync def test_create_rule_with_invalid_source(client):\n payload = {\"name\": \"test\", \"target\": \"body\", \"source\": \"foo; bar;\"}\n response = await client.post(\"/api/rules/\", data=json.dumps(payload))\n assert response.status_code == 422\n\n\[email protected]\nasync def test_create_rule(client):\n payload = {\n \"name\": \"test\",\n \"target\": \"body\",\n \"source\": 'rule foo: bar {strings: $a = \"lmn\" condition: $a}',\n }\n response = await client.post(\"/api/rules/\", data=json.dumps(payload))\n assert response.status_code == 201\n\n count = await Rule.all().count()\n assert count == 1\n\n\[email protected]\[email protected](\"rules_setup\")\nasync def test_delete_rule(client):\n id_ = await first_rule_id()\n response = await client.delete(f\"/api/rules/{id_}\")\n assert response.status_code == 204\n\n\[email protected]\[email protected](\"rules_setup\")\nasync def test_rules_search(client):\n count = await Rule.all().count()\n\n response = await client.get(\"/api/rules/search\")\n json = response.json()\n rules = json.get(\"results\")\n assert len(rules) == count\n\n # it matches with a rule\n response = await client.get(\"/api/rules/search\", params={\"name\": \"test1\"})\n json = response.json()\n rules = json.get(\"results\")\n assert len(rules) == 1\n\n # it matches with the all rules\n response = await client.get(\"/api/rules/search\", params={\"target\": \"body\"})\n json = response.json()\n rules = json.get(\"results\")\n assert len(rules) == count\n\n # it matches with the all rules\n response = await client.get(\"/api/rules/search\", params={\"source\": \"lmn\"})\n json = response.json()\n rules = json.get(\"results\")\n assert len(rules) == count\n\n\[email protected]\[email protected](\"rules_setup\")\nasync def test_update(client):\n id_ = await first_rule_id()\n\n payload = {\"name\": \"woweee\"}\n response = await client.put(f\"/api/rules/{id_}\", data=json.dumps(payload))\n assert response.status_code == 200\n\n rule = await Rule.get(id=id_)\n assert rule.name == \"woweee\"\n old_updated_at = rule.updated_at\n\n payload = {\n \"name\": \"test\",\n \"target\": \"script\",\n \"source\": 'rule foo: bar {strings: $a = \"html\" condition: $a}',\n }\n response = await client.put(f\"/api/rules/{id_}\", data=json.dumps(payload))\n assert response.status_code == 200\n\n rule = await Rule.get(id=id_)\n assert rule.name == \"test\"\n assert rule.target == \"script\"\n assert rule.source == 'rule foo: bar {strings: $a = \"html\" condition: $a}'\n # should update updated_at field\n assert old_updated_at < rule.updated_at\n"
},
{
"alpha_fraction": 0.6392130851745605,
"alphanum_fraction": 0.6452150940895081,
"avg_line_length": 27.836538314819336,
"blob_id": "d1dfb8652d201c078cf92fd9b06793fbc962f6cf",
"content_id": "d6f3b1931a855da30e56dbbb00d24801bc44225b",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2999,
"license_type": "permissive",
"max_line_length": 84,
"num_lines": 104,
"path": "/uzen/services/snapshot.py",
"repo_name": "Chaos-Monkey-Island/uzen",
"src_encoding": "UTF-8",
"text": "from typing import Optional\n\nimport httpx\nfrom loguru import logger\nfrom playwright import Error\nfrom tortoise.transactions import in_transaction\n\nfrom uzen.core import settings\nfrom uzen.core.exceptions import TakeSnapshotError\nfrom uzen.models.scripts import Script\nfrom uzen.models.snapshots import Snapshot\nfrom uzen.schemas.utils import SnapshotResult\nfrom uzen.services.browser import Browser\nfrom uzen.services.fake_browser import FakeBrowser\n\n\ndef use_playwright(host: Optional[str] = None) -> bool:\n return host is None\n\n\ndef use_httpx(host: Optional[str] = None) -> bool:\n if host is not None:\n return True\n return settings.HTTPX_FALLBACK\n\n\nasync def take_snapshot(\n url: str,\n accept_language: Optional[str] = None,\n host: Optional[str] = None,\n ignore_https_errors: Optional[bool] = None,\n referer: Optional[str] = None,\n timeout: Optional[int] = None,\n user_agent: Optional[str] = None,\n) -> SnapshotResult:\n\n timeout = timeout or 30000\n ignore_https_errors = ignore_https_errors or False\n\n result = None\n errors = []\n\n # Skip playwright if a host is not None\n # because Chromium prohibits setting \"host\" header.\n # ref. https://github.com/puppeteer/puppeteer/issues/4575#issuecomment-511259872\n if use_playwright(host):\n try:\n result = await Browser.take_snapshot(\n url,\n accept_language=accept_language,\n ignore_https_errors=ignore_https_errors,\n referer=referer,\n timeout=timeout,\n user_agent=user_agent,\n )\n except Error as e:\n message = f\"Failed to take a snapshot by playwright: {e}.\"\n logger.debug(message)\n errors.append(message)\n\n if result is not None:\n return result\n\n # raise an error if HTTPX is not enabled\n if not use_httpx(host):\n raise TakeSnapshotError(\"\\n\".join(errors))\n\n # fallback to HTTPX\n logger.debug(\"Fallback to HTTPX\")\n try:\n result = await FakeBrowser.take_snapshot(\n url,\n accept_language=accept_language,\n host=host,\n ignore_https_errors=ignore_https_errors,\n referer=referer,\n timeout=timeout,\n user_agent=user_agent,\n )\n except httpx.HTTPError as e:\n message = f\"Failed to take a snapshot by HTTPX: {e}.\"\n logger.debug(message)\n errors.append(message)\n\n if result is not None:\n return result\n\n raise TakeSnapshotError(\"\\n\".join(errors))\n\n\nasync def save_snapshot(result: SnapshotResult) -> Snapshot:\n async with in_transaction():\n snapshot = result.snapshot\n screenshot = result.screenshot\n\n await snapshot.save()\n screenshot.snapshot_id = snapshot.id\n await screenshot.save()\n\n for script in result.scripts:\n script.snapshot_id = snapshot.id\n await Script.bulk_create(result.scripts)\n\n return snapshot\n"
},
{
"alpha_fraction": 0.6482627391815186,
"alphanum_fraction": 0.6668252944946289,
"avg_line_length": 29.897058486938477,
"blob_id": "95531f90840c49dff32d7426f5ea724af41d2a79",
"content_id": "f54d7ecc165e9af45cef79f3bcea12d7b0330cab",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2101,
"license_type": "permissive",
"max_line_length": 88,
"num_lines": 68,
"path": "/tests/apis/test_matches.py",
"repo_name": "Chaos-Monkey-Island/uzen",
"src_encoding": "UTF-8",
"text": "import pytest\n\nfrom tests.utils import first_rule_id, first_snapshot_id\nfrom uzen.models.matches import Match\n\n\[email protected]\[email protected](\"matches_setup\")\nasync def test_matches_search(client):\n count = await Match.all().count()\n\n response = await client.get(\"/api/matches/search\")\n assert response.status_code == 200\n\n json = response.json()\n matches = json.get(\"results\")\n assert len(matches) == count\n\n first = matches[0]\n assert isinstance(first.get(\"snapshot\"), dict)\n assert isinstance(first.get(\"rule\"), dict)\n\n total = json.get(\"total\")\n assert total == count\n\n\[email protected]\[email protected](\"matches_setup\")\nasync def test_matches_search_with_filters(client):\n snapshot_id = await first_snapshot_id()\n response = await client.get(\n \"/api/matches/search\", params={\"snapshot_id\": snapshot_id}\n )\n assert response.status_code == 200\n json = response.json()\n matches = json.get(\"results\")\n assert len(matches) == 1\n\n rule_id = await first_rule_id()\n response = await client.get(\"/api/matches/search\", params={\"rule_id\": rule_id})\n assert response.status_code == 200\n json = response.json()\n matches = json.get(\"results\")\n assert len(matches) == 1\n\n response = await client.get(\n \"/api/matches/search\", params={\"rule_id\": rule_id, \"snapshot_id\": snapshot_id}\n )\n assert response.status_code == 200\n json = response.json()\n matches = json.get(\"results\")\n assert len(matches) == 1\n\n\[email protected]\[email protected](\"matches_setup\")\nasync def test_matches_search_with_daterange(client):\n response = await client.get(\"/api/matches/search\", params={\"from_at\": \"1970-01-01\"})\n assert response.status_code == 200\n json = response.json()\n matches = json.get(\"results\")\n assert len(matches) == await Match.all().count()\n\n response = await client.get(\"/api/matches/search\", params={\"to_at\": \"1970-01-01\"})\n assert response.status_code == 200\n json = response.json()\n matches = json.get(\"results\")\n assert len(matches) == 0\n"
},
{
"alpha_fraction": 0.7620087265968323,
"alphanum_fraction": 0.7663755416870117,
"avg_line_length": 25.941177368164062,
"blob_id": "30db7e2bf274dd23d5072823e52bcd9a8c0ab0d2",
"content_id": "d0eb06b05b9811092a0316127d112c0ecdb7f948",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 458,
"license_type": "permissive",
"max_line_length": 88,
"num_lines": 17,
"path": "/tests/factories/test_classifications.py",
"repo_name": "Chaos-Monkey-Island/uzen",
"src_encoding": "UTF-8",
"text": "import vcr\n\nfrom tests.utils import make_snapshot\nfrom uzen.factories.classifications import ClassificationFactory\n\n\[email protected]_cassette(\n \"tests/fixtures/vcr_cassettes/classifications.yaml\", filter_query_parameters=[\"key\"]\n)\ndef test_build_from_snapshot():\n snapshot = make_snapshot()\n\n classifications = ClassificationFactory.from_snapshot(snapshot)\n assert len(classifications) > 0\n\n first = classifications[0]\n assert not first.malicious\n"
},
{
"alpha_fraction": 0.7742574214935303,
"alphanum_fraction": 0.7742574214935303,
"avg_line_length": 27.05555534362793,
"blob_id": "861bd35e8c7574c5ac5954f08df789bc27dbfd5d",
"content_id": "0da4dfb7d37a6337901a2262284c61635b224421",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 505,
"license_type": "permissive",
"max_line_length": 53,
"num_lines": 18,
"path": "/tests/tasks/test_snapshots.py",
"repo_name": "Chaos-Monkey-Island/uzen",
"src_encoding": "UTF-8",
"text": "import pytest\n\nfrom tests.utils import first_snapshot_id\nfrom uzen.models.snapshots import Snapshot\nfrom uzen.tasks.snapshots import UpdateProcessingTask\n\n\[email protected]\[email protected](\"snapshots_setup\")\nasync def test_update_processing_task(client):\n id_ = await first_snapshot_id()\n snapshot = await Snapshot.get(id=id_)\n assert snapshot.processing\n\n await UpdateProcessingTask.process(snapshot)\n\n snapshot = await Snapshot.get(id=id_)\n assert not snapshot.processing\n"
},
{
"alpha_fraction": 0.7427745461463928,
"alphanum_fraction": 0.7456647157669067,
"avg_line_length": 23.714284896850586,
"blob_id": "60c2434e2a8bf021b85b2f079567b20bf2c8885b",
"content_id": "9c109539cd2f7cb7129c75337ccb8806eb18e734",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 346,
"license_type": "permissive",
"max_line_length": 60,
"num_lines": 14,
"path": "/tests/factories/test_dns_records.py",
"repo_name": "Chaos-Monkey-Island/uzen",
"src_encoding": "UTF-8",
"text": "import pytest\n\nfrom tests.utils import make_snapshot\nfrom uzen.factories.dns_records import DnsRecordFactory\n\n\[email protected]\nasync def test_build_from_snapshot():\n snapshot = make_snapshot()\n\n records = await DnsRecordFactory.from_snapshot(snapshot)\n for record in records:\n print(record.value)\n assert len(records) > 0\n"
},
{
"alpha_fraction": 0.692799985408783,
"alphanum_fraction": 0.7056000232696533,
"avg_line_length": 27.409090042114258,
"blob_id": "c57b4f2ffec6853acbecbae83afc441996b0b427",
"content_id": "4e7d6dfc48eaf9ceea35c216ce6c55c324d5754a",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1250,
"license_type": "permissive",
"max_line_length": 87,
"num_lines": 44,
"path": "/tests/services/test_fake_browser.py",
"repo_name": "Chaos-Monkey-Island/uzen",
"src_encoding": "UTF-8",
"text": "import pytest\nimport respx\nfrom httpx import Response\n\nfrom uzen.services.certificate import Certificate\nfrom uzen.services.fake_browser import FakeBrowser\nfrom uzen.services.rdap import RDAP\nfrom uzen.services.whois import Whois\n\n\ndef mock_lookup(ip_address: str):\n return {\"asn\": \"AS15133\"}\n\n\ndef mock_whois(hostname: str):\n return \"foo\"\n\n\ndef mock_load_and_dump_from_url(url: str):\n return \"Certificate:\"\n\n\[email protected]\[email protected]\nasync def test_take_snapshot(monkeypatch):\n monkeypatch.setattr(RDAP, \"lookup\", mock_lookup)\n monkeypatch.setattr(Whois, \"whois\", mock_whois)\n monkeypatch.setattr(\n Certificate, \"load_and_dump_from_url\", mock_load_and_dump_from_url\n )\n respx.get(\"http://example.com/\",).mock(\n Response(status_code=200, content=\"foo\", headers={\"Content-Type\": \"text/html\"})\n )\n\n result = await FakeBrowser.take_snapshot(\"http://example.com\")\n snapshot = result.snapshot\n assert snapshot.url == \"http://example.com\"\n assert snapshot.submitted_url == \"http://example.com\"\n\n assert snapshot.hostname == \"example.com\"\n assert snapshot.status == 200\n assert \"text/html\" in snapshot.content_type\n assert snapshot.asn == \"AS15133\"\n assert snapshot.whois == \"foo\"\n"
},
{
"alpha_fraction": 0.6119133830070496,
"alphanum_fraction": 0.6119133830070496,
"avg_line_length": 32.07462692260742,
"blob_id": "2656671aeccd8254295fdaff68df113849bf7a6e",
"content_id": "d3b9a9653a4a3e65f843f14551b716d0d5e5bbe3",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2216,
"license_type": "permissive",
"max_line_length": 89,
"num_lines": 67,
"path": "/uzen/services/searchers/matches.py",
"repo_name": "Chaos-Monkey-Island/uzen",
"src_encoding": "UTF-8",
"text": "from typing import List, cast\nfrom uuid import UUID\n\nfrom tortoise.query_utils import Q\n\nfrom uzen.models.matches import Match\nfrom uzen.schemas.matches import Match as MatchModel\nfrom uzen.schemas.matches import SearchResults\nfrom uzen.services.searchers import AbstractSearcher\nfrom uzen.services.searchers.utils import convert_to_datetime\n\n\nclass MatchSearcher(AbstractSearcher):\n @classmethod\n async def search(\n cls, filters: dict, size=None, offset=None, id_only=False\n ) -> SearchResults:\n \"\"\"Search matches\n\n Arguments:\n filters {dict} -- Filters for match search\n\n Keyword Arguments:\n size {[int]} -- Nmber of results returned (default: {None})\n offset {[int]} -- Offset of the first result for pagination (default: {None})\n id_only {bool} -- Whether to return only a list of ids (default: {False})\n\n Returns:\n SearchResults -- A list of matches and total count\n \"\"\"\n queries: List[Q] = []\n\n rule_id = filters.get(\"rule_id\")\n if rule_id is not None:\n queries.append(Q(rule_id=rule_id))\n\n snapshot_id = filters.get(\"snapshot_id\")\n if snapshot_id is not None:\n queries.append(Q(snapshot_id=snapshot_id))\n\n from_at = filters.get(\"from_at\")\n if from_at is not None:\n from_at = convert_to_datetime(from_at)\n queries.append(Q(created_at__gt=from_at))\n\n to_at = filters.get(\"to_at\")\n if to_at is not None:\n to_at = convert_to_datetime(to_at)\n queries.append(Q(created_at__lt=to_at))\n\n query = Q(*queries)\n\n # Run search\n instance = cls(\n model=Match, query=query, prefetch_related=[\"snapshot\", \"rule\", \"script\"]\n )\n results = await instance._search(size=size, offset=offset, id_only=id_only)\n\n if id_only:\n return SearchResults(\n results=cast(List[UUID], results.results), total=results.total\n )\n\n matches: List[MatchModel] = [\n match.to_model() for match in cast(List[Match], results.results)\n ]\n return SearchResults(results=matches, total=results.total)\n"
},
{
"alpha_fraction": 0.5157083868980408,
"alphanum_fraction": 0.6158862113952637,
"avg_line_length": 24.17910385131836,
"blob_id": "49334f6710faad237e8c0fd5bcc139d959152b94",
"content_id": "c66287d51b1cc86290801827ccff1c538ec82d7d",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "TOML",
"length_bytes": 1687,
"license_type": "permissive",
"max_line_length": 277,
"num_lines": 67,
"path": "/pyproject.toml",
"repo_name": "Chaos-Monkey-Island/uzen",
"src_encoding": "UTF-8",
"text": "[tool.poetry]\nname = \"uzen\"\nversion = \"0.1.0\"\ndescription = \"YARA with Puppeteer\"\nauthors = [\"Manabu Niseki <[email protected]>\"]\nlicense = \"MIT\"\n\n[tool.poetry.dependencies]\npython = \"^3.8\"\naiofiles = \"^0.6.0\"\naiometer = \"^0.2.1\"\naiomysql = \"^0.0.21\"\naiosqlite = \"^0.16.0\"\nalembic = \"^1.4.3\"\nasync_lru = \"^1.0.2\"\nbeautifulsoup4 = \"^4.9.3\"\ncertifi = \"^2020.12.5\"\ndnspython = \"^2.0.0\"\nfastapi = \"^0.63.0\"\nfastapi-utils = \"^0.2.1\"\nhttpx = \"^0.16.1\"\nipwhois = \"^1.2.0\"\nloguru = \"^0.5.3\"\nplaywright = \"^0.171.1\"\npydantic = \"^1.7.3\"\npyopenssl = \"^20.0.1\"\npysafebrowsing = \"^0.1.1\"\npython-whois = \"^0.7.3\"\ntortoise-orm = \"^0.16.19\"\nuvicorn = {extras = [\"standard\"], version = \"^0.13.3\"}\nyara-python = \"^4.0.2\"\n\n[tool.poetry.dev-dependencies]\nasynctest = \"^0.13.0\"\nautoflake = \"^1.4\"\nautopep8 = \"^1.5.4\"\nblack = \"^20.8b1\"\ncoveralls = \"^2.2.0\"\nflake8 = \"^3.8.4\"\nisort = \"^5.7.0\"\nmypy = \"^0.790\"\nmysqlclient = \"^2.0.2\"\npre-commit = \"^2.9.3\"\npytest = \"^6.2.1\"\npytest-asyncio = \"^0.14.0\"\npytest-black = \"^0.3.12\"\npytest-cov = \"^2.10.1\"\npytest-mock = \"^3.4.0\"\npytest-randomly = \"^3.5.0\"\npytest-timeout = \"^1.4.2\"\npyupgrade = \"^2.7.4\"\nrespx = \"^0.16.3\"\nvcrpy = \"^4.1.1\"\npytest-sugar = \"^0.9.4\"\npytest-parallel = \"^0.1.0\"\n\n[tool.isort]\nforce_grid_wrap = 0\ninclude_trailing_comma = true\nknown_third_party = [\"OpenSSL\", \"aiometer\", \"alembic\", \"bs4\", \"dns\", \"fastapi\", \"fastapi_utils\", \"httpx\", \"ipwhois\", \"loguru\", \"playwright\", \"pydantic\", \"pyppeteer\", \"pysafebrowsing\", \"pytest\", \"requests\", \"respx\", \"sqlalchemy\", \"starlette\", \"tortoise\", \"vcr\", \"whois\", \"yara\"]\nline_length = 88\nmulti_line_output = 3\nuse_parentheses= true\n\n[build-system]\nrequires = [\"poetry-core>=1.0.0a5\"]\nbuild-backend = \"poetry.core.masonry.api\"\n"
},
{
"alpha_fraction": 0.7156398296356201,
"alphanum_fraction": 0.7156398296356201,
"avg_line_length": 34.16666793823242,
"blob_id": "425dd2ceaa82a2ad30360d4f0f0a286f1a0c0ff1",
"content_id": "6ee1b190361f24f08e0003fcb317a332dc371c7c",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 844,
"license_type": "permissive",
"max_line_length": 87,
"num_lines": 24,
"path": "/uzen/api/api.py",
"repo_name": "Chaos-Monkey-Island/uzen",
"src_encoding": "UTF-8",
"text": "from fastapi import APIRouter\n\nfrom uzen.api.endpoints import (\n domain,\n ip_address,\n matches,\n rules,\n screenshots,\n snapshots,\n urlscan,\n yara,\n)\n\napi_router = APIRouter()\napi_router.include_router(domain.router, prefix=\"/domain\", tags=[\"Domain\"])\napi_router.include_router(ip_address.router, prefix=\"/ip_address\", tags=[\"IP address\"])\napi_router.include_router(matches.router, prefix=\"/matches\", tags=[\"Matches\"])\napi_router.include_router(rules.router, prefix=\"/rules\", tags=[\"Rules\"])\napi_router.include_router(\n screenshots.router, prefix=\"/screenshots\", tags=[\"Screenshots\"]\n)\napi_router.include_router(snapshots.router, prefix=\"/snapshots\", tags=[\"Snapshots\"])\napi_router.include_router(urlscan.router, prefix=\"/import\", tags=[\"Import\"])\napi_router.include_router(yara.router, prefix=\"/yara\", tags=[\"YARA\"])\n"
},
{
"alpha_fraction": 0.5381872057914734,
"alphanum_fraction": 0.5439821481704712,
"avg_line_length": 34.60846710205078,
"blob_id": "c1bd724d69f84cfbe2f2c33562541fc2cde83eb6",
"content_id": "2b780ab93fac7f4a13ebacb8567f673253fd99e6",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6730,
"license_type": "permissive",
"max_line_length": 94,
"num_lines": 189,
"path": "/uzen/services/browser.py",
"repo_name": "Chaos-Monkey-Island/uzen",
"src_encoding": "UTF-8",
"text": "import asyncio\nimport base64\nfrom typing import List, Optional, cast\n\nimport playwright\nfrom playwright import async_playwright\nfrom playwright.async_api import Browser, Error, Page, Playwright, Response\n\nfrom uzen.core import settings\nfrom uzen.models.screenshots import Screenshot\nfrom uzen.models.scripts import Script\nfrom uzen.models.snapshots import Snapshot\nfrom uzen.schemas.utils import SnapshotResult\nfrom uzen.services.certificate import Certificate\nfrom uzen.services.utils import (\n calculate_sha256,\n get_asn_by_ip_address,\n get_hostname_from_url,\n get_ip_address_by_hostname,\n)\nfrom uzen.services.whois import Whois\n\n\nasync def launch_browser(p: Playwright) -> Browser:\n if settings.BROWSER_WS_ENDPOINT != \"\":\n return await p.chromium.connect(wsEndpoint=settings.BROWSER_WS_ENDPOINT)\n\n return await p.chromium.launch(headless=True, chromiumSandbox=False)\n\n\ndef is_js_content_type(content_type: str) -> bool:\n return content_type.startswith(\"application/javascript\") or content_type.startswith(\n \"text/javascript\"\n )\n\n\nclass Browser:\n @staticmethod\n async def take_snapshot(\n url: str,\n accept_language: Optional[str] = None,\n ignore_https_errors: bool = False,\n referer: Optional[str] = None,\n timeout: Optional[int] = None,\n user_agent: Optional[str] = None,\n ) -> SnapshotResult:\n \"\"\"Take a snapshot of a website by puppeteer\n\n Arguments:\n url {str} -- A URL of a website\n\n Keyword Arguments:\n accept_language {Optional[str]} -- Accept-language header to use (default: {None})\n ignore_https_errors {bool} -- Whether to ignore HTTPS errors (default: {False})\n referer {Optional[str]} -- Referer header to use (default: {None})\n timeout {Optional[int]} -- Maximum time to wait for in seconds (default: {None})\n user_agent {Optional[str]} -- User-agent header to use (default: {None})\n\n Returns:\n SnapshotResult\n \"\"\"\n submitted_url: str = url\n try:\n async with async_playwright() as p:\n browser: playwright.browser.Browser = await launch_browser(p)\n page: Page = await browser.newPage(\n ignoreHTTPSErrors=ignore_https_errors, userAgent=user_agent\n )\n\n headers = {}\n if accept_language is not None:\n headers[\"Accept-Language\"] = accept_language\n await page.setExtraHTTPHeaders(headers)\n\n # intercept responses on page to get scripts\n scripts: List[Script] = []\n\n async def handle_response(response: Response) -> None:\n content_type: str = response.headers.get(\"content-type\", \"\")\n if response.ok and is_js_content_type(content_type):\n content = await response.text()\n scripts.append(\n Script(\n url=response.url,\n content=content,\n sha256=calculate_sha256(content),\n )\n )\n\n page.on(\n \"response\",\n lambda response: asyncio.create_task(handle_response(response)),\n )\n\n # default timeout = 30 seconds\n timeout = timeout or 30 * 1000\n res: Response = await page.goto(\n url,\n referer=referer,\n timeout=timeout,\n waitUntil=settings.BROWSER_WAIT_UNTIL,\n )\n\n request = {\n \"accept_language\": accept_language,\n \"browser\": browser.version,\n \"ignore_https_errors\": ignore_https_errors,\n \"referer\": referer,\n \"timeout\": timeout,\n \"user_agent\": await page.evaluate(\"() => navigator.userAgent\"),\n }\n\n url = page.url\n status = res.status\n screenshot_data = await page.screenshot()\n body = await page.content()\n sha256 = calculate_sha256(body)\n headers = res.headers\n\n await browser.close()\n except Error as e:\n raise (e)\n\n server = headers.get(\"server\")\n content_type = 
headers.get(\"content-type\")\n content_length = headers.get(\"content-length\")\n\n hostname = cast(str, get_hostname_from_url(url))\n certificate = Certificate.load_and_dump_from_url(url)\n ip_address = cast(str, get_ip_address_by_hostname(hostname))\n asn = get_asn_by_ip_address(ip_address) or \"\"\n whois = Whois.whois(hostname)\n\n snapshot = Snapshot(\n url=url,\n submitted_url=submitted_url,\n status=status,\n body=body,\n sha256=sha256,\n headers=headers,\n hostname=hostname,\n ip_address=ip_address,\n asn=asn,\n server=server,\n content_length=content_length,\n content_type=content_type,\n whois=whois,\n certificate=certificate,\n request=request,\n )\n screenshot = Screenshot()\n screenshot.data = base64.b64encode(screenshot_data).decode()\n\n return SnapshotResult(\n screenshot=screenshot, snapshot=snapshot, scripts=scripts,\n )\n\n @staticmethod\n async def preview(hostname: str) -> Screenshot:\n async def _preview(hostname: str, protocol=\"http\") -> Screenshot:\n try:\n async with async_playwright() as p:\n browser = await launch_browser(p)\n page = await browser.newPage()\n # try with http\n await page.goto(\n f\"{protocol}://{hostname}\",\n waitUntil=settings.BROWSER_WAIT_UNTIL,\n )\n screenshot_data = await page.screenshot()\n await browser.close()\n\n screenshot = Screenshot()\n screenshot.data = base64.b64encode(screenshot_data).decode()\n return screenshot\n except Error as e:\n raise (e)\n\n try:\n return await _preview(hostname, \"http\")\n except Error:\n pass\n\n try:\n return await _preview(hostname, \"https\")\n except Error:\n screenshot = Screenshot()\n screenshot.data = \"\"\n return screenshot\n"
},
{
"alpha_fraction": 0.6592292189598083,
"alphanum_fraction": 0.6646382808685303,
"avg_line_length": 28,
"blob_id": "95e1acf6ee313ece5951aaa463e88a8200791433",
"content_id": "5bc7e7c473a23c1aa45191e95ea37b6b3ad4d113",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1479,
"license_type": "permissive",
"max_line_length": 78,
"num_lines": 51,
"path": "/uzen/models/rules.py",
"repo_name": "Chaos-Monkey-Island/uzen",
"src_encoding": "UTF-8",
"text": "from __future__ import annotations\n\nfrom typing import Any, List, Optional, cast\nfrom uuid import UUID\n\nfrom tortoise import fields\n\nfrom uzen.models.base import AbstractBaseModel\nfrom uzen.models.mixins import TimestampMixin\nfrom uzen.models.snapshots import Snapshot\nfrom uzen.schemas.rules import Rule as RuleModel\nfrom uzen.schemas.snapshots import Snapshot as SnapshotModel\n\nLIMIT_OF_PREFETCH = 20\n\n\nclass Rule(TimestampMixin, AbstractBaseModel):\n name = fields.CharField(max_length=255)\n target = fields.CharField(max_length=255)\n source = fields.TextField()\n updated_at = fields.DatetimeField(auto_now=True)\n\n _snapshots: fields.ManyToManyRelation[Snapshot]\n\n def __init__(self, **kwargs: Any) -> None:\n super().__init__(**kwargs)\n\n self.snapshots_: Optional[List[Snapshot]] = None\n\n @property\n def snapshots(self) -> List[SnapshotModel]:\n if hasattr(self, \"snapshots_\") and self.snapshots_ is not None:\n return cast(\n List[SnapshotModel],\n [snapshot.to_model() for snapshot in self.snapshots_],\n )\n\n return []\n\n def to_model(self) -> RuleModel:\n return RuleModel.from_orm(self)\n\n @classmethod\n async def get_by_id(cls, id_: UUID) -> Rule:\n rule = await cls.get(id=id_)\n rule.snapshots_ = await rule._snapshots.all().limit(LIMIT_OF_PREFETCH)\n return rule\n\n class Meta:\n table = \"rules\"\n ordering = [\"-created_at\"]\n"
},
{
"alpha_fraction": 0.7701711654663086,
"alphanum_fraction": 0.7726161479949951,
"avg_line_length": 24.5625,
"blob_id": "18d201644b374ac01d935462ade6551b205daf3d",
"content_id": "4f610ed409526aba1d1909a7bd098359a94c319c",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 409,
"license_type": "permissive",
"max_line_length": 77,
"num_lines": 16,
"path": "/tests/factories/test_domain.py",
"repo_name": "Chaos-Monkey-Island/uzen",
"src_encoding": "UTF-8",
"text": "import pytest\n\nfrom uzen.factories.domain import DomainInformationFactory\nfrom uzen.services.whois import Whois\n\n\ndef mock_whois(hostname: str):\n return \"foo\"\n\n\[email protected]\nasync def test_build_from_hostname(monkeypatch):\n monkeypatch.setattr(Whois, \"whois\", mock_whois)\n\n information = await DomainInformationFactory.from_hostname(\"example.com\")\n assert len(information.dns_records) > 0\n"
},
{
"alpha_fraction": 0.6561086177825928,
"alphanum_fraction": 0.6832579374313354,
"avg_line_length": 21.100000381469727,
"blob_id": "b7192284c4f2dec07a81be420ac9908c2f19fdb0",
"content_id": "792da23542ba4426be993b37685ff7c3391d6164",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 221,
"license_type": "permissive",
"max_line_length": 39,
"num_lines": 10,
"path": "/tests/test_app.py",
"repo_name": "Chaos-Monkey-Island/uzen",
"src_encoding": "UTF-8",
"text": "import pytest\n\n\[email protected]\nasync def test_app(client):\n response = await client.get(\"/\")\n assert response.status_code == 200\n\n response = await client.get(\"/foo\")\n assert response.status_code == 404\n"
},
{
"alpha_fraction": 0.65727698802948,
"alphanum_fraction": 0.6629108190536499,
"avg_line_length": 25.625,
"blob_id": "12d40fb97d2dfe76e12ac59c8334dd983f4fd016",
"content_id": "af79aa6d99df0583393cd46d5b63fcff0dbbec98",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1065,
"license_type": "permissive",
"max_line_length": 77,
"num_lines": 40,
"path": "/uzen/models/scripts.py",
"repo_name": "Chaos-Monkey-Island/uzen",
"src_encoding": "UTF-8",
"text": "from tortoise import fields\n\nfrom uzen.models.base import AbstractBaseModel\nfrom uzen.models.mixins import TimestampMixin\nfrom uzen.schemas.scripts import Script as ScriptModel\n\n\ndef normalize_url(url: str) -> str:\n \"\"\"Normalize URL\n\n Arguments:\n url {str} -- A URL\n\n Returns:\n str -- A normalized URL\n \"\"\"\n # remove string after \"?\" to comply with Pydantic AnyHttpUrl validation\n # e.g. http:/example.com/test.js?foo=bar to http://example.com/test.js\n splitted = url.split(\"?\")\n return splitted[0]\n\n\nclass Script(TimestampMixin, AbstractBaseModel):\n url = fields.TextField()\n content = fields.TextField()\n sha256 = fields.CharField(max_length=64)\n\n snapshot: fields.ForeignKeyRelation[\"Snapshot\"] = fields.ForeignKeyField(\n \"models.Snapshot\",\n related_name=\"_scripts\",\n to_field=\"id\",\n on_delete=fields.CASCADE,\n )\n\n def to_model(self) -> ScriptModel:\n self.url = normalize_url(self.url)\n return ScriptModel.from_orm(self)\n\n class Meta:\n table = \"scripts\"\n"
},
{
"alpha_fraction": 0.6137105822563171,
"alphanum_fraction": 0.6137105822563171,
"avg_line_length": 30.152542114257812,
"blob_id": "12d6e31972d4fd085b9698805bd37c29f43f45ca",
"content_id": "38ce437f6b517bf308cf3b45e86ddc5886d3707c",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1838,
"license_type": "permissive",
"max_line_length": 89,
"num_lines": 59,
"path": "/uzen/services/searchers/rules.py",
"repo_name": "Chaos-Monkey-Island/uzen",
"src_encoding": "UTF-8",
"text": "from typing import List, cast\nfrom uuid import UUID\n\nfrom tortoise.query_utils import Q\n\nfrom uzen.models.rules import Rule\nfrom uzen.schemas.rules import Rule as RuleModel\nfrom uzen.schemas.rules import SearchResults\nfrom uzen.services.searchers import AbstractSearcher\n\n\nclass RuleSearcher(AbstractSearcher):\n @classmethod\n async def search(\n cls, filters: dict, size=None, offset=None, id_only=False,\n ) -> SearchResults:\n \"\"\"Search rules.\n\n Arguments:\n filters {dict} -- Filters for rule search\n\n Keyword Arguments:\n size {[int]} -- Nmber of results returned (default: {None})\n offset {[int]} -- Offset of the first result for pagination (default: {None})\n id_only {bool} -- Whether to return only a list of ids (default: {False})\n\n Returns:\n SearchResults -- A list of rules and total count\n \"\"\"\n # build queirs from filters\n queries = []\n\n name = filters.get(\"name\")\n if name is not None:\n queries.append(Q(name__contains=name))\n\n target = filters.get(\"target\")\n if target is not None:\n queries.append(Q(target=target))\n\n source = filters.get(\"source\")\n if source is not None:\n queries.append(Q(source__contains=source))\n\n query = Q(*queries)\n\n # Run search\n instance = cls(model=Rule, query=query)\n results = await instance._search(size=size, offset=offset, id_only=id_only)\n\n if id_only:\n return SearchResults(\n results=cast(List[UUID], results.results), total=results.total\n )\n\n rules: List[RuleModel] = [\n rule.to_model() for rule in cast(List[Rule], results.results)\n ]\n return SearchResults(results=rules, total=results.total)\n"
},
{
"alpha_fraction": 0.6829710006713867,
"alphanum_fraction": 0.6829710006713867,
"avg_line_length": 23,
"blob_id": "a7fa91a61d94a124b2cab9d5506b6deb7d6e42d7",
"content_id": "e49027cca47148b432aea92fb82d0596ea72440e",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 552,
"license_type": "permissive",
"max_line_length": 81,
"num_lines": 23,
"path": "/uzen/core/events.py",
"repo_name": "Chaos-Monkey-Island/uzen",
"src_encoding": "UTF-8",
"text": "from typing import Callable\n\nfrom fastapi import FastAPI\nfrom tortoise import Tortoise\n\nfrom uzen.core import settings\n\n\ndef create_start_app_handler(app: FastAPI) -> Callable:\n async def start_app() -> None:\n await Tortoise.init(\n db_url=settings.DATABASE_URL, modules={\"models\": settings.APP_MODELS}\n )\n await Tortoise.generate_schemas()\n\n return start_app\n\n\ndef create_stop_app_handler(app: FastAPI) -> Callable:\n async def stop_app() -> None:\n await Tortoise.close_connections()\n\n return stop_app\n"
},
{
"alpha_fraction": 0.5515872836112976,
"alphanum_fraction": 0.5529100298881531,
"avg_line_length": 29.239999771118164,
"blob_id": "02e661f7f5e15565ee0a90fb1557805036b6f49a",
"content_id": "ce3561b2d4af1b1816c725202681d634afbcfb22",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 756,
"license_type": "permissive",
"max_line_length": 59,
"num_lines": 25,
"path": "/uzen/services/rdap.py",
"repo_name": "Chaos-Monkey-Island/uzen",
"src_encoding": "UTF-8",
"text": "from functools import lru_cache\nfrom typing import Dict\n\nfrom ipwhois import IPWhois\nfrom ipwhois.exceptions import BaseIpwhoisException\n\n\nclass RDAP:\n @staticmethod\n @lru_cache()\n def lookup(ip_address: str) -> Dict[str, str]:\n obj = IPWhois(ip_address)\n try:\n answer = obj.lookup_rdap(depth=1)\n asn = \"AS\" + answer.get(\"asn\", \"\")\n country = answer.get(\"asn_country_code\", \"\")\n description = answer.get(\"asn_description\", \"\")\n return {\n \"ip_address\": ip_address,\n \"asn\": asn,\n \"country\": country,\n \"description\": description,\n }\n except (BaseIpwhoisException, AttributeError):\n return {}\n"
},
{
"alpha_fraction": 0.7589437961578369,
"alphanum_fraction": 0.7597955465316772,
"avg_line_length": 35.6875,
"blob_id": "ca6b84dddba25e95df3316e2df6f0b9e8c8efbdc",
"content_id": "cbe4f56ab56ef8c569c18782a4a9026ab54a1a9a",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1174,
"license_type": "permissive",
"max_line_length": 85,
"num_lines": 32,
"path": "/tests/services/test_snapshot.py",
"repo_name": "Chaos-Monkey-Island/uzen",
"src_encoding": "UTF-8",
"text": "from unittest.mock import AsyncMock\n\nimport pytest\n\nfrom uzen.services.browser import Browser\nfrom uzen.services.fake_browser import FakeBrowser\nfrom uzen.services.snapshot import take_snapshot\n\n\[email protected]\nasync def test_take_snapshot(mocker):\n mocker.patch(\"uzen.services.browser.Browser.take_snapshot\", AsyncMock())\n mocker.patch(\"uzen.services.fake_browser.FakeBrowser.take_snapshot\", AsyncMock())\n\n # it should fallback to HTTPX if a host is given\n await take_snapshot(url=\"http://example.com\", host=\"example.com\")\n\n Browser.take_snapshot.assert_not_called()\n FakeBrowser.take_snapshot.assert_called_once()\n\n\[email protected]\nasync def test_take_snapshot_2(mocker, monkeypatch):\n monkeypatch.setattr(\"uzen.core.settings.HTTPX_FALLBACK\", False)\n mocker.patch(\"uzen.services.browser.Browser.take_snapshot\", AsyncMock())\n mocker.patch(\"uzen.services.fake_browser.FakeBrowser.take_snapshot\", AsyncMock())\n\n # it should use HTTPX even if HTTPX_FALLBACK is false\n await take_snapshot(url=\"http://example.com\", host=\"example.com\")\n\n Browser.take_snapshot.assert_not_called()\n FakeBrowser.take_snapshot.assert_called_once()\n"
},
{
"alpha_fraction": 0.6822810769081116,
"alphanum_fraction": 0.6822810769081116,
"avg_line_length": 31.733333587646484,
"blob_id": "416b22b6046b19f35fbb33c5b2d1e38164a53b6c",
"content_id": "8ede998840c5ba40140b04f14d672515b2f54b53",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1473,
"license_type": "permissive",
"max_line_length": 72,
"num_lines": 45,
"path": "/uzen/tasks/enrichment.py",
"repo_name": "Chaos-Monkey-Island/uzen",
"src_encoding": "UTF-8",
"text": "from functools import partial\nfrom typing import cast\n\nimport aiometer\n\nfrom uzen.models.classifications import Classification\nfrom uzen.models.dns_records import DnsRecord\nfrom uzen.models.snapshots import Snapshot\nfrom uzen.schemas.utils import EnrichmentResults\nfrom uzen.tasks import AbstractTask\nfrom uzen.tasks.classifications import ClassificationTask\nfrom uzen.tasks.dns_records import DnsRecordTask\n\n\nclass EnrichmentTasks(AbstractTask):\n def __init__(\n self, snapshot: Snapshot, insert_to_db: bool = True,\n ):\n self.tasks = [\n partial(ClassificationTask.process, snapshot, insert_to_db),\n partial(DnsRecordTask.process, snapshot, insert_to_db),\n ]\n\n async def _process(self) -> EnrichmentResults:\n results = await aiometer.run_all(self.tasks)\n\n classifications = []\n dns_records = []\n for result in results:\n if isinstance(result, Classification):\n classifications.append(result)\n elif isinstance(result, DnsRecord):\n dns_records.append(result)\n\n return EnrichmentResults(\n classifications=classifications, dns_records=dns_records,\n )\n\n @classmethod\n async def process(\n cls, snapshot: Snapshot, insert_to_db: bool = True\n ) -> EnrichmentResults:\n instance = cls(snapshot, insert_to_db)\n results = await instance.safe_process()\n return cast(EnrichmentResults, results)\n"
},
{
"alpha_fraction": 0.7147707939147949,
"alphanum_fraction": 0.7198641896247864,
"avg_line_length": 25.772727966308594,
"blob_id": "f47fc26fb369e828fdc0afe056dcd06a933a4ced",
"content_id": "1a1b69dd86256384b8d489a7296c5b9605da5747",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 589,
"license_type": "permissive",
"max_line_length": 79,
"num_lines": 22,
"path": "/tests/apis/test_urlscan.py",
"repo_name": "Chaos-Monkey-Island/uzen",
"src_encoding": "UTF-8",
"text": "import pytest\n\nfrom tests.utils import make_snapshot_result\nfrom uzen.services.urlscan import URLScan\n\n\ndef mock_import_as_snapshot(url: str):\n return make_snapshot_result()\n\n\[email protected]\[email protected](\"snapshots_setup\")\nasync def test_snapshot_post(client, monkeypatch):\n monkeypatch.setattr(URLScan, \"import_as_snapshot\", mock_import_as_snapshot)\n\n response = await client.post(\"/api/import/foo\")\n\n assert response.status_code == 201\n\n data = response.json()\n assert data.get(\"url\") == \"http://example.com/\"\n assert data.get(\"body\") == \"foo bar\"\n"
}
] | 106 | wfus/zzzz | https://github.com/wfus/zzzz | 38b1aa50c617b2023c53ff2d3a1fdbd476247284 | 8babe18f6f1cef6548330c5e20fe9dffe91562de | e881a9346970b63fe8b8c4ff1b2ef486641f2beb | refs/heads/master | 2020-05-05T06:09:12.090242 | 2019-04-06T02:04:10 | 2019-04-06T02:04:10 | 179,777,423 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6959385275840759,
"alphanum_fraction": 0.6970362067222595,
"avg_line_length": 33.96154022216797,
"blob_id": "f04b94f01408fb9214cde48a2212bdbe188d4725",
"content_id": "64194c48d0b8aa526a5441caed7165ef00c2c755",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 911,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 26,
"path": "/meme.py",
"repo_name": "wfus/zzzz",
"src_encoding": "UTF-8",
"text": "from pyspark.context import SparkContext\nfrom pyspark.conf import SparkConf\nfrom com.yahoo.ml.tf import TFCluster, TFNode\nfrom datetime import datetime\n\n\ndef main_fun(argv, ctx):\n \"\"\"Main function entrance for spark. Make sure that all imports are done here,\n or spark will try to serialize libraries when they are placed outside\n for each executor, and we don't want that! ~WFU\"\"\"\n import tensorflow as tf\n\n\nif __name__ == '__main__':\n import argparse\n parser = argparse.ArgumentParser()\n args, rem = parser.parse_known_args()\n\n sc = SparkContext(conf=SparkConf().setAppName(\"Nacho\"))\n num_executors = int(sc._conf.get(\"spark.executor.instances\"))\n num_processes = 1\n use_tensorboard = False\n\n cluster = TFCluster.run(sc, main_fun, sys.argv, num_executors, num_processes,\n use_tensorboard, TFCluster.InputMode.TENSORFLOW)\n cluster.shutdown()\n\n\n"
},
{
"alpha_fraction": 0.6144209504127502,
"alphanum_fraction": 0.6244432330131531,
"avg_line_length": 28.941667556762695,
"blob_id": "405389e624e0d3ccc15f07b12e1d52fbed5fd7ab",
"content_id": "7dee80a80290c45cbf631601540592b5642c2669",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3592,
"license_type": "no_license",
"max_line_length": 122,
"num_lines": 120,
"path": "/tfinfer.py",
"repo_name": "wfus/zzzz",
"src_encoding": "UTF-8",
"text": "import logging\nfrom collections import namedtuple\n\nlog_fmt = '%(asctime)s - %(name)s - %(levelname)s %(process)d %(funcName)s:%(lineno)d %(message)s'\nlogging.basicConfig(format=log_fmt, level=logging.INFO)\n\nlogger = logging.getLogger(__name__)\n\nBatch = namedtuple('Batch', ['data'])\nbatch_size = 32\n\nclass Singleton(type):\n \"\"\"\n Singleton Metaclass used to create a MXModel Singleton object\n \"\"\"\n _instances = {}\n def __call__(cls, *args, **kwargs):\n if cls not in cls._instances:\n cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)\n return cls._instances[cls]\n\n\nclass MXModel(object):\n \"\"\"\n This is a singleton class that just holds the loaded model in the module object\n We don't want to load the model for every inference when called from the map method\n \"\"\"\n __metaclass__ = Singleton\n model_loaded = False\n mod = None\n synsets = None\n \n \n def __init__(self, model_url, batch_size):\n model_fname = self.download_model_files(model_url)\n MXModel.mod = self.init_module(model_fname, batch_size)\n MXModel.model_loaded = True\n\n\n def download_model_files(self, model):\n \"\"\"\n Download model files from the given urls to local files \n \"\"\" \n logger.info('download_model_files: model:%s' % (model))\n import tensorflow as tf\n s_fname = mx.test_utils.download(model, overwrite=False)\n return s_fname, p_fname, synset_fname\n\n def init_module(self, model_fname, batch_size):\n logger.info(\"initializing model\")\n import tensorflow as tf\n return mod\n\n\ndef predict(img_batch, args):\n \"\"\"\n Run predication on batch of images in 4-D numpy array format and return the top_5 probability along with their classes\n \"\"\"\n import mxnet as mx\n import numpy as np\n logger.info('predict-args:%s' %(args))\n \n if not MXModel.model_loaded:\n MXModel(args['sym_url'], args['param_url'], args['label_url'], args['batch'])\n \n MXModel.mod.forward(Batch([mx.nd.array(img_batch)]))\n\n output = MXModel.mod.get_outputs()\n batch_prob = output[0].asnumpy()\n batch_top5 = []\n b = batch_prob.shape[0]\n\n for out in range(0,b):\n top_5 = []\n prob = batch_prob[out, :]\n prob = np.squeeze(prob)\n a = np.argsort(prob)[::-1]\n for i in a[0:5]:\n top_5.append('probability={:f}, class={}'.format(prob[i], MXModel.synsets[i]))\n batch_top5.append(top_5)\n\n logger.info('batch_top5:%s' %(batch_top5))\n return batch_top5\n\ndef load_images(images):\n \"\"\"\n Decodes batch of image bytes and returns a 4-D numpy array.\n \"\"\"\n import numpy as np\n batch = []\n for image in images:\n img_np = readImage(image)\n batch.append(img_np)\n\n batch_images = np.concatenate(batch)\n\n logger.info('batch_images.shape:%s'%(str(batch_images.shape)))\n\n return batch_images\n\ndef readImage(img_bytes):\n \"\"\"\n Decodes an Image bytearray into 3-D numpy array.\n \"\"\"\n from PIL import Image\n import numpy as np\n import io\n from array import array\n img = io.BytesIO(bytearray(img_bytes))\n # read the bytearray using OpenCV and convert to RGB\n img = Image.open(img)\n img = img.convert('RGB')\n #resize the image to 224x224\n img = img.resize((224, 224), Image.ANTIALIAS)\n # reshape the array from (height, width, channel) to (channel, height, width)\n img = np.swapaxes(img, 0, 2)\n img = np.swapaxes(img, 1, 2)\n # add a new axis to hold a batch of images.\n img = img[np.newaxis, :]\nreturn img"
},
{
"alpha_fraction": 0.73617023229599,
"alphanum_fraction": 0.7617021203041077,
"avg_line_length": 34.25,
"blob_id": "9e758d536510aed993b9f3672bf493c3358b4954",
"content_id": "cb8eb983ddbbfe68bdf82b5f5b5e7e0b04060f4a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 705,
"license_type": "no_license",
"max_line_length": 110,
"num_lines": 20,
"path": "/create.sh",
"repo_name": "wfus/zzzz",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\nKEYPAIR=$HOME/.ssh/cs205.pem\nMASTER_INSTANCE=m3.xlarge\nWORKER_INSTANCE=m3.xlarge\n\naws emr create-cluster \\\n--applications Name=MXNet Name=Spark \\\n--release-label emr-5.10.0 \\\n--service-role EMR_DefaultRole \\\n--ec2-attributes InstanceProfile=EMR_EC2_DefaultRole,KeyName=$KEYPAIR \\\n--instance-groups InstanceGroupType=MASTER,InstanceCount=1,InstanceType=$MASTER_INSTANCE \\\nInstanceGroupType=CORE,InstanceCount=4,InstanceType=$WORKER_INSTANCE \\\n--bootstrap-actions Name='install-pillow-boto3',Path=s3://aws-dl-emr-bootstrap/mxnet-spark-demo-bootstrap.sh \\\n--region us-east-1 \\\n--name \"NACHO\"\n\n\n# Things that you could add but I don't want to\n# --log-uri 's3n://<YOUR-S3-BUCKET-FOR-EMR-LOGS>/' \\\n"
}
] | 3 |
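The tfinfer.py entry above keeps the loaded model in module state through a `Singleton` metaclass so that repeated map-side `predict` calls reuse one model instance instead of reloading it. Below is a minimal, self-contained sketch of that pattern; the `HeavyModel` class and its `load_count` counter are illustrative assumptions and not part of the record. Note that the record assigns `__metaclass__` inside the class body, which only has an effect on Python 2; on Python 3 the `metaclass=` keyword used here is required.

```python
class Singleton(type):
    """Metaclass that hands out one shared instance per class."""
    _instances = {}

    def __call__(cls, *args, **kwargs):
        # Build the instance on the first call only; reuse it afterwards.
        if cls not in cls._instances:
            cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)
        return cls._instances[cls]


class HeavyModel(metaclass=Singleton):
    """Hypothetical stand-in for a model wrapper that is expensive to build."""
    load_count = 0

    def __init__(self):
        HeavyModel.load_count += 1  # pretend this is the costly model load


if __name__ == '__main__':
    first = HeavyModel()
    second = HeavyModel()
    assert first is second
    assert HeavyModel.load_count == 1  # __init__ ran exactly once
```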
UST-CTT/radon-ctt
|
https://github.com/UST-CTT/radon-ctt
|
20035d61f6dc052a4e463860f457491314375c6b
|
10d074c46ee99ef0a05020305fbb4931912d0ff1
|
de8ae6f5dca9d5da887830070e9ac5042c6407af
|
refs/heads/master
| 2020-12-19T13:51:07.763240 | 2020-05-19T13:44:58 | 2020-05-19T13:44:58 | 235,752,502 | 0 | 0 | null | 2020-01-23T08:24:13 | 2020-01-21T15:49:11 | 2020-01-21T15:49:08 | null |
[
{
"alpha_fraction": 0.6600706577301025,
"alphanum_fraction": 0.6791519522666931,
"avg_line_length": 20.769229888916016,
"blob_id": "7d48f10790e51489624dec519a30157816c362fd",
"content_id": "bbf7fc0eb621906a4d8946c3862c14c7a501e1a2",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1415,
"license_type": "permissive",
"max_line_length": 61,
"num_lines": 65,
"path": "/ctt-server/openapi_server/controllers/result_controller.py",
"repo_name": "UST-CTT/radon-ctt",
"src_encoding": "UTF-8",
"text": "import connexion\nimport six\n\nfrom openapi_server.models.result import Result # noqa: E501\nfrom openapi_server import util\n\nfrom models.result import Result as ResultImpl\nfrom util.marhsmallow_schemas import ResultSchema\n\nresult_schema = ResultSchema()\nresult_schema_many = ResultSchema(many=True)\n\n\ndef delete_result_by_uuid(result_uuid): # noqa: E501\n \"\"\"Delete a result\n\n Deletes the result with the given UUID on it # noqa: E501\n\n :param result_uuid: UUID of the result to delete\n :type result_uuid: str\n\n :rtype: Result\n \"\"\"\n result = ResultImpl.delete_by_uuid(result_uuid)\n return result_schema.dump(result)\n\n\ndef download_result_by_uuid(result_uuid): # noqa: E501\n \"\"\"Downloads the generated results\n\n # noqa: E501\n\n :param result_uuid: UUID of the result to download\n :type result_uuid: str\n\n :rtype: file\n \"\"\"\n raise Exception('Not Implemented')\n return 'do some magic!'\n\n\ndef get_result_by_uuid(result_uuid): # noqa: E501\n \"\"\"Retrieve a result\n\n # noqa: E501\n\n :param result_uuid: UUID of the result to return\n :type result_uuid: str\n\n :rtype: Result\n \"\"\"\n result = ResultImpl.get_by_uuid(result_uuid)\n return result_schema.dump(result)\n\n\ndef get_results(): # noqa: E501\n \"\"\"Get all results\n\n # noqa: E501\n\n\n :rtype: List[Result]\n \"\"\"\n results = ResultImpl.get_all()\n return result_schema_many.dump(results)\n"
},
{
"alpha_fraction": 0.6426011323928833,
"alphanum_fraction": 0.6431131362915039,
"avg_line_length": 28.149253845214844,
"blob_id": "885f55b4c96814f4bf64222d6be6e0598c710745",
"content_id": "61c3ef7b11d56863dceaef9b269a1d4f594cc01f",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1953,
"license_type": "permissive",
"max_line_length": 84,
"num_lines": 67,
"path": "/ctt-server/models/execution.py",
"repo_name": "UST-CTT/radon-ctt",
"src_encoding": "UTF-8",
"text": "import uuid\nimport os\n\nfrom sqlalchemy import Column, String, ForeignKey\n\nfrom db_orm.database import Base, db_session\nfrom models.deployment import Deployment\nfrom models.abstract_model import AbstractModel\nfrom util.configuration import BasePath\n\n\nclass Execution(Base, AbstractModel):\n __tablename__ = 'execution'\n\n uuid: str\n deployment_uuid: str\n\n uuid = Column(String, primary_key=True)\n deployment_uuid = Column(String, ForeignKey('deployment.uuid'), nullable=False)\n\n def __init__(self, deployment):\n self.uuid = str(uuid.uuid4())\n self.deployment_uuid = deployment.uuid\n self.storage_path = os.path.join(BasePath, self.__tablename__, self.uuid)\n\n if deployment:\n db_session.add(self)\n db_session.commit()\n else:\n raise Exception(f'Linked entities do not exist.')\n\n def __repr__(self):\n return '<Execution UUID=%r, DP_UUID=%r>' % (self.uuid, self.deployment_uuid)\n\n def run(self):\n pass\n\n @classmethod\n def get_parent_type(cls):\n return Deployment\n\n @classmethod\n def create(cls, deployment_uuid):\n linked_deployment = Deployment.get_by_uuid(deployment_uuid)\n execution = Execution(linked_deployment)\n execution.run()\n return execution\n\n @classmethod\n def get_all(cls):\n return Execution.query.all()\n\n @classmethod\n def get_by_uuid(cls, get_uuid):\n return Execution.query.filter_by(uuid=get_uuid).first()\n\n @classmethod\n def delete_by_uuid(cls, del_uuid):\n execution = Execution.query.filter_by(uuid=del_uuid)\n if execution:\n from models.result import Result\n linked_results = Result.query.filter_by(execution_uuid=del_uuid)\n for result in linked_results:\n Result.delete_by_uuid(result.uuid)\n execution.delete()\n # rmtree(self.fq_storage_path)\n db_session.commit()\n"
},
{
"alpha_fraction": 0.7061910033226013,
"alphanum_fraction": 0.7187827825546265,
"avg_line_length": 38.6875,
"blob_id": "b59eebc8e313c78e47e5864c6a9625b9adda1577",
"content_id": "0bd077d98ac16de48c9922feb46e1a02f19b1627",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1906,
"license_type": "permissive",
"max_line_length": 146,
"num_lines": 48,
"path": "/README.md",
"repo_name": "UST-CTT/radon-ctt",
"src_encoding": "UTF-8",
"text": "# RADON Continuous Testing Tool (CTT)\n\n## \n\n| Items | Contents | \n| --- | --- |\n| **Description** | The Continuous Testing Tool supports RADON's continuous testing workflow. This repository contains the CTT server. |\n| **Licence**| Apache License, Version 2.0: https://opensource.org/licenses/Apache-2.0 |\n| **Maintainers**| <ul><li>Thomas F. Düllmann ([@duelle](https://github.com/duelle)) </li><li>Andre van Hoorn ([@avanhoorn](https://github.com/avanhoorn)) </li></ul> |\n\n## System Requirements\n\nThis README is currently tailored to Unix-like systems (Linux, MacOS). \n\nFor CTT users, the following software must be installed: \n\n1. Python3\n1. Python Virtual Environment (`virtualenv`)\n\nFor CTT developers, the following additional software must be installed: \n1. Docker \n1. Recommended: A Python IDE such as [PyCharm](https://www.jetbrains.com/pycharm/) \n\n## Starting the CTT Server\n\nExecute the following steps to start start and access the CTT server:\n\n1. Clone this repository (if not done, yet)\n1. Start the CTT server by executing `./radon_ctt_start.sh`\n1. Access the CTT server's (Swagger-based) UI by visiting the following URL in the Web browser: `http://localhost:8080/RadonCTT/ui/`\n\n## Developing/Extending the CTT Server\n\n### Editing the CTT Server Code\n\n1. When using an IDE: import the CTT Server repository into your IDE\n\n### Changing the CTT REST API and Regenerating the CTT Server Stub\n\nThe CTT Server's REST API is defined in the file `radonctt-openapi.yaml`. \n\n1. The following options exist to edit the API definition file `radonctt-openapi.yaml`: \n 1. Use a standard text editor (or your Python IDE) \n 1. Use the [Swagger editor](https://editor.swagger.io/): \n 1. File -> Import file -> `radonctt-openapi.yaml`\n 1. Edit the file in the editor\n 1. File -> Save as YAML -> `radonctt-openapi.yaml`\n1. For regenerating the CTT server stub, execute `generate_python_flask_stubs.sh` \n"
},
{
"alpha_fraction": 0.6893203854560852,
"alphanum_fraction": 0.6893203854560852,
"avg_line_length": 24.75,
"blob_id": "a6f2536e90f1e3d14fa972bbd3b5b63490516303",
"content_id": "df8a5358302495eae06ac49c20b8fb82e18be3ae",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 103,
"license_type": "permissive",
"max_line_length": 26,
"num_lines": 4,
"path": "/ctt-server/util/configuration.py",
"repo_name": "UST-CTT/radon-ctt",
"src_encoding": "UTF-8",
"text": "BasePath = '/tmp/RadonCTT'\nDBFile = 'radon-ctt.db'\nSUTFile = 'sut_tosca.yaml'\nTIFile = 'ti_tosca.yaml'\n"
},
{
"alpha_fraction": 0.6584415435791016,
"alphanum_fraction": 0.6587662100791931,
"avg_line_length": 33.60674285888672,
"blob_id": "cd3ac67eb241557fcdbf3fa6137ff66cc1a070a8",
"content_id": "664e54cdc63eda82f1107cc1c6da0b7e7abd2553",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3080,
"license_type": "permissive",
"max_line_length": 109,
"num_lines": 89,
"path": "/ctt-server/models/testartifact.py",
"repo_name": "UST-CTT/radon-ctt",
"src_encoding": "UTF-8",
"text": "import uuid\nimport os\n\nfrom sqlalchemy import Column, String, ForeignKey\nfrom shutil import copytree, ignore_patterns, rmtree\n\nfrom util.configuration import BasePath\nfrom db_orm.database import Base, db_session\nfrom models.project import Project\nfrom models.abstract_model import AbstractModel\n\n\nclass TestArtifact(Base, AbstractModel):\n __tablename__ = 'testartifact'\n\n uuid: str\n commit_hash: str\n sut_tosca_path: str\n ti_tosca_path: str\n storage_path: str\n project_uuid: str\n\n uuid = Column(String, primary_key=True)\n commit_hash = Column(String, nullable=False)\n sut_tosca_path = Column(String, nullable=False)\n ti_tosca_path = Column(String, nullable=False)\n storage_path = Column(String, nullable=False)\n project_uuid = Column(String, ForeignKey('project.uuid'), nullable=False)\n\n parentType = Project\n\n def __init__(self, project, sut_tosca_path, ti_tosca_path):\n self.uuid = str(uuid.uuid4())\n self.project_uuid = project.uuid\n self.sut_tosca_path = sut_tosca_path\n self.ti_tosca_path = ti_tosca_path\n self.storage_path = os.path.join(BasePath, self.__tablename__, self.uuid)\n\n if not os.path.exists(self.fq_storage_path):\n os.makedirs(self.fq_storage_path)\n\n self.commit_hash = project.commit_hash\n\n # Copy repository excluding the '.git' directory\n src_dir = project.fq_storage_path\n if os.path.isdir(src_dir) and os.path.isdir(self.fq_storage_path):\n copytree(src_dir, self.fq_storage_path, ignore=ignore_patterns('.git'), dirs_exist_ok=True)\n\n db_session.add(self)\n db_session.commit()\n\n def __repr__(self):\n return '<TestArtifact UUID=%r, COMMIT_HASH=%r, SUT_PATH=%r, TI_PATH=%r, ST_PATH=%r, PR_UUID=%r >' % \\\n (self.uuid, self.commit_hash, self.sut_tosca_path,\n self.ti_tosca_path, self.storage_path, self.project_uuid)\n\n @property\n def fq_storage_path(self):\n return os.path.join(BasePath, self.storage_path)\n\n @classmethod\n def get_parent_type(cls):\n return Project\n\n @classmethod\n def create(cls, project_uuid, sut_tosca_path, ti_tosca_path):\n linked_project = Project.get_by_uuid(project_uuid)\n return TestArtifact(linked_project, sut_tosca_path, ti_tosca_path)\n\n @classmethod\n def get_all(cls):\n return TestArtifact.query.all()\n\n @classmethod\n def get_by_uuid(cls, get_uuid):\n return TestArtifact.query.filter_by(uuid=get_uuid).first()\n\n @classmethod\n def delete_by_uuid(cls, del_uuid):\n testartifact = TestArtifact.query.filter_by(uuid=del_uuid)\n if testartifact:\n folder_to_delete = testartifact.first().fq_storage_path\n from models.deployment import Deployment\n linked_deployments = Deployment.query.filter_by(testartifact_uuid=del_uuid)\n for result in linked_deployments:\n Deployment.delete_by_uuid(result.uuid)\n testartifact.delete()\n rmtree(folder_to_delete)\n db_session.commit()\n"
},
{
"alpha_fraction": 0.6414538025856018,
"alphanum_fraction": 0.6419450044631958,
"avg_line_length": 26.890411376953125,
"blob_id": "751f42892dc2c423ab15f68cd1f33a4f69756874",
"content_id": "ede779a2ed7477ffc22fd7624a80f9007cad8209",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2036,
"license_type": "permissive",
"max_line_length": 81,
"num_lines": 73,
"path": "/ctt-server/models/result.py",
"repo_name": "UST-CTT/radon-ctt",
"src_encoding": "UTF-8",
"text": "import uuid\nimport os\n\nfrom shutil import rmtree\nfrom sqlalchemy import Column, String, ForeignKey\n\nfrom db_orm.database import Base, db_session\nfrom models.execution import Execution\nfrom models.abstract_model import AbstractModel\nfrom util.configuration import BasePath\n\n\nclass Result(Base, AbstractModel):\n __tablename__ = 'result'\n\n uuid: str\n storage_path: str\n execution_uuid: str\n\n uuid = Column(String, primary_key=True)\n storage_path = Column(String, nullable=False)\n execution_uuid = Column(String, ForeignKey('execution.uuid'), nullable=False)\n\n def __init__(self, execution):\n self.uuid = str(uuid.uuid4())\n self.execution_uuid = execution.uuid\n self.storage_path = os.path.join(BasePath, self.__tablename__, self.uuid)\n\n self.fq_storage_path = os.path.join(BasePath, self.storage_path)\n\n if execution:\n db_session.add(self)\n db_session.commit()\n else:\n raise Exception(f'Linked entities do not exist.')\n\n def __repr__(self):\n return '<Result UUID=%r, EX_UUID=%r, ST_PATH=%r>' % \\\n (self.uuid, self.execution_uuid, self.storage_path)\n\n @property\n def fq_storage_path(self):\n return self.fq_storage_path\n\n @fq_storage_path.setter\n def fq_storage_path(self, value):\n self._fq_storage_path = value\n\n @classmethod\n def get_parent_type(cls):\n return Execution\n\n @classmethod\n def create(cls, execution_uuid):\n linked_execution = Execution.get_by_uuid(execution_uuid)\n result = Result(linked_execution)\n return result\n\n @classmethod\n def get_all(cls):\n return Result.query.all()\n\n @classmethod\n def get_by_uuid(cls, get_uuid):\n return Result.query.filter_by(uuid=get_uuid).first()\n\n @classmethod\n def delete_by_uuid(cls, del_uuid):\n result = Result.query.filter_by(uuid=del_uuid)\n if result:\n result.delete()\n # rmtree(self.fq_storage_path)\n db_session.commit()\n"
},
{
"alpha_fraction": 0.6457030177116394,
"alphanum_fraction": 0.6460322737693787,
"avg_line_length": 34.3139533996582,
"blob_id": "0fb969e39bb92bacd88927ded3cf315cb9fd1b3d",
"content_id": "59c0159e387db8de548271d7824d60aac1fc1fd4",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3037,
"license_type": "permissive",
"max_line_length": 113,
"num_lines": 86,
"path": "/ctt-server/models/deployment.py",
"repo_name": "UST-CTT/radon-ctt",
"src_encoding": "UTF-8",
"text": "import opera\nimport os\nimport subprocess\nimport uuid\n\nfrom flask import current_app\nfrom sqlalchemy import Column, String, ForeignKey\n\nfrom db_orm.database import Base, db_session\nfrom models.testartifact import TestArtifact\nfrom models.abstract_model import AbstractModel\n\n\nclass Deployment(Base, AbstractModel):\n __tablename__ = 'deployment'\n\n uuid: str\n testartifact_uuid: str\n status: str\n\n uuid = Column(String, primary_key=True)\n testartifact_uuid = Column(String, ForeignKey('testartifact.uuid'), nullable=False)\n\n def __init__(self, testartifact):\n self.uuid = str(uuid.uuid4())\n self.testartifact_uuid = testartifact.uuid\n if testartifact:\n db_session.add(self)\n db_session.commit()\n else:\n raise Exception(f'Linked entities do not exist.')\n\n def __repr__(self):\n return '<Deployment UUID=%r, TA_UUID=%r>' % (self.uuid, self.testartifact_uuid)\n\n def deploy(self):\n test_artifact = TestArtifact.get_by_uuid(self.testartifact_uuid)\n sut_fq_path = os.path.join(test_artifact.fq_storage_path, test_artifact.sut_tosca_path)\n ti_fq_path = os.path.join(test_artifact.fq_storage_path, test_artifact.ti_tosca_path)\n\n # Deployment of SuT\n if os.path.isfile(sut_fq_path):\n current_app.logger.debug(f'Deploying SuT {str(test_artifact.sut_tosca_path)} with opera '\n f'in folder {str(test_artifact.fq_storage_path)}.')\n subprocess.call(['opera', 'deploy', test_artifact.sut_tosca_path], cwd=test_artifact.fq_storage_path)\n\n # Deployment of TI\n if os.path.isfile(ti_fq_path):\n current_app.logger.debug(f'Deploying TI {str(test_artifact.ti_tosca_path)} with opera '\n f'in folder {str(test_artifact.fq_storage_path)}.')\n subprocess.call(['opera', 'deploy', test_artifact.ti_tosca_path], cwd=test_artifact.fq_storage_path)\n\n @classmethod\n def get_parent_type(cls):\n return TestArtifact\n\n @classmethod\n def create(cls, testartifact_uuid):\n linked_testartifact = TestArtifact.get_by_uuid(testartifact_uuid)\n\n deployment = Deployment(linked_testartifact)\n deployment.deploy()\n\n # TODO: What to return here? Status of all deployments?\n return deployment\n\n @classmethod\n def get_all(cls):\n return Deployment.query.all()\n\n @classmethod\n def get_by_uuid(cls, get_uuid):\n return Deployment.query.filter_by(uuid=get_uuid).first()\n\n @classmethod\n def delete_by_uuid(cls, del_uuid):\n deployment = Deployment.query.filter_by(uuid=del_uuid)\n if deployment:\n from models.execution import Execution\n linked_executions = Execution.query.filter_by(deployment_uuid=del_uuid)\n for result in linked_executions:\n Execution.delete_by_uuid(result.uuid)\n\n deployment.delete()\n # rmtree(self.fq_storage_path)\n db_session.commit()\n"
}
] | 7 |
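The radon-ctt model files above all follow one lifecycle pattern: each entity exposes `create`, `get_all`, `get_by_uuid`, and `delete_by_uuid` classmethods, and `delete_by_uuid` first removes the child entities that reference the record (TestArtifact, then Deployment, then Execution, then Result) before removing the record itself. The sketch below illustrates that cascading-delete idea with a plain in-memory store; the `Node` class and its `store`/`children` dictionaries are illustrative assumptions and not part of the repository, which uses SQLAlchemy sessions instead.

```python
import uuid


class Node:
    """Hypothetical stand-in for one CTT entity (Project, Deployment, ...)."""

    store = {}      # uuid -> Node, playing the role of the database session
    children = {}   # parent uuid -> list of child uuids

    def __init__(self, parent_uuid=None):
        self.uuid = str(uuid.uuid4())
        Node.store[self.uuid] = self
        if parent_uuid is not None:
            Node.children.setdefault(parent_uuid, []).append(self.uuid)

    @classmethod
    def get_by_uuid(cls, get_uuid):
        return cls.store.get(get_uuid)

    @classmethod
    def delete_by_uuid(cls, del_uuid):
        # Delete children first, then the entity itself, mirroring the order
        # used by the delete_by_uuid classmethods in the files above.
        for child_uuid in cls.children.pop(del_uuid, []):
            cls.delete_by_uuid(child_uuid)
        cls.store.pop(del_uuid, None)


if __name__ == '__main__':
    deployment = Node()
    execution = Node(parent_uuid=deployment.uuid)
    result = Node(parent_uuid=execution.uuid)
    Node.delete_by_uuid(deployment.uuid)
    assert Node.store == {}  # the whole chain was removed
```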
youngwoo-yoon/youtube-gesture-dataset
|
https://github.com/youngwoo-yoon/youtube-gesture-dataset
|
e8fd210304c663ae80f4d51d656e4b9fb2a5085d
|
09e2c5fc0c51d048ce03eaf1ba969c0ee983e11d
|
23d4e1b8cbeb5150814ba85a9c6239dca1507304
|
refs/heads/master
| 2022-09-04T13:31:32.648444 | 2022-02-18T00:05:36 | 2022-02-18T00:05:36 | 179,186,315 | 85 | 20 | BSD-3-Clause | 2019-04-03T01:25:24 | 2022-07-25T06:20:20 | 2022-08-05T21:58:18 | Python |
[
{
"alpha_fraction": 0.7038633227348328,
"alphanum_fraction": 0.7308428883552551,
"avg_line_length": 50.76859664916992,
"blob_id": "cdfdabb7926f96d09dba34c829628dd6a1a70761",
"content_id": "fbe1b4d79dde2ce4c82fe427df1203d055d83eb4",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 6264,
"license_type": "permissive",
"max_line_length": 175,
"num_lines": 121,
"path": "/README.md",
"repo_name": "youngwoo-yoon/youtube-gesture-dataset",
"src_encoding": "UTF-8",
"text": "# Youtube Gesture Dataset\n\nThis repository contains scripts to build *Youtube Gesture Dataset*.\nYou can download Youtube videos and transcripts, divide the videos into scenes, and extract human poses.\nPlease see the project page and paper for the details. \n \n[[Project page]](https://sites.google.com/view/youngwoo-yoon/projects/co-speech-gesture-generation) [[Paper]](https://arxiv.org/abs/1810.12541)\n\nIf you have any questions or comments, please feel free to contact me by email ([[email protected]](mailto:[email protected])).\n\n## Environment\n\nThe scripts are tested on Ubuntu 16.04 LTS and Python 3.5.2. \n#### Dependencies \n* [OpenPose](https://github.com/CMU-Perceptual-Computing-Lab/openpose) (v1.4) for pose estimation\n* [PySceneDetect](https://pyscenedetect.readthedocs.io/en/latest/) (v0.5) for video scene segmentation\n* [OpenCV](https://pypi.org/project/opencv-python/) (v3.4) for video read\n * We uses FFMPEG. Use latest pip version of opencv-python or build OpenCV with FFMPEG.\n* [Gentle](https://github.com/lowerquality/gentle) (Jan. 2019 version) for transcript alignment\n * Download the source code from Gentle github and run ./install.sh. And then, you can import gentle library by specifying the path to the library. See `run_gentle.py`.\n * Add an option `-vn` to resample.py in gentle as follows:\n ```python\n cmd = [\n FFMPEG,\n '-loglevel', 'panic',\n '-y',\n ] + offset + [\n '-i', infile,\n ] + duration + [\n '-vn', # ADDED (it blocks video streams, see the ffmpeg option)\n '-ac', '1', '-ar', '8000',\n '-acodec', 'pcm_s16le',\n outfile\n ]\n ``` \n\n## A step-by-step guide\n\n1. Set config\n * Update paths and youtube developer key in `config.py` (the directories will be created if not exist).\n * Update target channel ID. The scripts are tested for TED and LaughFactory channels.\n\n2. Execute `download_video.py`\n * Download youtube videos, metadata, and subtitles (./videos/*.mp4, *.json, *.vtt).\n\n3. Execute `run_openpose.py`\n * Run [OpenPose](https://github.com/CMU-Perceptual-Computing-Lab/openpose) to extract body, hand, and face skeletons for all vidoes (./skeleton/*.pickle). \n\n4. Execute `run_scenedetect.py`\n * Run [PySceneDetect](https://pyscenedetect.readthedocs.io/en/latest/) to divide videos into scene clips (./clip/*.csv).\n \n5. Execute `run_gentle.py`\n * Run [Gentle](https://github.com/lowerquality/gentle) for word-level alignments (./videos/*_align_results.json).\n * You should skip this step if you use auto-generated subtitles. This step is necessary for the TED Talks channel. \n\n6. Execute `run_clip_filtering.py`\n * Remove inappropriate clips.\n * Save clips with body skeletons (./clip/*.json).\n\n7. *(optional)* Execute `review_filtered_clips.py`\n * Review filtering results.\n\n8. Execute `make_ted_dataset.py`\n * Do some post processing and split into train, validation, and test sets (./script/*.pickle).\n\n\n## Pre-built TED gesture dataset\n \nRunning whole data collection pipeline is complex and takes several days, so we provide the pre-built dataset for the videos in the TED channel. 
\n\n| | |\n| --- | --- |\n| Number of videos | 1,766 |\n| Average length of videos | 12.7 min |\n| Shots of interest | 35,685 (20.2 per video on average) |\n| Ratio of shots of interest | 25% (35,685 / 144,302) |\n| Total length of shots of interest | 106.1 h |\n\n* [[ted_raw_poses.zip]](https://drive.google.com/open?id=1vvweoCFAARODSa5J5Ew6dpGdHFHoEia2) \n[[z01]](https://drive.google.com/open?id=1zR-GIx3vbqCMkvJ1HdCMjthUpj03XKwB) \n[[z02]](https://kaistackr-my.sharepoint.com/:u:/g/personal/zeroyy_kaist_ac_kr/EeAaPXuWXYNJk9AWTKZ30zEBR0hHnSuXEmetiOD412cZ7g?e=qVSeYk) \n[[z03]](https://drive.google.com/open?id=1uhfv6k0Q3E7bUIxYDAVjxKIjPM_gL8Wm)\n[[z04]](https://drive.google.com/open?id=1VLi0oQBW8xetN7XmkGZ-S_KhD-DvbVQB)\n[[z05]](https://drive.google.com/open?id=1F2wiRX421f3hiUkEeKcTBbtsgOEBy7lh) (split zip files, Google Drive or OneDrive links, total 80.9 GB) \nThe result of Step 3. It contains the extracted human poses for all frames. \n* [[ted_shots_of_interest.zip, 13.3 GB]](https://drive.google.com/open?id=1kF7SVpxzhYEHCoSPpUt6aqSKvl9YaTEZ) \nThe result of Step 6. It contains shot segmentation results ({video_id}.csv files) and shots of interest ({video_id}.json files). \n'clip_info' elements in JSON files have start/end frame numbers and a boolean value indicating shots of interest. \nThe JSON files contain the extracted human poses for the shots of interest, \nso you don't need to download ted_raw_poses.zip unless the human poses for all frames are necessary.\n* [[ted_gesture_dataset.zip, 1.1 GB]](https://drive.google.com/open?id=1lZfvufQ_CIy3d2GFU2dgqIVo1gdmG6Dh) \nThe result of Step 8. Train/validation/test sets of speech-motion pairs. \n \n### Download videos and transcripts\nWe do not provide the videos and transcripts of TED talks due to copyright issues.\nYou should download actual videos and transcripts by yourself as follows: \n1. Download and copy [[video_ids.txt]](https://drive.google.com/open?id=1grFWC7GBIeF2zlaOEtCWw4YgqHe3AFU-) file which contains video ids into `./videos` directory.\n2. Run `download_video.py`. It downloads the videos and transcripts in `video_ids.txt`.\nSome videos may not match to the extracted poses that we provided if the videos are re-uploaded.\nPlease compare the numbers of frames, just in case.\n\n\n## Citation \n\nIf our code or dataset is helpful, please kindly cite the following paper:\n```\n@INPROCEEDINGS{\n yoonICRA19,\n title={Robots Learn Social Skills: End-to-End Learning of Co-Speech Gesture Generation for Humanoid Robots},\n author={Yoon, Youngwoo and Ko, Woo-Ri and Jang, Minsu and Lee, Jaeyeon and Kim, Jaehong and Lee, Geehyuk},\n booktitle={Proc. of The International Conference in Robotics and Automation (ICRA)},\n year={2019}\n}\n```\n\n## Related Projects\nSpeech Gesture Generation from the Trimodal Context of Text, Audio, and Speaker Identity (SIGGRAPH Asia 2020), https://github.com/ai4r/Gesture-Generation-from-Trimodal-Context\n\n## Acknowledgement\n* This work was supported by the ICT R&D program of MSIP/IITP. [2017-0-00162, Development of Human-care Robot Technology for Aging Society] \n* Thanks to [Eun-Sol Cho](https://github.com/euns2ol) and [Jongwon Kim](mailto:[email protected]) for contributions during their internships at ETRI.\n"
},
{
"alpha_fraction": 0.5835183262825012,
"alphanum_fraction": 0.5896226167678833,
"avg_line_length": 33,
"blob_id": "7444d7c5955c5e38e843849798643443efad1dbc",
"content_id": "bb86b073354994330e02f0500359b9a98b27b21f",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3604,
"license_type": "permissive",
"max_line_length": 137,
"num_lines": 106,
"path": "/script/run_openpose.py",
"repo_name": "youngwoo-yoon/youtube-gesture-dataset",
"src_encoding": "UTF-8",
"text": "# ------------------------------------------------------------------------------\n# Copyright (c) ETRI. All rights reserved.\n# Licensed under the BSD 3-Clause License.\n# This file is part of Youtube-Gesture-Dataset, a sub-project of AIR(AI for Robots) project.\n# You can refer to details of AIR project at https://aiforrobots.github.io\n# Written by Youngwoo Yoon ([email protected])\n# ------------------------------------------------------------------------------\n\n\"\"\"\nExtract pose skeletons by using OpenPose library\nNeed proper LD_LIBRARY_PATH before run this script\nPycharm: In RUN > Edit Configurations, add LD_LIBRARY_PATH=/usr/local/cuda/lib64:$LD_LIBRARY_PATH\n\"\"\"\n\nimport glob\nimport json\nimport os\nimport pickle\nimport subprocess\n\nimport shutil\n\nfrom config import my_config\n\n# maximum accuracy, too slow (~1fps)\n#OPENPOSE_OPTION = \"--net_resolution -1x736 --scale_number 4 --scale_gap 0.25 --hand --hand_scale_number 6 --hand_scale_range 0.4 --face\"\nOPENPOSE_OPTION = \"--face --hand --number_people_max 3\"\n\nOUTPUT_SKELETON_PATH = my_config.WORK_PATH + \"/temp_skeleton_raw\"\nOUTPUT_VIDEO_PATH = my_config.WORK_PATH + \"/temp_skeleton_video\"\n\nRESUME_VID = \"\" # resume from this video\nSKIP_EXISTING_SKELETON = True # skip if the skeleton file is existing\n\n\ndef get_vid_from_filename(filename):\n return filename[-15:-4]\n\n\ndef read_skeleton_json(_file):\n with open(_file) as json_file:\n skeleton_json = json.load(json_file)\n return skeleton_json['people']\n\n\ndef save_skeleton_to_pickle(_vid):\n files = glob.glob(OUTPUT_SKELETON_PATH + '/' + _vid + '/*.json')\n if len(files) > 10:\n files = sorted(files)\n skeletons = []\n for file in files:\n skeletons.append(read_skeleton_json(file))\n with open(my_config.SKELETON_PATH + '/' + _vid + '.pickle', 'wb') as file:\n pickle.dump(skeletons, file)\n\n\nif __name__ == '__main__':\n if not os.path.exists(my_config.SKELETON_PATH):\n os.makedirs(my_config.SKELETON_PATH)\n if not os.path.exists(OUTPUT_SKELETON_PATH):\n os.makedirs(OUTPUT_SKELETON_PATH)\n if not os.path.exists(OUTPUT_VIDEO_PATH):\n os.makedirs(OUTPUT_VIDEO_PATH)\n\n os.chdir(my_config.OPENPOSE_BASE_DIR)\n if RESUME_VID == \"\":\n skip_flag = False\n else:\n skip_flag = True\n\n video_files = glob.glob(my_config.VIDEO_PATH + \"/*.mp4\")\n for file in sorted(video_files, key=os.path.getmtime):\n print(file)\n vid = get_vid_from_filename(file)\n print(vid)\n\n skip_iter = False\n\n # resume check\n if skip_flag and vid == RESUME_VID:\n skip_flag = False\n skip_iter = skip_flag\n\n # existing skeleton check\n if SKIP_EXISTING_SKELETON:\n if os.path.exists(my_config.SKELETON_PATH + '/' + vid + '.pickle'):\n print('existing skeleton')\n skip_iter = True\n\n if not skip_iter:\n # create out dir\n skeleton_dir = OUTPUT_SKELETON_PATH + \"/\" + vid + \"/\"\n if os.path.exists(skeleton_dir):\n shutil.rmtree(skeleton_dir)\n else:\n os.makedirs(skeleton_dir)\n\n # extract skeleton\n command = my_config.OPENPOSE_BIN_PATH + \" \" + OPENPOSE_OPTION + \" --video \\\"\" + file + \"\\\"\"\n # command += \" --write_video \" + OUTPUT_VIDEO_PATH + \"/\" + vid + \"_result.avi\" # write result video\n command += \" --write_json \" + skeleton_dir\n print(command)\n subprocess.call(command, shell=True)\n\n # save skeletons to a pickle file\n save_skeleton_to_pickle(vid)\n"
},
{
"alpha_fraction": 0.76106196641922,
"alphanum_fraction": 0.8141592741012573,
"avg_line_length": 10.300000190734863,
"blob_id": "da25632f21dafcfd9f149139086c15e68b32391e",
"content_id": "bb4f7ced9916b71eac062683c17c22abcfc37d8e",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 113,
"license_type": "permissive",
"max_line_length": 24,
"num_lines": 10,
"path": "/requirements.txt",
"repo_name": "youngwoo-yoon/youtube-gesture-dataset",
"src_encoding": "UTF-8",
"text": "matplotlib\nnumpy\nscipy\ntqdm\nPillow\npyarrow==0.14.1\ngoogle-api-python-client\nwebvtt-py\nyoutube-dl\nscenedetect<0.6\n"
},
{
"alpha_fraction": 0.5129804015159607,
"alphanum_fraction": 0.5190888047218323,
"avg_line_length": 33.31004333496094,
"blob_id": "4fb4150f92ebbc85d707c11703437362a392b43e",
"content_id": "676af8b9ad274ae7f527ed13c0880abbaf751d85",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7858,
"license_type": "permissive",
"max_line_length": 114,
"num_lines": 229,
"path": "/script/download_video.py",
"repo_name": "youngwoo-yoon/youtube-gesture-dataset",
"src_encoding": "UTF-8",
"text": "# ------------------------------------------------------------------------------\n# Copyright (c) ETRI. All rights reserved.\n# Licensed under the BSD 3-Clause License.\n# This file is part of Youtube-Gesture-Dataset, a sub-project of AIR(AI for Robots) project.\n# You can refer to details of AIR project at https://aiforrobots.github.io\n# Written by Youngwoo Yoon ([email protected])\n# ------------------------------------------------------------------------------\n\nfrom __future__ import unicode_literals\n\nimport glob\nimport json\nimport traceback\n\nimport youtube_dl\nimport urllib.request\nimport sys\nimport os\nfrom apiclient.discovery import build\nfrom datetime import datetime, timedelta\nfrom config import my_config\n\nYOUTUBE_API_SERVICE_NAME = \"youtube\"\nYOUTUBE_API_VERSION = \"v3\"\n\nRESUME_VIDEO_ID = \"\" # resume downloading from this video, set empty string to start over\n\n\ndef fetch_video_ids(channel_id, search_start_time): # load video ids in the channel\n youtube = build(YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION, developerKey=my_config.DEVELOPER_KEY)\n\n start_time = search_start_time\n td = timedelta(days=15)\n end_time = start_time + td\n\n res_items = []\n\n # multiple quires are necessary to get all results surely\n while start_time < datetime.now():\n start_string = str(start_time.isoformat()) + 'Z'\n end_string = str(end_time.isoformat()) + 'Z'\n\n res = youtube.search().list(part=\"id\", channelId=channel_id, maxResults=\"50\",\n publishedAfter=start_string,\n publishedBefore=end_string).execute()\n res_items += res['items']\n\n while True: # paging\n if len(res['items']) < 50 or 'nextPageToken' not in res:\n break\n\n next_page_token = res['nextPageToken']\n res = youtube.search().list(part=\"id\", channelId=channel_id, maxResults=\"50\",\n publishedAfter=start_string,\n publishedBefore=end_string,\n pageToken=next_page_token).execute()\n res_items += res['items']\n\n print(' {} to {}, no of videos: {}'.format(start_string, end_string, len(res_items)))\n\n start_time = end_time\n end_time = start_time + td\n\n # collect video ids\n vid_list = []\n for i in res_items:\n vid = (i.get('id')).get('videoId')\n if vid is not None:\n vid_list.append(vid)\n\n return vid_list\n\n\ndef video_filter(info):\n passed = True\n\n exist_proper_format = False\n format_data = info.get('formats')\n for i in format_data:\n if i.get('ext') == 'mp4' and i.get('height') >= 720 and i.get('acodec') != 'none':\n exist_proper_format = True\n if not exist_proper_format:\n passed = False\n\n if passed:\n duration_hours = info.get('duration') / 3600.0\n if duration_hours > 1.0:\n passed = False\n\n if passed:\n if len(info.get('automatic_captions')) == 0 and len(info.get('subtitles')) == 0:\n passed = False\n\n return passed\n\n\ndef download_subtitle(url, filename, postfix):\n urllib.request.urlretrieve(url, '{}-{}.vtt'.format(filename, postfix))\n\n\ndef download(vid_list):\n ydl_opts = {'format': 'best[height=720,ext=mp4]',\n 'writesubtitles': True,\n 'writeautomaticsub': True,\n 'outtmpl': 'dummy.mp4'\n } # download options\n language = my_config.LANG\n\n download_count = 0\n skip_count = 0\n sub_count = 0\n log = open(\"download_log.txt\", 'w', encoding=\"utf-8\")\n\n if len(RESUME_VIDEO_ID) < 10:\n skip_index = 0\n else:\n skip_index = vid_list.index(RESUME_VIDEO_ID)\n\n for i in range(len(vid_list)):\n error_count = 0\n print(vid_list[i])\n if i < skip_index:\n continue\n\n # rename video (vid.mp4)\n ydl_opts['outtmpl'] = my_config.VIDEO_PATH + '/' + vid_list[i] + 
'.mp4'\n\n # check existing file\n if os.path.exists(ydl_opts['outtmpl']) and os.path.getsize(ydl_opts['outtmpl']): # existing and not empty\n print('video file already exists ({})'.format(vid_list[i]))\n continue\n\n with youtube_dl.YoutubeDL(ydl_opts) as ydl:\n vid = vid_list[i]\n url = \"https://youtu.be/{}\".format(vid)\n\n info = ydl.extract_info(url, download=False)\n if video_filter(info):\n with open(\"{}.json\".format(vid), \"w\", encoding=\"utf-8\") as js:\n json.dump(info, js)\n while 1:\n if error_count == 3:\n print('Exit...')\n sys.exit()\n try:\n ydl.download([url])\n except(youtube_dl.utils.DownloadError,\n youtube_dl.utils.ContentTooShortError,\n youtube_dl.utils.ExtractorError):\n error_count += 1\n print(' Retrying... (error count : {})\\n'.format(error_count))\n traceback.print_exc()\n continue\n else:\n def get_subtitle_url(subtitles, language, ext):\n subtitles = subtitles.get(language)\n url = None\n for sub in subtitles:\n if sub.get('ext') == ext:\n url = sub.get('url')\n break\n return url\n\n if info.get('subtitles') != {} and (info.get('subtitles')).get(language) != None:\n sub_url = get_subtitle_url(info.get('subtitles'), language, 'vtt')\n download_subtitle(sub_url, vid, language)\n sub_count += 1\n if info.get('automatic_captions') != {}:\n auto_sub_url = get_subtitle_url(info.get('automatic_captions'), language, 'vtt')\n download_subtitle(auto_sub_url, vid, language+'-auto')\n\n log.write(\"{} - downloaded\\n\".format(str(vid)))\n download_count += 1\n break\n else:\n log.write(\"{} - skipped\\n\".format(str(info.get('id'))))\n skip_count += 1\n\n print(\" downloaded: {}, skipped: {}\".format(download_count, skip_count))\n\n log.write(\"\\nno of subtitles : {}\\n\".format(sub_count))\n log.write(\"downloaded: {}, skipped : {}\\n\".format(download_count, skip_count))\n log.close()\n\n\ndef main():\n if not os.path.exists(my_config.VIDEO_PATH):\n os.makedirs(my_config.VIDEO_PATH)\n\n os.chdir(my_config.VIDEO_PATH)\n vid_list = []\n\n # read video list\n try:\n rf = open(\"video_ids.txt\", 'r')\n except FileNotFoundError:\n print(\"fetching video ids...\")\n vid_list = fetch_video_ids(my_config.YOUTUBE_CHANNEL_ID, my_config.VIDEO_SEARCH_START_DATE)\n wf = open(\"video_ids.txt\", \"w\")\n for j in vid_list:\n wf.write(str(j))\n wf.write('\\n')\n wf.close()\n else:\n while 1:\n value = rf.readline()[:11]\n if value == '':\n break\n vid_list.append(value)\n rf.close()\n\n print(\"downloading videos...\")\n download(vid_list)\n print(\"finished downloading videos\")\n\n print(\"removing unnecessary subtitles...\")\n for f in glob.glob(\"*.en.vtt\"):\n os.remove(f)\n\n\ndef test_fetch():\n vid_list = fetch_video_ids(my_config.YOUTUBE_CHANNEL_ID, my_config.VIDEO_SEARCH_START_DATE)\n print(vid_list)\n print(len(vid_list))\n\n\nif __name__ == '__main__':\n # test_fetch()\n main()\n\n"
},
{
"alpha_fraction": 0.4896448254585266,
"alphanum_fraction": 0.5050525069236755,
"avg_line_length": 34.193660736083984,
"blob_id": "7ef4103460ddc4ce17954d91cc390fc325c3a457",
"content_id": "f0f21fa9dcb4df07fcd1cfcc764f1d01f47abfa9",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 9995,
"license_type": "permissive",
"max_line_length": 149,
"num_lines": 284,
"path": "/script/data_utils.py",
"repo_name": "youngwoo-yoon/youtube-gesture-dataset",
"src_encoding": "UTF-8",
"text": "# ------------------------------------------------------------------------------\n# Copyright (c) ETRI. All rights reserved.\n# Licensed under the BSD 3-Clause License.\n# This file is part of Youtube-Gesture-Dataset, a sub-project of AIR(AI for Robots) project.\n# You can refer to details of AIR project at https://aiforrobots.github.io\n# Written by Youngwoo Yoon ([email protected])\n# ------------------------------------------------------------------------------\n\nimport glob\nimport matplotlib\nimport cv2\nimport re\nimport json\nimport _pickle as pickle\nfrom webvtt import WebVTT\nfrom config import my_config\n\n\n###############################################################################\n# SKELETON\ndef draw_skeleton_on_image(img, skeleton, thickness=15):\n if not skeleton:\n return img\n\n new_img = img.copy()\n for pair in SkeletonWrapper.skeleton_line_pairs:\n pt1 = (int(skeleton[pair[0] * 3]), int(skeleton[pair[0] * 3 + 1]))\n pt2 = (int(skeleton[pair[1] * 3]), int(skeleton[pair[1] * 3 + 1]))\n if pt1[0] == 0 or pt2[1] == 0:\n pass\n else:\n rgb = [v * 255 for v in matplotlib.colors.to_rgba(pair[2])][:3]\n cv2.line(new_img, pt1, pt2, color=rgb[::-1], thickness=thickness)\n\n return new_img\n\n\ndef is_list_empty(my_list):\n return all(map(is_list_empty, my_list)) if isinstance(my_list, list) else False\n\n\ndef get_closest_skeleton(frame, selected_body):\n \"\"\" find the closest one to the selected skeleton \"\"\"\n diff_idx = [i * 3 for i in range(8)] + [i * 3 + 1 for i in range(8)] # upper-body\n\n min_diff = 10000000\n tracked_person = None\n for person in frame: # people\n body = get_skeleton_from_frame(person)\n\n diff = 0\n n_diff = 0\n for i in diff_idx:\n if body[i] > 0 and selected_body[i] > 0:\n diff += abs(body[i] - selected_body[i])\n n_diff += 1\n if n_diff > 0:\n diff /= n_diff\n if diff < min_diff:\n min_diff = diff\n tracked_person = person\n\n base_distance = max(abs(selected_body[0 * 3 + 1] - selected_body[1 * 3 + 1]) * 3,\n abs(selected_body[2 * 3] - selected_body[5 * 3]) * 2)\n if tracked_person and min_diff > base_distance: # tracking failed\n tracked_person = None\n\n return tracked_person\n\n\ndef get_skeleton_from_frame(frame):\n if 'pose_keypoints_2d' in frame:\n return frame['pose_keypoints_2d']\n elif 'pose_keypoints' in frame:\n return frame['pose_keypoints']\n else:\n return None\n\n\nclass SkeletonWrapper:\n # color names: https://matplotlib.org/mpl_examples/color/named_colors.png\n visualization_line_pairs = [(0, 1, 'b'), (1, 2, 'darkred'), (2, 3, 'r'), (3, 4, 'gold'), (1, 5, 'darkgreen'), (5, 6, 'g'),\n (6, 7, 'lightgreen'),\n (1, 8, 'darkcyan'), (8, 9, 'c'), (9, 10, 'skyblue'), (1, 11, 'deeppink'), (11, 12, 'hotpink'), (12, 13, 'lightpink')]\n skeletons = []\n skeleton_line_pairs = [(0, 1, 'b'), (1, 2, 'darkred'), (2, 3, 'r'), (3, 4, 'gold'), (1, 5, 'darkgreen'),\n (5, 6, 'g'), (6, 7, 'lightgreen')]\n\n def __init__(self, basepath, vid):\n # load skeleton data (and save it to pickle for next load)\n pickle_file = glob.glob(basepath + '/' + vid + '.pickle')\n\n if pickle_file:\n with open(pickle_file[0], 'rb') as file:\n self.skeletons = pickle.load(file)\n else:\n files = glob.glob(basepath + '/' + vid + '/*.json')\n if len(files) > 10:\n files = sorted(files)\n self.skeletons = []\n for file in files:\n self.skeletons.append(self.read_skeleton_json(file))\n with open(basepath + '/' + vid + '.pickle', 'wb') as file:\n pickle.dump(self.skeletons, file)\n else:\n self.skeletons = []\n\n\n def read_skeleton_json(self, file):\n with 
open(file) as json_file:\n skeleton_json = json.load(json_file)\n return skeleton_json['people']\n\n\n def get(self, start_frame_no, end_frame_no, interval=1):\n\n chunk = self.skeletons[start_frame_no:end_frame_no]\n\n if is_list_empty(chunk):\n return []\n else: \n if interval > 1:\n return chunk[::int(interval)]\n else:\n return chunk\n\n\n###############################################################################\n# VIDEO\ndef read_video(base_path, vid):\n files = glob.glob(base_path + '/*' + vid + '.mp4')\n if len(files) == 0:\n return None\n elif len(files) >= 2:\n assert False\n filepath = files[0]\n\n video_obj = VideoWrapper(filepath)\n\n return video_obj\n\n\nclass VideoWrapper:\n video = []\n\n def __init__(self, filepath):\n self.filepath = filepath\n self.video = cv2.VideoCapture(filepath)\n self.total_frames = int(self.video.get(cv2.CAP_PROP_FRAME_COUNT))\n self.height = self.video.get(cv2.CAP_PROP_FRAME_HEIGHT)\n self.framerate = self.video.get(cv2.CAP_PROP_FPS)\n\n def get_video_reader(self):\n return self.video\n\n def frame2second(self, frame_no):\n return frame_no / self.framerate\n\n def second2frame(self, second):\n return int(round(second * self.framerate))\n\n def set_current_frame(self, cur_frame_no):\n self.video.set(cv2.CAP_PROP_POS_FRAMES, cur_frame_no)\n\n\n###############################################################################\n# CLIP\ndef load_clip_data(vid):\n try:\n with open(\"{}/{}.json\".format(my_config.CLIP_PATH, vid)) as data_file:\n data = json.load(data_file)\n return data\n except FileNotFoundError:\n return None\n\n\ndef load_clip_filtering_aux_info(vid):\n try:\n with open(\"{}/{}_aux_info.json\".format(my_config.CLIP_PATH, vid)) as data_file:\n data = json.load(data_file)\n return data\n except FileNotFoundError:\n return None\n\n\n#################################################################################\n#SUBTITLE\nclass SubtitleWrapper:\n TIMESTAMP_PATTERN = re.compile('(\\d+)?:?(\\d{2}):(\\d{2})[.,](\\d{3})')\n\n def __init__(self, vid, mode):\n self.subtitle = []\n if mode == 'auto':\n self.load_auto_subtitle_data(vid)\n elif mode == 'gentle':\n self.laod_gentle_subtitle(vid)\n\n def get(self):\n return self.subtitle\n\n # using gentle lib\n def laod_gentle_subtitle(self,vid):\n try:\n with open(\"{}/{}_align_results.json\".format(my_config.VIDEO_PATH, vid)) as data_file:\n data = json.load(data_file)\n if 'words' in data:\n raw_subtitle = data['words']\n\n for word in raw_subtitle :\n if word['case'] == 'success':\n self.subtitle.append(word)\n else:\n self.subtitle = None\n return data\n except FileNotFoundError:\n self.subtitle = None\n\n # using youtube automatic subtitle\n def load_auto_subtitle_data(self, vid):\n lang = my_config.LANG\n postfix_in_filename = '-'+lang+'-auto.vtt'\n file_list = glob.glob(my_config.SUBTITLE_PATH + '/*' + vid + postfix_in_filename)\n if len(file_list) > 1:\n print('more than one subtitle. 
check this.', file_list)\n self.subtitle = None\n assert False\n if len(file_list) == 1:\n for i, subtitle_chunk in enumerate(WebVTT().read(file_list[0])):\n raw_subtitle = str(subtitle_chunk.raw_text)\n if raw_subtitle.find('\\n'):\n raw_subtitle = raw_subtitle.split('\\n')\n\n for raw_subtitle_chunk in raw_subtitle:\n if self.TIMESTAMP_PATTERN.search(raw_subtitle_chunk) is None:\n continue\n\n # removes html tags and timing tags from caption text\n raw_subtitle_chunk = raw_subtitle_chunk.replace(\"</c>\", \"\")\n raw_subtitle_chunk = re.sub(\"<c[.]\\w+>\", '', raw_subtitle_chunk)\n\n word_list = []\n raw_subtitle_s = subtitle_chunk.start_in_seconds\n raw_subtitle_e = subtitle_chunk.end_in_seconds\n\n word_chunk = raw_subtitle_chunk.split('<c>')\n\n for i, word in enumerate(word_chunk):\n word_info = {}\n\n if i == len(word_chunk)-1:\n word_info['word'] = word\n word_info['start'] = word_list[i-1]['end']\n word_info['end'] = raw_subtitle_e\n word_list.append(word_info)\n break\n\n word = word.split(\"<\")\n word_info['word'] = word[0]\n word_info['end'] = self.get_seconds(word[1][:-1])\n\n if i == 0:\n word_info['start'] = raw_subtitle_s\n word_list.append(word_info)\n continue\n\n word_info['start'] = word_list[i-1]['end']\n word_list.append(word_info)\n\n self.subtitle.extend(word_list)\n else:\n print('subtitle file is not exist')\n self.subtitle = None\n\n # convert timestamp to second\n def get_seconds(self, word_time_e):\n time_value = re.match(self.TIMESTAMP_PATTERN, word_time_e)\n if not time_value:\n print('wrong time stamp pattern')\n exit()\n\n values = list(map(lambda x: int(x) if x else 0, time_value.groups()))\n hours, minutes, seconds, milliseconds = values[0], values[1], values[2], values[3]\n\n return hours * 3600 + minutes * 60 + seconds + milliseconds / 1000\n"
},
{
"alpha_fraction": 0.5889496207237244,
"alphanum_fraction": 0.6059502363204956,
"avg_line_length": 34.04255294799805,
"blob_id": "4dc3ba3cf3f22ee6fab70f2bea9bf7fd146df8f3",
"content_id": "00377367fed82d30245df74b4e104b059471dd9b",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1647,
"license_type": "permissive",
"max_line_length": 92,
"num_lines": 47,
"path": "/script/config.py",
"repo_name": "youngwoo-yoon/youtube-gesture-dataset",
"src_encoding": "UTF-8",
"text": "# ------------------------------------------------------------------------------\n# Copyright (c) ETRI. All rights reserved.\n# Licensed under the BSD 3-Clause License.\n# This file is part of Youtube-Gesture-Dataset, a sub-project of AIR(AI for Robots) project.\n# You can refer to details of AIR project at https://aiforrobots.github.io\n# Written by Youngwoo Yoon ([email protected])\n# ------------------------------------------------------------------------------\n\nfrom datetime import datetime\n\n\nclass Config:\n DEVELOPER_KEY = \"\" # your youtube developer id\n OPENPOSE_BASE_DIR = \"/mnt/work/work/openpose/\"\n OPENPOSE_BIN_PATH = \"build/examples/openpose/openpose.bin\"\n\n\nclass TEDConfig(Config):\n YOUTUBE_CHANNEL_ID = \"UCAuUUnT6oDeKwE6v1NGQxug\"\n WORK_PATH = '/mnt/work/work/Youtube_Dataset'\n CLIP_PATH = WORK_PATH + \"/clip_ted\"\n VIDEO_PATH = WORK_PATH + \"/videos_ted\"\n SKELETON_PATH = WORK_PATH + \"/skeleton_ted\"\n SUBTITLE_PATH = VIDEO_PATH\n OUTPUT_PATH = WORK_PATH + \"/output\"\n VIDEO_SEARCH_START_DATE = datetime(2011, 3, 1, 0, 0, 0)\n LANG = 'en'\n SUBTITLE_TYPE = 'gentle'\n FILTER_OPTION = {\"threshold\": 100}\n\n\nclass LaughConfig(Config):\n YOUTUBE_CHANNEL_ID = \"UCxyCzPY2pjAjrxoSYclpuLg\"\n WORK_PATH = '/mnt/work/work/Youtube_Dataset'\n CLIP_PATH = WORK_PATH + \"/clip_laugh\"\n VIDEO_PATH = WORK_PATH + \"/videos_laugh\"\n SKELETON_PATH = WORK_PATH + \"/skeleton_laugh\"\n SUBTITLE_PATH = VIDEO_PATH\n OUTPUT_PATH = WORK_PATH + \"/output\"\n VIDEO_SEARCH_START_DATE = datetime(2010, 5, 1, 0, 0, 0)\n LANG = 'en'\n SUBTITLE_TYPE = 'auto'\n FILTER_OPTION = {\"threshold\": 50}\n\n\n# SET THIS\nmy_config = TEDConfig\n"
},
{
"alpha_fraction": 0.5491610169410706,
"alphanum_fraction": 0.5674124360084534,
"avg_line_length": 37.168540954589844,
"blob_id": "943873e65e571396032b635d984afbe6bb77be96",
"content_id": "ee8b90223ac0156cc15f1857f916143cda157db9",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6794,
"license_type": "permissive",
"max_line_length": 143,
"num_lines": 178,
"path": "/script/clip_filter.py",
"repo_name": "youngwoo-yoon/youtube-gesture-dataset",
"src_encoding": "UTF-8",
"text": "# ------------------------------------------------------------------------------\n# Copyright (c) ETRI. All rights reserved.\n# Licensed under the BSD 3-Clause License.\n# This file is part of Youtube-Gesture-Dataset, a sub-project of AIR(AI for Robots) project.\n# You can refer to details of AIR project at https://aiforrobots.github.io\n# Written by Youngwoo Yoon ([email protected])\n# ------------------------------------------------------------------------------\n\nimport numpy as np\nimport cv2\nimport math\n\nfrom data_utils import get_skeleton_from_frame\nfrom config import my_config\n\n\nclass ClipFilter:\n def __init__(self, video, start_frame_no, end_frame_no, raw_skeleton, main_speaker_skeletons):\n self.skeleton_data = raw_skeleton\n self.main_speaker_skeletons = main_speaker_skeletons\n self.start_frame_no = start_frame_no\n self.end_frame_no = end_frame_no\n self.scene_length = end_frame_no - start_frame_no\n self.video = video\n self.filter_option = my_config.FILTER_OPTION\n\n # filtering criteria variable\n self.filtering_results = [0, 0, 0, 0, 0, 0, 0] # too short, many_people, looking_back, joint_missing, looking_sideways, small, picture\n self.message = ''\n self.debugging_info = ['None', 'None', 'None', 'None', 'None'] # looking back, joint missing, looking sideways, small, picture\n\n def is_skeleton_back(self, ratio):\n n_incorrect_frame = 0\n\n for ia, skeleton in enumerate(self.main_speaker_skeletons): # frames\n body = get_skeleton_from_frame(skeleton)\n if body:\n if body[2 * 3] > body[5 * 3]:\n n_incorrect_frame += 1\n else:\n n_incorrect_frame += 1\n\n self.debugging_info[0] = round(n_incorrect_frame / self.scene_length, 3)\n\n return n_incorrect_frame / self.scene_length > ratio\n\n def is_skeleton_sideways(self, ratio):\n n_incorrect_frame = 0\n\n for ia, skeleton in enumerate(self.main_speaker_skeletons): # frames\n body = get_skeleton_from_frame(skeleton)\n if body:\n if (body[0] < min(body[2 * 3], body[5 * 3]) or body[0] > max(body[2 * 3], body[5 * 3])):\n n_incorrect_frame += 1\n else:\n n_incorrect_frame += 1\n\n self.debugging_info[2] = round(n_incorrect_frame / self.scene_length, 3)\n\n return n_incorrect_frame / self.scene_length > ratio\n\n def is_skeleton_missing(self, ratio):\n n_incorrect_frame = 0\n\n if self.main_speaker_skeletons == []:\n n_incorrect_frame = self.scene_length\n else:\n for ia, skeleton in enumerate(self.main_speaker_skeletons): # frames\n\n body = get_skeleton_from_frame(skeleton)\n if body:\n point_idx = [0, 1, 2, 3, 4, 5, 6, 7] # head and arms\n if any(body[idx * 3] == 0 for idx in point_idx):\n n_incorrect_frame += 1\n\n else:\n n_incorrect_frame += 1\n\n self.debugging_info[1] = round(n_incorrect_frame / self.scene_length, 3)\n return n_incorrect_frame / self.scene_length > ratio\n\n def is_skeleton_small(self, ratio):\n n_incorrect_frame = 0\n\n def distance(x1, y1, x2, y2):\n return np.sqrt((x1 - x2) ** 2 + (y1 - y2) ** 2)\n\n for ia, skeleton in enumerate(self.main_speaker_skeletons): # frames\n body = get_skeleton_from_frame(skeleton)\n if body:\n threshold = self.filter_option['threshold'] # for TED videos in 720p\n if distance(body[2 * 3], body[2 * 3 + 1], body[5 * 3], body[5 * 3 + 1]) < threshold: # shoulder length\n n_incorrect_frame += 1\n else:\n n_incorrect_frame += 1\n\n self.debugging_info[3] = round(n_incorrect_frame / self.scene_length, 3)\n return n_incorrect_frame / self.scene_length > ratio\n\n def is_too_short(self):\n MIN_SCENE_LENGTH = 25 * 3 # assumed fps = 25\n return self.scene_length < 
MIN_SCENE_LENGTH\n\n def is_picture(self):\n sampling_interval = int(math.floor(self.scene_length / 5))\n sampling_frames = list(range(self.start_frame_no + sampling_interval,\n self.end_frame_no - sampling_interval + 1, sampling_interval))\n frames = []\n for frame_no in sampling_frames:\n self.video.set(cv2.CAP_PROP_POS_FRAMES, frame_no)\n ret, frame = self.video.read()\n frames.append(frame)\n\n diff = 0\n n_diff = 0\n for frame, next_frame in zip(frames, frames[1:]):\n diff += cv2.norm(frame, next_frame, cv2.NORM_L1) # abs diff\n n_diff += 1\n diff /= n_diff\n self.debugging_info[4] = round(diff, 0)\n\n return diff < 3000000\n\n def is_many_people(self):\n n_people = []\n for skeleton in self.skeleton_data:\n n_people.append(len(skeleton))\n\n return len(n_people) > 0 and np.mean(n_people) > 5\n\n def is_correct_clip(self):\n # check if the clip is too short.\n if self.is_too_short():\n self.message = \"too Short\"\n return False\n self.filtering_results[0] = 1\n\n # check if there are too many people on the clip\n if self.is_many_people():\n self.message = \"too many people\"\n return False\n self.filtering_results[1] = 1\n\n # check if the ratio of back-facing skeletons in the clip exceeds the reference ratio\n if self.is_skeleton_back(0.3):\n self.message = \"looking behind\"\n return False\n self.filtering_results[2] = 1\n\n # check if the ratio of skeletons that missing joint in the clip exceeds the reference ratio\n if self.is_skeleton_missing(0.5):\n self.message = \"too many missing joints\"\n return False\n self.filtering_results[3] = 1\n\n # check if the ratio of sideways skeletons in the clip exceeds the reference ratio\n if self.is_skeleton_sideways(0.5):\n self.message = \"looking sideways\"\n return False\n self.filtering_results[4] = 1\n\n # check if the ratio of the too small skeleton in the clip exceeds the reference ratio\n if self.is_skeleton_small(0.5):\n self.message = \"too small.\"\n return False\n self.filtering_results[5] = 1\n\n # check if the clip is picture\n if self.is_picture():\n self.message = \"still picture\"\n return False\n self.filtering_results[6] = 1\n\n self.message = \"PASS\"\n return True\n\n def get_filter_variable(self):\n return self.filtering_results, self.message, self.debugging_info\n"
},
{
"alpha_fraction": 0.5784252882003784,
"alphanum_fraction": 0.5871047973632812,
"avg_line_length": 35.65909194946289,
"blob_id": "df4ab67faed75b8c6bbf748acdf2a89f9ec9a769",
"content_id": "00afa61b8e3d3c5d113d85a3fa9c3972648dce44",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1613,
"license_type": "permissive",
"max_line_length": 121,
"num_lines": 44,
"path": "/script/run_scenedetect.py",
"repo_name": "youngwoo-yoon/youtube-gesture-dataset",
"src_encoding": "UTF-8",
"text": "# ------------------------------------------------------------------------------\n# Copyright 2019 ETRI. All rights reserved.\n# Licensed under the BSD 3-Clause License.\n# This file is part of Youtube-Gesture-Dataset, a sub-project of AIR(AI for Robots) project.\n# You can refer to details of AIR project at https://aiforrobots.github.io\n# Written by Youngwoo Yoon ([email protected])\n# ------------------------------------------------------------------------------\n\nfrom __future__ import unicode_literals\nimport subprocess\nimport glob\nimport os\nfrom tqdm import tqdm\nfrom config import my_config\n\n\ndef run_pyscenedetect(file_path, vid): # using Pyscenedetect\n os.chdir(my_config.VIDEO_PATH)\n\n cmd = 'scenedetect --input \"{}\" --output \"{}\" -d 4 detect-content list-scenes'.format(file_path, my_config.CLIP_PATH)\n print(' ' + cmd)\n subprocess.run(cmd, shell=True, check=True)\n subprocess.run(\"exit\", shell=True, check=True)\n\n\ndef main():\n if not os.path.exists(my_config.CLIP_PATH):\n os.makedirs(my_config.CLIP_PATH)\n\n videos = glob.glob(my_config.VIDEO_PATH + \"/*.mp4\")\n n_total = len(videos)\n for i, file_path in tqdm(enumerate(sorted(videos, key=os.path.getmtime))):\n print('{}/{}'.format(i+1, n_total))\n vid = os.path.split(file_path)[1][-15:-4]\n\n csv_files = glob.glob(my_config.CLIP_PATH + \"/{}*.csv\".format(vid))\n if len(csv_files) > 0 and os.path.getsize(csv_files[0]): # existing and not empty\n print(' CSV file already exists ({})'.format(vid))\n else:\n run_pyscenedetect(file_path, vid)\n\n\nif __name__ == '__main__':\n main()\n"
},
{
"alpha_fraction": 0.4823918342590332,
"alphanum_fraction": 0.5062527060508728,
"avg_line_length": 36.40322494506836,
"blob_id": "a97639f8ad29f351b91f106e0264e6ff84d22ff7",
"content_id": "f0d7c9f1f42ed56cc74e6289f8b2c5f607c8be35",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6957,
"license_type": "permissive",
"max_line_length": 99,
"num_lines": 186,
"path": "/script/motion_preprocessor.py",
"repo_name": "youngwoo-yoon/youtube-gesture-dataset",
"src_encoding": "UTF-8",
"text": "# ------------------------------------------------------------------------------\n# Copyright (c) ETRI. All rights reserved.\n# Licensed under the BSD 3-Clause License.\n# This file is part of Youtube-Gesture-Dataset, a sub-project of AIR(AI for Robots) project.\n# You can refer to details of AIR project at https://aiforrobots.github.io\n# Written by Youngwoo Yoon ([email protected])\n# ------------------------------------------------------------------------------\n\nfrom scipy.signal import savgol_filter\nimport numpy as np\nfrom scipy.stats import circvar\n\n\ndef normalize_skeleton(data, resize_factor=None):\n def distance(x1, y1, x2, y2):\n return np.sqrt((x1 - x2) ** 2 + (y1 - y2) ** 2)\n\n anchor_pt = (data[1 * 2], data[1 * 2 + 1]) # neck\n if resize_factor is None:\n neck_height = float(abs(data[1] - data[1 * 2 + 1]))\n shoulder_length = distance(data[1 * 2], data[1 * 2 + 1], data[2 * 2], data[2 * 2 + 1]) + \\\n distance(data[1 * 2], data[1 * 2 + 1], data[5 * 2], data[5 * 2 + 1])\n resized_neck_height = neck_height / float(shoulder_length)\n if resized_neck_height > 0.6:\n resize_factor = shoulder_length * resized_neck_height / 0.6\n else:\n resize_factor = shoulder_length\n\n normalized_data = data.copy()\n for i in range(0, len(data), 2):\n normalized_data[i] = (data[i] - anchor_pt[0]) / resize_factor\n normalized_data[i + 1] = (data[i + 1] - anchor_pt[1]) / resize_factor\n\n return normalized_data, resize_factor\n\n\nclass MotionPreprocessor:\n def __init__(self, skeletons):\n self.skeletons = np.array(skeletons)\n self.filtering_message = \"PASS\"\n\n def get(self):\n assert (self.skeletons is not None)\n\n # filtering\n if self.has_missing_frames():\n self.skeletons = []\n self.filtering_message = \"too many missing frames\"\n\n # fill missing joints\n if self.skeletons != []:\n self.fill_missing_joints()\n if self.skeletons is None or np.isnan(self.skeletons).any():\n self.filtering_message = \"failed to fill missing joints\"\n self.skeletons = []\n\n # filtering\n if self.skeletons != []:\n if self.is_static():\n self.skeletons = []\n self.filtering_message = \"static motion\"\n elif self.has_jumping_joint():\n self.skeletons = []\n self.filtering_message = \"jumping joint\"\n\n # preprocessing\n if self.skeletons != []:\n\n self.smooth_motion()\n\n is_side_view = False\n self.skeletons = self.skeletons.tolist()\n for i, frame in enumerate(self.skeletons):\n del frame[2::3] # remove confidence values\n self.skeletons[i], _ = normalize_skeleton(frame) # translate and scale\n\n # assertion: missing joints\n assert not np.isnan(self.skeletons[i]).any()\n\n # side view check\n if (self.skeletons[i][0] < min(self.skeletons[i][2 * 2],\n self.skeletons[i][5 * 2]) or\n self.skeletons[i][0] > max(self.skeletons[i][2 * 2],\n self.skeletons[i][5 * 2])):\n is_side_view = True\n break\n\n if len(self.skeletons) == 0 or is_side_view:\n self.filtering_message = \"sideview\"\n self.skeletons = []\n\n return self.skeletons, self.filtering_message\n\n def is_static(self, verbose=False):\n def joint_angle(p1, p2, p3):\n v1 = p1 - p2\n v2 = p3 - p2\n ang1 = np.arctan2(*v1[::-1])\n ang2 = np.arctan2(*v2[::-1])\n return np.rad2deg((ang1 - ang2) % (2 * np.pi))\n\n def get_joint_variance(skeleton, index1, index2, index3):\n angles = []\n\n for i in range(skeleton.shape[0]):\n x1, y1 = skeleton[i, index1 * 3], skeleton[i, index1 * 3 + 1]\n x2, y2 = skeleton[i, index2 * 3], skeleton[i, index2 * 3 + 1]\n x3, y3 = skeleton[i, index3 * 3], skeleton[i, index3 * 3 + 1]\n angle = 
joint_angle(np.array([x1, y1]), np.array([x2, y2]), np.array([x3, y3]))\n angles.append(angle)\n\n variance = circvar(angles, low=0, high=360)\n return variance\n\n left_arm_var = get_joint_variance(self.skeletons, 2, 3, 4)\n right_arm_var = get_joint_variance(self.skeletons, 5, 6, 7)\n\n th = 150\n if left_arm_var < th and right_arm_var < th:\n print('too static - left var {}, right var {}'.format(left_arm_var, right_arm_var))\n return True\n else:\n if verbose:\n print('not static - left var {}, right var {}'.format(left_arm_var, right_arm_var))\n return False\n\n def has_jumping_joint(self, verbose=False):\n frame_diff = np.squeeze(self.skeletons[1:, :24] - self.skeletons[:-1, :24])\n diffs = abs(frame_diff.flatten())\n width = max(self.skeletons[0, :24:3]) - min(self.skeletons[0, :24:3])\n\n if max(diffs) > width / 2.0:\n print('jumping joint - diff {}, width {}'.format(max(diffs), width))\n return True\n else:\n if verbose:\n print('no jumping joint - diff {}, width {}'.format(max(diffs), width))\n return False\n\n def has_missing_frames(self):\n n_empty_frames = 0\n n_frames = self.skeletons.shape[0]\n for i in range(n_frames):\n if np.sum(self.skeletons[i]) == 0:\n n_empty_frames += 1\n\n ret = n_empty_frames > n_frames * 0.1\n if ret:\n print('missing frames - {} / {}'.format(n_empty_frames, n_frames))\n return ret\n\n def smooth_motion(self):\n for i in range(24):\n self.skeletons[:, i] = savgol_filter(self.skeletons[:, i], 5, 2)\n\n def fill_missing_joints(self):\n skeletons = self.skeletons\n n_joints = 8 # only upper body\n\n def nan_helper(y):\n return np.isnan(y), lambda z: z.nonzero()[0]\n\n for i in range(n_joints):\n xs, ys = skeletons[:, i * 3], skeletons[:, i * 3 + 1]\n xs[xs == 0] = np.nan\n ys[ys == 0] = np.nan\n\n if sum(np.isnan(xs)) > len(xs) / 2:\n skeletons = None\n break\n\n if sum(np.isnan(ys)) > len(ys) / 2:\n skeletons = None\n break\n\n if np.isnan(xs).any():\n nans, t = nan_helper(xs)\n xs[nans] = np.interp(t(nans), t(~nans), xs[~nans])\n skeletons[:, i * 3] = xs\n\n if np.isnan(ys).any():\n nans, t = nan_helper(ys)\n ys[nans] = np.interp(t(nans), t(~nans), ys[~nans])\n skeletons[:, i * 3 + 1] = ys\n\n return skeletons\n"
},
{
"alpha_fraction": 0.5091000199317932,
"alphanum_fraction": 0.5191183686256409,
"avg_line_length": 35.29697036743164,
"blob_id": "f8c4fb66511501dae6bd74d212f7ebd6749bd6f8",
"content_id": "f96a4fb3e32a211fe3d9173ad285a26c5c799a4b",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5989,
"license_type": "permissive",
"max_line_length": 119,
"num_lines": 165,
"path": "/script/make_ted_dataset.py",
"repo_name": "youngwoo-yoon/youtube-gesture-dataset",
"src_encoding": "UTF-8",
"text": "# ------------------------------------------------------------------------------\n# Copyright (c) ETRI. All rights reserved.\n# Licensed under the BSD 3-Clause License.\n# This file is part of Youtube-Gesture-Dataset, a sub-project of AIR(AI for Robots) project.\n# You can refer to details of AIR project at https://aiforrobots.github.io\n# Written by Youngwoo Yoon ([email protected])\n# ------------------------------------------------------------------------------\n\nimport os\n\nfrom tqdm import tqdm_gui\nimport unicodedata\n\nfrom data_utils import *\n\n\ndef read_subtitle(vid):\n postfix_in_filename = '-en.vtt'\n file_list = glob.glob(my_config.SUBTITLE_PATH + '/*' + vid + postfix_in_filename)\n if len(file_list) > 1:\n print('more than one subtitle. check this.', file_list)\n assert False\n if len(file_list) == 1:\n return WebVTT().read(file_list[0])\n else:\n return []\n\n\n# turn a Unicode string to plain ASCII, thanks to http://stackoverflow.com/a/518232/2809427\ndef unicode_to_ascii(s):\n return ''.join(\n c for c in unicodedata.normalize('NFD', s)\n if unicodedata.category(c) != 'Mn'\n )\n\n\n# lowercase, trim, and remove non-letter characters\ndef normalize_string(s):\n s = unicode_to_ascii(s.lower().strip())\n s = re.sub(r\"([,.!?])\", r\" \\1 \", s) # isolate some marks\n s = re.sub(r\"(['])\", r\"\", s) # remove apostrophe\n s = re.sub(r\"[^a-zA-Z,.!?]+\", r\" \", s) # replace other characters with whitespace\n s = re.sub(r\"\\s+\", r\" \", s).strip()\n return s\n\n\ndef normalize_subtitle(vtt_subtitle):\n for i, sub in enumerate(vtt_subtitle):\n vtt_subtitle[i].text = normalize_string(vtt_subtitle[i].text)\n return vtt_subtitle\n\n\ndef make_ted_gesture_dataset():\n dataset_train = []\n dataset_val = []\n dataset_test = []\n n_saved_clips = [0, 0, 0]\n\n video_files = sorted(glob.glob(my_config.VIDEO_PATH + \"/*.mp4\"), key=os.path.getmtime)\n for v_i, video_file in enumerate(tqdm_gui(video_files)):\n vid = os.path.split(video_file)[1][-15:-4]\n print(vid)\n\n # load clip, video, and subtitle\n clip_data = load_clip_data(vid)\n if clip_data is None:\n print('[ERROR] clip data file does not exist!')\n break\n\n video_wrapper = read_video(my_config.VIDEO_PATH, vid)\n\n subtitle_type = my_config.SUBTITLE_TYPE\n subtitle = SubtitleWrapper(vid, subtitle_type).get()\n\n if subtitle is None:\n print('[WARNING] subtitle does not exist! 
skipping this video.')\n continue\n\n dataset_train.append({'vid': vid, 'clips': []})\n dataset_val.append({'vid': vid, 'clips': []})\n dataset_test.append({'vid': vid, 'clips': []})\n\n word_index = 0\n valid_clip_count = 0\n for ia, clip in enumerate(clip_data):\n start_frame_no, end_frame_no, clip_pose_all = clip['clip_info'][0], clip['clip_info'][1], clip['frames']\n clip_word_list = []\n\n # skip FALSE clips\n if not clip['clip_info'][2]:\n continue\n\n # train/val/test split\n if valid_clip_count % 10 == 9:\n dataset = dataset_test\n dataset_idx = 2\n elif valid_clip_count % 10 == 8:\n dataset = dataset_val\n dataset_idx = 1\n else:\n dataset = dataset_train\n dataset_idx = 0\n valid_clip_count += 1\n\n # get subtitle that fits clip\n for ib in range(word_index - 1, len(subtitle)):\n if ib < 0:\n continue\n\n word_s = video_wrapper.second2frame(subtitle[ib]['start'])\n word_e = video_wrapper.second2frame(subtitle[ib]['end'])\n word = subtitle[ib]['word']\n\n if word_s >= end_frame_no:\n word_index = ib\n break\n\n if word_e <= start_frame_no:\n continue\n\n word = normalize_string(word)\n clip_word_list.append([word, word_s, word_e])\n\n if clip_word_list:\n clip_skeleton = []\n\n # get skeletons of the upper body in the clip\n for frame in clip_pose_all:\n if frame:\n clip_skeleton.append(get_skeleton_from_frame(frame)[:24])\n else: # frame with no skeleton\n clip_skeleton.append([0] * 24)\n\n # proceed if skeleton list is not empty\n if len(clip_skeleton) > 0:\n # save subtitles and skeletons corresponding to clips\n n_saved_clips[dataset_idx] += 1\n dataset[-1]['clips'].append({'words': clip_word_list,\n 'skeletons': clip_skeleton,\n 'start_frame_no': start_frame_no, 'end_frame_no': end_frame_no,\n 'vid': vid\n })\n print('{} ({}, {})'.format(vid, start_frame_no, end_frame_no))\n else:\n print('{} ({}, {}) - consecutive missing frames'.format(vid, start_frame_no, end_frame_no))\n\n # for debugging\n # if vid == 'yq3TQoMjXTw':\n # break\n\n print('writing to pickle...')\n with open('ted_gesture_dataset_train.pickle', 'wb') as f:\n pickle.dump(dataset_train, f)\n with open('ted_gesture_dataset_train_small.pickle', 'wb') as f: # for debugging\n pickle.dump(dataset_train[0:10], f)\n with open('ted_gesture_dataset_val.pickle', 'wb') as f:\n pickle.dump(dataset_val, f)\n with open('ted_gesture_dataset_test.pickle', 'wb') as f:\n pickle.dump(dataset_test, f)\n\n print('no. of saved clips: train {}, val {}, test {}'.format(n_saved_clips[0], n_saved_clips[1], n_saved_clips[2]))\n\n\nif __name__ == '__main__':\n make_ted_gesture_dataset()\n"
},
{
"alpha_fraction": 0.4967791736125946,
"alphanum_fraction": 0.5099201202392578,
"avg_line_length": 38.20201873779297,
"blob_id": "6b7afa1f47e3e1feee1f38db9b8b891e80cf96fa",
"content_id": "ed7eade542c12608f57627eada0e55e3f46f94ce",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3881,
"license_type": "permissive",
"max_line_length": 92,
"num_lines": 99,
"path": "/script/main_speaker_selector.py",
"repo_name": "youngwoo-yoon/youtube-gesture-dataset",
"src_encoding": "UTF-8",
"text": "# ------------------------------------------------------------------------------\n# Copyright 2019 ETRI. All rights reserved.\n# Licensed under the BSD 3-Clause License.\n# This file is part of Youtube-Gesture-Dataset, a sub-project of AIR(AI for Robots) project.\n# You can refer to details of AIR project at https://aiforrobots.github.io\n# Written by Youngwoo Yoon ([email protected])\n# ------------------------------------------------------------------------------\n\nimport matplotlib\nfrom config import *\nimport copy\nimport os\nfrom data_utils import *\nfrom tqdm import *\nfrom config import *\nimport numpy as np\n\n\nclass MainSpeakerSelector:\n def __init__(self, raw_skeleton_chunk):\n self.main_speaker_skeletons = self.find_main_speaker_skeletons(raw_skeleton_chunk)\n\n def get(self):\n return self.main_speaker_skeletons\n\n def find_main_speaker_skeletons(self, raw_skeleton_chunk):\n tracked_skeletons = []\n selected_skeletons = [] # reference skeleton\n for raw_frame in raw_skeleton_chunk: # frame\n tracked_person = []\n if selected_skeletons == []:\n # select a main speaker\n confidence_list = []\n for person in raw_frame: # people\n body = get_skeleton_from_frame(person)\n mean_confidence = 0\n n_points = 0\n\n # Calculate the average of confidences of each person\n for i in range(8): # upper-body only\n x = body[i * 3]\n y = body[i * 3 + 1]\n confidence = body[i * 3 + 2]\n if x > 0 and y > 0 and confidence > 0:\n n_points += 1\n mean_confidence += confidence\n if n_points > 0:\n mean_confidence /= n_points\n else:\n mean_confidence = 0\n confidence_list.append(mean_confidence)\n\n # select main_speaker with the highest average of confidence\n if len(confidence_list) > 0:\n max_index = confidence_list.index(max(confidence_list))\n selected_skeletons = get_skeleton_from_frame(raw_frame[max_index])\n\n if selected_skeletons != []:\n # find the closest one to the selected main_speaker's skeleton\n tracked_person = self.get_closest_skeleton(raw_frame, selected_skeletons)\n\n # save\n if tracked_person:\n skeleton_data = tracked_person\n selected_skeletons = get_skeleton_from_frame(tracked_person)\n else:\n skeleton_data = {}\n\n tracked_skeletons.append(skeleton_data)\n\n return tracked_skeletons\n\n def get_closest_skeleton(self, frame, selected_body):\n \"\"\" find the closest one to the selected skeleton \"\"\"\n diff_idx = [i * 3 for i in range(8)] + [i * 3 + 1 for i in range(8)] # upper-body\n\n min_diff = 10000000\n tracked_person = None\n for person in frame: # people\n body = get_skeleton_from_frame(person)\n\n diff = 0\n n_diff = 0\n for i in diff_idx:\n if body[i] > 0 and selected_body[i] > 0:\n diff += abs(body[i] - selected_body[i])\n n_diff += 1\n if n_diff > 0:\n diff /= n_diff\n if diff < min_diff:\n min_diff = diff\n tracked_person = person\n\n base_distance = max(abs(selected_body[0 * 3 + 1] - selected_body[1 * 3 + 1]) * 3,\n abs(selected_body[2 * 3] - selected_body[5 * 3]) * 2)\n if tracked_person and min_diff > base_distance: # tracking failed\n tracked_person = None\n\n return tracked_person\n"
},
{
"alpha_fraction": 0.5700006484985352,
"alphanum_fraction": 0.5868286490440369,
"avg_line_length": 40.928382873535156,
"blob_id": "8351ecc63072b41b3c30eb1f9ccc389e12865b94",
"content_id": "49b1003fe56223888b0eab517e25f78e8405d598",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 15811,
"license_type": "permissive",
"max_line_length": 121,
"num_lines": 377,
"path": "/script/review_filtered_clips.py",
"repo_name": "youngwoo-yoon/youtube-gesture-dataset",
"src_encoding": "UTF-8",
"text": "# ------------------------------------------------------------------------------\n# Copyright (c) ETRI. All rights reserved.\n# Licensed under the BSD 3-Clause License.\n# This file is part of Youtube-Gesture-Dataset, a sub-project of AIR(AI for Robots) project.\n# You can refer to details of AIR project at https://aiforrobots.github.io\n# Written by Youngwoo Yoon ([email protected])\n# ------------------------------------------------------------------------------\n\nfrom tkinter import ttk\nimport tkinter as tk\nimport os\nfrom PIL import Image, ImageTk\nfrom data_utils import *\nfrom config import *\nimport numpy as np\nimport enum\nfrom config import my_config\n\nreview_img_width = 3000\nreview_img_height = 1500\n\n\nclass Criteria(enum.Enum):\n # too short, many_people, skeleton_back, skeleton_missing, skeleton_side, skeleton_small,is_picture\n too_short = 0\n many_people = 1\n skeleton_back = 2\n skeleton_missing = 3\n skeleton_side = 4\n skeleton_small = 5\n is_picture = 6\n\n\nclass ReviewApp:\n MODE = 'ALL'\n vid = '-1'\n\n def __init__(self):\n self.win = tk.Tk()\n self.win.geometry(\"1500x800+100+100\")\n\n self.make_frame()\n self.make_label()\n self.make_filtering_box()\n self.make_img_canvas()\n self.make_view_combobox()\n\n self.make_vid_treeView()\n self.vid_tree.bind(\"<Double-1>\", self.OnVideoListClick)\n\n self.make_clip_treeView()\n self.clip_tree.bind(\"<Double-1>\", self.OnClipListClick)\n self.clip_tree.bind(\"<<TreeviewSelect>>\", self.OnClipTreeSelect)\n self.img_canvas.focus_set()\n\n self.win.mainloop()\n\n def make_frame(self):\n # main grid\n self.win.rowconfigure(0, weight=1)\n self.win.rowconfigure(1, weight=9)\n self.win.columnconfigure(0, weight=1)\n\n self.top_frame = tk.Frame(self.win, bg='#e9e9e9')\n self.top_frame.grid(row=0, sticky='nsew')\n self.top_frame.columnconfigure(0, weight=1)\n self.top_frame.columnconfigure(1, weight=12)\n\n self.top_frame.rowconfigure(0, weight=1)\n self.top_frame.rowconfigure(1, weight=1)\n self.top_frame.rowconfigure(2, weight=1)\n\n self.bottom_frame = tk.Frame(self.win)\n self.bottom_frame.grid(row=1, sticky='nsew', padx=5, pady=5)\n self.bottom_frame.columnconfigure(0, weight=1)\n self.bottom_frame.columnconfigure(1, weight=1)\n\n # bottom frame grid\n self.bottom_frame.columnconfigure(0, weight=1)\n self.bottom_frame.columnconfigure(1, weight=1)\n self.bottom_frame.columnconfigure(2, weight=15)\n self.bottom_frame.rowconfigure(0, weight=1)\n\n self.img_frame = tk.Frame(self.bottom_frame)\n self.img_frame.grid(row=0, column=2, sticky='nsew', padx=5, pady=5)\n\n def make_label(self):\n self.tx_vid_name = tk.Label(self.top_frame, bg='#8C8C8C', text='No selected video')\n self.tx_clip_interval = tk.Label(self.top_frame, bg='#8C8C8C', text='No selected clip')\n self.tx_vid_name.grid(row=0, column=0, sticky=(tk.N + tk.S + tk.E + tk.W))\n self.tx_clip_interval.grid(row=1, column=0, sticky=(tk.N + tk.S + tk.E + tk.W))\n\n def make_view_combobox(self):\n self.mode = tk.StringVar()\n self.view_combo = ttk.Combobox(self.top_frame, values=('ALL', 'TRUE', 'FALSE'), textvariable=self.mode)\n self.view_combo.grid(row=2, column=0, sticky=(tk.N + tk.S + tk.E + tk.W), padx=5, pady=5)\n self.view_combo.current(0)\n self.view_combo.bind('<<ComboboxSelected>>', self.OnComboSelected)\n\n def make_filtering_box(self):\n self.skeltonoptionFrame = tk.Frame(self.top_frame, bg='#e9e9e9')\n self.skeltonoptionFrame.grid(row=0, column=1, sticky='nsew')\n ratioFrame = tk.Frame(self.top_frame, bg='#e9e9e9')\n ratioFrame.grid(row=1, column=1, 
sticky='nsew')\n\n msgFrame = tk.Frame(self.top_frame, bg='#e9e9e9')\n msgFrame.grid(row=2, column=1, sticky='nsew')\n\n tx_back = tk.Label(ratioFrame, text=\"looking behind ratio: \", foreground='#3985F8', bg='#e9e9e9')\n tx_back.pack(side=tk.LEFT, padx=5)\n self.tx_ratio_back = tk.Label(ratioFrame, text=\"None\", bg='#e9e9e9')\n self.tx_ratio_back.pack(side=tk.LEFT)\n\n tx_missing = tk.Label(ratioFrame, text=\"missing joints ratio: \", foreground='#3985F8', bg='#e9e9e9')\n tx_missing.pack(side=tk.LEFT, padx=10)\n self.tx_ratio_missing = tk.Label(ratioFrame, text=\"None\", bg='#e9e9e9')\n self.tx_ratio_missing.pack(side=tk.LEFT)\n\n tx_side = tk.Label(ratioFrame, text=\"looking sideways ratio: \", foreground='#3985F8', bg='#e9e9e9')\n tx_side.pack(side=tk.LEFT, padx=10)\n self.tx_ratio_side = tk.Label(ratioFrame, text=\"None\", bg='#e9e9e9')\n self.tx_ratio_side.pack(side=tk.LEFT)\n\n tx_small = tk.Label(ratioFrame, text=\"small person ratio: \", foreground='#3985F8', bg='#e9e9e9')\n tx_small.pack(side=tk.LEFT, padx=10)\n self.tx_ratio_small = tk.Label(ratioFrame, text=\"None\", bg='#e9e9e9')\n self.tx_ratio_small.pack(side=tk.LEFT)\n\n tx_diff = tk.Label(ratioFrame, text=\"frame diff: \", foreground='#3985F8', bg='#e9e9e9')\n tx_diff.pack(side=tk.LEFT, padx=10)\n self.tx_frame_diff = tk.Label(ratioFrame, text=\"None\", bg='#e9e9e9')\n self.tx_frame_diff.pack(side=tk.LEFT)\n\n tx_option = tk.Label(self.skeltonoptionFrame, text='Criteria: ', foreground='#3985F8', bg='#e9e9e9')\n tx_option.pack(side=tk.LEFT, padx=5, pady=5)\n tx_res = tk.Label(msgFrame, text='Message:', foreground='#3985F8', bg='#e9e9e9')\n tx_res.pack(side=tk.LEFT, padx=5)\n self.message = tk.Label(msgFrame, text=' ', bg='#e9e9e9')\n self.message.pack(side=tk.LEFT)\n\n skeleton_option = [\"too Short\", \"many people\", \"looking behind\", \"joint missing\", \"sideways\", \"small\", \"picture\"]\n self.item = []\n for i in range(7):\n self.item.append(tk.IntVar())\n\n for val, option in enumerate(skeleton_option):\n tk.Checkbutton(self.skeltonoptionFrame,\n text=option,\n padx=5,\n pady=5,\n bg='#e9e9e9',\n variable=self.item[val],\n activebackground=\"blue\").pack(side=tk.LEFT, padx=5, pady=5)\n\n def make_vid_treeView(self):\n self.vid_tree = tk.ttk.Treeview(self.bottom_frame)\n self.vid_tree.grid(row=0, column=0, sticky='nsew', padx=5, pady=5)\n self.vid_tree.heading(\"#0\", text=\"Video List\")\n\n for file in sorted(glob.glob(my_config.VIDEO_PATH + \"/*.mp4\"), key=os.path.getmtime):\n vid = os.path.split(file)[1][-15:-4]\n self.vid_tree.insert('', 'end', text=vid, values=vid, iid=vid)\n\n def make_clip_treeView(self):\n self.clip_tree = tk.ttk.Treeview(self.bottom_frame)\n self.clip_tree.grid(row=0, column=1, sticky='nsew', padx=5, pady=5)\n self.clip_tree.heading(\"#0\", text=\"Clip List\")\n self.clip_tree.tag_configure('False', background='#E8E8E8')\n\n def make_img_canvas(self):\n self.img_canvas = tk.Canvas(self.img_frame, bg='black')\n self.img_canvas.config(scrollregion=(0, 0, review_img_width, review_img_height))\n\n hbar = tk.Scrollbar(self.img_frame, orient=tk.HORIZONTAL)\n hbar.pack(side=tk.BOTTOM, fill=tk.X)\n hbar.config(command=self.img_canvas.xview)\n vbar = tk.Scrollbar(self.img_frame, orient=tk.VERTICAL)\n vbar.pack(side=tk.RIGHT, fill=tk.Y)\n vbar.config(command=self.img_canvas.yview)\n self.img_canvas.bind(\"<MouseWheel>\", self._on_mousewheel)\n\n self.img_canvas.config(xscrollcommand=hbar.set, yscrollcommand=vbar.set)\n self.img_canvas.pack(expand=tk.YES, fill=tk.BOTH)\n\n def _on_mousewheel(self, 
event):\n self.img_canvas.yview_scroll(-1 * event.delta, \"units\")\n\n def OnComboSelected(self, event):\n change_mode = self.view_combo.get()\n\n if change_mode != self.MODE:\n self.MODE = change_mode\n self.load_clip()\n\n def OnVideoListClick(self, event):\n \"\"\" load clip data \"\"\"\n item = self.vid_tree.identify('item', event.x, event.y)\n vid = self.vid_tree.item(item, \"text\")\n self.vid = vid\n\n self.tx_vid_name.configure(text=vid)\n self.tx_clip_interval.configure(text='No selected clip')\n self.img_canvas.delete(tk.ALL)\n self.message.config(text=' ')\n self.tx_ratio_small.config(text='None')\n self.tx_ratio_side.config(text='None')\n self.tx_ratio_missing.config(text='None')\n self.tx_ratio_back.config(text='None')\n self.tx_frame_diff.config(text='None')\n\n print(vid)\n\n self.clip_data = load_clip_data(vid)\n self.skeleton = SkeletonWrapper(my_config.SKELETON_PATH, vid)\n self.video_wrapper = read_video(my_config.VIDEO_PATH, vid)\n self.clip_filter_data = load_clip_filtering_aux_info(vid)\n\n self.load_clip()\n self.win.update()\n\n def OnClipListClick(self, event):\n item = self.clip_tree.identify('item', event.x, event.y)\n item_index = int(self.clip_tree.item(item, \"values\")[0])\n print(item_index, 'Double_Click')\n\n def OnClipTreeSelect(self, event):\n item = self.clip_tree.item(self.clip_tree.focus())\n item_index = int(self.clip_tree.item(self.clip_tree.focus(), 'values')[0])\n print('Load clip, idx:', item_index)\n\n # load image\n self.review_clip = self.clip_data[item_index]\n start_frame_no = self.review_clip['clip_info'][0]\n end_frame_no = self.review_clip['clip_info'][1]\n correct_clip = self.review_clip['clip_info'][2]\n\n image = self.show_clips(clip=self.review_clip, correct_clip=correct_clip)\n\n b, g, r = cv2.split(image)\n img = cv2.merge((r, g, b))\n im = Image.fromarray(img)\n imgtk = ImageTk.PhotoImage(image=im)\n\n self.image = imgtk\n self.img_canvas.delete(tk.ALL)\n self.img_canvas.create_image(0, 0, image=imgtk, anchor=tk.NW)\n\n # self.img_label.image = self.image\n # self.img_label.config(image=self.image)\n # self.img_label.place(x=0, y=0)\n\n # load filtering results\n clip_filter_data = self.clip_filter_data[item_index]\n filtering_results = clip_filter_data['filtering_results']\n message = clip_filter_data['message']\n debugging_info = clip_filter_data['debugging_info']\n\n # tooshort, many_people, skeleton_back, skeleton_missing, skeleton_side, skeleton_small, is_picture 순서\n self.item[Criteria.too_short.value].set(filtering_results[Criteria.too_short.value])\n self.item[Criteria.many_people.value].set(filtering_results[Criteria.many_people.value])\n self.item[Criteria.skeleton_back.value].set(filtering_results[Criteria.skeleton_back.value])\n self.item[Criteria.skeleton_missing.value].set(filtering_results[Criteria.skeleton_missing.value])\n self.item[Criteria.skeleton_side.value].set(filtering_results[Criteria.skeleton_side.value])\n self.item[Criteria.skeleton_small.value].set(filtering_results[Criteria.skeleton_small.value])\n self.item[Criteria.is_picture.value].set(filtering_results[Criteria.is_picture.value])\n self.message.config(text=message)\n\n self.tx_ratio_back.config(text=debugging_info[0])\n self.tx_ratio_missing.config(text=debugging_info[1])\n self.tx_ratio_side.config(text=debugging_info[2])\n self.tx_ratio_small.config(text=debugging_info[3])\n self.tx_frame_diff.config(text=debugging_info[4])\n\n self.tx_clip_interval.configure(text=str(start_frame_no) + ' ~ ' + str(end_frame_no) + ' ' + str(correct_clip))\n # 
self.win.update()\n\n def load_clip(self):\n if self.vid == '-1':\n print('Error: load video first')\n return\n\n # init clip tree\n for i in self.clip_tree.get_children():\n self.clip_tree.delete(i)\n\n self.tx_clip_interval.configure(text='No selected clip')\n self.img_canvas.delete(tk.ALL)\n\n for item in self.item:\n item.set(False)\n\n if self.clip_data and self.skeleton.skeletons != []:\n # load clips\n for i, clip in enumerate(self.clip_data):\n start_frame_no = clip['clip_info'][0]\n end_frame_no = clip['clip_info'][1]\n correct_clip = clip['clip_info'][2]\n\n if self.MODE == 'ALL':\n self.clip_tree.insert('', 'end', text=str(start_frame_no) + ' ~ ' + str(end_frame_no), values=i,\n iid=i, tag=str(correct_clip))\n elif self.MODE == 'TRUE':\n if correct_clip:\n self.clip_tree.insert('', 'end', text=str(start_frame_no) + ' ~ ' + str(end_frame_no), values=i,\n iid=i, tag=str(correct_clip))\n elif self.MODE == 'FALSE':\n if not correct_clip:\n self.clip_tree.insert('', 'end', text=str(start_frame_no) + ' ~ ' + str(end_frame_no), values=i,\n iid=i, tag=str(correct_clip))\n else:\n print('[Error] Data file does not exist')\n self.tx_clip_interval.configure(text=\"Data file does not exist\")\n\n self.win.update()\n\n def show_clips(self, clip, correct_clip):\n N_IMAGES_PER_VIEW = 20\n\n start_frame_no = clip['clip_info'][0]\n end_frame_no = clip['clip_info'][1]\n print(start_frame_no, end_frame_no) # start and end frame no\n\n # get frames\n resized_frames = []\n skip_amount = int(max((end_frame_no - start_frame_no) / N_IMAGES_PER_VIEW, 1))\n self.video_wrapper.set_current_frame(start_frame_no)\n skeleton_chunk = self.skeleton.get(start_frame_no, end_frame_no)\n for i in range(end_frame_no - start_frame_no):\n ret, frame = self.video_wrapper.video.read()\n\n if i % skip_amount == 0:\n # overlay raw skeleton on the frame\n if skeleton_chunk and skeleton_chunk[i]:\n for person in skeleton_chunk[i]:\n body_pose = get_skeleton_from_frame(person)\n frame = draw_skeleton_on_image(frame, body_pose, thickness=5)\n\n if correct_clip and clip['frames']:\n # overlay selected skeleton\n\n if clip['frames'][i]:\n body_pose = get_skeleton_from_frame(clip['frames'][i])\n frame = draw_skeleton_on_image(frame, body_pose, thickness=20)\n\n resized_frame = cv2.resize(frame, (0, 0), None, .35, .35)\n resized_frames.append(resized_frame)\n\n # make summary img\n n_imgs_per_row = 4\n n_rows_per_page = 5\n frame_idx = 0\n page_img = []\n for row_idx in range(n_rows_per_page):\n row_img = []\n for col_idx in range(n_imgs_per_row):\n if frame_idx >= len(resized_frames):\n break\n\n if row_img == []:\n row_img = resized_frames[frame_idx]\n else:\n row_img = np.hstack((row_img, resized_frames[frame_idx]))\n frame_idx += 1\n\n if page_img == []:\n page_img = row_img\n elif row_img != []:\n n_pad = page_img.shape[1] - row_img.shape[1]\n if n_pad > 0:\n row_img = np.pad(row_img, ((0, 0), (0, n_pad), (0, 0)), mode='constant')\n page_img = np.vstack((page_img, row_img))\n\n return page_img\n\n\nif __name__ == '__main__':\n myReviewApp = ReviewApp()\n"
},
{
"alpha_fraction": 0.6011823415756226,
"alphanum_fraction": 0.6061846017837524,
"avg_line_length": 32.83076858520508,
"blob_id": "b96b82c7d7191130a799853b3f5993f601daaa08",
"content_id": "16f9fd006d1f29376f96fe28b5d25ea62fe692c5",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2199,
"license_type": "permissive",
"max_line_length": 118,
"num_lines": 65,
"path": "/script/run_gentle.py",
"repo_name": "youngwoo-yoon/youtube-gesture-dataset",
"src_encoding": "UTF-8",
"text": "# ------------------------------------------------------------------------------\n# Copyright (c) ETRI. All rights reserved.\n# Licensed under the BSD 3-Clause License.\n# This file is part of Youtube-Gesture-Dataset, a sub-project of AIR(AI for Robots) project.\n# You can refer to details of AIR project at https://aiforrobots.github.io\n# Written by Youngwoo Yoon ([email protected])\n# ------------------------------------------------------------------------------\n\nimport glob\nimport logging\nimport multiprocessing\nimport os\nimport re\nimport sys\n\nfrom tqdm import tqdm\n\nfrom config import *\nfrom make_ted_dataset import read_subtitle\nfrom config import my_config\n\nsys.path.insert(0, '../../../gentle')\nimport gentle\n\n\n# prepare gentle\nnthreads = multiprocessing.cpu_count() - 2\nlogging.getLogger().setLevel(\"WARNING\")\ndisfluencies = set(['uh', 'um'])\nresources = gentle.Resources()\n\n\ndef run_gentle(video_path, vid, result_path):\n vtt_subtitle = read_subtitle(vid)\n transcript = ''\n for i, sub in enumerate(vtt_subtitle):\n transcript += (vtt_subtitle[i].text + ' ')\n transcript = re.sub('\\n', ' ', transcript) # remove newline characters\n\n # align\n with gentle.resampled(video_path) as wav_file:\n aligner = gentle.ForcedAligner(resources, transcript, nthreads=nthreads, disfluency=False, conservative=False,\n disfluencies=disfluencies)\n result = aligner.transcribe(wav_file, logging=logging)\n\n # write results\n with open(result_path, 'w', encoding=\"utf-8\") as fh:\n fh.write(result.to_json(indent=2))\n\n\ndef main():\n videos = glob.glob(my_config.VIDEO_PATH + \"/*.mp4\")\n n_total = len(videos)\n for i, file_path in tqdm(enumerate(sorted(videos, key=os.path.getmtime))):\n vid = os.path.split(file_path)[1][-15:-4]\n print('{}/{} - {}'.format(i+1, n_total, vid))\n result_path = my_config.VIDEO_PATH + '/' + vid + '_align_results.json'\n if os.path.exists(result_path) and os.path.getsize(result_path): # existing and not empty\n print('JSON file already exists ({})'.format(vid))\n else:\n run_gentle(file_path, vid, result_path)\n\n\nif __name__ == '__main__':\n main()\n"
},
{
"alpha_fraction": 0.5539193749427795,
"alphanum_fraction": 0.5575441718101501,
"avg_line_length": 40.25233459472656,
"blob_id": "14818d32fbeffa7e1448f6cf889c487f5c06441a",
"content_id": "a54b1e1d492c55e9401d1d48b590a0c56011f954",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4414,
"license_type": "permissive",
"max_line_length": 121,
"num_lines": 107,
"path": "/script/run_clip_filtering.py",
"repo_name": "youngwoo-yoon/youtube-gesture-dataset",
"src_encoding": "UTF-8",
"text": "# ------------------------------------------------------------------------------\n# Copyright (c) ETRI. All rights reserved.\n# Licensed under the BSD 3-Clause License.\n# This file is part of Youtube-Gesture-Dataset, a sub-project of AIR(AI for Robots) project.\n# You can refer to details of AIR project at https://aiforrobots.github.io\n# Written by Youngwoo Yoon ([email protected])\n# ------------------------------------------------------------------------------\n\nfrom __future__ import unicode_literals\nimport csv\nfrom clip_filter import *\nfrom main_speaker_selector import *\nfrom config import my_config\n\nRESUME_VID = '' # resume the process from this video\n\n\ndef read_sceneinfo(filepath): # reading csv file\n with open(filepath, 'r') as csv_file:\n frame_list = [0]\n for row in csv.reader(csv_file):\n if row:\n frame_list.append((row[1]))\n frame_list[0:3] = [] # skip header\n\n frame_list = [int(x) for x in frame_list] # str to int\n\n return frame_list\n\n\ndef run_filtering(scene_data, skeleton_wrapper, video_wrapper):\n filtered_clip_data = []\n aux_info = []\n video = video_wrapper.get_video_reader()\n\n for i in range(len(scene_data) - 1): # note: last scene is not processed\n start_frame_no, end_frame_no = scene_data[i], scene_data[i + 1]\n raw_skeleton_chunk = skeleton_wrapper.get(start_frame_no, end_frame_no)\n main_speaker_skeletons = MainSpeakerSelector(raw_skeleton_chunk=raw_skeleton_chunk).get()\n\n # run clip filtering\n clip_filter = ClipFilter(video=video, start_frame_no=start_frame_no, end_frame_no=end_frame_no,\n raw_skeleton=raw_skeleton_chunk, main_speaker_skeletons=main_speaker_skeletons)\n correct_clip = clip_filter.is_correct_clip()\n\n filtering_results, message, debugging_info = clip_filter.get_filter_variable()\n filter_elem = {'clip_info': [start_frame_no, end_frame_no, correct_clip], 'filtering_results': filtering_results,\n 'message': message, 'debugging_info': debugging_info}\n aux_info.append(filter_elem)\n\n # save\n elem = {'clip_info': [start_frame_no, end_frame_no, correct_clip], 'frames': []}\n\n if not correct_clip:\n filtered_clip_data.append(elem)\n continue\n elem['frames'] = main_speaker_skeletons\n filtered_clip_data.append(elem)\n\n return filtered_clip_data, aux_info\n\n\ndef main():\n if RESUME_VID == \"\":\n skip_flag = False\n else:\n skip_flag = True\n\n for csv_path in tqdm(sorted(glob.glob(my_config.CLIP_PATH + \"/*.csv\"), key=os.path.getmtime)):\n\n vid = os.path.split(csv_path)[1][0:11]\n tqdm.write(vid)\n\n # resume check\n if skip_flag and vid == RESUME_VID:\n skip_flag = False\n\n if not skip_flag:\n scene_data = read_sceneinfo(csv_path)\n skeleton_wrapper = SkeletonWrapper(my_config.SKELETON_PATH, vid)\n video_wrapper = read_video(my_config.VIDEO_PATH, vid)\n\n if video_wrapper.height < 720:\n print('[Fatal error] wrong video size (height: {})'.format(video_wrapper.height))\n assert False\n\n if abs(video_wrapper.total_frames - len(skeleton_wrapper.skeletons)) > 10:\n print('[Fatal error] video and skeleton object have different lengths (video: {}, skeletons: {})'.format\n (video_wrapper.total_frames, len(skeleton_wrapper.skeletons)))\n assert False\n\n if skeleton_wrapper.skeletons == [] or video_wrapper is None:\n print('[warning] no skeleton or video! 
skipped this video.')\n else:\n ###############################################################################################\n filtered_clip_data, aux_info = run_filtering(scene_data, skeleton_wrapper, video_wrapper)\n ###############################################################################################\n\n # save filtered clips and aux info\n with open(\"{}/{}.json\".format(my_config.CLIP_PATH, vid), 'w') as clip_file:\n json.dump(filtered_clip_data, clip_file)\n with open(\"{}/{}_aux_info.json\".format(my_config.CLIP_PATH, vid), 'w') as aux_file:\n json.dump(aux_info, aux_file)\n\n\nif __name__ == '__main__':\n main()\n"
}
] | 14 |
BernardoPiedade/simple-password-manager
|
https://github.com/BernardoPiedade/simple-password-manager
|
3d060eebb88466ecffd699370f7ecb462eb8bbcd
|
2426fde66ce1f7ffdb4453214b24a1552e8af697
|
e1874176efdd6ee052c3f8f417a230dd1490b93f
|
refs/heads/master
| 2022-11-20T01:41:30.781484 | 2020-07-21T19:36:45 | 2020-07-21T19:36:45 | 281,477,505 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6179104447364807,
"alphanum_fraction": 0.6322388052940369,
"avg_line_length": 21.635135650634766,
"blob_id": "4928d99ef7b0440e6d25f7409654163adf445d9c",
"content_id": "d2d6144b298ad651fd6091e177ae3dbcc66cae6d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1675,
"license_type": "no_license",
"max_line_length": 129,
"num_lines": 74,
"path": "/main.py",
"repo_name": "BernardoPiedade/simple-password-manager",
"src_encoding": "UTF-8",
"text": "import mysql.connector\n\nconn = mysql.connector.connect(\n\thost=\"localhost\",\n\tuser=\"root\",\n\tpassword=\"mysql\"\n)\n\ncursor = conn.cursor()\n\ntry:\n\tcursor.execute(\"CREATE DATABASE password_manager\")\n\tcursor.execute(\"USE password_manager\")\n\tcursor.execute(\"CREATE TABLE manager (platform VARCHAR(255), password VARCHAR(255))\")\n\tprint(\"Manager created, you're good to go\")\nexcept:\n\tprint(\"Welcome back!\")\n\nconn.close()\n\ndef add_platform(platform, password):\n\tconn = mysql.connector.connect(\n host=\"localhost\",\n user=\"root\",\n password=\"mysql\",\n database=\"password_manager\"\n )\n\n\tcursor = conn.cursor()\n\tcommand = 'INSERT INTO manager (platform, password) VALUES (%s, %s);'\n\tval = (platform, password)\n\tcursor.execute(command, val)\n\tconn.commit()\n\tconn.close()\n\tprint(\"\\n\\n\"*20)\n\n\ndef get_password(platform):\n\tconn = mysql.connector.connect(\n host=\"localhost\",\n user=\"root\",\n password=\"mysql\",\n database=\"password_manager\"\n )\n\tcursor = conn.cursor()\n\tcursor.execute(\"SELECT * FROM manager WHERE platform='\"+platform+\"'\")\n\tresult = cursor.fetchall()\n\tprint(\"\\n\\n\"+ str(result) +\"\\n\\n\")\n\tconn.close()\n\n\ndef main():\n\twhile True:\n\t\tprint(\"*\"*30)\n\t\tprint(\"\\tCommands\")\n\t\tprint(\"q = Exit\")\n\t\tprint(\"get <platform> (Ex.: get youtube) = Get password from x platform\")\n\t\tprint(\"add <platform> <password> (Ex.: add youtube 123456) [DO NOT USE SPACES ON PLATFORM NAME] = Add new password to manager\")\n\t\tprint(\"*\"*30)\n\n\t\top = input(\"\\n\\n->\")\n\n\t\tx = op.split()\n\n\t\tif(x[0] == \"q\"):\n\t\t\texit()\n\t\telif(x[0] == \"get\"):\n\t\t\tget_password(x[1])\n\t\telif(x[0] == \"add\"):\n\t\t\tadd_platform(x[1], x[2])\n\n\nif __name__ == '__main__':\n\tmain()\n"
}
] | 1 |
wooogi123/pythonstudy
|
https://github.com/wooogi123/pythonstudy
|
bcd4f740e89d1cb12a7cd954d63421c7aec0afb0
|
401726e2f7045231a035b36111ebc11988e6f5e8
|
99e8aa4b74f6a2faa4e30cb2e0f6115317624b81
|
refs/heads/master
| 2018-03-25T20:43:42.707842 | 2017-05-30T12:19:34 | 2017-05-30T12:19:34 | 86,994,943 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.486887127161026,
"alphanum_fraction": 0.5199543833732605,
"avg_line_length": 18.065217971801758,
"blob_id": "087da88b6d53a74cbc7f2811d9092d187f0def18",
"content_id": "aab800a9f2c19598d1a3961cfc665a3339905a52",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 877,
"license_type": "no_license",
"max_line_length": 40,
"num_lines": 46,
"path": "/SimpleLinkedList.py",
"repo_name": "wooogi123/pythonstudy",
"src_encoding": "UTF-8",
"text": "class Node:\n def __init__(self, data, next=None):\n self.data = data\n self.next = next\n\ndef init_list():\n global n1, n2, n3, n4\n n1 = Node(1)\n n2 = Node(2)\n n3 = Node(3.33)\n n4 = Node(\"Four\")\n n1.next = n2\n n2.next = n3\n n3.next = n4\n\ndef del_node(del_data):\n global node1\n pre_node = node1\n next_node = pre_node.next\n\n if pre_node.data == del_data:\n node1 = next_node\n del pre_node\n return node1\n\n while next_node:\n if next_node.data == del_data:\n pre_node.next=next_node.next\n del next_node\n\n break\n\n pre_node = next_node\n next_node = next_node.next\n return node1\n\ndef main():\n init_list()\n node = n1\n print node, n1, n2, n3, n4\n while node:\n print node.data\n node = node.next\n\nif __name__ == \"__main__\":\n main()\n"
},
{
"alpha_fraction": 0.5744456052780151,
"alphanum_fraction": 0.575501561164856,
"avg_line_length": 31.620689392089844,
"blob_id": "526ffea3140cba92233d16b7dea524b3bdd29095",
"content_id": "8a70bd4ca77da286bcdac15a7772c11824ed9770",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 947,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 29,
"path": "/WordCount_2.py",
"repo_name": "wooogi123/pythonstudy",
"src_encoding": "UTF-8",
"text": "def getFile():\n input_file_opened = False\n while not input_file_opened:\n try:\n file_name = input('Enter input file name (with extension): ')\n input_file = open(file_name, 'r')\n input_file_opened = True\n except IOError:\n print ('Input file not found - please reenter')\n return (file_name, input_file)\n\ndef countWords(input_file):\n word_delimiters = (' ', ',', ';', ':', '.','\\n',\n '\"',\"'\", '(', ')')\n file_line = \"\"\n for line in input_file:\n file_line += line\n for word_replace in word_delimiters:\n file_line = file_line.replace(word_replace, \" \")\n return len(file_line.split())\n\nfile_name, input_file = getFile()\n\nnum_occurrences = countWords(input_file)\n\nif num_occurrences == 0:\n print('No occurrences of word', 'found in file', file_name)\nelse:\n print('The word', 'occurs', num_occurrences, 'times in file', file_name)\n\n"
},
{
"alpha_fraction": 0.5266884565353394,
"alphanum_fraction": 0.5522875785827637,
"avg_line_length": 20.600000381469727,
"blob_id": "f99aea4dc4b11d4719c4462805216b24ec488ed3",
"content_id": "87ac8892255076820de42320b3e795b56ca8849e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1836,
"license_type": "no_license",
"max_line_length": 85,
"num_lines": 85,
"path": "/LinkedListnQuickSort.py",
"repo_name": "wooogi123/pythonstudy",
"src_encoding": "UTF-8",
"text": "import re\nfrom random import randint\n\nclass Node:\n def __init__(self, data, next = None, pre = None):\n self.data = data\n self.next = next\n self.pre = pre\n\ndef init_list():\n global node1\n node1 = Node(randint(1,128)*(randint(1,8)+randint(1,4))/randint(1,2))\n\ndef delete_node(del_data):\n global node1\n pre_node = node1\n next_node = pre_node.next\n next_node.pre = pre_node\n\n if pre_node.data == del_data:\n node1 = next_node\n next_node.pre = node1\n del pre_node\n return node1\n while next_node:\n if next_node.data == del_data:\n pre_node.next = next_node.next\n next_node.next.pre = pre_node\n del next_node\n break\n pre_node = next_node\n next_node.pre = pre_node\n next_node = next_node.next\n\n return node1\n\ndef insert_node(ins_data):\n global node1\n new_node = Node(ins_data)\n new_node.next = node1\n node1.pre = new_node\n node1 = new_node\n return node1\n\ndef print_list():\n global node1\n node = node1\n while node:\n j = 1\n print str(j)+\" \"+str(node.data)\n node = node.next\n\ndef quick_sort():\n global node1\n l = list()\n r = list()\n node = node1\n while node:\n if node.data<pivot():\n l.append(node.data)\n elif node.data>pivot():\n r.append(node.data)\n node = node.next\n l.sort()\n r.sort()\n\n l.append(node1.data)\n l.extend(r)\n print l\n\ndef pivot():\n global node1\n node = node1\n pivot = (node.data + node.next.data + node.next.next.data)/3\n return pivot\n\ndef main():\n init_list()\n for i in range(2, 101):\n node = insert_node(randint(1,2)*i-(randint(1,4)+randint(1,8))/randint(1,128))\n i += 1\n quick_sort()\n\nif __name__ == \"__main__\":\n main()\n"
},
{
"alpha_fraction": 0.5412555932998657,
"alphanum_fraction": 0.5748878717422485,
"avg_line_length": 24.632183074951172,
"blob_id": "1cb0430e1b94edd6dc1cde9003cfbc31d74d5010",
"content_id": "a41537677969875d89927aded13109300cbafdbd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2236,
"license_type": "no_license",
"max_line_length": 86,
"num_lines": 87,
"path": "/pygame/main.py",
"repo_name": "wooogi123/pythonstudy",
"src_encoding": "UTF-8",
"text": "import pygame\nfrom pygame.color import Color\nfrom pygame.sprite import Sprite\nfrom pygame.surface import Surface\nfrom runner import Runner\n\nFPS = 28\n\nclass Bullet(Sprite):\n def __init__(self):\n Sprite.__init__(self)\n self.image = Surface((20, 20))\n pygame.draw.rect(self.image,\n Color(255, 0, 0),\n (0, 0, 20, 20))\n self.rect = self.image.get_rect()\n\n def update(self):\n self.rect.x -= 3\n\nif __name__ == \"__main__\":\n pygame.init()\n size = (400, 300)\n screen = pygame.display.set_mode(size)\n pygame.display.set_caption(\"Runner Animation\")\n\n run = True\n clock = pygame.time.Clock()\n\n background_img = pygame.image.load(\"background.bmp\")\n\n runner1 = Runner()\n runner1.rect.x = 0\n runner1.rect.y = 180\n\n runner2 = Runner()\n runner2.rect.x = 130\n runner2.rect.y = 180\n\n runner3 = Runner()\n runner3.rect.x = 250\n runner3.rect.y = 180\n\n runner_group = pygame.sprite.Group()\n runner_group.add(runner1)\n runner_group.add(runner2)\n runner_group.add(runner3)\n\n bullet = Bullet()\n bullet.rect.x = screen.get_width()\n bullet.rect.y = 200\n bullet_group = pygame.sprite.Group()\n bullet_group.add(bullet)\n\n while run:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n run = False\n\n if runner1.rect.x > screen.get_width():\n runner1.rect.x = -16\n else:\n runner1.rect.x += 5\n\n if runner2.rect.x > screen.get_width():\n runner2.rect.x = -16\n else:\n runner2.rect.x += 3\n\n if runner3.rect.x >screen.get_width():\n runner3.rect.x = -16\n else:\n runner3.rect.x += 1\n\n runner_group.update()\n bullet_group.update()\n collided = pygame.sprite.groupcollide(bullet_group, runner_group, False, True)\n if len(collided.items()) > 0:\n print(\"남은 Runner 수: {0}\".format(len(runner_group.sprites())))\n bullet.rect.x = screen.get_width()\n\n screen.blit(background_img, screen.get_rect())\n runner_group.draw(screen)\n bullet_group.draw(screen)\n pygame.display.flip()\n\n clock.tick(FPS)\n"
},
{
"alpha_fraction": 0.5598651170730591,
"alphanum_fraction": 0.5674536228179932,
"avg_line_length": 27.261905670166016,
"blob_id": "afaa0c63f7386045e4f1e7f1cf879102340f5818",
"content_id": "cacbf2ba7ad7e08f8c74530578d3f97961c56c43",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1186,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 42,
"path": "/Findroadcurve45.py",
"repo_name": "wooogi123/pythonstudy",
"src_encoding": "UTF-8",
"text": "import os\n\nfilelist = list()\n\ndef search(dirname, keyword):\n try:\n filenames = os.listdir(dirname)\n for filename in filenames:\n full_filename = os.path.join(dirname, filename)\n if os.path.isdir(full_filename):\n search(full_filename)\n else:\n ext = os.path.splitext(full_filename)[0]\n if ext.find(keyword) != -1:\n print(full_filename.split(\"\\\\\")[-1])\n print(full_filename)\n filelist.append(str(full_filename.split(\"\\\\\")[-1]))\n except PermissionError:\n pass\n\ndef findKeyword():\n keyword = input(\"Find Keyword: \")\n return keyword\n\ndef moveFile(file, dirname, originaldir):\n fulldirname = str(originaldir)+str(\"\\\\\")+str(file)\n print(fulldirname)\n movedirname = str(dirname)+str(\"\\\\\")+str(file)\n if not os.path.isdir(dirname):\n os.mkdir(dirname)\n os.rename(fulldirname, movedirname)\n\ndef main():\n a1 = findKeyword()\n search(input(\"Search: \"), a1)\n b1 = input(\"Move file directory: \")\n print(filelist)\n for i in filelist:\n moveFile(i, b1, a1)\n\nif __name__ == \"__main__\":\n main()"
}
] | 5 |
matipark/javacafe-tensorflow
|
https://github.com/matipark/javacafe-tensorflow
|
fb22513aea44205f391d820edd88508051f54575
|
837af511f7769f8c586f4c64f17a2616cc77638e
|
5b3a28ef29696eadaebddcbcc4a7ab36e61f5e85
|
refs/heads/master
| 2020-06-12T05:33:31.757377 | 2016-11-11T06:59:01 | 2016-11-11T06:59:01 | null | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6816720366477966,
"alphanum_fraction": 0.7218649387359619,
"avg_line_length": 17.84848403930664,
"blob_id": "695aa2aa6476c6fbfca75a7dc28d8185a2aedcb8",
"content_id": "87e8e5dec162f5590c07adf349297d5e5ff8f376",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 882,
"license_type": "no_license",
"max_line_length": 106,
"num_lines": 33,
"path": "/README.md",
"repo_name": "matipark/javacafe-tensorflow",
"src_encoding": "UTF-8",
"text": "# 자바카페 텐서플로우 스터디 실습\n\n* OCR 을 텐서플로우로 해보자\n* web 으로 글자를 쓰면 인식해서 결과 도출\n\n## MNIST 모듈 사용법 \n\n* resize-script.sh 로 파일 resize\n* convert-images-to-mnist-format.py 로 MNIST 생성됨.\n* 스크립트 실행한 디렉토리 에 생성됨 \n\n\n## Installation\n\n```bash\nbrew install imagemagick\npip install pillow\nbrew search imagick\nbrew install php{version)-imagick\npip install flask\nchmod 755 ./mnist/resize-script.sh\n```\n## Run Sample\n\n## 프로그램 동작 상상도\n\n\n\n## 오늘 해볼 것\n\n* 그림판에서 이미지 데이터 FLASK 로 전송 --> 현재는 로그만 찍음 --> Flask API 에서 특정경로에 파일저장\n\n* mnist 생성된 이미지로 텐서플로우 연동해보기\n"
},
{
"alpha_fraction": 0.606955885887146,
"alphanum_fraction": 0.6133133769035339,
"avg_line_length": 30.0930233001709,
"blob_id": "96e98c21c59c613a1ba3bf1d36a14030aba8b893",
"content_id": "6c84440e6be73b717eb053d3bc0d581d44ddc38f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2674,
"license_type": "no_license",
"max_line_length": 102,
"num_lines": 86,
"path": "/FlaskApi/main.py",
"repo_name": "matipark/javacafe-tensorflow",
"src_encoding": "UTF-8",
"text": "from flask import Flask\nimport flask\nimport os\nimport sys\nimport base64\n\nfrom datetime import timedelta\nfrom flask import make_response, request, current_app\nfrom functools import update_wrapper\nfrom mnist_javacafe_study import *\n\ndef crossdomain(origin=None, methods=None, headers=None,\n max_age=21600, attach_to_all=True,\n automatic_options=True):\n if methods is not None:\n methods = ', '.join(sorted(x.upper() for x in methods))\n if headers is not None and not isinstance(headers, str):\n headers = ', '.join(x.upper() for x in headers)\n if not isinstance(origin, str):\n origin = ', '.join(origin)\n if isinstance(max_age, timedelta):\n max_age = max_age.total_seconds()\n\n def get_methods():\n if methods is not None:\n return methods\n\n options_resp = current_app.make_default_options_response()\n return options_resp.headers['allow']\n\n def decorator(f):\n def wrapped_function(*args, **kwargs):\n if automatic_options and request.method == 'OPTIONS':\n resp = current_app.make_default_options_response()\n else:\n resp = make_response(f(*args, **kwargs))\n if not attach_to_all and request.method != 'OPTIONS':\n return resp\n\n h = resp.headers\n\n h['Access-Control-Allow-Origin'] = origin\n h['Access-Control-Allow-Methods'] = get_methods()\n h['Access-Control-Max-Age'] = str(max_age)\n if headers is not None:\n h['Access-Control-Allow-Headers'] = headers\n return resp\n\n f.provide_automatic_options = False\n return update_wrapper(wrapped_function, f)\n return decorator\n\napp = Flask(__name__)\n\[email protected]_request\ndef log_request_info():\n app.logger.debug('Headers: %s', request.headers)\n app.logger.debug('Body: %s', request.get_data())\n\[email protected](\"/hello\", methods=['GET', 'POST'])\ndef hello():\n return \"Hello World!\"\n\[email protected](\"/upload\", methods=['GET', 'POST'])\n@crossdomain(origin='*')\ndef upload():\n with open(\"../TensorFlow-mnist/blog/canvas.png\", \"wb\") as fh:\n fh.write(base64.b64decode(request.form.get(\"imgBase64\").replace(\"data:image/png;base64,\",\"\")))\n print('New image created.')\n\n # os.system(\"sh ../mnist/resize-script.sh\")\n # return \"Hello World!\"\n # print os.system(\"python ./mnist_javacafe_study.py\")\n result = ocr()\n print result\n return flask.jsonify(**result)\n\n # return \"11\"\n\[email protected](\"/test\", methods=['GET', 'POST'])\ndef test():\n\treturn \"\" + os.system(\"./test.py\")\n\napp.debug = True\nif __name__ == \"__main__\":\n app.run()\n"
},
{
"alpha_fraction": 0.47456493973731995,
"alphanum_fraction": 0.5582329034805298,
"avg_line_length": 28.294116973876953,
"blob_id": "529dcba2962409a67a7e576a6b33453db007284e",
"content_id": "f00bc3c6747c0eedbf2a9a0f4431eee4132ca91f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1760,
"license_type": "no_license",
"max_line_length": 91,
"num_lines": 51,
"path": "/TensorFlow-example/2_multi_feature_linear_regression_mat.py",
"repo_name": "matipark/javacafe-tensorflow",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\nimport tensorflow as tf\n\n# ======== 학습데이터 ========\nx_data = [\n [1, 1, 1, 1, 1, 1, 1, 1, 1, 1], # 상수가 될 b\n [54, 8, 30, 24, 46, 12, 20, 37, 40, 48], # 학습시간\n [12, 0, 12, 15, 12, 0, 36, 12, 12, 24] # 해외거주\n]\n\n# 토익점수\ny_data = [800, 320, 600, 630, 700, 300, 920, 720, 700, 920]\n\n# ======== 초기값 설정 =========\nW = tf.Variable(tf.random_uniform([1, 3], -1.0, 1.0))\n\n# ======== 변수 설정 ========\nX = tf.placeholder(tf.float32)\nY = tf.placeholder(tf.float32)\n\n# ======== Multiple Linear Regression 에서 학습될 가설 ========\nhypothesis = tf.matmul(W, X)\n\n# ======== Multiple Linear Regression 에서 학습될 가설의 Cost Function ========\ncost = tf.reduce_mean(tf.square(hypothesis - Y))\n\n# ======== Gradient Descent Algorithm 에서 Step ========\nlearning_rate = 0.0006\n\n# ======== 텐서플로우에 내장된 GradientDescentOptimizer ========\noptimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)\n\ninit = tf.initialize_all_variables()\n\nsess = tf.Session()\nsess.run(init)\n\n# ======== 학습 시작 ========\nfor step in xrange(40000):\n sess.run(optimizer, feed_dict={X: x_data, Y: y_data})\n if step % 100 == 0:\n print step, sess.run(cost, feed_dict={X: x_data, Y: y_data}), sess.run(W)\n\n# ======== 학습된 우리의 프로그램에 예측 문의 ========\n\n# 30 시간 공부하고 10개월 해외에서 거주했을 경우 토익점수 예측\nprint \"30시간, 10개월 >> \" + str(sess.run(hypothesis, feed_dict={X: [[1], [30], [10]]})) + \" 점\"\n\n# 2 시간 공부하고 24개월 해외에서 거주했을 경우 토익점수 예측\nprint \"2시간, 24개월 >> \" + str(sess.run(hypothesis, feed_dict={X: [[1], [2], [24]]})) + \" 점\"\n"
},
{
"alpha_fraction": 0.4950980246067047,
"alphanum_fraction": 0.5857843160629272,
"avg_line_length": 30.384614944458008,
"blob_id": "8e6473ce536e232b987be0cebecd43bc50e46e66",
"content_id": "a7b8845692bb5c5fe5be04baa00aa5067e3306bd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1892,
"license_type": "no_license",
"max_line_length": 124,
"num_lines": 52,
"path": "/TensorFlow-example/2_multi_feature_linear_regression.py",
"repo_name": "matipark/javacafe-tensorflow",
"src_encoding": "UTF-8",
"text": "#-*- coding: utf-8 -*-\n\nimport tensorflow as tf\n\n# ======== 학습데이터 ========\n# 학습 시간\nx1_data = [54, 8, 30, 24, 46, 12, 20, 37, 40, 48]\n# 해외거주(월)\nx2_data = [12, 0, 12, 15, 12, 0, 36, 12, 12, 24]\n# 토익점수\ny_data = [800, 320, 600, 630, 700, 300, 920, 720, 700, 920]\n\n# ======== 초기값 설정 =========\nW1 = tf.Variable(tf.random_uniform([1], -1.0, 1.0))\nW2 = tf.Variable(tf.random_uniform([1], -1.0, 1.0))\nb = tf.Variable(tf.random_uniform([1], -1.0, 1.0))\n\n# ======== 변수 설정 ========\nX1 = tf.placeholder(tf.float32)\nX2 = tf.placeholder(tf.float32)\nY = tf.placeholder(tf.float32)\n\n# ======== Multiple Linear Regression 에서 학습될 가설 ========\nhypothesis = W1 * X1 + W2 * X2 + b\n\n# ======== Multiple Linear Regression 에서 학습될 가설의 Cost Function ========\ncost = tf.reduce_mean(tf.square(hypothesis - Y))\n\n# ======== Gradient Descent Algorithm 에서 Step ========\nlearning_rate = 0.0006\n\n# ======== 텐서플로우에 내장된 GradientDescentOptimizer ========\noptimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)\n\ninit = tf.initialize_all_variables()\n\nsess = tf.Session()\nsess.run(init)\n\n# ======== 학습 시작 ========\nfor step in xrange(40000):\n sess.run(optimizer, feed_dict={X1: x1_data, X2: x2_data, Y: y_data})\n if step % 100 == 0:\n print step, sess.run(cost, feed_dict={X1: x1_data, X2: x2_data, Y: y_data}), sess.run(W1), sess.run(W2), sess.run(b)\n\n# ======== 학습된 우리의 프로그램에 예측 문의 ========\n\n# 30 시간 공부하고 10개월 해외에서 거주했을 경우 토익점수 예측\nprint \"30시간, 10개월 >> \" + str(sess.run(hypothesis, feed_dict={X1: 30, X2: 10})) + \" 점\"\n\n# 2 시간 공부하고 24개월 해외에서 거주했을 경우 토익점수 예측\nprint \"2시간, 24개월 >> \" + str(sess.run(hypothesis, feed_dict={X1: 2, X2: 24})) + \" 점\"\n"
},
{
"alpha_fraction": 0.6266666650772095,
"alphanum_fraction": 0.6495237946510315,
"avg_line_length": 23.952381134033203,
"blob_id": "45103175b983959c89fc3cae6164ea32ec603d9b",
"content_id": "db6d81dd8d8aca1268170f1c133f16016ee36c61",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 525,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 21,
"path": "/mnist/resize-script.sh",
"repo_name": "matipark/javacafe-tensorflow",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\n#simple script for resizing images in all class directories\n#also reformats everything from whatever to png\n\nif [ `ls ../upload/*.jpg 2> /dev/null | wc -l ` -gt 0 ]; then\necho hi\nfor file in ../upload/*.jpg; do\nconvert \"$file\" -resize 28x28\\! \"${file%.*}.png\"\nfile \"$file\" #uncomment for testing\nrm \"$file\"\ndone\nfi\n\nif [ `ls ../upload/*.png 2> /dev/null | wc -l ` -gt 0 ]; then\necho hi\nfor file in ../upload/*.png; do\nconvert \"$file\" -resize 28x28\\! \"${file%.*}.png\"\nfile \"$file\" #uncomment for testing\ndone\nfi\n\n"
},
{
"alpha_fraction": 0.5092936754226685,
"alphanum_fraction": 0.5762081742286682,
"avg_line_length": 27.02083396911621,
"blob_id": "574d5e0540f0d669dbc27f8eeea451d8db38fcf3",
"content_id": "1e29e0c018578648bab5e62a62f371923b15da6b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1543,
"license_type": "no_license",
"max_line_length": 94,
"num_lines": 48,
"path": "/TensorFlow-example/1_linear_regression.py",
"repo_name": "matipark/javacafe-tensorflow",
"src_encoding": "UTF-8",
"text": "#-*- coding: utf-8 -*-\n\nimport tensorflow as tf\n\n# ======== 학습데이터 ========\n# 학습시간\nx_data = [54, 8, 30, 24, 46, 12, 20, 37, 40, 48]\n# 토익점수\ny_data = [800, 320, 600, 630, 700, 680, 730, 720, 700, 920]\n\n# ======== 초기값 설정 =========\nW = tf.Variable(tf.random_uniform([1], -1.0, 1.0))\nb = tf.Variable(tf.random_uniform([1], -1.0, 1.0))\n\n# ======== 변수 설정 ========\nX = tf.placeholder(tf.float32)\nY = tf.placeholder(tf.float32)\n\n# ======== Linear Regression 에서 학습될 가설 ========\nhypothesis = W * X + b\n\n# ======== Linear Regression 에서 학습될 가설의 Cost Function ========\ncost = tf.reduce_mean(tf.square(hypothesis - Y))\n\n# ======== Gradient Descent Algorithm 에서 Step ========\nlearning_rate = 0.0008\n\n# ======== 텐서플로우에 내장된 GradientDescentOptimizer ========\noptimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)\n\ninit = tf.initialize_all_variables()\n\nsess = tf.Session()\nsess.run(init)\n\n# ======== 학습 시작 ========\nfor step in xrange(20000):\n sess.run(optimizer, feed_dict={X: x_data, Y: y_data})\n\n if step % 100 == 0:\n print step, sess.run(cost, feed_dict={X: x_data, Y: y_data}), sess.run(W), sess.run(b)\n\n# ======== 학습된 우리의 프로그램에 예측 문의 ========\n\n# 10 시간 공부한 경우 토익점수 예측\nprint \"10시간:\" + str(sess.run(hypothesis, feed_dict={X: 10})) + \" 점\"\n# 40 시간 공부한 경우 토익점수 예측\nprint \"40시간:\" + str(sess.run(hypothesis, feed_dict={X: 40})) + \" 점\"\n"
},
{
"alpha_fraction": 0.5468975305557251,
"alphanum_fraction": 0.5851370692253113,
"avg_line_length": 27.875,
"blob_id": "7e5369a2a2cd9ba96e448198a28c9a47ce3baf70",
"content_id": "ef25d3d679aab7cf9cee513cc3a68d77f6ef0c3b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1636,
"license_type": "no_license",
"max_line_length": 91,
"num_lines": 48,
"path": "/TensorFlow-example/2_multi_feature_linear_regression_mat_from_file.py",
"repo_name": "matipark/javacafe-tensorflow",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\nimport tensorflow as tf\nimport numpy as np\n\n# ======== 학습데이터를 파일에서 읽어옴 ========\nxy = np.loadtxt('train.csv', unpack=True, dtype='float32')\n\nx_data = xy[0:-1]\ny_data = xy[-1]\n\n# ======== 초기값 설정 =========\nW = tf.Variable(tf.random_uniform([1, 3], -1.0, 1.0))\n\n# ======== 변수 설정 ========\nX = tf.placeholder(tf.float32)\nY = tf.placeholder(tf.float32)\n\n# ======== Multiple Linear Regression 에서 학습될 가설 ========\nhypothesis = tf.matmul(W, X)\n\n# ======== Multiple Linear Regression 에서 학습될 가설의 Cost Function ========\ncost = tf.reduce_mean(tf.square(hypothesis - Y))\n\n# ======== Gradient Descent Algorithm 에서 Step ========\nlearning_rate = 0.0006\n\n# ======== 텐서플로우에 내장된 GradientDescentOptimizer ========\noptimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)\n\ninit = tf.initialize_all_variables()\n\nsess = tf.Session()\nsess.run(init)\n\n# ======== 학습 시작 ========\nfor step in xrange(40000):\n sess.run(optimizer, feed_dict={X: x_data, Y: y_data})\n if step % 100 == 0:\n print step, sess.run(cost, feed_dict={X: x_data, Y: y_data}), sess.run(W)\n\n# ======== 학습된 우리의 프로그램에 예측 문의 ========\n\n# 30 시간 공부하고 10개월 해외에서 거주했을 경우 토익점수 예측\nprint \"30시간, 10개월 >> \" + str(sess.run(hypothesis, feed_dict={X: [[1], [30], [10]]})) + \" 점\"\n\n# 2 시간 공부하고 24개월 해외에서 거주했을 경우 토익점수 예측\nprint \"2시간, 24개월 >> \" + str(sess.run(hypothesis, feed_dict={X: [[1], [2], [24]]})) + \" 점\"\n"
}
] | 7 |
kmn01/Sequence-Alignment
|
https://github.com/kmn01/Sequence-Alignment
|
bd97438fd133a4dd72d64a411c9557f852cf033e
|
447248534de4c789f8b7803a72f6be985ff5d3d7
|
5cc26f979e87784c03ed8b0bda27cc486d7667df
|
refs/heads/main
| 2023-04-21T03:53:19.414831 | 2021-05-16T16:41:37 | 2021-05-16T16:41:37 | 367,928,699 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6066252589225769,
"alphanum_fraction": 0.6451345682144165,
"avg_line_length": 31.635135650634766,
"blob_id": "073bde0f6d0be1cf74f591fc5eb89ff9e1319228",
"content_id": "a10f0e9afd7ffe8503fb7ae4cb2d10fb74dcce17",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4830,
"license_type": "no_license",
"max_line_length": 99,
"num_lines": 148,
"path": "/sequence-alignment.py",
"repo_name": "kmn01/Sequence-Alignment",
"src_encoding": "UTF-8",
"text": "# ASSUMPTION: Input sequences are either all in lower case or upper case letters\n# In traceback matrix, 1: up; 2: diagonal; 3: left\n\ndef globalAlignment(seq1, seq2):\n\n\tmatrix = [[0 for i in range(cols)] for j in range(rows)]\t#create matrix\n\tctr = 0\n\t#setting first row and column with gap penalties\n\tfor j in range(cols):\t\n\t\tmatrix[0][j] = ctr\n\t\tctr+=gap\n\n\tctr = 0\n\tfor i in range(rows):\n\t\tmatrix[i][0] = ctr\n\t\tctr+=gap\n\n\t#computing dynamic programming matrix\n\tfor i in range(rows):\n\t\tfor j in range(cols):\n\t\t\tif i>0 and j>0:\n\t\t\t\ttop = matrix[i-1][j] + gap #gap in sequence 1\n\t\t\t\tleft = matrix[i][j-1] + gap #gap in sequence 2\n\t\t\t\tif seq1[j-1] == seq2[i-1]:\t#match\n\t\t\t\t\tdiag = matrix[i-1][j-1] + match\n\t\t\t\telse:\t#mismatch\n\t\t\t\t\tdiag = matrix[i-1][j-1] + mismatch\n\n\t\t\t\t#taking max of 3 possibilities\n\t\t\t\tmatrix[i][j] = max(top, left, diag)\n\t\n\tfor row in matrix:\n\t\tprint(row)\n\n\tprint('\\nOptimal score: ')\n\tprint(matrix[rows-1][cols-1])\n\tprint('\\nAligned sequences: ')\n\t#backtrack from last element in matrix to find aligned sequences\n\treconstruct(matrix, rows-1, cols-1, '', '', '')\n\ndef localAlignment(seq1, seq2):\n\n\tmatrix = [[0 for i in range(cols)] for j in range(rows)]\t#create matrix\n\tmaxrow = 0\n\tmaxcol = 0\n\t#computing dynamic programming matrix\n\tfor i in range(rows):\n\t\tfor j in range(cols):\n\t\t\tif i>0 and j>0:\n\t\t\t\ttop = matrix[i-1][j] + gap #gap in sequence 1\n\t\t\t\tleft = matrix[i][j-1] + gap #gap in sequence 2\n\t\t\t\tif seq1[j-1] == seq2[i-1]:\t#match\n\t\t\t\t\tdiag = matrix[i-1][j-1] + match\n\t\t\t\telse:\t#mismatch\n\t\t\t\t\tdiag = matrix[i-1][j-1] + mismatch\n\n\t\t\t\t#taking max of 3 possibilities\n\t\t\t\tmaxval = max(top, left, diag)\t\n\n\t\t\t\tif maxval<0:\t#handling negative values\n\t\t\t\t\tmatrix[i][j] = 0\n\t\t\t\telse:\n\t\t\t\t\tmatrix[i][j] = maxval\n\n\t\t\t\t#finding position in matrix with max value\n\t\t\t\tif matrix[i][j] >= matrix[maxrow][maxcol]:\t\n\t\t\t\t\tmaxrow = i\n\t\t\t\t\tmaxcol = j\n\t\n\tfor row in matrix:\n\t\tprint(row)\n\n\tprint('\\nOptimal score: ')\n\tprint(matrix[maxrow][maxcol])\n\tprint('\\nAligned sequences: ')\n\n\t#backtrack from max element to find aligned sequences\n\tfor i in range(rows):\n\t\tfor j in range(cols): \n\t\t\tif matrix[i][j] == matrix[maxrow][maxcol]:\n\t\t\t\treconstructLocal(matrix, i, j, '', '', '') \n\ndef reconstruct(matrix, rowpos, colpos, a1, a2, a3):\t#generates optimal aligments\n\tif rowpos==0 and colpos==0:\t#base case\n\t\tprint(a1)\n\t\tprint(a2)\n\t\tprint(a3)\n\t\treturn \n\n\tif rowpos==0 and colpos!=0:\n\t\tif matrix[rowpos][colpos-1] == matrix[rowpos][colpos] - gap:\n\t\t\treconstruct(matrix, rowpos, colpos-1, seq1[colpos-1] + a1, ' ' + a2, '_' + a3)\n\t\t\treturn\n\n\tif rowpos!=0 and colpos==0:\n\t\tif matrix[rowpos-1][colpos] == matrix[rowpos][colpos] - gap:\n\t\t\treconstruct(matrix, rowpos-1, colpos, '_' + a1, ' ' + a2, seq2[rowpos-1] + a3)\n\t\t\treturn \n\n\tif seq1[colpos-1] == seq2[rowpos-1]:\n\t\tif matrix[rowpos-1][colpos-1] == matrix[rowpos][colpos] - match:\n\t\t\treconstruct(matrix, rowpos-1, colpos-1, seq1[colpos-1] + a1, '|' + a2, seq2[rowpos-1] + a3)\n\n\tif seq1[colpos-1] != seq2[rowpos-1]:\n\t\tif matrix[rowpos-1][colpos-1] == matrix[rowpos][colpos] - mismatch:\n\t\t\treconstruct(matrix, rowpos-1, colpos-1, seq1[colpos-1] + a1, ' ' + a2, seq2[rowpos-1] + a3)\n\n\tif matrix[rowpos-1][colpos] == matrix[rowpos][colpos] - gap:\n\t\treconstruct(matrix, 
rowpos-1, colpos, '_' + a1, ' ' + a2, seq2[rowpos-1] + a3)\n\n\tif matrix[rowpos][colpos-1] == matrix[rowpos][colpos] - gap:\n\t\treconstruct(matrix, rowpos, colpos-1, seq1[colpos-1] + a1, ' ' + a2, '_' + a3)\n\n\ndef reconstructLocal(matrix, rowpos, colpos, a1, a2, a3):\t#generates optimal aligments\n\tif matrix[rowpos][colpos] == 0:\t#base case\n\t\t\n\t\tprint(a1)\n\t\tprint(a2)\n\t\tprint(a3)\n\t\treturn \n\n\tif seq1[colpos-1] == seq2[rowpos-1]:\n\t\tif matrix[rowpos-1][colpos-1] == matrix[rowpos][colpos] - match:\n\t\t\treconstructLocal(matrix, rowpos-1, colpos-1, seq1[colpos-1] + a1, '|' + a2, seq2[rowpos-1] + a3)\n\n\tif seq1[colpos-1] != seq2[rowpos-1]:\n\t\tif matrix[rowpos-1][colpos-1] == matrix[rowpos][colpos] - mismatch:\n\t\t\treconstructLocal(matrix, rowpos-1, colpos-1, seq1[colpos-1] + a1, ' ' + a2, seq2[rowpos-1] + a3)\n\n\tif matrix[rowpos-1][colpos] == matrix[rowpos][colpos] - gap:\n\t\treconstructLocal(matrix, rowpos-1, colpos, '_' + a1, ' ' + a2, seq2[rowpos-1] + a3)\n\n\tif matrix[rowpos][colpos-1] == matrix[rowpos][colpos] - gap:\n\t\treconstructLocal(matrix, rowpos, colpos-1, seq1[colpos-1] + a1, ' ' + a2, '_' + a3)\n\n\nseq1 = list(input('Sequence 1: '))\t#input sequence 1\nseq2 = list(input('Sequence 2: '))\t#input sequence 2\nmatch = int(input('Enter scoring function.\\nMatch: '))\t#input scoring function\nmismatch = int(input('Mismatch: '))\ngap = int(input('Gap: '))\ncols = len(seq1) + 1\nrows = len(seq2) + 1 \nprint('\\nGlobal Alignment:\\n')\nglobalAlignment(seq1, seq2)\t#compute global alignment using dynamic programming\nprint('\\nLocal Alignment:\\n')\nlocalAlignment(seq1, seq2)\t#compute local alignment using dynamic programming\n"
},
{
"alpha_fraction": 0.8409090638160706,
"alphanum_fraction": 0.8409090638160706,
"avg_line_length": 65,
"blob_id": "c7c584aa4f7b1c8ea389b7e7e6ddccb968d72d34",
"content_id": "dd3c5f7ebcc02a1880bb24e65a5b67ef7993cab1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 132,
"license_type": "no_license",
"max_line_length": 110,
"num_lines": 2,
"path": "/README.md",
"repo_name": "kmn01/Sequence-Alignment",
"src_encoding": "UTF-8",
"text": "# Sequence-Alignment\nImplementation of dynamic programming algorithm to find the best local and global alignments of DNA sequences.\n"
}
] | 2 |
tjsousa/dotfiles
|
https://github.com/tjsousa/dotfiles
|
771d1d26bb7063dbc0fd7a5f89bd8d39f79be224
|
d5e3e5721a4dcc5d92260719d66e6ea2e2f971b9
|
ab0229cf01f9acbf812d545d67ae5f342122be5f
|
refs/heads/master
| 2020-04-12T16:03:50.888143 | 2018-12-18T22:08:47 | 2018-12-18T22:08:47 | 12,460,744 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6762948036193848,
"alphanum_fraction": 0.6802788972854614,
"avg_line_length": 29.42424201965332,
"blob_id": "6a3d17fa315463312569a5d91d1492a6b13b7907",
"content_id": "83c029adb5f132f4fd94405af67c96627b879b2e",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1004,
"license_type": "permissive",
"max_line_length": 101,
"num_lines": 33,
"path": "/qutebrowser/config.py",
"repo_name": "tjsousa/dotfiles",
"src_encoding": "UTF-8",
"text": "import dracula.draw\n\n# Use the Dracula theme: https://github.com/evannagle/qutebrowser-dracula-theme/\ndracula.draw.blood(c, {\n 'spacing': {\n 'vertical': 6,\n 'horizontal': 8\n },\n 'font': {\n 'family': 'Menlo, Terminus, Monaco, Monospace',\n 'size': 10\n }\n})\n\nc.auto_save.session = True\nc.scrolling.smooth = True\nc.session.lazy_restore = True\nc.content.autoplay = False\nc.qt.highdpi = True\n\n# Better default fonts\nc.fonts.web.family.standard = \"Bitstream Vera Sans\"\nc.fonts.web.family.serif = \"Bitstream Vera Serif\"\nc.fonts.web.family.sans_serif = \"Bitstream Vera Sans\"\nc.fonts.web.family.fixed = \"Fira Mono\"\n\n# Make Ctrl+g quit everything like in Emacs\nconfig.bind('<Ctrl-g>', 'leave-mode', mode='insert')\nconfig.bind('<Ctrl-g>', 'leave-mode', mode='command')\nconfig.bind('<Ctrl-g>', 'leave-mode', mode='prompt')\nconfig.bind('<Ctrl-g>', 'leave-mode', mode='hint')\n\n# More binding hints here: https://gitlab.com/Kaligule/qutebrowser-emacs-config/blob/master/config.py\n"
},
{
"alpha_fraction": 0.5811359286308289,
"alphanum_fraction": 0.5953347086906433,
"avg_line_length": 25.62162208557129,
"blob_id": "e0d7f552fa7d4f065410580d3f35421e7ef79ba7",
"content_id": "ceed0ece979131c1f96ee599af77fa24c406ac07",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 986,
"license_type": "permissive",
"max_line_length": 114,
"num_lines": 37,
"path": "/bootstrap.sh",
"repo_name": "tjsousa/dotfiles",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n# Basic bootstrapping script for daviwil's dotfiles\n\nfunction bootstrap_mac {\n\n # Is Homebrew installed?\n if which brew 2> /dev/null; then\n echo \"* Homebrew already installed\"\n else\n echo \"Can't find Homebrew\"\n echo \"To install it open a Terminal window and type :\"\n echo /usr/bin/ruby -e \\\"\\$\\(curl\\ \\-fsSL\\ https\\:\\/\\/raw\\.github\\.com\\/Homebrew\\/homebrew\\/go\\/install\\)\\\"\n fi\n}\n\nif [ \"$(uname -s)\" == \"Darwin\" ]; then\n # Bootstrap Mac OS X\n echo \"\"\n echo -e \"\\033[34mConfiguring for Mac OS X...\\033[0m\"\n echo \"\"\n bootstrap_mac\n\nelif [ \"$(expr substr $(uname -s) 1 5)\" == \"Linux\" ]; then\n # Bootstrap Linux\n # TODO: Ubuntu vs other distro?a\n echo \"TODO: Bootstrap Linux\"\n\nelif [ \"$(expr substr $(uname -s) 1 6)\" == \"CYGWIN\" ]; then\n # Bootstrap Cygwin\n # TODO: Steps\n # - Set up apt-cyg\n # - Install git, zsh, wget, etc\n # - Build tmux\n echo \"TODO: Bootstrap Cygwin\"\nfi\n\n# Pull github repo\n\n"
},
{
"alpha_fraction": 0.7714285850524902,
"alphanum_fraction": 0.7714285850524902,
"avg_line_length": 16.5,
"blob_id": "3b5dc60716d628e845b3fc24a8b9186e20201d9f",
"content_id": "a7870dcecf86a1667274bd370021020c42f0e21a",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 35,
"license_type": "permissive",
"max_line_length": 23,
"num_lines": 2,
"path": "/zsh/.zshenv",
"repo_name": "tjsousa/dotfiles",
"src_encoding": "UTF-8",
"text": "ZDOTDIR=~/.dotfiles/zsh\nEDITOR=vim\n"
},
{
"alpha_fraction": 0.689306378364563,
"alphanum_fraction": 0.689306378364563,
"avg_line_length": 28.446807861328125,
"blob_id": "963517ca13d32fa3bbd697f8e9b35d8819323fa8",
"content_id": "b6b4a93d03007fb18cd774e4acefe73b9a8adb6e",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 1384,
"license_type": "permissive",
"max_line_length": 117,
"num_lines": 47,
"path": "/archive/conkeror/.conkerorrc",
"repo_name": "tjsousa/dotfiles",
"src_encoding": "UTF-8",
"text": "// -*-js-*-\n\n// Set up a config reload key binding\ninteractive(\n \"reload-config\", \n \"reload conkerorrc\",\n function(I) {\n\tload_rc();\n\tI.window.minibuffer.message(\"config reloaded\");\n }\n);\ndefine_key(default_global_keymap, \"C-c r\", \"reload-config\");\n\n// Load urls from the command line in new buffers\nurl_remoting_fn = load_url_in_new_buffer;\n\n// Use emacs as external editor\n// TODO: Set up emacs daemon\n//editor_shell_command = \"emacsclient\";\n//view_source_use_external_editor = true;\n\n// Turn on session management and session auto-save\nrequire(\"session.js\");\nsession_auto_save_mode();\nsession_auto_save_auto_load = true;\n\n// Enable org-mode capturing of links\nfunction org_capture (url, title, window) {\n var cmd_str = 'emacsclient \\\"org-protocol://capture:/w/'+url+'/'+title+'/\"';\n if (window != null) {\n\twindow.minibuffer.message('Issuing ' + cmd_str);\n }\n shell_command_blind(cmd_str);\n}\ninteractive(\n \"org-capture\", \n \"Clip url, title, and selection to capture via org-protocol\",\n function (I) {\n\torg_capture(encodeURIComponent(I.buffer.display_uri_string), encodeURIComponent(I.buffer.document.title), I.window);\n });\n\ndefine_key(content_buffer_normal_keymap, \"C-c c\", \"org-capture\");\n\n// Show favicons in the modeline\nrequire(\"favicon\");\nadd_hook(\"mode_line_hook\", mode_line_adder(buffer_icon_widget), true);\nread_buffer_show_icons = true;\n"
},
{
"alpha_fraction": 0.66458660364151,
"alphanum_fraction": 0.7301092147827148,
"avg_line_length": 79.125,
"blob_id": "940a3b1820c3984b74cda87161ec65570dc1fb1b",
"content_id": "2c98eedc640025dfa90eeb057f638513692665c6",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 641,
"license_type": "permissive",
"max_line_length": 261,
"num_lines": 8,
"path": "/screen-layouts/x1/docked.sh",
"repo_name": "tjsousa/dotfiles",
"src_encoding": "UTF-8",
"text": "#!/bin/sh\nxrandr --output VIRTUAL1 --off --output eDP1 --mode 2560x1440 --pos 3840x416 --rotate normal --output DP1 --off --output HDMI1 --off --output DP1-3 --off --output DP1-2 --off --output DP1-1 --primary --mode 3840x2160 --pos 0x0 --rotate normal --output DP2 --off\n\ni3-msg -q \"workspace --no-auto-back-and-forth number 1; move workspace to output primary; workspace back_and_forth\"\ni3-msg -q \"workspace --no-auto-back-and-forth number 2; move workspace to output primary; workspace back_and_forth\"\ni3-msg -q \"workspace --no-auto-back-and-forth number 3; move workspace to output primary; workspace back_and_forth\"\n\nnitrogen --restore\n"
},
{
"alpha_fraction": 0.7424242496490479,
"alphanum_fraction": 0.7424242496490479,
"avg_line_length": 32.16666793823242,
"blob_id": "cef182c5a0fefe9c55451cc330c715647579fb83",
"content_id": "90eb260818570d999d73598df7aa7026cbdccf62",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 198,
"license_type": "permissive",
"max_line_length": 102,
"num_lines": 6,
"path": "/readme.md",
"repo_name": "tjsousa/dotfiles",
"src_encoding": "UTF-8",
"text": "daviwil's dotfiles\n==================\n\nConfiguration files for various tools and environments.\n\nReleased under the [MIT License](./LICENSE) unless otherwise specified by license files in subfolders."
},
{
"alpha_fraction": 0.6903553009033203,
"alphanum_fraction": 0.6954314708709717,
"avg_line_length": 23.625,
"blob_id": "0a153415678a5830d0e80a6a42306ee880ea4be7",
"content_id": "06882b57677075a6d5e0b599f4b1fc2211ff5a00",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 197,
"license_type": "permissive",
"max_line_length": 53,
"num_lines": 8,
"path": "/mail/fetch_mail.sh",
"repo_name": "tjsousa/dotfiles",
"src_encoding": "UTF-8",
"text": "# This is meant to be run by cron\n# Run `crontab -e` and add the following line:\n# */3 * * * * /home/user/.dotfiles/mail/fetch_mail.sh\n\nkillall offlineimap\nofflineimap -u quiet\n\n# TODO: Run tagger\n"
}
] | 7 |
nikheelpandey/StatoilChallangeKaggle
|
https://github.com/nikheelpandey/StatoilChallangeKaggle
|
abdff294e774fecb311e55d42584c82b74235ece
|
a051fb3cc1abb6237ef19296d9aaec91ade707d9
|
a1cd5a24c28c2acc214966614792ed1cf29d9246
|
refs/heads/master
| 2021-09-06T01:47:53.711753 | 2018-02-01T11:58:22 | 2018-02-01T11:58:22 | null | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.574471652507782,
"alphanum_fraction": 0.5997670292854309,
"avg_line_length": 27.87980842590332,
"blob_id": "c2803ee14a5654cd0701d45db15ffacd33db29f2",
"content_id": "22be1c9a7b5aefa2064a4ae0e275d8f7b38efc9f",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6009,
"license_type": "permissive",
"max_line_length": 150,
"num_lines": 208,
"path": "/agument.py",
"repo_name": "nikheelpandey/StatoilChallangeKaggle",
"src_encoding": "UTF-8",
"text": "\n# coding: utf-8\n\n# In[1]:\n\n\nimport cv2\nimport numpy as np\n\n\n# In[3]:\n\n\ndef rotate_image(img, angle = 20):\n \n '''a function to rotate image by a given degree'''\n \n # rotate image\n original = img.copy()\n\n M_rotate = cv2.getRotationMatrix2D((37,37),angle,1)\n img_new = cv2.warpAffine(img,M_rotate,(75,75))\n \n length_row = 0\n length_column = 0\n boundary_step = 5\n \n for i in range(len(img_new)):\n if img_new[0,i]!=float(0.0):\n length_row = i\n break\n for i in range(len(img_new)):\n if img_new[i,0]!=float(0.0):\n length_column = i\n break\n \n # subsitute the padding from original image\n img_new[:length_column+boundary_step,:length_row+boundary_step] = original[:length_column+boundary_step,:length_row+boundary_step] \n \n img_new[-(length_row+boundary_step):,:length_column+boundary_step] = original[-(length_row+boundary_step):,:length_column+boundary_step]\n \n img_new[:length_row+boundary_step,-(length_column+boundary_step):] = original[:length_row+boundary_step,-(length_column+boundary_step):]\n \n img_new[-(length_column+boundary_step):,-(length_row+boundary_step):] = original[-(length_column+boundary_step):,-(length_row+boundary_step):]\n \n return img_new\n\n\n# In[11]:\n\n\ndef translate_negative_diagonal(image, shift_diagonal = 5):\n \n '''a function to translate image along negative diagonal'''\n \n # translate image along negative diagonal\n img = image.copy()\n \n if shift_diagonal<0:\n hor_slice = img[:-shift_diagonal,:].copy()\n ver_slice = img[:,shift_diagonal:].copy()\n if shift_diagonal>0:\n hor_slice = img[-shift_diagonal:,:].copy()\n ver_slice = img[:,:shift_diagonal].copy()\n M_translate = np.float32([[1,0,shift_diagonal],[0,1,-shift_diagonal]])\n img_new = cv2.warpAffine(img,M_translate,(75,75))\n \n # subsitute the padding from original image\n if shift_diagonal<0:\n img_new[:-shift_diagonal,:] = hor_slice\n img_new[:,shift_diagonal:] = ver_slice\n if shift_diagonal>0:\n img_new[-shift_diagonal:,:] = hor_slice\n img_new[:,:shift_diagonal] = ver_slice\n \n return img_new.reshape(75,75).astype(np.float32)\n\n\n# In[4]:\n\n\ndef translate_horizontal(image, shift_horizontal = 5):\n \n '''a function to translate image horizontally by a shift'''\n \n # horizontally shift image\n img = image.copy()\n \n shift_vertical = 0; \n if shift_horizontal<0:\n image_slice = img[:,shift_horizontal:].copy()\n if shift_horizontal>0:\n image_slice = img[:,:shift_horizontal].copy()\n M_translate = np.float32([[1,0,shift_horizontal],[0,1,shift_vertical]])\n img_new = cv2.warpAffine(img,M_translate,(75,75))\n \n # subsitute the padding from original image\n if shift_horizontal<0:\n img_new[:,shift_horizontal:] = image_slice\n if shift_horizontal>0:\n img_new[:,:shift_horizontal] = image_slice\n \n return img_new.reshape(75,75).astype(np.float32)\n\n\n# In[5]:\n\n\ndef translate_vertical(image, shift_vertical = 5):\n \n '''a function to translate image vertically by a shift'''\n \n # vertically shift image\n img = image.copy()\n \n shift_horizontal = 0;\n if shift_vertical<0:\n image_slice = img[shift_vertical:,:].copy()\n if shift_vertical>0:\n image_slice = img[:shift_vertical,:].copy()\n M_translate = np.float32([[1,0,shift_horizontal],[0,1,shift_vertical]])\n img_new = cv2.warpAffine(img,M_translate,(75,75))\n \n # subsitute the padding from original image\n if shift_vertical<0:\n img_new[shift_vertical:,:] = image_slice\n if shift_vertical>0:\n img_new[:shift_vertical,:] = image_slice\n \n return img_new.reshape(75,75).astype(np.float32)\n\n\n# 
In[10]:\n\n\ndef translate_positive_diagonal(image, shift_diagonal = 5):\n \n '''a function to translate image along positive diagonal'''\n \n # translate image along positive diagonal\n img = image.copy()\n \n if shift_diagonal<0:\n hor_slice = img[shift_diagonal:,:].copy()\n ver_slice = img[:,shift_diagonal:].copy()\n else:\n hor_slice = img[:shift_diagonal,:].copy()\n ver_slice = img[:,:shift_diagonal].copy()\n M_translate = np.float32([[1,0,shift_diagonal],[0,1,shift_diagonal]])\n img_new = cv2.warpAffine(img,M_translate,(75,75))\n \n # subsitute the padding from original image\n if shift_diagonal<0:\n img_new[shift_diagonal:,:] = hor_slice\n img_new[:,shift_diagonal:] = ver_slice\n else:\n img_new[:shift_diagonal,:] = hor_slice\n img_new[:,:shift_diagonal] = ver_slice\n \n return img_new.reshape(75,75).astype(np.float32)\n\n\n# In[9]:\n\n\ndef flip(image, direction = 0):\n \n '''a function to flip image'''\n img = image.copy()\n return cv2.flip(img,direction)\n\n\n# In[7]:\n\n\ndef zoom(image, zoom_shift = 5):\n \n '''a function to zoom image'''\n \n # zoom image\n img = image.copy()\n \n # zoom in \n if zoom_shift>0:\n # scale\n img_new = cv2.resize(img, (75+zoom_shift*2,75+zoom_shift*2)) \n # crop\n img_new = img_new[zoom_shift:-zoom_shift,zoom_shift:-zoom_shift] \n # zoom out\n else:\n zoom_shift *=-1\n \n hor_top = img[:zoom_shift,:]\n hor_bottom =img[-zoom_shift:,:]\n ver_left = img[:,:zoom_shift]\n ver_right = img[:,-zoom_shift:]\n \n # scale\n img_new = cv2.resize(img, (75-zoom_shift*2,75-zoom_shift*2)) \n # zero padding\n img_new = cv2.copyMakeBorder(img_new,zoom_shift,zoom_shift,zoom_shift,zoom_shift,\n cv2.BORDER_CONSTANT,value=0.0)\n # subsitute the padding from original image\n img_new[:zoom_shift,:] = hor_top\n img_new[-zoom_shift:,:] = hor_bottom\n img_new[:,:zoom_shift] = ver_left\n img_new[:,-zoom_shift:] = ver_right \n \n return img_new.reshape(75,75).astype(np.float32)\n\n"
},
{
"alpha_fraction": 0.5308437943458557,
"alphanum_fraction": 0.5863763689994812,
"avg_line_length": 30.781021118164062,
"blob_id": "05c4e1ffae238abf48f7f16bc56028fd94aa90af",
"content_id": "d1cff16cd0bac65d9fabf72f5a42763e7756a5a8",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 21771,
"license_type": "permissive",
"max_line_length": 167,
"num_lines": 685,
"path": "/readme.md",
"repo_name": "nikheelpandey/StatoilChallangeKaggle",
"src_encoding": "UTF-8",
"text": "\n# Statoil iceberg detection using satelite data \n\nThe following notebook represent one of the solution for the Statoil challange hosted on kaggle in which we had to binary classify the data taken from satelites.\n\nThere are two highlight of my proposed solution:\n- Training data augmentation\n- CNN classifier\n\nI used 12 fold augmentation. The augmentation is done in the folowing fashion-\n- Rotation by +ve degree\n- Rotation by -ve degree\n- Horizonation translation to right\n- Horizonation translation to left\n- Vertical translation downward\n- Vertical translation upward\n- SE translation\n- NW translation\n- NE translation\n- SW translation\n- Vertical flip\n- Horizontal flip\n- Horizontal flip\n- Zoom in\n- Zoom out\n\n\nThe score without image augmentation was 0.299 (2556th rank). After augmentation the score was 0.1571(400th rank).\nCouple of tweaks and optimazation may yet be needed to generate better result.\n\n\n```python\nimport numpy as np\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom os.path import join as opj\nfrom matplotlib import pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nimport pylab\nplt.rcParams['figure.figsize'] = 20, 14\n%matplotlib inline\nimport math\n```\n\n\n```python\ntrain = pd.read_json(\"./statoil-iceberg/train.json\")\n```\n\n\n```python\ntrain[train['inc_angle']=='na'] = train[train['inc_angle']!='na']['inc_angle'].mean()\ntrain['inc_angle'] = train['inc_angle'].apply(lambda x: math.radians(x))\n```\n\n\n```python\ndef standardise_vector(vector):\n '''standardise vector'''\n standardised_vector = (np.array(vector) - np.mean(vector)) / np.std(vector)\n return standardised_vector.tolist()\n```\n\n\n```python\ntrain['band_1'] = train['band_1'].apply(standardise_vector)\ntrain['band_2'] = train['band_2'].apply(standardise_vector)\n```\n\n\n```python\ndef find_missing_data(series, shape):\n \n '''function which return the count and the index of mismatched data''' \n count = 0\n missing_list = []\n for i,x in enumerate(series): \n if np.shape(series.iloc[i]) != shape:\n missing_list.append(i)\n count += 1\n \n return missing_list, count\n```\n\n\n```python\nmissing_list1, count1 = find_missing_data(train.band_1, (5625,))\nprint(\"count: \", count1)\n```\n\n count: 133\n\n\n\n```python\nmissing_list2, count2 = find_missing_data(train.band_2, (5625,))\nprint(\"count: \", count1)\nmissing_list1 == missing_list2\n```\n\n count: 133\n\n\n\n\n\n True\n\n\n\n\n```python\ntrain =train.drop(train.index[missing_list1])\n```\n\n\n```python\ntrain.head(5)\n```\n\n\n\n\n<div>\n<style scoped>\n .dataframe tbody tr th:only-of-type {\n vertical-align: middle;\n }\n\n .dataframe tbody tr th {\n vertical-align: top;\n }\n\n .dataframe thead th {\n text-align: right;\n }\n</style>\n<table border=\"1\" class=\"dataframe\">\n <thead>\n <tr style=\"text-align: right;\">\n <th></th>\n <th>band_1</th>\n <th>band_2</th>\n <th>id</th>\n <th>inc_angle</th>\n <th>is_iceberg</th>\n </tr>\n </thead>\n <tbody>\n <tr>\n <th>0</th>\n <td>[0.01182174564467684, 0.27378275943768166, -0....</td>\n <td>[1.1573585860406173, 0.15631457838356574, -0.4...</td>\n <td>dfd5f913</td>\n <td>0.766617</td>\n <td>0.0</td>\n </tr>\n <tr>\n <th>1</th>\n <td>[0.42137323598577087, -0.43078366534450846, -0...</td>\n <td>[-2.0950953014724543, -0.8948057535299927, -0....</td>\n <td>e25388fd</td>\n <td>0.665951</td>\n <td>0.0</td>\n </tr>\n <tr>\n <th>2</th>\n <td>[-0.6969623073924855, -0.6969793944501068, -0....</td>\n <td>[-0.014649839717716016, 
0.35157292381049343, 1...</td>\n <td>58b2aaa0</td>\n <td>0.790388</td>\n <td>1.0</td>\n </tr>\n <tr>\n <th>3</th>\n <td>[0.2946591706447792, 0.04985985071106006, -0.3...</td>\n <td>[0.7023040788844376, 0.8569610477684707, 1.005...</td>\n <td>4cfc3a18</td>\n <td>0.764988</td>\n <td>0.0</td>\n </tr>\n <tr>\n <th>4</th>\n <td>[-0.31533024179271146, 0.9175181439939705, 0.9...</td>\n <td>[-0.693769690678262, -2.0346940986353044, -2.0...</td>\n <td>271f93f4</td>\n <td>0.621784</td>\n <td>0.0</td>\n </tr>\n </tbody>\n</table>\n</div>\n\n\n\n\n```python\nband_1 = np.array([np.array(band).astype(np.float32).reshape(75, 75) for band in train[\"band_1\"]])\nband_2 = np.array([np.array(band).astype(np.float32).reshape(75, 75) for band in train[\"band_2\"]])\n```\n\n\n```python\nlabels = train.is_iceberg.as_matrix()\nangles = train.inc_angle.as_matrix()\n```\n\n\n```python\n# randomly choosing the train and validation indices\ntrain_indices = np.random.choice(len(labels), round(len(labels)*0.75), replace=False)\nvalidation_indices = np.array(list(set(range(len(labels))) - set(train_indices)))\n```\n\n\n```python\n# extract train set\nband_1_train = band_1[train_indices]\nband_2_train = band_2[train_indices]\nangles_train = angles[train_indices]\nlabels_train = labels[train_indices]\n\n# extract validation set\nband_1_validation = band_1[validation_indices]\nband_2_validation = band_2[validation_indices]\nangles_validation = angles[validation_indices]\nlabels_validation = labels[validation_indices]\n\n\n\n# # extract test set\n# band_1_test = band_1_test\n# band_2_test = band_2_test\n# angles_test = test_data.inc_angle.as_matrix()\n# iD = test_data.id.as_matrix()\n```\n\n\n```python\n# Converting the data to floating point\n\nband_1_train = band_1_train.astype(np.float32)\nband_1_validation = band_1_validation.astype(np.float32)\n# band_1_test = band_1_test.astype(np.float32)\nband_2_train = band_2_train.astype(np.float32)\nband_2_validation = band_2_validation.astype(np.float32)\n# band_2_test = band_2_test.astype(np.float32)\nangles_train = angles_train.astype(np.float32)\nangles_validation = angles_validation.astype(np.float32)\n# angles_test = angles_test.astype(np.float32)\nlabels_train = labels_train.astype(np.float32)\nlabels_validation = labels_validation.astype(np.float32)\n# iD = iD.astype(np.str)\n```\n\n\n```python\nfrom agument import *\nplt.rcParams['figure.figsize'] = (20.0, 14.0)\nimage = band_1_train[3].copy()\nplt.subplot(3, 5, 1)\nplt.title(\"Original Image\")\nplt.imshow(image)\nplt.subplot(3, 5, 2)\ngenerated_image = rotate_image(image,40)\nplt.title(\"Rotation by +ve degree\")\nplt.imshow(generated_image)\nplt.subplot(3, 5, 3)\ngenerated_image = rotate_image(image,-40)\nplt.title(\"Rotation by -ve degree\")\nplt.imshow(generated_image)\nplt.subplot(3, 5, 4)\ngenerated_image = translate_horizontal(image,10)\nplt.title(\"Horizonation translation to right\")\nplt.imshow(generated_image)\nplt.subplot(3, 5, 5)\ngenerated_image = translate_horizontal(image,-10)\nplt.title(\"Horizonation translation to left\")\nplt.imshow(generated_image)\nplt.subplot(3, 5, 6)\ngenerated_image = translate_vertical(image,10)\nplt.title(\"Vertical translation downward\")\nplt.imshow(generated_image)\nplt.subplot(3, 5, 7)\ngenerated_image = translate_vertical(image,-10)\nplt.title(\"Vertical translation upward\")\nplt.imshow(generated_image)\nplt.subplot(3, 5, 8)\ngenerated_image = translate_positive_diagonal(image,10)\nplt.title(\"SE translation\")\nplt.imshow(generated_image)\nplt.subplot(3, 5, 
9)\ngenerated_image = translate_positive_diagonal(image,-10)\nplt.title(\"NW translation\")\nplt.imshow(generated_image)\nplt.subplot(3, 5, 10)\ngenerated_image = translate_negative_diagonal(image,10)\nplt.title(\"NE translation\")\nplt.imshow(generated_image)\nplt.subplot(3, 5, 11)\ngenerated_image = translate_negative_diagonal(image,-10)\nplt.title(\"SW translation\")\nplt.imshow(generated_image)\nplt.subplot(3, 5, 12)\ngenerated_image = flip(image,0)\nplt.title(\"Vertical flip\")\nplt.imshow(generated_image)\nplt.subplot(3, 5, 13)\ngenerated_image = flip(image,1)\nplt.title(\"Horizontal flip\")\nplt.imshow(generated_image)\nplt.subplot(3, 5, 14)\ngenerated_image = zoom(image,10)\nplt.title(\"Zoom in\")\nplt.imshow(generated_image)\nplt.subplot(3, 5, 15)\ngenerated_image = zoom(image,-10)\nplt.title(\"Zoom out\")\nplt.imshow(generated_image)\nplt.show()\n```\n\n\n\n\n\n\n```python\ndef augment_data(band1, band2, angles, labels):\n \n '''a function to augment band1 and band2 image'''\n \n # list to store the generated data\n band1_generated = []\n band2_generated = []\n angles_generated = []\n labels_generated = []\n \n # iterate through each point in train set\n for i in range(labels.shape[0]):\n \n # rotate by positive degree\n angle = np.random.randint(5,20)\n band1_generated.append(rotate_image(band1[i],angle)) \n band2_generated.append(rotate_image(band2[i],angle))\n angles_generated.append(angles[i])\n labels_generated.append(labels[i])\n \n # rotate by negative degree\n angle = np.random.randint(5,20)\n band1_generated.append(rotate_image(band1[i],-angle)) \n band2_generated.append(rotate_image(band2[i],-angle))\n angles_generated.append(angles[i])\n labels_generated.append(labels[i])\n \n # positive horizontal shift\n shift = np.random.randint(3,7)\n band1_generated.append(translate_horizontal(band1[i],+shift)) \n band2_generated.append(translate_horizontal(band2[i],+shift))\n angles_generated.append(angles[i])\n labels_generated.append(labels[i])\n \n # negative horizontal shift\n shift = np.random.randint(3,7) \n band1_generated.append(translate_horizontal(band1[i],-shift)) \n band2_generated.append(translate_horizontal(band2[i],-shift))\n angles_generated.append(angles[i])\n labels_generated.append(labels[i])\n \n # positive vertical shift\n shift = np.random.randint(0,7) \n band1_generated.append(translate_vertical(band1[i],+shift)) \n band2_generated.append(translate_vertical(band2[i],+shift))\n angles_generated.append(angles[i])\n labels_generated.append(labels[i])\n \n # negative vertical shift\n shift = np.random.randint(3,7) \n band1_generated.append(translate_vertical(band1[i],-shift)) \n band2_generated.append(translate_vertical(band2[i],-shift))\n angles_generated.append(angles[i])\n labels_generated.append(labels[i])\n \n # translate along positive diagonal in positive direction\n shift = np.random.randint(3,7) \n band1_generated.append(translate_positive_diagonal(band1[i],+shift)) \n band2_generated.append(translate_positive_diagonal(band2[i],+shift))\n angles_generated.append(angles[i])\n labels_generated.append(labels[i])\n \n # translate along positive diagonal in negative direction\n shift = np.random.randint(3,7) \n band1_generated.append(translate_positive_diagonal(band1[i],-shift)) \n band2_generated.append(translate_positive_diagonal(band2[i],-shift))\n angles_generated.append(angles[i])\n labels_generated.append(labels[i])\n \n # translate along negative diagonal in positive direction\n shift = np.random.randint(3,7) \n 
band1_generated.append(translate_negative_diagonal(band1[i],+shift)) \n band2_generated.append(translate_negative_diagonal(band2[i],+shift))\n angles_generated.append(angles[i])\n labels_generated.append(labels[i])\n \n # translate along negative diagonal in negative direction\n shift = np.random.randint(3,7) \n band1_generated.append(translate_negative_diagonal(band1[i],-shift)) \n band2_generated.append(translate_negative_diagonal(band2[i],-shift))\n angles_generated.append(angles[i])\n labels_generated.append(labels[i])\n \n # vertical flip\n band1_generated.append(flip(band1[i],0)) \n band2_generated.append(flip(band2[i],0))\n angles_generated.append(angles[i])\n labels_generated.append(labels[i])\n \n # horizontal flip\n band1_generated.append(flip(band1[i],1)) \n band2_generated.append(flip(band2[i],1))\n angles_generated.append(angles[i])\n labels_generated.append(labels[i])\n \n # zoom in image\n zoom_shift = np.random.randint(2,5)\n band1_generated.append(zoom(band1[i],zoom_shift)) \n band2_generated.append(zoom(band2[i],zoom_shift))\n angles_generated.append(angles[i])\n labels_generated.append(labels[i])\n \n # zoom out image\n zoom_shift = np.random.randint(2,5) \n band1_generated.append(zoom(band1[i],-zoom_shift)) \n band2_generated.append(zoom(band2[i],-zoom_shift))\n angles_generated.append(angles[i])\n labels_generated.append(labels[i]) \n \n # convert the generated data into numpy array\n band1_generated = np.array(band1_generated)\n band2_generated = np.array(band2_generated)\n angles_generated = np.array(angles_generated)\n labels_generated = np.array(labels_generated)\n \n # concatenate the generated data to original train set\n band1_augmented = np.concatenate((band1, band1_generated),axis=0)\n band2_augmented = np.concatenate((band2, band2_generated),axis=0)\n angles_augmented = np.concatenate((angles, angles_generated),axis=0)\n labels_augmented = np.concatenate((labels, labels_generated),axis=0)\n \n return band1_augmented, band2_augmented, angles_augmented, labels_augmented\n```\n\n\n```python\n\nband_1_train, band_2_train, angles_train, labels_train = \\\n augment_data(band_1_train, band_2_train, angles_train, labels_train)\n \nprint(\"Shape of band_1_train:\",band_1_train.shape)\nprint(\"Shape of band_2_train:\",band_2_train.shape)\nprint(\"Shape of angles_train:\",angles_train.shape)\nprint(\"Shape of labels_train:\",labels_train.shape)\n```\n\n Shape of band_1_train: (16545, 75, 75)\n Shape of band_2_train: (16545, 75, 75)\n Shape of angles_train: (16545,)\n Shape of labels_train: (16545,)\n\n\n\n```python\nimage_train = np.concatenate([band_1_train[:, :, :, np.newaxis],\n band_2_train[:, :, :, np.newaxis],\n ((band_1_train+band_2_train)/2)[:, :, :, np.newaxis]],\n axis=-1)\n```\n\n\n```python\nimage_validation = np.concatenate([band_1_validation[:, :, :, np.newaxis],\n band_2_validation[:, :, :, np.newaxis],\n ((band_1_validation+band_2_validation)/2)[:, :, :, np.newaxis]],\n axis=-1)\n```\n\n\n```python\nprint(\"Shape of image_train:\",image_train.shape)\nprint(\"Shape of image_validation:\",image_validation.shape)\n```\n\n Shape of image_train: (16545, 75, 75, 3)\n Shape of image_validation: (368, 75, 75, 3)\n\n\n\n```python\nimport plotly.offline as py\nimport plotly.graph_objs as go\npy.init_notebook_mode(connected=True)\n\ndef plotmy3d(c, name):\n data = [go.Surface(z=c)]\n layout = go.Layout(\n title=name,\n autosize=False,\n width=700,\n height=700,\n margin=dict(\n l=65,\n r=50,\n b=65,\n t=90\n )\n )\n fig = go.Figure(data=data, layout=layout)\n 
py.iplot(fig)\nplotmy3d(band_1_train[17,:,:], 'Ship!!!')\n```\n\n\n\n\n```python\ndel(band_1_train, band_1_validation, band_2_train, band_2_validation)\n```\n\n\n```python\nfrom matplotlib import pyplot\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.models import Sequential\nfrom keras.layers import Conv2D, MaxPooling2D, Dense, Dropout, Input, Flatten, Activation\nfrom keras.layers import GlobalMaxPooling2D\nfrom keras.layers.normalization import BatchNormalization\nfrom keras.layers.merge import Concatenate\nfrom keras.models import Model\nfrom keras import initializers\nfrom keras.optimizers import Adam\nfrom keras.callbacks import ModelCheckpoint, Callback, EarlyStopping\n```\n\n /usr/local/lib/python3.5/dist-packages/h5py/__init__.py:36: FutureWarning:\n \n Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.\n \n Using TensorFlow backend.\n\n\n\n```python\ndef getModel():\n #Building the model\n gmodel=Sequential()\n #Conv Layer 1\n gmodel.add(Conv2D(64, kernel_size=(3, 3),activation='relu', input_shape=(75, 75, 3)))\n gmodel.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))\n gmodel.add(Dropout(0.2))\n\n #Conv Layer 2\n gmodel.add(Conv2D(128, kernel_size=(3, 3), activation='relu' ))\n gmodel.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))\n gmodel.add(Dropout(0.2))\n\n# #Conv Layer 3.1\n# gmodel.add(Conv2D(256, kernel_size=(3, 3), activation='relu'))\n# gmodel.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))\n# gmodel.add(Dropout(0.2))\n \n #Conv Layer 3.2\n gmodel.add(Conv2D(128, kernel_size=(3, 3), activation='relu'))\n gmodel.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))\n gmodel.add(Dropout(0.2))\n\n #Conv Layer 4\n gmodel.add(Conv2D(64, kernel_size=(3, 3), activation='relu'))\n gmodel.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))\n gmodel.add(Dropout(0.2))\n\n #Flatten the data for upcoming dense layers\n gmodel.add(Flatten())\n\n #Dense Layers\n gmodel.add(Dense(512))\n gmodel.add(Activation('relu'))\n gmodel.add(Dropout(0.2))\n\n #Dense Layer 2\n gmodel.add(Dense(256))\n gmodel.add(Activation('relu'))\n gmodel.add(Dropout(0.2))\n\n #Sigmoid Layer\n gmodel.add(Dense(1))\n gmodel.add(Activation('sigmoid'))\n\n mypotim=Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)\n gmodel.compile(loss='binary_crossentropy',\n optimizer=mypotim,\n metrics=['accuracy'])\n gmodel.summary()\n return gmodel\n```\n\n\n```python\nX_train_cv, _, y_train_cv,_ = train_test_split(image_train, labels_train, random_state=1, train_size=0.90)\n\n_, X_valid,_, y_valid = train_test_split(image_validation, labels_validation, random_state=1, train_size=0.10)\n```\n\n /usr/local/lib/python3.5/dist-packages/sklearn/model_selection/_split.py:2026: FutureWarning:\n \n From version 0.21, test_size will always complement train_size unless both are specified.\n \n\n\n\n```python\ndel (image_train, image_validation, labels_train, labels_validation)\n```\n\n\n```python\nimport os\ngmodel=getModel()\ngmodel.fit(X_train_cv, y_train_cv,\n batch_size=24,\n epochs=8,\n verbose=1,\n validation_data=(X_valid, y_valid))\n```\n\n _________________________________________________________________\n Layer (type) Output Shape Param # \n =================================================================\n conv2d_1 (Conv2D) (None, 73, 73, 64) 1792 \n _________________________________________________________________\n max_pooling2d_1 (MaxPooling2 (None, 36, 36, 64) 0 
\n _________________________________________________________________\n dropout_1 (Dropout) (None, 36, 36, 64) 0 \n _________________________________________________________________\n conv2d_2 (Conv2D) (None, 34, 34, 128) 73856 \n _________________________________________________________________\n max_pooling2d_2 (MaxPooling2 (None, 17, 17, 128) 0 \n _________________________________________________________________\n dropout_2 (Dropout) (None, 17, 17, 128) 0 \n _________________________________________________________________\n conv2d_3 (Conv2D) (None, 15, 15, 128) 147584 \n _________________________________________________________________\n max_pooling2d_3 (MaxPooling2 (None, 7, 7, 128) 0 \n _________________________________________________________________\n dropout_3 (Dropout) (None, 7, 7, 128) 0 \n _________________________________________________________________\n conv2d_4 (Conv2D) (None, 5, 5, 64) 73792 \n _________________________________________________________________\n max_pooling2d_4 (MaxPooling2 (None, 2, 2, 64) 0 \n _________________________________________________________________\n dropout_4 (Dropout) (None, 2, 2, 64) 0 \n _________________________________________________________________\n flatten_1 (Flatten) (None, 256) 0 \n _________________________________________________________________\n dense_1 (Dense) (None, 512) 131584 \n _________________________________________________________________\n activation_1 (Activation) (None, 512) 0 \n _________________________________________________________________\n dropout_5 (Dropout) (None, 512) 0 \n _________________________________________________________________\n dense_2 (Dense) (None, 256) 131328 \n _________________________________________________________________\n activation_2 (Activation) (None, 256) 0 \n _________________________________________________________________\n dropout_6 (Dropout) (None, 256) 0 \n _________________________________________________________________\n dense_3 (Dense) (None, 1) 257 \n _________________________________________________________________\n activation_3 (Activation) (None, 1) 0 \n =================================================================\n Total params: 560,193\n Trainable params: 560,193\n Non-trainable params: 0\n _________________________________________________________________\n Train on 14890 samples, validate on 332 samples\n Epoch 1/8\n 14890/14890 [>>>>>>>>>>>>>>>>>>>>>>>>>>>] - ETA: 8:20 - loss: 0.2981 - acc: 0.8541\n"
}
] | 2 |
cmdalbem/filmow_to_letterboxd
|
https://github.com/cmdalbem/filmow_to_letterboxd
|
8b41f5c735c261fb32b56ba5fadbd2eaf13c7d9e
|
427b64b2551a204162df3eb92ba8f07ae92db042
|
22d6b8eeb7558a61f974aca3837384a95752785c
|
refs/heads/master
| 2022-04-21T12:32:24.111106 | 2019-09-09T01:56:58 | 2019-09-09T01:56:58 | 257,437,424 | 0 | 0 |
MIT
| 2020-04-21T00:18:34 | 2020-04-09T03:29:46 | 2020-01-08T16:52:57 | null |
[
{
"alpha_fraction": 0.7248027920722961,
"alphanum_fraction": 0.7318142056465149,
"avg_line_length": 53.33333206176758,
"blob_id": "bb9168f1651398c319f9646ca844b312556b869a",
"content_id": "2486bdff5165d2d5bb2fe167dbb2fb0545acead8",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1145,
"license_type": "permissive",
"max_line_length": 225,
"num_lines": 21,
"path": "/README.md",
"repo_name": "cmdalbem/filmow_to_letterboxd",
"src_encoding": "UTF-8",
"text": "# Filmow to Letterboxd\n\nPrograma pra pegar filmes assistidos no Filmow pra serem importados pelo Letterboxd.\n\n#### Windows: [Download](https://github.com/yanari/filmow_to_letterboxd/releases/download/v2.2.4/filmow_to_letterboxd.exe)\n#### Linux:\n - [Extraia o arquivo .zip](https://github.com/myanari/filmow_to_letterboxd/archive/master.zip)\n - Abra o terminal na pasta com todos os arquivos\n - Copie e cole o seguinte: `chmod +x init.sh && ./init.sh`\n\n## Usando o programa\n\n1. Digite seu nome de usuário e espere o programa terminar. \n2. Vá para https://letterboxd.com/import/, **SELECT A FILE**, selecione o(s) arquivo(s) de extensão **.csv** criado(s) pelo programa\n3. Corrigir caso algum filme não tiver sido importado, erros, etc.\n\n\n#### Se quiser dar uma ajudinha ;)\n<a href=\"https://www.buymeacoffee.com/yanari\" target=\"_blank\"><img src=\"https://www.buymeacoffee.com/assets/img/custom_images/orange_img.png\" alt=\"Buy Me A Coffee\" style=\"height: auto !important;width: auto !important;\" ></a>\n\n[Instagram](https://www.instagram.com/v.llanelle/), [Letterboxd](https://letterboxd.com/r00t/), [Filmow](https://filmow.com/usuario/shadazz/)\n"
},
{
"alpha_fraction": 0.746268630027771,
"alphanum_fraction": 0.7835820913314819,
"avg_line_length": 21.5,
"blob_id": "0ad9c87914a1c8f6daf37c09163d60ab3ca5972d",
"content_id": "b87206b1fe16df23c4d8af43b05fca535e2b7b81",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 134,
"license_type": "permissive",
"max_line_length": 30,
"num_lines": 6,
"path": "/init.sh",
"repo_name": "cmdalbem/filmow_to_letterboxd",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\nsudo apt-get update\nsudo apt-get install python3.7\nsudo pip install -U pip\nsudo pip install bs4\npython3.7 parser_filmow.py"
},
{
"alpha_fraction": 0.6014169454574585,
"alphanum_fraction": 0.6218840479850769,
"avg_line_length": 26.824817657470703,
"blob_id": "47aa47b7c46bb344eb44025cec03e4820be46123",
"content_id": "91cdc253a06cee4cc1c4fd23ab36ea7600532387",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3824,
"license_type": "permissive",
"max_line_length": 138,
"num_lines": 137,
"path": "/filmow_to_letterboxd.py",
"repo_name": "cmdalbem/filmow_to_letterboxd",
"src_encoding": "UTF-8",
"text": "import wx\nimport wx.lib.agw.hyperlink as hl\nimport webbrowser\n\nfrom parser_filmow import Parser\nfrom utils import delay\n\nclass Frame(wx.Frame):\n def __init__(self, *args, **kwargs):\n super(Frame, self).__init__(*args, **kwargs)\n\n self.MyFrame = self\n\n self.is_running = False\n\n self.panel = wx.Panel(\n self,\n pos=(0, 0),\n size=(500,100),\n style=wx.CLOSE_BOX | wx.CAPTION | wx.MINIMIZE_BOX | wx.SYSTEM_MENU\n )\n self.panel.SetBackgroundColour('#ffffff')\n self.SetTitle('Filmow to Letterboxd')\n self.SetMinSize((500, 300))\n self.SetMaxSize((500, 300))\n\n self.letterboxd_link = hl.HyperLinkCtrl(\n self.panel,\n -1,\n 'letterboxd',\n URL='https://letterboxd.com/import/',\n pos=(420,240)\n )\n self.letterboxd_link.SetToolTip(wx.ToolTip('Clica só quando o programa tiver rodado e sua conta no Letterboxd tiver criada, beleza?'))\n\n self.coffee_link = hl.HyperLinkCtrl(\n self.panel,\n -1,\n 'quer me agradecer?',\n URL='https://www.buymeacoffee.com/yanari',\n pos=(310,240)\n )\n self.coffee_link.SetToolTip(wx.ToolTip('Se tiver dado tudo certo cê pode me pagar um cafézinho, que tal?. Não é obrigatório, claro.'))\n\n wx.StaticText(self.panel, -1, 'Username no Filmow:', pos=(25, 54))\n self.username = wx.TextCtrl(self.panel, size=(200, 25), pos=(150, 50))\n submit_button = wx.Button(self.panel, wx.ID_SAVE, 'Submit', pos=(360, 50))\n\n self.Bind(wx.EVT_BUTTON, self.Submit, submit_button)\n self.Bind(wx.EVT_CLOSE, self.OnClose)\n\n self.Show(True)\n\n\n def Submit(self, event):\n self.button = event.GetEventObject()\n self.button.Disable()\n\n self.text_control = wx.TextCtrl(\n self.panel,\n -1,\n '',\n pos=(50, 120),\n size=(400, 100),\n style=wx.TE_MULTILINE | wx.TE_CENTRE | wx.TE_READONLY \n | wx.TE_NO_VSCROLL | wx.TE_AUTO_URL | wx.TE_RICH2 | wx.BORDER_NONE\n )\n self.Parse(self.MyFrame)\n\n\n @delay(1.0)\n def Parse(self, MyFrame):\n self.user = self.username.GetValue().lower().strip()\n if len(self.user) == 0:\n self.is_running = False\n self.text_control.ChangeValue('O campo não deve ficar em branco.')\n self.button.Enable()\n return\n else:\n try:\n msg = \"\"\"Seus filmes estão sendo importados no plano de fundo :)\\n\\n\n Não feche a janela e aguarde um momento.\"\"\"\n \n self.text_control.ChangeValue(msg)\n self.is_running = True\n self.parser = Parser(self.user)\n\n except Exception:\n self.text_control.ChangeValue('Usuário {} não encontrado. Tem certeza que digitou certo?'.format(self.user))\n self.button.Enable()\n self.is_running = False\n return\n \n self.ChangeMsg()\n \n \n @delay(1.0)\n def ChangeMsg(self):\n msg = \"\"\"Pronto!\\n\\n Agora clica no link aqui embaixo pra ir pro Letterboxd, \n SELECT A FILE e selecione o(s) arquivo(s) de extensão .csv \n (tá tudo aqui nessa mesma pasta) criado(s) pelo programa.\"\"\"\n\n self.text_control.ChangeValue(msg)\n self.Bind(wx.EVT_TEXT_URL, self.GoToLetterboxd, self.text_control)\n self.is_running = False\n\n\n def GoToLetterboxd(self, event):\n webbrowser.open('https://letterboxd.com/import/')\n\n\n def BuyMeACoffee(self, event):\n webbrowser.open('https://www.buymeacoffee.com/yanari')\n\n\n def OnClose(self, event):\n if self.is_running:\n confirm_exit = wx.MessageDialog(\n self,\n 'Tem certeza que quer parar o programa?',\n 'Sair',\n wx.YES_NO | wx.ICON_QUESTION\n )\n\n if confirm_exit.ShowModal() == wx.ID_YES:\n self.Destroy()\n wx.Window.Destroy(self)\n else:\n confirm_exit.Destroy()\n else:\n event.Skip()\n \n\nif __name__ == '__main__':\n app = wx.App()\n Frame(None, size=(500, 300))\n app.MainLoop()"
},
{
"alpha_fraction": 0.5919248461723328,
"alphanum_fraction": 0.5980193018913269,
"avg_line_length": 29.06106948852539,
"blob_id": "d1d90afbb892d5c6652732d88a800326d6256007",
"content_id": "38dd7498948416bd12e0a7430a08e6614ca662d1",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3950,
"license_type": "permissive",
"max_line_length": 113,
"num_lines": 131,
"path": "/parser_filmow.py",
"repo_name": "cmdalbem/filmow_to_letterboxd",
"src_encoding": "UTF-8",
"text": "import os\nimport csv\nimport string\nimport requests\nimport re\nimport webbrowser\nfrom bs4 import BeautifulSoup\n\nclass Parser():\n def __init__(self, user):\n self.page = 1\n self.total_files = 1\n self.soup = BeautifulSoup(features='html.parser')\n\n self.user = user\n self.movies_parsed = 0\n self.total_files = 1\n\n self.create_csv(self.total_files)\n self.parse(user)\n \n def create_csv(self, all_movies):\n with open(str(all_movies) + self.user + '.csv', 'w', encoding='UTF-8') as f:\n writer = csv.writer(f)\n writer.writerow(('Title', 'Directors', 'Year'))\n \n def parse(self, user):\n self.page = 1\n last_page = self.get_last_page(user)\n\n while self.page <= last_page:\n url = 'https://filmow.com/usuario/'+ user + '/filmes/ja-vi/?pagina=' + str(self.page)\n\n source_code = requests.get(url).text\n\n soup = BeautifulSoup(source_code, 'html.parser')\n\n if soup.find('h1').text == 'Vixi! - Página não encontrada':\n raise Exception\n\n for title in soup.find_all('a', {'class': 'tip-movie'}):\n self.parse_movie('https://filmow.com' + title.get('href'))\n self.movies_parsed += 1\n self.page += 1\n\n def parse_movie(self, url):\n movie = {'title': None, 'director': None, 'year': None}\n source_code = requests.get(url).text\n soup = BeautifulSoup(source_code, 'html.parser')\n\n try:\n movie['title'] = soup.find('h2', {'class': 'movie-original-title'}).get_text().strip()\n except AttributeError:\n movie['title'] = soup.find('h1').get_text().strip()\n\n try:\n movie['director'] = soup.find('span', {'itemprop': 'director'}).select('strong')[0].get_text()\n except AttributeError:\n try:\n movie['director'] = soup.find('span', {'itemprop': 'directors'}).getText().strip()\n except AttributeError:\n movie['director'] = ''\n\n try:\n movie['year'] = soup.find('small', {'class': 'release'}).get_text()\n except AttributeError:\n movie['year'] = ''\n\n self.write_to_csv(movie)\n\n def write_to_csv(self, movie):\n if self.movies_parsed < 1900:\n with open(str(self.total_files) + self.user + '.csv', 'a', encoding='UTF-8') as f:\n writer = csv.writer(f)\n writer.writerow((\n movie['title'],\n movie['director'],\n movie['year']\n ))\n else:\n self.total_files += 1\n self.movies_parsed = 0\n self.create_csv(self.total_files)\n \n def get_last_page(self, user):\n url = 'https://filmow.com/usuario/'+ user + '/filmes/ja-vi/'\n\n source_code = requests.get(url).text\n\n soup = BeautifulSoup(source_code, 'html.parser')\n\n try:\n tag = list(soup.find('div', {'class': 'pagination'}).find('ul').children)[-2]\n match = re.search(r'pagina=(\\d*)', str(tag)).group(1)\n return int(match)\n except:\n return 1\n\nif __name__ == \"__main__\":\n try:\n username = input('Digite seu nome de usuário do Filmow: ')\n msg = \"\"\"\n Seus filmes estão sendo importados no plano de fundo :)\\n\n Não feche a janela e aguarde um momento.\n \"\"\"\n print(msg)\n Parser(username.lower().strip())\n except Exception:\n print('Usuário {} não encontrado. Tem certeza que digitou certo?'.format(username))\n username = input('Digite seu nome de usuário do Filmow: ')\n Parser(username.lower().strip())\n\n msg = \"\"\"\n Pronto!\n Vá para https://letterboxd.com/import/, SELECT A FILE, \n e selecione o(s) arquivo(s) de extensão csv criado(s) pelo programa\n \"\"\"\n print(msg)\n\n while True:\n go_to_letterboxd = input('Gostaria de ser direcionado para \"https://letterboxd.com/import/\"? 
(s/n) ').lower()\n if not go_to_letterboxd == '' and go_to_letterboxd[0] in ('s', 'n'):\n break\n else:\n print('Opcao inválida.')\n\n if go_to_letterboxd.startswith('s'):\n webbrowser.open('https://letterboxd.com/import/')\n else:\n print('Então tchau')\n input()\n"
}
] | 4 |
surya1singh/FreeWorkCLI
|
https://github.com/surya1singh/FreeWorkCLI
|
817ea82d3de00cc3d9e00d8fb327c84ee15d23e9
|
49fba70fb6ca1d82aec4530352350a538f10facb
|
3f8645545167d8c67fa4ff57921e2145146371d3
|
refs/heads/master
| 2020-03-25T10:42:16.200935 | 2018-09-10T11:28:47 | 2018-09-10T11:28:47 | 143,702,613 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.681654691696167,
"alphanum_fraction": 0.681654691696167,
"avg_line_length": 23.173913955688477,
"blob_id": "e8c0a6123dafdc57d933273c2d211add46411a34",
"content_id": "5eda4d337f2aa12f1fa4cfd8ccd32ee8ae8e7908",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 556,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 23,
"path": "/db/user.py",
"repo_name": "surya1singh/FreeWorkCLI",
"src_encoding": "UTF-8",
"text": "import datetime\nimport mongoengine\n\n\nclass User(mongoengine.Document):\n id = mongoengine.IntField()\n registered_date = mongoengine.DateTimeField(default=datetime.datetime.now)\n name = mongoengine.StringField(required=True)\n email = mongoengine.StringField(required=True)\n team = mongoengine.StringField(required=False)\n\n meta = {\n 'db_alias': 'core',\n 'collection': 'users'\n }\n\n\ndef login_user(user, password):\n if user == 'surya' and password == 'surya':\n return user\n\ndef change_password(user):\n pass\n"
},
{
"alpha_fraction": 0.7074829936027527,
"alphanum_fraction": 0.7074829936027527,
"avg_line_length": 23.5,
"blob_id": "a98d9a2fd12ad243418068291e7c527c1c1a7e81",
"content_id": "1e74e5673a9d1a3df09620582e843a1a0165a2ed",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 147,
"license_type": "no_license",
"max_line_length": 53,
"num_lines": 6,
"path": "/common/errors.py",
"repo_name": "surya1singh/FreeWorkCLI",
"src_encoding": "UTF-8",
"text": "class Error(Exception):\n \"\"\" Base Class Exception for Custom Exceptions\"\"\"\n\n\nclass OptionParserError(Error):\n \"\"\" Option Parser Exception\"\"\"\n"
},
{
"alpha_fraction": 0.6059544682502747,
"alphanum_fraction": 0.6129597425460815,
"avg_line_length": 21.760000228881836,
"blob_id": "709f16582e2e63325e7f73b192a7d18ba650dd05",
"content_id": "33e46f2ac01f2d038a788d885901e3d7834ae113",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 571,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 25,
"path": "/db/todo.py",
"repo_name": "surya1singh/FreeWorkCLI",
"src_encoding": "UTF-8",
"text": "\n\ntasks = [\n {\n 'id': 1,\n 'title': u'Create FreeWorkCLI',\n 'status': u'In progress',\n 'estimated_time': 10,\n 'time_spent' : 0\n }\n]\n\n\nimport mongoengine\n\n\nclass Tasks(mongoengine.Document):\n id = mongoengine.IntField()\n registered_date = mongoengine.DateTimeField(default=datetime.datetime.now)\n name = mongoengine.StringField(required=True)\n email = mongoengine.StringField(required=True)\n team = mongoengine.StringField(required=False)\n\n meta = {\n 'db_alias': 'core',\n 'collection': 'users'\n }\n"
},
{
"alpha_fraction": 0.44787168502807617,
"alphanum_fraction": 0.44787168502807617,
"avg_line_length": 38.53658676147461,
"blob_id": "d85f930ea767752d00e9374d5a78b461ef7654e9",
"content_id": "5890791105262c28adc1fdd33eb36824ad487eff",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1621,
"license_type": "no_license",
"max_line_length": 106,
"num_lines": 41,
"path": "/common/option_parser.py",
"repo_name": "surya1singh/FreeWorkCLI",
"src_encoding": "UTF-8",
"text": "from optparse import OptionParser\nimport traceback\nfrom .errors import OptionParserError\n\nclass OptParser:\n @classmethod\n def parseopts(self):\n try:\n parser = OptionParser(usage = \"Arguments for Free Work\")\n\n parser.add_option(\"-u\", \"--user\", action = \"store\",\n type = \"string\",\n help = \"Username\",\n dest = \"username\",\n )\n\n parser.add_option(\"-p\", \"--password\", action = \"store\",\n type = \"string\",\n help = \"Passowrd for user\",\n dest = \"password\",\n )\n try:\n (options, args) = parser.parse_args()\n except:\n raise OptionParserError\n\n if not options.username or not options.password :\n raise ValueError('Username and password are mandatory.')\n\n if options.username and options.password:\n kwargs = {\n \"username\": options.username,\n \"password\": options.password,\n }\n return kwargs\n raise OptionParserError('There is some unknown erros \\n') # code should never reach this line.\n\n except OptionParserError:\n raise OptionParserError('Type -h for help \\n\\t %s' % USAGE)\n except:\n raise OptionParserError(traceback.format_exc())\n"
},
{
"alpha_fraction": 0.6849315166473389,
"alphanum_fraction": 0.6849315166473389,
"avg_line_length": 15.222222328186035,
"blob_id": "53d58c9967809b21c3515b3b45852accfc34b922",
"content_id": "8d557a98e2dedbad528c5bc32277744321122264",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 146,
"license_type": "no_license",
"max_line_length": 27,
"num_lines": 9,
"path": "/infrastructure/state.py",
"repo_name": "surya1singh/FreeWorkCLI",
"src_encoding": "UTF-8",
"text": "from db.user import User\n\nactive_account: User = None\n\n\ndef reload_account():\n global active_account\n if not active_account:\n return\n"
},
{
"alpha_fraction": 0.6276595592498779,
"alphanum_fraction": 0.6276595592498779,
"avg_line_length": 20.69230842590332,
"blob_id": "2cd683832be97d5c1aaf05fc7ed3901d01dbbac6",
"content_id": "aafac6fd0eedcf47267d2e755221ca9d8d5d6e89",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 282,
"license_type": "no_license",
"max_line_length": 64,
"num_lines": 13,
"path": "/common/general.py",
"repo_name": "surya1singh/FreeWorkCLI",
"src_encoding": "UTF-8",
"text": "from colorama import Fore\n\ndef get_action(user):\n text = '> '\n if user:\n text = f'{user}> '\n action = input(Fore.YELLOW + text + Fore.WHITE)\n return action.strip().lower()\n\n\n\ndef unknown_command():\n print(Fore.BLACK,\"Sorry we didn't understand that command.\")\n"
},
{
"alpha_fraction": 0.6088677644729614,
"alphanum_fraction": 0.6088677644729614,
"avg_line_length": 25.82978630065918,
"blob_id": "acee2e9a7ae5a37a8291ff67916f0be452fd2f95",
"content_id": "de322ab40659a39d32f5415cca656f693672e09c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1263,
"license_type": "no_license",
"max_line_length": 92,
"num_lines": 47,
"path": "/personal/tasks.py",
"repo_name": "surya1singh/FreeWorkCLI",
"src_encoding": "UTF-8",
"text": "\n\ndef create_task(user):\n print(' ****************** Create Task **************** ')\n title = input(\"Please enter title for task \")\n description = input(\"Please enter description for task \")\n title = input(\"Please enter title for task \")\n\n\ndef backlog(user)):\n pass\n\ndef completed_task(user)):\n pass\n\ndef in_progress(user)):\n pass\n\ndef log_work(user)):\n pass\n\ndef change_password(user)):\n pass\n\n\ndef register_cage():\n if not state.active_account:\n error_msg('You must login first to register a cage.')\n return\n\n meters = input('How many square meters is the cage? ')\n if not meters:\n error_msg('Cancelled')\n return\n\n meters = float(meters)\n carpeted = input(\"Is it carpeted [y, n]? \").lower().startswith('y')\n has_toys = input(\"Have snake toys [y, n]? \").lower().startswith('y')\n allow_dangerous = input(\"Can you host venomous snakes [y, n]? \").lower().startswith('y')\n name = input(\"Give your cage a name: \")\n price = float(input(\"How much are you charging? \"))\n\n cage = svc.register_cage(\n state.active_account, name,\n allow_dangerous, has_toys, carpeted, meters, price\n )\n\n state.reload_account()\n success_msg(f'Register new cage with id {cage.id}.')\n"
},
{
"alpha_fraction": 0.7665198445320129,
"alphanum_fraction": 0.7687224745750427,
"avg_line_length": 27.375,
"blob_id": "3e1688b60bccbaf9f44bc396998b6ef38ed15d4e",
"content_id": "839f2cff857b3a8fcc0c0559ebfeeeae59370e77",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 454,
"license_type": "no_license",
"max_line_length": 90,
"num_lines": 16,
"path": "/README.md",
"repo_name": "surya1singh/FreeWorkCLI",
"src_encoding": "UTF-8",
"text": "# FreeWorkCLI\n\nCommand Line Project Management System.\n\n## Problem Statement\n\nThere are 2 ways of using this project. Personal and Office\n\n## Office Space\nIt is a task management system. User can create, read and delete task/subtask according\nto access user has. User can assign task to other user. Can add estimations to there task.\nUser log there work. User can see list of pending, completed, on going task. \n\n\n## Personal Space\nTo-do list for user.\n"
},
{
"alpha_fraction": 0.5381944179534912,
"alphanum_fraction": 0.5491898059844971,
"avg_line_length": 28.79310417175293,
"blob_id": "db3c1b9b3220fabcb64b1352aa15af90b9c0df38",
"content_id": "4376f6de6109b5e871d78befe9140092f30f96e7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1728,
"license_type": "no_license",
"max_line_length": 57,
"num_lines": 58,
"path": "/personal/__init__.py",
"repo_name": "surya1singh/FreeWorkCLI",
"src_encoding": "UTF-8",
"text": "from colorama import Fore\nfrom common.general import get_action, unknown_command\nfrom db.todo import tasks\nfrom infrastructure.switchlang import switch\nimport infrastructure.state as state\nfrom db.user import change_password\n\ndef Personal(user):\n global brk\n brk = 0\n show_commands()\n while True:\n action = get_action(user)\n with switch(action) as s:\n s.case('t', lambda: create_task(user))\n s.case('b', lambda: backlog(user))\n s.case('c', lambda: completed_task(user))\n s.case('o', lambda: in_progress(user))\n s.case('l', lambda: log_work(user))\n s.case('p', lambda: change_password(user))\n s.case(['<'], break_loop )\n s.case(['e', 'bye', 'exit', 'exit()'], exit )\n s.case('?', show_commands)\n s.case('', lambda: None)\n s.default(unknown_command)\n if brk:\n break\n\n\ndef show_commands():\n print(Fore.BLACK)\n print(' '*5,'What action would you like to take:')\n print(' '*5,'Create a [T]ask')\n print(' '*5,'View [B]acklog')\n print(' '*5,'View [C]ompleted')\n print(' '*5,'View [O]n going task')\n print(' '*5,'[L]og work for task')\n print(' '*5,'[C]hange [P]assoword')\n print(' '*5,'[<] Main menu')\n print(' '*5,'[E]xit app')\n print(' '*5,'[?] Help (this info)')\n print()\n\ndef create_task():\n if not request.json or not 'title' in request.json:\n abort(400)\n task = {\n 'id': tasks[-1]['id'] + 1,\n 'title': request.json['title'],\n 'status': request.json.get('status', \"\"),\n 'estimated_time':10\n }\n tasks.append(task)\n return task\n\ndef break_loop():\n global brk\n brk = 1\n"
},
{
"alpha_fraction": 0.59779953956604,
"alphanum_fraction": 0.6088019609451294,
"avg_line_length": 30.461538314819336,
"blob_id": "c750d19802078020b10a86e0c4bce29028e203e4",
"content_id": "75bb5da073216dbeb9a178e41eaa24a25ec97a39",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1636,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 52,
"path": "/start.py",
"repo_name": "surya1singh/FreeWorkCLI",
"src_encoding": "UTF-8",
"text": "from colorama import Fore\nfrom common.general import get_action, unknown_command\nfrom infrastructure.switchlang import switch\nimport infrastructure.state as state\nfrom common.option_parser import OptParser\nfrom personal import Personal\nfrom db.user import login_user\n\n\ndef main(kwargs):\n user = login_user(kwargs['username'],kwargs['password'])\n if not user:\n raise ValueError(\"Incorrect User/password\")\n try:\n welcome(user)\n while True:\n show_commands()\n action = get_action(user)\n with switch(action) as s:\n s.case('p', lambda: Personal(user))\n s.case('o', lambda: Office(user))\n s.case(['e', 'bye', 'exit', 'exit()'], exit)\n s.case('?', show_commands)\n s.case('', lambda: None)\n s.default(unknown_command)\n\n except KeyboardInterrupt:\n return \"\"\n finally:\n print(Fore.BLACK,\"Thank you For using FreeWork\".center(110,\" \"),end='')\n print()\n\n\ndef welcome(user):\n print(('Hello ' + user).center(110,\" \"))\n print('welcome to FreeWork.'.center(110,\" \"))\n print('Here you can manage your personal to-do list.'.center(110,\" \"))\n print('Here you can manage team task.'.center(110,\" \"))\n print('And many more. Check documentations for more details.'.center(110,\" \"))\n print()\n\ndef show_commands():\n print(Fore.BLACK)\n print(\"[p] Checkout your personal workspace\")\n print(\"[o] Checkout your office workspace\")\n print(\"[e] or ctrl-c to exit\")\n print()\n\n\nif __name__ == '__main__':\n kwargs = OptParser.parseopts()\n main(kwargs)\n"
},
{
"alpha_fraction": 0.5454545617103577,
"alphanum_fraction": 0.7575757503509521,
"avg_line_length": 15.5,
"blob_id": "a18635e05e82cb5335a52a7240966e76b6430985",
"content_id": "6ee90343ef3bb795fa1a53ead9fdb6e10d710ce7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 33,
"license_type": "no_license",
"max_line_length": 18,
"num_lines": 2,
"path": "/requirements.txt",
"repo_name": "surya1singh/FreeWorkCLI",
"src_encoding": "UTF-8",
"text": "pymongo=3.7.1\nmongoengine=0.15.3\n"
}
] | 11 |
andrewbuss/tinystack
|
https://github.com/andrewbuss/tinystack
|
92d3c0fa80724ca187c95619fa97735aad7d6039
|
6a95cbd42538f4a630b504f1bae7828cb8bfeba7
|
393c71b8e4559c211194ddcd69dace3dcab87da9
|
refs/heads/master
| 2020-05-18T11:59:21.027931 | 2015-08-25T07:32:16 | 2015-08-25T07:32:16 | 40,432,725 | 3 | 1 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6173020601272583,
"alphanum_fraction": 0.6231671571731567,
"avg_line_length": 19.058822631835938,
"blob_id": "af118320ebe901d91f3732a78f333bec37ecf801",
"content_id": "648e8a90d0b2ae6ca5086e5a0233a54b850f3fb1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 682,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 34,
"path": "/udisasm.py",
"repo_name": "andrewbuss/tinystack",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python2\n\n# Disassembler for Tinystack\n\nfrom argparse import ArgumentParser, FileType\nfrom sys import stdin\n\nfrom tinystack_emu import by_opcode\n\nparser = ArgumentParser()\nparser.add_argument('infile', nargs='?', type=FileType('r'), default=stdin)\nargs = parser.parse_args()\n\nlit = False\n\ndef print_nibble(n):\n global lit\n print hex(n)[2:], '\\t',\n if lit:\n lit = False\n print\n return\n instr = by_opcode[n]\n print instr.__name__\n if instr.__name__ == 'lit':\n lit = True\n\n\n\nfor addr, byte in enumerate(map(ord, args.infile.read())):\n print addr, '\\t',\n print_nibble(byte >> 4)\n print '\\t',\n print_nibble(byte & 0xf)\n"
},
{
"alpha_fraction": 0.5238585472106934,
"alphanum_fraction": 0.5420528650283813,
"avg_line_length": 25.243244171142578,
"blob_id": "36190ad9cd2053ce3dedc81bca19c4acf5522559",
"content_id": "9dc8fd2056b33743204e2bc1f49481052a9cd139",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2913,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 111,
"path": "/uasm.py",
"repo_name": "andrewbuss/tinystack",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python2\n\n# Microassembler for Tinystack. This doesn't do much except map mnemonics to\n# nibbles and generate lit instruction sequences.\n\nfrom tinystack_emu import *\n\nparser = ArgumentParser()\nparser.add_argument('infile', nargs='?',\n type=FileType('r'), default=sys.stdin)\nparser.add_argument('-o', '--outfile',\n nargs='?', type=FileType('w+'), default=sys.stdout)\nargs = parser.parse_args()\n\nnibbles = []\nmacros = {}\nlabels = {}\n\n\ndef align(nibbles, n=0):\n if n:\n while len(nibbles) % (n * 2):\n nibbles.append(0)\n else:\n while len(nibbles) & 1:\n nibbles.append(0)\n\n\ndef emit_lit(n):\n if n in labels:\n n = labels[n]\n lit = lambda x: nibbles.extend([Tinystack.lit_instr.opcode, x])\n lit(n & 0xF)\n while n > 0xF:\n n >>= 4\n lit(n & 0xF)\n\n\ndef emit_word(n):\n if n in labels:\n n = labels[n]\n if type(n) is str:\n n = int(n, 0) & 0xffff\n for _ in range(4):\n nibbles.append((n >> 12) & 0xF)\n n <<= 4\n\n\ndef proc_line(line):\n global nibbles\n line = line.strip().split(' ')\n if line == ['']: return # ignore blank lines\n first = line[0]\n if first[0] == ';': return # ignore lines which start with a comment\n if first[0] == '$':\n align(nibbles, 2)\n sign = {'-': -1, '+': 1}[first[1]]\n offset = sign * int(first[1:], 0)\n return emit_word(len(nibbles) / 2 + offset)\n if first[0] == '&':\n align(nibbles)\n return emit_word(first[1:])\n if first[-1] == ':':\n align(nibbles)\n labels[first.strip(':')] = len(nibbles) / 2\n return\n if first == 'align':\n try:\n addr = int(line[1], 0) & 0xffff\n return align(nibbles, addr)\n except ValueError:\n return align(nibbles)\n if first == 'skip':\n nibbles.append(0) if len(nibbles) & 3 == 3 else 0\n if first == 'call':\n align(nibbles)\n if first == 'lit':\n if line[1] in labels:\n return emit_lit(line[1])\n else:\n return emit_lit(int(line[1], 0) & 0xffff)\n if first == 'include':\n return map(proc_line, open(line[1]))\n if first[-2:] == '.s':\n return map(proc_line, open(first))\n if first == 'defmacro':\n return macros.update({line[1]: line[2:]})\n try:\n return emit_lit(int(first, 0) & 0xffff)\n except ValueError: # not a number\n pass\n if first in by_name:\n return nibbles.append(by_name[first].opcode)\n if first in labels:\n return emit_lit(labels[first])\n return map(proc_line, macros[first])\n\n\nmap(proc_line, args.infile)\n# stderr.write(str(labels) + '\\n')\n\nbyte = 0\nfor i, nibble in enumerate(nibbles):\n if i % 2:\n byte |= nibble\n args.outfile.write(chr(byte))\n else:\n byte = nibble << 4\n\nif not i % 2:\n args.outfile.write(chr(byte))\n"
},
{
"alpha_fraction": 0.5064152479171753,
"alphanum_fraction": 0.5272161960601807,
"avg_line_length": 26.95652198791504,
"blob_id": "778b930a9348dd2ae29ad5f8cd471486e9b3f552",
"content_id": "72d16aaf23422a3fc21264f61a94c2a34f76c834",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5144,
"license_type": "no_license",
"max_line_length": 90,
"num_lines": 184,
"path": "/tinystack_emu.py",
"repo_name": "andrewbuss/tinystack",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python2\n\n# Emulator for Tinystack\n\nimport sys\nfrom argparse import ArgumentParser, FileType\nfrom itertools import chain\n\nby_opcode = {}\nby_name = {}\n\n\ndef instruction(opcode):\n def instruction_(f):\n def call_instr(cpu):\n return f(cpu)\n\n call_instr.opcode = opcode\n call_instr.__name__ = f.__name__.split('_')[0]\n by_opcode[opcode] = call_instr\n by_name[call_instr.__name__] = call_instr\n return call_instr\n\n return instruction_\n\n\nclass Tinystack(object):\n @instruction(0x0)\n def nop_instr(cpu):\n \"do nothing\"\n\n @instruction(0x1)\n def nand_instr(cpu):\n \"bitwise NAND x and y, store result in x\"\n cpu.stack.append((~(cpu.stack.pop() & cpu.stack.pop())) & 0xffff)\n\n @instruction(0x2)\n def neg_instr(cpu):\n \"x = -x\"\n cpu.stack.append((~cpu.stack.pop() + 1) & 0xffff)\n\n @instruction(0x3)\n def add_instr(cpu):\n \"add x and y, unsigned\"\n cpu.stack.append((cpu.stack.pop() + cpu.stack.pop()) & 0xffff)\n\n @instruction(0x4)\n def rol_instr(cpu):\n \"x = y << x\"\n x = cpu.stack.pop() & 0xf\n y = cpu.stack.pop()\n cpu.stack.append(((y << x) | (y >> (16 - x))) & 0xffff)\n\n @instruction(0x5)\n def sign_instr(cpu):\n \"fill x with x's high bit. That is, 0xa99f -> 0xffff, 0x4485 -> 0x0000\"\n cpu.stack.append(0xFFFF if cpu.stack.pop() & 0x8000 else 0)\n\n @instruction(0x6)\n def swap_instr(cpu):\n \"swap x and y\"\n x, y = cpu.stack.pop(), cpu.stack.pop()\n cpu.stack.append(x), cpu.stack.append(y)\n\n @instruction(0x7)\n def save_instr(cpu):\n \"pop a value from the stack and push it onto the stash\"\n cpu.stash.append(cpu.stack.pop())\n\n @instruction(0x8)\n def rstor_instr(cpu):\n \"pop a value from the stash and push it onto the stack\"\n cpu.stack.append(cpu.stash.pop())\n\n @instruction(0x9)\n def dup_instr(cpu):\n \"duplicate the top of the stack\"\n cpu.stack.append(cpu.stack[-1])\n\n @instruction(0xa)\n def disc_instr(cpu):\n \"pop x and drop it on the floor\"\n cpu.stack.pop()\n\n @instruction(0xb)\n def lit_instr(cpu):\n \"push a literal nibble\"\n if cpu.last_lit != cpu.cycle_count - 1 or cpu.lit_shift == 16:\n cpu.lit_shift = 0\n if not cpu.lit_shift:\n cpu.stack.append(0)\n cpu.lit_next = True\n cpu.last_lit = cpu.cycle_count\n\n @instruction(0xc)\n def skip_instr(cpu):\n \"IP += x; push old IP+1 onto the stack\"\n offset = cpu.stack.pop()\n cpu.stack.append(cpu.ip + 1)\n if not offset: return\n cpu.new_ip = (cpu.ip + 1 + offset) & 0xFFFF\n\n @instruction(0xd)\n def call_instr(cpu):\n \"IP = x\"\n addr = cpu.stack.pop()\n cpu.stack.append(cpu.ip + 1)\n cpu.new_ip = addr\n\n @instruction(0xe)\n def ld_instr(cpu):\n \"x = *x\"\n x = cpu.stack.pop()\n value = cpu.mem[x]\n if not x & 1:\n value = (value << 8) | cpu.mem[x + 1]\n cpu.stack.append(value)\n\n @instruction(0xf)\n def st_instr(cpu):\n \"*x = y; x = (x+2)\"\n x = cpu.stack.pop()\n y = cpu.stack.pop()\n if y & 1:\n cpu.mem[x] = y & 0xff\n else:\n cpu.mem[x] = (y & 0xff00) >> 8\n cpu.mem[x + 1] = y & 0x00ff\n cpu.stack.append(y)\n\n def __init__(self, memory):\n self.ip = 0\n self.cycle_count = 0\n self.mem = memory\n self.stack = []\n self.stash = []\n self.last_lit = -2\n self.lit_shift = 16\n self.lit_next = False\n self.new_ip = None\n self.half = 0\n\n def execute_instruction(self, instr):\n h = lambda x: hex(x)[2:].rjust(4, '0')\n print h(self.ip),\n if self.lit_next:\n self.stack[-1] |= (instr << self.lit_shift)\n self.lit_next = False\n self.lit_shift += 4\n print '\\t',\n else:\n instr = by_opcode[instr]\n print '\\t', instr.__name__,\n instr(self)\n self.cycle_count += 
1\n print '\\t', ' '.join(chain(map(h, self.stack), '|', map(h, reversed(self.stash))))\n\n def step_once(self):\n if self.half:\n self.execute_instruction(self.mem[self.ip] & 0x0F)\n else:\n self.execute_instruction(self.mem[self.ip] >> 4)\n if self.half:\n self.ip += 1\n self.half ^= 1\n\n def step_until(self, end_addr):\n while self.ip < end_addr:\n self.step_once()\n if self.new_ip is not None:\n # We cannot skip in the last quarter of a word\n assert self.half or (self.ip & 1)\n self.step_once()\n self.ip = self.new_ip\n self.half = 0\n self.new_ip = None\n\n\nif __name__ == '__main__':\n parser = ArgumentParser()\n parser.add_argument('infile', nargs='?', type=FileType('r'), default=sys.stdin)\n args = parser.parse_args()\n memory = map(ord, args.infile.read())\n Tinystack(memory + [0] * (65536 - len(memory))).step_until(len(memory))\n"
}
] | 3 |
Crucizer/Audio-Player
|
https://github.com/Crucizer/Audio-Player
|
a115bed97fc37660545f1b51792bfa6e47c59159
|
d2617af2002ded1f7b8b88b2aacb5ccf5ae94802
|
575c65f490117c12d033a235fa68e3cb4438c602
|
refs/heads/master
| 2022-11-15T00:45:29.742484 | 2020-07-11T14:47:11 | 2020-07-11T14:47:11 | 274,173,683 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5429221987724304,
"alphanum_fraction": 0.565822958946228,
"avg_line_length": 29.639829635620117,
"blob_id": "587b7372864dc0539ed6c1cfc0f20d8d6a086cdd",
"content_id": "0d75c45e71b74f025352fd2536ded28618121711",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7467,
"license_type": "no_license",
"max_line_length": 113,
"num_lines": 236,
"path": "/main.py",
"repo_name": "Crucizer/Audio-Player",
"src_encoding": "UTF-8",
"text": "from PyQt5 import QtWidgets, QtGui\r\n# , QLineEdit, QVBoxLayout, QHBoxLayout\r\nfrom PyQt5.QtWidgets import QApplication, QMainWindow, QSlider\r\nfrom PyQt5.QtGui import QColor # , QMovie\r\nfrom PyQt5.QtCore import Qt\r\nimport sys\r\nimport pygame as pg\r\nfrom mutagen.mp3 import MP3\r\nimport os\r\nimport threading\r\n\r\npg.init()\r\n\r\n\r\nclass window(QMainWindow):\r\n def __init__(self):\r\n\r\n super(window, self).__init__()\r\n self.setGeometry(425, 65, 400, 190)\r\n self.setWindowIcon(QtGui.QIcon(\"icon\"))\r\n self.setWindowTitle(\"MultiMedia Player\")\r\n # MenuBar\r\n file = QtWidgets.QAction(\"&Open Mp3\", self)\r\n file.setShortcut(\"Ctrl + O\")\r\n file.triggered.connect(self.open_mp3)\r\n\r\n # Quit\r\n quit = QtWidgets.QAction(\"&Quit\", self)\r\n quit.setShortcut(\"Ctrl + Q\")\r\n quit.triggered.connect(self.close_app)\r\n\r\n # Add Items\r\n\r\n items = QtWidgets.QAction(\"&Add Items\", self)\r\n items.setShortcut(\"Ctrl + A\")\r\n # items.triggered.connect(self.items)\r\n\r\n mainmenu = self.menuBar()\r\n filemenu = mainmenu.addMenu(\"&Open\")\r\n filemenu.addAction(file)\r\n add_items = mainmenu.addMenu(\"&Add Items\")\r\n add_items.addAction(items)\r\n filemenu.addAction(quit)\r\n\r\n\r\n self.flag = 0\r\n\r\n self.home()\r\n\r\n def home(self):\r\n\r\n # colors\r\n black = (13, 13, 13)\r\n light_black = (36, 36, 36)\r\n\r\n # Pause Button\r\n self.pause_btn = QtWidgets.QPushButton(self)\r\n self.pause_btn.setText(\"Pause\")\r\n self.pause_btn.setShortcut(\"p\")\r\n self.pause_btn.move(0, 120)\r\n self.pause_btn.clicked.connect(self.pause)\r\n\r\n # Play Button\r\n self.play_btn = QtWidgets.QPushButton(self)\r\n self.play_btn.setText(\"Play\")\r\n self.play_btn.setShortcut(\"Space\")\r\n self.play_btn.move(150, 120)\r\n self.play_btn.clicked.connect(self.play)\r\n # Stop Button\r\n self.stop_btn = QtWidgets.QPushButton(self)\r\n self.stop_btn.setText(\"Stop\")\r\n self.stop_btn.setShortcut(\"s\")\r\n self.stop_btn.move(300, 120)\r\n\r\n self.stop_btn.clicked.connect(self.stop)\r\n # color for the window\r\n color = QColor(70, 70, 70)\r\n # Volume_Up Button\r\n self.vup_btn = QtWidgets.QPushButton(self)\r\n self.vup_btn.setText(\"V(+)\")\r\n self.vup_btn.setShortcut(\"+\")\r\n self.vup_btn.move(300, 160)\r\n self.vup_btn.clicked.connect(self.volume_up)\r\n\r\n # Volume_Down Button\r\n self.vdown_btn = QtWidgets.QPushButton(self)\r\n self.vdown_btn.setText(\"V(-)\")\r\n self.vdown_btn.setShortcut(\"-\")\r\n self.vdown_btn.move(0, 160)\r\n self.vdown_btn.clicked.connect(self.volume_down)\r\n\r\n # Seek Slider\r\n\r\n self.slider = QSlider(Qt.Horizontal, self)\r\n self.slider.setGeometry(20, 75, 350, 20)\r\n\r\n # Volume Slider\r\n\r\n self.v_slider = QSlider(Qt.Horizontal, self)\r\n self.v_slider.setGeometry(120, 165, 160, 20)\r\n self.v_slider.setMinimum(0)\r\n self.v_slider.setMaximum(100)\r\n self.v_slider.setValue(70)\r\n self.volume_value = self.v_slider.value()\r\n\r\n def msg(self, title, message):\r\n msg1 = QtWidgets.QMessageBox() # self maybe\r\n msg1.setWindowIcon(QtGui.QIcon(\"icon\"))\r\n msg1.setWindowTitle(title)\r\n msg1.setText(message)\r\n msg1.setStandardButtons(QtWidgets.QMessageBox.Ok)\r\n msg1.exec_()\r\n\r\n def open_mp3(self):\r\n name = QtWidgets.QFileDialog.getOpenFileName(self)\r\n\r\n format = os.path.splitext(name[0])\r\n if format[1] == \".mp3\":\r\n\r\n self.audio = MP3(name[0])\r\n self.duration = self.audio.info.length // 1\r\n\r\n self.min = int(self.duration // 60)\r\n self.sec = int(self.duration % 60)\r\n\r\n 
self.total_time = str(self.min) + \":\" + str(self.sec)\r\n\r\n self.slider.setMaximum(self.duration)\r\n self.slider.setMinimum(0)\r\n self.label = QtWidgets.QLabel(self)\r\n self.label.setText(self.total_time)\r\n self.label.setFont(QtGui.QFont(\"Arial\", 9))\r\n self.label.adjustSize()\r\n self.label.move(373, 77)\r\n self.label.show()\r\n\r\n song = name[0]\r\n pg.mixer.music.load(song)\r\n pg.mixer.music.play(1)\r\n pg.mixer.music.set_volume(self.v_slider.value() / 100)\r\n\r\n self.label = QtWidgets.QLabel(self)\r\n self.label.setText(song.split(\"/\")[-1])\r\n self.label.setFont(QtGui.QFont(\"Arial\", 15))\r\n self.label.adjustSize()\r\n self.label.move(0, 36)\r\n self.label.show()\r\n threading_1 = threading.Thread(target=self.cur_time).start()\r\n\r\n else:\r\n self.msg(\"Invalid Format\", \"Choose A .Mp3 File Only!\")\r\n\r\n volume_level = pg.mixer.music.get_volume()\r\n # print(volume_level)\r\n\r\n def cur_time(self):\r\n\r\n # NEEDS EDITING-----NEEDS EDITING-----NEEDS EDITING-----NEEDS EDITING-----NEEDS EDITING-----NEEDS EDITING\r\n true = 1\r\n while true == 1:\r\n if self.flag == 0:\r\n self.m_time = pg.mixer.music.get_pos()\r\n self.mm_time = self.m_time * 0.001\r\n self.s_time = self.mm_time // 1\r\n self.slider.setValue(self.s_time)\r\n if self.s_time == -1:\r\n true = 2\r\n\r\n def slider_value_changed(self):\r\n self.volume_value = self.v_slider.value()\r\n pg.mixer.music.set_volume(self.v_slider.value() / 100)\r\n\r\n def volume_up(self):\r\n #self.v_slider.value() - 10\r\n self.volume_value = self.volume_value + 10\r\n # print(self.volume_value)\r\n self.v_slider.setValue(self.volume_value)\r\n\r\n if self.volume_value >= 100:\r\n self.volume_value = 100\r\n\r\n # pg.mixer.music.set_volume(self.sound)\r\n pg.mixer.music.set_volume(self.v_slider.value() / 100)\r\n # print(self.v_slider.value() / 100)\r\n\r\n def volume_down(self):\r\n self.volume_value = self.volume_value - 10\r\n self.v_slider.setValue(self.volume_value)\r\n\r\n if self.volume_value <= 0:\r\n self.volume_value = 0\r\n\r\n pg.mixer.music.set_volume(self.v_slider.value() / 100)\r\n # print(self.v_slider.value() / 100)\r\n\r\n def pause(self):\r\n pg.mixer.music.pause()\r\n self.flag = 1\r\n\r\n def stop(self):\r\n pg.mixer.music.stop()\r\n self.flag = -1\r\n\r\n def play(self):\r\n pg.mixer.music.unpause()\r\n self.flag = 0\r\n\r\n def close_app(self):\r\n choice = QtWidgets.QMessageBox.question(\r\n self, \"QUIT\", \"You Sure You Wanna Quit?\", QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No)\r\n if choice == QtWidgets.QMessageBox.Yes:\r\n sys.exit()\r\n else:\r\n pass\r\n\r\n def items(self):\r\n # add item name to a list and then use this to add\r\n layout = QtWidgets.QVBoxLayout(self)\r\n song_name = QtWidgets.QFileDialog.getOpenFileName(self)\r\n\r\n widget = QtWidgets.QListWidget()\r\n widget.setAlternatingRowColors(True)\r\n widget.setDragDropMode(\r\n QtWidgets.QAbstractItemView.InternalMove)\r\n\r\n widget.addItems([str(i) for i in range(1, 6)])\r\n layout.addWidget(widget)\r\n\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n app = QApplication(sys.argv)\r\n win = window()\r\n win.show()\r\n sys.exit(app.exec_())\r\n"
}
] | 1 |
xryash/pipeline
|
https://github.com/xryash/pipeline
|
e2ed1b7cbae0046d2f5c7bb1b2d51b81d30a3440
|
f396da5bcfaad0da356e1a9daa9bf49a7fa9d3f0
|
2318cc901c0e4bd6bdbf480cd071caa68252b912
|
refs/heads/master
| 2020-07-21T12:24:20.886436 | 2019-12-27T18:28:47 | 2019-12-27T18:28:47 | 206,864,143 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.8020477890968323,
"alphanum_fraction": 0.8020477890968323,
"avg_line_length": 31.55555534362793,
"blob_id": "e23291724dbf0c763074545aeba8c51fd28e75c5",
"content_id": "f36d3ea7c9e2eeb98ef1dad007143949291bd386",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 293,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 9,
"path": "/docker-compose_runner.sh",
"repo_name": "xryash/pipeline",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env bash\nexport MONGODB_NODES=your_nodes\nexport MONGODB_ARGS=your_args\nexport MONGODB_CREDENTIALS=your_login:your_password\n\nexport MONGODB_DATABASE=\"hh-mongo\"\nexport MONGODB_COLLECTION=\"raw_vacancies\"\n\ndocker-compose -f docker-compose.kafka.yml -f docker-compose.etl.yml up --build\n"
},
{
"alpha_fraction": 0.6259204745292664,
"alphanum_fraction": 0.6597937941551208,
"avg_line_length": 21.600000381469727,
"blob_id": "71dc2eddae2fa56a1449662bd992d5290a5b4dbc",
"content_id": "dd4439b1e3123b285141241eb423ac9a083f1389",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Dockerfile",
"length_bytes": 679,
"license_type": "no_license",
"max_line_length": 105,
"num_lines": 30,
"path": "/spark_job/Dockerfile",
"repo_name": "xryash/pipeline",
"src_encoding": "UTF-8",
"text": "FROM ubuntu:latest\n\n\n# Python\nRUN \\\n apt-get update && \\\n apt-get install -y python python-dev python-pip python-virtualenv && \\\n rm -rf /var/lib/apt/lists/*\n\n# Install dependencies\nRUN \\\n pip install kafka-python\n\n# OpenJDK 8\nRUN \\\n apt-get update && \\\n apt-get install -y openjdk-8-jdk wget gnupg2 && \\\n rm -rf /var/lib/apt/lists/*\n\n\n# Apache Spark\nRUN wget --no-verbose http://mirror.linux-ia64.org/apache/spark/spark-2.4.4/spark-2.4.4-bin-hadoop2.7.tgz\n\n\nRUN tar -xzf /spark-2.4.4-bin-hadoop2.7.tgz && \\\n mv spark-2.4.4-bin-hadoop2.7 /spark && \\\n echo \"export PATH=$PATH:/spark/bin\" >> ~/.bashrc\n\n# Python script for running Spark\nCOPY spark_job.py /\n\n"
},
{
"alpha_fraction": 0.6400556564331055,
"alphanum_fraction": 0.6408901214599609,
"avg_line_length": 29.982759475708008,
"blob_id": "d9cc45681770e5fcad3134b3c7eb263fc55fe2b8",
"content_id": "fd26174d7e4a9878e6cc1964d5f53596164cc8c9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3595,
"license_type": "no_license",
"max_line_length": 104,
"num_lines": 116,
"path": "/spark_job/spark_job.py",
"repo_name": "xryash/pipeline",
"src_encoding": "UTF-8",
"text": "from pyspark.sql import SparkSession\nfrom pyspark import SparkContext\nfrom pyspark.streaming import StreamingContext\nfrom pyspark.streaming.kafka import KafkaUtils\nfrom pyspark.sql.types import IntegerType\nimport json\nimport logging\nimport os\nimport time\nfrom kafka import KafkaProducer\n\n\ndef setup_custom_logger(filename):\n \"\"\"Set configuration for logging\"\"\"\n\n logger = logging.getLogger('root')\n logger.setLevel(logging.INFO)\n\n # set file output handler and formatter for that\n file_handler = logging.FileHandler(filename)\n file_handler.setFormatter(logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))\n\n # set console output handler and formatter\n console_handler = logging.StreamHandler()\n console_handler.setFormatter(logging.Formatter('%(asctime)s - %(message)s'))\n\n logger.addHandler(console_handler)\n logger.addHandler(file_handler)\n\n return logger\n\ndef wait_for_kafka_connection(delay=5):\n \"\"\"Try to connect to kafka with the given delay\"\"\"\n while True:\n try:\n kafka = KafkaProducer(bootstrap_servers=KAFKA_BROKERS)\n LOGGER.info('Connection to kafka cluster established')\n kafka.close()\n break\n except:\n LOGGER.error('Can not connect to kafka cluster')\n time.sleep(delay)\n\n\nif __name__ == \"__main__\":\n\n KAFKA_BROKERS = os.environ.get('KAFKA_BROKERS').split()\n\n KAFKA_TOPIC = os.environ.get('KAFKA_TOPIC')\n\n SPARK_STREAMING_DELAY = os.environ.get('SPARK_STREAMING_DELAY')\n\n MONGODB_URI = os.environ.get('MONGODB_URI')\n\n SPARK_LOGS = os.environ.get('SPARK_LOGS')\n\n # init logger\n LOGGER = setup_custom_logger(SPARK_LOGS)\n\n LOGGER.info('Starting Spark session...')\n\n # create spark session\n spark = SparkSession \\\n .builder \\\n .appName(\"BigDataAnalyzer\") \\\n .config(\"spark.mongodb.output.uri\", MONGODB_URI) \\\n .getOrCreate()\n\n LOGGER.info('Spark session started')\n\n # init spark context\n sc = spark.sparkContext\n\n # init streaming spark context\n streaming_sc = StreamingContext(sc, int(SPARK_STREAMING_DELAY))\n\n LOGGER.info('Creating direct stream to kafka cluster......')\n\n\n # wait for connection to kafka cluster\n wait_for_kafka_connection()\n\n # create direct stream to kafka cluster\n kafka_stream = KafkaUtils.createDirectStream(streaming_sc, [KAFKA_TOPIC],\n {\"metadata.broker.list\": (\n ','.join(str(x) for x in KAFKA_BROKERS))})\n\n LOGGER.info('Direct stream to kafka cluster created')\n\n # extract messages\n messages = kafka_stream.map(lambda x: x[1])\n\n LOGGER.info('{} messages received'.format(messages.count()))\n\n def func(rdd):\n \"\"\"Handle spark rdd data and save it to database\"\"\"\n if not rdd.isEmpty():\n df = spark.read.json(sc.parallelize([json.loads(row) for row in rdd.collect()]))\n df = df.withColumn('id', df.id.cast(IntegerType()))\n\n try:\n LOGGER.info('Saving messages to database...')\n df.write.format(\"mongo\").mode(\"append\").save()\n LOGGER.info(\"Saving messages to database completed successfully\")\n except Exception as err:\n LOGGER.error('Error saving messages to database')\n LOGGER.error(err)\n\n\n if messages.count() is not 0:\n LOGGER.info('Handling messages')\n messages.foreachRDD(func)\n\n # start listening\n streaming_sc.start()\n streaming_sc.awaitTermination()\n\n"
},
{
"alpha_fraction": 0.6322008967399597,
"alphanum_fraction": 0.6353027820587158,
"avg_line_length": 32.01463317871094,
"blob_id": "fe2a76ba25e8c2dd754048d156ca2a458a28379e",
"content_id": "807d64975b879ba763c4c4cb964bf9e1ad1c9794",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6770,
"license_type": "no_license",
"max_line_length": 119,
"num_lines": 205,
"path": "/connector/connector.py",
"repo_name": "xryash/pipeline",
"src_encoding": "UTF-8",
"text": "import requests\nimport json\nimport logging\nimport os\nimport time\nimport sys\n\nfrom concurrent.futures import ThreadPoolExecutor, wait, ALL_COMPLETED\nfrom kafka import KafkaProducer\nimport multiprocessing as mp\n\n\ndef setup_custom_logger(filename):\n \"\"\"Set configuration for logging\"\"\"\n\n logger = logging.getLogger('root')\n logger.setLevel(logging.INFO)\n\n # set file output handler and formatter for that\n file_handler = logging.FileHandler(filename)\n file_handler.setFormatter(logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))\n\n # set console output handler and formatter\n console_handler = logging.StreamHandler()\n console_handler.setFormatter(logging.Formatter('%(asctime)s - %(message)s'))\n\n logger.addHandler(console_handler)\n logger.addHandler(file_handler)\n\n return logger\n\n\ndef worker_func(vacancy_id):\n \"\"\"Thread function for doing requests to API\"\"\"\n url = REQUESTS_URL_PATTERN % vacancy_id\n try:\n # do request and decode body\n response = requests.get(url)\n content = response.content.decode('utf-8')\n\n return {'status_code': response.status_code, 'id': vacancy_id, 'content': content}\n\n except:\n # if there is an error, return 0 as a status_code and id of vacancy\n LOGGER.error('An error occurred on id {}'.format(vacancy_id))\n return {'status_code': 0, 'id': vacancy_id}\n\n\ndef handle_error_data(error_data, queue):\n \"\"\"Handle data that was returned with an error\"\"\"\n for item in error_data:\n queue.put(int(item['id']))\n\n\ndef send_to_kafka(correct_data):\n \"\"\"Handle correct data from futures and send it to Kafka\"\"\"\n\n # init kafka producer\n kafka_producer = KafkaProducer(bootstrap_servers=KAFKA_BROKERS,\n value_serializer=lambda x: json.dumps(x).encode('utf-8'))\n\n sent = 0\n\n for elem in correct_data:\n try:\n # try to send data to kafka\n message = elem['content']\n future = kafka_producer.send(KAFKA_TOPIC, message)\n future.get(timeout=5)\n sent += 1\n except:\n LOGGER.error('An error occurred on id {}'.format(elem['id']))\n\n # finally flush data\n kafka_producer.flush()\n LOGGER.info(\n ' {}/{} messages have been sent to Kafka, Kafka topic: {}'.format(sent, len(correct_data),\n KAFKA_TOPIC))\n\n\ndef handle_not_done_requests(not_done_futures):\n \"\"\"Handle futures that failed\"\"\"\n pass\n\n\ndef sort_done_requests(done_requests):\n \"\"\"Handle futures that done\"\"\"\n\n correct_data, incorrect_data, error_data = [], [], []\n\n for future in done_requests:\n future_body = future.result()\n\n # if requests was successful\n if future_body['status_code'] is 200:\n correct_data.append(future_body)\n\n # if requests was failed with a connection error\n elif future_body['status_code'] is 0:\n error_data.append(future_body)\n\n # if requests failed with errors like 404, 403 and etc..\n else:\n incorrect_data.append(future_body)\n\n return correct_data, incorrect_data, error_data\n\n\ndef handler_func(done, not_done, queue):\n \"\"\"Thread handler function\"\"\"\n\n LOGGER.info('Done requests: {}, Not done requests {}'.format(len(done), len(not_done)))\n\n # handle not done requests\n handle_not_done_requests(not_done)\n\n # sort requests by their status\n correct_data, incorrect_data, error_data = sort_done_requests(done)\n\n LOGGER.info(\n 'Correct messages: {},Incorrect messages: {}, Error messages {}'.format(len(correct_data), len(incorrect_data),\n len(error_data)))\n # send correct data to kafka cluster\n send_to_kafka(correct_data)\n\n handle_error_data(error_data, queue)\n\n\ndef 
start_jobs(ids, worker_func, workers_number=6):\n \"\"\"Start workers with specified range of indexes\"\"\"\n with ThreadPoolExecutor(max_workers=workers_number) as executor:\n # do requests asynchronously\n futures = [executor.submit(worker_func, i) for i in ids]\n\n # wait for all threads to finish executing with specified timeout\n done, not_done = wait(futures, timeout=REQUESTS_TIMEOUT, return_when=ALL_COMPLETED)\n\n return done, not_done\n\n\ndef wait_for_kafka_connection(delay=5):\n \"\"\"Try to connect to kafka with the given delay\"\"\"\n while True:\n try:\n kafka = KafkaProducer(bootstrap_servers=KAFKA_BROKERS)\n LOGGER.info('Connection to kafka cluster established')\n kafka.close()\n break\n except:\n LOGGER.error('Can not connect to kafka cluster')\n time.sleep(delay)\n\n\n\nif __name__ == \"__main__\":\n # start and sto values for range of ids\n START_ID = int(os.environ.get('START_ID'))\n STOP_ID = int(os.environ.get('STOP_ID'))\n # value defines the number of requests for each iteration\n STEP = int(os.environ.get('STEP'))\n # timeout for function waits for completion of all requests\n REQUESTS_TIMEOUT = int(os.environ.get('REQUESTS_TIMEOUT'))\n # pattern url that is used for doing requests to api\n REQUESTS_URL_PATTERN = os.environ.get('REQUESTS_URL_PATTERN')\n # path to logs\n CONNECTOR_LOGS = os.environ.get('CONNECTOR_LOGS')\n # delay to job\n START_DOWNLOADING_AFTER = int(os.environ.get('START_DOWNLOADING_AFTER'))\n # kafka data\n KAFKA_TOPIC = os.environ.get('KAFKA_TOPIC')\n KAFKA_BROKERS = os.environ.get('KAFKA_BROKERS').split(' ')\n\n # init queue for not successful requests\n queue = mp.Queue()\n\n current = START_ID\n\n LOGGER = setup_custom_logger(CONNECTOR_LOGS)\n\n # wait for connection to kafka cluster\n wait_for_kafka_connection()\n\n time.sleep(START_DOWNLOADING_AFTER)\n\n while current < STOP_ID:\n # check queue size and if it is bigger or equal to a step, use values from queue\n LOGGER.info('{} elements are waiting to be downloaded'.format(queue.qsize()))\n if queue.qsize() > STEP:\n LOGGER.info('Downloading elements from queue...')\n\n # take N elements from queue\n ids = [queue.get() for _ in range(STEP)]\n\n else:\n LOGGER.info('Downloading elements from range {} to {}...'.format(current, current + STEP))\n\n ids = range(current, current + STEP)\n current += STEP\n\n # start jobs\n done_futures, not_done_futures = start_jobs(ids, worker_func=worker_func, workers_number=10)\n\n # start handler for workers results\n futures_handler = mp.Process(target=handler_func, args=(done_futures, not_done_futures, queue))\n futures_handler.start()\n\n\n"
},
{
"alpha_fraction": 0.8110429644584656,
"alphanum_fraction": 0.820858895778656,
"avg_line_length": 37.85714340209961,
"blob_id": "83c972c14635a66be49aae0f1d0bc67c51f762c0",
"content_id": "037fda50d1e12d42a60a7d01fb87408e731d497d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 815,
"license_type": "no_license",
"max_line_length": 156,
"num_lines": 21,
"path": "/deploy_to_kubernetes.sh",
"repo_name": "xryash/pipeline",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env bash\nexport MONGODB_NODES=your_nodes\nexport MONGODB_ARGS=your_args\nexport MONGODB_CREDENTIALS=your_login:your_password\n\nexport MONGODB_DATABASE=\"hh-mongo\"\nexport MONGODB_COLLECTION=\"raw_vacancies\"\n\nkompose convert -f docker-compose.kafka.yml -f docker-compose.etl.yml\n\n# deploy Kafka instances and zookeeper\nkubectl create -f kafka-1-service.yaml,kafka-2-service.yaml,zookeeper-service.yaml,kafka-1-deployment.yaml,kafka-2-deployment.yaml,zookeeper-deployment.yaml\n\n# deploy connector\nkubectl create -f connector-deployment.yaml\n\n# deploy spark streaming job\nkubectl create -f spark-job-deployment.yaml\n\nrm kafka-1-service.yaml kafka-2-service.yaml zookeeper-service.yaml kafka-1-deployment.yaml kafka-2-deployment.yaml zookeeper-deployment.yaml\nrm connector-deployment.yaml spark-job-deployment.yaml"
},
{
"alpha_fraction": 0.6628788113594055,
"alphanum_fraction": 0.6818181872367859,
"avg_line_length": 16.53333282470703,
"blob_id": "2578255335519e5e271a1e7536c860965eed82c2",
"content_id": "6fc0541786aad8d267d6b533ecb5ce88a14b1dee",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Dockerfile",
"length_bytes": 264,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 15,
"path": "/connector/Dockerfile",
"repo_name": "xryash/pipeline",
"src_encoding": "UTF-8",
"text": "FROM ubuntu:latest\n\n# Python\nRUN \\\n apt-get update && \\\n apt-get install -y python3 python3-dev python3-pip python3-virtualenv && \\\n rm -rf /var/lib/apt/lists/*\n\n\n# Install dependencies\nRUN \\\n pip3 install kafka-python requests\n\n\nCOPY connector.py /\n\n"
}
] | 6 |
michalregula/autonomous-vehicle
|
https://github.com/michalregula/autonomous-vehicle
|
0af55945a06e3cd1786c06019bd6d507f506c119
|
cb085c86dfaf2a90a8ef5c74e3504a202200d2d3
|
0ab8b5f9332d26cfc360cf2747225205a744968d
|
refs/heads/master
| 2018-11-23T11:59:05.267891 | 2018-09-04T13:45:11 | 2018-09-04T13:45:11 | 147,325,337 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6078665256500244,
"alphanum_fraction": 0.6233611702919006,
"avg_line_length": 30.074073791503906,
"blob_id": "31bb41bf4c0747bb3abd1c8e1252c04f0c90b9ee",
"content_id": "226e121765fb8262a1b2ab4576d9f662adc107ab",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 839,
"license_type": "no_license",
"max_line_length": 60,
"num_lines": 27,
"path": "/autonomous_vehicle/autonomous_vehicle.py",
"repo_name": "michalregula/autonomous-vehicle",
"src_encoding": "UTF-8",
"text": "import cv2\nfrom vehicles import vehicle\nfrom image_analyzer import routes, route_detector, camera\nfrom controllers import vehicle_controller\n\n\nroute = routes[190]\n\nwith vehicle, camera:\n for frame in camera:\n if route_detector.detect(frame, route):\n direction = route_detector.get_direction(frame)\n vehicle_controller.update(direction)\n vehicle.motors_speed = vehicle_controller.output\n else:\n vehicle.stop()\n vehicle_controller.clear()\n route_detector.draw_info(frame)\n route.draw_info(frame)\n cv2.namedWindow('Camera', cv2.WINDOW_NORMAL)\n cv2.imshow('Camera', frame)\n key = cv2.waitKey(1) & 0xFF\n if key in routes:\n route = routes[key]\n if key == 27:\n cv2.destroyAllWindows()\n break\n"
},
{
"alpha_fraction": 0.5658769607543945,
"alphanum_fraction": 0.5967373251914978,
"avg_line_length": 37.650604248046875,
"blob_id": "93a7ca9d4cfc3b11ed98f16e38cbb2bfb1f6ab0b",
"content_id": "f76fac8fb04c347b053cd18cf55e411072346d57",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 9624,
"license_type": "no_license",
"max_line_length": 122,
"num_lines": 249,
"path": "/autonomous_vehicle/image_analyzer.py",
"repo_name": "michalregula/autonomous-vehicle",
"src_encoding": "UTF-8",
"text": "import cv2\nimport numpy as np\nfrom picamera import PiCamera\nfrom picamera.array import PiRGBArray\nfrom functools import lru_cache\n\n\nclass Color:\n def __init__(self, lower, upper, name='UNDEFINED'):\n self.lower = np.uint8(lower)\n self.upper = np.uint8(upper)\n self.name = name\n\n @classmethod\n def from_hue(cls, lower, upper, name='UNDEFINED'):\n return cls(lower=[lower, 50, 50], upper=[upper, 255, 255], name=name)\n\n\ncolors = {\n 'red': Color(name='RED', lower=[170, 100, 30], upper=[10, 255, 255]),\n 'blue': Color(name='BLUE', lower=[90, 40, 30], upper=[130, 255, 255])\n}\n\n\nclass AbstractImageEditor:\n FONT, SCALE = cv2.FONT_HERSHEY_SIMPLEX, 0.4\n THICKNESS = 1\n BORDER = 7\n\n WHITE = (255, 255, 255)\n RED = (0, 0, 255)\n GREEN = (0, 255, 0)\n BLUE = (255, 0, 0)\n\n\nclass Route(AbstractImageEditor):\n def __init__(self, color, width):\n self.color = color\n self.width = width\n\n def draw_info(self, image):\n _, text_height = cv2.getTextSize(' ', self.FONT, self.SCALE, self.THICKNESS)[0]\n dy = text_height + self.BORDER\n\n text_color = f'COLOR: {self.color.name}'\n cv2.putText(image, text_color, (self.BORDER, dy), self.FONT, self.SCALE, self.WHITE, self.THICKNESS)\n\n color_text_width, _ = cv2.getTextSize(text_color, self.FONT, self.SCALE, self.THICKNESS)[0]\n colored_rect_x1 = color_text_width + 2 * self.BORDER\n colored_rect_y1 = dy - text_height + 1\n rectangle_width = 15\n colored_rect_x2 = colored_rect_x1 + rectangle_width\n colored_rect_y2 = colored_rect_y1 + text_height - 1\n color_lower = tuple(map(int, cv2.cvtColor(np.uint8([[self.color.lower]]), cv2.COLOR_HSV2BGR)[0][0]))\n color_upper = tuple(map(int, cv2.cvtColor(np.uint8([[self.color.upper]]), cv2.COLOR_HSV2BGR)[0][0]))\n cv2.rectangle(image, (colored_rect_x1, colored_rect_y1), (colored_rect_x2, colored_rect_y2), color_upper, -1)\n cv2.rectangle(image, (colored_rect_x2, colored_rect_y1), (colored_rect_x2 + rectangle_width, colored_rect_y2),\n color_lower, -1)\n\n cv2.putText(image, f'WIDTH: {self.width} mm', (self.BORDER, 2 * dy), self.FONT, self.SCALE, self.WHITE,\n self.THICKNESS)\n\n\nroutes = {\n 190: Route(color=colors['red'], width=18),\n 191: Route(color=colors['blue'], width=14),\n}\n\n\nclass RouteDetector(AbstractImageEditor):\n def __init__(self, horizon, dist_between_detection_points, accuracy, image_resolution):\n self.horizon = horizon\n self.dist_between_detection_points = dist_between_detection_points\n self.accuracy = accuracy\n self._detection_rows = self._get_detection_rows(image_resolution[1])\n self._detected_points = None\n\n def detect(self, image, route):\n image_height = image.shape[0]\n\n horizon_line = int((1 - self.horizon) * image_height - 1)\n median = cv2.medianBlur(image[horizon_line:, :], 7)\n hsv = cv2.cvtColor(median, cv2.COLOR_BGR2HSV)\n if route.color.lower[0] < route.color.upper[0]:\n mask = cv2.inRange(hsv, route.color.lower, route.color.upper)\n else:\n upper_s, upper_v = route.color.upper[1], route.color.upper[2]\n mask1 = cv2.inRange(hsv, route.color.lower, np.uint8([179, upper_s, upper_v]))\n lower_s, lower_v = route.color.lower[1], route.color.lower[2]\n mask2 = cv2.inRange(hsv, np.uint8([0, lower_s, lower_v]), route.color.upper)\n mask = mask1 + mask2\n\n self._detected_points = []\n for row in self._detection_rows:\n non_zero_pixels = np.flatnonzero(mask[row - int((1 - self.horizon) * image_height - 1)])\n min_width, max_width = self._get_route_width(row, route)\n if max_width > len(non_zero_pixels) > min_width:\n expected_standard_deviation = 
np.std(np.array([x for x in range(0, max_width)]))\n actual_standard_deviation = np.std(non_zero_pixels)\n if actual_standard_deviation < 1.05 * expected_standard_deviation:\n middle = len(non_zero_pixels) // 2\n self._detected_points.append((non_zero_pixels[middle], row))\n\n acceptance_threshold = self.accuracy * len(self._detection_rows)\n if len(self._detected_points) < acceptance_threshold:\n self._detected_points = None\n return False\n return True\n\n def _get_detection_rows(self, image_height):\n measured_real_distance = np.array([0, 1, 2, 3, 4.5, 6, 8, 10, 12.5])\n measured_rows_on_image = np.array([0, 13, 24, 34, 45, 54, 63, 70, 77])\n polynomial = np.poly1d(np.polyfit(measured_real_distance, measured_rows_on_image, 3))\n\n detection_rows = []\n real_distance = 0\n while True:\n row = image_height - 1 - int(round(polynomial(real_distance)))\n if row < (1 - self.horizon) * image_height:\n break\n detection_rows.append(row)\n real_distance += self.dist_between_detection_points\n return detection_rows\n\n @lru_cache(maxsize=40)\n def _get_route_width(self, row, route):\n reference_route_real_width = 19\n reference_route_width_at_bottom = 73\n reference_route_width_at_quarter = 38\n reference_image_height = 240\n x1 = reference_image_height - 1\n x2 = 0.75 * reference_image_height - 1\n x = np.array([x1, x2])\n\n route_resize_ratio = route.width / reference_route_real_width\n y1 = route_resize_ratio * reference_route_width_at_bottom\n y2 = route_resize_ratio * reference_route_width_at_quarter\n y = np.array([y1, y2])\n polynomial = np.poly1d(np.polyfit(x, y, 1))\n\n width = round(polynomial(row))\n tolerance = 0.4\n\n min_width = int((1 - tolerance) * width)\n if min_width < 2:\n min_width = 2\n\n max_width = int((1 + tolerance) * width)\n if max_width < min_width:\n max_width = min_width\n\n return min_width, max_width\n\n def get_direction(self, image):\n if self._detected_points is None:\n return None\n _, image_width, _ = image.shape\n x, _ = self._detected_points[-1]\n return x - (image_width // 2)\n\n def draw_info(self, image):\n if self._detected_points is None:\n self._draw_warning(image)\n else:\n self._draw_route(image)\n self._draw_detected_points(image)\n self._draw_horizon(image)\n self._draw_direction(image)\n\n def _draw_route(self, image):\n points = np.array([self._detected_points])\n cv2.polylines(image, points, False, self.GREEN, 6)\n\n def _draw_detected_points(self, image):\n for point in self._detected_points:\n cv2.circle(image, point, 1, self.BLUE, -1)\n\n def _draw_direction(self, image):\n direction = self.get_direction(image)\n if direction is None:\n direction = '---'\n text = f'DIRECTION: {direction:4}'\n image_height, image_width, _ = image.shape\n cv2.putText(image, text, (self.BORDER, image_height - 1 - self.BORDER), self.FONT, self.SCALE, self.WHITE,\n self.THICKNESS)\n\n def _draw_horizon(self, image):\n line_width = 20\n image_height, image_width, _ = image.shape\n y = int(round((1 - self.horizon) * image_height))\n cv2.line(image, (0, y), (line_width, y), self.BLUE, self.THICKNESS)\n cv2.line(image, (image_width - 1 - line_width, y), (image_width - 1, y), self.BLUE, self.THICKNESS)\n\n def _draw_warning(self, image):\n text = \"ROUTE NOT DETECTED\"\n text_width, text_height = cv2.getTextSize(text, self.FONT, self.SCALE, self.THICKNESS)[0]\n image_height, image_width, _ = image.shape\n text_x = (image_width - text_width) // 2\n text_y = (image_height + text_height) // 2\n text_border = self.BORDER + 2\n background_x1 = text_x - text_border\n 
background_y1 = text_y - text_height - text_border\n background_x2 = background_x1 + text_width + 2 * text_border\n background_y2 = background_y1 + text_height + 2 * text_border\n cv2.rectangle(image, (background_x1, background_y1), (background_x2, background_y2), self.RED, -1)\n cv2.putText(image, text, (text_x, text_y), self.FONT, self.SCALE, self.WHITE, self.THICKNESS)\n\n\nclass Camera(PiCamera):\n def __init__(self, resolution):\n super().__init__(resolution=resolution)\n self.RGBArray = PiRGBArray(self)\n\n def __iter__(self):\n self._frames = self.capture_continuous(self.RGBArray, format='bgr', use_video_port=True)\n return self\n\n def __next__(self):\n self.RGBArray.truncate(0)\n return next(self._frames).array\n\n def __exit__(self, *args, **kwargs):\n del self._frames\n super().__exit__(*args, **kwargs)\n\n\ncamera = Camera(resolution=(320, 240))\nroute_detector = RouteDetector(horizon=0.3, dist_between_detection_points=1.4, accuracy=0.25, image_resolution=(320, 240))\n\n\ndef main():\n route = routes[190]\n with camera:\n for frame in camera:\n route_detector.detect(frame, route)\n route_detector.draw_info(frame)\n route.draw_info(frame)\n cv2.namedWindow('Camera', cv2.WINDOW_NORMAL)\n cv2.imshow('Camera', frame)\n key = cv2.waitKey(1) & 0xFF\n if key in routes:\n route = routes[key]\n if key == 27:\n cv2.destroyAllWindows()\n break\n\n\nif __name__ == '__main__':\n main()\n"
},
{
"alpha_fraction": 0.5600000023841858,
"alphanum_fraction": 0.5644943714141846,
"avg_line_length": 25.488094329833984,
"blob_id": "7ddb4c648406a1f980c67aa1748b7d77fc5a28a6",
"content_id": "29dd40411a8613871c449b6154df88a84c53a5ff",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2225,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 84,
"path": "/autonomous_vehicle/vehicles.py",
"repo_name": "michalregula/autonomous-vehicle",
"src_encoding": "UTF-8",
"text": "import curses\nfrom motors import Motor, left_motors, right_motors\n\n\nclass Vehicle:\n DEFAULT_SPEED = Motor.MAX_SPEED\n\n def __init__(self, left_motors, right_motors):\n self.left_motors = left_motors\n self.right_motors = right_motors\n self.motors_speed = 0, 0\n\n def __enter__(self):\n return self\n\n def __exit__(self, *args, **kwargs):\n self.stop()\n\n @property\n def motors_speed(self):\n return self._motors_speed\n\n @motors_speed.setter\n def motors_speed(self, value):\n self.left_motors.speed, self.right_motors.speed = value\n self._motors_speed = self.left_motors.speed, self.right_motors.speed\n\n def forward(self, speed=DEFAULT_SPEED):\n self.motors_speed = speed, speed\n\n def backward(self, speed=DEFAULT_SPEED):\n self.motors_speed = -speed, -speed\n\n def stop(self):\n self.motors_speed = 0, 0\n\n def left(self, speed=DEFAULT_SPEED):\n self.motors_speed = 0, speed\n\n def right(self, speed=DEFAULT_SPEED):\n self.motors_speed = speed, 0\n\n def rotate_left(self, speed=DEFAULT_SPEED):\n self.motors_speed = -speed, speed\n\n def rotate_right(self, speed=DEFAULT_SPEED):\n self.motors_speed = speed, -speed\n\n\nvehicle = Vehicle(left_motors, right_motors)\n\n\ndef main(stdscr):\n actions = {\n curses.KEY_UP: vehicle.forward,\n curses.KEY_DOWN: vehicle.backward,\n curses.KEY_LEFT: vehicle.left,\n curses.KEY_RIGHT: vehicle.right,\n ord('q'): vehicle.rotate_left,\n ord('w'): vehicle.rotate_right\n }\n curses.halfdelay(1)\n next_key = None\n with vehicle:\n while True:\n if next_key is None:\n key = stdscr.getch()\n else:\n key = next_key\n next_key = None\n if key != -1:\n action = actions.get(key)\n if action is not None:\n action()\n curses.halfdelay(5)\n next_key = stdscr.getch()\n curses.halfdelay(1)\n while next_key == key:\n next_key = stdscr.getch()\n vehicle.stop()\n\n\nif __name__ == '__main__':\n curses.wrapper(main)\n"
},
{
"alpha_fraction": 0.5732553601264954,
"alphanum_fraction": 0.5995615720748901,
"avg_line_length": 28.43010711669922,
"blob_id": "9ab6599036d9af90da34efaa4410c6310d596ca4",
"content_id": "6c7fde995bcc252f3037d533f920f16dd1806cdd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2737,
"license_type": "no_license",
"max_line_length": 98,
"num_lines": 93,
"path": "/autonomous_vehicle/motors.py",
"repo_name": "michalregula/autonomous-vehicle",
"src_encoding": "UTF-8",
"text": "import os\nimport time\nimport wiringpi\n\n\"\"\"\npwm_frequency[Hz] = 19.2e6[Hz] / pwm_clock / pwm_range\npwm_clock = 2, pwm_range = 480\npwm_frequency = 19.2[MHz] / 2 / 480 = 20[kHz]\n\"\"\"\n\nif os.geteuid() != 0:\n exit(\"Due to using 'wiringipi' and accessing the PWM hardware you must have root privileges. \"\n \"Please try again using 'sudo'. Exiting.\")\n\n\nclass Motor:\n _PWM_RANGE = 480\n _SPEED_OFFSET = 335\n MAX_SPEED = _PWM_RANGE - _SPEED_OFFSET\n\n def __init__(self, name, pwm_pin, direction_pins):\n self.name = name\n self.pwm_pin = pwm_pin\n self.direction_pins = direction_pins\n self._setup_gpio(pwm_pin, direction_pins)\n self.speed = 0\n\n def __str__(self):\n return 'MOTORS: {:10} SPEED: {:>3}'.format(self.name, self.speed)\n\n @property\n def speed(self):\n return self._speed\n\n @speed.setter\n def speed(self, value):\n value = max(min(value, self.MAX_SPEED), -self.MAX_SPEED)\n self._speed = value\n self._set_speed(value)\n\n def _set_speed(self, speed):\n if speed > 0:\n direction_values = 0, 1\n speed = speed + self._SPEED_OFFSET\n elif speed == 0:\n direction_values = 1, 1\n speed = self._PWM_RANGE\n else:\n direction_values = 1, 0\n speed = -speed + self._SPEED_OFFSET\n\n wiringpi.digitalWrite(self.direction_pins[0], direction_values[0])\n wiringpi.digitalWrite(self.direction_pins[1], direction_values[1])\n wiringpi.pwmWrite(self.pwm_pin, speed)\n\n def _setup_gpio(self, pwm_pin, direction_pins):\n wiringpi.wiringPiSetupGpio()\n\n wiringpi.pinMode(pwm_pin, wiringpi.GPIO.PWM_OUTPUT)\n wiringpi.pwmSetMode(wiringpi.GPIO.PWM_MODE_MS)\n wiringpi.pwmSetRange(self._PWM_RANGE)\n wiringpi.pwmSetClock(2)\n\n wiringpi.pinMode(direction_pins[0], wiringpi.GPIO.OUTPUT)\n wiringpi.pinMode(direction_pins[1], wiringpi.GPIO.OUTPUT)\n\n\nleft_motors = Motor(name='LEFT', pwm_pin=12, direction_pins=(16, 20),)\nright_motors = Motor(name='RIGHT', pwm_pin=13, direction_pins=(19, 26))\n\n\ndef test_motor(motor, speeds, interval):\n for s in speeds:\n motor.speed = s\n print(motor)\n time.sleep(interval)\n\n\ndef main():\n speeds = [s for s in range(0, Motor.MAX_SPEED, 1)] + \\\n [Motor.MAX_SPEED] * 50 + \\\n [s for s in range(Motor.MAX_SPEED, 0, -1)] + \\\n [0]\n speeds = speeds + [-s for s in speeds]\n try:\n test_motor(motor=left_motors, speeds=speeds, interval=0.025)\n test_motor(motor=right_motors, speeds=speeds, interval=0.025)\n finally:\n left_motors.speed, right_motors.speed = 0, 0\n\n\nif __name__ == '__main__':\n main()\n"
},
{
"alpha_fraction": 0.5526742339134216,
"alphanum_fraction": 0.5672609210014343,
"avg_line_length": 28.854839324951172,
"blob_id": "c6eaab0ac0f5a9f594e7741e5d2ecdf2fa8f71f7",
"content_id": "7a1b73e931b02ea3f8b2685a2b4524a130d09d13",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1851,
"license_type": "no_license",
"max_line_length": 89,
"num_lines": 62,
"path": "/autonomous_vehicle/controllers.py",
"repo_name": "michalregula/autonomous-vehicle",
"src_encoding": "UTF-8",
"text": "import time\n\n\ndef limit(value, minimum, maximum):\n return max(min(value, maximum), minimum)\n\n\nclass PID:\n def __init__(self, k_p=0, k_i=0, k_d=0, setpoint=0, anti_windup=0):\n self.k_p = k_p\n self.k_i = k_i\n self.k_d = k_d\n self.setpoint = setpoint\n self.anti_windup = anti_windup\n self.output = 0\n self.last_time = time.time()\n self.last_error = 0\n self._integral = 0\n\n def clear(self):\n self.output = 0\n self.last_time = time.time()\n self.last_error = 0\n self._integral = 0\n\n def update(self, feedback):\n error = self.setpoint - feedback\n delta_time = time.time() - self.last_time\n\n p_term = self.k_p * error\n\n i_term = self.k_i * (self._integral + error * delta_time)\n i_term = limit(value=i_term, minimum=-self.anti_windup, maximum=self.anti_windup)\n self._integral = i_term\n\n d_term = self.k_d * (error - self.last_error) / delta_time\n\n self.last_error = error\n self.output = p_term + i_term + d_term\n\n\nclass VehicleController:\n def __init__(self, pid, base_speed):\n self.pid = pid\n self.base_speed = base_speed\n self.output = 0, 0\n\n def clear(self):\n self.output = 0, 0\n self.pid.clear()\n\n def update(self, direction):\n self.pid.update(direction)\n left_motor_speed = limit(value=(self.base_speed - int(self.pid.output)),\n minimum=0, maximum=(2 * self.base_speed))\n right_motor_speed = limit(value=(self.base_speed + int(self.pid.output)),\n minimum=0, maximum=(2 * self.base_speed))\n self.output = left_motor_speed, right_motor_speed\n\n\npid = PID(k_p=0.85, k_i=0.2, anti_windup=2)\nvehicle_controller = VehicleController(pid=pid, base_speed=40)\n"
},
{
"alpha_fraction": 0.6657717823982239,
"alphanum_fraction": 0.6984339952468872,
"avg_line_length": 36.25,
"blob_id": "502a805a592f63a838457c10eb14a75d1278dc2e",
"content_id": "dacb04ff7c4101b6f364e1292d16504b0253048e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2235,
"license_type": "no_license",
"max_line_length": 376,
"num_lines": 60,
"path": "/README.md",
"repo_name": "michalregula/autonomous-vehicle",
"src_encoding": "UTF-8",
"text": "# Electric vehicle controlled by computer vision system\n<p align=\"center\">\n <img src=\"/other/img/routes.jpg\" alt=\"routes\" width=\"250\"/>\n</p>\nThe main goal of this project was to develop a method of image processing and analysis and vehicle motion control which in combination enable its autonomous navigation on routes represented by different colors and widths. In order to test the software, an electric vehicle model was built. The vehicle is controlled by Raspberry Pi computer. The software is written in Python.\n\n## Electric vehicle model\n<p align=\"center\">\n <img src=\"/other/img/vehicle.jpg\" alt=\"vehicle\" width=\"400\"/>\n <img src=\"/other/img/vehicle_front.jpg\" alt=\"vehicle_front\" width=\"400\"/>\n <img src=\"/other/img/vehicle_left.jpg\" alt=\"vehicle_left\" width=\"400\"/>\n <img src=\"/other/img/vehicle_up.jpg\" alt=\"vehicle_up\" width=\"400\"/>\n</p>\n\n\n## Software\nThe software is divided into modules which are resposible for:\n- motors.py - motors control,\n- vehicles.py - basic vehicle movements, \n- image_analyzer.py - routes detection,\n- controllers.py - motors speed control,\n- autonomous_vehicle.py - main program, which allows the vehicle to move autonomously on the designated routes.\n\n### Class diagram\n<p align=\"center\">\n <img src=\"/other/img/class_diagram.jpg\" alt=\"class_diagram\" width=\"500\"/>\n</p>\n\n### Image processing and route recognition\n- 1st step\n\n\n\n- 2nd step\n\n\n\n- 3rd step\n\n\n\n- 4th step\n\n\n\n- 5th step\n\n\n\n## Tests\n<p align=\"center\">\n <img src=\"/other/img/test_1.jpg?raw=true\" alt=\"test_1\" width=\"200\"/>\n <img src=\"/other/img/test_2.jpg?raw=true\" alt=\"test_2\" width=\"200\"/>\n <img src=\"/other/img/test_3.jpg?raw=true\" alt=\"test_3\" width=\"200\"/>\n <img src=\"/other/img/test_4.jpg?raw=true\" alt=\"test_4\" width=\"200\"/>\n <img src=\"/other/img/test_5.jpg?raw=true\" alt=\"test_5\" width=\"200\"/>\n <img src=\"/other/img/test_6.jpg?raw=true\" alt=\"test_6\" width=\"200\"/>\n <img src=\"/other/img/test_7.jpg?raw=true\" alt=\"test_7\" width=\"200\"/>\n <img src=\"/other/img/test_8.jpg?raw=true\" alt=\"test_8\" width=\"200\"/>\n</p>\n"
}
] | 6 |
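The controllers.py file in the record above drives the vehicle by feeding a single direction error through a PID whose integral term is clamped (anti-windup) and then splitting the output into left and right motor speeds. The following is a minimal, self-contained sketch of that pattern only; the class and function names, the fixed time step, and the example numbers are assumptions for illustration, not the repository's code.

# Sketch: one correction signal becomes differential wheel speeds.
def clamp(value, lo, hi):
    return max(min(value, hi), lo)

class SimplePID:
    def __init__(self, k_p, k_i, anti_windup, dt=0.05):
        self.k_p, self.k_i, self.anti_windup, self.dt = k_p, k_i, anti_windup, dt
        self.integral = 0.0

    def update(self, error):
        # Accumulate the integral term but keep it inside +/- anti_windup.
        self.integral = clamp(self.integral + self.k_i * error * self.dt,
                              -self.anti_windup, self.anti_windup)
        return self.k_p * error + self.integral

def motor_speeds(direction_error, base_speed, pid):
    # Split one correction into two wheel speeds, each clamped to [0, 2*base].
    correction = pid.update(direction_error)
    left = clamp(base_speed - correction, 0, 2 * base_speed)
    right = clamp(base_speed + correction, 0, 2 * base_speed)
    return left, right

if __name__ == "__main__":
    pid = SimplePID(k_p=0.85, k_i=0.2, anti_windup=2)
    print(motor_speeds(direction_error=12, base_speed=40, pid=pid))

Clamping the speeds around a base value is what lets a single scalar error steer the vehicle without ever reversing a wheel.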
FloLrx/BraidCryptography
|
https://github.com/FloLrx/BraidCryptography
|
5b0f3453b02ed42779a9f3d79979d03488732f5f
|
28990aad6c7228549e0a7e2ef946e662b01c0f0d
|
ad1e03a5890ff62aef073624493388a7eaacfbe7
|
refs/heads/main
| 2023-04-28T17:43:23.183138 | 2021-05-19T20:09:58 | 2021-05-19T20:09:58 | 368,945,943 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.8066298365592957,
"alphanum_fraction": 0.8066298365592957,
"avg_line_length": 71.4000015258789,
"blob_id": "bfed90f000a758d1a3c7f2a6105e13c5beeaa2c9",
"content_id": "a228e2701bf6bfb75b3ad9b84fc8d13d43aea935",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 375,
"license_type": "no_license",
"max_line_length": 112,
"num_lines": 5,
"path": "/README.md",
"repo_name": "FloLrx/BraidCryptography",
"src_encoding": "UTF-8",
"text": "# Cryptage à l'aide des tresses mathématiques\n\nDans le cadre de mon second TIPE, je me suis penchée sur l'utilisation des tresses mathématiques en cryptologie.\nLe groupe des tresses est en effet propice à la mise en place de la technique de cryptographie à clés publiques.\nL'algorithme codé ici peut servir à crypter et à décrypter des messages avec ce procédé.\n"
},
{
"alpha_fraction": 0.5967981219291687,
"alphanum_fraction": 0.6110287308692932,
"avg_line_length": 33.90425491333008,
"blob_id": "502c7d0cd43409930c1905bd65b7eec1bbf6fdf1",
"content_id": "8ac0288579c0fe61fc0be6ccd123a6e85502b615",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3413,
"license_type": "no_license",
"max_line_length": 199,
"num_lines": 94,
"path": "/TIPE2CryptographieTresse.py",
"repo_name": "FloLrx/BraidCryptography",
"src_encoding": "UTF-8",
"text": "def inverse_tresse(t):# tresses sous forme de listes, calcule l'inverse d'une tresse\r\n t2=[]\r\n n=len(t)\r\n for i in range(n):\r\n t2.append(-t[n-1-i])\r\n return t2\r\n##\r\ndef produit_tresse(t1,t2):# tresses sous forme de listes, calcule le produit entre deux tresses\r\n return t1+t2\r\n##\r\ndef liste_a_str(l):# transforme une liste en str\r\n s=\"\"\r\n for i in l:\r\n s=s+str(i)\r\n return s\r\n##\r\ndef somme_en_base_2(a,b):# str de meme taille\r\n s=\"\"\r\n for i in range(len(a)):\r\n c=int(a[i])+int(b[i])\r\n if c==0 or c==2:\r\n s=s+'0'\r\n else:\r\n s=s+'1'\r\n return s\r\n##\r\ndef latin_a_binaire(m):# transforme un mot en sa représentation en code ASCII\r\n m2=\"\"\r\n for i in m:\r\n j=bin(ord(i))[2:]\r\n for k in range(8-len(j)):# chaque lettre de m est représentée par 8 bits\r\n j=\"0\"+j\r\n m2=m2+j\r\n return m2\r\n##\r\ndef binaire_a_latin(m):# transforme un mot écrit en binaire en français par le code ASCII\r\n m2=\"\"\r\n while len(m)!=0:\r\n i=int(m[:8],2)\r\n m2=m2+chr(i)\r\n m=m[8:]\r\n return m2\r\n##\r\ndef decoupage(m,n):# découpe m en messages de taille n\r\n L=[]\r\n for i in range(int(len(m)/n)):\r\n L.append(m[i*n:(i+1)*n])\r\n a=m[int(len(m)/n)*n:]\r\n while len(a) != n:\r\n a=a+latin_a_binaire(\" \")\r\n L.append(a)\r\n return L\r\n##\r\ndef concatene(Lm): # liste des mots, concatène les mots de Lm en un seul mot\r\n m=\"\"\r\n for i in Lm:\r\n m=m+i\r\n return m\r\n##\r\nimport hashlib\r\nimport numpy as np\r\n##\r\ndef cryptage(m,x,a,b):# tresses sous forme de listes, message en str, crypte un message écrit en binaire, retourne une liste de sous-messages cryptés car besoin que messages aient même taille que clé\r\n inv_a=inverse_tresse(a)\r\n inv_b=inverse_tresse(b)\r\n pb=produit_tresse(produit_tresse(b,x),inv_b)# tresses sous forme de listes\r\n K=liste_a_str(produit_tresse(produit_tresse(a,pb),inv_a))# clé en str\r\n K_hc=hashlib.blake2s(np.array(K)).hexdigest()# clé hachée en str en hexadéc\r\n K_fin=bin(int(K_hc,16))[2:]# clé presque finale, ie hachée et en binaire mais pas forcément bonne taille\r\n while len(K_fin)!=256:# met clé à la bonne taille\r\n K_fin='0'+K_fin\r\n n=len(K_fin)# on va découper en sous-messages de cette taille\r\n M=latin_a_binaire(m)\r\n Lm=decoupage(M,n)# liste de sous-messages\r\n Lmc=[]# future liste de sous-messages cryptés\r\n for i in Lm:# cryptage\r\n Lmc.append(somme_en_base_2(K_fin,i))\r\n return Lmc\r\n##\r\ndef decryptage(Lmc,x,a,b):# tresses sous forme de listes, liste de sous-messages en str, renvoie le message décrypté en binaire\r\n inv_a=inverse_tresse(a)\r\n inv_b=inverse_tresse(b)\r\n pb=produit_tresse(produit_tresse(b,x),inv_b)# tresses sous forme de listes\r\n K=liste_a_str(produit_tresse(produit_tresse(a,pb),inv_a))# clé en str\r\n K_hc=hashlib.blake2s(np.array(K)).hexdigest()# clé hachée en str en hexadéc\r\n K_fin=bin(int(K_hc,16))[2:]# clé presque finale, ie hachée et en binaire mais pas forcément bonne taille\r\n while len(K_fin)!=256:# met clé à la bonne taille\r\n K_fin='0'+K_fin\r\n Lm=[]# future liste de sous-messages décryptés\r\n for i in Lmc:# décryptage\r\n Lm.append(somme_en_base_2(K_fin,i))\r\n M=concatene(Lm)# message décrypté en binaire\r\n m=binaire_a_latin(M)# message décrypté final\r\n return m"
}
] | 2 |
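The TIPE2CryptographieTresse.py file in the record above derives a 256-bit key by hashing a braid word of the form a·(b·x·b⁻¹)·a⁻¹ with BLAKE2s, then XOR-ing the message with that key block by block. The sketch below shows the same idea working on bytes instead of '0'/'1' character strings; the helper names, the space padding, and the example braid words are assumptions, not the repository's functions.

# Sketch: braid word -> hashed 256-bit key -> blockwise XOR cipher.
import hashlib

def braid_inverse(word):
    # A braid is a list of signed generator indices, e.g. [1, -2, 3].
    return [-g for g in reversed(word)]

def braid_product(*words):
    return [g for w in words for g in w]

def shared_key(x, a, b):
    # K = a · (b · x · b^-1) · a^-1, hashed to 32 bytes with BLAKE2s.
    inner = braid_product(b, x, braid_inverse(b))
    key_word = braid_product(a, inner, braid_inverse(a))
    return hashlib.blake2s(str(key_word).encode()).digest()

def xor_encrypt(message: bytes, key: bytes) -> bytes:
    # Pad to a multiple of the key length, then XOR block by block.
    block = len(key)
    padded = message + b" " * (-len(message) % block)
    return bytes(byte ^ key[i % block] for i, byte in enumerate(padded))

if __name__ == "__main__":
    k = shared_key(x=[1, 2, -1], a=[3, -2], b=[2, 2, -3])
    cipher = xor_encrypt(b"bonjour", k)
    assert xor_encrypt(cipher, k).rstrip(b" ") == b"bonjour"  # XOR is its own inverse
    print(cipher.hex())

Because XOR with a fixed key is an involution, the same routine both encrypts and decrypts, which is exactly why the repository's cryptage and decryptage functions mirror each other.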
lianguasth/MOBA-TeamCompDecoding
|
https://github.com/lianguasth/MOBA-TeamCompDecoding
|
1b4c226681456381d58349781492b78a2d260568
|
210c20cdd8755d4c0fccf7455e6d0c1918f0930e
|
58f2524d870071b19bf9cc358e47876977a1fade
|
refs/heads/master
| 2020-12-25T20:54:10.332964 | 2014-10-27T18:12:49 | 2014-10-27T18:12:49 | null | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5633141994476318,
"alphanum_fraction": 0.5878061652183533,
"avg_line_length": 25.66666603088379,
"blob_id": "6df627c8810fb12d3f9d3f113c55a25b346a3729",
"content_id": "ab406e633b1e39219633e1ca3829044c0c3446ca",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1919,
"license_type": "permissive",
"max_line_length": 110,
"num_lines": 72,
"path": "/dota-data-collection/DotaFeatureConverter.py",
"repo_name": "lianguasth/MOBA-TeamCompDecoding",
"src_encoding": "UTF-8",
"text": "import sys\n\n# The id was mapped differently.\n# dota doesn't have champion id 24 and 108, therefore we mapped 110 to 24 and 109 to 108.\ndef getId(hero_id):\n if hero_id == '110':\n return 24\n if hero_id == '109':\n return 108\n return int(hero_id)\n\n\n# team radiant has positive team comp feature\n# team dire has negative team comp feature\n# y = 0 if radiant wins, y = 1 if dire wins\ndef convertRow(row):\n feature = [0] * 108\n info = row.split(' ')\n if info[1] == 'WIN:radiant':\n winner = 0\n else:\n winner = 1\n for hero_picks in info[2:]:\n hero = hero_picks.split(':')\n hero_id = getId(hero[1]) - 1\n team = hero[2]\n if team == 'radiant':\n feature[hero_id] = 1\n else:\n feature[hero_id] = -1\n return (winner, feature)\n\n\ndef convertFile(input, outputX, outputY):\n f = open(input)\n xf = open(outputX, 'w+')\n yf = open(outputY, 'w+')\n\n # first line is label\n data = f.readlines()\n f.close()\n\n for row in data[:-1]:\n y, x = convertRow(row)\n xf.write(\", \".join(str(i) for i in x) + '\\n')\n yf.write(str(y) + '\\n')\n row = data[-1]\n y, x = convertRow(row)\n xf.write(\", \".join(str(i) for i in x))\n yf.write(str(y))\n\n xf.close()\n yf.close()\n\n\n# This function will take the first argument as input file, then convert it to feature and labels. The outcome\n# are stored under data folder\ndef main(argv=None):\n if argv is None:\n argv = sys.argv\n if len(argv) == 1:\n print \"please provide input file as first argument\"\n feature = '../data/dotaFeature.csv'\n label = '../data/dotaLabel.csv'\n convertFile(argv[1], feature, label)\n print \"Program Succeeded\"\n print \"Output files are stored in\", feature, label\n\n# run this function by calling\n# $ python DotaFeatureConverter.py ../data/dota2_match.data\nif __name__ == \"__main__\":\n main()"
}
] | 1 |
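DotaFeatureConverter.py in the record above turns each match into a 108-element vector holding +1 for radiant picks and -1 for dire picks, with a 0/1 label for the winning side. Below is a minimal sketch of that signed team-composition encoding; the exact input field layout ("PICK:id:team") is an assumption here, not necessarily the dataset's real format.

# Sketch: one match row -> (label, signed composition vector).
def encode_match(row, num_heroes=108):
    fields = row.split()
    label = 0 if fields[1] == "WIN:radiant" else 1
    features = [0] * num_heroes
    for pick in fields[2:]:
        _, hero_id, team = pick.split(":")
        index = int(hero_id) - 1                # hero ids start at 1
        features[index] = 1 if team == "radiant" else -1
    return label, features

if __name__ == "__main__":
    y, x = encode_match("42 WIN:dire PICK:5:radiant PICK:31:dire")
    print(y, x[4], x[30])   # -> 1 1 -1

Encoding the two teams with opposite signs in one vector keeps the feature space small while still letting a linear model weigh each hero's contribution per side.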
anninireland/codeskulptor
|
https://github.com/anninireland/codeskulptor
|
696927e02e9fea63c48a63439ca82d5917f0b664
|
1c14cb2fbd4c4a434a1fa5bcf93be76d02e1037f
|
1d070127b86d67b5d8d8a4783afbfe0e1567027d
|
refs/heads/master
| 2016-08-12T04:47:00.818401 | 2015-10-03T18:35:20 | 2015-10-03T18:35:20 | null | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5714792013168335,
"alphanum_fraction": 0.5971171855926514,
"avg_line_length": 32.31889724731445,
"blob_id": "65dfa17a831218598c990665f307742268e100e6",
"content_id": "a0a378f8e454dd28e06218b9c5f6aedbc6e5232b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8464,
"license_type": "no_license",
"max_line_length": 126,
"num_lines": 254,
"path": "/blackjack.py",
"repo_name": "anninireland/codeskulptor",
"src_encoding": "UTF-8",
"text": "\nBlackjack\nBuilt by Lisa Cavern as part of the Introduction to\nInteractive Programming in Python course on Coursera.\ncourse website: https://www.coursera.org/course/interactivepython2\nproject template: http://www.codeskulptor.org/#examples-blackjack_template.py\n\n\nGoal: Have a higher value hand than the dealer without going over 21.\n\nCards in Blackjack have the following values: an ace may be valued as\neither 1 or 11 (player's choice), face cards (kings, queens and jacks)\nare valued at 10 and the value of the remaining cards corresponds to\ntheir number. During a round of Blackjack, the players plays against\na dealer with the goal of building a hand (a collection of cards)\nwhose cards have a total value that is higher than the value of the\ndealer's hand, but not over 21.\n\n\"\"\"\n\n# Mini-project #6 - Blackjack\n\n\nimport simplegui\n\nimport random\n\n# load card sprite - 936x384 - source: jfitz.com\nCARD_SIZE = (72, 96)\nCARD_CENTER = (36, 48)\ncard_images = simplegui.load_image(\"http://storage.googleapis.com/codeskulptor-assets/cards_jfitz.png\")\n\nCARD_BACK_SIZE = (72, 96)\nCARD_BACK_CENTER = (36, 48)\ncard_back = simplegui.load_image(\"http://storage.googleapis.com/codeskulptor-assets/card_jfitz_back.png\")\n\n# initialize some useful global variables\nin_play = False\noutcome = \"\"\nmessage = \"\"\nscore = 0\nplayer_hand = 0\ndealer_hand = 0\n\n# define globals for cards\nSUITS = ('C', 'S', 'H', 'D')\nRANKS = ('A', '2', '3', '4', '5', '6', '7', '8', '9', 'T', 'J', 'Q', 'K')\nVALUES = {'A':1, '2':2, '3':3, '4':4, '5':5, '6':6, '7':7, '8':8, '9':9, 'T':10, 'J':10, 'Q':10, 'K':10}\n\n\n# define card class\nclass Card:\n def __init__(self, suit, rank):\n if (suit in SUITS) and (rank in RANKS):\n self.suit = suit\n self.rank = rank\n else:\n self.suit = None\n self.rank = None\n print \"Invalid card: \", suit, rank\n\n def __str__(self):\n return self.suit + self.rank\n\n def get_suit(self):\n return self.suit\n\n def get_rank(self):\n return self.rank\n\n def draw(self, canvas, pos):\n card_loc = (CARD_CENTER[0] + CARD_SIZE[0] * RANKS.index(self.rank),\n CARD_CENTER[1] + CARD_SIZE[1] * SUITS.index(self.suit))\n canvas.draw_image(card_images, card_loc, CARD_SIZE, [pos[0] + CARD_CENTER[0], pos[1] + CARD_CENTER[1]], CARD_SIZE)\n\n# define hand class\nclass Hand:\n def __init__(self):\n # create Hand object\n self.hand = []\n\n def __str__(self):\n # return a string representation of a hand\n s = \"\"\n for i in range(len(self.hand)):\n s += str(self.hand[i]) + \" \"\n return s\n\n def add_card(self, card):\n # add a card object to a hand\n self.hand.append(card)\n return self.hand\n\n def get_value(self):\n # count aces as 1, if the hand has an ace, then add 10 to hand value if it doesn't bust\n # compute the value of the hand, see Blackjack video\n value = 0\n aces = False\n for card in self.hand:\n value += VALUES[card.get_rank()]\n if card.get_rank() == 'A':\n aces = True\n if aces:\n if value + 10 <= 21:\n value += 10\n\n return value\n\n def draw(self, canvas, pos):\n # draw a hand on the canvas, use the draw method for cards\n for card in self.hand:\n card_loc = (CARD_CENTER[0] + CARD_SIZE[0] * RANKS.index(card.get_rank()),\n CARD_CENTER[1] + CARD_SIZE[1] * SUITS.index(card.get_suit()))\n canvas.draw_image(card_images, card_loc, CARD_SIZE, [pos[0] + CARD_CENTER[0], pos[1] + CARD_CENTER[1]], CARD_SIZE)\n # change pos to next slot over\n pos[0] += CARD_SIZE[0]\n\n# define deck class\nclass Deck:\n def __init__(self):\n # create a Deck object\n self.deck = 
[]\n # for each suit, loop through the ranks and append the pair to the deck\n for s in SUITS:\n for r in RANKS:\n new_card = Card(s,r)\n self.deck.append(new_card)\n\n def shuffle(self):\n # shuffle the deck\n # use random.shuffle()\n random.shuffle(self.deck)\n return self.deck\n\n def deal_card(self):\n # deal a card object from the deck\n # pick out the first card in the deck list\n dealt_card = self.deck[-1]\n # pop out that card from the deck\n self.deck.pop(-1)\n return dealt_card\n\n def __str__(self):\n # return a string representing the deck\n decklist = \"\"\n for c in self.deck:\n decklist += str(c)\n decklist += \" \"\n return \"Deck Contains: \" + decklist\n\n\n#define event handlers for buttons\n\ndef deal():\t\t# create deck, hands; shuffle deck, deal 2 cards to each hand\n global outcome, in_play,player_hand, dealer_hand, deck, message, score\n outcome = \"\"\n print \"New Hand\"\n if in_play == True:\n outcome = \"You have forfeited that last hand.\"\n score -= 1\n message = \"Hit or Stand?\"\n in_play = True\n deck = Deck()\n player_hand = Hand()\n dealer_hand = Hand()\n deck.shuffle()\n pc1 = deck.deal_card()\n player_hand.add_card(pc1)\n pc2 = deck.deal_card()\n player_hand.add_card(pc2)\n dc1 = deck.deal_card()\n dealer_hand.add_card(dc1)\n dc2 = deck.deal_card()\n dealer_hand.add_card(dc2)\n print \"Player's Hand: \", player_hand, \"Dealer's Hand\", dealer_hand\n print \"Player's value: \", player_hand.get_value(), \" Dealer's value\", dealer_hand.get_value()\n print outcome\n\ndef hit():\n global in_play, outcome, player_hand, deck, score, dealer_hand, message\n print \"Player Hits\"\n outcome = \"\"\n # if the hand is in play, hit the player\n if in_play == True:\n if player_hand.get_value() <= 21:\n player_hand.add_card(deck.deal_card())\n # if busted, assign a message to outcome, update in_play and score\n if player_hand.get_value() > 21:\n outcome = \"You Busted! Dealer Wins\"\n message = \"Try Again?\"\n in_play = False\n score -= 1\n else:\n outcome = \"You can't hit. Click Deal to play again.\"\n print \"Player's Hand: \", player_hand, \"Dealer's Hand\", dealer_hand\n print \"Player's value: \", player_hand.get_value(), \" Dealer's value\", dealer_hand.get_value()\n print outcome\n print\n\ndef stand():\n global in_play, outcome, player_hand, dealer_hand, deck, score, message\n # if hand is in play, repeatedly hit dealer until his hand has value 17 or more\n if in_play == True:\n print \"Player Stands\"\n while dealer_hand.get_value() < 17:\n dealer_hand.add_card(deck.deal_card())\n # assign a message to outcome, update in_play and score\n if dealer_hand.get_value() > 21:\n outcome = \"Dealer Busts! 
You win!\"\n score += 1\n else:\n if player_hand.get_value() <= dealer_hand.get_value():\n outcome = \"Dealer Wins\"\n score -= 1\n else:\n outcome = \"You Win!\"\n score += 1\n in_play = False\n message = \"New Deal?\"\n print outcome\n print\n\n# draw handler\ndef draw(canvas):\n global in_play, outcome, player_hand, dealer_hand, deck, score, card_back\n # test to make sure that card.draw works, replace with your code below\n canvas.draw_text(\"Blackjack\", [100,100], 36, \"Yellow\", \"sans-serif\")\n canvas.draw_text((\"Score: \" + str(score)), [400,100], 36, \"Black\", \"sans-serif\")\n canvas.draw_text(\"Dealer\", [100,150], 24, \"Black\", \"sans-serif\")\n canvas.draw_text(\"Player\", [100,350], 24, \"Black\", \"sans-serif\")\n canvas.draw_text(str(message), [200,350], 24, \"Black\", \"sans-serif\")\n canvas.draw_text(str(outcome), [50,500], 24, \"Black\", \"sans-serif\")\n player_hand.draw(canvas, [100,375])\n# dealer_hand.draw(canvas, [100,175])\n if in_play == True:\n dealer_hand.draw(canvas, [100,175])\n canvas.draw_image(card_back, [CARD_BACK_CENTER[0], CARD_BACK_CENTER[1]], CARD_BACK_SIZE, [136,223], CARD_BACK_SIZE)\n else:\n dealer_hand.draw(canvas, [100,175])\n\n# initialization frame\nframe = simplegui.create_frame(\"Blackjack\", 600, 600)\nframe.set_canvas_background(\"Green\")\n\n#create buttons and canvas callback\nframe.add_button(\"Deal\", deal, 200)\nframe.add_button(\"Hit\", hit, 200)\nframe.add_button(\"Stand\", stand, 200)\nframe.set_draw_handler(draw)\n\n\n# get things rolling\n\nframe.start()\ndeal()\n"
},
{
"alpha_fraction": 0.5063357949256897,
"alphanum_fraction": 0.5438225865364075,
"avg_line_length": 24.58108139038086,
"blob_id": "a7aab18dc2a1848293b0b641b034557028474bb1",
"content_id": "55cce2e5ae8a9c296b084f34cf05aa82283f2863",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1894,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 74,
"path": "/memory.py",
"repo_name": "anninireland/codeskulptor",
"src_encoding": "UTF-8",
"text": "# implementation of card game - Memory\n\nimport simplegui\nimport random\n\nDECK = [0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7]\nexposed = []\nturns = 0\nstate = 0\nposA = ''\nposB = ''\n\n# helper function to initialize globals\ndef new_game():\n global DECK, exposed, state, turns\n state = 0\n turns = 0\n label.set_text('Turns = '+str(turns))\n random.shuffle(DECK)\n exposed = [False] * 16\n \n# define event handlers\ndef mouseclick(pos):\n # add game state logic here\n global DECK, exposed, state, posA, posB, turns \n clicked = pos[0] // 50\n if exposed[clicked] == False:\n exposed[clicked] = True \n \n if state == 0:\n state = 1\n posA = clicked\n elif state == 1:\n turns += 1\n state = 2\n posB = clicked \n else:\n if DECK[posA] == DECK[posB]:\n exposed[posA] = 'matched'\n exposed[posB] = 'matched'\n if DECK[posA] != DECK[posB]:\n exposed[posA] = False\n exposed[posB] = False \n posA = clicked\n# turns += 1\n posB = ''\n state = 1\n\n# cards are logically 50x100 pixels in size \ndef draw(canvas):\n global DECK, exposed\n point = 15\n x = 0\n label.set_text('Turns = '+str(turns))\n for i in range(0,16):\n if exposed[i] == False:\n canvas.draw_polygon([(x,0),(x+50,0),(x+50,100),(x,100)],5,'Red','Green')\n else:\n canvas.draw_text(str(DECK[i]), (point,60), 48, 'white')\n point += 50\n x += 50 \n\n# create frame and add a button and labels\nframe = simplegui.create_frame(\"Memory\", 800, 100)\nframe.add_button(\"Reset\", new_game)\nlabel = frame.add_label(\"Turns = 0\")\n\n# register event handlers\nframe.set_mouseclick_handler(mouseclick)\nframe.set_draw_handler(draw)\n\n# get things rolling\nnew_game()\nframe.start()\n\n"
},
{
"alpha_fraction": 0.5973214507102966,
"alphanum_fraction": 0.6020089387893677,
"avg_line_length": 32.68421173095703,
"blob_id": "0ef4487cdec9e4adfea39889cedc321721b1a19f",
"content_id": "865537ee2bb0d13463934899b9e845e5fa6958ad",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4480,
"license_type": "no_license",
"max_line_length": 92,
"num_lines": 133,
"path": "/solitaireMancala.py",
"repo_name": "anninireland/codeskulptor",
"src_encoding": "UTF-8",
"text": "\"\"\"\nSolitaire Mancala Game\nBuilt by Lisa Cavern as part of Principles of Computing (Rice Universtiy) on Coursera \nhttps://www.coursera.org/course/principlescomputing1\n\nGoal: Move all seeds from given houses into the store.\nIn the GUI, you may ask computer AI to make move or click on a house to attempt a legal move\n\nIn Solitaire Mancala, the player has six houses and a store. \nAt the start of a game, a variable number of seeds are placed in each \nhouse (as opposed to the three seeds per house in two-player Mancala). \nAs in two-player Mancala, the player may select a house and gather all \nof the seeds in that house. The player then places the seeds one at time \nin the houses to the right of the selected house. \nThe last seed MUST be placed in the store.\n\nNote: \nGUI for this game was NOT developed by Lisa Cavern, but provided \nas part of the template for the game. Original template is here: \nhttp://www.codeskulptor.org/#poc_mancala_template.py\n\n\"\"\"\n\nclass SolitaireMancala:\n \"\"\"\n Simple class that implements Solitaire Mancala\n \"\"\"\n \n def __init__(self):\n \"\"\"\n Create Mancala game with empty store and no houses\n \"\"\"\n self._board = [0]\n \n def set_board(self, configuration):\n \"\"\"\n Take the list configuration of initial number of seeds for given houses\n house zero corresponds to the store and is on right\n houses are number in ascending order from right to left\n \"\"\"\n self._board = configuration[::-1]\n\n def __str__(self):\n \"\"\"\n Return string representation for Mancala board\n \"\"\"\n #return ''.join(str(e) for e in self._board)\n return str(self._board)\n \n def get_num_seeds(self, house_num):\n \"\"\"\n Return the number of seeds in given house on board\n \"\"\"\n return self._board[-1-house_num]\n\n def is_game_won(self):\n \"\"\"\n Check to see if all houses but house zero are empty\n \"\"\"\n all_empty = True\n \n for h in range(1,len(self._board)):\n if self.get_num_seeds(h) != 0:\n all_empty = False\n return all_empty\n \n def is_legal_move(self, house_num):\n \"\"\"\n Check whether a given move is legal\n Return True if moving the seeds from house house_num is legal. \n Otherwise, return False. 
\n If house_num is zero, is_legal_move should return False.\n \"\"\"\n if house_num == 0:\n return False\n elif self.get_num_seeds(house_num) == house_num:\n return True\n else:\n return False\n\n def apply_move(self, house_num):\n \"\"\"\n Move all of the stones from house to lower/left houses\n Last seed must be played in the store (house zero)\n \"\"\"\n if self.is_legal_move(house_num) == True:\n # get copy of board\n temp_board = self._board[::-1]\n \n # set house_num to 0\n temp_board[house_num] = 0\n \n # add 1 to each lower house and store \n for e in range(house_num):\n temp_board[e] += 1\n self.set_board(temp_board[::])\n\n def choose_move(self):\n \"\"\"\n Return the house for the next shortest legal move\n Shortest means legal move from house closest to store\n Note that using a longer legal move would make smaller illegal\n If no legal move, return house zero\n \"\"\"\n next_move = 0\n for h in range(1,len(self._board)):\n if self.is_legal_move(h):\n next_move = h\n break\n return next_move\n \n def plan_moves(self):\n \"\"\"\n Return a sequence (list) of legal moves based on the following heuristic: \n After each move, move the seeds in the house closest to the store \n when given a choice of legal moves\n Not used in GUI version, only for machine testing\n \"\"\"\n moves_plan = []\n current_board = self._board[:]\n while self.choose_move() != 0:\n next_move = self.choose_move()\n moves_plan.append(next_move)\n self.apply_move(next_move)\n current_board = self._board[:]\n return moves_plan\n\n# import user40_U3Iq1wUVB9_8 as poc_mancala_testsuite\n# poc_mancala_testsuite.run_suite(SolitaireMancala)\n\n# Import and run GUI to visualise game\nimport poc_mancala_gui\npoc_mancala_gui.run_gui(SolitaireMancala())\n"
},
{
"alpha_fraction": 0.7648953199386597,
"alphanum_fraction": 0.8003220558166504,
"avg_line_length": 37.8125,
"blob_id": "cc3acfea1c33d43eb3f796879ad4da6c8ca22d6e",
"content_id": "91356f03270cd192ba409d3067ffb7671775ec71",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 621,
"license_type": "no_license",
"max_line_length": 121,
"num_lines": 16,
"path": "/README.md",
"repo_name": "anninireland/codeskulptor",
"src_encoding": "UTF-8",
"text": "# codeskulptor\n\nThese are a sample of the projects I completed in the Coursera course Introduction to Interactive Programming in Python. \n\nEach were developed on the codeskulptor platform using the simplegui library. \nTo see the games in action, follow the links below and click play. \n\nRiceRocks (aka Asteroids) http://www.codeskulptor.org/#user38_g60iQZuiQL_4.py\n\nBlackjack http://www.codeskulptor.org/#user40_INCwL31A8bObCOa.py\n\nSolitaire Mancala http://www.codeskulptor.org/#user40_GUaTZ0wwqj_1.py\n\nMemory http://www.codeskulptor.org/#user38_zOkRPoYXfU_15.py\n\nPong http://www.codeskulptor.org/#user38_EcRdUiY9fo_2.py\n"
},
{
"alpha_fraction": 0.553934633731842,
"alphanum_fraction": 0.5937949419021606,
"avg_line_length": 32.10884475708008,
"blob_id": "02d9a3ca44b9986d08d263e025eb85afd48be345",
"content_id": "0ebcd06f5143dab1e226ce5710f334d2c5c5d554",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4867,
"license_type": "no_license",
"max_line_length": 107,
"num_lines": 147,
"path": "/pong.py",
"repo_name": "anninireland/codeskulptor",
"src_encoding": "UTF-8",
"text": "# Implementation of classic arcade game Pong\n\nimport simplegui\nimport random\n\n# initialize globals - pos and vel encode vertical info for paddles\nWIDTH = 600\nHEIGHT = 400 \nBALL_RADIUS = 20\nPAD_WIDTH = 8\nPAD_HEIGHT = 80\nHALF_PAD_HEIGHT = PAD_HEIGHT/2\nLEFT = False\nRIGHT = True\npaddle1_pos = 200\npaddle2_pos = 200\npaddle1_vel = 0\npaddle2_vel = 0\nball_pos = [WIDTH/2,HEIGHT/2]\nball_vel = [0.5,-2.0]\nscore_p1 = 0\nscore_p2 = 0\n\n# initialize ball_pos and ball_vel for new ball in middle of table\n# if direction is RIGHT, the ball's velocity is upper right, else upper left\ndef spawn_ball(direction):\n global ball_pos, ball_vel # these are vectors stored as lists\n ball_pos = [WIDTH/2,HEIGHT/2]\n if direction is LEFT:\n ball_vel[0] = random.randrange(-5,-3)\n ball_vel[1] = random.randrange(-5,-3)\n if direction is RIGHT:\n ball_vel[0] = random.randrange(3,5)\n ball_vel[1] = random.randrange(-5,-3)\n \n# define event handlers\ndef new_game():\t\n global paddle1_pos, paddle2_pos, paddle1_vel, paddle2_vel # these are numbers\n global score1, score2 # these are ints\n global ball_pos, ball_vel # these are vectors stored as lists\n global score_p1, score_p2\n score_p1 = 0\n score_p2 = 0\n spawn_ball(RIGHT)\n\ndef keydown(key):\n global paddle1_vel, paddle2_vel\n if key == simplegui.KEY_MAP['s']:\n paddle1_vel += 4\n if key == simplegui.KEY_MAP['w']:\n paddle1_vel -= 4 \n if key == simplegui.KEY_MAP['down']:\n paddle2_vel += 4\n if key == simplegui.KEY_MAP['up']:\n paddle2_vel -= 4 \n\ndef keyup(key):\n global paddle1_vel, paddle2_vel\n if key == simplegui.KEY_MAP['s']:\n paddle1_vel -= 4\n if key == simplegui.KEY_MAP['w']:\n paddle1_vel += 4\n if key == simplegui.KEY_MAP['down']:\n paddle2_vel -= 4\n if key == simplegui.KEY_MAP['up']:\n paddle2_vel += 4 \n \ndef draw(canvas):\n global score1, score2, paddle1_pos, paddle2_pos, ball_pos, ball_vel, score_p1, score_p2\n # draw mid line and gutters\n canvas.draw_line([WIDTH / 2, 0],[WIDTH / 2, HEIGHT], 1, \"White\")\n canvas.draw_line([PAD_WIDTH, 0],[PAD_WIDTH, HEIGHT], 1, \"White\")\n canvas.draw_line([WIDTH - PAD_WIDTH, 0],[WIDTH - PAD_WIDTH, HEIGHT], 1, \"White\")\n \n # Draw ball\n canvas.draw_circle(ball_pos, BALL_RADIUS, 2, \"Red\", \"White\")\n\n # draw paddles\n canvas.draw_line((0,paddle1_pos - HALF_PAD_HEIGHT),(0,paddle1_pos + HALF_PAD_HEIGHT), 16, \"Yellow\")\n canvas.draw_line((600,paddle2_pos - HALF_PAD_HEIGHT),(600,paddle2_pos + HALF_PAD_HEIGHT), 16, \"Yellow\")\n\n # draw scores\n canvas.draw_text(str(score_p1), (WIDTH*0.25, 40), 36, 'Red')\n canvas.draw_text(str(score_p2), (WIDTH*0.75, 40), 36, 'Red') \n \n # Update ball position\n ball_pos[0] += ball_vel[0]\n ball_pos[1] += ball_vel[1]\n \n # reflect off the top and bottom walls\n if ball_pos[1] <= BALL_RADIUS:\n ball_vel[1] = - ball_vel[1]\n elif ball_pos[1] >= HEIGHT - BALL_RADIUS:\n ball_vel[1] = - ball_vel[1]\n\n # reset ball when it touches gutter \t\t\t# check if it hits paddle \n if ball_pos[0] <= BALL_RADIUS+PAD_WIDTH: \t\t# check left paddle\n if ball_pos[1] <= paddle1_pos + HALF_PAD_HEIGHT:\n if ball_pos[1] >= paddle1_pos - HALF_PAD_HEIGHT:\n ball_pos[0] = BALL_RADIUS + PAD_WIDTH\n ball_vel[0] = -ball_vel[0]*1.1\n else:\n score_p2 += 1\n spawn_ball(RIGHT)\n else:\n score_p2 += 1\n spawn_ball(RIGHT)\n\n if ball_pos[0] >= WIDTH - PAD_WIDTH - BALL_RADIUS:\t# check right paddle\n if ball_pos[1] <= paddle2_pos + HALF_PAD_HEIGHT:\n if ball_pos[1] >= paddle2_pos - HALF_PAD_HEIGHT:\n ball_pos[0] = WIDTH - PAD_WIDTH - BALL_RADIUS\n ball_vel[0] = - 
ball_vel[0]*1.1\n else:\n score_p1 += 1\n spawn_ball(LEFT) \n else:\n score_p1 += 1\n spawn_ball(LEFT)\n\n # update paddle's vertical position, keep paddle on the screen\n paddle1_pos += paddle1_vel\n paddle1_pos += paddle1_vel\n paddle2_pos += paddle2_vel\n paddle2_pos += paddle2_vel\n \n if paddle1_pos <= HALF_PAD_HEIGHT:\t\t#\tkeep paddle on screen\n paddle1_pos = HALF_PAD_HEIGHT\n \n if paddle1_pos >= 400 - HALF_PAD_HEIGHT:\n paddle1_pos = 400 - HALF_PAD_HEIGHT\n \n if paddle2_pos <= HALF_PAD_HEIGHT:\t\t\n paddle2_pos = HALF_PAD_HEIGHT\n \n if paddle2_pos >= 400 - HALF_PAD_HEIGHT:\n paddle2_pos = 400 - HALF_PAD_HEIGHT\n \n# create frame\nframe = simplegui.create_frame(\"Pong\", WIDTH, HEIGHT)\nframe.set_draw_handler(draw)\nframe.set_keydown_handler(keydown)\nframe.set_keyup_handler(keyup)\nbutton1 = frame.add_button('Reset', new_game)\n# start frame\nnew_game()\nframe.start()\n"
}
] | 5 |
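The blackjack.py file in the record above values a hand by counting every ace as 1 and then adding 10 only when that upgrade does not push the total past 21. A short standalone sketch of just that rule, using the same rank values as the source:

VALUES = {'A': 1, '2': 2, '3': 3, '4': 4, '5': 5, '6': 6, '7': 7,
          '8': 8, '9': 9, 'T': 10, 'J': 10, 'Q': 10, 'K': 10}

def hand_value(ranks):
    # Score aces as 1 first, then promote one ace to 11 if it does not bust.
    value = sum(VALUES[r] for r in ranks)
    if 'A' in ranks and value + 10 <= 21:
        value += 10
    return value

if __name__ == "__main__":
    print(hand_value(['A', 'K']))        # 21 (ace counted as 11)
    print(hand_value(['A', 'K', '5']))   # 16 (ace stays at 1)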
ksdaklmk/falcon-mockapi
|
https://github.com/ksdaklmk/falcon-mockapi
|
0cfcf39d0b0114f44193e67017a24a1b344d6149
|
efe356a65e917ec1d61246e77af3bb2b8d30a704
|
8cefd57b3a9c5fead217250c8f64b646c1f291f6
|
refs/heads/master
| 2022-04-23T06:38:09.921232 | 2020-03-19T09:25:01 | 2020-03-19T09:25:01 | null | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5737410187721252,
"alphanum_fraction": 0.6254496574401855,
"avg_line_length": 29.757143020629883,
"blob_id": "b119c58cf142f5c7aa9494bfff1cc33210d33b9a",
"content_id": "cee0d5fe78bbf954ec34c9b77c3aa39e7012a681",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4448,
"license_type": "no_license",
"max_line_length": 126,
"num_lines": 140,
"path": "/fapi.py",
"repo_name": "ksdaklmk/falcon-mockapi",
"src_encoding": "UTF-8",
"text": "import json\r\nimport requests\r\nimport datetime\r\nimport time\r\nimport uvicorn\r\n\r\nfrom fastapi import FastAPI, Depends\r\nfrom fastapi.responses import JSONResponse\r\nfrom pydantic import BaseModel, Field, Json\r\nfrom requests.packages.urllib3.exceptions import InsecureRequestWarning\r\nrequests.packages.urllib3.disable_warnings(InsecureRequestWarning)\r\n\r\n\r\nclass Tools:\r\n\r\n def getDateTimeISO(self):\r\n utc_offset_sec = time.altzone if time.localtime().tm_isdst else time.timezone\r\n utc_offset = datetime.timedelta(seconds=-utc_offset_sec)\r\n return datetime.datetime.now().replace(tzinfo=datetime.timezone(offset=utc_offset)).isoformat(timespec='milliseconds')\r\n\r\n\r\nclass SenderTxnModel(BaseModel):\r\n account_no: str = Field(..., alias='accountNo')\r\n account_name: str = Field(..., alias='accountName')\r\n account_type: str = Field(..., alias='accountType')\r\n sender_tax: str = Field(..., alias='senderTax')\r\n \r\n\r\nclass ReceiverTxnModel(BaseModel):\r\n account_no: str = Field(..., alias='accountNo')\r\n account_type: str = Field(..., alias='accountType')\r\n bank_code: str = Field(..., alias='bankCode')\r\n\r\n\r\nclass OrftInquiry(BaseModel):\r\n sender: SenderTxnModel\r\n receiver: ReceiverTxnModel\r\n amount: float\r\n qr_flag: str = Field(..., alias='qrFlag')\r\n\r\n\r\nclass OrftConfirm(BaseModel):\r\n pass\r\n\r\n\r\nclass ChannelIdModel(BaseModel):\r\n channel_id: str\r\n\r\n\r\nclass NdidPayloadModel(BaseModel):\r\n namespace: str\r\n identifier: str\r\n min_ial: float\r\n min_aal: float\r\n product_id: str\r\n\r\n\r\nclass ListIdpModel(BaseModel):\r\n header: ChannelIdModel\r\n payload: NdidPayloadModel\r\n\r\n\r\napi = FastAPI(title=\"Mirai Mock APIs\", description=\"List of mock APIs for Mirai interface tesing\", version=\"1.0\", debug=True)\r\ntry_count = 1\r\n\r\[email protected](\"/ndid\", name=\"NDID interface\", description=\"\")\r\nasync def idp_list():\r\n response_message = {\r\n \"error\": {\r\n \"code\": \"-200\",\r\n \"message\": \"Cannot contact backend\",\r\n \"messageTH\": \"-\",\r\n \"serverDateTime\": Tools.getDateTimeISO(self=None),\r\n \"clientTransactionID\": \"a01fa0ae-190a-49d5-9cc1-465f9f9ab0dc\",\r\n \"serverTransactionID\": \"47865470-7a51-444f-b676-679c83000302\"\r\n }\r\n }\r\n\r\n return JSONResponse(status_code=400, content=response_message)\r\n\r\n\r\[email protected](\"/transfer/orft/inquiry\", name=\"Actual account fund transfer inquiry transaction\", description=\"\")\r\nasync def orft_inquiry(res: OrftInquiry):\r\n payload_headers = {\r\n 'API-Key': 'l71ad26790bd0f4b9d81b215670b54fb3e',\r\n 'X-Client-Transaction-DateTime': Tools.getDateTimeISO(self=None),\r\n 'X-Client-Transaction-ID': 'e45dbd32-95a3-4ed3-b6fb-4ad36d1a6bcc',\r\n 'Content-Type': 'application/json'\r\n }\r\n url = 'https://sandbox.api.krungsri.net/transfer/orft/inquiry'\r\n payload = {\r\n\t \"sender\": {\r\n\t\t \"accountNo\": \"3007035315\",\r\n\t\t \"accountName\": \"PANUWAT BOVORNCHAICHARN\",\r\n\t\t \"accountType\": \"15\",\r\n\t\t \"senderTax\": \"3129900009286\"\r\n\t },\r\n\t \"receiver\": {\r\n\t\t \"accountNo\": \"1112000099\",\r\n\t\t \"accountType\": \"99\",\r\n\t\t \"bankCode\": \"014\"\r\n\t },\r\n\t \"amount\": 10.00,\r\n\t \"qrFlag\": \"N\"\r\n }\r\n response = requests.post(url, headers=payload_headers, json=payload, verify=False)\r\n return response.json()\r\n\r\n\r\[email protected](\"/transfer/orft/confirm\", name=\"Actual account fund transfer confirm transaction\", description=\"\")\r\nasync def orft_confirm(res: 
OrftConfirm):\r\n global try_count\r\n response_98 = {\r\n \"error\": {\r\n \"code\": \"98\",\r\n \"message\": \"This is response code 98\",\r\n \"messageTH\": \"-\",\r\n \"serverDateTime\": Tools.getDateTimeISO(self=None),\r\n \"clientTransactionID\": \"a01fa0ae-190a-49d5-9cc1-465f9f9ab0dc\",\r\n \"serverTransactionID\": \"47865470-7a51-444f-b676-679c83000302\"\r\n }\r\n }\r\n response_55 = {\r\n \"error\": {\r\n \"code\": \"55\",\r\n \"message\": \"This is response code 55\",\r\n \"messageTH\": \"-\",\r\n \"serverDateTime\": Tools.getDateTimeISO(self=None),\r\n \"clientTransactionID\": \"a01fa0ae-190a-49d5-9cc1-465f9f9ab0dc\",\r\n \"serverTransactionID\": \"47865470-7a51-444f-b676-679c83000302\"\r\n }\r\n }\r\n\r\n if try_count % 3 == 0:\r\n response = JSONResponse(status_code=400, content=response_55)\r\n else:\r\n response = JSONResponse(status_code=400, content=response_98)\r\n try_count += 1\r\n \r\n return response\r\n\r\n"
},
{
"alpha_fraction": 0.5220779180526733,
"alphanum_fraction": 0.5350649356842041,
"avg_line_length": 15.5,
"blob_id": "6a7d5a0ed29c4e1e68dd169f766210efdf9f4928",
"content_id": "055db38ad3b8155f1c0e61a3fbe449276b5fa5ea",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 403,
"license_type": "no_license",
"max_line_length": 49,
"num_lines": 22,
"path": "/main.py",
"repo_name": "ksdaklmk/falcon-mockapi",
"src_encoding": "UTF-8",
"text": "from fastapi import FastAPI\r\nimport uvicorn\r\n\r\n\r\napp = FastAPI()\r\n\r\[email protected](\"/\")\r\nasync def root():\r\n return {\r\n \"message\": \"Mirai Mock API\"\r\n }\r\n\r\[email protected](\"/external/dopa/idcard/laser\")\r\nasync def dopa():\r\n return {\r\n \"code\": \"0\",\r\n \"description\": \"สถานะปกติ\"\r\n }\r\n\r\n\r\nif __name__ == \"__main__\":\r\n uvicorn.run(app, host=\"localhost\", port=8000)\r\n"
},
{
"alpha_fraction": 0.5522643327713013,
"alphanum_fraction": 0.6083737015724182,
"avg_line_length": 34.383419036865234,
"blob_id": "d89bb5d70e05eec8be8327315d368b0d8a41f5b1",
"content_id": "9c304a2a71bd2116e7d782f9af4c065b3a2c2a2f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7040,
"license_type": "no_license",
"max_line_length": 126,
"num_lines": 193,
"path": "/mockapi.py",
"repo_name": "ksdaklmk/falcon-mockapi",
"src_encoding": "UTF-8",
"text": "import falcon\r\nimport json\r\nimport requests\r\nimport datetime\r\nimport time\r\n\r\nfrom requests.packages.urllib3.exceptions import InsecureRequestWarning\r\nrequests.packages.urllib3.disable_warnings(InsecureRequestWarning)\r\n\r\n\r\nclass Tools:\r\n\r\n def getDateTimeISO(self):\r\n utc_offset_sec = time.altzone if time.localtime().tm_isdst else time.timezone\r\n utc_offset = datetime.timedelta(seconds=-utc_offset_sec)\r\n return datetime.datetime.now().replace(tzinfo=datetime.timezone(offset=utc_offset)).isoformat(timespec='milliseconds')\r\n\r\n\r\nclass OrftInquiry:\r\n\r\n payloadHeaders = {\r\n 'API-Key': 'l71ad26790bd0f4b9d81b215670b54fb3e',\r\n 'X-Client-Transaction-DateTime': Tools.getDateTimeISO(self=None),\r\n 'X-Client-Transaction-ID': 'e45dbd32-95a3-4ed3-b6fb-4ad36d1a6bcc',\r\n 'Content-Type': 'application/json'\r\n }\r\n url = 'https://sandbox.api.krungsri.net/transfer/orft/inquiry'\r\n\r\n def on_post(self, req, resp):\r\n self.reqMessage = req.media\r\n self.response = requests.post(self.url, headers=self.payloadHeaders, json=self.reqMessage, verify=False)\r\n resp.body = self.response.text\r\n resp.status = str(self.response.status_code)\r\n\r\n\r\nclass OrftConfirm:\r\n\r\n tryCount = 1\r\n responseMessage1 = {\r\n \"error\": {\r\n \"code\": \"55\",\r\n \"message\": \"This is response code 55\",\r\n \"messageTH\": \"-\",\r\n \"serverDateTime\": Tools.getDateTimeISO(self=None),\r\n \"clientTransactionID\": \"a01fa0ae-190a-49d5-9cc1-465f9f9ab0dc\",\r\n \"serverTransactionID\": \"47865470-7a51-444f-b676-679c83000302\"\r\n }\r\n }\r\n responseMessage2 = {\r\n \"error\": {\r\n \"code\": \"98\",\r\n \"message\": \"This is response code 98\",\r\n \"messageTH\": \"-\",\r\n \"serverDateTime\": Tools.getDateTimeISO(self=None),\r\n \"clientTransactionID\": \"a01fa0ae-190a-49d5-9cc1-465f9f9ab0dc\",\r\n \"serverTransactionID\": \"47865470-7a51-444f-b676-679c83000302\"\r\n }\r\n }\r\n\r\n def on_post(self, req, resp):\r\n if self.tryCount % 3 == 0:\r\n resp.body = json.dumps(self.responseMessage1)\r\n else:\r\n resp.body = json.dumps(self.responseMessage2)\r\n resp.status = falcon.HTTP_400\r\n self.tryCount += 1\r\n\r\n\r\nclass Dopa:\r\n \r\n responseMessage = {\r\n \"code\": \"0\",\r\n \"description\": \"สถานะปกติ\"\r\n }\r\n\r\n def on_post(self, req, resp):\r\n resp.body = json.dumps(self.responseMessage)\r\n resp.status = falcon.HTTP_200\r\n\r\n\r\nclass NdidList:\r\n\r\n idp_headers = {\r\n 'API-Key': 'l726df48afe64548c9acb2430af3484009',\r\n 'X-Client-Transaction-DateTime': Tools.getDateTimeISO(self=None),\r\n 'X-Client-Transaction-ID': 'e45dbd32-95a3-4ed3-b6fb-4ad36d1a6bcc',\r\n 'Content-Type': 'application/json'\r\n }\r\n error_response = {\r\n \"error\": {\r\n \"code\": \"-200\",\r\n \"message\": \"Cannot contact backend\",\r\n \"messageTH\": \"-\",\r\n \"serverDateTime\": Tools.getDateTimeISO(self=None),\r\n \"clientTransactionID\": \"a01fa0ae-190a-49d5-9cc1-465f9f9ab0dc\",\r\n \"serverTransactionID\": \"47865470-7a51-444f-b676-679c83000302\"\r\n }\r\n }\r\n list_idp_url = 'https://sit.api.krungsri.net/native/ndid/utility/idp'\r\n\r\n def on_post(self, req, resp):\r\n call_api = False # False: returns error, True: forward request to actual API GW\r\n if call_api:\r\n self.req_message = req.media\r\n self.response = requests.post(self.list_idp_url, headers=self.idp_headers, json=self.req_message, verify=False)\r\n resp.body = self.response.text\r\n resp.status = str(self.response.status_code)\r\n else:\r\n resp.body = json.dumps(self.error_response)\r\n 
resp.status = falcon.HTTP_400\r\n\r\n\r\nclass NdidRequest:\r\n\r\n idp_headers = {\r\n 'API-Key': 'l726df48afe64548c9acb2430af3484009',\r\n 'X-Client-Transaction-DateTime': Tools.getDateTimeISO(self=None),\r\n 'X-Client-Transaction-ID': 'e45dbd32-95a3-4ed3-b6fb-4ad36d1a6bcc',\r\n 'Content-Type': 'application/json'\r\n }\r\n error_response = {\r\n \"error\": {\r\n \"code\": \"-200\",\r\n \"message\": \"Cannot contact backend\",\r\n \"messageTH\": \"-\",\r\n \"serverDateTime\": Tools.getDateTimeISO(self=None),\r\n \"clientTransactionID\": \"a01fa0ae-190a-49d5-9cc1-465f9f9ab0dc\",\r\n \"serverTransactionID\": \"47865470-7a51-444f-b676-679c83000302\"\r\n }\r\n }\r\n request_idp_url = 'https://sit.api.krungsri.net/native/ndid/rp/request'\r\n\r\n def on_post(self, req, resp):\r\n call_api = True # False: returns error, True: forward request to actual API GW\r\n if call_api:\r\n self.req_message = req.media\r\n self.response = requests.post(self.request_idp_url, headers=self.idp_headers, json=self.req_message, verify=False)\r\n resp.body = self.response.text\r\n resp.status = str(self.response.status_code)\r\n else:\r\n resp.body = json.dumps(self.error_response)\r\n resp.status = falcon.HTTP_400\r\n\r\n\r\nclass NdidGetData:\r\n\r\n error_response = {\r\n \"error\": {\r\n \"code\": \"-200\",\r\n \"message\": \"Cannot contact backend\",\r\n \"messageTH\": \"-\",\r\n \"serverDateTime\": Tools.getDateTimeISO(self=None),\r\n \"clientTransactionID\": \"a01fa0ae-190a-49d5-9cc1-465f9f9ab0dc\",\r\n \"serverTransactionID\": \"47865470-7a51-444f-b676-679c83000302\"\r\n }\r\n }\r\n idp_headers = {\r\n 'API-Key': 'l726df48afe64548c9acb2430af3484009',\r\n 'X-Client-Transaction-DateTime': Tools.getDateTimeISO(self=None),\r\n 'X-Client-Transaction-ID': 'e45dbd32-95a3-4ed3-b6fb-4ad36d1a6bcc',\r\n 'Content-Type': 'application/json'\r\n }\r\n get_data_url = 'https://sit.api.krungsri.net/native/ndid/rp/data'\r\n\r\n def on_post(self, req, resp):\r\n call_api = True # False: returns error, True: forward request to actual API GW\r\n if call_api:\r\n self.req_message = req.media\r\n self.response = requests.post(self.get_data_url, headers=self.idp_headers, json=self.req_message, verify=False)\r\n resp.body = self.response.text\r\n resp.status = str(self.response.status_code)\r\n else:\r\n resp.body = json.dumps(self.error_response)\r\n resp.status = falcon.HTTP_400\r\n\r\n\r\n# Initiates the instance\r\napi = falcon.API()\r\norft_inquiry = OrftInquiry()\r\norft_confirm = OrftConfirm()\r\nndid_list = NdidList()\r\nndid_request = NdidRequest()\r\ndopa = Dopa()\r\nas_data = NdidGetData()\r\n\r\n# Adds API routes\r\n# api.add_route('/transfer/orft/inquiry', orft_inquiry)\r\n# api.add_route('/transfer/orft/confirm', orft_confirm)\r\napi.add_route('/native/ndid/utility/idp', ndid_list)\r\napi.add_route('/native/ndid/rp/request', ndid_request)\r\napi.add_route('/native/ndid/rp/data', as_data)\r\n# api.add_route('/external/dopa/idcard/chip', dopa)\r\n# api.add_route('/external/dopa/idcard/laser', dopa)\r\n"
},
{
"alpha_fraction": 0.5486806035041809,
"alphanum_fraction": 0.6615104675292969,
"avg_line_length": 42.959999084472656,
"blob_id": "63ae409f447a16e5ce9974529566f5b2b57f0f64",
"content_id": "f584f5cf4dfc008b2b2926ea07a62ba90f5043f9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 1099,
"license_type": "no_license",
"max_line_length": 485,
"num_lines": 25,
"path": "/sign_script.js",
"repo_name": "ksdaklmk/falcon-mockapi",
"src_encoding": "UTF-8",
"text": "import \"./libs/shim/core.js\";\n\nexport let options = { maxRedirects: 4 };\n\nconst Request = Symbol.for(\"request\");\npostman[Symbol.for(\"initial\")]({\n options\n});\n\nexport default function() {\n postman[Request]({\n name: \"Sign UAT\",\n id: \"66edffec-bc75-4581-a06c-eea38728a7f1\",\n method: \"POST\",\n address: \"https://sandbox.api.krungsri.net/native/ndid/eSignature/sign\",\n data:\n '{\\r\\n\\t\"payload\": {\\r\\n\\t\\t\"identifier\": \"3101800885320\",\\r\\n\\t\\t\"object_type\": \"01\",\\r\\n\\t\\t\"kid\": \"a3gyMDE4MTIyNjE2MjgxODgyNTRlOTY4NTJmYzQ3Y2RhNTMwNWE2Yzc5NDM2ODIybg==\",\\r\\n\\t\\t\"domain\": \"MIRAI\",\\r\\n\\t\\t\"namespace\": \"citizen_id\",\\r\\n\\t\\t\"request_id\": \"20200113112054117608\",\\r\\n\\t\\t\"reference_number\": \"x2020011311015301aabd27eaa44ae99590599257c88979\",\\r\\n\\t\\t\"object\": \"JVBERi0xLjUKJeLjz9MKNCAwIGVFT0YK\"\\r\\n\\t},\\r\\n\\t\"header\": {\\r\\n\\t\\t\"channel_id\": \"MIRAI\"\\r\\n\\t}\\r\\n}\\r\\n',\n headers: {\n \"API-Key\": \"l71ad26790bd0f4b9d81b215670b54fb3e\",\n \"X-Client-Transaction-DateTime\": \"2020-01-13T11:01:00.449+07:00\",\n \"X-Client-Transaction-ID\": \"a01fa0ae-190a-49d5-9cc1-465f9f9ab0dc\",\n \"Content-Type\": \"application/json\"\n }\n });\n}\n"
},
{
"alpha_fraction": 0.5490654110908508,
"alphanum_fraction": 0.5654205679893494,
"avg_line_length": 17.434782028198242,
"blob_id": "062ca124e1ea4025048d37c14b6a49954df441c8",
"content_id": "5d88d6ebccf6f40315b2d000fc8f68ab74f34e97",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 428,
"license_type": "no_license",
"max_line_length": 53,
"num_lines": 23,
"path": "/locustfile.py",
"repo_name": "ksdaklmk/falcon-mockapi",
"src_encoding": "UTF-8",
"text": "from locust import HttpLocust, TaskSet, task, between\n\n\nclass TestDiis(TaskSet):\n\n def on_start(self):\n self.client.post(\"/login\", {\n \"username\": \"test\",\n \"password\": \"\"\n })\n\n def on_stop(self):\n pass\n\n @task\n def sign_request(self):\n self.client.get(\"http://localhost:5000\")\n\n\nclass PerfTest(HttpLocust):\n\n task_set = TestDiis\n wait_time = between(5, 15)\n "
}
] | 5 |
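mockapi.py in the record above simulates backend behaviour by rotating canned error payloads: the confirm endpoint answers with code 55 on every third call and code 98 otherwise. The sketch below shows only that rotation logic, with the web framework stripped out; the helper name and payloads are illustrative assumptions.

# Sketch: rotate canned responses, one special payload every third call.
import itertools, json

def make_responder(regular, every_third):
    counter = itertools.count(1)
    def respond():
        n = next(counter)
        payload = every_third if n % 3 == 0 else regular
        return json.dumps(payload)
    return respond

if __name__ == "__main__":
    respond = make_responder({"error": {"code": "98"}}, {"error": {"code": "55"}})
    print([json.loads(respond())["error"]["code"] for _ in range(6)])
    # -> ['98', '98', '55', '98', '98', '55']

Keeping the call counter in a closure (or, as in the source, a class attribute) is what lets a stateless-looking mock endpoint exercise a client's retry handling.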
nilswagner/section-properties
|
https://github.com/nilswagner/section-properties
|
e8a5b23c198cf14d6df1b2ba71749ee0e677fd54
|
20c880a60d396d52feb9b8ff5abb371f2832da6a
|
9e2b8c2975f6a993e3bfd6e99e5c3de1df4b57d4
|
refs/heads/master
| 2021-05-20T00:18:38.992291 | 2020-04-01T07:30:17 | 2020-04-01T07:30:17 | 252,103,102 | 0 | 0 |
MIT
| 2020-04-01T07:27:56 | 2020-03-31T09:49:22 | 2020-01-24T00:54:55 | null |
[
{
"alpha_fraction": 0.7079557180404663,
"alphanum_fraction": 0.7291037440299988,
"avg_line_length": 22.64285659790039,
"blob_id": "0dd181819228c4d4fdf270d1e7a0d79c17d406c7",
"content_id": "49e233910098df8a6968e211beaa003429dfebd7",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "reStructuredText",
"length_bytes": 993,
"license_type": "permissive",
"max_line_length": 97,
"num_lines": 42,
"path": "/CHANGELOG.rst",
"repo_name": "nilswagner/section-properties",
"src_encoding": "UTF-8",
"text": "Change Log:\n===========\n\nv1.0.5:\n-------\n\n- Added calculation of monosymmetric constants\n- Added tapered flange I-section and channel sections\n- Added solid elliptical and hollow elliptcal sections (BenjaminFraser)\n- Added polygonal section (Agent6-6-6)\n- Handle zero radius for all section classes; handle r_out < t for relevant sections\n- Update Cee and Zed sections to account for short lips\n\nv1.0.4:\n-------\n\n- Added a monosymmetric I-section class\n- Extend the plastic centroid search range to the entire section\n- Remove the pc_region variable from the plastic centroid calculation as it is no longer relevant\n- Better verbose output for the plastic centroid calculation\n\nv1.0.3:\n-------\n\n- Retrieve cross-section stresses using get_stress()\n\nv1.0.2:\n-------\n\n- Fix returns for adding to geometry\n\nv1.0.1:\n-------\n\n- Added calculate_frame_properties()\n- Added methods for adding points, facets and control points to geometries\n- New pypi README file\n\nv1.0.0:\n-------\n\n- Initial release.\n"
},
{
"alpha_fraction": 0.530285120010376,
"alphanum_fraction": 0.5412587523460388,
"avg_line_length": 36.6315803527832,
"blob_id": "f6f1978020b4960c048cb127bde0a32da4f28692",
"content_id": "f896969a05ddecdab04b1296deb36fb4f36a4bbe",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 27885,
"license_type": "permissive",
"max_line_length": 107,
"num_lines": 741,
"path": "/sectionproperties/pre/pre.py",
"repo_name": "nilswagner/section-properties",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport meshpy.triangle as triangle\n\n\nclass Material:\n \"\"\"Class for structural materials.\n\n Provides a way of storing material properties related to a specific\n material. The color can be a multitude of different formats, refer to\n https://matplotlib.org/api/colors_api.html and\n https://matplotlib.org/examples/color/named_colors.html for more\n information.\n\n :param string name: Material name\n :param float elastic_modulus: Material modulus of elasticity\n :param float poissons_ratio: Material Poisson's ratio\n :param float yield_strength: Material yield strength\n :param color: Material color for rendering\n :type color: :class:`matplotlib.colors`\n\n :cvar string name: Material name\n :cvar float elastic_modulus: Material modulus of elasticity\n :cvar float poissons_ratio: Material Poisson's ratio\n :cvar float shear_modulus: Material shear modulus, derived from the elastic\n modulus and Poisson's ratio assuming an isotropic material\n :cvar float yield_strength: Material yield strength\n :cvar color: Material color for rendering\n :vartype color: :class:`matplotlib.colors`\n\n The following example creates materials for concrete, steel and timber::\n\n from sectionproperties.pre.pre import Material\n\n concrete = Material(name='Concrete', elastic_modulus=30.1e3, poissons_ratio=0.2, yield_strength=32,\n color='lightgrey')\n steel = Material(name='Steel', elastic_modulus=200e3, poissons_ratio=0.3, yield_strength=500,\n color='grey')\n timber = Material(name='Timber', elastic_modulus=8e3, poissons_ratio=0.35, yield_strength=20,\n color='burlywood')\n \"\"\"\n\n def __init__(self, name, elastic_modulus, poissons_ratio, yield_strength,\n color='w'):\n \"\"\"Inits the Material class\"\"\"\n\n self.name = name\n self.elastic_modulus = elastic_modulus\n self.poissons_ratio = poissons_ratio\n self.shear_modulus = elastic_modulus / (2 * (1 + poissons_ratio))\n self.yield_strength = yield_strength\n self.color = color\n\n\nclass GeometryCleaner:\n \"\"\"Class for cleaning :class:`~sectionproperties.pre.sections.Geometry`\n objects.\n\n :param geometry: Geometry object to clean\n :type geometry: :class:`~sectionproperties.pre.sections.Geometry`\n :param bool verbose: If set to true, information related to the geometry\n cleaning process is printed to the terminal.\n\n Provides methods to clean various aspects of the geometry including:\n\n * Zipping nodes - Find nodes that are close together (relative and absolute\n tolerance) and deletes one of the nodes and rejoins the facets to the\n remaining node.\n * Removing zero length facets - Removes facets that start and end at the\n same point.\n * Remove duplicate facets - Removes facets that have the same starting\n and ending point as an existing facet.\n * Removing overlapping facets - Searches for facets that overlap each\n other, given a tolerance angle, and reconstructs a unique set of facets\n along the overlapping region.\n * Remove unused points - Removes points that are not connected to any\n facets.\n * Intersect facets - Searches for intersections between two facets and adds\n the intersection point to the points list and splits the intersected\n facets.\n\n Note that a geometry cleaning method is provided to all\n :class:`~sectionproperties.pre.sections.Geometry` objects.\n\n :cvar geometry: Geometry object to clean\n :vartype geometry: :class:`~sectionproperties.pre.sections.Geometry`\n :cvar bool verbose: If set to true, information related to the geometry\n cleaning process is printed to the 
terminal.\n\n The following example creates a back-to-back 200PFC geometry, rotates the\n geometry by 30 degrees, and cleans the geometry before meshing::\n\n import sectionproperties.pre.sections as sections\n\n pfc_right = sections.PfcSection(d=203, b=133, t_f=7.8, t_w=5.8, r=8.9, n_r=8)\n pfc_left = sections.PfcSection(d=203, b=133, t_f=7.8, t_w=5.8, r=8.9, n_r=8)\n pfc_left.mirror_section(axis='y', mirror_point=[0, 0])\n geometry = sections.MergedSection([pfc_left, pfc_right])\n geometry.rotate_section(angle=30)\n geometry.clean_geometry(verbose=True)\n mesh = geometry.create_mesh(mesh_sizes=[5, 5])\n\n .. warning:: If the geometry were not cleaned in the previous example, the\n meshing algorithm would crash (most likely return a segment error).\n Cleaning the geometry is always recommended when creating a merged\n section which may result in overlapping or intersecting facets, or\n duplicate nodes.\n \"\"\"\n\n def __init__(self, geometry, verbose):\n \"\"\"Inits the GeometryCleaner class.\"\"\"\n\n self.geometry = geometry\n self.verbose = verbose\n\n def clean_geometry(self):\n \"\"\"Performs a full geometry clean on the `geometry` object.\"\"\"\n\n self.zip_points()\n self.remove_zero_length_facets()\n self.remove_duplicate_facets()\n self.remove_overlapping_facets()\n self.remove_unused_points()\n self.intersect_facets()\n\n return self.geometry\n\n def zip_points(self, atol=1e-8, rtol=1e-5):\n \"\"\"Zips points that are close to each other. Searches through the point\n list and merges two points if there are deemed to be sufficiently\n close. The average value of the coordinates is used for the new point.\n One of the points is deleted from the point list and the facet list is\n updated to remove references to the old points and renumber the\n remaining point indices in the facet list.\n\n :param float atol: Absolute tolerance for point zipping\n :param float rtol: Relative tolerance (to geometry extents) for point\n zipping\n \"\"\"\n\n idx_to_remove = []\n\n # determine rtol\n (x_min, x_max, y_min, y_max) = self.geometry.calculate_extents()\n geom_range = max(x_max - x_min, y_max - y_min)\n rel_tol = rtol * geom_range\n\n # loop through the list of points\n for (i, pt1) in enumerate(self.geometry.points):\n # check all other points\n for (j, pt2) in enumerate(self.geometry.points[i + 1:]):\n # get point indices\n idx_1 = i\n idx_2 = i + j + 1\n\n # determine distance between two points\n dist = ((pt2[0] - pt1[0]) ** 2 +\n (pt2[1] - pt1[1]) ** 2) ** 0.5\n\n # if the points are sufficiently close together...\n # and the point has not already been removed\n if ((dist < atol or dist < rel_tol) and\n idx_2 not in idx_to_remove):\n # update point1 (average of point1 + point2)\n pt1[0] = 0.5 * (pt1[0] + pt2[0])\n pt1[1] = 0.5 * (pt1[1] + pt2[1])\n\n # join facets connected to pt2 to pt1 instead\n self.replace_point_id(idx_2, idx_1)\n\n # add pt2 to the list of points to remove\n idx_to_remove.append(idx_2)\n\n if self.verbose:\n str = \"Zipped point {0}\".format(idx_2)\n str += \" to point {0}\".format(idx_1)\n print(str)\n\n # sort list of indices to remove in reverse order so as not to\n # comprimise the indices\n idx_to_remove = sorted(idx_to_remove, reverse=True)\n\n for idx in idx_to_remove:\n self.remove_point_id(idx)\n\n def remove_zero_length_facets(self):\n \"\"\"Searches through all facets and removes those that have the same\n starting and ending point.\"\"\"\n\n idx_to_remove = []\n\n # loop through the list of facets\n for (idx, fct) in enumerate(self.geometry.facets):\n 
if fct[0] == fct[1]:\n idx_to_remove.append(idx)\n\n # sort list of indices to remove in reverse order so as not to\n # comprimise the indices\n idx_to_remove = sorted(idx_to_remove, reverse=True)\n\n for idx in idx_to_remove:\n self.geometry.facets.pop(idx)\n\n if self.verbose:\n print(\"Removed zero length facet {0}\".format(idx))\n\n def remove_overlapping_facets(self):\n \"\"\"Searches through all facet combinations and fixes facets that\n overlap within a tolerance.\"\"\"\n\n cleaning = True\n\n while cleaning:\n # loop through the list of facets\n for (i, fct1) in enumerate(self.geometry.facets):\n broken = False\n\n # check all other facets\n for (j, fct2) in enumerate(self.geometry.facets[i + 1:]):\n # get facet indices\n idx_1 = i\n idx_2 = i + j + 1\n\n # get facets points\n # facet 1: p -> p + r\n p = np.array(self.geometry.points[fct1[0]])\n r = self.geometry.points[fct1[1]] - p\n\n # facet 2: q -> q + s\n q = np.array(self.geometry.points[fct2[0]])\n s = self.geometry.points[fct2[1]] - q\n\n pts = self.is_overlap(p, q, r, s, fct1, fct2)\n\n if pts is not None:\n # delete both facets\n idx_to_remove = sorted([idx_1, idx_2], reverse=True)\n for idx in idx_to_remove:\n self.geometry.facets.pop(idx)\n\n # add new facets\n for i in range(len(pts) - 1):\n self.geometry.facets.append([pts[i], pts[i + 1]])\n\n # remove duplicate facets\n self.remove_duplicate_facets()\n\n if self.verbose:\n str = \"Removed overlapping facets {0}...\".format(\n idx_to_remove)\n str += \"Rebuilt with points: {0}\".format(pts)\n print(str)\n\n # break both loops and loop through all facets again\n broken = True\n break\n\n if broken:\n break\n\n # if we've arrived at the end without detecting any overlaps\n if not broken:\n cleaning = False\n\n def remove_unused_points(self):\n \"\"\"Searches through all facets and removes points that are not\n connected to any facets.\"\"\"\n\n idx_to_remove = []\n facet_flattened = [i for fct in self.geometry.facets for i in fct]\n\n # loop through number of points\n for pt in range(len(self.geometry.points)):\n if pt not in facet_flattened:\n idx_to_remove.append(pt)\n\n if self.verbose:\n print(\"Removed unused point {0}\".format(pt))\n\n # sort list of indices to remove in reverse order so as not to\n # comprimise the indices\n idx_to_remove = sorted(idx_to_remove, reverse=True)\n\n for idx in idx_to_remove:\n self.remove_point_id(idx)\n\n def intersect_facets(self):\n \"\"\"Searches through all facet combinations and finds facets that\n intersect each other. 
The intersection point is added and the facets\n rebuilt.\"\"\"\n\n cleaning = True\n\n while cleaning:\n # loop through the list of facets\n for (i, fct1) in enumerate(self.geometry.facets):\n broken = False\n\n # check all other facets\n for (j, fct2) in enumerate(self.geometry.facets[i + 1:]):\n # get facet indices\n idx_1 = i\n idx_2 = i + j + 1\n\n # get facets points\n # facet 1: p -> p + r\n p = np.array(self.geometry.points[fct1[0]])\n r = self.geometry.points[fct1[1]] - p\n\n # facet 2: q -> q + s\n q = np.array(self.geometry.points[fct2[0]])\n s = self.geometry.points[fct2[1]] - q\n\n pt = self.is_intersect(p, q, r, s)\n\n if pt is not None:\n # add point\n self.geometry.points.append([pt[0], pt[1]])\n pt_idx = len(self.geometry.points) - 1\n\n # delete both facets\n idx_to_remove = sorted([idx_1, idx_2], reverse=True)\n for idx in idx_to_remove:\n self.geometry.facets.pop(idx)\n\n # rebuild facet 1\n self.geometry.facets.append([fct1[0], pt_idx])\n self.geometry.facets.append([pt_idx, fct1[1]])\n\n # rebuild facet 2\n self.geometry.facets.append([fct2[0], pt_idx])\n self.geometry.facets.append([pt_idx, fct2[1]])\n\n if self.verbose:\n str = \"Intersected facets\"\n str += \" {0} and {1}\".format(idx_1, idx_2)\n str += \" at point: {0}\".format(pt)\n print(str)\n\n # break both loops and loop through all facets again\n broken = True\n break\n\n if broken:\n break\n\n # if we've arrived at the end without detecting any overlaps\n if not broken:\n cleaning = False\n\n def replace_point_id(self, id_old, id_new):\n \"\"\"Searches all facets and replaces references to point id_old with\n id_new.\n\n :param int id_old: Point index to be replaced\n :param int id_new: Point index to replace point id_old\n \"\"\"\n\n # loop through all facets\n for (i, facet) in enumerate(self.geometry.facets):\n # loop through the point indices defining the facet\n for (j, point_id) in enumerate(facet):\n if point_id == id_old:\n self.geometry.facets[i][j] = id_new\n\n def remove_point_id(self, point_id):\n \"\"\"Removes point point_id from the points list and renumbers the\n references to points after point_id in the facet list.\n\n :param int point_id: Index of point to be removed\n \"\"\"\n\n # remove index point_id from the points list\n self.geometry.points.pop(point_id)\n\n # renumber facet references to points after point_id\n for (i, facet) in enumerate(self.geometry.facets):\n # loop through the point indices defining the facet\n for (j, p_id) in enumerate(facet):\n # if the point index is greater the point to be deleted\n if p_id > point_id:\n # decrement the point index\n self.geometry.facets[i][j] -= 1\n\n def is_duplicate_facet(self, fct1, fct2):\n \"\"\"Checks to see if to facets are duplicates.\n\n :param fct1: First facet to compare\n :type fct1: list[int, int]\n :param fct2: Second facet to compare\n :type fct2: list[int, int]\n :return: Whether or not the facets are identical\n :rtype: bool\n \"\"\"\n\n # check for a facet duplicate\n if fct1 == fct2 or fct1 == list(reversed(fct2)):\n return True\n else:\n return False\n\n def is_intersect(self, p, q, r, s):\n \"\"\"Determines if the line segment p->p+r intersects q->q+s. 
Implements\n Gareth Rees's answer: https://stackoverflow.com/questions/563198.\n\n :param p: Starting point of the first line segment\n :type p: :class:`numpy.ndarray` [float, float]\n :param q: Starting point of the second line segment\n :type q: :class:`numpy.ndarray` [float, float]\n :param r: Vector of the first line segment\n :type r: :class:`numpy.ndarray` [float, float]\n :param s: Vector of the second line segment\n :type s: :class:`numpy.ndarray` [float, float]\n :returns: The intersection point of the line segments. If there is no\n intersection, returns None.\n :rtype: :class:`numpy.ndarray` [float, float]\n \"\"\"\n\n if np.cross(r, s) != 0:\n # calculate t and u\n t = np.cross(q - p, s) / np.cross(r, s)\n u = np.cross(p - q, r) / np.cross(s, r)\n\n # modify from closed inequality (<=) to open (<) so end...\n # intersections are not picked up\n if (t > 0 and t < 1) and (u > 0 and u < 1):\n return p + t * r\n else:\n return None\n\n def is_overlap(self, p, q, r, s, fct1, fct2):\n \"\"\"Determines if the line segment p->p+r overlaps q->q+s. Implements\n Gareth Rees's answer: https://stackoverflow.com/questions/563198.\n\n :param p: Starting point of the first line segment\n :type p: :class:`numpy.ndarray` [float, float]\n :param q: Starting point of the second line segment\n :type q: :class:`numpy.ndarray` [float, float]\n :param r: Vector of the first line segment\n :type r: :class:`numpy.ndarray` [float, float]\n :param s: Vector of the second line segment\n :type s: :class:`numpy.ndarray` [float, float]\n :param fct1: sadkjas;dkas;dj\n :returns: A list containing the points required for facet rebuilding.\n If there is no rebuild to be done, returns None.\n :rtype: list[list[float, float]]\n \"\"\"\n\n tol = 1e-3 # minimum angle tolerance (smaller is considered overlap)\n float_tol = 1e-12 # rounding error tolerance\n\n # relativise tolerance by length of smallest vector\n tol *= min(np.linalg.norm(r), np.linalg.norm(s))\n\n # are the line segments collinear?\n if abs(np.cross(r, s)) < tol:\n if abs(np.cross(q - p, r)) < tol:\n # CASE 1: two line segments are collinear\n # calculate end points of second segment in terms of the...\n # equation of the first line segment (p + t * r)\n if np.dot(s, r) >= 0:\n t0 = np.dot(q - p, r) / np.dot(r, r)\n t1 = np.dot(q + s - p, r) / np.dot(r, r)\n else:\n t0 = np.dot(q + s - p, r) / np.dot(r, r)\n t1 = np.dot(q - p, r) / np.dot(r, r)\n\n # check interval [t0, t1] intersects (0, 1)\n if t0 < 1 - float_tol and float_tol < t1:\n # recalculate t0 and t1 based on original assumptions\n t0 = np.dot(q - p, r) / np.dot(r, r)\n t1 = np.dot(q + s - p, r) / np.dot(r, r)\n\n t = sorted(list(set([0.0, t0, 1.0, t1])))\n idx_list = []\n\n # loop through new points\n for pt in t:\n if pt == 0.0:\n idx_list.append(fct1[0])\n elif pt == 1.0:\n idx_list.append(fct1[1])\n elif pt == t0:\n idx_list.append(fct2[0])\n elif pt == t1:\n idx_list.append(fct2[1])\n\n return idx_list\n else:\n # collinear and disjoint\n return None\n else:\n return None\n\n def remove_duplicate_facets(self):\n \"\"\"Searches through all facets and removes facets that are duplicates,\n independent of the point order.\"\"\"\n\n idx_to_remove = []\n\n # loop through the list of facets\n for (i, fct1) in enumerate(self.geometry.facets):\n # check all other facets\n for (j, fct2) in enumerate(self.geometry.facets[i + 1:]):\n # get facet indices\n idx_1 = i\n idx_2 = i + j + 1\n\n # check for a duplicate facet that has not already been deleted\n if (self.is_duplicate_facet(fct1, fct2) 
and\n idx_2 not in idx_to_remove):\n idx_to_remove.append(idx_2)\n\n if self.verbose:\n str = \"Removed duplicate facet: {0}\".format(idx_2)\n str += \" (identical to facet: {0})\".format(idx_1)\n print(str)\n\n # sort list of indices to remove in reverse order so as not to\n # comprimise the indices\n idx_to_remove = sorted(idx_to_remove, reverse=True)\n\n for idx in idx_to_remove:\n self.geometry.facets.pop(idx)\n\n\ndef create_mesh(points, facets, holes, control_points, mesh_sizes):\n \"\"\"Creates a quadratic triangular mesh using the meshpy module, which\n utilises the code 'Triangle', by Jonathan Shewchuk.\n\n :param points: List of points *(x, y)* defining the vertices of the\n cross-section\n :type points: list[list[float, float]]\n :param facets: List of point index pairs *(p1, p2)* defining the edges of\n the cross-section\n :type points: list[list[int, int]]\n :param holes: List of points *(x, y)* defining the locations of holes\n within the cross-section. If there are no holes, provide an empty list\n [].\n :type holes: list[list[float, float]]\n :param control_points: A list of points *(x, y)* that define different\n regions of the cross-section. A control point is an arbitrary point\n within a region enclosed by facets.\n :type control_points: list[list[float, float]]\n :param mesh_sizes: List of maximum element areas for each region defined by\n a control point\n :type mesh_sizes: list[float]\n\n :return: Object containing generated mesh data\n :rtype: :class:`meshpy.triangle.MeshInfo`\n \"\"\"\n\n mesh = triangle.MeshInfo() # create mesh info object\n mesh.set_points(points) # set points\n mesh.set_facets(facets) # set facets\n mesh.set_holes(holes) # set holes\n\n # set regions\n mesh.regions.resize(len(control_points)) # resize regions list\n region_id = 0 # initialise region ID variable\n\n for (i, cp) in enumerate(control_points):\n mesh.regions[i] = [cp[0], cp[1], region_id, mesh_sizes[i]]\n region_id += 1\n\n mesh = triangle.build(\n mesh, min_angle=30, mesh_order=2, quality_meshing=True,\n attributes=True, volume_constraints=True)\n\n return mesh\n\n\n# class LoadData:\n# \"\"\"\n# This class parses the input load data and stores the load values.\n# \"\"\"\n#\n# def __init__(self, loads):\n# self.containsLoads = False\n# try:\n# self.Nzz = loads[\"nzz\"]\n# self.containsLoads = True\n# except KeyError:\n# self.Nzz = 0.0\n#\n# try:\n# self.Vx = loads[\"vx\"]\n# self.containsLoads = True\n# except KeyError:\n# self.Vx = 0.0\n#\n# try:\n# self.Vy = loads[\"vy\"]\n# self.containsLoads = True\n# except KeyError:\n# self.Vy = 0.0\n#\n# try:\n# self.Mxx = loads[\"mxx\"]\n# self.containsLoads = True\n# except KeyError:\n# self.Mxx = 0.0\n#\n# try:\n# self.Myy = loads[\"myy\"]\n# self.containsLoads = True\n# except KeyError:\n# self.Myy = 0.0\n#\n# try:\n# self.M11 = loads[\"m11\"]\n# self.containsLoads = True\n# except KeyError:\n# self.M11 = 0.0\n#\n# try:\n# self.M22 = loads[\"m22\"]\n# self.containsLoads = True\n# except KeyError:\n# self.M22 = 0.0\n#\n# try:\n# self.Mzz = loads[\"mzz\"]\n# self.containsLoads = True\n# except KeyError:\n# self.Mzz = 0.0\n#\n#\n# class CrossSectionSettings:\n# \"\"\"\n# This class contains the settings used for the cross-section analysis.\n# \"\"\"\n#\n# def __init__(self, settings):\n# # load default settings\n# self.checkGeometry = True\n# self.checkMesh = True\n# self.outputLog = True\n# self.outputSettings = True\n# self.outputResults = True\n# self.plasticAnalysis = True\n# self.numberFormat = \".2f\"\n# self.solverType = \"cgs\"\n# 
self.tol = 1e-5\n# self.plots = []\n#\n# # load custom settings\n# self.applySettings(settings)\n#\n# def applySettings(self, settings):\n# # read all valid settings from the dictionary settings\n# try:\n# testBool = (settings[\"general\"][\"check-geometry\"].lower() in\n# [\"true\"])\n# self.checkGeometry = testBool\n# except KeyError:\n# pass\n#\n# try:\n# testBool = (settings[\"general\"][\"check-mesh\"].lower() in\n# [\"true\"])\n# self.checkMesh = testBool\n# except KeyError:\n# pass\n#\n# try:\n# testBool = (settings[\"general\"][\"output-log\"].lower() in\n# [\"true\"])\n# self.outputLog = testBool\n# except KeyError:\n# pass\n#\n# try:\n# testBool = (settings[\"general\"][\"output-settings\"].lower() in\n# [\"true\"])\n# self.outputSettings = testBool\n# except KeyError:\n# pass\n#\n# try:\n# testBool = (settings[\"general\"][\"output-results\"].lower() in\n# [\"true\"])\n# self.outputResults = testBool\n# except KeyError:\n# pass\n#\n# try:\n# testBool = (settings[\"general\"][\"plastic-analysis\"].lower() in\n# [\"true\"])\n# self.plasticAnalysis = testBool\n# except KeyError:\n# pass\n#\n# try:\n# width = int(settings[\"number-format\"][\"width\"])\n# precision = int(settings[\"number-format\"][\"precision\"])\n# numType = str(settings[\"number-format\"][\"type\"])\n# self.numberFormat = str(width) + \".\" + str(precision) + numType\n# except KeyError:\n# pass\n#\n# try:\n# solverType = settings[\"solver\"][\"type\"]\n#\n# if (solverType.lower() == \"cgs\"):\n# self.solverType = \"cgs\"\n# elif (solverType.lower() == \"direct\"):\n# self.solverType = \"direct\"\n# except KeyError:\n# pass\n#\n# try:\n# self.tol = settings[\"solver\"][\"tol\"]\n# except KeyError:\n# pass\n#\n# try:\n# self.plots = settings[\"plots\"]\n# except KeyError:\n# pass\n#\n# def printSettings(self):\n# \"\"\"\n# This method prints the current settings to the console.\n# \"\"\"\n#\n# print(\"\\n-----------------------------\")\n# print(\"Program Settings\")\n# print(\"-----------------------------\")\n# print(\"General Settings:\")\n# print(\"\\tcheck-geometry:\\t{}\".format(self.checkGeometry))\n# print(\"\\tcheck-mesh:\\t{}\".format(self.checkMesh))\n# print(\"\\toutput-log:\\t{}\".format(self.outputLog))\n# print(\"\\toutput-setting:\\t{}\".format(self.outputSettings))\n# print(\"\\toutput-results:\\t{}\".format(self.outputResults))\n# print(\"Output Settings:\")\n# print(\"\\tnumber-format:\\t{}\".format(self.numberFormat))\n# print(\"Solver Settings:\")\n# print(\"\\ttype:\\t\\t{}\".format(self.solverType))\n# print(\"\\ttol:\\t\\t{}\".format(self.tol))\n# print(\"Plot Settings:\")\n# print(\"\\tplots:\\t\\t{}\\n\".format(self.plots))\n"
},
{
"alpha_fraction": 0.5789473652839661,
"alphanum_fraction": 0.5789473652839661,
"avg_line_length": 18,
"blob_id": "9a921283952eb74fdde2031a9e0f01f81a617d80",
"content_id": "ed4bb67ea5cfef43caaad5078131fc7c0211943a",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 38,
"license_type": "permissive",
"max_line_length": 24,
"num_lines": 2,
"path": "/todo.md",
"repo_name": "nilswagner/section-properties",
"src_encoding": "UTF-8",
"text": "# TODO LIST:\n- [ ] add lots of tests!\n"
}
] | 3 |
hpppereira/django-remo | https://github.com/hpppereira/django-remo | f89d710008fada48aa1929746392817674afb97d | 9ee5227637cceb32cb45706ecd8b5e14585b50c8 | be2e48278a8b780f50e7ffdab2d67900ebe0ed19 | refs/heads/main | 2023-08-17T00:23:28.195858 | 2023-08-11T00:02:08 | 2023-08-11T00:02:08 | 363,453,979 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.4977477490901947,
"alphanum_fraction": 0.5675675868988037,
"avg_line_length": 22.66666603088379,
"blob_id": "370df4e8ac87769930663370f9abd1b3e6cb7332",
"content_id": "4bc77e80b4a151ce1f0ffbaf8ec3ec006b895f17",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 444,
"license_type": "no_license",
"max_line_length": 97,
"num_lines": 18,
"path": "/remo/migrations/0018_estacao_csv.py",
"repo_name": "hpppereira/django-remo",
"src_encoding": "UTF-8",
"text": "# Generated by Django 2.1.2 on 2018-12-07 03:30\r\n\r\nfrom django.db import migrations, models\r\n\r\n\r\nclass Migration(migrations.Migration):\r\n\r\n dependencies = [\r\n ('remo', '0017_auto_20181128_1942'),\r\n ]\r\n\r\n operations = [\r\n migrations.AddField(\r\n model_name='estacao',\r\n name='csv',\r\n field=models.FileField(blank=True, null=True, upload_to='media', verbose_name='csv'),\r\n ),\r\n ]\r\n"
},
{
"alpha_fraction": 0.494140625,
"alphanum_fraction": 0.556640625,
"avg_line_length": 26.44444465637207,
"blob_id": "364e91a0a5a653815cdecaf803e4a4d3b37fbfbe",
"content_id": "83b7ebc636e286a77837e7f6b38c21bb3e7e620b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 513,
"license_type": "no_license",
"max_line_length": 159,
"num_lines": 18,
"path": "/remo/migrations/0032_auto_20181220_2050.py",
"repo_name": "hpppereira/django-remo",
"src_encoding": "UTF-8",
"text": "# Generated by Django 2.1.2 on 2018-12-20 22:50\r\n\r\nfrom django.db import migrations, models\r\n\r\n\r\nclass Migration(migrations.Migration):\r\n\r\n dependencies = [\r\n ('remo', '0031_auto_20181220_2048'),\r\n ]\r\n\r\n operations = [\r\n migrations.AlterField(\r\n model_name='person',\r\n name='position',\r\n field=models.CharField(choices=[('C', 'Coordenador'), ('P', 'Pesquisador'), ('A', 'Aluno'), ('T', 'Técnico')], max_length=1, verbose_name='Cargo'),\r\n ),\r\n ]\r\n"
},
{
"alpha_fraction": 0.4898989796638489,
"alphanum_fraction": 0.5681818127632141,
"avg_line_length": 21.294116973876953,
"blob_id": "732e77273c6303ff8962e143ea062509a7442382",
"content_id": "1e4db9a71b99f558366e10cd19a4ac161b10dff6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 396,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 17,
"path": "/remo/migrations/0033_auto_20181220_2052.py",
"repo_name": "hpppereira/django-remo",
"src_encoding": "UTF-8",
"text": "# Generated by Django 2.1.2 on 2018-12-20 22:52\r\n\r\nfrom django.db import migrations\r\n\r\n\r\nclass Migration(migrations.Migration):\r\n\r\n dependencies = [\r\n ('remo', '0032_auto_20181220_2050'),\r\n ]\r\n\r\n operations = [\r\n migrations.AlterModelOptions(\r\n name='person',\r\n options={'verbose_name': 'Equipe', 'verbose_name_plural': 'Equipe'},\r\n ),\r\n ]\r\n"
},
{
"alpha_fraction": 0.553170382976532,
"alphanum_fraction": 0.5647292137145996,
"avg_line_length": 36.849998474121094,
"blob_id": "272b70b3d9e76330c57e863b40200f3f590681ad",
"content_id": "107ade07a3c305e23722ef13222645c9465885a1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3045,
"license_type": "no_license",
"max_line_length": 167,
"num_lines": 80,
"path": "/remo/migrations/0017_auto_20181128_1942.py",
"repo_name": "hpppereira/django-remo",
"src_encoding": "UTF-8",
"text": "# Generated by Django 2.1.2 on 2018-11-28 19:42\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('remo', '0016_auto_20181128_0014'),\n ]\n\n operations = [\n migrations.AlterModelOptions(\n name='destaque',\n options={'verbose_name': 'Destaques', 'verbose_name_plural': 'Destaques'},\n ),\n migrations.AlterModelOptions(\n name='doc',\n options={'verbose_name': 'Documentos', 'verbose_name_plural': 'Documentos'},\n ),\n migrations.AlterModelOptions(\n name='doi',\n options={'verbose_name': 'DOI', 'verbose_name_plural': 'DOI'},\n ),\n migrations.AlterModelOptions(\n name='estacao',\n options={'verbose_name': 'Estações', 'verbose_name_plural': 'Estações'},\n ),\n migrations.AlterModelOptions(\n name='index',\n options={'verbose_name': 'Index', 'verbose_name_plural': 'Index'},\n ),\n migrations.AlterModelOptions(\n name='institution',\n options={'verbose_name': 'Instituições Parceiras', 'verbose_name_plural': 'Instituições Parceiras'},\n ),\n migrations.AlterModelOptions(\n name='noticia',\n options={'verbose_name': 'Notícias', 'verbose_name_plural': 'Notícias'},\n ),\n migrations.AlterModelOptions(\n name='person',\n options={'verbose_name': 'Colaborador', 'verbose_name_plural': 'Colaborador'},\n ),\n migrations.AlterModelOptions(\n name='publicacao',\n options={'verbose_name': 'Publicações', 'verbose_name_plural': 'Publicações'},\n ),\n migrations.AddField(\n model_name='institution',\n name='estado',\n field=models.CharField(default='', max_length=2, verbose_name='UF'),\n preserve_default=False,\n ),\n migrations.AlterField(\n model_name='destaque',\n name='image',\n field=models.FileField(blank=True, null=True, upload_to='imagens/', verbose_name='Imagem'),\n ),\n migrations.AlterField(\n model_name='doc',\n name='image',\n field=models.FileField(blank=True, null=True, upload_to='imagens/', verbose_name='Documento'),\n ),\n migrations.AlterField(\n model_name='doi',\n name='doi',\n field=models.CharField(blank=True, help_text='Caso a publicação não tenha DOI, preencher tudo manualmente', max_length=200, null=True, verbose_name='Doi'),\n ),\n migrations.AlterField(\n model_name='noticia',\n name='image',\n field=models.FileField(blank=True, null=True, upload_to='imagens/', verbose_name='Imagem'),\n ),\n migrations.AlterField(\n model_name='paginas',\n name='image',\n field=models.FileField(blank=True, null=True, upload_to='imagens/', verbose_name='Documento'),\n ),\n ]\n"
},
{
"alpha_fraction": 0.5581853985786438,
"alphanum_fraction": 0.625246524810791,
"avg_line_length": 27.16666603088379,
"blob_id": "0fd30215fa622ee784283b5117ac5d1466596e7b",
"content_id": "7e30a05ae3790811d274a3b1b1dbf45f4d461364",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 510,
"license_type": "no_license",
"max_line_length": 180,
"num_lines": 18,
"path": "/remo/migrations/0040_auto_20190319_2144.py",
"repo_name": "hpppereira/django-remo",
"src_encoding": "UTF-8",
"text": "# Generated by Django 2.1.7 on 2019-03-20 00:44\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('remo', '0039_auto_20190315_2048'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='doi',\n name='doi',\n field=models.CharField(blank=True, help_text='Caso a publicação não tenha DOI, preencher tudo manualmente', max_length=200, null=True, unique=True, verbose_name='DOI'),\n ),\n ]\n"
},
{
"alpha_fraction": 0.5315374732017517,
"alphanum_fraction": 0.5578186511993408,
"avg_line_length": 38.02564239501953,
"blob_id": "53e6b902c671fda12b28c978151015054d7b676c",
"content_id": "3f332ab73f26a0b1a557eefd3ecf0a411e9408eb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1528,
"license_type": "no_license",
"max_line_length": 137,
"num_lines": 39,
"path": "/remo/migrations/0057_diretorios_documentos.py",
"repo_name": "hpppereira/django-remo",
"src_encoding": "UTF-8",
"text": "# Generated by Django 2.1.7 on 2019-05-09 21:16\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('remo', '0056_auto_20190504_1943'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Diretorios',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('title', models.CharField(max_length=200, verbose_name='Nome do diretório')),\n ],\n options={\n 'verbose_name': 'Diretório',\n 'verbose_name_plural': 'Diretórios',\n },\n ),\n migrations.CreateModel(\n name='Documentos',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('title', models.CharField(max_length=200, verbose_name='Titulo em Português')),\n ('title_en', models.CharField(max_length=200, verbose_name='Titulo em Inglês')),\n ('docs', models.FileField(blank=True, null=True, upload_to='docs/', verbose_name='Documento')),\n ('dire', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='remo.Diretorios', verbose_name='Diretório')),\n ],\n options={\n 'verbose_name': 'Documento',\n 'verbose_name_plural': 'Documentos',\n },\n ),\n ]\n"
},
{
"alpha_fraction": 0.7065989971160889,
"alphanum_fraction": 0.7065989971160889,
"avg_line_length": 35.48147964477539,
"blob_id": "40e4f1801c5567246fc307ef7eac405d216d622e",
"content_id": "32c9db90ea168b8cce1c914897b60cb620816038",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1970,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 54,
"path": "/remo/admin.py",
"repo_name": "hpppereira/django-remo",
"src_encoding": "UTF-8",
"text": "from django.contrib import admin\nfrom .models import *\n\nclass IndexAdmin(admin.ModelAdmin):\n list_display = ('title_pt', 'title_en', 'text_pt', 'text_en', 'position')\n search_fields = ('title_pt', 'title_en', 'text_pt', 'text_en', 'position')\nadmin.site.register(Index, IndexAdmin)\n\nclass InstitutionAdmin(admin.ModelAdmin):\n list_display = ('name', 'initials', 'country', 'city')\n search_fields = ('name', 'initials', 'country', 'city')\nadmin.site.register(Institution, InstitutionAdmin)\n\nclass PersonAdmin(admin.ModelAdmin):\n list_display = ('name', 'email', 'institution', 'position')\n list_filter = ('institution', 'position')\nadmin.site.register(Person, PersonAdmin)\n\nclass NoticiaAdmin(admin.ModelAdmin):\n list_display = ('title_pt', 'published_date', 'author')\n list_filter = ('author',)\nadmin.site.register(Noticia, NoticiaAdmin)\n\nclass CampanhaAdmin(admin.ModelAdmin):\n list_display = ('estacao','prof', 'csv', 'data_i', 'data_f')\n list_filter = ('estacao',)\nadmin.site.register(Campanha,CampanhaAdmin)\n\nclass DestaqueAdmin(admin.ModelAdmin):\n\tlist_display = ('title_pt','text_pt')\n\tsearch_fields = ('title_pt','text_pt')\nadmin.site.register(Destaque,DestaqueAdmin)\n\nclass EstacaoAdmin(admin.ModelAdmin):\n\tlist_display = ('nome','lat','lon')\n\tsearch_fields = ('nome','lat','lon')\nadmin.site.register(Estacao,EstacaoAdmin)\n\nclass PublicacaoAdmin(admin.ModelAdmin):\n\tlist_display = ('doi', 'title','ano','authors', 'url', 'position')\n\tsearch_fields = ('doi', 'title','ano','authors', 'url', 'position')\nadmin.site.register(Publicacao,PublicacaoAdmin)\n\nclass ContactsAdmin(admin.ModelAdmin):\n list_display = ('name', 'email', 'tel')\nadmin.site.register(Contacts, ContactsAdmin)\n\n# class DiretoriosAdmin(admin.ModelAdmin):\n# list_display = ('title',)\n# admin.site.register(Diretorios,DiretoriosAdmin)\n\n# class DocumentosAdmin(admin.ModelAdmin):\n# list_display = ('title',)\n# admin.site.register(Documentos, DocumentosAdmin)\n"
},
{
"alpha_fraction": 0.5503292679786682,
"alphanum_fraction": 0.5794920325279236,
"avg_line_length": 36.96428680419922,
"blob_id": "9a2c788c537452e525299fb464f18b0944ee8b77",
"content_id": "888bc9b63b2782c1610a6c34ec19bc8a9ff2d2b5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1069,
"license_type": "no_license",
"max_line_length": 135,
"num_lines": 28,
"path": "/remo/migrations/0018_serie.py",
"repo_name": "hpppereira/django-remo",
"src_encoding": "UTF-8",
"text": "# Generated by Django 2.0.6 on 2018-12-07 00:55\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('remo', '0017_auto_20181128_1942'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Serie',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('dados', models.FileField(blank=True, null=True, upload_to='dados/', verbose_name='Dados')),\n ('data_i', models.DateTimeField(blank=True, null=True, verbose_name='Data inicial')),\n ('data_f', models.DateTimeField(blank=True, null=True, verbose_name='Data final')),\n ('estacao', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='remo.Estacao', verbose_name='Estação')),\n ],\n options={\n 'verbose_name': 'Estações',\n 'verbose_name_plural': 'Estações',\n },\n ),\n ]\n"
},
{
"alpha_fraction": 0.5756285190582275,
"alphanum_fraction": 0.5945459008216858,
"avg_line_length": 38.90522766113281,
"blob_id": "31ca52f552846d7c2c9f7e1984f109a26ea551ae",
"content_id": "1abca705b342813efd58afc4b3696c4075a662b3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 12266,
"license_type": "no_license",
"max_line_length": 142,
"num_lines": 306,
"path": "/remo/models.py",
"repo_name": "hpppereira/django-remo",
"src_encoding": "UTF-8",
"text": "from django.db import models\nfrom django.utils import timezone\n# import requests\nfrom crossref.restful import Works\nfrom datetime import datetime\nimport pandas as pd\nimport os\n\nclass Index(models.Model):\n POSITION = (('L', 'O que é'),\n ('C', 'Objetivos'),\n ('O', 'Dados'))\n\n position = models.CharField(max_length=1, choices=POSITION,verbose_name=\"Tipo de Publicação\")\n title_pt = models.CharField(verbose_name=\"Titulo em Português\",max_length=200)\n title_en = models.CharField(verbose_name=\"Titulo em Inglês\",max_length=200)\n text_pt = models.TextField(verbose_name=\"Texto em Português\")\n text_en = models.TextField(verbose_name=\"Texto em Inglês\")\n \n def __str__(self):\n return self.title_pt\n\n class Meta:\n verbose_name = 'Index'\n verbose_name_plural = 'Index'\n\nclass Institution(models.Model):\n name = models.CharField(max_length=60,verbose_name=\"Nome\")\n initials = models.CharField(max_length=8,verbose_name=\"Sigla\")\n country = models.CharField(max_length=30,verbose_name=\"Pais\")\n city = models.CharField(max_length=30,verbose_name=\"Cidade\")\n estado = models.CharField(max_length=2,verbose_name=\"UF\")\n\n def __str__(self):\n return self.name\n\n class Meta:\n verbose_name = 'Instituições'\n verbose_name_plural = 'Instituições'\n\nclass Person(models.Model):\n POSITION = (\n ('A', 'Coordenador'),\n ('B', 'Pesquisador'),\n ('C', 'Aluno Pós-Graduação'),\n ('D', 'Aluno Graduação'),\n ('E', 'Técnico'),\n ('F', 'Pesquisador Egresso'),\n ('G', 'Colaborador Externo'),\n )\n\n name = models.CharField(max_length=60,verbose_name=\"Nome\")\n email = models.EmailField(max_length=60,verbose_name=\"E-mail\", blank=True)\n institution = models.ForeignKey(Institution,verbose_name=\"Instituição\", on_delete=models.CASCADE)\n lattes = models.CharField(max_length=100,verbose_name=\"Link para o Lattes\", blank=True)\n position = models.CharField(max_length=2, choices=POSITION,verbose_name=\"Cargo\")\n\n def __str__(self):\n return self.name\n\n class Meta:\n verbose_name = 'Equipe'\n verbose_name_plural = 'Equipe'\n\nclass Noticia(models.Model):\n author = models.ForeignKey('auth.User', on_delete=models.CASCADE)\n title_pt = models.CharField(verbose_name=\"Titulo em Português\",max_length=200)\n title_en = models.CharField(verbose_name=\"Titulo em Inglês\",max_length=200)\n text_pt = models.TextField(verbose_name=\"Texto em Português\")\n text_en = models.TextField(verbose_name=\"Texto em Inglês\")\n image = models.FileField(verbose_name=\"Imagem\",upload_to='imagens/', blank=True, null=True)\n created_date = models.DateTimeField(default=timezone.now)\n published_date = models.DateTimeField(blank=True, null=True)\n\n def publish(self):\n self.published_date = timezone.now()\n self.save()\n\n def __str__(self):\n return self.title_pt\n\n class Meta:\n verbose_name = 'Notícias'\n verbose_name_plural = 'Notícias'\n\nclass Destaque(models.Model):\n title_pt = models.CharField(verbose_name=\"Titulo em Português\",max_length=200)\n title_en = models.CharField(verbose_name=\"Titulo em Inglês\",max_length=200)\n text_pt = models.TextField(verbose_name=\"Texto em Português\")\n text_en = models.TextField(verbose_name=\"Texto em Inglês\")\n image = models.FileField(verbose_name=\"Imagem\",upload_to='imagens/', blank=True, null=True)\n url = models.CharField(verbose_name=\"URL\",max_length=200)\n\n def publish(self):\n self.published_date = timezone.now()\n self.save()\n\n def __str__(self):\n return self.title_pt\n\n class Meta:\n verbose_name = 'Destaques'\n 
verbose_name_plural = 'Destaques'\n\nclass Publicacao(models.Model):\n POSITION = (\n ('A', 'Artigos'),\n ('B', 'Livros'),\n ('C', 'Congressos'),\n ('D', 'Monografia'),\n ('E', 'Mestrado'),\n ('F', 'Doutorado'),\n )\n\n doi = models.CharField(\"DOI\",max_length=200, unique=True, blank=True, null=True, \n help_text=\"Ex: 10.1007/s10236-017-1113-9. Caso a publicação não tenha DOI, preencher os outros campos manualmente\")\n title = models.CharField(\"Titulo\",max_length=500,blank=True, null=True, default='#')\n ano = models.CharField(\"Ano\",max_length=4,blank=True, null=True, default='#')\n authors = models.CharField(\"Autores\",max_length=200,blank=True, null=True, default='#')\n url = models.CharField(\"URL\",max_length=200,blank=True, null=True, default='#')\n position = models.CharField(max_length=1, choices=POSITION,verbose_name=\"Tipo de Publicação\")\n\n def save(self, *args, **kwargs):\n if self.doi != None:\n # a = requests.get(\"https://api.altmetric.com/v1/doi/\"+str(self.doi))\n # self.title = a.json()['title']\n # self.url = a.json()['url']\n # self.ano = datetime.utcfromtimestamp(a.json()['published_on']).strftime('%Y')\n # self.authors = str(a.json()['authors']).replace(\"[\",\"\").replace(\"]\",\"\").replace(\"'\",\"\")\n # print (str(a.json()['authors']).replace(\"[\",\"\").replace(\"]\",\"\").replace(\"'\",\"\"))\n\n works = Works()\n a = works.doi(str(self.doi))\n self.title = a['title'][0]\n self.ano = int(a['issued']['date-parts'][0][0])\n self.url = a['URL']\n n = []\n nn = []\n for i in range(len(a['author'])):\n names = a['author'][i]['given'].split()\n n.append(a['author'][i]['family'] + ', ')\n for ii in range(len(names)):\n n[-1] = n[-1] + names[ii][0] + '. '\n self.authors = str(n)[1:-1].replace(\"'\",\"\").replace(\". ,\", \".;\")\n # coloca et al se tiver mais que 2 autores\n if len(self.authors.split(';')) > 2:\n self.authors = self.authors.split(';')[0].split(',')[0] + ' et al.'\n\n super(Publicacao, self).save(*args, **kwargs)\n\n def __str__(self):\n return self.title\n\n class Meta:\n verbose_name = 'Publicações'\n verbose_name_plural = 'Publicações' \n\nclass Estacao(models.Model):\n nome = models.CharField(\"Nome da Estação\",max_length=200)\n lat = models.FloatField(\"Latitude\")\n lon = models.FloatField(\"Longitude\")\n\n def __str__(self):\n return self.nome\n\n class Meta:\n verbose_name = 'Estações'\n verbose_name_plural = 'Estações' \n\nclass Campanha(models.Model):\n estacao = models.ForeignKey(Estacao,verbose_name='Estação',on_delete=models.CASCADE)\n csv = models.FileField(verbose_name=\"csv\", upload_to='data/', blank=True, null=True)\n prof = models.CharField(max_length=60,verbose_name=\"Profundidade\")\n data_i = models.DateField(verbose_name=\"Data inicial\",blank=True, null=True)\n data_f = models.DateField(verbose_name=\"Data final\",blank=True, null=True)\n # infos = models.TextField(\"Informações\",blank=True, null=True)\n\n def __str__(self):\n return self.estacao.nome\n\n def save(self, *args, **kwargs):\n super(Campanha, self).save(*args, **kwargs)\n\n df = pd.read_csv(os.environ['HOME'] + '/media/' + self.csv.name, parse_dates=True, index_col='date')\n df.replace({pd.np.nan: None}, inplace=True)\n\n campanha = self\n\n for i in range(len(df)):\n dados = Dados(\n campanha = campanha,\n date = str(df.index[i]),\n ate = df['ate'][i],\n bat = df['bat'][i],\n bp = df['bp'][i],\n con = df['con'][i],\n dir1 = df['dir1'][i],\n dir2 = df['dir2'][i],\n dir3 = df['dir3'][i],\n dp = df['dp'][i],\n hs = df['hs'][i],\n lat = df['lat'][i],\n lon = 
df['lon'][i],\n mag1 = df['mag1'][i],\n mag2 = df['mag2'][i],\n mag3 = df['mag3'][i],\n psbe10 = df['psbe10'][i],\n psbe100 = df['psbe100'][i],\n rh = df['rh'][i],\n tdl = df['tdl'][i],\n tp = df['tp'][i],\n tsbe10 = df['tsbe10'][i],\n tsbe100 = df['tsbe100'][i],\n tsbe20 = df['tsbe20'][i],\n tsbe30 = df['tsbe30'][i],\n tsbe40 = df['tsbe40'][i],\n tsbe50 = df['tsbe50'][i],\n tsbe60 = df['tsbe60'][i],\n tsbe70 = df['tsbe70'][i],\n tsbe80 = df['tsbe80'][i],\n tsbe90 = df['tsbe90'][i],\n tsup = df['tsup'][i],\n wd = df['wd'][i],\n ws = df['ws'][i])\n dados.save(force_insert=True)\n \n class Meta:\n verbose_name = 'Campanhas'\n verbose_name_plural = 'Campanhas' \n\nclass Dados(models.Model):\n campanha = models.ForeignKey(Campanha, verbose_name='Estação',on_delete=models.CASCADE)\n date = models.CharField('Data', blank=True, null=True, max_length=60)\n ate = models.FloatField('ate', blank=True, null=True)\n bat = models.FloatField('bat', blank=True, null=True)\n bp = models.FloatField('bp', blank=True, null=True)\n con = models.FloatField('con', blank=True, null=True)\n dir1 = models.FloatField('dir1', blank=True, null=True)\n dir2 = models.FloatField('dir2', blank=True, null=True)\n dir3 = models.FloatField('dir3', blank=True, null=True)\n dp = models.FloatField('dp', blank=True, null=True)\n hs = models.FloatField('hs', blank=True, null=True)\n lat = models.FloatField('lat', blank=True, null=True)\n lon = models.FloatField('lon', blank=True, null=True)\n mag1 = models.FloatField('mag1', blank=True, null=True)\n mag2 = models.FloatField('mag2', blank=True, null=True)\n mag3 = models.FloatField('mag3', blank=True, null=True)\n psbe10 = models.FloatField('psbe10', blank=True, null=True)\n psbe100 = models.FloatField('psbe100', blank=True, null=True)\n rh = models.FloatField('rh', blank=True, null=True)\n tdl = models.FloatField('tdl', blank=True, null=True)\n tp = models.FloatField('tp', blank=True, null=True)\n tsbe10 = models.FloatField('tsbe10', blank=True, null=True)\n tsbe100 = models.FloatField('tsbe100', blank=True, null=True)\n tsbe20 = models.FloatField('tsbe20', blank=True, null=True)\n tsbe30 = models.FloatField('tsbe30', blank=True, null=True)\n tsbe40 = models.FloatField('tsbe40', blank=True, null=True)\n tsbe50 = models.FloatField('tsbe50', blank=True, null=True)\n tsbe60 = models.FloatField('tsbe60', blank=True, null=True)\n tsbe70 = models.FloatField('tsbe70', blank=True, null=True)\n tsbe80 = models.FloatField('tsbe80', blank=True, null=True)\n tsbe90 = models.FloatField('tsbe90', blank=True, null=True)\n tsup = models.FloatField('tsup', blank=True, null=True)\n wd = models.FloatField('wd', blank=True, null=True)\n ws = models.FloatField('ws', blank=True, null=True)\n\n def __str__(self):\n return self.date\n\n class Meta:\n verbose_name = 'Dados de Campanhas'\n verbose_name_plural = 'Dados de Campanhas' \n\nclass Contacts(models.Model):\n name = models.CharField(max_length=60,verbose_name=\"Nome\")\n email = models.CharField(max_length=30,verbose_name=\"Email\")\n tel = models.CharField(max_length=30,verbose_name=\"Tel\")\n\n def __str__(self):\n return self.name\n\n class Meta:\n # verbose_name = 'Contatos'\n verbose_name_plural = 'Contatos'\n\n# class Diretorios(models.Model):\n# title = models.CharField(verbose_name=\"Nome do diretório\",max_length=200)\n\n# def __str__(self):\n# return self.title\n\n# class Meta:\n# verbose_name = 'Diretório'\n# verbose_name_plural = 'Diretórios'\n\n# class Documentos(models.Model):\n# title = models.CharField(verbose_name=\"Titulo em 
Português\",max_length=200)\n# docs = models.FileField(verbose_name=\"Documento\",upload_to='docs/', blank=True, null=True)\n# dire = models.ForeignKey(Diretorios,verbose_name='Diretório',on_delete=models.CASCADE)\n\n# def __str__(self):\n# return self.title\n\n# class Meta:\n# verbose_name = 'Documento'\n# verbose_name_plural = 'Documentos'\n"
},
{
"alpha_fraction": 0.5381818413734436,
"alphanum_fraction": 0.6345454454421997,
"avg_line_length": 29.55555534362793,
"blob_id": "dc317818ae73a0a855d765178c008e4d03e741cb",
"content_id": "96a1ce50bf01f09a92180357fad6cc872ff3d01e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 553,
"license_type": "no_license",
"max_line_length": 223,
"num_lines": 18,
"path": "/remo/migrations/0041_auto_20190320_0017.py",
"repo_name": "hpppereira/django-remo",
"src_encoding": "UTF-8",
"text": "# Generated by Django 2.1.7 on 2019-03-20 03:17\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('remo', '0040_auto_20190319_2144'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='doi',\n name='doi',\n field=models.CharField(blank=True, help_text='Ex: 10.1007/s10236-017-1113-9. Caso a publicação não tenha DOI, preencher os outros campos manualmente', max_length=200, null=True, unique=True, verbose_name='DOI'),\n ),\n ]\n"
},
{
"alpha_fraction": 0.523059606552124,
"alphanum_fraction": 0.5860517621040344,
"avg_line_length": 29.65517234802246,
"blob_id": "a65d495a1c104fdf0b24da90e18c45dd52f2aa38",
"content_id": "e43e41f1a9b2507b417cdcfb07de7e5d356641ae",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 892,
"license_type": "no_license",
"max_line_length": 223,
"num_lines": 29,
"path": "/remo/migrations/0043_auto_20190405_2018.py",
"repo_name": "hpppereira/django-remo",
"src_encoding": "UTF-8",
"text": "# Generated by Django 2.1.7 on 2019-04-05 20:18\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('remo', '0042_auto_20190402_1051'),\n ]\n\n operations = [\n migrations.DeleteModel(\n name='Doi',\n ),\n migrations.DeleteModel(\n name='Paginas',\n ),\n migrations.AddField(\n model_name='publicacao',\n name='doi',\n field=models.CharField(blank=True, help_text='Ex: 10.1007/s10236-017-1113-9. Caso a publicação não tenha DOI, preencher os outros campos manualmente', max_length=200, null=True, unique=True, verbose_name='DOI'),\n ),\n migrations.AddField(\n model_name='publicacao',\n name='url',\n field=models.CharField(blank=True, max_length=200, null=True, verbose_name='URL'),\n ),\n ]\n"
},
{
"alpha_fraction": 0.5667350888252258,
"alphanum_fraction": 0.6057494878768921,
"avg_line_length": 23.63157844543457,
"blob_id": "726bb7ad06218d6c017642b020be8b9b6e67750b",
"content_id": "5f82b505f7bd5a90a9ab97fbe7f9594a68af6aa7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 487,
"license_type": "no_license",
"max_line_length": 104,
"num_lines": 19,
"path": "/remo/migrations/0014_auto_20181107_1917.py",
"repo_name": "hpppereira/django-remo",
"src_encoding": "UTF-8",
"text": "# Generated by Django 2.1.2 on 2018-11-07 21:17\r\n\r\nfrom django.db import migrations, models\r\nimport django.db.models.deletion\r\n\r\n\r\nclass Migration(migrations.Migration):\r\n\r\n dependencies = [\r\n ('remo', '0013_institution'),\r\n ]\r\n\r\n operations = [\r\n migrations.AlterField(\r\n model_name='person',\r\n name='institution',\r\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='remo.Institution'),\r\n ),\r\n ]\r\n"
},
{
"alpha_fraction": 0.7142857313156128,
"alphanum_fraction": 0.7142857313156128,
"avg_line_length": 12.833333015441895,
"blob_id": "a3bae09995ed95a4897b10919a1a713cf7a89bfb",
"content_id": "ea9c7cf857f1ded432fcafdbd0d9caf0f42420b0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 84,
"license_type": "no_license",
"max_line_length": 31,
"num_lines": 6,
"path": "/README.md",
"repo_name": "hpppereira/django-remo",
"src_encoding": "UTF-8",
"text": "Quickstart\n==========\n\nDjango rodado em servidor comum\n\npython manage.py runserver\n\n"
},
{
"alpha_fraction": 0.5308123230934143,
"alphanum_fraction": 0.5798319578170776,
"avg_line_length": 30.04347801208496,
"blob_id": "7b3e6f755dd66d2ba2071f87750fd770a1159d3d",
"content_id": "51a9cfe6d4976bf2652778d25cd29b118c715d2f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 718,
"license_type": "no_license",
"max_line_length": 172,
"num_lines": 23,
"path": "/remo/migrations/0044_auto_20190405_2027.py",
"repo_name": "hpppereira/django-remo",
"src_encoding": "UTF-8",
"text": "# Generated by Django 2.1.7 on 2019-04-05 20:27\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('remo', '0043_auto_20190405_2018'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='publicacao',\n name='position',\n field=models.CharField(choices=[('A', 'Artigos'), ('B', 'Livros'), ('C', 'Congressos'), ('D', 'Orientações')], max_length=1, verbose_name='Tipo de Publicação'),\n ),\n migrations.AlterField(\n model_name='publicacao',\n name='title',\n field=models.CharField(blank=True, max_length=200, null=True, verbose_name='Titulo'),\n ),\n ]\n"
},
{
"alpha_fraction": 0.5257731676101685,
"alphanum_fraction": 0.553755521774292,
"avg_line_length": 25.115385055541992,
"blob_id": "59ac235456ea86f567ca74100df3db1619222b75",
"content_id": "616833d244bbd9193205640c918714e013cf11ed",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 679,
"license_type": "no_license",
"max_line_length": 97,
"num_lines": 26,
"path": "/remo/migrations/0020_auto_20181207_1457.py",
"repo_name": "hpppereira/django-remo",
"src_encoding": "UTF-8",
"text": "# Generated by Django 2.0.6 on 2018-12-07 14:57\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('remo', '0019_serie_prof'),\n ]\n\n operations = [\n migrations.AlterModelOptions(\n name='serie',\n options={'verbose_name': 'Campanhas', 'verbose_name_plural': 'Campanhas'},\n ),\n migrations.RemoveField(\n model_name='serie',\n name='dados',\n ),\n migrations.AddField(\n model_name='serie',\n name='csv',\n field=models.FileField(blank=True, null=True, upload_to='media', verbose_name='csv'),\n ),\n ]\n"
},
{
"alpha_fraction": 0.5446224212646484,
"alphanum_fraction": 0.5949656963348389,
"avg_line_length": 23.27777862548828,
"blob_id": "86b610480de5fb048f0eda8214ba4dce737105a0",
"content_id": "3034d398ccf66e5714f58a7b33af9c6a9d041d44",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 437,
"license_type": "no_license",
"max_line_length": 107,
"num_lines": 18,
"path": "/remo/migrations/0047_auto_20190405_2223.py",
"repo_name": "hpppereira/django-remo",
"src_encoding": "UTF-8",
"text": "# Generated by Django 2.1.7 on 2019-04-05 22:23\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('remo', '0046_publicacao_url'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='publicacao',\n name='url',\n field=models.CharField(blank=True, default='#', max_length=200, null=True, verbose_name='URL'),\n ),\n ]\n"
},
{
"alpha_fraction": 0.5094889402389526,
"alphanum_fraction": 0.5256947875022888,
"avg_line_length": 42.557273864746094,
"blob_id": "86b2e491ebdc83fb53f089141ac6921de1557422",
"content_id": "9e0bb68ed898a3c947e31e497b81cdfa1b839e8b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 14069,
"license_type": "no_license",
"max_line_length": 114,
"num_lines": 323,
"path": "/remo/views.py",
"repo_name": "hpppereira/django-remo",
"src_encoding": "UTF-8",
"text": "from django.shortcuts import render, get_object_or_404, redirect\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib import auth\nfrom django.conf import settings\nfrom django.utils import timezone\nfrom django.core.paginator import Paginator\nfrom .models import *\nimport os\nimport csv\nimport json\nimport ast\nfrom datetime import datetime\n# from .forms import NoticiaForm\n# from filemanager import FileManager\n\n\ndef index(request):\n # Number of visits to this view, as counted in the session variable.\n num_visits = request.session.get('num_visits', 0)\n request.session['num_visits'] = num_visits + 1\n\n noticias_f = Noticia.objects.all().order_by('-published_date')[0:4]\n dest = Destaque.objects.all()\n\n index_informs = Index.objects.all()\n # Render the HTML template index.html with the data in the context variable\n auth.logout(request)\n return render(request,'index.html',{\"noticias_f\": noticias_f, \"dest\":dest, \"index_informs\": index_informs})\n\ndef index_en(request):\n # Number of visits to this view, as counted in the session variable.\n num_visits = request.session.get('num_visits', 0)\n request.session['num_visits'] = num_visits + 1\n\n noticias_f = Noticia.objects.all().order_by('-published_date')[0:4]\n dest = Destaque.objects.all()\n\n index_informs = Index.objects.all()\n # Render the HTML template index.html with the data in the context variable\n auth.logout(request)\n return render(request,'index_en.html',{\"noticias_f\": noticias_f, \"dest\":dest, \"index_informs\": index_informs})\n\ndef equipe(request):\n lista_coor = Person.objects.filter(position=\"A\").order_by(\"name\")\n lista_pesq = Person.objects.filter(position=\"B\").order_by(\"name\")\n lista_alun_pos = Person.objects.filter(position=\"C\").order_by(\"name\")\n lista_alun_gra = Person.objects.filter(position=\"D\").order_by(\"name\")\n lista_tecn = Person.objects.filter(position=\"E\").order_by(\"name\")\n lista_pesq_eg = Person.objects.filter(position=\"F\").order_by(\"name\")\n lista_colab_ext = Person.objects.filter(position=\"G\").order_by(\"name\")\n auth.logout(request)\n return render(request, 'equipe.html', {\"lista_coor\": lista_coor,\n \"lista_pesq\": lista_pesq,\n \"lista_alun_pos\": lista_alun_pos, \n \"lista_alun_gra\": lista_alun_gra,\n \"lista_tecn\": lista_tecn,\n \"lista_pesq_eg\": lista_pesq_eg,\n \"lista_colab_ext\": lista_colab_ext})\n\ndef equipe_en(request):\n lista_coor = Person.objects.filter(position=\"A\").order_by(\"name\")\n lista_pesq = Person.objects.filter(position=\"B\").order_by(\"name\")\n lista_alun_pos = Person.objects.filter(position=\"C\").order_by(\"name\")\n lista_alun_gra = Person.objects.filter(position=\"D\").order_by(\"name\")\n lista_tecn = Person.objects.filter(position=\"E\").order_by(\"name\")\n lista_pesq_eg = Person.objects.filter(position=\"F\").order_by(\"name\")\n lista_colab_ext = Person.objects.filter(position=\"G\").order_by(\"name\")\n auth.logout(request)\n return render(request, 'equipe_en.html', {\"lista_coor\": lista_coor,\n \"lista_pesq\": lista_pesq,\n \"lista_alun_pos\": lista_alun_pos, \n \"lista_alun_gra\": lista_alun_gra,\n \"lista_tecn\": lista_tecn,\n \"lista_pesq_eg\": lista_pesq_eg,\n \"lista_colab_ext\": lista_colab_ext})\n\ndef publicacoes(request):\n artigo = Publicacao.objects.filter(position=\"A\").order_by('-ano')\n livro = Publicacao.objects.filter(position=\"B\").order_by('-ano')\n congresso = Publicacao.objects.filter(position=\"C\").order_by('-ano')\n monografia = 
Publicacao.objects.filter(position=\"D\").order_by('-ano')\n mestrado = Publicacao.objects.filter(position=\"E\").order_by('-ano')\n doutorado = Publicacao.objects.filter(position=\"F\").order_by('-ano')\n auth.logout(request)\n return render(request, 'publicacoes.html', {\"artigo\": artigo,\n \"livro\": livro,\n \"congresso\": congresso,\n \"monografia\": monografia,\n \"mestrado\": mestrado,\n \"doutorado\": doutorado}) \n\ndef publicacoes_en(request):\n artigo = Publicacao.objects.filter(position=\"A\").order_by('-ano')\n livro = Publicacao.objects.filter(position=\"B\").order_by('-ano')\n congresso = Publicacao.objects.filter(position=\"C\").order_by('-ano')\n monografia = Publicacao.objects.filter(position=\"D\").order_by('-ano')\n mestrado = Publicacao.objects.filter(position=\"E\").order_by('-ano')\n doutorado = Publicacao.objects.filter(position=\"F\").order_by('-ano')\n auth.logout(request)\n return render(request, 'publicacoes_en.html', {\"artigo\": artigo,\n \"livro\": livro,\n \"congresso\": congresso,\n \"monografia\": monografia,\n \"mestrado\": mestrado,\n \"doutorado\": doutorado}) \n\ndef noticias_list(request):\n posts_list = Noticia.objects.order_by('-published_date')\n paginator = Paginator(posts_list, 5)\n page = request.GET.get('page')\n posts = paginator.get_page(page)\n auth.logout(request)\n return render(request, 'noticias_list.html', {'posts': posts})\n\ndef noticias_list_en(request):\n posts_list = Noticia.objects.order_by('-published_date')\n paginator = Paginator(posts_list, 5)\n page = request.GET.get('page')\n posts = paginator.get_page(page)\n auth.logout(request)\n return render(request, 'noticias_list_en.html', {'posts': posts})\n\ndef noticias_detail(request, pk):\n post = Noticia.objects.get(pk=pk)\n auth.logout(request)\n return render(request, 'noticias_detail.html', {'post': post})\n\ndef noticias_detail_en(request, pk):\n post = Noticia.objects.get(pk=pk)\n auth.logout(request)\n return render(request, 'noticias_detail_en.html', {'post': post})\n\n# @login_required\ndef modelagem(request):\n return render(request, 'modelagem.html')\n\n# @login_required\ndef modelagem_en(request):\n return render(request, 'modelagem_en.html')\n\n# @login_required\ndef dados(request):\n lista_esta = Estacao.objects.all()\n dados_esta = []\n for linha in lista_esta:\n lista_sec = Campanha.objects.filter(estacao=linha)\n for linha2 in lista_sec:\n print(linha2.id)\n try:\n a = {\n 'id': linha.id,'nome': linha.nome,\n 'lat': str(linha.lat).replace(\",\",\".\"),\n 'lon': str(linha.lon).replace(\",\",\".\"),\n 'prof': linha2.prof,\n 'id_serie': linha2.id,\n 'data_i': linha2.data_i.strftime(\"%d/%m/%Y\"),\n 'data_f': linha2.data_f.strftime(\"%d/%m/%Y\"),\n 'dados': linha2.csv\n }\n except:\n a = {\n 'id': linha.id,'nome': linha.nome,\n 'lat': str(linha.lat).replace(\",\",\".\"),\n 'lon': str(linha.lon).replace(\",\",\".\"),\n 'prof': linha2.prof,\n 'id_serie': linha2.id,\n 'data_i': \"Sem Infos\",\n 'data_f': \"Sem Infos\",\n 'dados': linha2.csv\n } \n dados_esta.append(a)\n return render(request, 'dados.html', {'lista_esta': dados_esta})\n\n# @login_required\ndef dados_en(request):\n lista_esta = Estacao.objects.all()\n dados_esta = []\n for linha in lista_esta:\n lista_sec = Campanha.objects.filter(estacao=linha)\n for linha2 in lista_sec:\n print(linha2.id)\n try:\n a = {\n 'id': linha.id,'nome': linha.nome,\n 'lat': str(linha.lat).replace(\",\",\".\"),\n 'lon': str(linha.lon).replace(\",\",\".\"),\n 'prof': linha2.prof,\n 'id_serie': linha2.id,\n 'data_i': 
linha2.data_i.strftime(\"%d/%m/%Y\"),\n 'data_f': linha2.data_f.strftime(\"%d/%m/%Y\"),\n 'dados': linha2.csv\n }\n except:\n a = {\n 'id': linha.id,'nome': linha.nome,\n 'lat': str(linha.lat).replace(\",\",\".\"),\n 'lon': str(linha.lon).replace(\",\",\".\"),\n 'prof': linha2.prof,\n 'id_serie': linha2.id,\n 'data_i': \"Sem Infos\",\n 'data_f': \"Sem Infos\",\n 'dados': linha2.csv\n } \n dados_esta.append(a)\n return render(request, 'dados_en.html', {'lista_esta': dados_esta})\n\n# @login_required\ndef dados_grafic(request, pk):\n campanha = Campanha.objects.get(pk=pk)\n lista_csv = Dados.objects.all().filter(campanha=campanha)\n saida = []\n for x in lista_csv:\n data = (datetime.strptime(x.date, '%Y-%m-%d %H:%M:%S')).timestamp() * 1000\n ws = str(x.ws)\n wd = str(x.wd)\n tsup = str(x.tsup)\n ate = str(x.ate)\n rh = str(x.rh)\n bp = str(x.bp)\n hs = str(x.hs)\n tp = str(x.tp)\n mag1 = str(x.mag1)\n dir1 = str(x.dir1)\n tsbe10 = str(x.tsbe10)\n tsbe100 =str(x.tsbe100)\n tsbe20 = str(x.tsbe20)\n tsbe30 = str(x.tsbe30)\n tsbe40 = str(x.tsbe40)\n tsbe50 = str(x.tsbe50)\n tsbe60 = str(x.tsbe60)\n tsbe70 = str(x.tsbe70)\n tsbe80 = str(x.tsbe80)\n tsbe90 = str(x.tsbe90)\n saida.append({\"data\":\"{:.0f}\".format(data),\n \"ws\":ws.replace('None', 'null'),\n \"wd\":wd.replace('None', 'null'),\n \"tsup\":tsup.replace('None', 'null'),\n \"ate\":ate.replace('None', 'null'),\n \"rh\":rh.replace('None', 'null'),\n \"bp\":bp.replace('None', 'null'),\n \"hs\":hs.replace('None', 'null'),\n \"tp\":tp.replace('None', 'null'),\n \"mag1\":mag1.replace('None', 'null'),\n \"dir1\":dir1.replace('None', 'null'),\n \"tsbe10\":tsbe10.replace('None', 'null'),\n \"tsbe100\":tsbe100.replace('None', 'null'),\n \"tsbe20\":tsbe20.replace('None', 'null'),\n \"tsbe30\":tsbe30.replace('None', 'null'),\n \"tsbe40\":tsbe40.replace('None', 'null'),\n \"tsbe50\":tsbe50.replace('None', 'null'),\n \"tsbe60\":tsbe60.replace('None', 'null'),\n \"tsbe70\":tsbe70.replace('None', 'null'),\n \"tsbe80\":tsbe80.replace('None', 'null'),\n \"tsbe90\":tsbe90.replace('None', 'null')})\n return render(request, 'dados_grafic.html', {'lista_csv': saida})\n\n# @login_required\ndef dados_grafic_en(request, pk):\n campanha = Campanha.objects.get(pk=pk)\n lista_csv = Dados.objects.all().filter(campanha=campanha)\n saida = []\n for x in lista_csv:\n data = (datetime.strptime(x.date, '%Y-%m-%d %H:%M:%S')).timestamp() * 1000\n ws = str(x.ws)\n wd = str(x.wd)\n tsup = str(x.tsup)\n ate = str(x.ate)\n rh = str(x.rh)\n bp = str(x.bp)\n hs = str(x.hs)\n tp = str(x.tp)\n mag1 = str(x.mag1)\n dir1 = str(x.dir1)\n tsbe10 = str(x.tsbe10)\n tsbe100 =str(x.tsbe100)\n tsbe20 = str(x.tsbe20)\n tsbe30 = str(x.tsbe30)\n tsbe40 = str(x.tsbe40)\n tsbe50 = str(x.tsbe50)\n tsbe60 = str(x.tsbe60)\n tsbe70 = str(x.tsbe70)\n tsbe80 = str(x.tsbe80)\n tsbe90 = str(x.tsbe90)\n saida.append({\"data\":\"{:.0f}\".format(data),\n \"ws\":ws.replace('None', 'null'),\n \"wd\":wd.replace('None', 'null'),\n \"tsup\":tsup.replace('None', 'null'),\n \"ate\":ate.replace('None', 'null'),\n \"rh\":rh.replace('None', 'null'),\n \"bp\":bp.replace('None', 'null'),\n \"hs\":hs.replace('None', 'null'),\n \"tp\":tp.replace('None', 'null'),\n \"mag1\":mag1.replace('None', 'null'),\n \"dir1\":dir1.replace('None', 'null'),\n \"tsbe10\":tsbe10.replace('None', 'null'),\n \"tsbe100\":tsbe100.replace('None', 'null'),\n \"tsbe20\":tsbe20.replace('None', 'null'),\n \"tsbe30\":tsbe30.replace('None', 'null'),\n \"tsbe40\":tsbe40.replace('None', 'null'),\n \"tsbe50\":tsbe50.replace('None', 'null'),\n 
\"tsbe60\":tsbe60.replace('None', 'null'),\n \"tsbe70\":tsbe70.replace('None', 'null'),\n \"tsbe80\":tsbe80.replace('None', 'null'),\n \"tsbe90\":tsbe90.replace('None', 'null')})\n return render(request, 'dados_grafic_en.html', {'lista_csv': saida})\n\ndef contatos(request):\n contatos = Contacts.objects.all()\n return render(request, 'contatos.html', {\"contatos\": contatos})\n\ndef contatos_en(request):\n contatos = Contacts.objects.all()\n return render(request, 'contatos_en.html', {\"contatos\": contatos})\n\n@login_required\ndef documentos(request):\n return render(request, 'documentos.html')\n\n# @login_required\n# def documentos1(request, path):\n# extensions = ['html', 'htm', 'zip', 'py', 'css', 'js', 'jpeg', 'jpg', 'png', 'pdf', 'docx']\n# fm = FileManager(settings.MEDIA_ROOT + '/docs/', extensions=extensions)\n# return fm.render(request, path)\n"
},
{
"alpha_fraction": 0.49498844146728516,
"alphanum_fraction": 0.5265998244285583,
"avg_line_length": 27.822221755981445,
"blob_id": "7120153d1b608c6c166a3d462c3c59e06c11de7c",
"content_id": "8debda5af01ba0f6b5f3e73a7e61d7ae185849fc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1297,
"license_type": "no_license",
"max_line_length": 87,
"num_lines": 45,
"path": "/remo/migrations/0006_auto_20181011_0132.py",
"repo_name": "hpppereira/django-remo",
"src_encoding": "UTF-8",
"text": "# Generated by Django 2.0.6 on 2018-10-11 01:32\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('remo', '0005_auto_20181010_2306'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='doi',\n name='created_date',\n ),\n migrations.RemoveField(\n model_name='doi',\n name='image',\n ),\n migrations.AddField(\n model_name='doi',\n name='ano',\n field=models.CharField(default='', max_length=4, verbose_name='Ano'),\n preserve_default=False,\n ),\n migrations.AddField(\n model_name='doi',\n name='authors',\n field=models.CharField(default='', max_length=200, verbose_name='Autores'),\n preserve_default=False,\n ),\n migrations.AddField(\n model_name='doi',\n name='doi',\n field=models.CharField(default='', max_length=200, verbose_name='Doi'),\n preserve_default=False,\n ),\n migrations.AddField(\n model_name='doi',\n name='url',\n field=models.CharField(default='', max_length=200, verbose_name='URL'),\n preserve_default=False,\n ),\n ]\n"
},
{
"alpha_fraction": 0.4793388545513153,
"alphanum_fraction": 0.5647382736206055,
"avg_line_length": 19.16666603088379,
"blob_id": "7381bb12d1d8cf68eb01df46c2afb3019f62055c",
"content_id": "ea89158ff1bb7df2d56b74a66ffb9698b8e317d8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 363,
"license_type": "no_license",
"max_line_length": 47,
"num_lines": 18,
"path": "/remo/migrations/0055_auto_20190504_0233.py",
"repo_name": "hpppereira/django-remo",
"src_encoding": "UTF-8",
"text": "# Generated by Django 2.1.7 on 2019-05-04 02:33\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('remo', '0054_auto_20190504_0228'),\n ]\n\n operations = [\n migrations.RenameField(\n model_name='destaque',\n old_name='title',\n new_name='title_pt',\n ),\n ]\n"
},
{
"alpha_fraction": 0.4816955626010895,
"alphanum_fraction": 0.5517019629478455,
"avg_line_length": 28.923076629638672,
"blob_id": "1ab7c0ee03ae88c61ae74b2792f477b5da64c387",
"content_id": "8bf248225d4c2bdcd95e2152953965fa189b146f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1557,
"license_type": "no_license",
"max_line_length": 113,
"num_lines": 52,
"path": "/procdb/teste_doi.py",
"repo_name": "hpppereira/django-remo",
"src_encoding": "UTF-8",
"text": "\"\"\"\npip install crossrefapi\n\"\"\"\n\n# import requests\nfrom crossref.restful import Works\n# from doi2bib import crossref\n# from refextract import extract_references_from_url\n\n\n\nworks = Works()\na = works.doi('10.1007/s10236-017-1113-9')\n\nfor k in a.keys():\n print ('============================================ \\n')\n print (k)\n print (a[k])\n\nn = []\nnn = []\nfor i in range(len(a['author'])):\n names = a['author'][i]['given'].split()\n n.append(a['author'][i]['family'] + ', ')\n for ii in range(len(names)):\n n[-1] = n[-1] + names[ii][0] + '. '\nc = str(n)[1:-1].replace(\"'\",\"\").replace(\". ,\", \".;\")\n\n# coloca et al se tiver mais que 2 autores\nif len(c.split(';')) > 2:\n c = c.split(';')[0].split(',')[0] + ' et al.'\n\ntitle = a['title'][0]\nano = int(a['issued']['date-parts'][0][0])\nurl = a['URL']\n\n\n# # iln = [last_names[i][0] for i in range(len(last_names))]\n# c = str(['{}, {}.'.format(a['author'][i]['family'], \n# a['author'][i]['given'][0]) for i in range(len(a['author']))]).replace(\"'\", \"\")[1:-1]\n\n\n# a = crossref.get_bib_from_doi('https://doi.org/10.1080/1755876X.2019.1606880')\n# a = crossref.get_bib('https://doi.org/10.1080/1755876X.2019.1606880')\n\n# b = requests.get('https://doi.org/10.1080/1755876X.2019.1606880')\n# a = requests.get(\"https://api.altmetric.com/v1/doi/\"+str(self.doi))\n\n# title = a.json()['title']\n# url = a.json()['url']\n# ano = datetime.utcfromtimestamp(a.json()['published_on']).strftime('%Y')\n# authors = str(a.json()['authors']).replace(\"[\",\"\").replace(\"]\",\"\").replace(\"'\",\"\")\n\n"
},
{
"alpha_fraction": 0.5403472781181335,
"alphanum_fraction": 0.5638406276702881,
"avg_line_length": 30.580644607543945,
"blob_id": "12711969ba25ffd34b9e034713c06b83e6ec529e",
"content_id": "63453f3366cbb3d154aa3ebf46273c982ad85a94",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 979,
"license_type": "no_license",
"max_line_length": 114,
"num_lines": 31,
"path": "/remo/migrations/0036_auto_20181230_0018.py",
"repo_name": "hpppereira/django-remo",
"src_encoding": "UTF-8",
"text": "# Generated by Django 2.0.6 on 2018-12-30 00:18\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('remo', '0035_remove_doi_journal'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Index_Doc',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('title', models.CharField(max_length=200, verbose_name='Nome')),\n ],\n options={\n 'verbose_name': 'Categoria de Documento',\n 'verbose_name_plural': 'Categoria de Documento',\n },\n ),\n migrations.AddField(\n model_name='doc',\n name='categoria',\n field=models.ForeignKey(default=0, on_delete=django.db.models.deletion.CASCADE, to='remo.Index_Doc'),\n preserve_default=False,\n ),\n ]\n"
},
{
"alpha_fraction": 0.5477855205535889,
"alphanum_fraction": 0.5990676283836365,
"avg_line_length": 22.83333396911621,
"blob_id": "f5c71b7e7954ebac16e897651cff719fb64d1453",
"content_id": "10f672f168151a2ca4c616c39f192ee71dea71ee",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 429,
"license_type": "no_license",
"max_line_length": 94,
"num_lines": 18,
"path": "/remo/migrations/0046_publicacao_url.py",
"repo_name": "hpppereira/django-remo",
"src_encoding": "UTF-8",
"text": "# Generated by Django 2.1.7 on 2019-04-05 21:44\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('remo', '0045_remove_publicacao_url'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='publicacao',\n name='url',\n field=models.CharField(blank=True, max_length=200, null=True, verbose_name='URL'),\n ),\n ]\n"
},
{
"alpha_fraction": 0.5208826065063477,
"alphanum_fraction": 0.54925137758255,
"avg_line_length": 29.95121955871582,
"blob_id": "ebf2e138c4c89cf05f492e584ec28a3a4dd7b165",
"content_id": "8bf1f482b3ebb66a270f19ab49c039a8a74fa295",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1273,
"license_type": "no_license",
"max_line_length": 166,
"num_lines": 41,
"path": "/remo/migrations/0010_auto_20181028_1914.py",
"repo_name": "hpppereira/django-remo",
"src_encoding": "UTF-8",
"text": "# Generated by Django 2.0.7 on 2018-10-28 19:14\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('remo', '0009_auto_20181028_1836'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='publicacoes',\n name='author',\n ),\n migrations.RemoveField(\n model_name='publicacoes',\n name='created_date',\n ),\n migrations.RemoveField(\n model_name='publicacoes',\n name='image',\n ),\n migrations.AddField(\n model_name='publicacoes',\n name='ano',\n field=models.CharField(blank=True, max_length=4, null=True, verbose_name='Ano'),\n ),\n migrations.AddField(\n model_name='publicacoes',\n name='authors',\n field=models.CharField(blank=True, max_length=200, null=True, verbose_name='Autores'),\n ),\n migrations.AddField(\n model_name='publicacoes',\n name='position',\n field=models.CharField(choices=[('L', 'Livros'), ('C', 'Congressos'), ('O', 'Orientações')], default='', max_length=1, verbose_name='Tipo de Publicação'),\n preserve_default=False,\n ),\n ]\n"
},
{
"alpha_fraction": 0.5494709014892578,
"alphanum_fraction": 0.5746031999588013,
"avg_line_length": 63.17241287231445,
"blob_id": "49dcb9607bd327925daa59c63731d1b989a2b175",
"content_id": "57e4772bdaa8d1ad3c160dada35be01009145028",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3782,
"license_type": "no_license",
"max_line_length": 134,
"num_lines": 58,
"path": "/remo/migrations/0021_dados.py",
"repo_name": "hpppereira/django-remo",
"src_encoding": "UTF-8",
"text": "# Generated by Django 2.1.2 on 2018-12-10 00:09\r\n\r\nfrom django.db import migrations, models\r\nimport django.db.models.deletion\r\n\r\n\r\nclass Migration(migrations.Migration):\r\n\r\n dependencies = [\r\n ('remo', '0020_auto_20181207_1457'),\r\n ]\r\n\r\n operations = [\r\n migrations.CreateModel(\r\n name='dados',\r\n fields=[\r\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\r\n ('date', models.DateTimeField(blank=True, null=True, verbose_name='Data')),\r\n ('ate', models.FloatField(blank=True, null=True, verbose_name='ate')),\r\n ('bat', models.FloatField(blank=True, null=True, verbose_name='bat')),\r\n ('bp', models.FloatField(blank=True, null=True, verbose_name='bp')),\r\n ('con', models.FloatField(blank=True, null=True, verbose_name='con')),\r\n ('dir1', models.FloatField(blank=True, null=True, verbose_name='dir1')),\r\n ('dir2', models.FloatField(blank=True, null=True, verbose_name='dir2')),\r\n ('dir3', models.FloatField(blank=True, null=True, verbose_name='dir3')),\r\n ('dp', models.FloatField(blank=True, null=True, verbose_name='dp')),\r\n ('hs', models.FloatField(blank=True, null=True, verbose_name='hs')),\r\n ('lat', models.FloatField(blank=True, null=True, verbose_name='lat')),\r\n ('lon', models.FloatField(blank=True, null=True, verbose_name='lon')),\r\n ('mag1', models.FloatField(blank=True, null=True, verbose_name='mag1')),\r\n ('mag2', models.FloatField(blank=True, null=True, verbose_name='mag2')),\r\n ('mag3', models.FloatField(blank=True, null=True, verbose_name='mag3')),\r\n ('psbe10', models.FloatField(blank=True, null=True, verbose_name='psbe10')),\r\n ('psbe100', models.FloatField(blank=True, null=True, verbose_name='psbe100')),\r\n ('rh', models.FloatField(blank=True, null=True, verbose_name='rh')),\r\n ('tdl', models.FloatField(blank=True, null=True, verbose_name='tdl')),\r\n ('tp', models.FloatField(blank=True, null=True, verbose_name='tp')),\r\n ('tsbe10', models.FloatField(blank=True, null=True, verbose_name='tsbe10')),\r\n ('tsbe100', models.FloatField(blank=True, null=True, verbose_name='tsbe100')),\r\n ('tsbe20', models.FloatField(blank=True, null=True, verbose_name='tsbe20')),\r\n ('tsbe30', models.FloatField(blank=True, null=True, verbose_name='tsbe30')),\r\n ('tsbe40', models.FloatField(blank=True, null=True, verbose_name='tsbe40')),\r\n ('tsbe50', models.FloatField(blank=True, null=True, verbose_name='tsbe50')),\r\n ('tsbe60', models.FloatField(blank=True, null=True, verbose_name='tsbe60')),\r\n ('tsbe70', models.FloatField(blank=True, null=True, verbose_name='tsbe70')),\r\n ('tsbe80', models.FloatField(blank=True, null=True, verbose_name='tsbe80')),\r\n ('tsbe90', models.FloatField(blank=True, null=True, verbose_name='tsbe90')),\r\n ('tsup', models.FloatField(blank=True, null=True, verbose_name='tsup')),\r\n ('wd', models.FloatField(blank=True, null=True, verbose_name='wd')),\r\n ('ws', models.FloatField(blank=True, null=True, verbose_name='ws')),\r\n ('campanha', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='remo.Serie', verbose_name='Estação')),\r\n ],\r\n options={\r\n 'verbose_name': 'Dados de Campanhas',\r\n 'verbose_name_plural': 'Dados de Campanhas',\r\n },\r\n ),\r\n ]\r\n"
},
{
"alpha_fraction": 0.5032217502593994,
"alphanum_fraction": 0.5181518197059631,
"avg_line_length": 33.7471923828125,
"blob_id": "78620cbe63bad5a06950e36998930232e79dd816",
"content_id": "4c7e378b3071a8759dcff58028444c9c03990843",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6363,
"license_type": "no_license",
"max_line_length": 83,
"num_lines": 178,
"path": "/remo/migrations/0024_auto_20181209_2340.py",
"repo_name": "hpppereira/django-remo",
"src_encoding": "UTF-8",
"text": "# Generated by Django 2.1.2 on 2018-12-10 01:40\r\n\r\nfrom django.db import migrations, models\r\n\r\n\r\nclass Migration(migrations.Migration):\r\n\r\n dependencies = [\r\n ('remo', '0023_auto_20181209_2339'),\r\n ]\r\n\r\n operations = [\r\n migrations.AlterField(\r\n model_name='dados',\r\n name='ate',\r\n field=models.FloatField(blank=True, null=True, verbose_name='ate'),\r\n ),\r\n migrations.AlterField(\r\n model_name='dados',\r\n name='bat',\r\n field=models.FloatField(blank=True, null=True, verbose_name='bat'),\r\n ),\r\n migrations.AlterField(\r\n model_name='dados',\r\n name='bp',\r\n field=models.FloatField(blank=True, null=True, verbose_name='bp'),\r\n ),\r\n migrations.AlterField(\r\n model_name='dados',\r\n name='con',\r\n field=models.FloatField(blank=True, null=True, verbose_name='con'),\r\n ),\r\n migrations.AlterField(\r\n model_name='dados',\r\n name='date',\r\n field=models.DateTimeField(blank=True, null=True, verbose_name='Data'),\r\n ),\r\n migrations.AlterField(\r\n model_name='dados',\r\n name='dir1',\r\n field=models.FloatField(blank=True, null=True, verbose_name='dir1'),\r\n ),\r\n migrations.AlterField(\r\n model_name='dados',\r\n name='dir2',\r\n field=models.FloatField(blank=True, null=True, verbose_name='dir2'),\r\n ),\r\n migrations.AlterField(\r\n model_name='dados',\r\n name='dir3',\r\n field=models.FloatField(blank=True, null=True, verbose_name='dir3'),\r\n ),\r\n migrations.AlterField(\r\n model_name='dados',\r\n name='dp',\r\n field=models.FloatField(blank=True, null=True, verbose_name='dp'),\r\n ),\r\n migrations.AlterField(\r\n model_name='dados',\r\n name='hs',\r\n field=models.FloatField(blank=True, null=True, verbose_name='hs'),\r\n ),\r\n migrations.AlterField(\r\n model_name='dados',\r\n name='lat',\r\n field=models.FloatField(blank=True, null=True, verbose_name='lat'),\r\n ),\r\n migrations.AlterField(\r\n model_name='dados',\r\n name='lon',\r\n field=models.FloatField(blank=True, null=True, verbose_name='lon'),\r\n ),\r\n migrations.AlterField(\r\n model_name='dados',\r\n name='mag1',\r\n field=models.FloatField(blank=True, null=True, verbose_name='mag1'),\r\n ),\r\n migrations.AlterField(\r\n model_name='dados',\r\n name='mag2',\r\n field=models.FloatField(blank=True, null=True, verbose_name='mag2'),\r\n ),\r\n migrations.AlterField(\r\n model_name='dados',\r\n name='mag3',\r\n field=models.FloatField(blank=True, null=True, verbose_name='mag3'),\r\n ),\r\n migrations.AlterField(\r\n model_name='dados',\r\n name='psbe10',\r\n field=models.FloatField(blank=True, null=True, verbose_name='psbe10'),\r\n ),\r\n migrations.AlterField(\r\n model_name='dados',\r\n name='psbe100',\r\n field=models.FloatField(blank=True, null=True, verbose_name='psbe100'),\r\n ),\r\n migrations.AlterField(\r\n model_name='dados',\r\n name='rh',\r\n field=models.FloatField(blank=True, null=True, verbose_name='rh'),\r\n ),\r\n migrations.AlterField(\r\n model_name='dados',\r\n name='tdl',\r\n field=models.FloatField(blank=True, null=True, verbose_name='tdl'),\r\n ),\r\n migrations.AlterField(\r\n model_name='dados',\r\n name='tp',\r\n field=models.FloatField(blank=True, null=True, verbose_name='tp'),\r\n ),\r\n migrations.AlterField(\r\n model_name='dados',\r\n name='tsbe10',\r\n field=models.FloatField(blank=True, null=True, verbose_name='tsbe10'),\r\n ),\r\n migrations.AlterField(\r\n model_name='dados',\r\n name='tsbe100',\r\n field=models.FloatField(blank=True, null=True, verbose_name='tsbe100'),\r\n ),\r\n migrations.AlterField(\r\n model_name='dados',\r\n 
name='tsbe20',\r\n field=models.FloatField(blank=True, null=True, verbose_name='tsbe20'),\r\n ),\r\n migrations.AlterField(\r\n model_name='dados',\r\n name='tsbe30',\r\n field=models.FloatField(blank=True, null=True, verbose_name='tsbe30'),\r\n ),\r\n migrations.AlterField(\r\n model_name='dados',\r\n name='tsbe40',\r\n field=models.FloatField(blank=True, null=True, verbose_name='tsbe40'),\r\n ),\r\n migrations.AlterField(\r\n model_name='dados',\r\n name='tsbe50',\r\n field=models.FloatField(blank=True, null=True, verbose_name='tsbe50'),\r\n ),\r\n migrations.AlterField(\r\n model_name='dados',\r\n name='tsbe60',\r\n field=models.FloatField(blank=True, null=True, verbose_name='tsbe60'),\r\n ),\r\n migrations.AlterField(\r\n model_name='dados',\r\n name='tsbe70',\r\n field=models.FloatField(blank=True, null=True, verbose_name='tsbe70'),\r\n ),\r\n migrations.AlterField(\r\n model_name='dados',\r\n name='tsbe80',\r\n field=models.FloatField(blank=True, null=True, verbose_name='tsbe80'),\r\n ),\r\n migrations.AlterField(\r\n model_name='dados',\r\n name='tsbe90',\r\n field=models.FloatField(blank=True, null=True, verbose_name='tsbe90'),\r\n ),\r\n migrations.AlterField(\r\n model_name='dados',\r\n name='tsup',\r\n field=models.FloatField(blank=True, null=True, verbose_name='tsup'),\r\n ),\r\n migrations.AlterField(\r\n model_name='dados',\r\n name='wd',\r\n field=models.FloatField(blank=True, null=True, verbose_name='wd'),\r\n ),\r\n migrations.AlterField(\r\n model_name='dados',\r\n name='ws',\r\n field=models.FloatField(blank=True, null=True, verbose_name='ws'),\r\n ),\r\n ]\r\n"
},
{
"alpha_fraction": 0.5608646273612976,
"alphanum_fraction": 0.5961319804191589,
"avg_line_length": 29.310344696044922,
"blob_id": "95803283f8d202e1b230ee4d53280d417330ba3e",
"content_id": "731d0a9fc00cb9639c40402275607a0b3a385329",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 881,
"license_type": "no_license",
"max_line_length": 132,
"num_lines": 29,
"path": "/remo/migrations/0028_auto_20181212_0013.py",
"repo_name": "hpppereira/django-remo",
"src_encoding": "UTF-8",
"text": "# Generated by Django 2.0.7 on 2018-12-12 00:13\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('remo', '0027_auto_20181209_2351'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='person',\n name='institution',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='remo.Institution', verbose_name='Instituição'),\n ),\n migrations.AlterField(\n model_name='serie',\n name='data_f',\n field=models.DateField(blank=True, null=True, verbose_name='Data final'),\n ),\n migrations.AlterField(\n model_name='serie',\n name='data_i',\n field=models.DateField(blank=True, null=True, verbose_name='Data inicial'),\n ),\n ]\n"
},
{
"alpha_fraction": 0.5384849309921265,
"alphanum_fraction": 0.5555217266082764,
"avg_line_length": 39.085365295410156,
"blob_id": "5da558f4898ffdc3c2b81f9de93d0607d19e083b",
"content_id": "7ed6532c360ca3b290130748976bb70eb371901e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3292,
"license_type": "no_license",
"max_line_length": 141,
"num_lines": 82,
"path": "/remo/migrations/0005_auto_20181010_2306.py",
"repo_name": "hpppereira/django-remo",
"src_encoding": "UTF-8",
"text": "# Generated by Django 2.0.7 on 2018-10-10 23:06\n\nfrom django.db import migrations, models\nimport django.utils.timezone\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('remo', '0004_auto_20181008_1802'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Destaques',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('title', models.CharField(max_length=200, verbose_name='Titulo')),\n ('image', models.FileField(upload_to='imagens/', verbose_name='Documento')),\n ('created_date', models.DateTimeField(default=django.utils.timezone.now)),\n ],\n ),\n migrations.CreateModel(\n name='Doi',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('title', models.CharField(max_length=200, verbose_name='Titulo')),\n ('image', models.FileField(upload_to='imagens/', verbose_name='Documento')),\n ('created_date', models.DateTimeField(default=django.utils.timezone.now)),\n ],\n ),\n migrations.CreateModel(\n name='Paginas',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('title', models.CharField(max_length=200, verbose_name='Titulo')),\n ('image', models.FileField(upload_to='imagens/', verbose_name='Documento')),\n ('created_date', models.DateTimeField(default=django.utils.timezone.now)),\n ],\n ),\n migrations.AddField(\n model_name='post',\n name='title_en',\n field=models.CharField(default='', max_length=200, verbose_name='Texto em Inglês'),\n preserve_default=False,\n ),\n migrations.AlterField(\n model_name='person',\n name='email',\n field=models.EmailField(max_length=60, verbose_name='E-mail'),\n ),\n migrations.AlterField(\n model_name='person',\n name='institution',\n field=models.CharField(max_length=60, verbose_name='Instituição'),\n ),\n migrations.AlterField(\n model_name='person',\n name='lattes',\n field=models.CharField(max_length=100, verbose_name='Link para o Lattes'),\n ),\n migrations.AlterField(\n model_name='person',\n name='name',\n field=models.CharField(max_length=60, verbose_name='Nome'),\n ),\n migrations.AlterField(\n model_name='person',\n name='position',\n field=models.CharField(choices=[('C', 'Coordenador'), ('P', 'Pesquisador'), ('A', 'Aluno')], max_length=1, verbose_name='Cargo'),\n ),\n migrations.AlterField(\n model_name='post',\n name='text_pt',\n field=models.TextField(verbose_name='Texto em Português'),\n ),\n migrations.AlterField(\n model_name='post',\n name='title',\n field=models.CharField(max_length=200, verbose_name='Titulo em Português'),\n ),\n ]\n"
},
{
"alpha_fraction": 0.52173912525177,
"alphanum_fraction": 0.5585284233093262,
"avg_line_length": 30.473684310913086,
"blob_id": "9600d0eb346d98e539690eb5a3ff9e80356dd6ce",
"content_id": "5a26fb2bc8a1395d3e5cb80bfbfa35a139d8fe02",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1196,
"license_type": "no_license",
"max_line_length": 98,
"num_lines": 38,
"path": "/remo/migrations/0007_auto_20181011_0133.py",
"repo_name": "hpppereira/django-remo",
"src_encoding": "UTF-8",
"text": "# Generated by Django 2.0.6 on 2018-10-11 01:33\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('remo', '0006_auto_20181011_0132'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='doi',\n name='ano',\n field=models.CharField(blank=True, max_length=4, null=True, verbose_name='Ano'),\n ),\n migrations.AlterField(\n model_name='doi',\n name='authors',\n field=models.CharField(blank=True, max_length=200, null=True, verbose_name='Autores'),\n ),\n migrations.AlterField(\n model_name='doi',\n name='doi',\n field=models.CharField(blank=True, max_length=200, null=True, verbose_name='Doi'),\n ),\n migrations.AlterField(\n model_name='doi',\n name='title',\n field=models.CharField(blank=True, max_length=200, null=True, verbose_name='Titulo'),\n ),\n migrations.AlterField(\n model_name='doi',\n name='url',\n field=models.CharField(blank=True, max_length=200, null=True, verbose_name='URL'),\n ),\n ]\n"
},
{
"alpha_fraction": 0.5057914853096008,
"alphanum_fraction": 0.5656370520591736,
"avg_line_length": 22.545454025268555,
"blob_id": "5acfe800dc32c4da3c70d4592079848183fc6dcd",
"content_id": "9dcf90a7cc5e2442bf01191235444b5afea15287",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 520,
"license_type": "no_license",
"max_line_length": 86,
"num_lines": 22,
"path": "/remo/migrations/0029_auto_20181212_0032.py",
"repo_name": "hpppereira/django-remo",
"src_encoding": "UTF-8",
"text": "# Generated by Django 2.0.7 on 2018-12-12 00:32\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('remo', '0028_auto_20181212_0013'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='estacao',\n name='infos',\n ),\n migrations.AddField(\n model_name='serie',\n name='infos',\n field=models.TextField(blank=True, null=True, verbose_name='Informações'),\n ),\n ]\n"
},
{
"alpha_fraction": 0.5692108869552612,
"alphanum_fraction": 0.5853816270828247,
"avg_line_length": 38.68421173095703,
"blob_id": "7980ee9c4e34242aef6a04b7ae8fb10a0a0a07d9",
"content_id": "4f5967f482fc21e434ec9e4004112460841ca0db",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1550,
"license_type": "no_license",
"max_line_length": 120,
"num_lines": 38,
"path": "/remo/migrations/0012_auto_20181107_0937.py",
"repo_name": "hpppereira/django-remo",
"src_encoding": "UTF-8",
"text": "# Generated by Django 2.1.2 on 2018-11-07 11:37\r\n\r\nfrom django.conf import settings\r\nfrom django.db import migrations, models\r\nimport django.db.models.deletion\r\nimport django.utils.timezone\r\n\r\n\r\nclass Migration(migrations.Migration):\r\n\r\n dependencies = [\r\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\r\n ('remo', '0011_index'),\r\n ]\r\n\r\n operations = [\r\n migrations.CreateModel(\r\n name='Noticia',\r\n fields=[\r\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\r\n ('title', models.CharField(max_length=200, verbose_name='Titulo em Português')),\r\n ('title_en', models.CharField(max_length=200, verbose_name='Titulo em Inglês')),\r\n ('text_pt', models.TextField(verbose_name='Texto em Português')),\r\n ('text_en', models.TextField(verbose_name='Texto em Inglês')),\r\n ('image', models.FileField(upload_to='imagens/', verbose_name='Imagem')),\r\n ('created_date', models.DateTimeField(default=django.utils.timezone.now)),\r\n ('published_date', models.DateTimeField(blank=True, null=True)),\r\n ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),\r\n ],\r\n ),\r\n migrations.RemoveField(\r\n model_name='post',\r\n name='author',\r\n ),\r\n migrations.DeleteModel(\r\n name='Post',\r\n ),\r\n ]\r\n"
},
{
"alpha_fraction": 0.5105328559875488,
"alphanum_fraction": 0.5576208233833313,
"avg_line_length": 33.08695602416992,
"blob_id": "f4f2c1545805f95c191b365b73b7289d5dcd9bd5",
"content_id": "210454d1909c9c03c509dbbd85455497580254b7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 807,
"license_type": "no_license",
"max_line_length": 114,
"num_lines": 23,
"path": "/remo/migrations/0013_institution.py",
"repo_name": "hpppereira/django-remo",
"src_encoding": "UTF-8",
"text": "# Generated by Django 2.1.2 on 2018-11-07 12:08\r\n\r\nfrom django.db import migrations, models\r\n\r\n\r\nclass Migration(migrations.Migration):\r\n\r\n dependencies = [\r\n ('remo', '0012_auto_20181107_0937'),\r\n ]\r\n\r\n operations = [\r\n migrations.CreateModel(\r\n name='Institution',\r\n fields=[\r\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\r\n ('name', models.CharField(max_length=60, verbose_name='Nome')),\r\n ('initials', models.CharField(max_length=8, verbose_name='Sigla')),\r\n ('contry', models.CharField(max_length=30, verbose_name='Pais')),\r\n ('city', models.CharField(max_length=30, verbose_name='Cidade')),\r\n ],\r\n ),\r\n ]\r\n"
},
{
"alpha_fraction": 0.3703448176383972,
"alphanum_fraction": 0.38413792848587036,
"avg_line_length": 33.5476188659668,
"blob_id": "7639946bb2096ba502aa0701f4cd0d65e5f7f497",
"content_id": "c2946615299062b841048d06026aa3f36944d512",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "HTML",
"length_bytes": 1450,
"license_type": "no_license",
"max_line_length": 89,
"num_lines": 42,
"path": "/remo/templates/noticias_detail_en.html",
"repo_name": "hpppereira/django-remo",
"src_encoding": "UTF-8",
"text": "{% extends \"base_generic_en.html\" %}\n\n{% block content %}\n\n<section style=\"background-color: #111c28;\" id=\"home\"> \n <div class=\"container\"> \n <div class=\"row d-flex align-items-center justify-content-center\">\n <div class=\"about-content col-lg-12\" style=\"padding: 30px;\"></div> \n </div>\n </div>\n</section>\n\n<section class=\"post-content-area single-post-area\">\n <div class=\"container\">\n <div class=\"row\">\n <div class=\"col-lg-12 posts-list\">\n <div class=\"single-post row\">\n <div class=\"col-lg-3\"></div>\n <div class=\"col-lg-6\">\n <div class=\"col-lg-12\">\n <h3>{{ post.title_en }}</h3>\n <br>\n </div>\n <div class=\"feature-img\">\n {% if post.image.url != None %}\n <p>{{ post.published_date }}</p>\n <img class=\"img-fluid\" src=\"{{ post.image.url }}\" alt=\"\">\n {% endif %}\n </div>\n <br>\n <div class=\"col-lg-12\">\n {{ post.text_en }}\n </div>\n </div>\n <div class=\"col-lg-3\"></div>\n </div>\n </div>\n </div>\n </div> \n</section>\n\n{% endblock %}"
},
{
"alpha_fraction": 0.5246636867523193,
"alphanum_fraction": 0.6008968353271484,
"avg_line_length": 23.77777862548828,
"blob_id": "6b8206dfefd6361f364b29cbe3b79ddc811749e9",
"content_id": "32a7896518741a8202215697d7c505ff4a90283d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 446,
"license_type": "no_license",
"max_line_length": 110,
"num_lines": 18,
"path": "/remo/migrations/0061_auto_20190705_1522.py",
"repo_name": "hpppereira/django-remo",
"src_encoding": "UTF-8",
"text": "# Generated by Django 2.0.5 on 2019-07-05 15:22\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('remo', '0060_auto_20190609_1435'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='publicacao',\n name='title',\n field=models.CharField(blank=True, default='#', max_length=500, null=True, verbose_name='Titulo'),\n ),\n ]\n"
},
{
"alpha_fraction": 0.6688762903213501,
"alphanum_fraction": 0.6796085834503174,
"avg_line_length": 27.549549102783203,
"blob_id": "de0166c93178f235702c118eefbc13f1ff006513",
"content_id": "af79097cf728c6767404d84efcf7e22afcd7f276",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3168,
"license_type": "no_license",
"max_line_length": 90,
"num_lines": 111,
"path": "/remoweb/settings.py",
"repo_name": "hpppereira/django-remo",
"src_encoding": "UTF-8",
"text": "import os\n\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n# SECRET_KEY = os.getenv('DJANGO_SECRET_KEY', 'r8^d_)4zkep@uj@-86ix^a85in%b@dl)olpa*29_b3firuh%gb')\nSECRET_KEY = os.environ.get('DJANGO_SECRET_KEY', 'r8^d_)4zkep@uj@-86ix^a85in%b@dl)olpa*29_b3firuh%gb')\n# DEBUG = os.getenv('DJANGO_DEBUG') == 'True'\nDEBUG = bool( os.environ.get('DJANGO_DEBUG', True) )\n# ALLOWED_HOSTS = ['django','localhost','.rederemo.org']\nALLOWED_HOSTS = ['181.214.224.242','0.0.0.0','127.0.0.1','rederemo.org','www.rederemo.org']\n\n# if not DEBUG:\n# SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')\n# SECURE_SSL_REDIRECT = True\n# SESSION_COOKIE_SECURE = True\n# CSRF_COOKIE_SECURE = True\n\n\n# Application definition\n\nINSTALLED_APPS = [\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'remo.apps.RemoConfig',\n # 'filemanager',\n]\n\nMIDDLEWARE = [\n 'django.middleware.security.SecurityMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'django_session_timeout.middleware.SessionTimeoutMiddleware', \n ]\n\nSESSION_EXPIRE_SECONDS = 3600 # 1 hour\n\nROOT_URLCONF = 'remoweb.urls'\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'remoweb.wsgi.application'\n\n\n# Database\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),\n }\n}\n\n\n# Password validation\n\nAUTH_PASSWORD_VALIDATORS = [\n {'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',},\n {'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',},\n {'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',},\n {'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',},\n]\n\n\n# Internationalization\nDATETIME_FORMAT = 'd-m-Y'\n\nLANGUAGE_CODE = 'pt-br'\n# TIME_ZONE = 'America/Bahia'\nTIME_ZONE = 'UTC'\n\nUSE_I18N = True \nUSE_L10N = False\nUSE_TZ = True\n\n# SESSION AGE 5 Minutes\n# SESSION_COOKIE_AGE = 30*60\n\n# Static files (CSS, JavaScript, Images)\n\nMEDIA_URL = '/media/'\nMEDIA_ROOT = os.path.join(BASE_DIR, 'media')\n\nSTATIC_URL = '/static/'\nSTATIC_ROOT = os.path.join(BASE_DIR, 'static')\n\n# Simplified static file serving.\n# https://warehouse.python.org/project/whitenoise/\nSTATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'\n\nDEFAULT_AUTO_FIELD='django.db.models.AutoField'"
},
{
"alpha_fraction": 0.5234248638153076,
"alphanum_fraction": 0.5815832018852234,
"avg_line_length": 25.913043975830078,
"blob_id": "77cc18a8497d6c2cc04100bbb149cd32aa8e9277",
"content_id": "be4457e212102098178b56795e449c2caabb42cd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 619,
"license_type": "no_license",
"max_line_length": 98,
"num_lines": 23,
"path": "/remo/migrations/0060_auto_20190609_1435.py",
"repo_name": "hpppereira/django-remo",
"src_encoding": "UTF-8",
"text": "# Generated by Django 2.1.7 on 2019-06-09 14:35\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('remo', '0059_auto_20190606_2234'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='person',\n name='email',\n field=models.EmailField(blank=True, max_length=60, verbose_name='E-mail'),\n ),\n migrations.AlterField(\n model_name='person',\n name='lattes',\n field=models.CharField(blank=True, max_length=100, verbose_name='Link para o Lattes'),\n ),\n ]\n"
},
{
"alpha_fraction": 0.4767801761627197,
"alphanum_fraction": 0.5727553963661194,
"avg_line_length": 18,
"blob_id": "d0d81e5372467220eb8e2929455f665aefb2352f",
"content_id": "46542d2022a4d4fd4f6f43b71ba19524d53dd42c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 323,
"license_type": "no_license",
"max_line_length": 47,
"num_lines": 17,
"path": "/remo/migrations/0035_remove_doi_journal.py",
"repo_name": "hpppereira/django-remo",
"src_encoding": "UTF-8",
"text": "# Generated by Django 2.0.6 on 2018-12-30 00:01\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('remo', '0034_auto_20181225_1631'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='doi',\n name='journal',\n ),\n ]\n"
},
{
"alpha_fraction": 0.7115384340286255,
"alphanum_fraction": 0.7115384340286255,
"avg_line_length": 18.625,
"blob_id": "8dd1b39ad4f3d33fd2599d2a9faa5e08e0d76cd4",
"content_id": "55cbefd4ab57f40cb7f424b8b533ecfcf3a30b37",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 156,
"license_type": "no_license",
"max_line_length": 47,
"num_lines": 8,
"path": "/filemanager/urls.py",
"repo_name": "hpppereira/django-remo",
"src_encoding": "UTF-8",
"text": "from django.conf.urls import url\n\nfrom filemanager import path_end\nfrom views import view\n\nurlpatterns = (\n url(r'^abc/' + path_end, view, name='view'),\n)"
},
{
"alpha_fraction": 0.5151915550231934,
"alphanum_fraction": 0.5495376586914062,
"avg_line_length": 31.913043975830078,
"blob_id": "ead338eb5b8ee2433292ef31f37287c3100e96a0",
"content_id": "119926821dfa9cc5f2c694c804e51849ddac09d5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 757,
"license_type": "no_license",
"max_line_length": 131,
"num_lines": 23,
"path": "/remo/migrations/0002_person.py",
"repo_name": "hpppereira/django-remo",
"src_encoding": "UTF-8",
"text": "# Generated by Django 2.0.5 on 2018-07-13 17:29\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('remo', '0001_initial'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Person',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=60)),\n ('email', models.CharField(max_length=60)),\n ('institution', models.CharField(max_length=60)),\n ('position', models.CharField(choices=[('C', 'Coordenador'), ('P', 'Pesquisador'), ('A', 'Aluno')], max_length=1)),\n ],\n ),\n ]\n"
},
{
"alpha_fraction": 0.6174142360687256,
"alphanum_fraction": 0.6253297924995422,
"avg_line_length": 18.237287521362305,
"blob_id": "13148d8dd47da68231e376cafdcb5834fefa6d5d",
"content_id": "39b08643357657d5a1d53f2429757ed0c59f0a1e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1148,
"license_type": "no_license",
"max_line_length": 73,
"num_lines": 59,
"path": "/procdb/add_paper_sqlite3.py",
"repo_name": "hpppereira/django-remo",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nFerramentas para popular o banco de dados da página da REMO\n\n**Funções**\n\n>> :func:`funcao` <<\n Popular BD.\n\n**Instituições**\n1 - UFRJ\n2 - UFBA\n3 - Petrobras\n\n**Cargos Pesquisador**\nA - Coordenador\nB - Pesquisador\nC - Aluno Pós-Graduação\nD - Aluno Graduação\nE - Técnico\n----------\n\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nimport sqlite3\nfrom sqlite3 import Error\n\n\n# read csv with data.\ndf = pd.read_excel('data/tabela_paper_REMO.xlsx')\n\n# subsitui palavras por indices do banco\n\n# list of tuples\ndata = [tuple([int(df.ano[i]), df.autor[i], df.tipo[i],\n df.doi[i], df.url[i], df.titulo[i]])\n for i in range(len(df))]\n\ntry:\n conn = sqlite3.connect('../db.sqlite3')\n conn.text_factory = str\nexcept Error as e:\n print(e)\n\ncur = conn.cursor()\n\n# delete from your_table;\n# delete from sqlite_sequence where name='your_table';\n \nsql = ''' INSERT INTO remo_publicacao(ano,authors,position,doi,url,title)\n VALUES (?,?,?,?,?,?)'''\n\nwith conn:\n # cur.execute(sql, ('teste', 'teste@teste', 1, 'teste.com', 'A'))\n cur.executemany(sql, data)\n\nconn.close()\n \n"
},
{
"alpha_fraction": 0.5164319276809692,
"alphanum_fraction": 0.5892018675804138,
"avg_line_length": 22.66666603088379,
"blob_id": "9feda5f0611ce3963d61e20611d3fbae11c57ce7",
"content_id": "9f605caf8e02585d98b578b54ec4fa8ee4d613ca",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 426,
"license_type": "no_license",
"max_line_length": 97,
"num_lines": 18,
"path": "/remo/migrations/0039_auto_20190315_2048.py",
"repo_name": "hpppereira/django-remo",
"src_encoding": "UTF-8",
"text": "# Generated by Django 2.1.7 on 2019-03-15 23:48\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('remo', '0038_auto_20190315_2030'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='serie',\n name='csv',\n field=models.FileField(blank=True, null=True, upload_to='data/', verbose_name='csv'),\n ),\n ]\n"
},
{
"alpha_fraction": 0.47145187854766846,
"alphanum_fraction": 0.5220228433609009,
"avg_line_length": 20.89285659790039,
"blob_id": "5be6561b00fa5a0bb2575bfec05f2f3850f11d8d",
"content_id": "17a6dc4c55a0adc172cfef8b51d0a38c999426ab",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 613,
"license_type": "no_license",
"max_line_length": 47,
"num_lines": 28,
"path": "/remo/migrations/0056_auto_20190504_1943.py",
"repo_name": "hpppereira/django-remo",
"src_encoding": "UTF-8",
"text": "# Generated by Django 2.1.7 on 2019-05-04 19:43\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('remo', '0055_auto_20190504_0233'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='documentos',\n name='dire',\n ),\n migrations.RenameField(\n model_name='index',\n old_name='title',\n new_name='title_pt',\n ),\n migrations.DeleteModel(\n name='Diretorios',\n ),\n migrations.DeleteModel(\n name='Documentos',\n ),\n ]\n"
},
{
"alpha_fraction": 0.5328282713890076,
"alphanum_fraction": 0.6111111044883728,
"avg_line_length": 22.294116973876953,
"blob_id": "8415fb2055091ac81189a329d867b48b234b528d",
"content_id": "bb54cc06f10f2e8d35e7fc2bb68aade8c687b777",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 400,
"license_type": "no_license",
"max_line_length": 92,
"num_lines": 17,
"path": "/remo/migrations/0049_auto_20190406_0351.py",
"repo_name": "hpppereira/django-remo",
"src_encoding": "UTF-8",
"text": "# Generated by Django 2.1.7 on 2019-04-06 03:51\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('remo', '0048_auto_20190406_0322'),\n ]\n\n operations = [\n migrations.AlterModelOptions(\n name='institution',\n options={'verbose_name': 'Instituições', 'verbose_name_plural': 'Instituições'},\n ),\n ]\n"
},
{
"alpha_fraction": 0.6292135119438171,
"alphanum_fraction": 0.6292135119438171,
"avg_line_length": 21.125,
"blob_id": "604e8a3df4ffd347242580a5a0f81c3b2389226a",
"content_id": "8d4d90c4ee8c656044d22df8540a2b9d39c01f87",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 178,
"license_type": "no_license",
"max_line_length": 39,
"num_lines": 8,
"path": "/remo/forms.py",
"repo_name": "hpppereira/django-remo",
"src_encoding": "UTF-8",
"text": "from django import forms\nfrom .models import Noticia\n\n# class NoticiaForm(forms.ModelForm):\n\n# class Meta:\n# model = Noticia\n# fields = ('title', 'text_pt')\n\n"
},
{
"alpha_fraction": 0.5070028305053711,
"alphanum_fraction": 0.593837559223175,
"avg_line_length": 20,
"blob_id": "c3495a4710481dfee27137b0e134cb0b59715f89",
"content_id": "fa0f253ff8c4d1ef351a5e8bdf3ca4a9d886fd0d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 357,
"license_type": "no_license",
"max_line_length": 56,
"num_lines": 17,
"path": "/remo/migrations/0063_auto_20190705_1725.py",
"repo_name": "hpppereira/django-remo",
"src_encoding": "UTF-8",
"text": "# Generated by Django 2.0.5 on 2019-07-05 17:25\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('remo', '0062_auto_20190705_1651'),\n ]\n\n operations = [\n migrations.AlterModelOptions(\n name='contacts',\n options={'verbose_name_plural': 'Contatos'},\n ),\n ]\n"
},
{
"alpha_fraction": 0.5271428823471069,
"alphanum_fraction": 0.5535714030265808,
"avg_line_length": 30.11111068725586,
"blob_id": "e5ec3da7d009a58faaf1613eb36e27eaf8d02ef1",
"content_id": "1e182abd34e85370418f3499a38065c3924b9443",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1404,
"license_type": "no_license",
"max_line_length": 96,
"num_lines": 45,
"path": "/remo/migrations/0009_auto_20181028_1836.py",
"repo_name": "hpppereira/django-remo",
"src_encoding": "UTF-8",
"text": "# Generated by Django 2.0.7 on 2018-10-28 18:36\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('remo', '0008_auto_20181028_1835'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='destaques',\n name='created_date',\n ),\n migrations.AddField(\n model_name='destaques',\n name='text_en',\n field=models.TextField(default='', verbose_name='Texto em Inglês'),\n preserve_default=False,\n ),\n migrations.AddField(\n model_name='destaques',\n name='text_pt',\n field=models.TextField(default='', verbose_name='Texto em Português'),\n preserve_default=False,\n ),\n migrations.AddField(\n model_name='destaques',\n name='title_en',\n field=models.CharField(default='', max_length=200, verbose_name='Titulo em Inglês'),\n preserve_default=False,\n ),\n migrations.AlterField(\n model_name='destaques',\n name='image',\n field=models.FileField(upload_to='imagens/', verbose_name='Imagem'),\n ),\n migrations.AlterField(\n model_name='destaques',\n name='title',\n field=models.CharField(max_length=200, verbose_name='Titulo em Português'),\n ),\n ]\n"
},
{
"alpha_fraction": 0.5390475988388062,
"alphanum_fraction": 0.6038095355033875,
"avg_line_length": 27.16666603088379,
"blob_id": "b66fdaaf9a3b8e53cbaa9c211e35c24615750f58",
"content_id": "6b0bd7b8a60786c511d12073e35f3d970c73d725",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 528,
"license_type": "no_license",
"max_line_length": 180,
"num_lines": 18,
"path": "/remo/migrations/0034_auto_20181225_1631.py",
"repo_name": "hpppereira/django-remo",
"src_encoding": "UTF-8",
"text": "# Generated by Django 2.1.2 on 2018-12-25 18:31\r\n\r\nfrom django.db import migrations, models\r\n\r\n\r\nclass Migration(migrations.Migration):\r\n\r\n dependencies = [\r\n ('remo', '0033_auto_20181220_2052'),\r\n ]\r\n\r\n operations = [\r\n migrations.AlterField(\r\n model_name='doi',\r\n name='doi',\r\n field=models.CharField(blank=True, help_text='Caso a publicação não tenha DOI, preencher tudo manualmente', max_length=200, null=True, unique=True, verbose_name='Doi'),\r\n ),\r\n ]\r\n"
},
{
"alpha_fraction": 0.7349397540092468,
"alphanum_fraction": 0.7349397540092468,
"avg_line_length": 15.600000381469727,
"blob_id": "41ee79a4e3e53cd270fc5fcf5c1dde50b92b4203",
"content_id": "ced03d1995cc4c1afee47367798a19b376744106",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 83,
"license_type": "no_license",
"max_line_length": 33,
"num_lines": 5,
"path": "/remo/apps.py",
"repo_name": "hpppereira/django-remo",
"src_encoding": "UTF-8",
"text": "from django.apps import AppConfig\n\n\nclass RemoConfig(AppConfig):\n name = 'remo'\n"
},
{
"alpha_fraction": 0.5637746453285217,
"alphanum_fraction": 0.5734531879425049,
"avg_line_length": 40.92753601074219,
"blob_id": "0d3f5adbe749cca50eb925d94b5a5ec457c3833a",
"content_id": "a3bdcabadfb1f1e7a8ab76418fe90310504ef578",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2899,
"license_type": "no_license",
"max_line_length": 120,
"num_lines": 69,
"path": "/remo/migrations/0004_auto_20181008_1802.py",
"repo_name": "hpppereira/django-remo",
"src_encoding": "UTF-8",
"text": "# Generated by Django 2.0.6 on 2018-10-08 18:02\n\nfrom django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\nimport django.utils.timezone\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ('remo', '0003_person_lattes'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Doc',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('title', models.CharField(max_length=200, verbose_name='Titulo')),\n ('image', models.FileField(upload_to='imagens/', verbose_name='Documento')),\n ('created_date', models.DateTimeField(default=django.utils.timezone.now)),\n ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),\n ],\n ),\n migrations.CreateModel(\n name='Estacao',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('nome', models.CharField(max_length=200, verbose_name='Nome da Estação')),\n ('lat', models.FloatField(verbose_name='Latitude')),\n ('lon', models.FloatField(verbose_name='Longitude')),\n ('infos', models.TextField(verbose_name='Informações')),\n ],\n ),\n migrations.CreateModel(\n name='Publicacoes',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('title', models.CharField(max_length=200, verbose_name='Titulo')),\n ('image', models.FileField(upload_to='imagens/', verbose_name='Documento')),\n ('created_date', models.DateTimeField(default=django.utils.timezone.now)),\n ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),\n ],\n ),\n migrations.RemoveField(\n model_name='post',\n name='text',\n ),\n migrations.AddField(\n model_name='post',\n name='image',\n field=models.FileField(default='', upload_to='imagens/', verbose_name='Imagem'),\n preserve_default=False,\n ),\n migrations.AddField(\n model_name='post',\n name='text_en',\n field=models.TextField(default='', verbose_name='Texto em Inglês'),\n preserve_default=False,\n ),\n migrations.AddField(\n model_name='post',\n name='text_pt',\n field=models.TextField(default='', verbose_name='Texto em português'),\n preserve_default=False,\n ),\n ]\n"
},
{
"alpha_fraction": 0.529940128326416,
"alphanum_fraction": 0.5868263244628906,
"avg_line_length": 18.647058486938477,
"blob_id": "819d23981e6b6ba5c0ea9aa20e27b94a458b2e78",
"content_id": "7d2aa59da649fa7a43c7d1b15473cbf46822e8dc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 334,
"license_type": "no_license",
"max_line_length": 47,
"num_lines": 17,
"path": "/remo/migrations/0058_remove_documentos_title_en.py",
"repo_name": "hpppereira/django-remo",
"src_encoding": "UTF-8",
"text": "# Generated by Django 2.1.7 on 2019-05-09 21:23\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('remo', '0057_diretorios_documentos'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='documentos',\n name='title_en',\n ),\n ]\n"
},
{
"alpha_fraction": 0.6846446990966797,
"alphanum_fraction": 0.6865482330322266,
"avg_line_length": 51.56666564941406,
"blob_id": "b62f397c397fde014a40ecf060a6fbcd937efea1",
"content_id": "14027122e8c204eb005ced391b7592367c4e4b20",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1576,
"license_type": "no_license",
"max_line_length": 91,
"num_lines": 30,
"path": "/remo/urls.py",
"repo_name": "hpppereira/django-remo",
"src_encoding": "UTF-8",
"text": "from django.urls import path\nfrom . import views\nfrom django.contrib.staticfiles.urls import static\nfrom django.contrib.staticfiles.urls import staticfiles_urlpatterns\n \n# from filemanager import path_end\n# from django.conf.urls import url\n\nurlpatterns = [\n path('', views.index, name='index'),\n path('index_en/', views.index_en, name='index_en'),\n path('equipe/', views.equipe, name='equipe'),\n path('equipe_en/', views.equipe_en, name='equipe_en'),\n path('publicacoes/', views.publicacoes, name='publicacoes'), \n path('publicacoes_en/', views.publicacoes_en, name='publicacoes_en'), \n path('noticias/', views.noticias_list, name='noticias_list'),\n path('noticias_en/', views.noticias_list_en, name='noticias_list_en'),\n path('noticias/post/<int:pk>', views.noticias_detail, name='noticias_detail'),\n path('noticias_en/post/<int:pk>', views.noticias_detail_en, name='noticias_detail_en'),\n path('modelagem/', views.modelagem, name='modelagem'), \n path('modelagem_en/', views.modelagem_en, name='modelagem_en'),\n path('dados/', views.dados, name='dados'),\n path('dados_en/', views.dados_en, name='dados_en'), \n path('dados/grafic/<int:pk>', views.dados_grafic, name=\"dados_grafic\"),\n path('dados_en/grafic/<int:pk>', views.dados_grafic_en, name=\"dados_grafic_en\"),\n path('contatos/', views.contatos, name='contatos'),\n path('contatos_en/', views.contatos_en, name='contatos_en'),\n path('documentos/', views.documentos, name='documentos'),\n # url(r'^documentos1/' + path_end, views.documentos1, name='documentos1'),\n]"
},
{
"alpha_fraction": 0.4954751133918762,
"alphanum_fraction": 0.570135772228241,
"avg_line_length": 22.55555534362793,
"blob_id": "1fd18051d58fb831d46d3f3153353c77cc3e7e15",
"content_id": "c86345c47216cfa94b300e55500e5cadbcbdbca3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 442,
"license_type": "no_license",
"max_line_length": 94,
"num_lines": 18,
"path": "/remo/migrations/0027_auto_20181209_2351.py",
"repo_name": "hpppereira/django-remo",
"src_encoding": "UTF-8",
"text": "# Generated by Django 2.1.2 on 2018-12-10 01:51\r\n\r\nfrom django.db import migrations, models\r\n\r\n\r\nclass Migration(migrations.Migration):\r\n\r\n dependencies = [\r\n ('remo', '0026_auto_20181209_2350'),\r\n ]\r\n\r\n operations = [\r\n migrations.AlterField(\r\n model_name='dados',\r\n name='date',\r\n field=models.CharField(blank=True, max_length=60, null=True, verbose_name='Data'),\r\n ),\r\n ]\r\n"
},
{
"alpha_fraction": 0.4776386320590973,
"alphanum_fraction": 0.5116279125213623,
"avg_line_length": 19.703702926635742,
"blob_id": "9e743735c4901cd989f2677f02279a40480b1996",
"content_id": "fe01ea7e3c33a20a8b3a974fc266f99d5cf2cfc3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 559,
"license_type": "no_license",
"max_line_length": 47,
"num_lines": 27,
"path": "/remo/migrations/0038_auto_20190315_2030.py",
"repo_name": "hpppereira/django-remo",
"src_encoding": "UTF-8",
"text": "# Generated by Django 2.1.7 on 2019-03-15 23:30\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('remo', '0037_destaque_url'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='doc',\n name='author',\n ),\n migrations.RemoveField(\n model_name='doc',\n name='categoria',\n ),\n migrations.DeleteModel(\n name='Doc',\n ),\n migrations.DeleteModel(\n name='Index_Doc',\n ),\n ]\n"
},
{
"alpha_fraction": 0.5214285850524902,
"alphanum_fraction": 0.5738095045089722,
"avg_line_length": 21.105262756347656,
"blob_id": "aa1a2cba5511dadf3ae4a349a7a187cc010144e2",
"content_id": "fcf693a795e4f5e0193bc7af3271a1852f470454",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 420,
"license_type": "no_license",
"max_line_length": 65,
"num_lines": 19,
"path": "/remo/migrations/0003_person_lattes.py",
"repo_name": "hpppereira/django-remo",
"src_encoding": "UTF-8",
"text": "# Generated by Django 2.0.5 on 2018-07-13 18:04\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('remo', '0002_person'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='person',\n name='lattes',\n field=models.CharField(default='aa', max_length=100),\n preserve_default=False,\n ),\n ]\n"
},
{
"alpha_fraction": 0.5150684714317322,
"alphanum_fraction": 0.567123293876648,
"avg_line_length": 19.27777862548828,
"blob_id": "740aa085b47c3c2ab90c5c6917aa8d6bdb9db81a",
"content_id": "19ba6959dc583476d40b1511cfec0107002594b1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 365,
"license_type": "no_license",
"max_line_length": 47,
"num_lines": 18,
"path": "/remo/migrations/0054_auto_20190504_0228.py",
"repo_name": "hpppereira/django-remo",
"src_encoding": "UTF-8",
"text": "# Generated by Django 2.1.7 on 2019-05-04 02:28\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('remo', '0053_remove_campanha_infos'),\n ]\n\n operations = [\n migrations.RenameField(\n model_name='noticia',\n old_name='title',\n new_name='title_pt',\n ),\n ]\n"
},
{
"alpha_fraction": 0.5598006844520569,
"alphanum_fraction": 0.5930232405662537,
"avg_line_length": 32.44444274902344,
"blob_id": "e6f38dc6895e9333bf50cf34b75928d2c753049b",
"content_id": "46e17383df0c375a308ae8cbcf0d169cdb93ff47",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 608,
"license_type": "no_license",
"max_line_length": 259,
"num_lines": 18,
"path": "/remo/migrations/0059_auto_20190606_2234.py",
"repo_name": "hpppereira/django-remo",
"src_encoding": "UTF-8",
"text": "# Generated by Django 2.1.7 on 2019-06-06 22:34\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('remo', '0058_remove_documentos_title_en'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='person',\n name='position',\n field=models.CharField(choices=[('A', 'Coordenador'), ('B', 'Pesquisador'), ('C', 'Aluno Pós-Graduação'), ('D', 'Aluno Graduação'), ('E', 'Técnico'), ('F', 'Pesquisador Egresso'), ('G', 'Colaborador Externo')], max_length=2, verbose_name='Cargo'),\n ),\n ]\n"
},
{
"alpha_fraction": 0.5290841460227966,
"alphanum_fraction": 0.5581682920455933,
"avg_line_length": 37.47618865966797,
"blob_id": "aeaeb2fb3fd0ec7b253e84fbc25f9e42e682e333",
"content_id": "78469b977c137921807caa088cfeb34788121c74",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1618,
"license_type": "no_license",
"max_line_length": 114,
"num_lines": 42,
"path": "/remo/migrations/0050_auto_20190501_1507.py",
"repo_name": "hpppereira/django-remo",
"src_encoding": "UTF-8",
"text": "# Generated by Django 2.0.6 on 2019-05-01 15:07\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('remo', '0049_auto_20190406_0351'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Documentos',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('title', models.CharField(max_length=200, verbose_name='Titulo em Português')),\n ('title_en', models.CharField(max_length=200, verbose_name='Titulo em Inglês')),\n ('docs', models.FileField(blank=True, null=True, upload_to='docs/', verbose_name='Imagem')),\n ('url', models.CharField(max_length=200, verbose_name='URL')),\n ],\n options={\n 'verbose_name': 'Documento',\n 'verbose_name_plural': 'Documentos',\n },\n ),\n migrations.AlterField(\n model_name='publicacao',\n name='ano',\n field=models.CharField(blank=True, default='#', max_length=4, null=True, verbose_name='Ano'),\n ),\n migrations.AlterField(\n model_name='publicacao',\n name='authors',\n field=models.CharField(blank=True, default='#', max_length=200, null=True, verbose_name='Autores'),\n ),\n migrations.AlterField(\n model_name='publicacao',\n name='title',\n field=models.CharField(blank=True, default='#', max_length=200, null=True, verbose_name='Titulo'),\n ),\n ]\n"
},
{
"alpha_fraction": 0.4449814260005951,
"alphanum_fraction": 0.5027881264686584,
"avg_line_length": 32.709678649902344,
"blob_id": "432a29d34dde546065675c153a87bf0e5fc73485",
"content_id": "d72ecb7997e0043cb0b933309ff4592522b0f040",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5380,
"license_type": "no_license",
"max_line_length": 97,
"num_lines": 155,
"path": "/procdb/plotly_remo_bmop.py",
"repo_name": "hpppereira/django-remo",
"src_encoding": "UTF-8",
"text": "\"\"\"\r\nGera HTML para as boias BMOBR da REMO\r\n\"\"\"\r\n\r\nimport os\r\nimport pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom plotly import tools\r\nimport plotly.graph_objs as go\r\nfrom plotly.offline import plot\r\n\r\n\r\n# --------------------------------------------------------------------------------------------- #\r\n\r\ndef make_figs(station, vars_dict, html_name):\r\n\r\n\r\n\t\"\"\"\r\n\tfig = fig object plotly\r\n\tvars_dics = structure with var name, xlabel and unit\r\n\thtml_name = output filename with .html\r\n\t\"\"\"\t\r\n\r\n\ttitles = tuple([vars_dict[vars_dict.keys()[i]][0] for i in range(len(vars_dict))])\r\n\r\n\tfig = tools.make_subplots(rows=len(vars_dict), cols=1, shared_xaxes=True,\r\n\t vertical_spacing = .02)\r\n\r\n\tfig['layout'].update(title=station)\r\n\tfig['layout'].update(height=1800, width=1250)\r\n\tfig['layout'].update(showlegend= False)\r\n\r\n\tll = 0\r\n\tfor v in vars_dict.keys():\r\n\r\n\t ll += 1\r\n\r\n\t xx = df.index\r\n\t yy = df[v]\r\n\r\n\t plott = go.Scatter(\r\n\t x = xx,\r\n\t y = yy,\r\n\t mode = 'lines',\r\n\t name = vars_dict[v][1]\r\n\t )\r\n\r\n\t fig.append_trace(plott, ll, 1)\r\n\r\n\t fig['layout']['yaxis%s' %ll].update(title=titles[ll-1] + ' (%s)' %vars_dict[v][1])\r\n\r\n\tplot(fig, filename=html_name)\r\n\r\n# --------------------------------------------------------------------------------------------- #\r\n## CF01_201503_BMOBR03\r\n\r\npathname = os.environ['HOME'] + '/Dropbox/BMOP/Processamento/data/CF01_201503_BMOP03/telemetria/'\r\nfilename = 'CF01_201503_BMOP03.csv'\r\n\r\ndateparse = lambda x: pd.datetime.strptime(x, '%d/%m/%Y %H:%M:%S')\r\n\r\ndf = pd.read_csv(pathname + filename, header=0,\r\n parse_dates=['ExprDataHoraBMOP'],\r\n date_parser=dateparse,\r\n index_col=['ExprDataHoraBMOP'])\r\n\r\ndf = df[:'2015-05']\r\n\r\ncf01_201503 = {'velocidadeVento': ['Intensidade do Vento', 'm/s'],\r\n 'direcaoVento': ['Direcao do Vento', 'graus'],\r\n\t\t 'umidadeRelativa': ['Umidade Relativa', '%'],\r\n 'temperaturaAr': ['Temperatura do Ar', 'gC'],\r\n 'pressao': ['Pressao Atmosferica', 'hPa'], \r\n 'alturaOnda': ['Altura Significativa', 'm'], \r\n 'periodoPico': ['Periodo de Pico', 'seg'], \r\n 'direcaoPico': ['Direcao de Pico', 'graus']}\t\t\t \r\n\r\nmake_figs('CF01 - Mar/2015', cf01_201503, 'cf01_201503_graphs.html')\r\n\r\ndf.to_html('cf01_201503_table.html')\r\n\r\n# --------------------------------------------------------------------------------------------- #\r\n## CF01_201602_BMOBR03\r\n\r\npathname = os.environ['HOME'] + '/Dropbox/BMOP/Processamento/data/CF01_201602_BMOP05/telemetria/'\r\nfilename = 'CF01_201602_BMOP05.csv'\r\n\r\n#carrega os dados da boia\r\ndf = pd.read_csv(pathname + filename, index_col='date', parse_dates=True)\r\n\r\n#pega dados quando a boia foi para agua\r\ndf = df['2016-03':'2016-05']\r\n\r\n# retira valores negativos\r\ndf.ate[df.ate<0] = np.nan\r\ndf.rh[df.rh<0] = np.nan\r\n\r\ncf01_201602 = {'ws': ['Intensidade do Vento', 'm/s'],\r\n 'wd': ['Direcao do Vento', 'graus'],\r\n\t\t 'rh': ['Umidade Relativa', '%'],\r\n 'ate': ['Temperatura do Ar', 'gC'],\r\n 'bp': ['Pressao Atmosferica', 'hPa'], \r\n 'hs': ['Altura Significativa', 'm'], \r\n 'tp': ['Periodo de Pico', 'seg'], \r\n 'dp': ['Direcao de Pico', 'graus']}\t\t\t \r\n\r\nmake_figs('CF01 - Fev/2016', cf01_201602, 'cf01_201602_graphs.html')\r\n\r\ndf.to_html('cf01_201602_table.html')\r\n\r\n# --------------------------------------------------------------------------------------------- #\r\n## 
CF01_201611_BMOBR05\r\n\r\npathname = os.environ['HOME'] + '/Dropbox/BMOP/Processamento/data/CF01_201611_BMOP05/telemetria/'\r\nfilename = 'CF01_201611_BMOP05.csv'\r\n\r\n#carrega os dados da boia\r\ndf = pd.read_csv(pathname + filename, index_col='date', parse_dates=True)\r\n\r\n\r\ncf01_201611 = {'ws': ['Intensidade do Vento', 'm/s'],\r\n 'wd': ['Direcao do Vento', 'graus'],\r\n\t\t 'rh': ['Umidade Relativa', '%'],\r\n 'ate': ['Temperatura do Ar', 'gC'],\r\n 'bp': ['Pressao Atmosferica', 'hPa'], \r\n 'hs': ['Altura Significativa', 'm'], \r\n 'tp': ['Periodo de Pico', 'seg'], \r\n 'dp': ['Direcao de Pico', 'graus']}\t\t\t \r\n\r\nmake_figs('CF01 - Nov/2016', cf01_201611, 'cf01_201611_graphs.html')\r\n\r\ndf.to_html('cf01_201511_table.html')\r\n\r\n# --------------------------------------------------------------------------------------------- #\r\n## CF03_201606_BMOBR06\r\n\r\npathname = os.environ['HOME'] + '/Dropbox/BMOP/Processamento/data/CF03_201606_BMOP06/telemetria/'\r\nfilename = 'CF03_201606_BMOP06.csv'\r\n\r\n#carrega os dados da boia\r\ndf = pd.read_csv(pathname + filename, index_col='date', parse_dates=True)\r\n\r\ncf03_201606 = {'ws': ['Intensidade do Vento', 'm/s'],\r\n 'wd': ['Direcao do Vento', 'graus'],\r\n\t\t 'rh': ['Umidade Relativa', '%'],\r\n 'ate': ['Temperatura do Ar', 'gC'],\r\n 'bp': ['Pressao Atmosferica', 'hPa'], \r\n 'hs': ['Altura Significativa', 'm'], \r\n 'tp': ['Periodo de Pico', 'seg'], \r\n 'dp': ['Direcao de Pico', 'graus']}\t\t\t \r\n\r\nmake_figs('CF03 - Jun/2016', cf03_201606, 'cf03_201606_graphs.html')\r\n\r\ndf.to_html('cf03_201606_table.html')\r\n"
},
{
"alpha_fraction": 0.4919540286064148,
"alphanum_fraction": 0.5827586054801941,
"avg_line_length": 25.917526245117188,
"blob_id": "bb91eee272bbfb73efa4beaee41a1cae8e3579ba",
"content_id": "791ba02ffdaba1d05c5f8d2bacad6b83270dafd9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2626,
"license_type": "no_license",
"max_line_length": 86,
"num_lines": 97,
"path": "/procdb/qc_remo.py",
"repo_name": "hpppereira/django-remo",
"src_encoding": "UTF-8",
"text": "\"\"\"\nQualificação dos dados da REMO\nHenrique Pereira\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nplt.close('all')\n\n\n# filename = 'CF01_201602_BMOP05_REMO'\n# filename = 'CF01_201611_BMOP05_REMO'\nfilename = 'CF03_201606_BMOP06_REMO'\n\n\ndef uv2id(ndr_ucomp, ndr_vcomp, str_conv='cart'):\n\n # Intensidade vetorial.\n icomp = np.sqrt(ndr_ucomp ** 2 + ndr_vcomp ** 2)\n\n if str_conv == 'cart':\n # Direção vetorial na convenção CARTESIANA.\n dcomp = np.rad2deg(np.arctan2(ndr_vcomp, ndr_ucomp)) % 360.\n elif str_conv == 'meteo':\n # Direção vetorial na convenção METEOROLÓGICA.\n dcomp = (np.rad2deg(np.arctan2(ndr_ucomp, ndr_vcomp)) - 180.) % 360.\n elif str_conv == 'ocean':\n # Direção vetorial na convenção OCEANOGRÁFICA.\n dcomp = np.rad2deg(np.arctan2(ndr_ucomp, ndr_vcomp)) % 360.\n\n return icomp, dcomp\n\n\ndf = pd.read_table('data/' + filename + '.csv', sep=',',\n index_col='date', parse_dates=True)\n\nif filename == 'CF01_201602_BMOP05_REMO':\n df = df['2016-02-23 17:00':'2016-06-21 18:00']\n\nelif filename == 'CF03_201606_BMOP06_REMO':\n df = df['2016-06-24 00:00':'2016-10-01 23:00']\n\n# subtitui dados e intensidade e direcao das\n# correntes pelos dados brutos\nelif filename == 'CF01_201611_BMOP05_REMO':\n df = df['2016-11-06':'2016-12-31']\n\n df.drop(labels=['dp_ax', 'err', 'hs_ax', 'tp_ax'], axis=1, inplace=True)\n\n df[['mag1','dir1']] = np.nan\n\n raw = pd.read_csv('data/dataframe_CF01_201611_ADCP_DPL1_003_1.csv',\n index_col='date', parse_dates=True)\n\n u = raw['Eas1'].values\n v = raw['Nor1'].values\n\n i, d = uv2id(u, v, str_conv='ocean')\n\n adcp = pd.DataFrame(index=raw.index)\n adcp['mag1'] = i\n adcp['dir1'] = d\n\n df['2016-11-05 17:00:00':adcp.index[-1]][['mag1', 'dir1']] = adcp[['mag1','dir1']]\n\n df['mag2'] = 0\n df['mag3'] = 0\n df['dir2'] = 0\n df['dir3'] = 0\n\ndf['ate'].loc[df.ate < 0] = np.nan\ndf['rh'].loc[df.rh < 0] = np.nan\ndf['bp'].loc[df.ate > 1030] = np.nan\ndf['tp'].loc[df.tp < 4] = np.nan\n\nsbe = ['tsbe10', 'tsbe100', 'tsbe20', 'tsbe30', 'tsbe40', 'tsbe50', 'tsbe60',\n 'tsbe70', 'tsbe80', 'tsbe90', 'tsup']\n\nfor m in ['mag1']:\n df[m].loc[df[m] > 1000] = np.nan\n df[m].loc[df[m] <= 0] = np.nan\n\nfor d in ['dir1']:\n df[d].loc[df[d] > 360] = np.nan\n df[d].loc[df[d] < 0] = np.nan\n\nfor s in sbe:\n df[s].loc[df[s] > 30] = np.nan\n df[s].loc[df[s] < 15] = np.nan\n\nfor c in df.columns:\n plt.figure()\n df[c].plot(title=c)\nplt.show()\n\ndf.to_csv('data/' + filename + '_qc.csv', sep=',', na_rep='NaN', float_format='%.2f')"
},
{
"alpha_fraction": 0.5314465165138245,
"alphanum_fraction": 0.5849056839942932,
"avg_line_length": 26.65217399597168,
"blob_id": "bd245fe0fbc56215880e6232ac9a52256622fadf",
"content_id": "f0fbc98d14c7c76c8a41bb68c421ee2a2d4149e1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 636,
"license_type": "no_license",
"max_line_length": 103,
"num_lines": 23,
"path": "/remo/migrations/0051_auto_20190501_1509.py",
"repo_name": "hpppereira/django-remo",
"src_encoding": "UTF-8",
"text": "# Generated by Django 2.0.6 on 2019-05-01 15:09\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('remo', '0050_auto_20190501_1507'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='documentos',\n name='docs',\n field=models.FileField(blank=True, null=True, upload_to='docs/', verbose_name='Documento'),\n ),\n migrations.AlterField(\n model_name='documentos',\n name='url',\n field=models.CharField(blank=True, max_length=200, null=True, verbose_name='URL'),\n ),\n ]\n"
},
{
"alpha_fraction": 0.48220065236091614,
"alphanum_fraction": 0.5339806079864502,
"avg_line_length": 26.090909957885742,
"blob_id": "5dfdc8b7ec146cffa36fb321ef96f9ce75f52280",
"content_id": "40e208be43bf3da805b2739537d631fe1b7df0c4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 618,
"license_type": "no_license",
"max_line_length": 159,
"num_lines": 22,
"path": "/remo/migrations/0031_auto_20181220_2048.py",
"repo_name": "hpppereira/django-remo",
"src_encoding": "UTF-8",
"text": "# Generated by Django 2.1.2 on 2018-12-20 22:48\r\n\r\nfrom django.db import migrations, models\r\n\r\n\r\nclass Migration(migrations.Migration):\r\n\r\n dependencies = [\r\n ('remo', '0030_merge_20181220_2048'),\r\n ]\r\n\r\n operations = [\r\n migrations.RemoveField(\r\n model_name='estacao',\r\n name='csv',\r\n ),\r\n migrations.AlterField(\r\n model_name='person',\r\n name='position',\r\n field=models.CharField(choices=[('C', 'Coordenador'), ('P', 'Pesquisador'), ('A', 'Aluno'), ('T', 'Tecnico')], max_length=1, verbose_name='Cargo'),\r\n ),\r\n ]\r\n"
},
{
"alpha_fraction": 0.5242424011230469,
"alphanum_fraction": 0.5626262426376343,
"avg_line_length": 39.25,
"blob_id": "2cc3c88b4a3fdf28ac1e2762191db95e01fde55c",
"content_id": "fd5847645985130515326903d77a7d60295fa0f5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 997,
"license_type": "no_license",
"max_line_length": 160,
"num_lines": 24,
"path": "/remo/migrations/0011_index.py",
"repo_name": "hpppereira/django-remo",
"src_encoding": "UTF-8",
"text": "# Generated by Django 2.1.2 on 2018-11-07 10:22\r\n\r\nfrom django.db import migrations, models\r\n\r\n\r\nclass Migration(migrations.Migration):\r\n\r\n dependencies = [\r\n ('remo', '0010_auto_20181028_1914'),\r\n ]\r\n\r\n operations = [\r\n migrations.CreateModel(\r\n name='Index',\r\n fields=[\r\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\r\n ('position', models.CharField(choices=[('L', 'O que é'), ('C', 'Objetivos'), ('O', 'Dados')], max_length=1, verbose_name='Tipo de Publicação')),\r\n ('title', models.CharField(max_length=200, verbose_name='Titulo em Português')),\r\n ('title_en', models.CharField(max_length=200, verbose_name='Titulo em Inglês')),\r\n ('text_pt', models.TextField(verbose_name='Texto em Português')),\r\n ('text_en', models.TextField(verbose_name='Texto em Inglês')),\r\n ],\r\n ),\r\n ]\r\n"
},
{
"alpha_fraction": 0.6796610355377197,
"alphanum_fraction": 0.693220317363739,
"avg_line_length": 28.848100662231445,
"blob_id": "4a131744907b396370c10e53c36e23155377701e",
"content_id": "b78c00ec9eb5ab616061823019ba46f894f6f32f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2377,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 79,
"path": "/procdb/add_equipe_sqlite3.py",
"repo_name": "hpppereira/django-remo",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nFerramentas para popular o banco de dados da página da REMO\n\n**Funções**\n\n>> :func:`funcao` <<\n Popular BD.\n\n**Instituições**\n1 - UFRJ\n2 - UFBA\n3 - Petrobras\n\n**Cargos Pesquisador**\nA - Coordenador\nB - Pesquisador\nC - Aluno Pós-Graduação\nD - Aluno Graduação\nE - Técnico\n----------\n\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nimport sqlite3\nfrom sqlite3 import Error\n\n\n# read csv with data.\ndf = pd.read_excel('data/tabela_equipe_REMO.xlsx', sep=',')\n\n# subsitui palavras por indices do banco (verificar no sqlite browser o ID)\ndf.replace(to_replace='UFRJ', value=1, inplace=True)\ndf.replace(to_replace='UFBA', value=2, inplace=True)\ndf.replace(to_replace='Petrobras', value=3, inplace=True)\ndf.replace(to_replace='CHM', value=5, inplace=True)\ndf.replace(to_replace='ATLANTIS', value=6, inplace=True)\ndf.replace(to_replace='IEAPM', value=7, inplace=True)\ndf.replace(to_replace='University of Reading', value=8, inplace=True)\ndf.replace(to_replace='Mercator Ocean', value=9, inplace=True)\ndf.replace(to_replace='CMCC', value=10, inplace=True)\ndf.replace(to_replace='NERSC', value=11, inplace=True)\ndf.replace(to_replace='IAP/CAS', value=12, inplace=True)\ndf.replace(to_replace='USP', value=13, inplace=True)\ndf.replace(to_replace='TENDRAL', value=14, inplace=True)\n\ndf.replace(to_replace='Coordenador', value=u'A', inplace=True)\ndf.replace(to_replace='Pesquisador', value=u'B', inplace=True)\ndf.replace(to_replace='Aluno Pós-Graduação', value=u'C', inplace=True)\ndf.replace(to_replace='Aluno Graduação', value=u'D', inplace=True)\ndf.replace(to_replace='Técnico', value=u'E', inplace=True)\ndf.replace(to_replace='Pesquisador Egresso', value=u'F', inplace=True)\ndf.replace(to_replace='Colaborador Externo', value=u'G', inplace=True)\n\n# list of tuples\ndata = [tuple([df.iloc[i][0], df.iloc[i][1], int(df.iloc[i][2]),\n df.iloc[i][3], df.iloc[i][4]]) for i in range(len(df))]\n\ntry:\n conn = sqlite3.connect('../db.sqlite3')\n conn.text_factory = str\nexcept Error as e:\n print(e)\n\ncur = conn.cursor()\n\n# delete from your_table;\n# delete from sqlite_sequence where name='your_table';\n \nsql = ''' INSERT INTO remo_person(name,email,institution_id,lattes,position)\n VALUES (?,?,?,?,?)'''\n\nwith conn:\n # cur.execute(sql, ('teste', 'teste@teste', 1, 'teste.com', 'A'))\n cur.executemany(sql, data)\n\nconn.close()\n \n"
},
{
"alpha_fraction": 0.49821045994758606,
"alphanum_fraction": 0.5254116058349609,
"avg_line_length": 33.07316970825195,
"blob_id": "afdf29f028266fa7ad15d7ffa39527661eba011d",
"content_id": "53ea445e387d661bc153c7a6f767bcf6c2372beb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1399,
"license_type": "no_license",
"max_line_length": 210,
"num_lines": 41,
"path": "/remo/migrations/0062_auto_20190705_1651.py",
"repo_name": "hpppereira/django-remo",
"src_encoding": "UTF-8",
"text": "# Generated by Django 2.0.5 on 2019-07-05 16:51\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('remo', '0061_auto_20190705_1522'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Contacts',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=60, verbose_name='Nome')),\n ('email', models.CharField(max_length=30, verbose_name='Email')),\n ('tel', models.CharField(max_length=30, verbose_name='Tel')),\n ],\n options={\n 'verbose_name': 'Contatos',\n 'verbose_name_plural': 'Contatos',\n },\n ),\n migrations.RemoveField(\n model_name='documentos',\n name='dire',\n ),\n migrations.AlterField(\n model_name='publicacao',\n name='position',\n field=models.CharField(choices=[('A', 'Artigos'), ('B', 'Livros'), ('C', 'Congressos'), ('D', 'Monografia'), ('E', 'Mestrado'), ('F', 'Doutorado')], max_length=1, verbose_name='Tipo de Publicação'),\n ),\n migrations.DeleteModel(\n name='Diretorios',\n ),\n migrations.DeleteModel(\n name='Documentos',\n ),\n ]\n"
},
{
"alpha_fraction": 0.45255473256111145,
"alphanum_fraction": 0.5802919864654541,
"avg_line_length": 17.571428298950195,
"blob_id": "3eafdb2a483c0bd07102b3e387040e99d4dacf91",
"content_id": "4f753bb4886e27b2070c071cb37ef80afd268913",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 274,
"license_type": "no_license",
"max_line_length": 47,
"num_lines": 14,
"path": "/remo/migrations/0030_merge_20181220_2048.py",
"repo_name": "hpppereira/django-remo",
"src_encoding": "UTF-8",
"text": "# Generated by Django 2.1.2 on 2018-12-20 22:48\r\n\r\nfrom django.db import migrations\r\n\r\n\r\nclass Migration(migrations.Migration):\r\n\r\n dependencies = [\r\n ('remo', '0018_estacao_csv'),\r\n ('remo', '0029_auto_20181212_0032'),\r\n ]\r\n\r\n operations = [\r\n ]\r\n"
},
{
"alpha_fraction": 0.5403226017951965,
"alphanum_fraction": 0.6028226017951965,
"avg_line_length": 24.105262756347656,
"blob_id": "041e96f83340eb113b8a6d5f02c96023a089637e",
"content_id": "a13e4adced949d4dbce39893599bb515f8abe752",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 496,
"license_type": "no_license",
"max_line_length": 118,
"num_lines": 19,
"path": "/remo/migrations/0026_auto_20181209_2350.py",
"repo_name": "hpppereira/django-remo",
"src_encoding": "UTF-8",
"text": "# Generated by Django 2.1.2 on 2018-12-10 01:50\r\n\r\nfrom django.db import migrations, models\r\nimport django.utils.timezone\r\n\r\n\r\nclass Migration(migrations.Migration):\r\n\r\n dependencies = [\r\n ('remo', '0025_auto_20181209_2348'),\r\n ]\r\n\r\n operations = [\r\n migrations.AlterField(\r\n model_name='dados',\r\n name='date',\r\n field=models.DateTimeField(blank=True, default=django.utils.timezone.now, null=True, verbose_name='Data'),\r\n ),\r\n ]\r\n"
},
{
"alpha_fraction": 0.5272232294082642,
"alphanum_fraction": 0.5589836835861206,
"avg_line_length": 30.485713958740234,
"blob_id": "0598a3853912f69026e5e7b57f322db997a90d7d",
"content_id": "5f8fe9f731a10d598c483611a306158143e0a166",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1106,
"license_type": "no_license",
"max_line_length": 140,
"num_lines": 35,
"path": "/remo/migrations/0052_auto_20190502_2322.py",
"repo_name": "hpppereira/django-remo",
"src_encoding": "UTF-8",
"text": "# Generated by Django 2.0.6 on 2019-05-02 23:22\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('remo', '0051_auto_20190501_1509'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Diretorios',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('title', models.CharField(max_length=200, verbose_name='Nome do diretório')),\n ],\n options={\n 'verbose_name': 'Diretórios',\n 'verbose_name_plural': 'Diretórios',\n },\n ),\n migrations.RemoveField(\n model_name='documentos',\n name='url',\n ),\n migrations.AddField(\n model_name='documentos',\n name='dire',\n field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='remo.Diretorios', verbose_name='Diretório'),\n preserve_default=False,\n ),\n ]\n"
},
{
"alpha_fraction": 0.48843929171562195,
"alphanum_fraction": 0.5780346989631653,
"avg_line_length": 19.352941513061523,
"blob_id": "ed4ec6744ae3068391d2dcdb29246f1f222ce0d5",
"content_id": "a750db178ca535ee6c1df2a1325adbc285ebbe92",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 346,
"license_type": "no_license",
"max_line_length": 47,
"num_lines": 17,
"path": "/remo/migrations/0048_auto_20190406_0322.py",
"repo_name": "hpppereira/django-remo",
"src_encoding": "UTF-8",
"text": "# Generated by Django 2.1.7 on 2019-04-06 03:22\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n atomic = False\n dependencies = [\n ('remo', '0047_auto_20190405_2223'),\n ]\n\n operations = [\n migrations.RenameModel(\n old_name='Serie',\n new_name='Campanha',\n ),\n ]\n"
},
{
"alpha_fraction": 0.48695650696754456,
"alphanum_fraction": 0.5282608866691589,
"avg_line_length": 19.904762268066406,
"blob_id": "22635ba5574ec27408648af6eb9a423d68a9ab06",
"content_id": "1f886f6fde07f2797c2a74f194cff4587165b19e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 460,
"license_type": "no_license",
"max_line_length": 47,
"num_lines": 21,
"path": "/remo/migrations/0016_auto_20181128_0014.py",
"repo_name": "hpppereira/django-remo",
"src_encoding": "UTF-8",
"text": "# Generated by Django 2.1.2 on 2018-11-28 02:14\r\n\r\nfrom django.db import migrations\r\n\r\n\r\nclass Migration(migrations.Migration):\r\n\r\n dependencies = [\r\n ('remo', '0015_doi_journal'),\r\n ]\r\n\r\n operations = [\r\n migrations.RenameModel(\r\n old_name='Destaques',\r\n new_name='Destaque',\r\n ),\r\n migrations.RenameModel(\r\n old_name='Publicacoes',\r\n new_name='Publicacao',\r\n ),\r\n ]\r\n"
}
] | 68 |
SamirZabirov/Tasks
|
https://github.com/SamirZabirov/Tasks
|
fb18618817e7573db0016e5b23091d8062f25527
|
9904451c740f9643d4a9c88ef56e0ed4a3d24246
|
fa776073bec0f02e1f4554f1fc17efa25683a67e
|
refs/heads/master
| 2020-08-15T04:51:46.812297 | 2019-10-15T11:36:28 | 2019-10-15T11:36:28 | 215,282,497 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5129251480102539,
"alphanum_fraction": 0.5469387769699097,
"avg_line_length": 17.399999618530273,
"blob_id": "8932bfa767e0a33409b775db170213e1de219933",
"content_id": "9b51e936ad5dc37af96e22935fae27295c901f44",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 803,
"license_type": "no_license",
"max_line_length": 54,
"num_lines": 40,
"path": "/main.py",
"repo_name": "SamirZabirov/Tasks",
"src_encoding": "UTF-8",
"text": "from random import randint\ncat = 1\ndog = 1\ndogs =[]\n\nwhile cat <=5: \n cat += 1 \n text2 = randint(1,6) \n line = int(input('вводите число, от одного до 5:')) \n perem = abs (line - text2)\n \n if perem == 0: \n dog = 6 \n dogs.append(cat) \n elif perem == 1: \n dog = 5 \n dogs.append(cat) \n elif perem == 2: \n point = 4 \n dogs.append(cat) \n elif perem == 3: \n dog = 3 \n dogs.append(cat) \n elif perem == 4: \n dog = 2 \n dogs.append(cat) \n elif perem == 2: \n dog = 4 \n dogs.append(cat)\n elif perem == 5: \n dog = 1 \n dogs.append(cat) \n print(\"кол-ов очков: \",cat ) \n every = sum(dogs) \n\nprint (\"общее кол - во очков\",every)\nif every< 25: \n print(\"вы проиграли\")\nelse: \n print (\"вы выйграли\")"
}
] | 1 |
mikanikos/Higgs-Boson-Challenge
|
https://github.com/mikanikos/Higgs-Boson-Challenge
|
3b284b8cb66fb09aa58989e83757a01076751728
|
994d9f09a8ffc7ee6379c918bfdc69c22ad38509
|
b5962b03cd04064ff8008eff227bdc0b83c229cb
|
refs/heads/master
| 2020-04-01T04:47:02.917368 | 2018-10-29T20:51:22 | 2018-10-29T20:51:22 | 152,877,219 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6382550597190857,
"alphanum_fraction": 0.6422818899154663,
"avg_line_length": 30.0625,
"blob_id": "83ad622f92fcdada0ffa2e5bcbc17b6f64f2a0ea",
"content_id": "bc0482af4be6c080d4f22443868a9f21ab9bab01",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1490,
"license_type": "permissive",
"max_line_length": 87,
"num_lines": 48,
"path": "/helpers.py",
"repo_name": "mikanikos/Higgs-Boson-Challenge",
"src_encoding": "UTF-8",
"text": "import numpy as np\n\n\n# Auxiliary function for stochastic gradient descent\ndef batch_iter(y, tx, batch_size, num_batches=1, shuffle=True):\n data_size = len(y)\n\n if shuffle:\n shuffle_indices = np.random.permutation(np.arange(data_size))\n shuffled_y = y[shuffle_indices]\n shuffled_tx = tx[shuffle_indices]\n else:\n shuffled_y = y\n shuffled_tx = tx\n for batch_num in range(num_batches):\n start_index = batch_num * batch_size\n end_index = min((batch_num + 1) * batch_size, data_size)\n if start_index != end_index:\n yield shuffled_y[start_index:end_index], shuffled_tx[start_index:end_index]\n\n\n# Sigmoid function for logistic regression\ndef sigmoid(t):\n return 1.0 / (1 + np.exp(-t))\n\n\n# Computing accuracy by comparing the predictions with the test vector\ndef compute_accuracy(y_true, y_pred):\n return sum(np.array(y_pred) == np.array(y_true)) / float(len(y_true))\n\n\n# Auxiliary function used for splitting data\ndef split_data(x, y, ratio, myseed=1):\n \"\"\"split the dataset based on the split ratio.\"\"\"\n # set seed\n np.random.seed(myseed)\n # generate random indices\n num_row = len(y)\n indices = np.random.permutation(num_row)\n index_split = int(np.floor(ratio * num_row))\n index_tr = indices[: index_split]\n index_te = indices[index_split:]\n # create split\n x_tr = x[index_tr]\n x_te = x[index_te]\n y_tr = y[index_tr]\n y_te = y[index_te]\n return x_tr, x_te, y_tr, y_te"
},
{
"alpha_fraction": 0.6083904504776001,
"alphanum_fraction": 0.6139405965805054,
"avg_line_length": 34.61627960205078,
"blob_id": "f952fc99a170791ff26772199c20bf0043f9768c",
"content_id": "10c02a397c16804a79375194f46fe95be9409c09",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6126,
"license_type": "permissive",
"max_line_length": 100,
"num_lines": 172,
"path": "/cross_validation.py",
"repo_name": "mikanikos/Higgs-Boson-Challenge",
"src_encoding": "UTF-8",
"text": "import numpy as np\n\nfrom implementations import *\nfrom helpers import compute_accuracy, split_data\nfrom data_processing import process_data, build_poly, expand_data, add_constants, clean_data\nfrom costs import compute_loss, compute_loss_log_reg\nfrom proj1_helpers import predict_labels\n\n\n# Building k indices for cross validation\ndef build_k_indices(y, k_fold, seed):\n num_row = y.shape[0]\n interval = int(num_row / k_fold)\n np.random.seed(seed)\n indices = np.random.permutation(num_row)\n k_indices = [indices[k * interval: (k + 1) * interval] for k in range(k_fold)]\n return np.array(k_indices)\n \n\n# Cross validation\ndef cross_validation(y, x, k_indices, k, lambda_, degree):\n \n # Dividing in subgroups\n te_indice = k_indices[k]\n tr_indice = k_indices[~(np.arange(k_indices.shape[0]) == k)]\n tr_indice = tr_indice.reshape(-1)\n \n y_te = y[te_indice]\n y_tr = y[tr_indice]\n tx_te = x[te_indice]\n tx_tr = x[tr_indice]\n\n # Preprocessing data: cleaning, standardazing and adding constant column\n tx_tr, tx_te = process_data(tx_tr, tx_te, y_tr, y_te)\n\n # Feature augmentation through polynomials\n tx_tr = build_poly(tx_tr, degree)\n tx_te = build_poly(tx_te, degree)\n\n # Printing degree and lambda tested\n print(\"Test: d = \", degree, \"; l = \", lambda_)\n\n # Training with ridge regression\n w, loss = ridge_regression(y_tr, tx_tr, lambda_)\n \n # Computing prediction vector\n y_pred = predict_labels(w, tx_te)\n \n # Computing accuracy on test set \n accuracy = compute_accuracy(y_te, y_pred)\n\n # Log informations\n print(\"Accuracy = \", accuracy, \"; loss = \", loss, \"\\n\")\n\n return loss_te, accuracy\n\n\n# Method to select the best hyper-parameters for a model\ndef best_model_selection(x, y, degrees, k_fold, lambdas, seed = 10):\n # Splitting data in k fold\n k_indices = build_k_indices(y, k_fold, seed)\n \n # Iterating over degrees and lambdas\n best_lambdas = []\n best_losses = []\n best_acc = []\n # For each degree\n for degree in degrees:\n losses = []\n acc_te = []\n # For each lambda\n for lambda_ in lambdas:\n losses_tmp = []\n acc_te_tmp = []\n # For each split\n for k in range(k_fold):\n # Using cross validation for each degree and lambda\n loss, acc = cross_validation(y, x, k_indices, k, lambda_, degree)\n # Saving accuracy and loss \n losses_tmp.append(loss)\n acc_te_tmp.append(acc)\n\n # Taking the mean of loss and accuracy for the cross validation iteration\n losses.append(np.mean(losses_tmp))\n acc_te.append(np.mean(acc_te_tmp))\n \n # Selecting the best parameters for maximizing the accuracy\n ind_lambda_opt = np.argmax(acc_te)\n best_lambdas.append(lambdas[ind_lambda_opt])\n best_acc.append(acc_te[ind_lambda_opt])\n \n ind_best_degree = np.argmax(best_acc) \n print(\"Best accuracy: \", max(best_acc))\n\n return degrees[ind_best_degree], best_lambdas[ind_best_degree]\n\n\n# Ridge regression trials\ndef ridge_trials(y, tx, tx_sub, degree_range, lambda_range, partitions=2):\n ## Split data into test and training sets\n ## If partitions > 2, use k-fold cross-validation\n glob_tx_tr, glob_tx_te, glob_y_tr, glob_y_te = split_data(tx, y, 0.8)\n\n ## Initial results: losses, weights, preditions and (test) losses\n models = []\n losses = []\n accuracies = []\n predictions = []\n \n ## Loops over range of degrees\n degrees = range(degree_range[0], degree_range[1])\n lambdas = np.logspace(lambda_range[0], lambda_range[1], num=1+(lambda_range[1]-lambda_range[0]))\n for degree in degrees:\n ## Loops over range of lambdas\n for lambda_ in lambdas:\n 
print(\"Trying degree\", degree,\"with lambda =\", lambda_,\":\")\n\n tx_tr, tx_te, tx_pred = expand(degree, glob_tx_tr, glob_tx_te, tx_sub)\n\n w, loss = ridge_regression(glob_y_tr, tx_tr, lambda_)\n print(\"\\tTraining Loss = \", loss)\n\n y_test = predict_labels(w, tx_te)\n test_loss = compute_loss(glob_y_te, tx_te, w)\n accuracy = compute_accuracy((y_test+1)/2, glob_y_te)\n y_pred = predict_labels(w, tx_pred)\n\n print(\"\\tTest Loss = \", test_loss, \" Test Accuracy = \", accuracy )\n models.append((\"ridge_regression\", degree, lambda_, w))\n losses.append(test_loss)\n accuracies.append(accuracy)\n predictions.append(y_pred)\n return models, losses, accuracies, predictions\n \nMAX_ITERS = 100 \nGAMMA = 0.6\n\n## Performs logistic trials over set of hyper-parameters (degrees)\n## Results result from these trials with corresponding test losses\ndef logistic_trials(y, tx, tx_sub, degree_range, partitions=2):\n ## Split data into test and training sets\n ## If partitions > 2, use k-fold cross-validation\n glob_tx_tr, glob_tx_te, glob_y_tr, glob_y_te = split_data(tx, y, 0.8)\n\n ## Initial results: losses, weights, preditions and (test) losses\n models = []\n losses = []\n accuracies = []\n predictions = []\n \n ## Loops over range of degrees\n degrees = range(degree_range[0], degree_range[1])\n for degree in degrees:\n print(\"Trying degree\", degree, \":\")\n\n tx_tr, tx_te, tx_pred = expand(degree, glob_tx_tr, glob_tx_te, tx_sub) \n initial_w = np.ones(tx_tr.shape[1])\n \n w, loss = logistic_regression(glob_y_tr, tx_tr, initial_w, MAX_ITERS, GAMMA)\n print(\"\\tTraining Loss = \", loss)\n \n y_test = predict_labels(w, tx_te)\n test_loss = compute_loss(glob_y_te, tx_te, w, func=\"logistic\")\n accuracy = compute_accuracy((y_test+1)/2, glob_y_te)\n y_pred = predict_labels(w, tx_pred)\n\n print(\"\\tTest Loss = \", test_loss, \" Test Accuracy = \", accuracy )\n models.append((\"logistic_SGD\", degree, w))\n losses.append(test_loss)\n accuracies.append(accuracy)\n predictions.append(y_pred)\n return models, losses, accuracies, predictions\n"
},
{
"alpha_fraction": 0.7371188402175903,
"alphanum_fraction": 0.7455310225486755,
"avg_line_length": 26.97058868408203,
"blob_id": "92ca400f6589b1b8cb6fd7d427e4b2405e9c64d8",
"content_id": "6a53ff8802883724714074ce7c6f80e1e2e032fc",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 951,
"license_type": "permissive",
"max_line_length": 78,
"num_lines": 34,
"path": "/run.py",
"repo_name": "mikanikos/Higgs-Boson-Challenge",
"src_encoding": "UTF-8",
"text": "import numpy as np\n\nfrom implementations import ridge_regression\nfrom proj1_helpers import load_csv_data, predict_labels, create_csv_submission\nfrom data_processing import process_data, build_poly\n\nprint(\"Loading data\\n\")\n\n# Loading data from csv files\ny_tr, tx_tr, ids_tr = load_csv_data(\"data/train.csv\")\ny_te, tx_te, ids_te = load_csv_data(\"data/test.csv\")\n\n# Hyper-parameters definitions\ndegree = 7\nlambda_ = 0.00025\n\n# Preprocessing data: cleaning, standardazing and adding constant column\ntx_tr, tx_te = process_data(tx_tr, tx_te, y_tr, y_te)\n\n# Feature augmentation through polynomials\ntx_tr = build_poly(tx_tr, degree)\ntx_te = build_poly(tx_te, degree)\n\n# Training with ridge regression\nprint(\"Training the model\\n\")\nweights, _ = ridge_regression(y_tr, tx_tr, lambda_)\n\n# Computing prediction vector\ny_pred = predict_labels(weights, tx_te)\n\n# Creating file for submission\ncreate_csv_submission(ids_te, y_pred, \"prediction.csv\")\n\nprint(\"Done\")\n"
},
{
"alpha_fraction": 0.6320784687995911,
"alphanum_fraction": 0.6523252129554749,
"avg_line_length": 30,
"blob_id": "a1e3893aaba94de4540aa42396539c0298ae76d4",
"content_id": "76d23d31a03230fc6351a556ba79b2d24796168c",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3161,
"license_type": "permissive",
"max_line_length": 98,
"num_lines": 102,
"path": "/data_processing.py",
"repo_name": "mikanikos/Higgs-Boson-Challenge",
"src_encoding": "UTF-8",
"text": "import numpy as np\n\n\n# Cleaning data from -999 values\ndef clean_data(x):\n\t## Convert -999 to NaN as we believe these are misidentified data\n ## Recording as NaN prevents them from influencing nanmean calculations\n x[x == -999] = np.nan\n mean_x = np.nanmean(x, axis=0)\n return np.where(np.isnan(x), mean_x, x)\n\n\n# Standardizing by subtracting the mean and dividing by standard deviation\ndef standardize(x):\n \"\"\"Standardize the original data set.\"\"\"\n means = np.mean(x, axis=0)\n x = x-means\n stds = np.std(x, axis=0)\n # this prevents division by zero\n stds[stds == 0] = 1\n x = x/stds\n return x, means, stds\n\n\n# Adding 1-column at the data matrix \ndef add_constants(x, y):\n return np.c_[np.ones((y.shape[0], 1)), x]\n\n\n# Preprocessing data function\ndef process_data(tx_tr, tx_te, y_tr, y_te):\n \n # Cleaning data from -999 values\n tx_tr = clean_data(tx_tr)\n tx_te = clean_data(tx_te)\n\n # Standardizing data\n tx_tr, mean, std = standardize(tx_tr)\n tx_te = (tx_te-mean)/std\n\n # Adding constants vector as a first column \n tx_tr = add_constants(tx_tr, y_tr)\n tx_te = add_constants(tx_te, y_te)\n\n return tx_tr, tx_te\n\n\n## Because expansion and standardization are transformations of our initial feature set\n## We must apply identical transformations to all feature sets we wish to make predictions upon\ndef expand_data(degree, tx_tr, tx_te, tx_pred = None):\n ## Extract jet numbers as three indicator variables\n ## Remove them so they will not be standardized or expanded\n jets_tr = jet_nums(tx_tr)\n jets_te= jet_nums(tx_te)\n ## Remove redundant columns\n res_tr = extract_col(tx_tr)\n res_te = extract_col(tx_te)\n ## Expand features to include polynomial terms\n res_tr = build_poly(tx_tr, degree)\n res_te = build_poly(tx_te, degree)\n ## Standardize\n res_tr, mean, std = standardize(res_tr)\n res_te = (res_te-mean)/std\n ## Fix NaNs resulting from division by 0\n res_tr[np.isnan(res_tr)]=1\n res_te[np.isnan(res_te)]=1\n ## Reconcatenate jet indicator columns\n res_tr = np.c_[res_tr, jets_tr]\n res_te = np.c_[res_te, jets_te]\n return res_tr, res_te #, res_pred\n\n\n## Jet number seems to be categorical, taking on three discrete values\n## Relative values do not seem to have meaning, so coefficients are not a good way to treat this\n## Solution: Split this into three indicator vectors. Each indicator takes a different coefficient\ndef jet_nums(tx):\n jets = tx[:,22]\n new_tx = np.delete(tx, 22, axis=1)\n jet0 = np.zeros((jets.shape[0],1))\n jet0[jets==0] = 1\n jet1 = np.zeros((jets.shape[0],1))\n jet1[jets==1] = 1\n jet2 = np.zeros((jets.shape[0],1))\n jet2[jets==2] = 1\n jet3 = np.zeros((jets.shape[0],1))\n jet3[jets==3] = 1\n result = np.c_[jet0, jet1, jet2, jet3]\n return result\n\n\n# Extract 22th column from the data\ndef extract_col(tx):\n result = np.delete(tx, 22, axis=1)\n return result\n\n\n# Adding polynomial features up to the selected degree\ndef build_poly(x, degree):\n poly_x = np.ones((len(x), 1))\n for d in range(1, degree+1):\n poly_x = np.c_[poly_x, np.power(x, d)]\n return poly_x"
},
{
"alpha_fraction": 0.6326304078102112,
"alphanum_fraction": 0.6392896771430969,
"avg_line_length": 22.128204345703125,
"blob_id": "f3249d17b66b9e90ff92cfc395723b695f9c21b0",
"content_id": "e38ac5c75fb1add35d5eeafdabf8459a86ea0028",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 901,
"license_type": "permissive",
"max_line_length": 97,
"num_lines": 39,
"path": "/costs.py",
"repo_name": "mikanikos/Higgs-Boson-Challenge",
"src_encoding": "UTF-8",
"text": "import numpy as np\nfrom helpers import sigmoid\n\n### Loss computing functions\n\n# Mean Square Error\ndef compute_mse(e):\n return 1/2*np.mean(e**2)\n\n# Root Mean Square Error\ndef compute_rmse(e):\n return np.sqrt(2 * compute_mse(e))\n\n# Mean Absolute Error\ndef compute_mae(e):\n return np.mean(np.abs(e))\n\n\n# Computing loss for logistic regression\ndef compute_loss_log_reg(y, tx, w):\n return -(y.T.dot(np.log(sigmoid(tx.dot(w)))) + (1 - y).T.dot(np.log(1 - sigmoid(tx.dot(w)))))\n\n\n# Computing loss with a selected cost function\ndef compute_loss(y, tx, w, func=\"mse\"):\n # Computing error\n error = y - tx.dot(w)\n\n # Using Mean square error\n if func == \"mse\":\n return compute_mse(error)\n \n # Using Mean absolute error\n elif func == \"mae\":\n return compute_mae(error)\n \n # Using Root mean square error\n elif func == \"rmse\":\n return compute_rmse(error)"
},
{
"alpha_fraction": 0.649026095867157,
"alphanum_fraction": 0.6519662141799927,
"avg_line_length": 34.80263137817383,
"blob_id": "48d48715f586b444916b0c1743454365a6282a78",
"content_id": "252620d5c0c136c3edc3f844c81a18e2030eb209",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2721,
"license_type": "permissive",
"max_line_length": 106,
"num_lines": 76,
"path": "/implementations.py",
"repo_name": "mikanikos/Higgs-Boson-Challenge",
"src_encoding": "UTF-8",
"text": "import numpy as np\nfrom costs import compute_loss, compute_loss_log_reg\nfrom gradients import compute_gradient, compute_gradient_log_reg\nfrom helpers import batch_iter\n\n\n# Gradient descent\ndef least_squares_GD(y, tx, initial_w, max_iters, gamma):\n # initializing the weights\n w = initial_w\n for i in range(max_iters):\n # computing the gradient\n gradient = compute_gradient(y, tx, w)\n # updating the weights\n w = w - gamma * gradient\n # return w with the corresponding loss \n return w, compute_loss(y, tx, w)\n\n\n# Stochastic gradient descent\ndef least_squares_SGD(y, tx, initial_w, max_iters, gamma):\n # initializing the weights, the batch size and the number of batches\n w = initial_w\n batch_size = 1\n num_batches = 1\n for i in range(max_iters):\n # iterating for each batch\n for y_batch, tx_batch in batch_iter(y, tx, batch_size, num_batches):\n # computing the gradient\n gradient = compute_gradient(y_batch, tx_batch, w)\n # updating the weights\n w = w - gamma * gradient\n # return w with the corresponding loss \n return w, compute_loss(y, tx, w)\n\n# Least squares\ndef least_squares(y, tx):\n # computing the weights by using the formula\n w = np.dot(np.dot(np.linalg.inv(tx.T.dot(tx)), tx.T), y)\n # return w with the corresponding loss \n return w, compute_loss(y, tx, w)\n\n\n# Ridge regression\ndef ridge_regression(y, tx, lambda_):\n # computing the weights by using the formula\n lambda_prime = 2 * tx.shape[0] * lambda_\n w = np.dot(np.dot(np.linalg.inv(np.dot(tx.T, tx) + lambda_prime * np.identity(tx.shape[1])), tx.T), y)\n # return w with the corresponding loss \n return w, compute_loss(y, tx, w)\n\n\n# Logistic regression\ndef logistic_regression(y, tx, initial_w, max_iters, gamma):\n # initializing the weights\n w = initial_w\n for iter in range(max_iters):\n # computing the gradient for logistic regression\n gradient = compute_gradient_log_reg(y, tx, w)\n # updating the weights\n w = w - gamma * gradient\n # return w with the corresponding loss\n return w, compute_loss_log_reg(y, tx, w)\n\n\n# Penalized logistic regression\ndef reg_logistic_regression(y, tx, lambda_, initial_w, max_iters, gamma):\n # initializing the weights\n w = initial_w\n for iter in range(max_iters):\n # computing the gradient for logistic regression\n gradient = compute_gradient_log_reg(y, tx, w) + (lambda_ / len(y)) * w\n # updating the weights\n w = w - gamma * gradient\n # return w with the corresponding loss and the regularizing term\n return w, compute_loss_log_reg(y, tx, w) + (1/2) * lambda_ * (np.linalg.norm(w)**2)\n"
},
{
"alpha_fraction": 0.7794913649559021,
"alphanum_fraction": 0.7857142686843872,
"avg_line_length": 56.75,
"blob_id": "84f93ae1a7f3c95c3c07a90023549a58bf6289b8",
"content_id": "1cb22a75d4bca7ae2757df01a360fb7175711bb2",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 3696,
"license_type": "permissive",
"max_line_length": 493,
"num_lines": 64,
"path": "/README.md",
"repo_name": "mikanikos/Higgs-Boson-Challenge",
"src_encoding": "UTF-8",
"text": "# Machine Learning Project 1 - The Higgs Boson Challenge \n\nThis project contains the code used for an academic Kaggle competition on The Higgs Boson Machine Learning Challenge organized during the Machine Learning course at EPFL.\nIt contains several implementations of Machine Learning methods and optimization functions for achieving the best result we could get on the competition. \n\n## Project overview\n\nThe project is organized in several files in order to guarantee modularity and have a clear structure: \n\n - `implementations.py` contains the functions of six Machine Learning methods (Gradient Descent,\n Stochastic Gradient Descent, Least Squares, Ridge Regression, Logistic Regression,\n Regularized Logistic Regression).\n - `costs.py` contains the error functions used for computing the loss (Mean Square Error,\n \t Root Mean Square Error, Mean Absolute Error) and the loss methods. \n - `gradients.py` contains the code for computing the gradients.\n - `data_processing.py` contains the code for preprocessing data before training the model,\n \ti.e. cleaning, standardizing, feature engineering and augmentation through polynomials.\n - `helpers.py` contains different tools for different purposes, such as \"batch_iter\" for Stochastic\n \tGradient Descent, \"sigmoid\" for computing the loss and the gradient for Logistic Regression and \n \t\"compute_accuracy\" for assessing model performance.\n - `proj1_helpers.py` is a script with some useful functions for loading data and generating the predictions. \n - `cross_validation.py` contains some utilities for the local testing of the models and some\n \thyper-parameters tuning methods for getting the best parameters.\n - `plots.py` contains tools for plotting and testing hyper-parameters.\n - `run.py` is the script for generating the best submission achieved (accuracy = 0.817).\n\n\n## Getting Started\n\nThese instructions will provide you all the information to get the best result achieved on your local machine, as described above.\n\n### Training dataset preparation \n\nIt is necessary to include a `data` folder in the project with 2 csv files:\n\n - `train.csv`: Training set of 250000 events. The file starts with the ID and label column, then the 30 feature columns.\n - `test.csv`: The test set of around 568238 events without labels.\n\n \n### Creating the prediction\n\nOnce the dataset have been set-up, just execute `run.py`:\n\n```\npython run.py\n```\n\nYou will see some output on the screen. Once \"Done\" appears, you will be able to see that a `prediction.csv` file has been generated. This file contains the predictions with the best model and parameters we could find and replicates exactly our best submission on the Kaggle competition.\n\n## Notes\n\nThe optimal hyperparameters found are hard-coded in run.py, but it still executes learning on these parameters; it does not simply output the output that was found in testing. Users can run it to find the same results in testing. To find these parameters, we have used cross_validation.py\n\nFor logistic regression, it is worth noting that some data generate NaN costs, due to limitations of machine number precision. In theory, sigmoid can never return 0 for any real numbers, and therefore logarithms of all sigmoid outputs should be valid -- however due to machine rounding 0 can be returned by sigmoid, which causes generation of NaN costs. 
Notably this does not cause failure in computation of the gradient, therefore gradient descent is not negatively influenced from this fact.\n\n## Authors \n\n - Marshall Cooper\n - Andrea Piccione\n - Divij Pherwani\n\n## Acknowledgments\n\nThe entire project used some utlities functions provided during the course lab sessions that can be found on https://github.com/epfml/ML_course\n"
},
{
"alpha_fraction": 0.6996996998786926,
"alphanum_fraction": 0.6996996998786926,
"avg_line_length": 21.266666412353516,
"blob_id": "674abef8d28f711b2dec9646206d1b2e36eb58cf",
"content_id": "f43615e17a7cbab646cd599a1cbdd11862ce84b5",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 333,
"license_type": "permissive",
"max_line_length": 44,
"num_lines": 15,
"path": "/gradients.py",
"repo_name": "mikanikos/Higgs-Boson-Challenge",
"src_encoding": "UTF-8",
"text": "import numpy as np\nfrom helpers import sigmoid\n\n\n### Gradient computing functions\n\n# Computing gradient\ndef compute_gradient(y, tx, w):\n error = y - tx.dot(w)\n return -tx.T.dot(error) / len(error)\n\n\n# Computing gradient for logistic regression\ndef compute_gradient_log_reg(y, tx, w):\n return tx.T.dot(sigmoid(tx.dot(w)) - y)"
}
] | 8 |
ivychen2/hi
|
https://github.com/ivychen2/hi
|
b9191db4a78d0fdfae0bed630d88843d54a3e162
|
e1769f0b9c21ad03836a8c7b02e2b4cc14d2746f
|
48f7652e10e4bbace015c5b35e8f1ff82803c6be
|
refs/heads/main
| 2023-04-10T09:05:08.672204 | 2021-04-22T07:28:15 | 2021-04-22T07:28:15 | 360,418,286 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6195651888847351,
"alphanum_fraction": 0.6195651888847351,
"avg_line_length": 22.25,
"blob_id": "03ee4578180b29708c53334f0139624ba57f27a1",
"content_id": "df6f1574eba867919412998c2fb1d403bbddbaa6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 126,
"license_type": "no_license",
"max_line_length": 24,
"num_lines": 4,
"path": "/123.py",
"repo_name": "ivychen2/hi",
"src_encoding": "UTF-8",
"text": "name = input(\"請輸入姓名: \")\nprint(\"Hi,\", name)\nheight = input(\"請輸入身高:\")\nweight = input(\"請輸入體重:\")"
}
] | 1 |
burkh4rt/brown_kobe_exchange_2016
|
https://github.com/burkh4rt/brown_kobe_exchange_2016
|
b280943da293c354620caf49e7b92afccb63070b
|
e5041e622822bd0e3f4bf4d0d63370ad73eccd68
|
cec1a47e6b3e1bcf6d61296398e0cee641c18169
|
refs/heads/master
| 2020-04-06T07:07:39.586261 | 2016-09-04T06:39:45 | 2016-09-04T06:39:45 | 62,852,972 | 1 | 1 | null | null | null | null | null |
[
{
"alpha_fraction": 0.7120581865310669,
"alphanum_fraction": 0.7307692170143127,
"avg_line_length": 23.3157901763916,
"blob_id": "02786a9c807aeee63251ececffedf75ea1deb3c1",
"content_id": "d95af0827e95355739440f4d14ffc61dfb23025c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 962,
"license_type": "no_license",
"max_line_length": 68,
"num_lines": 38,
"path": "/pipeline/straight_line.py",
"repo_name": "burkh4rt/brown_kobe_exchange_2016",
"src_encoding": "UTF-8",
"text": "\"\"\"\r\nSteps to display these files:\r\n\t1: Open Paraview\r\n\t2: Go to Sources (in the top tab) and select \"Programmable Source\"\r\n\t3: In the Script box, paste the code below\r\n\t4: Click Apply\r\n\t5: Adjust the color of the line and background as necessary\r\n\"\"\"\r\n\r\nnumPts = 2\r\n#Initialize the polydata output\r\npdo = self.GetPolyDataOutput()\r\n\r\n#Create a vtkPoints instance\r\nnewPts = vtk.vtkPoints()\r\n\r\n#Add all points to the vtkPoints\r\nnewPts.InsertPoint(0,0,0,0)\r\nnewPts.InsertPOint(1,0,0,999)\r\n\r\n#Save points to the output\r\npdo.SetPoints(newPts)\r\n\r\n#Create a PolyLine instance to connect all lines\r\naPolyLine = vtk.vtkPolyLine()\r\n\r\n#Set number of points in polyline\r\naPolyLine.GetPointIds().SetNumberOfIds(numPts)\r\n\r\n#Set point ID's and connection order\r\nfor i in range(numPts):\r\n\taPolyLine.GetPointIds().SetId(i,i)\r\n\t\r\n#Allocate a single cell (one line)\r\npdo.Allocate(1,1)\r\n\r\n#Feed lines to output\r\npdo.InsertNextCell(aPolyLine.GetCellType(), aPolyLine.GetPointIds())\r\n"
},
{
"alpha_fraction": 0.7237654328346252,
"alphanum_fraction": 0.7438271641731262,
"avg_line_length": 33.105262756347656,
"blob_id": "daa3c4ecc2926ec22018e51a5e32682b233d19f9",
"content_id": "1eaa612edf6922367546b9840e00b83462eea6f6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 648,
"license_type": "no_license",
"max_line_length": 117,
"num_lines": 19,
"path": "/data_processing_code/dim_reduce_data.py",
"repo_name": "burkh4rt/brown_kobe_exchange_2016",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport os\nfrom sklearn.decomposition import PCA\n\nos.chdir('/Users/michael/Documents/brown/kobe/data') # move to the directory where our data is stored\n\n# grab data\nnpzfile = np.load('Flint_2012_e1.npz')\nall_time = npzfile['all_time']\nall_velocities = npzfile['all_velocities']\nall_counts = npzfile['all_counts']\nall_LFPs = npzfile['all_LFPs']\nall_neural = np.vstack((all_counts, all_LFPs))\ndel npzfile, all_counts, all_LFPs\n\npca = PCA(n_components=20, whiten=1)\nall_neural = pca.fit_transform(all_neural.T).T\n\nnp.savez_compressed('Flint_2012_e1_PCA.npz', all_time=all_time, all_neural=all_neural, all_velocities=all_velocities)\n"
},
{
"alpha_fraction": 0.5922818779945374,
"alphanum_fraction": 0.6224831938743591,
"avg_line_length": 22.81333351135254,
"blob_id": "66dcc68b832460e74d96eea5dcec333eccf56456",
"content_id": "e215b9ad41ac4327097e3ef7564de1d602674e26",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1788,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 75,
"path": "/get_data.py",
"repo_name": "burkh4rt/brown_kobe_exchange_2016",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as mpa\n\nres_file = np.load('filter_run.npz')\n\nall_particles = res_file['all_particles']\nall_weights = res_file['all_weights']\nall_est = res_file['all_est']\nall_true = res_file['all_true']\n\nnumpoints = all_particles.shape[1]\ntot_time = all_particles.shape[0]\n\nfig = plt.figure()\nax = plt.axes(xlim = (-0.15,0.15), ylim = (-0.15,0.15))\n\ndef main():\n ani = mpa.FuncAnimation(fig, scatplot, frames = xrange(tot_time))\n\n #Use FFmpeg to write animation to file\n #ffmpeg_writer = mpa.FFMpegWriter(fps = 10)\n #ani.save('particle_estimates.mp4', writer = ffmpeg_writer)\n\n plt.show()\n \n \n\ndef scatplot(time):\n fig.clear()\n x_model = all_particles[time,:,0]\n y_model = all_particles[time,:,1]\n c_model = np.chararray((numpoints,1), itemsize = 5)\n c_model[:] = 'red'\n s_model = all_weights[time,:,0]*1000\n\n x_est = all_est[time,0]\n y_est = all_est[time,1]\n c_est = 'blue'\n s_est = 20\n\n x_data = all_true[time,0]\n y_data = all_true[time,1]\n c_data = 'black'\n s_data = 20\n\n x1 = np.append(x_model,x_data)\n y1 = np.append(y_model,y_data)\n c1 = np.append(c_model,c_data)\n s1 = np.append(s_model,s_data)\n\n x = np.append(x1, x_est)\n y = np.append(y1, y_est)\n c = np.append(c1, c_est)\n s = np.append(s1, s_est)\n \n# scat.set_sizes(s)\n# scat.set_offsets(np.hstack((x,y)))\n# scat.set_color(c) \n\n ax = plt.axes(xlim = (-0.15,0.15), ylim = (-0.15,0.15))\n scat = plt.scatter(x,y,c=c,s=s,edgecolor='None')\n \n return scat\n\nprint('particles metadata')\nprint(all_particles.shape)\nprint('weights metadata')\nprint(all_weights.shape)\nprint('est metadata')\nprint(all_est.shape)\nprint('true metadata')\nprint(all_true.shape)\n\nmain()\n\n\n"
},
{
"alpha_fraction": 0.680911660194397,
"alphanum_fraction": 0.6962962746620178,
"avg_line_length": 25.421875,
"blob_id": "bff5c13ca9ee19a0181431fb5b17377910f5e09a",
"content_id": "05f9d8b7207504f11a8a65193d65aeaaed450699",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1755,
"license_type": "no_license",
"max_line_length": 115,
"num_lines": 64,
"path": "/pipeline/generate_true_line_plot.py",
"repo_name": "burkh4rt/brown_kobe_exchange_2016",
"src_encoding": "UTF-8",
"text": "\"\"\"\r\nSteps to display these files:\r\n\t1: Change line 15 to contain the correct directory. Use absolute directories.\r\n\t2: Open Paraview\r\n\t3: Go to Sources (in the top tab) and select \"Programmable Source\"\r\n\t4: In the Script box, paste the code below\r\n\t5: Click Apply\r\n\t6: Adjust the color of the line and background as necessary\r\n\"\"\"\r\n\r\nimport numpy as np\r\n\r\n#Read the data\r\n#TODO: Change the next line to a correct absolute directory!!!!\r\ndata_file = np.load('C:\\\\Users\\\\GuestUser\\\\Documents\\\\Project\\\\brown_kobe_exchange_2016\\\\pipeline\\\\filter_run.npz')\r\nall_particles = data_file['all_particles']\r\nall_weights = data_file['all_weights']\r\nall_est = data_file['all_est']\r\nall_true = data_file['all_true']\r\n\r\n#Number of points\r\nnumPts = all_true.shape[0]\r\n\r\n#Create an array of x,y,z values\r\nall_true_t = np.zeros((all_true.shape[0], 3))\r\nall_true_t[:,0:2] = all_true*100\r\nfor t in range(all_true.shape[0]):\r\n\tall_true_t[t,2] = t\r\n\r\nxs = all_true_t[:,0].flatten()\r\nys = all_true_t[:,1].flatten()\r\nzs = all_true_t[:,2].flatten()\r\n\r\n#Initialize the polydata output\r\npdo = self.GetPolyDataOutput()\r\n\r\n#Create a vtkPoints instance\r\nnewPts = vtk.vtkPoints()\r\n\r\n#Add all points to the vtkPoints\r\nfor i in range(numPts):\r\n\tx = xs[i]\r\n\ty = ys[i]\r\n\tz = zs[i]\r\n\tnewPts.InsertPoint(i,x,y,z)\r\n\r\n#Save points to the output\r\npdo.SetPoints(newPts)\r\n\r\n#Create a PolyLine instance to connect all lines\r\naPolyLine = vtk.vtkPolyLine()\r\n\r\n#Set number of points in polyline\r\naPolyLine.GetPointIds().SetNumberOfIds(numPts)\r\n\r\n#Set point ID's and connection order\r\nfor i in range(numPts):\r\n\taPolyLine.GetPointIds().SetId(i,i)\r\n\t\r\n#Allocate a single cell (one line)\r\npdo.Allocate(1,1)\r\n\r\n#Feed lines to output\r\npdo.InsertNextCell(aPolyLine.GetCellType(), aPolyLine.GetPointIds())\r\n"
},
{
"alpha_fraction": 0.6396960020065308,
"alphanum_fraction": 0.6665176749229431,
"avg_line_length": 51.02325439453125,
"blob_id": "962221b87211ba81f862da8ccbedd821c19c7570",
"content_id": "b4fb17f72a7d9c5c3810f3fb1c3d2efb183577d3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2237,
"license_type": "no_license",
"max_line_length": 122,
"num_lines": 43,
"path": "/data_processing_code/process_data.py",
"repo_name": "burkh4rt/brown_kobe_exchange_2016",
"src_encoding": "UTF-8",
"text": "import scipy.io as sio\nimport scipy.interpolate as sp_interp\nimport numpy as np\nimport os\n\nos.chdir('/Users/michael/Documents/brown/kobe/data') # move to the directory where our data is stored\ndata = sio.loadmat('Flint_2012_e1.mat') # read matlab data file\n\n# grab & define basic data specs\nn_trials = data['Subject'][0, 0]['Trial'].size\nn_neurons = max([data['Subject'][0, 0]['Trial'][trial, 0]['Neuron']['Spike'].size for trial in range(n_trials)])\nn_LFPs = 95\n\n# pre-allocate data storage structures\nall_time = sum(np.squeeze(data['Subject'][0, 0]['Trial'][trial, 0]['Time']).size for trial in range(n_trials))\nall_counts = np.empty([n_neurons, all_time])\nall_velocities = np.empty([2, all_time])\nall_LFPs = np.empty([20*n_LFPs, all_time])\n\n# grab data trial-by-trial and populate our data structures\nidx0 = 0\nfor t in range(n_trials):\n clock_trial = np.squeeze(data['Subject'][0, 0]['Trial'][t, 0]['Time'])\n all_counts_trial = np.empty([n_neurons, clock_trial.size])\n idx_trial = range(idx0, idx0+clock_trial.size)\n all_velocities[:, idx_trial] = data['Subject'][0, 0]['Trial'][t, 0]['HandVel'][:, 0:2].T\n for n in range(n_neurons):\n neuron_spikes_trial = np.squeeze(data['Subject'][0, 0]['Trial'][t, 0]['Neuron']['Spike'][n, 0])\n neuron_spikes_idx = np.digitize(neuron_spikes_trial, clock_trial)\n neuron_spikes_count = np.bincount(np.ndarray.flatten(neuron_spikes_idx), minlength=clock_trial.size)\n all_counts[n, idx_trial] = neuron_spikes_count\n for n in range(n_LFPs):\n single_LFP_trial = np.squeeze(data['Subject'][0, 0]['Trial'][t, 0]['Neuron']['LFP'][n, 0])\n single_LFP_tck = sp_interp.splrep(np.arange(single_LFP_trial.size)/single_LFP_trial.size, single_LFP_trial)\n LFP_eval_pts = np.reshape(np.arange(20*clock_trial.size)/(20*clock_trial.size), [20, clock_trial.size], order='F')\n all_LFPs[20*n:20*(n+1), idx_trial] = sp_interp.splev(LFP_eval_pts, single_LFP_tck)\n idx0 += clock_trial.size\n\nnp.savez_compressed('Flint_2012_e1.npz', all_time=all_time, all_counts=all_counts, all_velocities=all_velocities,\n all_LFPs=all_LFPs)\n\n# exec(open('process_data.py').read())\n# get data back with: npzfile = np.load('Flint_2012_e1.npz')\n"
},
{
"alpha_fraction": 0.6269674897193909,
"alphanum_fraction": 0.6392969489097595,
"avg_line_length": 28.7734375,
"blob_id": "97c40316a0e5bce48942ba3c5a337816eabcda5b",
"content_id": "a66ccc82ea7cf878a7f016a61bc5534293c51b28",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3812,
"license_type": "no_license",
"max_line_length": 116,
"num_lines": 128,
"path": "/alternate_code/filter_plus_plus.py",
"repo_name": "burkh4rt/brown_kobe_exchange_2016",
"src_encoding": "UTF-8",
"text": "\"\"\"\nrun with:\nmpiexec -n 4 python3 filter_plus_plus.py\n\"\"\"\n\nfrom mpi4py import MPI\n# import tensorflow as tf\nimport numpy as np\nimport os\nimport time\n\n\n# instantiate MPI communicator\ncomm = MPI.COMM_WORLD\nrank = comm.Get_rank()\nsize = comm.Get_size()\n\nnpt = 100\nn_particles = npt*size\n\n# describe our data\nn_test = 500\nn_steps = 30\n\nd_neural = 600\nd_velocities = 2\n\n# move to where data is\nos.chdir('/Users/michael/Documents/brown/kobe/data')\n\n# load Kalman parameters\nparam_file = np.load('kalman_estimates.npz')\nA_est = param_file['A_est']\nS_est = param_file['S_est']\nC_est = param_file['C_est']\nQ_est = param_file['Q_est']\n\nQ_est_inv = np.linalg.inv(Q_est)\n\n# grab data on root process\nif rank == 0:\n tic = time.clock()\n # grab data\n npzfile = np.load('Flint_2012_e1_PCA.npz')\n all_time = npzfile['all_time']\n all_velocities = npzfile['all_velocities']\n all_neural = npzfile['all_neural']\n\n T = int(all_time) - 30\n del all_time\n\n def neural(ind):\n neur = np.zeros((ind.size, d_neural))\n for i0 in range(ind.size):\n s_idx = range(ind[i0], ind[i0] + 30)\n neur[i0, :] = all_neural[:, s_idx].flatten()\n return neur\n\n def velocities(ind):\n return all_velocities[:, ind + 29].T\n\n# initialize particles\nparticles = None\nweights = None\nparticles_weights = None\nparticle = np.zeros((npt, d_velocities))\nweight = np.ones((npt, 1))\nparticle_weight = np.hstack((particle, weight))\n\nobservation = np.zeros(d_neural)\n\nif rank == 0:\n particles = np.random.multivariate_normal(np.zeros(2), S_est, n_particles)\n weights = np.ones((n_particles, 1))/n_particles\n particles_weights = np.hstack((particles, weights))\n\n# store data\nif rank == 0:\n all_particles = np.empty([n_test, n_particles, d_velocities])\n all_weights = np.empty([n_test, n_particles, 1])\n all_true = np.empty([n_test, d_velocities])\n all_est = np.empty([n_test, d_velocities])\n\nfor t in range(n_test):\n if rank == 0:\n # resampling step\n samples = np.random.multinomial(n_particles, weights.flatten())\n indices = np.repeat(np.arange(n_particles), samples.flatten())\n particles = particles[indices]\n weights = np.ones((n_particles, 1)) / n_particles\n particles_weights = np.hstack((particles, weights))\n # grab new observation\n observation = neural(np.arange(1) + t)\n\n # send out the particles to different processes\n comm.Scatter(particles_weights, particle_weight, root=0)\n comm.Bcast(observation, root=0)\n particle = particle_weight[:, :d_velocities]\n weight = particle_weight[:, d_velocities:]\n\n # update particle location\n particle = np.matmul(A_est, particle.T).T + np.random.multivariate_normal(np.zeros(d_velocities), S_est, npt)\n\n # update particle weight\n diff = np.matmul(C_est, particle.T).T - observation.flatten()\n log_weight = weight\n for p in range(npt):\n log_weight[p, ] = -0.5 * np.matmul(np.matmul(diff[p, :], Q_est_inv)[:, None].T, diff[p, ])\n particle_weight = np.hstack((particle, log_weight))\n\n comm.Barrier()\n\n # return\n comm.Gather(particle_weight, particles_weights)\n\n if rank == 0:\n all_particles[t, :, :] = particles = particles_weights[:, :d_velocities]\n log_weights = particles_weights[:, d_velocities:]\n weights = np.exp(log_weights - np.max(log_weights))\n all_weights[t, :, :] = weights = weights / np.sum(weights)\n all_est[t, :] = estimate = np.matmul(weights.T, particles)\n all_true[t, :] = true = velocities(np.arange(1) + t)\n print('est=', estimate, 'true=', true)\n\nif rank == 0:\n np.savez('filter_run', 
all_particles=all_particles, all_weights=all_weights, all_est=all_est, all_true=all_true)\n toc = time.clock() - tic\n print('run_time=', toc, 'sec')\n\n"
},
{
"alpha_fraction": 0.6990445852279663,
"alphanum_fraction": 0.7054139971733093,
"avg_line_length": 33.88888931274414,
"blob_id": "58584dec3848ec351bd1a117521c9190e4f2490c",
"content_id": "d4e7170e9b18e2c5f8b6c12c769a29aa6039c94a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 628,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 18,
"path": "/cython/setup_filter_cythonized.py",
"repo_name": "burkh4rt/brown_kobe_exchange_2016",
"src_encoding": "UTF-8",
"text": "# CC=gcc-6 python3 setup_filter_cythonized.py build_ext --inplace\n# python3 -c \"import filter_cythonized\"\n\n# Cython has its own \"extension builder\" module that knows how\n# to build cython files into python modules.\nfrom distutils.core import setup\nfrom distutils.extension import Extension\nfrom Cython.Distutils import build_ext\n\nimport numpy\n\next = Extension(\"filter_cythonized\", sources=[\"filter_cythonized.pyx\"],\n include_dirs = [numpy.get_include()],\n extra_compile_args = [\"-fopenmp\", \"-g\", \"-O3\"],\n extra_link_args=[\"-fopenmp\", \"-g\"] )\n\nsetup(ext_modules=[ext],\n cmdclass={'build_ext': build_ext})\n"
},
{
"alpha_fraction": 0.6711822748184204,
"alphanum_fraction": 0.7315270900726318,
"avg_line_length": 35.90909194946289,
"blob_id": "291d4dae86dd4799c7a3ac3425726884ed61fcd6",
"content_id": "ce36bd6bbd77e583ab65a8b7b414db7bd0736098",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 812,
"license_type": "no_license",
"max_line_length": 122,
"num_lines": 22,
"path": "/v2/preproc_data.py",
"repo_name": "burkh4rt/brown_kobe_exchange_2016",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport os\nfrom sklearn.decomposition import PCA\n\nos.chdir('/Users/michael/Documents/brown/kobe/data') # move to the directory where our data is stored\n\n# grab data\nnpzfile = np.load('Flint_2012_e1.npz')\nall_time = npzfile['all_time']\nall_velocities = npzfile['all_velocities']\nall_counts = npzfile['all_counts']\nall_LFPs = npzfile['all_LFPs']\nall_neural = np.vstack((all_counts, all_LFPs))\n\nall_counts0 = np.sum(all_counts[:, 0:77920].reshape(196,-1, 20), axis=2)\nall_velocities0 = np.mean(all_velocities[:, 0:77920].reshape(2,-1, 20), axis=2)\nall_time0 = all_velocities0.shape[1]\n\npca = PCA(n_components=20, whiten=1)\nall_neural0 = pca.fit_transform(all_counts0.T).T\n\nnp.savez_compressed('Flint_2012_e1_PCA00.npz', all_time=all_time0, all_neural=all_neural0, all_velocities=all_velocities0)\n"
},
{
"alpha_fraction": 0.6151202917098999,
"alphanum_fraction": 0.6434707641601562,
"avg_line_length": 32.28571319580078,
"blob_id": "1256467a019ab50c0683054b8db009fd31e9bfcb",
"content_id": "9ecd7cefe3a7bef581f78b3c8bbd362750a8b7e9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1164,
"license_type": "no_license",
"max_line_length": 89,
"num_lines": 35,
"path": "/pipeline/npz_to_vtk.py",
"repo_name": "burkh4rt/brown_kobe_exchange_2016",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport evtk.hl as eh\n\ndata_file = np.load('./filter_run.npz')\nall_particles = data_file['all_particles']\nall_weights = data_file['all_weights']\nall_est = data_file['all_est']\nall_true = data_file['all_true']\n\nall_particles_t = np.zeros((all_particles.shape[0], all_particles.shape[1], 3))\n\nall_particles_t[:, :, 0:2] = all_particles*100\nfor t in range(all_particles.shape[0]):\n all_particles_t[t, :, 2] = t\n\nxs = all_particles_t[:, :, 0].flatten()\nys = all_particles_t[:, :, 1].flatten()\nzs = all_particles_t[:, :, 2].flatten()\nws = all_weights.flatten()\n\neh.pointsToVTK('./particle_data', xs, ys, zs, data={\"weights\": ws})\n\nall_particles_t = np.zeros((all_particles.shape[0], all_particles.shape[1], 3))\n\nall_particles_t[:, :, 0:2] = all_particles*100\nfor t in range(all_particles.shape[0]):\n\tall_particles_t[t, :, 2] = t\n\tall_particles_t[t, : , 0:2] = np.subtract(all_particles_t[t, :, 0:2], all_true[t,:]*100)\n\nxs = all_particles_t[:, :, 0].flatten()\nys = all_particles_t[:, :, 1].flatten()\nzs = all_particles_t[:, :, 2].flatten()\nws = all_weights.flatten()\n\neh.pointsToVTK('./particle_minus_true_data', xs, ys, zs, data = {'weights': ws})"
},
{
"alpha_fraction": 0.8367347121238708,
"alphanum_fraction": 0.8367347121238708,
"avg_line_length": 48,
"blob_id": "18ebedd4635befacc4ba1c8f29c0d1a7ad81c08a",
"content_id": "80503a77d45258389e1bf5679e7cac652c3abf77",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 49,
"license_type": "no_license",
"max_line_length": 48,
"num_lines": 1,
"path": "/v2/README.md",
"repo_name": "burkh4rt/brown_kobe_exchange_2016",
"src_encoding": "UTF-8",
"text": "made some changes to try and improve performance\n"
},
{
"alpha_fraction": 0.6402266025543213,
"alphanum_fraction": 0.6600566506385803,
"avg_line_length": 26.239999771118164,
"blob_id": "19c6157a3cb391fc66ea89c2f9c425ee9786abde",
"content_id": "5bb7f993dfd1c12f18740716988d85ac448a1d09",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 706,
"license_type": "no_license",
"max_line_length": 92,
"num_lines": 25,
"path": "/push_to_paraview/filter_run_to_csv.py",
"repo_name": "burkh4rt/brown_kobe_exchange_2016",
"src_encoding": "UTF-8",
"text": "import numpy as np\r\n\r\n#Read data\r\nnpzfile = np.load('../pipeline/filter_run.npz')\r\nall_true = npzfile['all_true']\r\nall_est = npzfile['all_est']\r\nall_particles = npzfile['all_particles']\r\nall_weights = npzfile['all_weights']\r\n\r\n#Get time array\r\ntime = all_true.shape[0]\r\nall_time = np.reshape(np.arange(time), (time,1))\r\nall_time = all_time/1000\r\n\r\n#Get size array\r\nnormal_size = 20\r\ntrue_size = np.reshape(np.repeat(20,time), (time,1))\r\n\r\n#Get csv for plotting true\r\nplot_true = np.hstack((all_true,all_time))\r\nplot_true = np.hstack((plot_true, true_size))\r\n\r\nplot_true = np.vstack((np.arange(4), plot_true))\r\n\r\nnp.savetxt('filter_run_true.csv', plot_true, delimiter = ',', newline = '\\n', fmt = '%1.4e')\r\n"
},
{
"alpha_fraction": 0.5816857218742371,
"alphanum_fraction": 0.6264308094978333,
"avg_line_length": 22.341463088989258,
"blob_id": "bfaba35b7a2a9a31e9bde1337afb976dc80b9d2f",
"content_id": "5a495b03e8f2ffd07b0722bd0abe6d3ed334e7c9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 961,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 41,
"path": "/data_processing_code/kalman_estimates.py",
"repo_name": "burkh4rt/brown_kobe_exchange_2016",
"src_encoding": "UTF-8",
"text": "\n\nimport numpy as np\nimport os\n\nn_test = 1000\nn_steps = 30\nbatch_size = 1000\nd_neural = 600\nd_velocities = 2\n\n# grab data\nos.chdir('/Users/michael/Documents/brown/kobe/data')\nnpzfile = np.load('Flint_2012_e1_PCA.npz')\nall_time = npzfile['all_time']\nall_velocities = npzfile['all_velocities']\nall_neural = npzfile['all_neural']\n\nT = int(all_time) - 30\ndel all_time\n\n\ndef neural(ind):\n neur = np.zeros((ind.size, d_neural))\n for i0 in range(ind.size):\n s_idx = range(ind[i0], ind[i0] + 30)\n neur[i0, :] = all_neural[:, s_idx].flatten()\n return neur\n\n\ndef velocities(ind):\n return all_velocities[:, ind + 29].T\n\nX = neural(np.arange(5000))\ny = velocities(np.arange(5000))\n\nA_est = np.linalg.lstsq(y[:-1, ], y[1:, ])[0].T\nS_est = np.cov(y[1:, ].T-np.matmul(A_est, y[:-1, ].T))\n\nC_est = np.linalg.lstsq(y, X)[0].T\nQ_est = np.cov(X.T-np.matmul(C_est, y.T))\n\nnp.savez('kalman_estimates', A_est=A_est, S_est=S_est, C_est=C_est, Q_est=Q_est)\n\n\n"
},
{
"alpha_fraction": 0.5682740211486816,
"alphanum_fraction": 0.5842722058296204,
"avg_line_length": 28.003267288208008,
"blob_id": "eb28e904dd70f729af90b0235523e8f78b6fcb98",
"content_id": "19429e490ec6f11d72532692610704ac3f50dd99",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8876,
"license_type": "no_license",
"max_line_length": 120,
"num_lines": 306,
"path": "/par_train.py",
"repo_name": "burkh4rt/brown_kobe_exchange_2016",
"src_encoding": "UTF-8",
"text": "from mpi4py import MPI\nimport tensorflow as tf\nimport numpy as np\nimport sys\nimport resource\nimport os\n\ncomm = MPI.COMM_WORLD\nrank = comm.Get_rank()\nsize = comm.Get_size()\n\nfull_layers = [3]\ntrain_batch = 10\nepochs = 30\nlearning_rate = 0.1\ninput_shape = [600]\noutput_size = 2\nthreads = size\ninter_threads = 0\nintra_threads = 0\nfilename = None\nvalid_pct = 0.1\ntest_pct = 0.1\nepoch = 0\nerror_batch = False\nmerge_every = 0\ntop = 1\n\nif 0 == inter_threads:\n inter_threads = threads\nif 0 == intra_threads:\n intra_threads = threads\n\n# grab data\nos.chdir('/Users/michael/Documents/brown/kobe/data')\nnpzfile = np.load('Flint_2012_e1_PCA.npz')\nall_time = npzfile['all_time']\nall_velocities = npzfile['all_velocities']\nall_neural = npzfile['all_neural']\n\nT = int(all_time) - 30\ndel all_time\n\nd_neural = 30 * all_neural.shape[0]\nd_velocities = all_velocities.shape[0]\n\n\ndef neural(ind):\n neur = np.zeros((ind.size, d_neural))\n for i0 in range(ind.size):\n s_idx = range(ind[i0], ind[i0] + 30)\n neur[i0, :] = all_neural[:, s_idx].flatten()\n return neur\n\n\ndef velocities(ind):\n return all_velocities[:, ind + 29].T\n\n\nfull_dat = neural(np.arange(3000))\nfull_lab = velocities(np.arange(3000))\nvalid_dat = neural(np.arange(3000, 4000))\nvalid_lab = velocities(np.arange(3000, 4000))\ntest_dat = neural(np.arange(4000, 5000))\ntest_lab = velocities(np.arange(4000, 5000))\n\ninput_size = 1\nfor i in input_shape:\n input_size *= i\n\n# set up network\n\n\ndef weight_variable(shape, saved_state, index):\n if saved_state is None:\n initial = tf.truncated_normal(shape, stddev=0.1)\n else:\n initial = saved_state[0][index]\n return tf.Variable(initial)\n\n\ndef bias_variable(shape, saved_state, index):\n if saved_state is None:\n initial = tf.constant(0.1, shape=shape)\n else:\n initial = saved_state[1][index]\n return tf.Variable(initial)\n\n\ndef create_full_layer(in_size, out_size, layer_list, weight_list,\n bias_list, saved_state):\n if saved_state is None:\n weight_list.append(tf.Variable(\n tf.random_normal([in_size, out_size], stddev=1.0 / in_size)))\n bias_list.append(tf.Variable(\n tf.random_normal([out_size], stddev=1.0 / in_size)))\n else:\n index = len(weight_list)\n weight_list.append(tf.Variable(saved_state[0][index]))\n bias_list.append(tf.Variable(saved_state[1][index]))\n temp_w = len(weight_list)\n temp_b = len(bias_list)\n temp_l = len(layer_list)\n layer_list.append(tf.nn.sigmoid(tf.matmul(layer_list[temp_l - 1], weight_list[temp_w - 1]) + bias_list[temp_b - 1]))\n\n\ndef populate_graph(\n full_layers,\n learning_rate,\n input_shape,\n saved_state):\n weights = []\n biases = []\n layers = []\n\n x = tf.placeholder(tf.float32, [None, input_size])\n y_ = tf.placeholder(tf.float32, [None, output_size])\n\n layers.append(x)\n layers.append(tf.reshape(x, [-1] + input_shape))\n\n full_layers = [input_size] + full_layers\n for i in range(len(full_layers) - 1):\n create_full_layer(full_layers[i], full_layers[i + 1], layers,\n weights, biases, saved_state)\n if saved_state is None:\n W = tf.Variable(tf.random_normal([full_layers[-1], output_size], stddev=1.0 / full_layers[-1]))\n b = tf.Variable(tf.random_normal([output_size], stddev=1.0 / full_layers[-1]))\n else:\n index = len(weights)\n W = tf.Variable(saved_state[0][index])\n b = tf.Variable(saved_state[1][index])\n weights.append(W)\n biases.append(b)\n\n w_holder = [tf.placeholder(tf.float32, w.get_shape()) for w in weights]\n b_holder = [tf.placeholder(tf.float32, b.get_shape()) for b in biases]\n 
w_assign = [w.assign(p) for w, p in zip(weights, w_holder)]\n b_assign = [b.assign(p) for b, p in zip(biases, b_holder)]\n\n y = tf.matmul(layers[-1], W) + b\n\n mse_loss = tf.reduce_mean(tf.squared_difference(y_, y), name='mse')\n\n train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(mse_loss)\n init = tf.initialize_all_variables()\n sess = tf.Session(\n config=tf.ConfigProto(\n inter_op_parallelism_threads=inter_threads,\n intra_op_parallelism_threads=intra_threads))\n\n mse_val = tf.reduce_sum(tf.squared_difference(y_, y))\n\n sess.run(init)\n\n ops = {\n \"sess\": sess,\n \"x\": x,\n \"y_\": y_,\n \"weights\": weights,\n \"biases\": biases,\n \"w_holder\": w_holder,\n \"b_holder\": b_holder,\n \"w_assign\": w_assign,\n \"b_assign\": b_assign,\n \"train_step\": train_step,\n \"mse_loss\": mse_loss,\n \"mse_val\": mse_val,\n }\n\n return ops\n\n\ndef run_graph(\n data,\n labels,\n train_batch,\n ops,\n saved_state):\n global epoch\n\n sess = ops[\"sess\"]\n x = ops[\"x\"]\n y_ = ops[\"y_\"]\n weights = ops[\"weights\"]\n biases = ops[\"biases\"]\n w_holder = ops[\"w_holder\"]\n b_holder = ops[\"b_holder\"]\n w_assign = ops[\"w_assign\"]\n b_assign = ops[\"b_assign\"]\n train_step = ops[\"train_step\"]\n mse_loss = ops[\"mse_loss\"]\n mse_val = ops[\"mse_val\"]\n\n # use saved state to assign saved weights and biases\n if saved_state is not None:\n feed_dict = {}\n for d, p in zip(saved_state[0], w_holder):\n feed_dict[p] = d\n for d, p in zip(saved_state[1], b_holder):\n feed_dict[p] = d\n sess.run(w_assign + b_assign, feed_dict=feed_dict)\n\n number_of_batches = int(len(data) / train_batch)\n min_batches = comm.allreduce(number_of_batches, MPI.MIN)\n\n if number_of_batches == 0:\n number_of_batches = 1\n\n for i in range(number_of_batches):\n lo = i * train_batch\n hi = (i + 1) * train_batch\n batch_xs = data[lo:hi]\n batch_ys = labels[lo:hi]\n sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})\n if (i < min_batches) and (merge_every >= 1) and (i % merge_every == 0):\n r_weights = sess.run(weights)\n r_biases = sess.run(biases)\n for r in r_weights:\n comm.Allreduce(MPI.IN_PLACE, r, MPI.SUM)\n r /= size\n for r in r_biases:\n comm.Allreduce(MPI.IN_PLACE, r, MPI.SUM)\n r /= size\n feed_dict = {}\n for d, p in zip(r_weights, w_holder):\n feed_dict[p] = d\n for d, p in zip(r_biases, b_holder):\n feed_dict[p] = d\n sess.run(w_assign + b_assign, feed_dict=feed_dict)\n\n # average as soon as we're done with all batches so the error and\n # mse_val reflect the current epoch\n\n r_weights = sess.run(weights)\n r_biases = sess.run(biases)\n for r in r_weights:\n comm.Allreduce(MPI.IN_PLACE, r, MPI.SUM)\n r /= size\n for r in r_biases:\n comm.Allreduce(MPI.IN_PLACE, r, MPI.SUM)\n r /= size\n feed_dict = {}\n for d, p in zip(r_weights, w_holder):\n feed_dict[p] = d\n for d, p in zip(r_biases, b_holder):\n feed_dict[p] = d\n sess.run(w_assign + b_assign, feed_dict=feed_dict)\n\n sum_error = 0.0\n if error_batch:\n for i in range(number_of_batches):\n lo = i * train_batch\n hi = (i + 1) * train_batch\n batch_xs = data[lo:hi]\n batch_ys = labels[lo:hi]\n sum_error += sess.run(mse_loss, feed_dict={x: batch_xs, y_: batch_ys})\n else:\n sum_error = sess.run(mse_loss, feed_dict={x: data, y_: labels})\n sum_error_all = comm.allreduce(sum_error)\n batch_mse = 0.0\n if error_batch:\n test_batch_count = len(test_dat) / train_batch\n if test_batch_count == 0:\n test_batch_count = 1\n for i in range(test_batch_count):\n lo = i * train_batch\n hi = (i + 1) * train_batch\n batch_xs = 
test_dat[lo:hi]\n batch_ys = test_lab[lo:hi]\n batch_mse += sess.run(mse_val, feed_dict={x: batch_xs, y_: batch_ys})\n else:\n batch_mse = sess.run(mse_val, feed_dict={x: test_dat, y_: test_lab})\n batch_mse = comm.allreduce(batch_mse, MPI.SUM)\n count = comm.allreduce(len(test_dat), MPI.SUM)\n batch_mse = float(batch_mse) / count\n\n if 0 == rank:\n print(epoch + 1, batch_mse, sum_error_all)\n sys.stdout.flush()\n\n return r_weights, r_biases\n\n\nif 0 == rank:\n print(\"epoch,mse_val,error\")\n\ndata_threshold = int(len(full_dat) / 2)\nactive_dat = full_dat\nactive_lab = full_lab\ninactive_dat = np.empty([0] + list(full_dat.shape[1:]), full_dat.dtype)\ninactive_lab = np.empty([0] + list(full_lab.shape[1:]), full_lab.dtype)\n\nsaved_state = None\nops = populate_graph(\n full_layers,\n learning_rate,\n input_shape,\n saved_state)\nfor epoch in range(epochs):\n saved_state = run_graph(\n active_dat,\n active_lab,\n train_batch,\n ops,\n saved_state)\n\n"
},
{
"alpha_fraction": 0.6238415241241455,
"alphanum_fraction": 0.6492822170257568,
"avg_line_length": 35.932884216308594,
"blob_id": "8ed62587bfbd1f2c7ae46445bbd860ccb1223837",
"content_id": "6e8ae9e3e5244b6789bd79ac4176be03c44cf82b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5503,
"license_type": "no_license",
"max_line_length": 118,
"num_lines": 149,
"path": "/v2/m7.3_run.py",
"repo_name": "burkh4rt/brown_kobe_exchange_2016",
"src_encoding": "UTF-8",
"text": "import tensorflow as tf\nimport numpy as np\nfrom math import sqrt\nimport os\n\n# grab data\nnpzfile = np.load('/Users/michael/Documents/brown/kobe/data/Flint_2012_e1_PCA00.npz')\nall_time = npzfile['all_time']\nall_velocities = npzfile['all_velocities']\nall_neural = npzfile['all_neural']\n\nos.chdir('/Users/michael/Documents/brown/kobe/data')\n\n\"\"\"\ndata is sampled every 0.01s;\nthe paper says the neural data 200-300ms beforehand is most informative\nso we need the previous 30 observations of neural data for each velocity update\n\"\"\"\n\nT = int(all_time) - 6\ndel all_time\n\nd_neural = 6 * 20\nd_velocities = all_velocities.shape[0]\n\nall_speeds = np.sum(np.square(all_velocities), axis=0)\nfast_idx = np.argsort(-all_speeds)\n\n\ndef neural(ind):\n neur = np.zeros((ind.size, d_neural))\n for i0 in range(ind.size):\n s_idx = range(ind[i0], ind[i0] + 6)\n neur[i0, :] = all_neural[:, s_idx].flatten()\n return neur\n\n\ndef velocities(ind):\n return all_velocities[:, ind + 6].T\n\n\ng1 = tf.Graph() # this graph is for building features\n\nd_hid1, d_hid2 = 30, 15\n\n# Tell TensorFlow that the model will be built into the default Graph.\nwith g1.as_default():\n # Generate placeholders for the images and labels.\n with tf.name_scope('inputs'):\n neural_ = tf.placeholder(tf.float32, shape=[None, d_neural])\n\n with tf.name_scope('outputs'):\n velocities_ = tf.placeholder(tf.float32, shape=[None, d_velocities])\n\n with tf.name_scope('keep_prob'):\n keep_prob_ = tf.placeholder(\"float\", name=\"keep_probability\")\n\n with tf.name_scope('hidden1'):\n weights = tf.Variable(tf.truncated_normal([d_neural, d_hid1], stddev=1 / sqrt(float(d_hid1))), name='weights')\n biases = tf.Variable(tf.zeros([d_hid1]), name='biases')\n hidden1 = tf.nn.relu6(tf.matmul(neural_, weights) + biases)\n\n with tf.name_scope('dropout1'):\n hidden1_dropped = tf.nn.dropout(hidden1, keep_prob_)\n\n with tf.name_scope('hidden2'):\n weights = tf.Variable(tf.truncated_normal([d_hid1, d_hid2], stddev=1 / sqrt(float(d_hid2))), name='weights')\n biases = tf.Variable(tf.zeros([d_hid2]), name='biases')\n hidden2 = tf.nn.relu6(tf.matmul(hidden1_dropped, weights) + biases)\n\n with tf.name_scope('output'):\n weights = tf.Variable(tf.truncated_normal([d_hid2, d_velocities], stddev=1 / sqrt(float(d_velocities))),\n name='weights')\n biases = tf.Variable(tf.zeros([d_velocities]), name='biases')\n outputs = tf.matmul(hidden2, weights) + biases\n\n with tf.name_scope('loss'):\n loss = tf.reduce_mean(tf.squared_difference(outputs, velocities_), name='mse')\n tf.histogram_summary('loss', loss)\n\n optimizer = tf.train.AdagradOptimizer(0.1)\n # optimizer = tf.train.RMSPropOptimizer(0.1)\n\n # train_op = optimizer.minimize(loss)\n train_op = optimizer.minimize(loss)\n\n with tf.name_scope('validation'):\n val_op = tf.reduce_mean(tf.squared_difference(outputs, velocities_))\n tf.scalar_summary('validation', val_op)\n\n # Build the summary operation based on the TF collection of Summaries.\n summary_op = tf.merge_all_summaries()\n\n # Add the variable initializer Op.\n init = tf.initialize_all_variables()\n\n # Create a saver for writing training checkpoints.\n saver = tf.train.Saver()\n\n # Create a session for training g1\n sess1 = tf.Session(graph=g1)\n\n # Run the Op to initialize the variables.\n sess1.run(init)\n\n # training 1\n valid_idx = fast_idx[np.in1d(fast_idx, range(int(2*T / 3)))]\n for j in range(10):\n for i in range(24):\n # randomly grab a training set\n idx = valid_idx[300 * int(6 - i / 4):300 * (int(6 - i / 
4) + 1)]\n if i % 10 == 0: # every 10th step we run our validation step to see how we're doing\n f_dict = {neural_: neural(idx), velocities_: velocities(idx), keep_prob_: 1}\n [summary, vali] = sess1.run([summary_op, val_op], feed_dict=f_dict)\n print('Accuracy at step %s: %s' % (i, vali))\n save_path = saver.save(sess1, \"/Users/michael/Documents/brown/kobe/data/writers/1/model.ckpt\")\n print(\"Model saved in file: %s\" % save_path)\n else: # if we're not on a 10th step then we do a regular training step\n f_dict = {neural_: neural(idx), velocities_: velocities(idx), keep_prob_: 0.75}\n [summary, _] = sess1.run([summary_op, train_op], feed_dict=f_dict)\n\n\n # estimate error\n idx = np.arange(int(2*T / 3), int(T))\n f_dict = {neural_: neural(idx), velocities_: velocities(idx), keep_prob_: 1}\n f_neur = sess1.run(outputs, feed_dict=f_dict)\n vels = velocities(idx)\n cov_est = np.cov((vels-f_neur).T)\n print(cov_est)\n\n\n# collect model parameters\nf_hidden1_weights = sess1.run('hidden1/weights:0')\nf_hidden1_biases = sess1.run('hidden1/biases:0')\nf_hidden2_weights = sess1.run('hidden2/weights:0')\nf_hidden2_biases = sess1.run('hidden2/biases:0')\nf_output_weights = sess1.run('output/weights:0')\nf_output_biases = sess1.run('output/biases:0')\n\n\n# save model parameters\nnp.savez('neural_net_parameters0', f_hidden1_weights=f_hidden1_weights, f_hidden1_biases=f_hidden1_biases,\n f_hidden2_weights=f_hidden2_weights, f_hidden2_biases=f_hidden2_biases,\n f_output_weights=f_output_weights, f_output_biases=f_output_biases, cov_est=cov_est)\n\n\"\"\"\nlook at output with:\ntensorboard --logdir=/Users/michael/Documents/brown/kobe/data/writers/1\n\"\"\"\n"
},
{
"alpha_fraction": 0.688240647315979,
"alphanum_fraction": 0.7060164213180542,
"avg_line_length": 38.890907287597656,
"blob_id": "8d09b37ad3ca8c7906c636eb5da0a9bb85f9a6ea",
"content_id": "731dae1c9b165c8fc09e5b97afc9abcd8a7dd556",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2194,
"license_type": "no_license",
"max_line_length": 99,
"num_lines": 55,
"path": "/alternate_code/video_from_data.py",
"repo_name": "burkh4rt/brown_kobe_exchange_2016",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport os\nimport matplotlib.pyplot as plt\nimport matplotlib.animation\n\n# tell plt where FFmpeg lives on your machine\n# install FFmpeg if necessary with \"brew install ffmpeg\"\nplt.rcParams['animation.ffmpeg_path'] = '/usr/local/bin/ffmpeg'\n\nos.chdir('/Users/michael/Documents/brown/kobe/data')\nfilter_data = np.load('filter_run.npz')\n\nall_particles = filter_data['all_particles'] # locations as (n_test, n_particles, 2) np array\nall_weights = filter_data['all_weights'] # weights as (n_test, n_particles, 1) np array\nall_est = filter_data['all_est'] # filter estimates as (n_test, 2) np array\nall_true = filter_data['all_true'] # true realizations as (n_test, 2) np array\n\n# precalculate fixed window size so we don't miss anything\nmin_part = np.min(all_particles, axis=(0, 1))\nmax_part = np.max(all_particles, axis=(0, 1))\nmin_true = np.min(all_true, axis=0)\nmax_true = np.max(all_true, axis=0)\nmin_view = np.min(np.vstack((min_part, min_true)), axis=0)\nmax_view = np.max(np.vstack((max_part, max_true)), axis=0)\n\n# initialize figure and window size\nfig = plt.figure()\nax = plt.axes(xlim=(min_view[0], max_view[0]), ylim=(min_view[1], max_view[1]))\n\n# construct empty plots (to later be filled with data) for particles, estimates, & true values\nplot_particles = ax.scatter([], [], c='red', s=100, label='particles', alpha=0.7, edgecolor='None')\nplot_estimates = ax.scatter([], [], c='green', s=100, label='estimate', alpha=1., edgecolor='None')\nplot_true = ax.scatter([], [], c='blue', s=100, label='realization', alpha=1., edgecolor='None')\n\n# legend\nax.legend()\n\n\n# define a function that populates our plots with the data from a given time step\ndef print_step(t):\n plot_particles.set_sizes(700 * all_weights[t, :, 0])\n plot_particles.set_offsets(all_particles[t, :, :])\n plot_estimates.set_offsets(all_est[t, :])\n plot_true.set_offsets(all_true[t, :])\n\n\n# create the animation\nanim = matplotlib.animation.FuncAnimation(fig, print_step, frames=range(250))\n\n# use FFmpeg to write animation to file\nffmpeg_writer = matplotlib.animation.FFMpegWriter(fps=10)\nanim.save('particle_positions.mp4', writer=ffmpeg_writer)\n\n# uncomment to get a live plot\n# plt.show()\n"
},
{
"alpha_fraction": 0.6363233923912048,
"alphanum_fraction": 0.6493909358978271,
"avg_line_length": 29.924657821655273,
"blob_id": "f533426b787ab63c589d4c1bebc19970b01ea616",
"content_id": "a264a7c02255cee8dd5f3821224364ee3630b683",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4515,
"license_type": "no_license",
"max_line_length": 86,
"num_lines": 146,
"path": "/mpi_tf_filter.py",
"repo_name": "burkh4rt/brown_kobe_exchange_2016",
"src_encoding": "UTF-8",
"text": "\"\"\"\nrun with:\nmpiexec -n 4 python3 mpi_tf_filter.py\n\"\"\"\n\nfrom mpi4py import MPI\n#import tensorflow as tf\nimport numpy as np\nimport scipy as sp\nimport os\n\n###### load A and S py\n# load Kalman parameters\nparam_file = np.load('kalman_estimates.npz')\n\n# for location update\nA_est = param_file['A_est']\nS_est = param_file['S_est']\n\n# for weight update\nC_est = param_file['C_est']\nQ_est = param_file['Q_est']\nQ_estinv = np.linalg.inv(Q_est)\n##########\n\n#TODO: Update for your local environment\n#file_location = 'C:/Users/Ankan/Documents/Kobe_2016/Project'\nfile_location = '/users/guest055/scratch/Project/bke2016'\n\ncomm = MPI.COMM_WORLD\nrank = comm.Get_rank()\nsize = comm.Get_size()\n#name = MPI.Get_processor_name()\n\nn_test = 1000\nn_steps = 30\nbatch_size = 1000\nd_neural = 600\nd_velocities = 2\n\n# grab data\nif rank == 0:\n # grab data\n os.chdir(file_location)\n npzfile = np.load('Flint_2012_e1_PCA.npz')\n all_time = npzfile['all_time']\n all_velocities = npzfile['all_velocities']\n all_neural = npzfile['all_neural']\n\n T = int(all_time) - 30\n del all_time\n\n def neural(ind):\n neur = np.zeros((ind.size, d_neural))\n for i0 in range(ind.size):\n s_idx = range(int(ind[i0]), int(ind[i0]) + 30)\n neur[i0, :] = all_neural[:, s_idx].flatten()\n return neur\n\n def velocities(ind):\n return all_velocities[:, ind + 29].T\n\n# initialize particles\nparticles = None\nlog_weights = None\nparticle = np.zeros(d_velocities) #each particle is a velocity.\nlog_weight = np.zeros(1)\nweight = np.ones(1)\nparticle_log_weight = np.hstack((particle, log_weight))\nparticle_weight = np.hstack((particle,weight))\nparticles_weights = None\nparticles_log_weights = None\n\nif rank == 0:\n particles = np.random.multivariate_normal(np.zeros(2), np.eye(2), size)\n weights = np.ones((size, 1))/size\n particles_weights = np.hstack((particles, weights)) #dim 3 horizontal np_array\n\ncomm.Scatter(particles_weights, particle_weight)\nparticle = particle_weight[:d_velocities, ]\nweight = particle_weight[d_velocities:, ]\nlog_weight = np.log(weight)\n\nobservation = np.zeros(d_neural)\n\nfor t in range(n_test):\n if rank == 0:\n # Update observations\n ind = t*np.ones(1)\n observation = neural(ind)\n\n #Resample #TODO: parallelize\n particle_resampling = np.random.multinomial(1, weights.flatten(), size)\n particles = np.matmul(particle_resampling, particles)\n weights = np.ones((size,1))/size\n particles_weights = np.hstack((particles,weights))\n\n #Send resampling and observations to other threads\n comm.Bcast(observation, root=0) \n comm.Scatter(particles_weights, particle_weight)\n\n #Update resampled particles and uniform weights\n particle = particle_weight[:d_velocities, ]\n weight = particle_weight[d_velocities:, ]\n log_weight = np.log(weight)\n\n #Update particles with time\n particle = np.matmul(A_est, particle) + np.random.normal(np.zeros(2), S_est);\n\n #Update weights\n #log_weight = sp.stats.multivariate_normal.pdf(observation.T,\n # mean = np.matmul(C_est, particle.T),\n # cov = Q_est))\n mean_pred_weight = np.matmul(C_est, particle)\n cov_pred_weight = Q_est\n log_weight = -np.matmul(np.matmul(np.subtract(observation.T,mean_pred_weight.T),\n Q_estinv),\n np.subtract(observation,mean_pred_weight))/2\n particle_log_weight = np.hstack((particle,log_weight))\n\n #Make sure all particle_weight arrays are set before sharing with root\n comm.Barrier();\n\n\n #Pass all weights and particles to root\n comm.Gather(particle_log_weight, particles_log_weights)\n\n if rank==0:\n 
particles = particles_weights[:d_velocities, ]\n log_weights = particles_log_weights[d_velocities:, ]\n log_weights = log_weights - max(log_weights)\n weights = np.exp(log_weights)\n\n #Renormalize weights\n weights = weights/np.sum(weights)\n\n particles_weights = np.hstack((particles,weights))\n particles_log_weights = np.hstack((particles,log_weights))\n\n #When we reiterate, we immediately reset weights and weight\n\n comm.Scatter(particles_weights, particle_weight)\n comm.Scatter(particles_log_weights, particle_log_weight)\n particle = particle_weight[:d_velocities,]\n weight = particle_weight[d_velocities:,]\n log_weight = particle_log_weight[d_velocities:,]\n"
},
{
"alpha_fraction": 0.7636363506317139,
"alphanum_fraction": 0.7818182110786438,
"avg_line_length": 54,
"blob_id": "0a19ba430bec35901dbae9bb7f7195a430cd1974",
"content_id": "681ca6c71c6b1c02359cbaa41b2bba62b89c6587",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 385,
"license_type": "no_license",
"max_line_length": 103,
"num_lines": 7,
"path": "/pipeline/README.txt",
"repo_name": "burkh4rt/brown_kobe_exchange_2016",
"src_encoding": "UTF-8",
"text": "1. learn model parameters with model_learn.py\n2. compile particle filter with: python setup_filter_cythonized_with_neural_nets.py build_ext --inplace\n3. run particle filter with: python -c \"import filter_cythonized_with_nerual_nets\"\n4. data is now in file: filter_run.npz\n5. install PyEVTK: https://bitbucket.org/pauloh/pyevtk/downloads\n6. run npz_to_vtk.py\n7. open result in paraview\n"
},
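As a quick sanity check between steps 4 and 6 of the pipeline README above, the arrays written to `filter_run.npz` can be inspected before converting them to VTK. A small sketch, assuming the array names used by `npz_to_vtk.py` and `get_data.py` elsewhere in this repository:

```python
import numpy as np

# Load the particle-filter output and report the shape of each array.
run = np.load('filter_run.npz')
for key in ('all_particles', 'all_weights', 'all_est', 'all_true'):
    print(key, run[key].shape)
```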
{
"alpha_fraction": 0.6692330241203308,
"alphanum_fraction": 0.6884809136390686,
"avg_line_length": 35.706520080566406,
"blob_id": "caa2f0a5d6645cedb30acbedf23b1384f06ac22a",
"content_id": "04435c5ac40541040a8305eaa1bf69fa76421ef9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3377,
"license_type": "no_license",
"max_line_length": 114,
"num_lines": 92,
"path": "/v2/vanilla_filter_7.3.py",
"repo_name": "burkh4rt/brown_kobe_exchange_2016",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport os\n\n# name experimental parameters\nn_steps = 6\nn_particles = 1000\n\n# model information\nd_neural = 120\nd_velocities = 2\n\nos.chdir('/Users/michael/Documents/brown/kobe/data')\n\n# load Kalman parameters\nparam_file = np.load('/Users/michael/Documents/brown/kobe/data/kalman_estimates0.npz')\n\nA_est = param_file['A_est']\nS_est = 5*param_file['S_est']\n\n# load neural network parameters\nparam_file = np.load('neural_net_parameters0.npz')\n\nf_hidden1_weights = param_file['f_hidden1_weights']\nf_hidden1_biases = param_file['f_hidden1_biases']\nf_hidden2_weights = param_file['f_hidden2_weights']\nf_hidden2_biases = param_file['f_hidden2_biases']\nf_output_weights = param_file['f_output_weights']\nf_output_biases = param_file['f_output_biases']\n\ncov_est = 0.1*param_file['cov_est']\ncov_est_inv = np.linalg.inv(cov_est)\n\n# gather data\ndata_file = np.load('/Users/michael/Documents/brown/kobe/data/Flint_2012_e1_PCA00.npz')\nall_time = data_file['all_time']\nall_velocities = data_file['all_velocities']\nall_neural = data_file['all_neural']\n\nn_test = 1000\n\nT = int(all_time) - 6\ndel all_time\n\n# instantiate particles and weights\nparticles = np.tile(all_velocities[:, 0], [n_particles,1])\nweights = np.ones((n_particles, 1), dtype=np.double)/n_particles\nobservation = np.zeros((d_neural, 1))\n\n# define a function to resample particles\ndef resample(particles, weights):\n n_particles = particles.shape[0]\n samples = np.random.multinomial(n_particles, weights.flatten())\n indices = np.repeat(np.arange(n_particles), samples)\n new_particles = particles[indices]\n new_weights = np.ones((n_particles, 1))/n_particles\n return new_particles, new_weights\n\n# instantiate structures to store our results\nall_particles = np.zeros((n_test, n_particles, d_velocities))\nall_weights = np.zeros((n_test, n_particles, 1))\nall_est = np.zeros((n_test, d_velocities))\nall_true = np.zeros((n_test, d_velocities))\n\n# loop over observations and run particle filter\nfor t in range(n_test):\n t0 = t + int(2*T / 3)\n # resample particles\n particles, weights = resample(particles, weights)\n # update particle locations\n all_particles[t, :, :] = particles = np.matmul(A_est, particles.T).T + \\\n np.random.multivariate_normal(np.zeros(d_velocities), S_est, n_particles)\n # grab observation\n observation = all_neural[:, t0:t0+6].flatten()[:, None].T\n # update weights\n f_hidden1 = np.minimum(np.maximum(np.matmul(observation, f_hidden1_weights) + f_hidden1_biases, 0), 6)\n f_hidden2 = np.minimum(np.maximum(np.matmul(f_hidden1, f_hidden2_weights) + f_hidden2_biases, 0), 6)\n f_out = np.matmul(f_hidden2, f_output_weights) + f_output_biases\n print(\"f=\", f_out)\n\n diff = f_out - particles\n log_weights = np.zeros((n_particles, 1))\n for p in range(n_particles):\n log_weights[p] = -0.5*np.matmul(np.matmul(diff[p].T, cov_est_inv), diff[p])\n weights = np.exp(log_weights-np.max(np.max(log_weights)))\n all_weights[t, :, :] = weights = weights/np.sum(np.sum(weights))\n print(np.hstack((weights, particles,diff)))\n all_est[t, :] = np.matmul(weights.T, particles)\n all_true[t, :] = all_velocities[:, t0+6]\n\nmse = np.mean(np.mean(np.square(all_true-all_est)))\nprint(mse)\nnp.savez('filter_run0', all_particles=all_particles, all_weights=all_weights, all_est=all_est, all_true=all_true)\n"
},
{
"alpha_fraction": 0.7094395160675049,
"alphanum_fraction": 0.7138643264770508,
"avg_line_length": 36.66666793823242,
"blob_id": "43cfb367e443af15af91ba59e8db4d2b77c6f4ea",
"content_id": "32de19baccbbf4aec0ccc78eda82c90566e6b46c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 678,
"license_type": "no_license",
"max_line_length": 105,
"num_lines": 18,
"path": "/pipeline/setup_filter_cythonized_with_neural_nets.py",
"repo_name": "burkh4rt/brown_kobe_exchange_2016",
"src_encoding": "UTF-8",
"text": "# CC=gcc-6 python setup_filter_cythonized.py build_ext --inplace\n# python3 -c \"import filter_cythonized_with_neural_nets\"\n\n# Cython has its own \"extension builder\" module that knows how\n# to build cython files into python modules.\nfrom distutils.core import setup\nfrom distutils.extension import Extension\nfrom Cython.Distutils import build_ext\n\nimport numpy\n\next = Extension(\"filter_cythonized_with_neural_nets\", sources=[\"filter_cythonized_with_neural_nets.pyx\"],\n include_dirs = [numpy.get_include()],\n extra_compile_args = [\"-fopenmp\", \"-g\", \"-O3\"],\n extra_link_args=[\"-fopenmp\", \"-g\"] )\n\nsetup(ext_modules=[ext],\n cmdclass={'build_ext': build_ext})\n"
},
{
"alpha_fraction": 0.6676923036575317,
"alphanum_fraction": 0.7651281952857971,
"avg_line_length": 47.75,
"blob_id": "ee3496ea4750ee7895cfc485fb6260bf97a73f82",
"content_id": "dcb366b681fcbc9be34615f1c2571d77e3775411",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 975,
"license_type": "no_license",
"max_line_length": 208,
"num_lines": 20,
"path": "/README.md",
"repo_name": "burkh4rt/brown_kobe_exchange_2016",
"src_encoding": "UTF-8",
"text": "# brown_kobe_exchange_2016\n\nOur data comes from the DREAM (Database for Reaching Experiments And Models) project:\n[http://klab.smpp.northwestern.edu/wiki/index.php5/Database_for_Reaching_Experiments_and_Models_(DREAM)](http://klab.smpp.northwestern.edu/wiki/index.php5/Database_for_Reaching_Experiments_and_Models_(DREAM))\n\nThis is hosted by CRCNS (Collaborative Research in Computational NeuroScience):\n[http://crcns.org/data-sets/movements/dream/](http://crcns.org/data-sets/movements/dream/)\n\nTo get our data, go here:\n[https://portal.nersc.gov/project/crcns/download/dream](https://portal.nersc.gov/project/crcns/download/dream)\naccept the terms of use and log in anonymously. Select \"data_sets\" and then \"Flint_2012\". You should find: \nFlint_2012.pdf (952027) \nFlint_2012_e1.mat (365440251) \nFlint_2012_e2.mat (580620783) \nFlint_2012_e3.mat (1095873686) \nFlint_2012_e4.mat (881613651) \nFlint_2012_e5.mat (563731035) \nInfo.txt (1530)\n\nPlease collect them all :)\n"
},
{
"alpha_fraction": 0.619608461856842,
"alphanum_fraction": 0.6467514038085938,
"avg_line_length": 40.895423889160156,
"blob_id": "e832745b05d46d69f81b0472381b2f6d19924a56",
"content_id": "b61eda9191fb41aba9d6c336e91a2415c1c56c79",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 12821,
"license_type": "no_license",
"max_line_length": 122,
"num_lines": 306,
"path": "/alternate_code/m7.py",
"repo_name": "burkh4rt/brown_kobe_exchange_2016",
"src_encoding": "UTF-8",
"text": "import tensorflow as tf\nimport numpy as np\nimport scipy.stats as sp\nfrom math import sqrt\n\n# grab data\nnpzfile = np.load('../Flint_2012_e1_PCA.npz')\nall_time = npzfile['all_time']\nfile_velocities = npzfile['all_velocities']\nfile_neural = npzfile['all_neural']\n\n\"\"\"\ndata is sampled every 0.01s;\nthe paper says the neural data 200-300ms beforehand is most informative\nso we need the previous 30 observations of neural data for each velocity update\n\"\"\"\n\nT = int(all_time) - 30\ndel all_time\n\nd_neural = 30 * file_neural.shape[0]\nd_velocities = file_velocities.shape[0]\n\n#Normalize velocities and neural data\n# mean_deviated_velocities = file_velocities - np.tile(np.reshape(np.mean(file_velocities,1), (d_velocities,1)), (1,T+30))\n# stddev_velocities = np.reshape(np.std(file_velocities, axis=1), (d_velocities,1))\n# all_velocities = np.divide(mean_deviated_velocities, np.tile(stddev_velocities, (1,T+30)))\n\n# mean_deviated_neural = file_neural - np.tile(np.reshape(np.mean(file_neural,1), (d_neural/30,1)), (1,T+30))\n# stddev_neural = np.reshape(np.std(file_neural, axis=1), (d_neural/30,1))\n# all_neural = np.divide(mean_deviated_neural, np.tile(stddev_neural, (1,T+30)))\n\n#all_velocities = all_velocities*10 #Extra normalization factor to scale more easily\n#all_neural = all_neural/10\n\nall_velocities = file_velocities\nall_neural = file_neural\n\ndel file_velocities, file_neural\n#del mean_deviated_velocities, mean_deviated_neural\n\ndef neural(ind):\n neur = np.zeros((ind.size, d_neural))\n for i0 in range(ind.size):\n s_idx = range(ind[i0], ind[i0] + 30)\n neur[i0, :] = all_neural[:, s_idx].flatten()\n return neur\n\n\ndef velocities(ind):\n return all_velocities[:, ind + 29].T\n\n\n\ng1 = tf.Graph() # this graph is for building features\n\n# choice parameters\nd_hid1, d_hid2, d_feat = 100, 50, 3\nd_hid1_feat, d_hid2_feat = 30, 10\n\nactivation_fn = tf.nn.softsign\ntraining_fn =tf.train.FtrlOptimizer\nkeep_prob1 = 0.75\nkeep_prob2 = 0.75\n\n\n# Tell TensorFlow that the model will be built into the default Graph.\nwith g1.as_default():\n # Generate placeholders for the images and labels.\n with tf.name_scope('inputs'):\n neural_ = tf.placeholder(tf.float32, shape=[None, d_neural])\n\n with tf.name_scope('outputs'):\n velocities_ = tf.placeholder(tf.float32, shape=[None, d_velocities])\n\n with tf.name_scope('keep_prob'):\n keep_prob_ = tf.placeholder(\"float\", name=\"keep_probability\")\n\n with tf.name_scope('hidden1'):\n weights = tf.Variable(tf.truncated_normal([d_neural, d_hid1], stddev=1 / sqrt(float(d_hid1))), name='weights')\n biases = tf.Variable(tf.zeros([d_hid1]), name='biases')\n hidden1 = activation_fn(tf.matmul(neural_, weights) + biases)\n tf.histogram_summary('weights1', weights)\n tf.histogram_summary('biases1', biases)\n\n\n with tf.name_scope('dropout1'):\n hidden1_dropped = tf.nn.dropout(hidden1, keep_prob_)\n\n with tf.name_scope('hidden2'):\n weights = tf.Variable(tf.truncated_normal([d_hid1, d_hid2], stddev=1 / sqrt(float(d_hid2))), name='weights')\n biases = tf.Variable(tf.zeros([d_hid2]), name='biases')\n hidden2 = activation_fn(tf.matmul(hidden1_dropped, weights) + biases)\n tf.histogram_summary('weights2', weights)\n tf.histogram_summary('biases2', biases)\n\n with tf.name_scope('dropout2'):\n hidden2_dropped = tf.nn.dropout(hidden2, keep_prob_)\n\n with tf.name_scope('hidden3'):\n weights = tf.Variable(tf.truncated_normal([d_hid2, d_feat], stddev=1 / sqrt(float(d_feat))), name='weights')\n biases = tf.Variable(tf.zeros([d_feat]), 
name='biases')\n features = activation_fn(tf.matmul(hidden2_dropped, weights) + biases)\n tf.histogram_summary('weights3', weights)\n tf.histogram_summary('biases3', biases)\n tf.histogram_summary('features', features)\n\n with tf.name_scope('output'):\n weights = tf.Variable(tf.truncated_normal([d_feat, d_velocities], stddev=1 / sqrt(float(d_velocities))),\n name='weights')\n biases = tf.Variable(tf.zeros([d_velocities]), name='biases')\n outputs = tf.matmul(features, weights) + biases\n tf.histogram_summary('weights4', weights)\n tf.histogram_summary('biases4', biases)\n tf.histogram_summary('errors1', outputs - velocities_)\n\n with tf.name_scope('loss'):\n loss = tf.reduce_mean(tf.squared_difference(outputs, velocities_), name='mse')\n tf.histogram_summary('loss', loss)\n\n optimizer = training_fn(0.001)\n # optimizer = tf.train.RMSPropOptimizer(0.1)\n\n # train_op = optimizer.minimize(loss)\n train_op = optimizer.minimize(loss)\n\n with tf.name_scope('validation'):\n val_op = tf.reduce_mean(tf.squared_difference(outputs, velocities_))\n tf.scalar_summary('validation', val_op)\n\n # Build the summary operation based on the TF collection of Summaries.\n summary_op = tf.merge_all_summaries()\n\n # Add the variable initializer Op.\n init = tf.initialize_all_variables()\n\n # Create a saver for writing training checkpoints.\n saver = tf.train.Saver(sharded=True)\n\n # Create a session for training g1\n sess1 = tf.Session(graph=g1)\n\n # Instantiate a SummaryWriter to output summaries and the Graph.\n summary_writer = tf.train.SummaryWriter('../writers/1', sess1.graph)\n\t\n tf.train.write_graph(g1.as_graph_def(), '../writers/1', 'g1.pbtxt') \n\n # Run the Op to initialize the variables.\n sess1.run(init)\n\n # training 1\n for i in range(301):\n # randomly grab a training set\n idx_tr = np.random.choice(T-20000, 1000, replace=False)\n idx_te = np.random.choice(20000, 1000, replace=False) + T - 20000\n\t\t\n if i % 50 == 0: # every 10th step we run our validation step to see how we're doing\n f_dict = {neural_: neural(idx_te), velocities_: velocities(idx_te), keep_prob_:keep_prob1}\n [summary, vali, feai] = sess1.run([summary_op, val_op, features], feed_dict=f_dict)\n summary_writer.add_summary(summary, i)\n print('Accuracy at step %s: %s' % (i, vali))\n save_path = saver.save(sess1, \"../writers/1/model.ckpt\")\n print(\"Model saved in file: %s\" % save_path)\n print(\"Features are: %s\" % feai)\n else: # if we're not on a 10th step then we do a regular training step\n f_dict = {neural_: neural(idx_tr), velocities_: velocities(idx_tr), keep_prob_: keep_prob1}\n [summary, _] = sess1.run([summary_op, train_op], feed_dict=f_dict)\n summary_writer.add_summary(summary, i)\n\n learned_variables = tf.all_variables()\n\ng2 = tf.Graph() # this graph is for mapping velocities to neural via features\n\n# Tell TensorFlow that the model will be built into the default Graph.\nwith g2.as_default():\n # Generate placeholders for the images and labels.\n with tf.name_scope('inputs'):\n neural_ = tf.placeholder(tf.float32, shape=[None, d_neural])\n\n with tf.name_scope('outputs'):\n velocities_ = tf.placeholder(tf.float32, shape=[None, d_velocities])\n\n with tf.name_scope('keep_prob'):\n keep_prob_ = tf.placeholder(\"float\", name=\"keep_probability\")\n\n with tf.name_scope('feature_injection'):\n with tf.name_scope('hidden1'):\n weights = tf.constant(sess1.run('hidden1/weights:0'))\n biases = tf.constant(sess1.run('hidden1/biases:0'))\n hidden1 = activation_fn(tf.matmul(neural_, weights) + biases)\n\n 
with tf.name_scope('hidden2'):\n weights = tf.constant(sess1.run('hidden2/weights:0'))\n biases = tf.constant(sess1.run('hidden2/biases:0'))\n hidden2 = activation_fn(tf.matmul(hidden1, weights) + biases)\n\n with tf.name_scope('features'):\n weights = tf.constant(sess1.run('hidden3/weights:0'))\n biases = tf.constant(sess1.run('hidden3/biases:0'))\n features = tf.matmul(hidden2, weights) + biases\n\n with tf.name_scope('map_to_features'):\n\n with tf.name_scope('hidden1'):\n weights = tf.Variable(tf.truncated_normal([d_velocities, d_hid1_feat],\n stddev=1 / sqrt(float(d_hid1_feat))), name='weights')\n biases = tf.Variable(tf.zeros([d_hid1_feat]), name='biases')\n hidden1 = activation_fn(tf.matmul(velocities_, weights) + biases)\n tf.histogram_summary('weights1', weights)\n tf.histogram_summary('biases1', biases)\n\n with tf.name_scope('dropout1'):\n hidden1_dropped = tf.nn.dropout(hidden1, keep_prob_)\n\n with tf.name_scope('hidden2'):\n weights = tf.Variable(tf.truncated_normal([d_hid1_feat, d_hid2_feat],\n stddev=1 / sqrt(float(d_hid2_feat))), name='weights')\n biases = tf.Variable(tf.zeros([d_hid2_feat]), name='biases')\n hidden2 = activation_fn(tf.matmul(hidden1_dropped, weights) + biases)\n tf.histogram_summary('weights2', weights)\n tf.histogram_summary('biases2', biases)\n\n with tf.name_scope('output'):\n weights = tf.Variable(tf.truncated_normal([d_hid2_feat, d_feat], stddev=1 / sqrt(float(d_feat))),\n name='weights')\n biases = tf.Variable(tf.zeros([d_feat]), name='biases')\n outputs = tf.matmul(hidden2, weights) + biases\n tf.histogram_summary('weights3', weights)\n tf.histogram_summary('biases3', biases)\n tf.histogram_summary('errors2', outputs - features)\n\n with tf.name_scope('loss'):\n loss = tf.reduce_mean(tf.squared_difference(outputs, features), name='mse')\n tf.histogram_summary('loss', loss)\n\n optimizer = training_fn(0.01)\n train_op = optimizer.minimize(loss)\n\n with tf.name_scope('validation'):\n val_op = tf.reduce_mean(tf.squared_difference(outputs, features))\n tf.scalar_summary('validation', val_op)\n\n # Build the summary operation based on the TF collection of Summaries.\n summary_op = tf.merge_all_summaries()\n\n # Add the variable initializer Op.\n init = tf.initialize_all_variables()\n\n # Create a saver for writing training checkpoints.\n saver = tf.train.Saver(sharded=True)\n\n # Create a session for training g1\n sess2 = tf.Session(graph=g2)\n\n # Instantiate a SummaryWriter to output summaries and the Graph.\n summary_writer = tf.train.SummaryWriter('../writers/2', sess2.graph)\n\n tf.train.write_graph(g2.as_graph_def(), '../writers/2', 'g2.pbtxt') \n\t\n # Run the Op to initialize the variables.\n sess2.run(init)\n\n # training 2\n for i in range(301):\n # randomly grab a training set\n idx_tr = np.random.choice(T - 20000, 10000, replace=False)\n \tidx_te = np.random.choice(20000, 10000, replace=False) + T - 20000\n if i % 50 == 0: # every 10th step we run our validation step to see how we're doing\n f_dict = {neural_: neural(idx_te), velocities_: velocities(idx_te), keep_prob_: keep_prob2}\n [summary, vali] = sess2.run([summary_op, val_op], feed_dict=f_dict)\n summary_writer.add_summary(summary, i)\n print('Accuracy at step %s: %s' % (i, vali))\n save_path = saver.save(sess2, \"../writers/2/model.ckpt\")\n print(\"Model saved in file: %s\" % save_path)\n else: # if we're not on a 10th step then we do a regular training step\n f_dict = {neural_: neural(idx_tr), velocities_: velocities(idx_tr), keep_prob_: keep_prob2}\n [summary, _] = 
sess2.run([summary_op, train_op], feed_dict=f_dict)\n summary_writer.add_summary(summary, i)\n\n# collect model parameters\nf_hidden1_weights = sess1.run('hidden1/weights:0')\nf_hidden1_biases = sess1.run('hidden1/biases:0')\nf_hidden2_weights = sess1.run('hidden2/weights:0')\nf_hidden2_biases = sess1.run('hidden2/biases:0')\nf_hidden3_weights = sess1.run('hidden3/weights:0')\nf_hidden3_biases = sess1.run('hidden3/biases:0')\n\ng_hidden1_weights = sess2.run('map_to_features/hidden1/weights:0')\ng_hidden1_biases = sess2.run('map_to_features/hidden1/biases:0')\ng_hidden2_weights = sess2.run('map_to_features/hidden2/weights:0')\ng_hidden2_biases = sess2.run('map_to_features/hidden2/biases:0')\n\n# save model parameters\nnp.savez('neural_net_parameters', f_hidden1_weights=f_hidden1_weights, f_hidden1_biases=f_hidden1_biases,\n f_hidden2_weights=f_hidden2_weights, f_hidden2_biases=f_hidden2_biases,\n f_hidden3_weights=f_hidden3_weights, f_hidden3_biases=f_hidden3_biases,\n g_hidden1_weights=g_hidden1_weights, g_hidden1_biases=g_hidden1_biases,\n g_hidden2_weights=g_hidden2_weights, g_hidden2_biases=g_hidden2_biases)\n\n\n\n\"\"\"\nlook at output with:\ntensorboard --logdir=../writers/1\ntensorboard --logdir=../writers/2\n\"\"\"\n\n"
},
{
"alpha_fraction": 0.6325483918190002,
"alphanum_fraction": 0.6432734727859497,
"avg_line_length": 30.306570053100586,
"blob_id": "94e5d84999d5bac0c61c5450f18c999a2dffe198",
"content_id": "e78f1ee390d55fd0b0128de89cb3469926533f0e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4289,
"license_type": "no_license",
"max_line_length": 116,
"num_lines": 137,
"path": "/mpi_filter_alt_vers.py",
"repo_name": "burkh4rt/brown_kobe_exchange_2016",
"src_encoding": "UTF-8",
"text": "\"\"\"\nrun with:\nmpiexec -n 4 python3 mpi_filter_alt_vers.py\n\"\"\"\n\nfrom mpi4py import MPI\n#import tensorflow as tf\nimport numpy as np\nimport os\n\n# instantiate MPI communicator\ncomm = MPI.COMM_WORLD\nrank = comm.Get_rank()\nsize = comm.Get_size()\n\n# describe our data\nn_test = 500\nn_steps = 30\n\nd_neural = 600\nd_velocities = 2\nnpt = 100\n\n# move to where data is\nos.chdir('.')\n\n# load Kalman parameters\nparam_file = np.load('kalman_estimates.npz')\nA_est = param_file['A_est']\nS_est = param_file['S_est']\nC_est = param_file['C_est']\nQ_est = param_file['Q_est']\nQ_estinv = np.linalg.inv(Q_est)\n\n# grab data on root process\nif rank == 0:\n # grab data\n npzfile = np.load('Flint_2012_e1_PCA.npz')\n all_time = npzfile['all_time']\n all_velocities = npzfile['all_velocities']\n all_neural = npzfile['all_neural']\n\n T = int(all_time) - 30\n del all_time\n\n def neural(ind):\n neur = np.zeros((ind.size, d_neural))\n for i0 in range(ind.size):\n s_idx = range(ind[i0], ind[i0] + 30)\n neur[i0, :] = all_neural[:, s_idx].flatten()\n return neur\n\n def velocities(ind):\n return all_velocities[:, ind + 29].T\n\n# initialize particles\nparticles = None\nweights = None\nparticles_weights = None\nwide_particles_weights = None\nparticle = np.zeros(d_velocities*npt)\nweight = np.ones(npt)\nlog_weight = np.zeros(npt)\nparticle_weight = np.hstack((particle, weight))\n\nobservation = np.zeros(d_neural)\n\nif rank == 0:\n particles = np.random.multivariate_normal(np.zeros(d_velocities), S_est, size*npt)\n weights = np.ones((size*npt, 1))/(size*npt)\n particles_weights = np.hstack((particles, weights))\n\n# store data\nif rank == 0:\t\t\n all_particles = np.empty([n_test, npt*size, d_velocities])\t\t\n all_weights = np.empty([n_test, npt*size, 1])\t\t\n all_true = np.empty([n_test, d_velocities])\t\t\n all_est = np.empty([n_test, d_velocities])\n\nfor t in range(n_test):\n if rank == 0:\n # resampling step\n samples = np.random.multinomial(size*npt, weights.flatten())\n indices = np.repeat(np.arange(size*npt), samples.flatten())\n particles = particles[indices]\n weights = np.ones((size*npt, 1)) / (size*npt)\n particles_weights = np.hstack((particles, weights))\n # grab new observation\n observation = neural(np.arange(1) + t)\n\n #TODO: TEST\n wide_particles = particles.reshape((size,d_velocities*npt))\n wide_weights = weights.reshape((size,npt))\n wide_particles_weights = np.hstack((wide_particles,wide_weights))\n\n # send out the particles and observation to different processes\n comm.Scatter(wide_particles_weights, particle_weight, root=0)\n comm.Bcast(observation, root=0)\n \n # unpack particles and weights\n particle = particle_weight[:d_velocities*npt, ]\n weight = particle_weight[d_velocities*npt:, ]\n\n\n #particles subset is set of particles handled by thread\n particle_subset = particle.reshape((npt,d_velocities))\n\n # update particle weight on individual processes\n for i in range(npt):\n particle_subset[i,:] = np.random.multivariate_normal(np.matmul(particle_subset[i,:],A_est.T), S_est)\n diff = np.matmul(C_est, particle_subset[i,:].T).flatten() - observation.flatten()\n log_weight[i] = -0.5 * np.matmul(np.matmul(diff.T, Q_estinv), diff)\n\n particle_weight = np.hstack((particle, log_weight))\n\n comm.Barrier()\n\n # return particle weights to root\n comm.Gather(particle_weight, wide_particles_weights)\n\n if rank == 0:\n particles = wide_particles_weights[:, :(d_velocities*npt)]\n particles = particles.reshape((size*npt, d_velocities))\n log_weights = 
wide_particles_weights[:, (npt*d_velocities):]\n log_weights = log_weights.reshape((npt*size),1)\n weights = np.exp(log_weights - np.max(log_weights))\n weights = weights / np.sum(weights)\n estimate = np.matmul(weights.T, particles)\n\n all_particles[t, :, :] = particles\n all_weights[t, :, :] = weights\n all_est[t, :] = estimate\n all_true[t, :] = true = velocities(np.arange(1) + t)\n print('est=', estimate, 'true=', true)\t\t\n\t\nif rank == 0:\t\t\n np.savez('filter_run', all_particles=all_particles, all_weights=all_weights, all_est=all_est, all_true=all_true)\n"
},
{
"alpha_fraction": 0.7886598110198975,
"alphanum_fraction": 0.7886598110198975,
"avg_line_length": 47.5,
"blob_id": "12a99af0bb58e06e106c7913c753cccd56c2ace4",
"content_id": "54788ac05198bd5b48ddf74a75d4e746d6406669",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 194,
"license_type": "no_license",
"max_line_length": 96,
"num_lines": 4,
"path": "/presentation/README.txt",
"repo_name": "burkh4rt/brown_kobe_exchange_2016",
"src_encoding": "UTF-8",
"text": "We can use pygments (https://pygments.org) for code highlighting in LaTeX.\n\npip install Pygments\npygmentize -f latex -l cython -o filter_cythonized.tex -O full,style=emacs filter_cythonized.pyx\n"
},
{
"alpha_fraction": 0.6829268336296082,
"alphanum_fraction": 0.6829268336296082,
"avg_line_length": 19.5,
"blob_id": "847b77804d361c658137f936b0afa50c194caef8",
"content_id": "974094b5865a080beb71a40e778d2bec3477dd02",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 246,
"license_type": "no_license",
"max_line_length": 44,
"num_lines": 12,
"path": "/load_A_and_S.py",
"repo_name": "burkh4rt/brown_kobe_exchange_2016",
"src_encoding": "UTF-8",
"text": "import numpy as np\n\n# load Kalman parameters\nparam_file = np.load('kalman_estimates.npz')\n\n# for location update\nA_est = param_file['A_est']\nS_est = param_file['S_est']\n\n# for weight update\nC_est = param_file['C_est']\nQ_est = param_file['Q_est']\n"
},
{
"alpha_fraction": 0.6221542954444885,
"alphanum_fraction": 0.6458048224449158,
"avg_line_length": 42.77112579345703,
"blob_id": "bd877ba03a2abf1f4f69a982a07fadeaa0bb20ba",
"content_id": "33c19d45adb5ca232ab029036b4eeb662a79599c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 12431,
"license_type": "no_license",
"max_line_length": 118,
"num_lines": 284,
"path": "/pipeline/learn_parameters.py",
"repo_name": "burkh4rt/brown_kobe_exchange_2016",
"src_encoding": "UTF-8",
"text": "import tensorflow as tf\nimport numpy as np\nfrom math import sqrt\n\n# grab data\nnpzfile = np.load('/Users/michael/Documents/brown/kobe/data/Flint_2012_e1_PCA.npz')\nall_time = npzfile['all_time']\nall_velocities = npzfile['all_velocities']\nall_neural = npzfile['all_neural']\n\n\"\"\"\ndata is sampled every 0.01s;\nthe paper says the neural data 200-300ms beforehand is most informative\nso we need the previous 30 observations of neural data for each velocity update\n\"\"\"\n\nT = int(all_time) - 30\ndel all_time\n\nd_neural = 30 * all_neural.shape[0]\nd_velocities = all_velocities.shape[0]\n\n\ndef neural(ind):\n neur = np.zeros((ind.size, d_neural))\n for i0 in range(ind.size):\n s_idx = range(ind[i0], ind[i0] + 30)\n neur[i0, :] = all_neural[:, s_idx].flatten()\n return neur\n\n\ndef velocities(ind):\n return all_velocities[:, ind + 29].T\n\n\ng1 = tf.Graph() # this graph is for building features\n\nd_hid1, d_hid2, d_feat = 100, 50, 2\nd_hid1_feat, d_hid2_feat = 30, 10\n\n# Tell TensorFlow that the model will be built into the default Graph.\nwith g1.as_default():\n # Generate placeholders for the images and labels.\n with tf.name_scope('inputs'):\n neural_ = tf.placeholder(tf.float32, shape=[None, d_neural])\n\n with tf.name_scope('outputs'):\n velocities_ = tf.placeholder(tf.float32, shape=[None, d_velocities])\n\n with tf.name_scope('keep_prob'):\n keep_prob_ = tf.placeholder(\"float\", name=\"keep_probability\")\n\n with tf.name_scope('hidden1'):\n weights = tf.Variable(tf.truncated_normal([d_neural, d_hid1], stddev=1 / sqrt(float(d_hid1))), name='weights')\n biases = tf.Variable(tf.zeros([d_hid1]), name='biases')\n hidden1 = tf.nn.relu6(tf.matmul(neural_, weights) + biases)\n tf.histogram_summary('weights1', weights)\n tf.histogram_summary('biases1', biases)\n\n with tf.name_scope('dropout1'):\n hidden1_dropped = tf.nn.dropout(hidden1, keep_prob_)\n\n with tf.name_scope('hidden2'):\n weights = tf.Variable(tf.truncated_normal([d_hid1, d_hid2], stddev=1 / sqrt(float(d_hid2))), name='weights')\n biases = tf.Variable(tf.zeros([d_hid2]), name='biases')\n hidden2 = tf.nn.relu6(tf.matmul(hidden1_dropped, weights) + biases)\n tf.histogram_summary('weights2', weights)\n tf.histogram_summary('biases2', biases)\n\n with tf.name_scope('dropout2'):\n hidden2_dropped = tf.nn.dropout(hidden2, keep_prob_)\n\n with tf.name_scope('hidden3'):\n weights = tf.Variable(tf.truncated_normal([d_hid2, d_feat], stddev=1 / sqrt(float(d_feat))), name='weights')\n biases = tf.Variable(tf.zeros([d_feat]), name='biases')\n features = tf.nn.relu6(tf.matmul(hidden2_dropped, weights) + biases)\n tf.histogram_summary('weights3', weights)\n tf.histogram_summary('biases3', biases)\n tf.histogram_summary('features', features)\n\n with tf.name_scope('output'):\n weights = tf.Variable(tf.truncated_normal([d_feat, d_velocities], stddev=1 / sqrt(float(d_velocities))),\n name='weights')\n biases = tf.Variable(tf.zeros([d_velocities]), name='biases')\n outputs = tf.matmul(features, weights) + biases\n tf.histogram_summary('weights4', weights)\n tf.histogram_summary('biases4', biases)\n tf.histogram_summary('errors1', outputs - velocities_)\n\n with tf.name_scope('loss'):\n loss = tf.reduce_mean(tf.squared_difference(outputs, velocities_), name='mse')\n tf.histogram_summary('loss', loss)\n\n optimizer = tf.train.AdagradOptimizer(0.001)\n # optimizer = tf.train.RMSPropOptimizer(0.1)\n\n # train_op = optimizer.minimize(loss)\n train_op = optimizer.minimize(loss)\n\n with tf.name_scope('validation'):\n val_op = 
tf.reduce_mean(tf.squared_difference(outputs, velocities_))\n tf.scalar_summary('validation', val_op)\n\n # Build the summary operation based on the TF collection of Summaries.\n summary_op = tf.merge_all_summaries()\n\n # Add the variable initializer Op.\n init = tf.initialize_all_variables()\n\n # Create a saver for writing training checkpoints.\n saver = tf.train.Saver(sharded=True)\n\n # Create a session for training g1\n sess1 = tf.Session(graph=g1)\n\n # Instantiate a SummaryWriter to output summaries and the Graph.\n summary_writer = tf.train.SummaryWriter('/Users/michael/Documents/brown/kobe/data/writers/1', sess1.graph)\n\n tf.train.write_graph(g1.as_graph_def(), '/Users/michael/Documents/brown/kobe/data/writers/1', 'g1.pbtxt')\n\n # Run the Op to initialize the variables.\n sess1.run(init)\n\n # training 1\n for i in range(101):\n # randomly grab a training set\n idx = np.random.choice(range(int(T / 4)), 5000, replace=False)\n if i % 10 == 0: # every 10th step we run our validation step to see how we're doing\n f_dict = {neural_: neural(idx), velocities_: velocities(idx), keep_prob_: 1}\n [summary, vali] = sess1.run([summary_op, val_op], feed_dict=f_dict)\n summary_writer.add_summary(summary, i)\n print('Accuracy at step %s: %s' % (i, vali))\n save_path = saver.save(sess1, \"/Users/michael/Documents/brown/kobe/data/writers/1/model.ckpt\")\n print(\"Model saved in file: %s\" % save_path)\n else: # if we're not on a 10th step then we do a regular training step\n f_dict = {neural_: neural(idx), velocities_: velocities(idx), keep_prob_: 0.5}\n [summary, _] = sess1.run([summary_op, train_op], feed_dict=f_dict)\n summary_writer.add_summary(summary, i)\n\n learned_variables = tf.all_variables()\n\ng2 = tf.Graph() # this graph is for mapping velocities to neural via features\n\n# Tell TensorFlow that the model will be built into the default Graph.\nwith g2.as_default():\n # Generate placeholders for the images and labels.\n with tf.name_scope('inputs'):\n neural_ = tf.placeholder(tf.float32, shape=[None, d_neural])\n\n with tf.name_scope('outputs'):\n velocities_ = tf.placeholder(tf.float32, shape=[None, d_velocities])\n\n with tf.name_scope('keep_prob'):\n keep_prob_ = tf.placeholder(\"float\", name=\"keep_probability\")\n\n with tf.name_scope('feature_injection'):\n with tf.name_scope('hidden1'):\n weights = tf.constant(sess1.run('hidden1/weights:0'))\n biases = tf.constant(sess1.run('hidden1/biases:0'))\n hidden1 = tf.nn.relu6(tf.matmul(neural_, weights) + biases)\n\n with tf.name_scope('hidden2'):\n weights = tf.constant(sess1.run('hidden2/weights:0'))\n biases = tf.constant(sess1.run('hidden2/biases:0'))\n hidden2 = tf.nn.relu6(tf.matmul(hidden1, weights) + biases)\n\n with tf.name_scope('features'):\n weights = tf.constant(sess1.run('hidden3/weights:0'))\n biases = tf.constant(sess1.run('hidden3/biases:0'))\n features = tf.matmul(hidden2, weights) + biases\n\n with tf.name_scope('map_to_features'):\n\n with tf.name_scope('hidden1'):\n weights = tf.Variable(tf.truncated_normal([d_velocities, d_hid1_feat],\n stddev=1 / sqrt(float(d_hid1_feat))), name='weights')\n biases = tf.Variable(tf.zeros([d_hid1_feat]), name='biases')\n hidden1 = tf.nn.relu6(tf.matmul(velocities_, weights) + biases)\n tf.histogram_summary('weights1', weights)\n tf.histogram_summary('biases1', biases)\n\n with tf.name_scope('dropout1'):\n hidden1_dropped = tf.nn.dropout(hidden1, keep_prob_)\n\n with tf.name_scope('hidden2'):\n weights = tf.Variable(tf.truncated_normal([d_hid1_feat, d_hid2_feat],\n stddev=1 / 
sqrt(float(d_hid2_feat))), name='weights')\n biases = tf.Variable(tf.zeros([d_hid2_feat]), name='biases')\n hidden2 = tf.nn.relu6(tf.matmul(hidden1_dropped, weights) + biases)\n tf.histogram_summary('weights2', weights)\n tf.histogram_summary('biases2', biases)\n\n with tf.name_scope('output'):\n weights = tf.Variable(tf.truncated_normal([d_hid2_feat, d_feat], stddev=1 / sqrt(float(d_feat))),\n name='weights')\n biases = tf.Variable(tf.zeros([d_feat]), name='biases')\n outputs = tf.matmul(hidden2, weights) + biases\n tf.histogram_summary('weights3', weights)\n tf.histogram_summary('biases3', biases)\n tf.histogram_summary('errors2', outputs - features)\n\n with tf.name_scope('loss'):\n loss = tf.reduce_mean(tf.squared_difference(outputs, features), name='mse')\n tf.histogram_summary('loss', loss)\n\n optimizer = tf.train.AdagradOptimizer(0.01)\n # train_op = optimizer.minimize(loss)\n train_op = optimizer.minimize(loss)\n\n with tf.name_scope('validation'):\n val_op = tf.reduce_mean(tf.squared_difference(outputs, features))\n tf.scalar_summary('validation', val_op)\n\n # Build the summary operation based on the TF collection of Summaries.\n summary_op = tf.merge_all_summaries()\n\n # Add the variable initializer Op.\n init = tf.initialize_all_variables()\n\n # Create a saver for writing training checkpoints.\n saver = tf.train.Saver(sharded=True)\n\n # Create a session for training g1\n sess2 = tf.Session(graph=g2)\n\n # Instantiate a SummaryWriter to output summaries and the Graph.\n summary_writer = tf.train.SummaryWriter('/Users/michael/Documents/brown/kobe/data/writers/2', sess2.graph)\n\n tf.train.write_graph(g2.as_graph_def(), '/Users/michael/Documents/brown/kobe/data/writers/2', 'g2.pbtxt')\n\n # Run the Op to initialize the variables.\n sess2.run(init)\n\n # training 2\n for i in range(301):\n # randomly grab a training set\n idx = np.random.choice(range(int(T / 4), int(T / 2)), 5000, replace=False)\n if i % 10 == 0: # every 10th step we run our validation step to see how we're doing\n f_dict = {neural_: neural(idx), velocities_: velocities(idx), keep_prob_: 1}\n [summary, vali] = sess2.run([summary_op, val_op], feed_dict=f_dict)\n summary_writer.add_summary(summary, i)\n print('Accuracy at step %s: %s' % (i, vali))\n save_path = saver.save(sess2, \"/Users/michael/Documents/brown/kobe/data/writers/2/model.ckpt\")\n print(\"Model saved in file: %s\" % save_path)\n else: # if we're not on a 10th step then we do a regular training step\n f_dict = {neural_: neural(idx), velocities_: velocities(idx), keep_prob_: 0.75}\n [summary, _] = sess2.run([summary_op, train_op], feed_dict=f_dict)\n summary_writer.add_summary(summary, i)\n\n # estimate error\n idx = np.arange(int(T / 2), int(3 * T / 4))\n f_dict = {neural_: neural(idx), velocities_: velocities(idx), keep_prob_: 1}\n [g_vel, f_neur] = sess2.run([outputs, features], feed_dict=f_dict)\n cov_est = np.cov((g_vel-f_neur).T)\n\n\n# collect model parameters\nf_hidden1_weights = sess1.run('hidden1/weights:0')\nf_hidden1_biases = sess1.run('hidden1/biases:0')\nf_hidden2_weights = sess1.run('hidden2/weights:0')\nf_hidden2_biases = sess1.run('hidden2/biases:0')\nf_hidden3_weights = sess1.run('hidden3/weights:0')\nf_hidden3_biases = sess1.run('hidden3/biases:0')\n\ng_hidden1_weights = sess2.run('map_to_features/hidden1/weights:0')\ng_hidden1_biases = sess2.run('map_to_features/hidden1/biases:0')\ng_hidden2_weights = sess2.run('map_to_features/hidden2/weights:0')\ng_hidden2_biases = sess2.run('map_to_features/hidden2/biases:0')\ng_output_weights 
= sess2.run('map_to_features/output/weights:0')\ng_output_biases = sess2.run('map_to_features/output/biases:0')\n\n# save model parameters\nnp.savez('neural_net_parameters', f_hidden1_weights=f_hidden1_weights, f_hidden1_biases=f_hidden1_biases,\n f_hidden2_weights=f_hidden2_weights, f_hidden2_biases=f_hidden2_biases,\n f_hidden3_weights=f_hidden3_weights, f_hidden3_biases=f_hidden3_biases,\n g_hidden1_weights=g_hidden1_weights, g_hidden1_biases=g_hidden1_biases,\n g_hidden2_weights=g_hidden2_weights, g_hidden2_biases=g_hidden2_biases,\n g_output_weights=g_output_weights, g_output_biases=g_output_biases, cov_est=cov_est)\n\n\"\"\"\nlook at output with:\ntensorboard --logdir=/Users/michael/Documents/brown/kobe/data/writers/1\ntensorboard --logdir=/Users/michael/Documents/brown/kobe/data/writers/2\n\"\"\"\n"
}
] | 25 |
Akzwar/Matrix
|
https://github.com/Akzwar/Matrix
|
25662dd361f6992a22062e681efa563c2d622fb4
|
288ee94820f1409f336f30bdceac92f85231de72
|
110500668f74f594102fdab19a359512355a769f
|
refs/heads/master
| 2021-01-23T07:34:18.179671 | 2012-06-12T23:01:19 | 2012-06-12T23:01:19 | null | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6436170339584351,
"alphanum_fraction": 0.6489361524581909,
"avg_line_length": 36.599998474121094,
"blob_id": "f30c30cc4be5725f0daba37659f9e42b56bcd199",
"content_id": "6cfa85c338921c5670992ae3fcf79e63f23c938d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 188,
"license_type": "no_license",
"max_line_length": 54,
"num_lines": 5,
"path": "/SConstruct",
"repo_name": "Akzwar/Matrix",
"src_encoding": "UTF-8",
"text": "MT = Environment()\nMT.VariantDir( \"obj\", \"src\", duplicate = 0 )\nsources = Glob(\"obj/*.cpp\")\nobj_list = MT.Object( source = sources )\nMT.Program ( target = \"bin/MTest\", source = obj_list )\n"
},
{
"alpha_fraction": 0.5501269102096558,
"alphanum_fraction": 0.557106614112854,
"avg_line_length": 21.514286041259766,
"blob_id": "5728cfbf361aa7479b0cf3745c620ff7e0bd572a",
"content_id": "7c086d7d67af008ffd82b07bed9a55d7e8f9026b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1576,
"license_type": "no_license",
"max_line_length": 70,
"num_lines": 70,
"path": "/src/matrix.h",
"repo_name": "Akzwar/Matrix",
"src_encoding": "UTF-8",
"text": "#include \"../../VectTest/src/Vect.h\"\n\nusing namespace std;\n\nclass Matrix\n{\n private:\n\tvector<Vect> data;\n public:\n\tMatrix(){};\n\tMatrix( int n, int m )\n\t{\n\t\tVect Row(n);\n\t\tfor( int i = 0; i < m; i++ )\n\t\t\tdata.push_back(Row);\n\t}\n\t//Matrix( int n ) : Matrix( n, n ){};\n\t\n\tVect& operator [] ( int index )\n\t{\n\t\treturn data[index];\n\t}\n\t\n\tint getWidth() const\n\t{\n\t\treturn this->data[0].size();\n\t}\n\t\n\tint getHeight() const\n\t{\n\t\treturn this->data.size();\n\t}\n\n\tvoid Transpose()\n\t{\n\t\tvector<Vect> resdata( this->getWidth(), Vect( this->getHeight() ) );\n\t\tfor( int i = 0; i < this->getWidth(); i++ )\n\t\t\tfor( int j = 0; j < this->getHeight(); j++ )\n\t\t\tresdata[i][j] = this->data[j][i];\n\t\tthis->data = resdata; \n\t}\n\t\n\tconst Matrix operator * ( long double Cross ) \n\t{\n\t\tMatrix resMatrix( this->getWidth(), this->getHeight() );\n\t\tfor( int i = 0; i < this->getWidth(); i++ )\n\t\t\tfor( int j = 0;\tj < this->getHeight(); j++ )\n\t\t\tresMatrix[i][j] = this->data[i][j] * Cross;\n\t\treturn resMatrix;\n\t}\n\t\n\tconst Matrix operator + ( Matrix& M )\n\t{\t\n\t\tMatrix resMatrix( this->getWidth(), this->getHeight() );\n\t\tfor( int i = 0; i < this->getWidth(); i++ )\n\t\t\tfor( int j = 0;\tj < this->getHeight(); j++ )\n\t\t\tresMatrix[i][j] = this->data[i][j] + M.data[i][j];\t\n\t\treturn resMatrix;\t\n\t}\n\n\tconst Matrix operator * ( Matrix& M )\n\t{\t\n\t\tMatrix resMatrix( this->getWidth(), M.getHeight() );\n\t\tfor( int i = 0; i < this->getWidth(); i++ )\n\t\t\tfor( int j = 0;\tj < M.getHeight(); j++ )\n\t\t\t\tfor( int k = 0; k < this->getHeight(); k++ )\n\t\t\t\t\tresMatrix[i][j] += this->data[i][k] * M.data[k][j];\n\t\treturn resMatrix;\n\t}\t\n};\n"
}
] | 2 |
berndpfrommer/ISY-Homie-Bridge
|
https://github.com/berndpfrommer/ISY-Homie-Bridge
|
8eb5f22c221266a1a57a8e81524bb6f66fec2a4e
|
2bb952e5bfc07cb85e961654963c2f4e5e962aec
|
3f49317642d480177851f39363049cdca8acdac0
|
refs/heads/master
| 2020-09-22T03:53:54.995440 | 2019-10-26T21:08:07 | 2019-10-26T21:08:07 | null | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6443478465080261,
"alphanum_fraction": 0.6443478465080261,
"avg_line_length": 30.94444465637207,
"blob_id": "3e1085e63a0dbafe847304ef04451c652569f467",
"content_id": "4a6603e1a461a4a1db00f80879517002b6764974",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1150,
"license_type": "permissive",
"max_line_length": 112,
"num_lines": 36,
"path": "/isy_homie/devices/dimmer.py",
"repo_name": "berndpfrommer/ISY-Homie-Bridge",
"src_encoding": "UTF-8",
"text": "#! /usr/bin/env python\n\nfrom homie.device_dimmer import Device_Dimmer\nfrom .base import Base\n\nfrom homie.node.property.property_string import Property_String\n\nclass Dimmer(Base,Device_Dimmer):\n\n def __init__(self, isy_device=None,homie_settings=None, mqtt_settings=None):\n\n Base.__init__ (self,isy_device)\n\n Device_Dimmer.__init__ (self,self.get_homie_device_id(), isy_device.name, homie_settings, mqtt_settings)\n \n node = self.get_node ('dimmer')\n self.paddle = Property_String(node,'paddle-action','Paddle Action')\n node.add_property(self.paddle)\n\n level = self.isy_device.get_property('level')\n if level is not None:\n self.property_change('level',level)\n\n def get_homie_device_id (self):\n return 'dimmer-' + Base.get_homie_device_id(self)\n\n def property_change(self,property_,value):\n if property_ == 'level':\n self.update_dimmer(value)\n elif property_ == 'paddle_action':\n self.paddle.value = value\n\n Base.property_change (self,property_,value)\n\n def set_dimmer(self,level):\n self.isy_device.set_level (level)\n"
},
{
"alpha_fraction": 0.7176470756530762,
"alphanum_fraction": 0.7411764860153198,
"avg_line_length": 19.658536911010742,
"blob_id": "9fe989e96f19376999631014f2215a13737d1876",
"content_id": "7893d17528c76a290719c3b3bc55b26580022cc5",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 850,
"license_type": "permissive",
"max_line_length": 157,
"num_lines": 41,
"path": "/README.md",
"repo_name": "berndpfrommer/ISY-Homie-Bridge",
"src_encoding": "UTF-8",
"text": "# ISY-Homie\n\nA bridge between the ISY994 controller and [Homie 4 MQTT convention](https://homieiot.github.io/).\n\nUtilizes the [ISY944v5 package](https://pypi.org/project/ISY994v5/). Currently only supports Insteon devices, Insteon Scenes, ISY program, and ISY variables.\n\nTo start as a service on raspbian \n\nCreate isy_homie.yml in /etc using the following settings:\n\n\n```yaml\nisy:\n url: xxx.xxx.xxx.xxx\n username: xxxxx\n password: xxxxx\n\nmqtt:\n MQTT_BROKER: broker\n MQTT_PORT: 1883\n MQTT_USERNAME: null\n MQTT_PASSWORD: null\n MQTT_SHARE_CLIENT: true\n ```\n\n Create isy-homie.service in /etc/systemd/system\n\n ```service\n[Unit]\nDescription=ISY995 Homie\nAfter=multi-user.target\n\n[Service]\nUser=pi\nType=simple\nExecStart=/usr/bin/python3 /usr/local/bin/isy_homie_start.py\nRestart=on-abort\n\n[Install]\nWantedBy=multi-user.target\n```\n\n\n\n"
},
{
"alpha_fraction": 0.6441640257835388,
"alphanum_fraction": 0.6441640257835388,
"avg_line_length": 34.20000076293945,
"blob_id": "7620431be767a9dc7ed779fee0ed1de830a6b875",
"content_id": "ff2fe16e1ce7d3323eac89b264dec3aeb125f484",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1585,
"license_type": "permissive",
"max_line_length": 100,
"num_lines": 45,
"path": "/isy_homie/devices/isy_controller.py",
"repo_name": "berndpfrommer/ISY-Homie-Bridge",
"src_encoding": "UTF-8",
"text": "#! /usr/bin/env python\n\nfrom homie.device_base import Device_Base\nfrom homie.node.node_base import Node_Base\nfrom homie.node.property.property_string import Property_String\n\nfrom .base import Base\n\nclass ISY_Controller (Base,Device_Base):\n\n def __init__(self, isy_device=None,homie_settings=None, mqtt_settings=None):\n\n Base.__init__ (self,isy_device)\n\n Device_Base.__init__ (self,'isycontroller', 'ISY Controller', homie_settings, mqtt_settings)\n \n node = Node_Base (self,'status','Status','status')\n self.add_node(node)\n\n self.heartbeat = Property_String(node, 'heartbeat','Heart Beat')\n node.add_property(self.heartbeat)\n\n self.state_busy = Property_String(node, 'state','State')\n node.add_property(self.state_busy)\n\n self.htpp_connected = Property_String(node, 'http','HTTP Status')\n node.add_property(self.htpp_connected)\n\n self.websocket_connected = Property_String(node, 'websocket','Websocket Status')\n node.add_property(self.websocket_connected)\n\n Device_Base.start(self)\n\n def property_change(self,property_,value):\n #print ('contoller',property_,value)\n if property_ == 'heartbeat':\n self.heartbeat.value = value.strftime(\"%m/%d/%Y, %H:%M:%S\")\n elif property_ == 'state':\n self.state_busy.value = value\n elif property_ == 'http':\n self.htpp_connected.value = value\n elif property_ == 'websocket':\n self.websocket_connected.value = value\n\n Base.property_change (self,property_,value)\n\n"
},
{
"alpha_fraction": 0.6622516512870789,
"alphanum_fraction": 0.6622516512870789,
"avg_line_length": 30.058822631835938,
"blob_id": "4022409cfdbbffb83fa4a05559eb1981006aa7d2",
"content_id": "c8bf53dbd7cf8dfaa7805df1a1206b59a36ee5e3",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1057,
"license_type": "permissive",
"max_line_length": 110,
"num_lines": 34,
"path": "/isy_homie/devices/controller_action.py",
"repo_name": "berndpfrommer/ISY-Homie-Bridge",
"src_encoding": "UTF-8",
"text": "#! /usr/bin/env python\n \n\nfrom homie.device_base import Device_Base\nfrom homie.node.node_base import Node_Base\nfrom homie.node.property.property_string import Property_String\n\nfrom .base import Base\n\n\nclass Controller_Action(Base,Device_Base):\n\n def __init__(self, isy_device=None,homie_settings=None, mqtt_settings=None):\n\n Base.__init__ (self,isy_device)\n\n Device_Base.__init__ (self,self.get_homie_device_id(), isy_device.name, homie_settings, mqtt_settings)\n \n paddle_node = Node_Base(self,id='paddle',name='paddle',type_='paddle')\n self.add_node(paddle_node)\n\n self.paddle = Property_String(paddle_node,'paddle-action','Paddle Action')\n paddle_node.add_property(self.paddle)\n\n Device_Base.start(self)\n\n def get_homie_device_id (self):\n return 'controller-' + Base.get_homie_device_id(self)\n\n def property_change(self,property_,value):\n if property_ == 'paddle_action':\n self.paddle.value = value\n \n Base.property_change (self,property_,value)\n\n"
},
{
"alpha_fraction": 0.6496259570121765,
"alphanum_fraction": 0.6496259570121765,
"avg_line_length": 31.040000915527344,
"blob_id": "5d702a2e1b9258b046952607a04b58db2ba22105",
"content_id": "2b405448733ed1d4212045224ad14de880eff669",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 802,
"license_type": "permissive",
"max_line_length": 113,
"num_lines": 25,
"path": "/isy_homie/devices/contact.py",
"repo_name": "berndpfrommer/ISY-Homie-Bridge",
"src_encoding": "UTF-8",
"text": "#! /usr/bin/env python\n\nfrom homie.device_contact import Device_Contact\nfrom .base import Base\n\nclass Contact(Base,Device_Contact):\n\n def __init__(self, isy_device=None,homie_settings=None, mqtt_settings=None):\n\n Base.__init__ (self,isy_device)\n \n Device_Contact.__init__ (self,self.get_homie_device_id(), isy_device.name, homie_settings, mqtt_settings)\n\n contact = self.isy_device.get_property('contact')\n if contact is not None:\n self.property_change('contact',contact)\n\n def get_homie_device_id (self):\n return 'contact-' + Base.get_homie_device_id(self)\n\n def property_change(self,property_,value):\n if property_ == 'contact':\n self.update_contact (value.upper())\n\n Base.property_change (self,property_,value)\n\n"
},
{
"alpha_fraction": 0.5,
"alphanum_fraction": 0.5789473652839661,
"avg_line_length": 18.5,
"blob_id": "c67932aaf3a3364bd8df8b40a17cd636304322c4",
"content_id": "e7426d6d9f6b2019bec494a4d1f0a51fb4aa317d",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 38,
"license_type": "permissive",
"max_line_length": 21,
"num_lines": 2,
"path": "/isy_homie/__init__.py",
"repo_name": "berndpfrommer/ISY-Homie-Bridge",
"src_encoding": "UTF-8",
"text": "name=\"isy_homie\"\n__version__ = \"0.2.8\""
},
{
"alpha_fraction": 0.6281208992004395,
"alphanum_fraction": 0.6281208992004395,
"avg_line_length": 31.36170196533203,
"blob_id": "ae04763d082b62af06cab09b8ba8dd6cc6032f6d",
"content_id": "c4b2d025d06bda47ff4905224c8318f2eed65f1e",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1522,
"license_type": "permissive",
"max_line_length": 141,
"num_lines": 47,
"path": "/isy_homie/devices/program.py",
"repo_name": "berndpfrommer/ISY-Homie-Bridge",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n\nfrom homie.device_base import Device_Base\nfrom homie.node.node_base import Node_Base\n\nfrom homie.node.property.property_string import Property_String\nfrom homie.node.property.property_enum import Property_Enum\n\nfrom .base import Base\n\nclass Program (Base,Device_Base):\n\n def __init__(self, isy_device=None,homie_settings=None, mqtt_settings=None):\n\n Base.__init__ (self,isy_device)\n\n Device_Base.__init__ (self,self.get_homie_device_id(), isy_device.name, homie_settings, mqtt_settings)\n\n node = (Node_Base(self,'program','Program','program'))\n self.add_node (node)\n\n self.status = Property_String (node,'status','Status')\n node.add_property (self.status)\n\n self.run = Property_Enum (node,id='run',name='Run Program',data_format='RUNIF,RUNTHEN,RUNELSE,STOP',set_value = self.set_run_program)\n node.add_property (self.run)\n\n self.start()\n\n def get_homie_device_id (self):\n return 'program-' + Base.get_homie_device_id(self)\n\n def set_run_program (self,value):\n if value == 'RUNIF':\n self.isy_device.run ()\n elif value == 'RUNTHEN':\n self.isy_device.run_then ()\n elif value == 'RUNELSE':\n self.isy_device.run_else ()\n elif value == 'STOP':\n self.isy_device.stop ()\n \n def property_change(self,property_,value):\n if property_ == 'state':\n self.status.value = value\n\n Base.property_change (self,property_,value)\n\n"
},
{
"alpha_fraction": 0.6311688423156738,
"alphanum_fraction": 0.6355844140052795,
"avg_line_length": 37.88888931274414,
"blob_id": "fd0955933e5d963128c73fed114199bd350c2ca9",
"content_id": "efa778658711a2f6fb38f30d25c4df179aa2031b",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3850,
"license_type": "permissive",
"max_line_length": 155,
"num_lines": 99,
"path": "/isy_homie/bridge.py",
"repo_name": "berndpfrommer/ISY-Homie-Bridge",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n\nimport time\n\nimport logging\nlogger = logging.getLogger(__name__)\nlogger.setLevel('DEBUG')\n\nfrom isy994.controller import Controller \n \nfrom .devices.switch import Switch\nfrom .devices.dimmer import Dimmer\nfrom .devices.fan import Fan\nfrom .devices.contact import Contact \nfrom .devices.controller_action import Controller_Action \nfrom .devices.scene import Scene\nfrom .devices.variable import Variable\nfrom .devices.program import Program\nfrom .devices.isy_controller import ISY_Controller\n\nHOMIE_SETTINGS = {\n 'update_interval' : 60, \n 'implementation' : 'ISY994', \n 'fw_name' : 'isy homie bridge',\n 'fw_version' : 0, # isy994.__version__,\n}\n\nclass Bridge (object):\n \n controller = None\n\n homie_devices = {} #indexed by container_type,device_address\n\n def __init__(self, address=None, username=None, password=None, homie_settings=HOMIE_SETTINGS, mqtt_settings=None):\n logger.debug('ISY Homie MQTT {}'.format (mqtt_settings))\n\n self.homie_settings = homie_settings\n self.mqtt_settings = mqtt_settings\n\n self.controller = Controller(address=address,port=None,username=username,password=password,use_https=False,event_handler=self._isy_event_handler)\n\n def _isy_event_handler(self,container,item,event,*args):\n logger.warn ('Event {} from {}: {} {}'.format(event,container.container_type,item.name,*args))\n\n if container.container_type == 'Device':\n self._device_event_handler (item,event,args)\n pass\n elif container.container_type == 'Scene':\n self._scene_event_handler (item,event,args)\n pass\n elif container.container_type == 'Variable':\n self._variable_event_handler (item,event,args)\n pass\n elif container.container_type == 'Program':\n self._program_event_handler (item,event,args)\n pass\n elif container.container_type == 'Controller':\n self._container_event_handler (item,event,args)\n #print (event,item,args)\n if event == 'property':\n pass\n #print ('args',args [0] [0], args[0] [1] )\n\n if event == 'add':\n pass\n #time.sleep(.5)\n\n def _device_event_handler(self,device,event,*args):\n #print ('device event',device.name,event,args)\n if event == 'add':\n if device.device_type == 'switch':\n switch = Switch (device,self.homie_settings,self.mqtt_settings)\n elif device.device_type == 'dimmer':\n switch = Dimmer (device,self.homie_settings,self.mqtt_settings)\n elif device.device_type == 'fan':\n fan = Fan (device,self.homie_settings,self.mqtt_settings)\n elif device.device_type == 'contact':\n contact = Contact (device,self.homie_settings,self.mqtt_settings)\n elif device.device_type == 'controller':\n controller = Controller_Action (device,self.homie_settings,self.mqtt_settings)\n\n def _scene_event_handler(self,device,event,*args):\n #print ('device event',device.name,event)\n if event == 'add':\n scene = Scene (device,self.homie_settings,self.mqtt_settings)\n\n def _variable_event_handler(self,device,event,*args):\n if event == 'add':\n variable = Variable (device,self.homie_settings,self.mqtt_settings)\n\n def _program_event_handler(self,device,event,*args):\n #print ('device event',device.name,event)\n if event == 'add':\n program = Program (device,self.homie_settings,self.mqtt_settings)\n\n def _container_event_handler(self,device,event,*args):\n #print ('container event',device.name,event)\n if event == 'add':\n controller = ISY_Controller (device,self.homie_settings,self.mqtt_settings)\n"
},
{
"alpha_fraction": 0.6338028311729431,
"alphanum_fraction": 0.6338028311729431,
"avg_line_length": 25,
"blob_id": "c780585ef66dcc67892486dc60ee1a4d0f8e8649",
"content_id": "816e92d7c4897566692f398b99105bbca66bdb11",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 781,
"license_type": "permissive",
"max_line_length": 111,
"num_lines": 30,
"path": "/isy_homie/devices/fan.py",
"repo_name": "berndpfrommer/ISY-Homie-Bridge",
"src_encoding": "UTF-8",
"text": "#! /usr/bin/env python\n\n\n#!/usr/bin/env python\n\nfrom homie.device_speed import Device_Speed\n\nfrom .base import Base\n\nclass Fan (Base,Device_Speed):\n\n def __init__(self, isy_device=None,homie_settings=None, mqtt_settings=None):\n\n Base.__init__ (self,isy_device)\n\n Device_Speed.__init__ (self,self.get_homie_device_id(), isy_device.name, homie_settings, mqtt_settings)\n\n self.start()\n\n def get_homie_device_id (self):\n return 'fan-' + Base.get_homie_device_id(self)\n\n def set_speed (self,speed):\n self.isy_device.set_speed(speed.lower())\n \n def property_change(self,property_,value):\n if property_ == 'speed':\n self.speed_property.value = value.upper()\n\n Base.property_change (self,property_,value)\n\n"
},
{
"alpha_fraction": 0.6216517686843872,
"alphanum_fraction": 0.6216517686843872,
"avg_line_length": 29.86206817626953,
"blob_id": "ab47037d8e544d19a26c34b551ac249f3591c537",
"content_id": "012967148725be2915a4b2a777ec4fa8abbbb6c8",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 896,
"license_type": "permissive",
"max_line_length": 113,
"num_lines": 29,
"path": "/isy_homie/devices/variable.py",
"repo_name": "berndpfrommer/ISY-Homie-Bridge",
"src_encoding": "UTF-8",
"text": "#! /usr/bin/env python\n\nfrom homie.device_integer import Device_Integer\nfrom .base import Base\n\nclass Variable(Base,Device_Integer):\n\n def __init__(self, isy_device=None,homie_settings=None, mqtt_settings=None):\n\n Base.__init__ (self,isy_device)\n\n Device_Integer.__init__ (self,self.get_homie_device_id(), isy_device.name, homie_settings, mqtt_settings)\n\n '''\n value = self.isy_device.get_property('value')\n if value is not None:\n self.property_change('value',value)\n '''\n def get_homie_device_id (self): \n return 'variable-' + Base.get_homie_device_id(self).replace(':','-')\n\n def property_change(self,property_,value):\n if property_ == 'value':\n self.update_value (value)\n\n Base.property_change (self,property_,value)\n\n def set_value(self,value):\n self.isy_device.set_value (value)\n\n"
}
] | 10 |
alvarogmf/ih_datamadpt1120_project_m1
|
https://github.com/alvarogmf/ih_datamadpt1120_project_m1
|
7a9cd2734e8527c9553be6f3d9df3cfd9ee07934
|
78d30e5b5a282357beac891e6780958dd015fa3a
|
6dd9de70027c8034040b3b9b254c8c6ca3a6ebc4
|
refs/heads/main
| 2023-02-28T03:16:05.269467 | 2021-01-28T18:17:27 | 2021-01-28T18:17:27 | 328,758,761 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6623600125312805,
"alphanum_fraction": 0.6640827059745789,
"avg_line_length": 35.25,
"blob_id": "c142a47ef7e9f10ac0d83c5aa01aa29bd8caadea",
"content_id": "c41adc42de0981f24e07dd33f00e2383115aa40e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1161,
"license_type": "no_license",
"max_line_length": 94,
"num_lines": 32,
"path": "/p_wrangling/m_wrangling.py",
"repo_name": "alvarogmf/ih_datamadpt1120_project_m1",
"src_encoding": "UTF-8",
"text": "import pandas as pd\nimport re\n\n\ndef db_cleaning(db):\n \"\"\"\n This function will standardize the Gender column to have only Male & Female outputs\n \"\"\"\n db['gender'] = db['gender'].str.capitalize()\n db['gender'] = db['gender'].str.replace(r'\\b[f]\\w+', 'female')\n db['gender'] = db['gender'].str.replace(r'\\b[m]\\w+', 'male')\n return db\n\n\ndef countries_clean(countries_df):\n \"\"\"\n This function will eliminate all the parenthesis and Null rows in the Countries Database\n \"\"\"\n final_countries = countries_df.dropna()\n final_countries['country_code'] = final_countries['country_code'].str.extract(r'(\\b\\w\\S)')\n return final_countries\n\n\ndef final_table(main_df, jobs_df, countries_df):\n \"\"\"\n This function merges all the tables to have a unique table with all the information.\n \"\"\"\n merged_jobs = pd.merge(main_df, jobs_df, on=\"normalized_job_code\")\n merged_countries = pd.merge(merged_jobs, countries_df, on=\"country_code\")\n merged_countries['Quantity'] = 1\n merged_countries['Percentage'] = 1 / len(merged_countries)\n return merged_countries[['Country_Name', 'title', 'gender', 'Quantity', 'Percentage']]\n\n"
},
{
"alpha_fraction": 0.6510008573532104,
"alphanum_fraction": 0.6510008573532104,
"avg_line_length": 44.959999084472656,
"blob_id": "6df46a36366bc78f73108bd32ec7b146e470626d",
"content_id": "9b6ef1069ee8670fe52a07fdc922f7f16614778d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1149,
"license_type": "no_license",
"max_line_length": 189,
"num_lines": 25,
"path": "/p_reporting/m_reporting.py",
"repo_name": "alvarogmf/ih_datamadpt1120_project_m1",
"src_encoding": "UTF-8",
"text": "import pandas as pd\n\n\ndef report(dataframe, country):\n \"\"\"\n This function will create the final report, using the columns required and choosing between a specific Country or All\n \"\"\"\n if country == 'All':\n reporting = pd.DataFrame(dataframe.groupby(['Country_Name','title', 'gender']).agg({'Quantity':['sum'], 'Percentage':['sum']}).reset_index())\n reporting.to_csv('Output/reporting.csv', index=False)\n print('Converted to CSV!')\n elif (dataframe['Country_Name'] == country).any():\n reporting = pd.DataFrame(dataframe[(dataframe['Country_Name'] == country)].groupby(['Country_Name','title', 'gender']).agg({'Quantity':['sum'], 'Percentage':['sum']}).reset_index())\n reporting.to_csv('Output/reporting.csv', index=False)\n print('Converted to CSV!')\n else:\n print('ERROR: Country selected is not in Database')\n\n\n#def to_csv(dataframe): DEPRECATED!! NOT FOR USE ANYMORE!!\n# \"\"\"\n# This function will convert the previous DF to a CSV and save it into the Output Folder\n# \"\"\"\n# dataframe.to_csv('../Output/reporting.csv',index = False)\n# return print('Converted to CSV!')\n"
},
{
"alpha_fraction": 0.6394442319869995,
"alphanum_fraction": 0.653337836265564,
"avg_line_length": 35.41975402832031,
"blob_id": "488c5ac54eeeab1bde9fec100611f2bfa1b2881d",
"content_id": "6514885dc8ff3f0ca1c27acc4159bbc21fa6462e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2951,
"license_type": "no_license",
"max_line_length": 114,
"num_lines": 81,
"path": "/p_acquisition/m_acquisition.py",
"repo_name": "alvarogmf/ih_datamadpt1120_project_m1",
"src_encoding": "UTF-8",
"text": "import pandas as pd\nfrom sqlalchemy import create_engine\nimport json\nimport requests\nfrom functools import reduce\n\n\ndef get_database():\n \"\"\"\n Function to connect to the database.\n The file must be called raw_data_project_m1.db and has to be in the data folder.\n \"\"\"\n print('connecting to the database...')\n db_path = 'data/raw_data_project_m1.db'\n conn_str = f'sqlite:///{db_path}'\n engine = create_engine(conn_str)\n table_names = pd.read_sql_query(\"SELECT name FROM sqlite_master WHERE type='table'\", engine)\n tables_names_lst = table_names['name'].to_list()\n all_dfs = [pd.read_sql_query(f'select * from {i}', engine) for i in tables_names_lst]\n merged_tables = reduce(lambda left, right: pd.merge(left, right, on='uuid'), all_dfs)\n print('connected!')\n return merged_tables\n\n\ndef job_ids(merged_tables):\n \"\"\"\n This function is used to extract the list of all the jobs from the main DB.\n \"\"\"\n jobs_ids = list(merged_tables['normalized_job_code'].unique())\n print('Import finsihed :)')\n return jobs_ids\n\n\ndef get_jobs(jobs_id):\n \"\"\"\n This functions connects to the API and\n extracts in a DF all the names of the Jobs listed in the function above.\n \"\"\"\n print('Calling the api...')\n jobs_list = []\n for id in jobs_id:\n if id is None:\n pass\n else:\n api_url = requests.get(f'http://api.dataatwork.org/v1/jobs/{id}')\n json_data = api_url.json()\n jobs_list.append(json_data)\n jobs_df_raw = pd.DataFrame(jobs_list)\n jobs_df = jobs_df_raw.rename(columns={'uuid': \"normalized_job_code\"})\n print('API connection completed!')\n return jobs_df\n\n\ndef get_countries():\n \"\"\"\n Function WIP. Functional but needs to be improved.\n It retrieves the countries with their codes from the web scrapper.\n \"\"\"\n print('connecting to the countries web...')\n url = 'https://ec.europa.eu/eurostat/statistics-explained/index.php/Glossary:Country_codes'\n table_0 = pd.read_html(url)[0]\n table_1 = pd.read_html(url)[1]\n table_2 = pd.read_html(url)[2]\n table_3 = pd.read_html(url)[3]\n table_4 = pd.read_html(url)[4]\n table_5 = pd.read_html(url)[5]\n table_6 = pd.read_html(url)[6]\n table_7 = pd.read_html(url)[7]\n table_8 = pd.read_html(url)[8]\n\n countries_table = pd.concat([table_0, table_1, table_2, table_3, table_4, table_5, table_6, table_7, table_8])\n\n country_names = countries_table[0].append(countries_table[2]).append(countries_table[4]).append(\n countries_table[6]).append(countries_table[9]).reset_index(drop=True)\n country_ids = countries_table[1].append(countries_table[3]).append(countries_table[5]).append(\n countries_table[7]).append(countries_table[10]).reset_index(drop=True)\n\n full_countries_list = pd.DataFrame({'Country_Name': country_names, 'country_code': country_ids})\n print('countries retrieved!')\n\n return full_countries_list\n\n"
},
{
"alpha_fraction": 0.6871165633201599,
"alphanum_fraction": 0.7191547155380249,
"avg_line_length": 40.88571548461914,
"blob_id": "94af140c9501d50e755721522351e627bda9f41b",
"content_id": "0c94dfba4bb4c7268b965873b4f115af31ab1f01",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1467,
"license_type": "no_license",
"max_line_length": 141,
"num_lines": 35,
"path": "/README.md",
"repo_name": "alvarogmf/ih_datamadpt1120_project_m1",
"src_encoding": "UTF-8",
"text": "# Country Jobs Analysis\n\n###### ih_datamadpt1120_project_m1\n\n<p align=\"left\"><img src=\"https://cdn-images-1.medium.com/max/184/1*[email protected]\" width=\"80\"></p>___________________________\n\n**Owner**: Alvaro Gil\n\n**Bootcamp**: Ironhack - Data Analytics Part Time Nov 2020\n\n## Overview\n\nThis script helps the user to retrieve and analyze the number of people for each country that works on a specific job.\n\nIt takes the information from the following sources:\n\n* [Main Database](http://www.potacho.com/files/ironhack/raw_data_project_m1.db)\n* [Jobs](http://dataatwork.org/data/)\n* [Country Codes](https://ec.europa.eu/eurostat/statistics-explained/index.php/Glossary:Country_codes)\n\n\n## How to use\n\nTo run this script you have to follow the following steps:\n* **Clone** this repository\n* In the **Terminal**, head to the folder _ih_datamadpt1120_project_m1_\n* Write _python main.py_ and **choose the country** to analyze with the command -c and one the following options:\n * If it's a **specific** country, write the name with the first letter capitalized (e.g: Spain)\n * If you want to see **all the countries** at once, write _All_\n \n* Press **enter** and once the script has run the following message will show in the Terminal: _Reporting complete!_\n* A **CSV document** will have been created in the Output folder with the data desired.\n\n\n\n\n"
},
{
"alpha_fraction": 0.7008474469184875,
"alphanum_fraction": 0.7008474469184875,
"avg_line_length": 30.026315689086914,
"blob_id": "deeb7474e3a8c73ca7050f479565481d8558a3fa",
"content_id": "8c9ea3fbedf741f5d2be5b239813e3aacfc74370",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1180,
"license_type": "no_license",
"max_line_length": 116,
"num_lines": 38,
"path": "/main.py",
"repo_name": "alvarogmf/ih_datamadpt1120_project_m1",
"src_encoding": "UTF-8",
"text": "import argparse\nfrom p_acquisition import m_acquisition as acq\nfrom p_wrangling import m_wrangling as wra\nfrom p_reporting import m_reporting as rep\n\n\ndef argument_parser():\n \"\"\"\n documentation: parse arguments to script\n \"\"\"\n parser = argparse.ArgumentParser(description='select country..')\n parser.add_argument(\"-c\",\"--country\",help=\"Choose a country by full name or choose All\",type=str, required=True)\n args = parser.parse_args()\n return args\n\n\ndef main(arguments):\n country_filter = arguments.country\n print('Getting data...')\n main_database = acq.get_database()\n jobs_list = acq.job_ids(main_database)\n jobs_database = acq.get_jobs(jobs_list)\n countries_database = acq.get_countries()\n\n print('Cleaning data...')\n main_database_clean = wra.db_cleaning(main_database)\n countries_database_clean = wra.countries_clean(countries_database)\n\n print('Preparing the database')\n final_database = wra.final_table(main_database_clean, jobs_database, countries_database_clean)\n rep.report(final_database, country_filter)\n\n print('Reporting complete!')\n\n\nif __name__ == '__main__':\n args = argument_parser()\n main(args)\n\n"
}
] | 5 |
dmitryvorono/19_site_generator
|
https://github.com/dmitryvorono/19_site_generator
|
ae14df54d5b8b5e299570c8748aa317725eddc8e
|
f94f34f1122c6f5feb5068766dfecac2c1d1bd13
|
433d60fc14d8531e6172580e88b70649743483ee
|
refs/heads/master
| 2021-01-01T04:33:17.646702 | 2017-08-07T17:20:36 | 2017-08-07T17:20:36 | 97,197,635 | 0 | 0 | null | 2017-07-14T05:47:44 | 2016-12-30T19:58:19 | 2017-02-07T19:21:21 | null |
[
{
"alpha_fraction": 0.45856353640556335,
"alphanum_fraction": 0.47030386328697205,
"avg_line_length": 32.67441940307617,
"blob_id": "8dcc271f376ee90e2a64fa9cc2bfdb06f04f909b",
"content_id": "25fc11c9fea3000841240b62db6932f9ddfec52f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "HTML",
"length_bytes": 1472,
"license_type": "no_license",
"max_line_length": 108,
"num_lines": 43,
"path": "/templates/index.html",
"repo_name": "dmitryvorono/19_site_generator",
"src_encoding": "UTF-8",
"text": "<!DOCTYPE html>\n<html lang=\"ru\">\n <head>\n <title>Энциклопедия</title>\n <meta charset=\"utf-8\">\n <meta name=\"viewport\" content=\"width=device-width, initial-scale=1\">\n <link rel=\"stylesheet\" href=\"https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/css/bootstrap.min.css\">\n <style>\n .b-page{\n padding-left: 25px;\n }\n </style>\n </head>\n <body>\n <div class='b-page'>\n <div class='b-page__line'>\n <div class='b-header page-header'>\n <h1>Список статей</h1>\n </div>\n </div>\n <div class='b-page__line'>\n {% for topic in topics %}\n <div class='b-articles'>\n <div class='b-articles__head'>\n <h2>{{ topic.title|e }}</h2>\n </div>\n <ul class='b-articles__links'>\n {% for article in articles %}\n {% if article.topic == topic.slug %}\n <li>\n <a href='{{ article.url|urlencode }}' class='b-link'>{{ article.title|e }}</a>\n </li>\n {% endif%}\n {% endfor %}\n </ul>\n </div>\n {% endfor %}\n </div>\n </div>\n <script src=\"https://ajax.googleapis.com/ajax/libs/jquery/3.2.1/jquery.min.js\"></script>\n <script src=\"https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/js/bootstrap.min.js\"></script>\n </body>\n</html>\n"
},
{
"alpha_fraction": 0.6293859481811523,
"alphanum_fraction": 0.6301169395446777,
"avg_line_length": 30.090909957885742,
"blob_id": "0eeaa3bf80f91d62d5a65c67163f7a964d2586f3",
"content_id": "ce1cb39eebf953270b0a7c6e58ffde568e92596d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2736,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 88,
"path": "/site_generator.py",
"repo_name": "dmitryvorono/19_site_generator",
"src_encoding": "UTF-8",
"text": "import os\nimport json\nimport errno\nfrom jinja2 import Environment, FileSystemLoader\nfrom markdown import markdown\nimport urllib\nfrom livereload import Server\n\n\ndef load_config_json(filepath):\n if not os.path.exists(filepath):\n return None\n with open(filepath, 'r') as file_handler:\n return json.load(file_handler)\n\n\ndef load_data(filepath):\n if not os.path.exists(filepath):\n return None\n with open(filepath, 'r') as file_handler:\n return file_handler.read()\n\n\ndef load_jinja_templates(templates_folder):\n jinja_env = Environment(loader=FileSystemLoader(templates_folder),\n trim_blocks=True,\n lstrip_blocks=True)\n jinja_env\n with os.scandir(templates_folder) as folder_iterator:\n return {entry.name: jinja_env.get_template(entry.name)\n for entry in folder_iterator if entry.is_file()}\n\n\ndef render_index_page(jinja_template, config, output='index.html'):\n with open(output, 'w') as file_handler:\n file_handler.write(jinja_template.render(topics=config['topics'],\n articles=config['articles']))\n\n\ndef convert_markdown_to_html(markdown_filepath):\n markdown_text = load_data(markdown_filepath)\n return markdown(markdown_text)\n\n\ndef mkdir_p(path):\n try:\n os.makedirs(path)\n except OSError as exc:\n if exc.errno == errno.EEXIST and os.path.isdir(path):\n pass\n else:\n raise\n\n\ndef render_article_page(jinja_template, article, content, output):\n mkdir_p(os.path.dirname(output))\n with open(output, 'w') as file_handler:\n file_handler.write(jinja_template.render(article=article,\n content=content))\n\n\n\n\ndef render_articles(jinja_template, articles, articles_folder):\n for article in articles:\n path_to_article = ''.join([articles_folder, '/', article['source']])\n html = convert_markdown_to_html(path_to_article)\n output = ''.join([article['source'][:-2], 'html'])\n render_article_page(jinja_template, article, html, output)\n article['url'] = output\n\n\ndef make_site():\n templates_folder = 'templates'\n articles_folder = 'articles'\n config = load_config_json('config.json')\n jinja_templates = load_jinja_templates(templates_folder)\n render_articles(jinja_templates['article.html'],\n config['articles'],\n articles_folder)\n render_index_page(jinja_templates['index.html'], config)\n\n\nif __name__ == '__main__':\n server = Server()\n server.watch('templates/*.html', make_site)\n server.watch('articles/**/*.md', make_site)\n server.serve(root='.')\n"
},
{
"alpha_fraction": 0.7452711462974548,
"alphanum_fraction": 0.7591425180435181,
"avg_line_length": 23.78125,
"blob_id": "991fd8e2cc74a98d8adc6fcb41e79c9ffe47f1a3",
"content_id": "9260a9a192a1b4633ef908c5e429e86a8d5db4f4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 793,
"license_type": "no_license",
"max_line_length": 132,
"num_lines": 32,
"path": "/README.md",
"repo_name": "dmitryvorono/19_site_generator",
"src_encoding": "UTF-8",
"text": "# Encyclopedia\n\nThis project generate small static site from articles written markdown.\n\n[Demonstration page](https://dmitryvorono.github.io/19_site_generator/)\n\n# Requirements\n\n- Python3.6\n- Markdown\n- Jinja2\n- Livereload\n- Virtualenv(optional)\n\n# How to Install\n\nPython 3 should be already installed. Then use pip (or pip3 if there is a conflict with old Python 2 setup) to install dependencies:\n\n```bash\n$ pip install -r requirements.txt # alternatively try pip3\n```\nRemember, it is recommended to use [virtualenv/venv](https://devman.org/encyclopedia/pip/pip_virtualenv/) for better isolation.\n\n# How to launch\n\n```bash\n$ python3.6 site_generator.py\n```\n`\n# Project Goals\n\nThe code is written for educational purposes. Training course for web-developers - [DEVMAN.org](https://devman.org)\n"
},
{
"alpha_fraction": 0.5,
"alphanum_fraction": 0.695652186870575,
"avg_line_length": 14.333333015441895,
"blob_id": "b28fd2c48f9d017df4013ccbecaf116ed356120d",
"content_id": "ba6879b31b2dcfd5b266caf1cc85244467d82923",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 46,
"license_type": "no_license",
"max_line_length": 17,
"num_lines": 3,
"path": "/requirements.txt",
"repo_name": "dmitryvorono/19_site_generator",
"src_encoding": "UTF-8",
"text": "markdown==2.6.8\nJinja2==2.9\nlivereload==2.5.1\n"
}
] | 4 |
hzheng40/selfbalancing
|
https://github.com/hzheng40/selfbalancing
|
35c649480bb3c75d2fb286c49d471b8c03f05ca5
|
dcae300afd116f8fa42a5b915c80a323115699f8
|
9e41b94938ba4edd09eb3caa18efafb2f08ca661
|
refs/heads/master
| 2017-12-04T19:06:55.305754 | 2016-03-16T14:50:12 | 2016-03-16T14:50:12 | 54,040,544 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6119096279144287,
"alphanum_fraction": 0.6262833476066589,
"avg_line_length": 15.266666412353516,
"blob_id": "96d814d583b929bc57647db997d92b4421019ea0",
"content_id": "1fb7d8ec28dd73d07596bfe69d95c4ba73a83452",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 487,
"license_type": "no_license",
"max_line_length": 47,
"num_lines": 30,
"path": "/controlls.py",
"repo_name": "hzheng40/selfbalancing",
"src_encoding": "UTF-8",
"text": "import serial\nimport curses\nstdscr = curses.initscr()\nstdscr.keypad(1)\n\n\nstdscr.refresh()\nser = serial.Serial('/dev/cu.HC-06-DevB', 9600)\ncommand = 'S'\nkey = ''\nwhile key != ord('q'):\n\tkey = stdscr.getch()\n\tstdscr.refresh()\n\tif key == curses.KEY_UP:\n\t\tcommand = 'F'\n\n\telif key == curses.KEY_DOWN:\n\t\tcommand = 'B'\n\n\telif key == curses.KEY_LEFT:\n\t\tcommand = 'L'\n\n\telif key == curses.KEY_RIGHT:\n\t\tcommand = 'R'\n\n\telif key == '':\n\t\tcommand = 'S'\n\tser.write(command)\n\tkey = ''\ncurses.endwin()"
}
] | 1 |
QK-sampson/shooot
|
https://github.com/QK-sampson/shooot
|
b1149954f4020d3bec63181ba2f5f85ae73096cb
|
22a0d0f8387e24e310dcfec05ad0a0c81ebace04
|
7181f7f4a6f3d137846416787425739bc5fade5d
|
refs/heads/master
| 2020-03-12T06:15:34.289431 | 2018-01-13T20:45:28 | 2018-01-13T20:45:28 | null | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6415094137191772,
"alphanum_fraction": 0.6603773832321167,
"avg_line_length": 25.5,
"blob_id": "cfa5191a40f4d86e85f222ad99176bfdd7fb0d5f",
"content_id": "6a9547abcaa7d4909d960c1b48e9b1c42eb52624",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 53,
"license_type": "permissive",
"max_line_length": 40,
"num_lines": 2,
"path": "/vlc-stream",
"repo_name": "QK-sampson/shooot",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\ngphoto2 --capture-movie --stdout | vlc -\n"
},
{
"alpha_fraction": 0.524974524974823,
"alphanum_fraction": 0.5351681709289551,
"avg_line_length": 29.65625,
"blob_id": "a534d063d0f1e2ba2207e67174ab9ad6d2ee6701",
"content_id": "25469b58ed3515253f422031ceaba85c1d2470c4",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 981,
"license_type": "permissive",
"max_line_length": 100,
"num_lines": 32,
"path": "/get-iso.py",
"repo_name": "QK-sampson/shooot",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python\nimport sh\nimport sys\n\nout = sh.gphoto2(\"--get-config=iso\")\n\nif sys.argv[1] == \"-a\":\n for line in out.splitlines():\n if b'Current:' in line:\n print \"N\", line.replace(\"Current: \", \"\")\n \n elif b'Choice:' in line:\n print \"F\", line.replace(\"Choice:\", \"\").split(\" \")[2]\n \nelif sys.argv[1] == \"-c\":\n for line in out.splitlines():\n if b'Current:' in line:\n print line.replace(\"Current: \", \"\")\n sys.exit(0)\n \nelif sys.argv[1] == \"-p\":\n for line in out.splitlines():\n if b'Choice:' in line:\n print line.replace(\"Choice:\", \"\").split(\" \")[2]\n \nelse:\n print \"Usage: \\\"python2.7 iso-extractor.py OPTION\\\"\"\n print \"Options:\"\n print \" -a: print all the ISO values, both current (marked with N) and possible (marked with F)\"\n print \" -c: print the current ISO value\"\n print \" -p: print the available ISO values\"\n sys.exit(3)\n"
},
{
"alpha_fraction": 0.5426356792449951,
"alphanum_fraction": 0.5542635917663574,
"avg_line_length": 63.5,
"blob_id": "912aef8f2123f3ea3707b464df08c61f661f603b",
"content_id": "4c85c0cb29d6ab7f8da2d99ac2584d12cccdf35b",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 516,
"license_type": "permissive",
"max_line_length": 194,
"num_lines": 8,
"path": "/convert-raw",
"repo_name": "QK-sampson/shooot",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\necho \"Converting...\"\nfind . -type f -iname \"*.nef\" -o -iname \"*.cr2\" -o -iname \"*.crw\" -o -iname \"*.pef\" -o -iname \"*.arw\" -o -iname \"*.sr2\" -o -iname \"*.raw\" -o -iname \"*.dng\" | xargs -L1 ufraw-batch --out-type=jpg\necho \"Done.\"\nread -p \"Do you want to remove the original raw images? (y/n)? \" CONT\nif [ \"$CONT\" = \"y\" ]; then\n find . -type f -iname \"*.nef\" -o -iname \"*.cr2\" -o -iname \"*.crw\" -o -iname \"*.pef\" -o -iname \"*.arw\" -o -iname \"*.sr2\" -o -iname \"*.raw\" -o -iname \"*.dng\" | xargs -L1 rm\nfi\n"
},
{
"alpha_fraction": 0.6327272653579712,
"alphanum_fraction": 0.6509090662002563,
"avg_line_length": 29.55555534362793,
"blob_id": "66cf0aa4c0ba5d21bf2a44fc38841de4301e8035",
"content_id": "aa613cc7316ba2af8136d1052e2cabdc53cc9f6f",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 275,
"license_type": "permissive",
"max_line_length": 105,
"num_lines": 9,
"path": "/capture-movie",
"repo_name": "QK-sampson/shooot",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\nopt=$1\nif [ -z $opt ]; then\n cd bin\n read -e -p \"Please specify the movie lenght in seconds: \" opt\n cd ..\nfi\necho \"Capturing $opt seconds movie...\"\ngphoto2 --set-config movie=1 --wait-event=$(echo $opt)s --set-config movie=0 --wait-event-and-download=2s\n"
},
{
"alpha_fraction": 0.6184049248695374,
"alphanum_fraction": 0.6343558430671692,
"avg_line_length": 22.97058868408203,
"blob_id": "52cf4135050fa9b0e191287b489692c2e566a3f6",
"content_id": "52beb3093ed659c04557697f8662f33ff438481c",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 815,
"license_type": "permissive",
"max_line_length": 115,
"num_lines": 34,
"path": "/m-shooot.py",
"repo_name": "QK-sampson/shooot",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python\nimport os\nimport errno\nfrom time import sleep\nimport sys\nimport sh\n\nprint \"Welcome to shooot\"\nprint \"============\"\n\nname = sys.argv[1]\n\ntry:\n os.makedirs(name)\n \nexcept OSError as exception:\n if exception.errno != errno.EEXIST:\n raise\n\nos.chdir(name)\nprint \"Working in \", os.getcwd()\n\nname = name.split(os.sep)[-1]\nnumber = int(sys.argv[2])\nfileformat = \".\" + sys.argv[3]\ntime = sys.argv[4]\nsh.gphoto2(\"--set-config\", \"shutterspeed=\" + time)\nwait = float(time.replace(\",\", \".\").replace(\"s\", \"\"))\nsh.gphoto2(\"--set-config\", \"f-number=\" + sys.argv[5])\nsh.gphoto2(\"--set-config\", \"iso=\" + sys.argv[6])\n\nfor index in range(0, number):\n print index + 1, \"of\", number\n sh.gphoto2(\"--capture-image-and-download\", \"--filename=\" + name + str(index) + fileformat, \"--force-overwrite\")\n"
},
{
"alpha_fraction": 0.555232584476471,
"alphanum_fraction": 0.5639534592628479,
"avg_line_length": 29.577777862548828,
"blob_id": "8fe92f7ed487bb807d1af26a8d85bec4bcc7668f",
"content_id": "fff9d5316911f4be0f0a9c3c0eb929dfae75049c",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1376,
"license_type": "permissive",
"max_line_length": 104,
"num_lines": 45,
"path": "/get-exp-time.py",
"repo_name": "QK-sampson/shooot",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python\nimport sh\nimport sys\n\ndef printhelp():\n print \"Usage: \\\"python2.7 shutter-speed-extractor.py LIST_TYPE FORMAT\\\"\"\n print \"Options:\"\n print \" -a: print all the shutter speeds, both current (marked with N) and possible (marked with F)\"\n print \" -c: print the current shutter speed\"\n print \" -p: print the available shutter speeds\"\n print \"Formats:\"\n print \" -d: make decimal values use the dot\"\n print \" -v: let decimal values use the comma\"\n sys.exit(3)\n\nreplacing = \",\"\nif sys.argv[2] == \"-d\":\n replacing = \".\"\n\nelif sys.argv[2] != \"-v\":\n printhelp()\n\nout = sh.gphoto2(\"--get-config=shutterspeed\")\n\nif sys.argv[1] == \"-a\":\n for line in out.splitlines():\n if b'Current:' in line:\n print \"N\", line.replace(\"Current: \", \"\").replace(\",\", replacing)\n \n elif b'Choice:' in line:\n print \"F\", line.replace(\"Choice:\", \"\").split(\" \")[2].replace(\",\", replacing)\n \nelif sys.argv[1] == \"-c\":\n for line in out.splitlines():\n if b'Current:' in line:\n print line.replace(\"Current: \", \"\").replace(\",\", replacing)\n sys.exit(0)\n \nelif sys.argv[1] == \"-p\":\n for line in out.splitlines():\n if b'Choice:' in line:\n print line.replace(\"Choice:\", \"\").split(\" \")[2].replace(\",\", replacing)\n \nelse:\n printhelp()\n"
},
{
"alpha_fraction": 0.7203166484832764,
"alphanum_fraction": 0.7229551672935486,
"avg_line_length": 17.950000762939453,
"blob_id": "ddc35757e85ba3050861e64de37858ec9d83c3f8",
"content_id": "085c4898b2df29e45df60b9f42bb3c23f7f4a31e",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 379,
"license_type": "permissive",
"max_line_length": 105,
"num_lines": 20,
"path": "/README.md",
"repo_name": "QK-sampson/shooot",
"src_encoding": "UTF-8",
"text": "# shooot\nSome gPhoto2 utilities (bash & Python scripts) to help you control your DSLR from the Linux command line.\n\n## Index\n- m-shooot.py\n- shooot.py\n- get-aperture.py\n- capture-movie\n- kill-gphoto\n- vlc-stream\n- convert-raw\n- sync-loop\n- set-dslr-dark\n- get-exp-time.py\n- list-utils\n\t- load-supported-cameras.sh\n\t- list-config.sh\n- get-iso.py\n- toggle-viewfinder\n- sync-photos\n"
},
{
"alpha_fraction": 0.5267857313156128,
"alphanum_fraction": 0.5714285969734192,
"avg_line_length": 17.66666603088379,
"blob_id": "8fe36d243019e5ab886d9925b7c467166aa3b0cd",
"content_id": "0e005a99cdba6d3ddd377492b240a82ef0dd1db6",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 112,
"license_type": "permissive",
"max_line_length": 39,
"num_lines": 6,
"path": "/sync-photos",
"repo_name": "QK-sampson/shooot",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\nif [ ! -z $1 ]; then\n mkdir $1 2> /dev/null\n cd $1\nfi\ngphoto2 --get-all-files --skip-existing\n"
},
{
"alpha_fraction": 0.5853658318519592,
"alphanum_fraction": 0.6829268336296082,
"avg_line_length": 19.5,
"blob_id": "f8f7c346f6bf32f7624c6115e047efc7009cb272",
"content_id": "a699d5a009810debcbed71e0ff25a745143ad351",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 41,
"license_type": "permissive",
"max_line_length": 28,
"num_lines": 2,
"path": "/set-dslr-dark",
"repo_name": "QK-sampson/shooot",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\ngphoto2 --set-config d06b=$1\n"
},
{
"alpha_fraction": 0.625,
"alphanum_fraction": 0.646276593208313,
"avg_line_length": 14.666666984558105,
"blob_id": "dfb2e69669328f3f348d8ed88850f8e106a1fd5a",
"content_id": "7c19f3fec47657ca170bdc90ec7055b625648576",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 376,
"license_type": "permissive",
"max_line_length": 53,
"num_lines": 24,
"path": "/sync-loop",
"repo_name": "QK-sampson/shooot",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\ntrap close INT\n\nfunction close() {\n echo \"Interrupted.\"\n exit 0\n}\n\ninterval=$1\nif [ -z $interval ]; then\n interval=2\nfi\n\nif [ ! -z $2 ]; then\n mkdir $2 2> /dev/null\n cd $2\nfi\n\necho \"Loop download with interval $interval seconds.\"\necho \"Starting... press Ctrl-C to stop.\"\nwhile true; do\n\tgphoto2 --get-all-files --skip-existing\n\tsleep $interval\ndone\n"
},
{
"alpha_fraction": 0.5470588207244873,
"alphanum_fraction": 0.5558823347091675,
"avg_line_length": 29.22222137451172,
"blob_id": "32126495d26085d502055f194d264ad101756ed2",
"content_id": "21ef7eb14f2106b5d0a58ff6b18540f7ff5f7917",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1360,
"license_type": "permissive",
"max_line_length": 99,
"num_lines": 45,
"path": "/get-aperture.py",
"repo_name": "QK-sampson/shooot",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python\nimport sh\nimport sys\n\ndef printhelp():\n print \"Usage: \\\"python2.7 f-number-extractor.py LIST_TYPE FORMAT\\\"\"\n print \"List types:\"\n print \" -a: print all the f-numbers, both current (marked with N) and possible (marked with F)\"\n print \" -c: print the current f-number\"\n print \" -p: print the available f-number\"\n print \"Formats:\"\n print \" -d: make decimal values use the dot\"\n print \" -v: let decimal values use the comma\"\n sys.exit(3)\n\nreplacing = \",\"\nif sys.argv[2] == \"-d\":\n replacing = \".\"\n\nelif sys.argv[2] != \"-v\":\n printhelp()\n\nout = sh.gphoto2(\"--get-config=f-number\")\n\nif sys.argv[1] == \"-a\":\n for line in out.splitlines():\n if b'Current:' in line:\n print \"N\", line.replace(\"Current: f/\", \"\").replace(\",\", replacing)\n \n elif b'Choice:' in line:\n print \"F\", line.replace(\"Choice:\", \"\").split(\"f/\")[1].replace(\",\", replacing)\n \nelif sys.argv[1] == \"-c\":\n for line in out.splitlines():\n if b'Current:' in line:\n print line.replace(\"Current: f/\", \"\").replace(\",\", replacing)\n sys.exit(0)\n \nelif sys.argv[1] == \"-p\":\n for line in out.splitlines():\n if b'Choice:' in line:\n print line.replace(\"Choice:\", \"\").split(\"f/\")[1].replace(\",\", replacing)\n \nelse:\n printhelp()\n"
},
{
"alpha_fraction": 0.738095223903656,
"alphanum_fraction": 0.7857142686843872,
"avg_line_length": 20,
"blob_id": "3a4d17b54ade81d2382ee9fbbf26fa3c57541297",
"content_id": "5b3c0917b6c7d3fb8021ad17f4ed1e429ac24c91",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 42,
"license_type": "permissive",
"max_line_length": 29,
"num_lines": 2,
"path": "/kill-gphoto",
"repo_name": "QK-sampson/shooot",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\nkillall gvfsd-gphoto2 gphoto2\n"
},
{
"alpha_fraction": 0.6306156516075134,
"alphanum_fraction": 0.6439267992973328,
"avg_line_length": 19.03333282470703,
"blob_id": "1fb15f1bc62f733fe0023ba8d1e09005501b2d22",
"content_id": "3cd96d1b3647f40332125f689621e823db9b96a7",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 601,
"license_type": "permissive",
"max_line_length": 115,
"num_lines": 30,
"path": "/shooot.py",
"repo_name": "QK-sampson/shooot",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python\nimport os\nimport errno\nfrom time import sleep\nimport sys\nimport sh\n\nprint \"Welcome to shooot\"\nprint \"============\"\n\nname = sys.argv[1]\n\ntry:\n os.makedirs(name)\n \nexcept OSError as exception:\n if exception.errno != errno.EEXIST:\n raise\n\nos.chdir(name)\nprint \"Working in \", os.getcwd()\n\nname = name.split(os.sep)[-1]\nnumber = int(sys.argv[2])\nfileformat = \".\" + sys.argv[3]\n\nfor index in range(0, number):\n print index + 1, \"of\", number\n sh.gphoto2(\"--capture-image-and-download\", \"--filename=\" + name + str(index) + fileformat, \"--force-overwrite\")\n sleep(2)\n"
}
] | 13 |
gzm2062/detection
|
https://github.com/gzm2062/detection
|
453fcc2efb261913c897751ec0a12eecd53b9aad
|
44aef2a47f855965b85a33fb76b096ad2fd7a38f
|
c44c7081d2198d74c72990c6381d85ab3c83130c
|
refs/heads/master
| 2020-06-29T05:11:36.876344 | 2019-08-06T15:01:05 | 2019-08-06T15:01:05 | 200,450,854 | 0 | 0 |
Apache-2.0
| 2019-08-04T04:47:17 | 2019-07-28T08:54:21 | 2019-07-27T14:13:28 | null |
[
{
"alpha_fraction": 0.798305094242096,
"alphanum_fraction": 0.8338983058929443,
"avg_line_length": 38.33333206176758,
"blob_id": "6c836f9ddbdc3822b7ddd185426fc232e44fb209",
"content_id": "2577d638c928780898e945ebfde26aa9ca87c5ef",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 790,
"license_type": "permissive",
"max_line_length": 121,
"num_lines": 15,
"path": "/README.md",
"repo_name": "gzm2062/detection",
"src_encoding": "UTF-8",
"text": "# kaggle-open-image2019\ncenternet已经可以直接运行测试,按照centernet的README下载相应的预训练模型到centernet/models文件夹(https://github.com/xingyizhou/CenterNet)\n;按照网页链接的步骤来\n\n在ctdet.py文件下show_results函数下修改结果的存储路径;\n\n环境:RTX2080ti、Ubuntu18.04、pytorch1.1、cuda10.0\n\n注意:如果使用的是pytorch1.x,因为1.x移除了ffi,则/src/models/DCNV2/src下的build.py和build_double.py文件需要修改\n\nfrom torch.utils.ffi import create_extension替换为from torch.utils.cpp_extension import BuildExtension\n\nffi = create_extension替换为ffi = BuildExtension\n\n在Cornernet目录下运行python demo.py ctdet --demo /path/to/image/or/folder/or/video --load_model ../models/ctdet_coco_dla_2x.pth\n"
},
{
"alpha_fraction": 0.4615384638309479,
"alphanum_fraction": 0.5641025900840759,
"avg_line_length": 22.600000381469727,
"blob_id": "2d4f418bea7bda69cbf4e04f15ccf3e90fb7a13d",
"content_id": "ae17a2d791b048dc7f95a8141375457d3c2bd45b",
"detected_licenses": [
"BSD-3-Clause",
"MIT",
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 117,
"license_type": "permissive",
"max_line_length": 28,
"num_lines": 5,
"path": "/centernet/test.py",
"repo_name": "gzm2062/detection",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# @Time : 2019/7/25 21:26\n# @Author : secortot\n# @Email : [email protected]\n# all rights reserved"
}
] | 2 |
JSilva90/parallelFrameworkAI
|
https://github.com/JSilva90/parallelFrameworkAI
|
04367246e12b354b183249935a98293c2b1c9cee
|
fd094563bce9c0d8e586fc0dba01e01a9b2c6e73
|
aa42c6c93807885c3e4cbf85c921a321d3d0a6fc
|
refs/heads/master
| 2021-01-10T16:27:40.093712 | 2015-12-17T17:21:31 | 2015-12-17T17:21:31 | 48,187,543 | 0 | 1 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6646877527236938,
"alphanum_fraction": 0.6664468050003052,
"avg_line_length": 46.375,
"blob_id": "74fbb21a5e6c86a7b224755476bcc14dee03b1fa",
"content_id": "a8f00e862aac9c761406d2b13dd53c58936d8804",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4548,
"license_type": "no_license",
"max_line_length": 138,
"num_lines": 96,
"path": "/parallelAbstractionAI.py",
"repo_name": "JSilva90/parallelFrameworkAI",
"src_encoding": "UTF-8",
"text": "import multiprocessing as mp\nimport sys\n\n\n##This is a parallel abstraction using forks to The Allen AI Science Challenge\n##In case you have any question you can communicate with me using the following mail: [email protected]\n\n## receives a list and a number of processes\n## returns a list of lists, each inner list contains the data associated to a process\ndef divideData(data, n_proc):\n partitioned_lists = []\n for i in range(0, n_proc):\n partitioned_lists.append([])\n \n l = 0\n for i in range(0, len(questions)):\n partitioned_lists[l].append(questions[i])\n l += 1\n l = l % n_proc\n return partitioned_lists\n\n##This function represents the strategy to obtain the answer for a single question.\n## If you have a global model for every question or any other global information you can add it as a parameter of this function\n## and in the mp.Process part\ndef answerQuestions(questions, lock, com):\n for question in questions:\n print question.question ##In my class it prints the question, I advise to remove this since it'll run in parallel.\n ##apply your strategy to find the answer for a single question\n \n ##after answering all questions its time to sent the results back\n ##in my case I store the obtained answer into my own question class and just retrieved the classes back with the answer\n ##You can count the errors and correct answers within this function and just return those numbers. It's really up to you.\n \n ##sending results time\n lock.acquire() ##obtain lock to send\n out_pipe, in_pipe = com\n if out_pipe.poll(): ##if it has a message, it means some other process already left their results in the pipe\n ##so this one has to read the previous results, append them to its own results and put the whole thing in the pipe\n msg = out_pipe.recv()\n questions = questions + msg\n in_pipe.send(questions) ##the first process to send results don't need to read anythin just posts its results in the pipe\n lock.release() ## release lock\n \n ##after this every process other than the main will terminate. The main will continue with the execution of the main function\n \n \n##main function\ndef main():\n if __name__ == \"__main__\": ##This is required to use forks correctly\n try:\n n_proc = int(sys.argv[1]) ##read number of processes from the line\n except Exception as e:\n print \"Indicate the number of cores to use... ex: python aikaggle.py 4\"\n quit()\n \n #use a function to read the data\n data = readData(\"filename.tsv\")\n \n ##I'm suppossing your data is a list of anything. In my case I created my own class that stores\n ##the question itselt, its id, options, correct answer (in case it is available) and some other useful information for my approach\n \n ##next step is to divide the work among all processes. 
I'm not doing any work balance strategy, \n ##I just equally divide the number of questions among processors and assume each one will work similarly the same amount of time\n \n partitioned_lists = divideData(data, n_proc) ##gets a list of lists, each inner list is the data assigned to a process\n ##Once again in my case I just divide the questions among processes\n \n ##define parallel variables\n lock = mp.Lock() ##used to avoid concurrent writes on pipe\n com = mp.Pipe() ## for interprocess communication\n workers = [] ## keep track of the workers\n \n ##The main process will start all the others\n for i in range(1, n_proc):\n ##workOnData is the function that actually do whatever your approach does, also this is the part that returns in parallel\n p = mp.Process(target=answerQuestions, args=(partitioned_lists[i], lock, com))\n workers.append(p)\n p.start()\n \n ##send the main worker to actually process some work too \n answerQuestions(partitioned_lists[0], lock, com)\n \n for w in workers: ##main process waits for all the others to finish\n w.join()\n \n ##reads the final output from every process\n out_pipe, in_pipe = com\n final_results = out_pipe.recv()\n \n ##do whatever you want with the results\n ##in my case I iterate through all the questions and compare if my selected answer is the same as the correct one\n ##calculating the error rate of the approach\n \n\n\nmain()\n"
}
] | 1 |
gcarvs/jogo-da-memoria
|
https://github.com/gcarvs/jogo-da-memoria
|
73d9de33ced758d5bcea733e301f18a3097fb5b6
|
7157f56be8d6511803283750ca3a62deea877d22
|
fefbfadb93af133d2c2aa57cdc3158bd956532c4
|
refs/heads/master
| 2021-01-25T10:34:47.217125 | 2018-03-01T21:49:28 | 2018-03-01T21:49:28 | 123,359,698 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.484103262424469,
"alphanum_fraction": 0.5173913240432739,
"avg_line_length": 32.15315246582031,
"blob_id": "8187c49929e2c2ac63dfccd60e056899064a8e3e",
"content_id": "c2ef00a2961673a08ba3cb515f7dd508aa494820",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7363,
"license_type": "no_license",
"max_line_length": 224,
"num_lines": 222,
"path": "/server.py",
"repo_name": "gcarvs/jogo-da-memoria",
"src_encoding": "UTF-8",
"text": "import socket\nimport pickle\nimport random\nimport time\n\n# Cria o socket\nsocket_servidor = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nsocket_cliente = ''\n# Obtém o nome da máquina\n# host = socket.gethostname()\nhost = \"localhost\"\nporta = 12397\n# Associa a porta\nsocket_servidor.bind((host, porta))\n# Escutando...\nsocket_servidor.listen()\nprint(\"Servidor de nome \", host, \" esperando conexão na porta \", porta)\n\ntabuleiroDefault = [\n [' ', '|', '0', '1', '2', '3', '4', '5', '6', '7'],\n ['-', '/', '-', '-', '-', '-', '-', '-', '-', '-'],\n ['0', '|', '#', '#', '#', '#', '#', '#', '#', '#'],\n ['1', '|', '#', '#', '#', '#', '#', '#', '#', '#'],\n ['2', '|', '#', '#', '#', '#', '#', '#', '#', '#'],\n ['3', '|', '#', '#', '#', '#', '#', '#', '#', '#'],\n ['4', '|', '#', '#', '#', '#', '#', '#', '#', '#']\n]\n\ntabuleiro = []\n\nduplas8 = ['A', 'A', 'B', 'B', 'C', 'C', 'D',\n 'D', 'E', 'E', 'F', 'F', 'G', 'G', 'H', 'H']\nduplas12 = ['A', 'A', 'B', 'B', 'C', 'C', 'D', 'D', 'E', 'E', 'F',\n 'F', 'G', 'G', 'H', 'H', 'I', 'I', 'J', 'J', 'L', 'L', 'M', 'M']\nduplas20 = ['A', 'A', 'B', 'B', 'C', 'C', 'D', 'D', 'E', 'E', 'F', 'F', 'G', 'G', 'H', 'H', 'I', 'I', 'J', 'J', 'L', 'L', 'M', 'M',\n'N', 'N', 'O', 'O', 'P', 'P', 'Q', 'Q', 'R', 'R', 'S', 'S', 'T', 'T', 'U', 'U']\n\n\ndef acabouJogo():\n achouPeca = False\n\n linhasValidas = tabuleiro[2:len(tabuleiro)]\n tabuleiroValido = [None] * len(tabuleiro)\n\n for idx, linha in enumerate(linhasValidas):\n tabuleiroValido[idx] = linha[2:len(linha)]\n\n for linha in tabuleiroValido:\n for peca in linha:\n if peca != '#':\n return False\n\n return True\n\n\ndef gerarTabuleiro8():\n random.shuffle(duplas8)\n tabuleiro.append(tabuleiroDefault[0][0:6])\n tabuleiro.append(tabuleiroDefault[1][0:6])\n tabuleiro.append(tabuleiroDefault[2][0:6])\n tabuleiro.append(tabuleiroDefault[3][0:6])\n tabuleiro.append(tabuleiroDefault[4][0:6])\n tabuleiro.append(tabuleiroDefault[5][0:6])\n\n linhas = [duplas8[0:4], duplas8[4:8], duplas8[8:12], duplas8[12:16]]\n\n for idx, linha in enumerate(linhas):\n count = 2\n for peca in linha:\n tabuleiro[idx + 2][count] = peca\n count = count + 1\n\n\ndef gerarTabuleiro12():\n random.shuffle(duplas12)\n tabuleiro.append(tabuleiroDefault[0][0:8])\n tabuleiro.append(tabuleiroDefault[1][0:8])\n tabuleiro.append(tabuleiroDefault[2][0:8])\n tabuleiro.append(tabuleiroDefault[3][0:8])\n tabuleiro.append(tabuleiroDefault[4][0:8])\n tabuleiro.append(tabuleiroDefault[5][0:8])\n\n linhas = [duplas12[0:6], duplas12[6:12], duplas12[12:18], duplas12[18:24]]\n\n for idx, linha in enumerate(linhas):\n count = 2\n for peca in linha:\n tabuleiro[idx + 2][count] = peca\n count = count + 1\n\n\ndef gerarTabuleiro20():\n random.shuffle(duplas20)\n tabuleiro.append(tabuleiroDefault[0][0:10])\n tabuleiro.append(tabuleiroDefault[1][0:10])\n tabuleiro.append(tabuleiroDefault[2][0:10])\n tabuleiro.append(tabuleiroDefault[3][0:10])\n tabuleiro.append(tabuleiroDefault[4][0:10])\n tabuleiro.append(tabuleiroDefault[5][0:10])\n tabuleiro.append(tabuleiroDefault[6][0:10])\n\n linhas = [duplas20[0:8], duplas20[8:16],\n duplas20[16:24], duplas20[24:32], duplas20[32:40]]\n\n for idx, linha in enumerate(linhas):\n count = 2\n for peca in linha:\n tabuleiro[idx + 2][count] = peca\n count = count + 1\n\n\ndef printarTabuleiro():\n txt = 'Tabuleiro atual: \\n'\n for linha in tabuleiro:\n linhaTxt = ''\n for peca in linha:\n if linhaTxt == '':\n if peca == '#':\n linhaTxt += ' '\n elif peca.isdigit() or peca == '-' or peca == '|' or peca == 
'/' or peca == ' ':\n linhaTxt += peca\n else:\n linhaTxt += '@'\n else:\n if peca == '#':\n linhaTxt += ' '\n elif peca.isdigit() or peca == '-' or peca == '|' or peca == '/' or peca == ' ':\n linhaTxt += ' ' + peca\n else:\n linhaTxt += ' @'\n txt += linhaTxt + '\\n'\n return txt\n\n\ndef printarTabuleiroRevelado(coordenadas1, coordenadas2):\n txt = 'Tabuleiro atual: \\n'\n for idxLinha, linha in enumerate(tabuleiro):\n linhaTxt = ''\n for idxPeca, peca in enumerate(linha):\n if linhaTxt == '':\n if peca == '#':\n linhaTxt += ' '\n elif peca.isdigit() or peca == '-' or peca == '|' or peca == '/' or peca == ' ' or (idxLinha == coordenadas1[0] and idxPeca == coordenadas1[1]) or (idxLinha == coordenadas2[0] and idxPeca == coordenadas2[1]):\n linhaTxt += peca\n else:\n linhaTxt += '@'\n else:\n if peca == '#':\n linhaTxt += ' '\n elif peca.isdigit() or peca == '-' or peca == '|' or peca == '/' or peca == ' 'or (idxLinha == coordenadas1[0] and idxPeca == coordenadas1[1]) or (idxLinha == coordenadas2[0] and idxPeca == coordenadas2[1]):\n linhaTxt += ' ' + peca\n else:\n linhaTxt += ' @'\n txt += linhaTxt + '\\n'\n return txt\n\n\ndef fazerJogada():\n print('fazerjogada')\n input1 = socket_cliente.recv(1024)\n isValid = validarJogada(input1.decode('ascii'))\n socket_cliente.send(pickle.dumps(isValid))\n\n input2 = socket_cliente.recv(1024)\n isValid = validarJogada(input2.decode('ascii'))\n socket_cliente.send(pickle.dumps(isValid))\n\n jogadas = pickle.loads(socket_cliente.recv(1024))\n\n peca1 = tabuleiro[jogadas[0][0]][jogadas[0][1]]\n peca2 = tabuleiro[jogadas[1][0]][jogadas[1][1]]\n\n txt = printarTabuleiroRevelado(jogadas[0], jogadas[1])\n if peca1 == peca2:\n tabuleiro[jogadas[0][0]][jogadas[0][1]] = '#'\n tabuleiro[jogadas[1][0]][jogadas[1][1]] = '#'\n txt += 'Acertou!'\n else:\n txt += 'Errou!\\n'\n socket_cliente.send(txt.encode('ascii'))\n\n\ndef validarJogada(inputTxt):\n if(',' in inputTxt):\n if(len(inputTxt) == 3 and inputTxt[1] == ','):\n coordenadasTxt = inputTxt.split(',')\n if coordenadasTxt[0].isdigit() and coordenadasTxt[1].isdigit():\n x = int(coordenadasTxt[0]) + 2\n y = int(coordenadasTxt[1]) + 2\n if x >= len(tabuleiro):\n return False\n elif y >= len(tabuleiro[x]):\n return False\n elif tabuleiro[x][y] == '#':\n return False\n return True\n else:\n return False\n else:\n return False\n\n\ndef setDificuldade(dificuldade):\n if dificuldade == 'a':\n gerarTabuleiro8()\n elif dificuldade == 'b':\n gerarTabuleiro12()\n else:\n gerarTabuleiro20()\n\n\nwhile True:\n (socket_cliente, addr) = socket_servidor.accept()\n print('Conectado a:' + str(addr))\n dificuldade = socket_cliente.recv(1024)\n setDificuldade(dificuldade.decode('ascii'))\n while not acabouJogo():\n time.sleep(2)\n socket_cliente.send(printarTabuleiro().encode('ascii'))\n fazerJogada()\n socket_cliente.send('O jogo acabou'.encode('ascii'))\n socket_cliente.close()\n"
},
{
"alpha_fraction": 0.5826255083084106,
"alphanum_fraction": 0.6092664003372192,
"avg_line_length": 26.860214233398438,
"blob_id": "3f86093fd004376d92eec494a0d011f723d21470",
"content_id": "97c9df4d0704f0bf9624e2659cec61528cfb883a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2590,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 93,
"path": "/quadro.py",
"repo_name": "gcarvs/jogo-da-memoria",
"src_encoding": "UTF-8",
"text": "import socket\nimport pickle\nimport random\nimport os\nimport time\n\n# Cria o socket\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM) \ns.connect(('localhost', 12397))\n\ndef abrirConexao():\n try:\n # Tenta se conectar ao servidor\n s.connect(('localhost', 12397))\n msg = \"Ola servidor!\\n\"\n # Envia mensagem codificada em bytes ao servidor\n s.send(msg.encode('ascii')) \n except Exception as erro:\n print(str(erro))\n\ndef cls():\n os.system('cls' if os.name=='nt' else 'clear')\n\ndef fazerJogada():\n input1 = input('Digite as coordenadas da primeira peca(x,y): ')\n s.send(input1.encode('ascii'))\n encodedValid = s.recv(1024)\n isValid = pickle.loads(encodedValid)\n\n while not isValid:\n print('Coordenada invalida!')\n input1 = input('Digite as coordenadas da primeira peca(x,y): ')\n s.send(input1.encode('ascii'))\n encodedValid = s.recv(1024)\n isValid = pickle.loads(encodedValid) \n\n coordenadasTxt = input1.split(',')\n x = int(coordenadasTxt[0]) + 2\n y = int(coordenadasTxt[1]) + 2 \n\n input2 = input('Digite as coordenadas da segunda peca(x,y): ')\n s.send(input2.encode('ascii'))\n encodedValid = s.recv(1024)\n isValid = pickle.loads(encodedValid)\n\n while not isValid:\n print('Coordenada invalida!')\n input2 = input('Digite as coordenadas da segunda peca(x,y): ')\n s.send(input2.encode('ascii'))\n encodedValid = s.recv(1024)\n isValid = pickle.loads(encodedValid) \n\n coordenadasTxt2 = input2.split(',')\n x2 = int(coordenadasTxt2[0]) + 2\n y2 = int(coordenadasTxt2[1]) + 2 \n \n jogadas = [[x, y], [x2, y2]]\n\n s.send(pickle.dumps(jogadas))\n cls()\n txt = s.recv(1024)\n print(txt.decode('ascii'))\n \n\ndef jogar():\n print('Bem vindo ao jogo da memoria!!')\n print('Em qual dificuldade deseja jogar?')\n print('a) Facil (8 duplas)')\n print('b) Medio (12 duplas)')\n print('c) Dificil (20 duplas)')\n \n dificuldade = input('Entre com opcao: ')\n while dificuldade != 'a' and dificuldade != 'b' and dificuldade != 'c':\n print('Opcao invalida! Entre apenas com a letra a, b ou c.')\n dificuldade = input('Entre com opcao: ')\n\n s.send(dificuldade.encode('ascii'))\n \n cls()\n\n tabuleiro = s.recv(1024).decode('ascii')\n while tabuleiro != 'O jogo acabou':\n print(tabuleiro)\n fazerJogada()\n time.sleep(5)\n cls()\n tabuleiro = s.recv(1024).decode('ascii')\n \n cls()\n\n print('Obrigado por jogar nosso jogo da memoria!!')\n\njogar()"
}
] | 2 |
armboi/SCanDet
|
https://github.com/armboi/SCanDet
|
e1acb7a76f5b7199cd9e550caa7bb46fe99a55fe
|
1b1723ed6fb2febac29bc85a650e0b4b97244ff2
|
c381799aceaf5faa693b698e00d27196d8fdd16d
|
refs/heads/main
| 2023-04-06T13:54:50.789303 | 2021-04-25T16:18:21 | 2021-04-25T16:18:21 | 361,473,915 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6498810648918152,
"alphanum_fraction": 0.6605868339538574,
"avg_line_length": 29.385541915893555,
"blob_id": "1d3b5fb9045f677ad97473b91e4df9aa20ede23c",
"content_id": "529ceee060a8ea1fc2d22a879884fcded4fc174d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2522,
"license_type": "no_license",
"max_line_length": 90,
"num_lines": 83,
"path": "/app.py",
"repo_name": "armboi/SCanDet",
"src_encoding": "UTF-8",
"text": "from __future__ import division, print_function\n# coding=utf-8\nimport sys\nimport os\nimport glob\nimport re\nimport numpy as np\n\n# Keras\nfrom keras.applications.imagenet_utils import preprocess_input, decode_predictions\nfrom keras.models import load_model\nfrom keras.preprocessing import image\n\n# Flask utils\nfrom flask import Flask, redirect, url_for, request, render_template\nfrom werkzeug.utils import secure_filename\nfrom gevent.pywsgi import WSGIServer\n\n# Define a flask app\napp = Flask(__name__)\n\n# Model saved with Keras model.save()\nMODEL_PATH = 'CNN_MODEL.h5'\n\n# Load your trained model\nmodel = load_model(MODEL_PATH)\n# model._make_predict_function() # Necessary\n# print('Model loaded. Start serving...')\n\n# You can also use pretrained model from Keras\n# Check https://keras.io/applications/\n#from keras.applications.resnet50 import ResNet50\n#model = ResNet50(weights='imagenet')\n# model.save('')\nprint('Model loaded. Check http://127.0.0.1:5000/')\n\n\ndef model_predict(img_path, model):\n test_image = image.load_img(img_path, target_size=(120, 160, 3))\n test_image = image.img_to_array(test_image)\n test_image = np.expand_dims(test_image, axis=0)\n test_image__mean = np.mean(test_image)\n test_image_std = np.std(test_image)\n test_image = (test_image - test_image__mean)/test_image_std\n preds = model.predict(test_image)\n pred_class = model.predict_classes(test_image)\n return preds, pred_class\n\n\[email protected]('/', methods=['GET'])\ndef index():\n # Main page\n return render_template('index.html')\n\n\[email protected]('/predict', methods=['GET', 'POST'])\ndef upload():\n if request.method == 'POST':\n # Get the file from post request\n f = request.files['file']\n\n # Save the file to ./uploads\n basepath = os.path.dirname(__file__)\n file_path = os.path.join(\n basepath, 'uploads', secure_filename(f.filename))\n f.save(file_path)\n\n # Make prediction\n preds, pred_class = model_predict(file_path, model)\n label_list = ['akiec', 'bcc', 'bkl', 'df', 'nv', 'mel', 'vasc']\n l = ['Actinic keratoses', 'Basal cell carcinoma', 'Benign keratosis-like lesions',\n 'Dermatofibroma', 'Melanocytic nevi', 'Melanoma', 'Vascular lesions']\n\n result = l[preds.argmax()]\n # Process your result for human\n # pred_class = preds.argmax(axis=-1) # Simple argmax\n # Convert to string\n return result\n return \"hello\"\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n"
}
] | 1 |
leenagupte/reverse-string
|
https://github.com/leenagupte/reverse-string
|
da2205d3be954109af53f0f45713806f615d5485
|
ff5d84bbf67c26ea48a7b27975b18c17458bd769
|
d8bc9d7d2d5dfc8e39c7a65fee98ca5b60c2bf1b
|
refs/heads/master
| 2021-01-10T20:39:34.544469 | 2015-01-11T13:48:44 | 2015-01-11T13:48:44 | 29,093,029 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.643552303314209,
"alphanum_fraction": 0.6459854245185852,
"avg_line_length": 25.516128540039062,
"blob_id": "d51ef005cc6c3ef657dce801165998f0ffe3c503",
"content_id": "37907ba890e2b32f87055d56b23e9dad8696ab08",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 822,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 31,
"path": "/unit_tests/test_reverse_string.py",
"repo_name": "leenagupte/reverse-string",
"src_encoding": "UTF-8",
"text": "import unittest\nfrom hamcrest import assert_that, equal_to\nfrom reverse_string import reverse_string, reverse_words, reverse_word_order\n\n\nclass TestReverseString(unittest.TestCase):\n def test_get_last_character(self):\n str = \"abcdefg\"\n\n assert_that(str[-1], equal_to(\"g\"))\n\n def test_remove_last_character_from_string(self):\n str = \"abcdefg\"\n\n assert_that(str[:-1], equal_to(\"abcdef\"))\n\n def test_get_reverse_string(self):\n str = \"abcdefg\"\n\n assert_that(reverse_string(str), equal_to(\"gfedcba\"))\n\n def test_reverse_each_word_in_string(self):\n\n str = \"star wars\"\n\n assert_that(reverse_words(str), equal_to(\"rats sraw\"))\n\n def test_reverse_word_order(self):\n str = \"star wars\"\n\n assert_that(reverse_word_order(str), equal_to(\"wars star\"))\n"
},
{
"alpha_fraction": 0.5439330339431763,
"alphanum_fraction": 0.5481171607971191,
"avg_line_length": 16.10714340209961,
"blob_id": "67fff3f0a988d19370cef973718091b37dc52499",
"content_id": "e2267b5c131ed3e83edffe861d47d246f9c4c475",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 478,
"license_type": "no_license",
"max_line_length": 46,
"num_lines": 28,
"path": "/reverse_string.py",
"repo_name": "leenagupte/reverse-string",
"src_encoding": "UTF-8",
"text": "def reverse_string(str):\n reverse_str = \"\"\n\n for s in str:\n reverse_str += str[-1]\n str = str[:-1]\n\n return reverse_str\n\n\ndef reverse_words(str):\n reverse_wrd = \"\"\n\n for s in str.split(\" \"):\n reverse_wrd += reverse_string(s) + \" \"\n\n return reverse_wrd.strip()\n\n\ndef reverse_word_order(str):\n reverse_str = \"\"\n\n str_list = str.split(\" \")\n\n for s in reversed(str_list):\n reverse_str += s + \" \"\n\n return reverse_str.strip()"
}
] | 2 |
AzcarGabriel/python_audio_splitter
|
https://github.com/AzcarGabriel/python_audio_splitter
|
3b685dce73026bfbb670d4219567181cd2f30401
|
7703ce5018e66d301efffe71b3c91140b701c779
|
7522d98cb72758b41049634f8db95aa3e8d8bea8
|
refs/heads/master
| 2020-03-28T14:40:02.309988 | 2018-09-12T16:42:31 | 2018-09-12T16:42:31 | 148,509,810 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.603723406791687,
"alphanum_fraction": 0.6130319237709045,
"avg_line_length": 31.7391300201416,
"blob_id": "b47fe09ed182097ddc98879a1baada280091ccb4",
"content_id": "590c64f69cafce374069f6b915efeb18d949cc88",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 752,
"license_type": "no_license",
"max_line_length": 96,
"num_lines": 23,
"path": "/splitter.py",
"repo_name": "AzcarGabriel/python_audio_splitter",
"src_encoding": "UTF-8",
"text": "import scipy.io.wavfile as wav\nimport sys\n\ndef split_audio(f_in_name, init_sec, init_desv, end_sec, end_desv, f_out_name):\n print(\"From \" + f_in_name + \" extracting \" + f_out_name)\n (rate, sig) = wav.read(f_in_name)\n i = rate * init_sec + int(rate * init_desv)\n e = rate * end_sec + int(rate * end_desv)\n wav.write(f_out_name+\".wav\", rate, sig[i:e])\n\nif len(sys.argv) != 7:\n print(\"Wrong format. It should be f_in_name init_sec init_desv end_sec end_desv f_out_name\")\nelse:\n f_in = str(sys.argv[1])\n f_out = str(sys.argv[6])\n\n init_sec = int(sys.argv[2])\n init_desv = float(sys.argv[3])\n\n end_sec = int(sys.argv[4])\n end_desv = float(sys.argv[5])\n\n split_audio(f_in, init_sec, init_desv, end_sec, end_desv, f_out)"
},
{
"alpha_fraction": 0.6853741407394409,
"alphanum_fraction": 0.7176870703697205,
"avg_line_length": 24.60869598388672,
"blob_id": "d464cb9365853a00eae4a161c251c47eb992d00e",
"content_id": "2ca1c7e7730513483b9e7dc5cb254e3aafae11c8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 588,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 23,
"path": "/README.md",
"repo_name": "AzcarGabriel/python_audio_splitter",
"src_encoding": "UTF-8",
"text": "# Python audio splitter\n## Using scipy.io.wavfile\n\n\n# Use\n## Input arguments\n- f_in_name: name of the wav to split.\n- init_sec: integer second where the new audio will start.\n- init_desv: fraction of the second where the new audio will start.\n- end_sec: integer second where the new audio will end.\n- end_desv: fraction of the second where the new audio will end.\n- f_out_name: name of the output wav (without the .wav extension).\n\n\n## Output\n- Wav file.\n\n# Example\nSplit audio1.wav between 60.5 to 70.2 seconds. Output: a1.wav\n\n```sh\n> python3 splitter.py audio1.wav 60 0.5 70 0.2 a1\n```"
}
] | 2 |
riadnwu/Nural-Network-And-Fuzy-System
|
https://github.com/riadnwu/Nural-Network-And-Fuzy-System
|
d04f6efe553e68eac872c9fbac35c908168f3c64
|
ee0cb822ce4df8b1bfca68fea2f79e0b03a9be72
|
edef99857e610c46763049f100e5db74c6bf1d6b
|
refs/heads/master
| 2020-03-24T21:38:48.664410 | 2018-08-03T17:47:22 | 2018-08-03T17:47:22 | 143,043,276 | 1 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.2718949019908905,
"alphanum_fraction": 0.4104299247264862,
"avg_line_length": 23.163461685180664,
"blob_id": "4b9dad7c68f77e197ae781df30256294a9294faf",
"content_id": "8bc92fba6ad425b9314b0a70973461bf022caed8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2512,
"license_type": "no_license",
"max_line_length": 136,
"num_lines": 104,
"path": "/Backpropagation.py",
"repo_name": "riadnwu/Nural-Network-And-Fuzy-System",
"src_encoding": "UTF-8",
"text": "'''\nMd. Riadul Islam\[email protected]\n'''\nfrom math import exp\n\n'''i1=input(\"I1:\")#0.05\ni2=input(\"I2:\")#0.10\nw=[]\nfor i in range(8): #w=[0.15,0.20,0.25,0.30,0.40,0.45,0.50,0.55]\n w.append(input(\"W\"+`i+1`+\":\"))\nb1=input(\"B1:\")#0.35\nb2=input(\"B2:\")#0.60\nto1=input(\"O1:\")#0.01\nto2=input(\"O2:\")#0.99\nn=input(\"N:\")#1.0\nn1=input(\"N1:\")#-0.5 Input Manually'''\n\ni1=0.05\ni2=0.10\nw=[]\nw=[0.15,0.20,0.25,0.30,0.40,0.45,0.50,0.55]\nb1=0.35\nb2=0.60\nto1=1\nto2=1\nn=1.0\nn1=-0.4\n\ndef ForwardFacc(w):\n count=0\n while True:\n nh1=(i1*w[0]+i2*w[1]+b1*n)\n h1=(1/(1+exp(-nh1)))\n nh2 = (i1 * w[2] + i2 * w[3] + b1 * n)\n h2 = (1 / (1 + exp(-nh2)))\n\n no1 = (h1 * w[4] + h2 * w[5] + b2 * n)\n o1 = (1 / (1 + exp(-no1)))\n no2 = (h1 * w[6] + h2 * w[7] + b2 * n)\n o2 = (1 / (1 + exp(-no2)))\n c=0\n if float(format(o1, '.2f')) != to1:\n e1=.5*pow((to1-o1),2)\n c+=1\n if float(format(o2, '.2f')) != to2:\n e2=.5*pow((to2-o2),2)\n c+=1\n\n te=e1+e2\n print \"Output o1:\"+format(o1, '.5f')\n print \"Output o2:\"+format(o2, '.5f')\n #print `c`\n print \"Total Error:\"+format(te, '.5f')+\"\\n\"\n if(c == 1 ):\n w=BackwardFacc(w,h1,h2,o1,o2)\n c=1\n elif (c == 2):\n w = BackwardFacc(w, h1, h2, o1, o2)\n c = 0\n else:\n print \"\\n\\nTotal Steaps:\"+`count`\n print \"Match Output o1:\"+format(o1, '.2f')\n print \"Match Output o2:\" + format(o2, '.2f')\n break\n count+=1\n\ndef BackwardFacc(w,h1,h2,o1,o2):\n w1=[]\n w1.append(w[0] + (n1 * (((-(to1 - o1) * w[4] *( o1*(1-o1))) + (- (to2 - o2) * w[6] *(o2*(1 - o2)) )) * (h1 * (1 - h1)) * i1)))\n w1.append(w[1] + (n1 * (((-(to1 - o1) * w[4] *( o1*(1-o1))) + (- (to2 - o2) * w[6] *(o2*(1 - o2)) )) * (h1 * (1 - h1)) * i2)))\n\n w1.append(w[2] + (n1 * (((-(to1 - o1) * w[5] * (o1 * (1 - o1))) + (- (to2 - o2) * w[7] * (o2 * (1 - o2)))) * (h2 * (1 - h2)) * i1)))\n w1.append(w[3] + (n1 * (((-(to1 - o1) * w[5] * (o1 * (1 - o1))) + (- (to2 - o2) * w[7] * (o2 * (1 - o2)))) * (h2 * (1 - h2)) * i2)))\n\n w1.append(w[4] + (n1*((-(to1-o1))* ( o1*(1-o1)) * (h1))))\n w1.append(w[5] +(n1*((-(to1-o1))* ( o1*(1-o1)) * (h2))))\n\n w1.append(w[6]+ (n1*((-(to2 - o2)) * (o2*(1 - o2)) * (h1))))\n w1.append(w[7]+ (n1*((-(to2 - o2)) * (o2*(1 - o2)) * (h2))))\n\n return w1\n\n\nForwardFacc(w)\n\n'''input\n.05\n.10\n.15\n.20\n.25\n.30\n.40\n.45\n.50\n.55\n.35\n.60\n.01\n.99\n1.0\n-.5\n'''"
},
{
"alpha_fraction": 0.5104281306266785,
"alphanum_fraction": 0.5406147241592407,
"avg_line_length": 27.66929054260254,
"blob_id": "cdc35be1a8965053b38090bf5ff960934fee663a",
"content_id": "ab70020a98406a256c812608a7abe3ab0e2322a6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3644,
"license_type": "no_license",
"max_line_length": 115,
"num_lines": 127,
"path": "/BackPropagationNew.py",
"repo_name": "riadnwu/Nural-Network-And-Fuzy-System",
"src_encoding": "UTF-8",
"text": "'''\nMd. Riadul Islam\n06\[email protected]\n'''\nfrom math import exp\nfrom random import uniform\nimport numpy as np\n\ndef forwardPass(wij,wjk,wkl):\n while(True):\n hiIn = additionBious(np.matmul(input, wij), m)# Hidden Layer 1\n hi = relu(hiIn)\n hjIn = additionBious(np.matmul(hi, wjk), o) # Hidden Layer 2\n hj = sigmoid(hjIn)\n hkIn =additionBious(np.matmul(hj,wkl),p) # Output Layer\n hk = softMax(hkIn)\n if float(format(hk[0],\"0.2\"))!= y[0] or float(format(hk[1],\"0.2\"))!= y[1]:\n print (format(hk[0], \"0.4\"))+\" : \"+(format(hk[1],\"0.4\"))\n wij, wjk, wkl=backwardPass(wij,wjk,wkl,hiIn,hjIn,hkIn,hi,hj,hk)\n #print wjk\n else:\n print (format(hk[0], \"0.2\"))+(format(hk[1],\"0.2\"))\n break\n\ndef backwardPass(wij,wjk,wkl,hiIn,hjIn,hkIn,hi,hj,hk):\n nwkl,oOut,oIn = outputLayerToHiddenlayer2(wkl, hkIn, hk,hj)\n nwjk,h2In,eTotal=hiddenLayer2ToHiddenLayer1(wjk,wkl,hjIn,oOut,oIn,hi)\n nwij, e2Total=hiddenLayer1ToInput(wij, wjk, eTotal, h2In, hi)\n #print nwjk\n return nwij,nwjk,nwkl\n\n\ndef hiddenLayer1ToInput(wij,wjk,eTotal,h2In,hi):\n e2Total = []\n nwij = np.zeros((n, m), dtype='float')\n for i in range(m):\n e2Total.append(0)\n for j in range(o):\n e2Total[i] = e2Total[i] + (eTotal[i] * h2In[i] * wjk[i][j])\n for i in range(m):\n for j in range(o):\n nwij[i][j] = wij[i][j] - (learningRate * (e2Total[j] * hi[j] * input[i]))\n return nwij,e2Total\n\ndef hiddenLayer2ToHiddenLayer1(wjk,wkl,hjIn,oOut,oIn,hi):\n h2In=[]\n eTotal=[]\n nwjk = np.zeros((m, o), dtype='float')\n for i in range(o):\n h2In.append(sigmoid1(hjIn[i])*(1-sigmoid1(hjIn[i])))\n\n for i in range(o):\n eTotal.append(0)\n for j in range(p):\n eTotal[i]=eTotal[i]+(oOut[j] * oIn[j] * wkl[i][j])\n for i in range(m):\n for j in range(o):\n nwjk[i][j]=wjk[i][j]-(learningRate* (eTotal[j]*h2In[j]*hi[i]))\n return nwjk,h2In,eTotal\n\n\ndef outputLayerToHiddenlayer2(wkl,hkIn,hk,hj):\n oOut = []\n oIn = []\n nwkl = np.zeros((o, p), dtype='float')\n for i in range(p):\n oOut.append(-1 * ((y[i] * (1 / hk[i])) + ((1 - y[i]) * (1 / (1 - hk[i])))))\n oIn.append((exp(hkIn[0]) * exp(hkIn[1])) / ((exp(hkIn[0]) + exp(hkIn[1])) * (exp(hkIn[0]) + exp(hkIn[1]))))\n for i in range(o):\n for j in range(p):\n nwkl[i][j] =wkl[i][j]-(learningRate* (oOut[j] * oIn[j] * hj[j]))\n return nwkl,oOut,oIn\n\n\n\n\n\ndef relu(hi):\n for i in range(m):\n hi[i]=max(hi[i],0)\n return hi\ndef sigmoid(hj):\n for i in range(o):\n hj[i]=1/(1+exp(-hj[i]))\n return hj\ndef sigmoid1(w):\n w=1/(1+exp(-w))\n return w\ndef softMax(hk):\n sum=0\n for i in range(p):\n sum=sum+exp(hk[i])\n try:\n for i in range(p):\n hk[i] =exp(hk[i])/sum\n except:\n return hk\n return hk\ndef additionBious(w,n):\n for i in range(n):\n w[i]=w[i]+bious\n return w\ndef rnadomWightGenerator():\n wij = np.zeros((n, m), dtype='float')\n wjk = np.zeros((m, o), dtype='float')\n wkl = np.zeros((o,p), dtype='float')\n for i in range(n):\n for j in range(m):\n wij[i][j]=(float(format(uniform(.1, .9), '.3f')))\n for i in range(m):\n for j in range(o):\n wjk[i][j]=(float(format(uniform(.1, .9), '.3f')))\n for i in range(o):\n for j in range(p):\n wkl[i][j]=(float(format(uniform(.1, .9), '.3f')))\n forwardPass(wij,wjk,wkl)\n\nn=13\nm=3\no=3\np=2\nbious=1\nlearningRate=-0.01\ninput=[.1,.2,.7,.8,.45,-.64,.32,.1,.2,-.7,.8,-.45,-.64]\ny=[0.0,1.0]\nrnadomWightGenerator()\n\n\n\n"
},
{
"alpha_fraction": 0.4114864766597748,
"alphanum_fraction": 0.4628378450870514,
"avg_line_length": 24.44827651977539,
"blob_id": "a3232c67d2c4cf1ff7c368e08afa801e83476eec",
"content_id": "77332cfd7c76553f6ffe8553881ed832ca063a18",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1480,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 58,
"path": "/AND_OR_NAND_NOR.py",
"repo_name": "riadnwu/Nural-Network-And-Fuzy-System",
"src_encoding": "UTF-8",
"text": "\n\ndef PercepTion (w,z):\n x=[0,0,1,1]\n y=[0,1,0,1]\n l=-1\n for i in range(4):\n print \"For x: \"+`x[i]`+\" y: \"+`y[i]`+\" z: \"+`z[i]`\n WaightChack(w,l,x[i],y[i],z[i])\n print \"________________________________\"\n\n\ndef WaightChack(w,l,x,y,z):\n if (l * w[0]) + (x * w[1]) + (y * w[2]) < 0:\n if (z != 0):\n print \"Not Match!! Value:0 != z:1\\n\"\n data = [l, x, y]\n t=0\n NewWaight(w, data, z,t)\n else:\n print \"Value Match: Value: 0 == z: 0\\n\"\n\n elif (l * w[0]) + (x * w[1]) + (y * w[2]) > 0:\n if (z != 1):\n print \"Not Match!! Value: 1 != z: 0\\n\"\n data=[l,x,y]\n t=1\n NewWaight(w,data,z,t)\n else:\n print \"Value Match: Value: 1 == z: 1\\n\"\n\ndef NewWaight(w,data,z,m):\n N = .20\n tw = [0, 0, 0]\n for i in range(3):\n tw[i] = w[i] + N * (z - m) * data[i]\n print\"New Waight: w0:\"+`tw[0]`+\" w1:\"+`tw[1]`+\" w2:\"+`tw[2]`+\"\\n\"\n WaightChack(tw,data[0],data[1],data[2], z)\n\n#Main Function\nw=[.3,.5,-.4]\nand1=[0,0,0,1]\nor1=[0,1,1,1]\nnAnd=[1,1,1,0]\nnOr=[1,0,0,0]\n#xOr=[0,1,1,0]\n#xNor=[1,0,0,1]\n\nprint(\"Perceptron for AND\\n\")\nPercepTion(w,and1)\nprint(\"\\n\\nPerceptron for OR\\n\")\nPercepTion(w,or1)\nprint(\"\\n\\nPerceptron for NAND\\n\")\nPercepTion(w,nAnd)\nprint(\"\\n\\nPerceptron for NOR\\n\")\nPercepTion(w,nOr)\n'''print(\"\\n\\nPerceptron for XOR\\n\")\nPercepTion(w,xOr)\nprint(\"\\n\\nPerceptron for XNOR\\n\")\nPercepTion(w,xNor)'''\n\n\n"
},
{
"alpha_fraction": 0.297587126493454,
"alphanum_fraction": 0.4262734651565552,
"avg_line_length": 24.741378784179688,
"blob_id": "ea612cadf710a91c4e81867195be33758a42ad9e",
"content_id": "1c14fae7eecb5087a2a2043cc00301f60137593c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1492,
"license_type": "no_license",
"max_line_length": 92,
"num_lines": 58,
"path": "/XOR and XNOR.py",
"repo_name": "riadnwu/Nural-Network-And-Fuzy-System",
"src_encoding": "UTF-8",
"text": "from math import exp\n\ndef ForwardFacc(w,i1,i2,to1):\n count=0\n c=0\n while True:\n nh1=(i1*w[0]+i2*w[1]+b1*n)\n h1=(1/(1+exp(-nh1)))\n nh2 = (i1 * w[2] + i2 * w[3] + b1 * n)\n h2 = (1 / (1 + exp(-nh2)))\n\n no1 = (h1 * w[4] + h2 * w[5] + b2 * n)\n o1 = (1 / (1 + exp(-no1)))\n\n if float(format(o1, '.2f')) != to1:\n e1=.5*pow((to1-o1),2)\n c+=1\n\n te=e1\n #print \"Output o1:\"+format(o1, '.2f')\n\n if(c == 1 ):\n w=BackwardFacc(w,i1,i2,h1,h2,to1,o1)\n c=0\n else:\n return format(o1, '.2f'),count\n count+=1\n\ndef BackwardFacc(w,i1,i2,h1,h2,to1,o1):\n w1=[]\n w1.append(w[0] + (n1 * ((-(to1 - o1) * w[4] *( o1*(1-o1))) * (h1 * (1 - h1)) * i1)))\n w1.append(w[1] + (n1 * ((-(to1 - o1) * w[4] *( o1*(1-o1))) * (h1 * (1 - h1)) * i2)))\n\n w1.append(w[2] + (n1 * ((-(to1 - o1) * w[5] * (o1 * (1 - o1))) * (h2 * (1 - h2)) * i1)))\n w1.append(w[3] + (n1 * ((-(to1 - o1) * w[5] * (o1 * (1 - o1))) * (h2 * (1 - h2)) * i2)))\n\n w1.append(w[4] + (n1*((-(to1-o1))* ( o1*(1-o1)) * (h1))))\n w1.append(w[5] +(n1*((-(to1-o1))* ( o1*(1-o1)) * (h2))))\n\n return w1\n\ni1=[0,0,1,1]\ni2=[0,1,0,1]\nxOr=[0,1,1,0]\nxNor=[1,0,0,1]\nw=[0.15,0.20,0.25,0.30,0.40,0.45]\n\nb1=0.35\nb2=0.60\nn=1.0\nn1=-.05\n\nprint \"XOR:\\t Steps\"\nfor i in range(4):\n print ForwardFacc(w, i1[i], i2[i], xOr[i])\nprint \"\\nXNOR:\\t Steps\"\nfor i in range(4):\n print ForwardFacc(w, i1[i], i2[i], xNor[i])"
}
] | 4 |
janebrowncutie/mongodb2021
|
https://github.com/janebrowncutie/mongodb2021
|
d5e094376327311d8d5a7e37ad16ad39f8592610
|
2d94869d72708e60ea068150deaa0ed4b07c6427
|
89efa044e4164229d3fa7b758268191702be4f44
|
refs/heads/main
| 2023-07-01T21:26:46.569134 | 2021-08-02T13:34:24 | 2021-08-02T13:34:24 | 388,825,603 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5846063494682312,
"alphanum_fraction": 0.6004700064659119,
"avg_line_length": 18.56321907043457,
"blob_id": "52a48de37c4dc146990857e3e4f8ace960f02448",
"content_id": "8594812d25467e241164ad1997e9d5dce9815d48",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1702,
"license_type": "no_license",
"max_line_length": 104,
"num_lines": 87,
"path": "/app.py",
"repo_name": "janebrowncutie/mongodb2021",
"src_encoding": "UTF-8",
"text": "# ---- YOUR APP STARTS HERE ----\n# -- Import section --\nfrom flask import Flask\nfrom flask import render_template\nfrom flask import request\nfrom flask_pymongo import PyMongo\n\n\n# -- Initialization section --\napp = Flask(__name__)\n\n# events = [\n# {\"event\":\"First Day of Classes\", \"date\":\"2019-08-21\"},\n# {\"event\":\"Winter Break\", \"date\":\"2019-12-20\"},\n# {\"event\":\"Finals Begin\", \"date\":\"2019-12-01\"}\n# ]\n\n# name of database\napp.config['MONGO_DBNAME'] = 'upperline'\n\n# URI of database\napp.config['MONGO_URI'] = 'mongodb+srv://admin:[email protected]/upperline?retryWrites=true&w=majority'\n\nmongo = PyMongo(app)\n\n# -- Routes section --\n# INDEX\n\[email protected]('/')\[email protected]('/index')\n\ndef index():\n user = mongo.db.users\n return render_template('index.html', user = user)\n\n\n# CONNECT TO DB, ADD DATA\n\[email protected]('/add')\n\ndef add():\n # connect to the database\n user = mongo.db.users\n\n # insert new data\n user.insert({'name' : \"Kcaysha\"})\n \n # return a message to the user\n return \"Added user!\"\n\[email protected]('/events', methods = [\"GET\", \"POST\"])\ndef events():\n if request.method == 'GET':\n return \"<h1> Trt again!</h1>\"\n else:\n user_name = request.form['first_name']\n user_date = request.form['date']\n\n events = mongo.db.events\n events.insert({'first_name': user_name, 'date': user_date})\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n# @app.route('/event')\n# def event():\n# # collection = mongo.db.events\n# # events = collection.find({})\n\n# # return render_template('event.html', events = events)\n\n# joy = mongo.db.users\n# users = joy.find({'name' : \"Kcaysha\"})\n\n# return render_template('event.html', users = users)\n"
}
] | 1 |
mustafaakin1/find-job
|
https://github.com/mustafaakin1/find-job
|
5d921cd640235503873cc0578adeeca5da8ace5f
|
809164c7538484f166d0bdddff84cbc377769727
|
b2499c99e82e7d10a5a20827152fefdcfd21de66
|
refs/heads/master
| 2020-04-24T20:41:11.010537 | 2019-02-23T21:07:48 | 2019-02-23T21:07:48 | null | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5244444608688354,
"alphanum_fraction": 0.5288888812065125,
"avg_line_length": 27.125,
"blob_id": "071dab0e2f8cd7957b0279b2f94d6fb86b1eefb7",
"content_id": "08c5f746c9d18a1c54b8c98388f85b8a30e7bf09",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "HTML",
"length_bytes": 450,
"license_type": "no_license",
"max_line_length": 81,
"num_lines": 16,
"path": "/src/templates/applications/list.html",
"repo_name": "mustafaakin1/find-job",
"src_encoding": "UTF-8",
"text": "{% extends \"base.html\" %}\n\n{% block title %} Jobs {% endblock %}\n\n{% block content %}\n {% for job in jobs %}\n <div class=\"card\">\n\t\t\t<h5 class=\"card-header\">{{job.title}}</h5>\n\t\t\t<div class=\"card-body\">\n\t\t \t<p class=\"card-text\">{{job.expire}}</p>\n\t\t\t\t<p class=\"card-title\">{{job.description}}</p>\n\t\t \t<a href=\"{% url 'jobdetail' job.pk %}\" class=\"btn btn-primary\">Details</a>\n\t\t \t</div>\n\t\t</div>\n {% endfor %}\n{% endblock %}\n"
},
{
"alpha_fraction": 0.6411698460578918,
"alphanum_fraction": 0.6625421643257141,
"avg_line_length": 31.962963104248047,
"blob_id": "a2da89f2a40b914f3535c85231675df129810097",
"content_id": "3823caeaa7e32c68ad9cf7b5df96ee25d186a794",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 889,
"license_type": "no_license",
"max_line_length": 92,
"num_lines": 27,
"path": "/src/jobs/forms.py",
"repo_name": "mustafaakin1/find-job",
"src_encoding": "UTF-8",
"text": "from django.forms import ModelForm\nfrom django.forms.widgets import Textarea, FileInput, TextInput, DateInput\n\nfrom .models import Job, Application\n\nclass JobForm(ModelForm):\n\tdef __init__(self, *args, **kwargs):\n\t\tsuper(JobForm, self).__init__(*args, **kwargs)\n\n\tclass Meta:\n\t\tmodel = Job\n\t\tfields = (\"title\", \"description\", \"count\", \"expire\")\n\t\twidgets = {\n\t\t\t'description': Textarea(attrs={'class':\"form-control\"}),\n\t\t\t\"expire\": DateInput(attrs= {\"class\": \"form-control date\", \"data-mask\": \"00/00/0000\"})\n\t\t}\n\n\nclass ApplicationForm(ModelForm):\n\tclass Meta:\n\t\tmodel = Application\n\t\tfields = (\"name\", \"email\", \"phone\", \"address\", \"thoughts\", \"resume\")\n\t\twidget = {\n\t\t\t\"thoughts\": Textarea(attrs={\"class\": \"form-control date\"}),\n\t\t\t\"resume\": FileInput(attrs={\"class\": \"form-control-file\"}),\n\t\t\t\"phone\" : TextInput(attrs={\"class\": \"form-control phone\", \"data-mask\": \"0000 000 00 00\"})\n\t\t}"
},
{
"alpha_fraction": 0.7419990301132202,
"alphanum_fraction": 0.7429837584495544,
"avg_line_length": 26.445945739746094,
"blob_id": "8f0b40a023a099e1e47851723a2e7e39f448f597",
"content_id": "e39a7bf10bbc0c57ea36bde93bb8b404fceca962",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2031,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 74,
"path": "/src/jobs/views.py",
"repo_name": "mustafaakin1/find-job",
"src_encoding": "UTF-8",
"text": "from django.shortcuts import render\nfrom django.urls import reverse_lazy, reverse\nfrom django.views.generic import ListView, DetailView\nfrom django.views.generic.edit import CreateView, DeleteView\nfrom django.contrib.auth.mixins import *\nfrom django.core.serializers import serialize\nfrom django.http import JsonResponse\nfrom .models import Job, Application\nfrom .forms import JobForm, ApplicationForm\n# Create your views here.\n\nclass JobList(ListView):\n\tmodel = Job\n\ttemplate_name = \"jobs/list.html\"\n\tcontext_object_name = \"jobs\"\n\tpaginate_by = 10\n\tqueryset = Job.objects.all().order_by(\"-pk\")\n\n\nclass JobDetail(DetailView):\n\tmodel = Job\n\ttemplate_name = \"jobs/detail.html\"\n\tcontext_object_name = \"job\"\n\n\tdef get(self, req, pk, *args, **kwargs):\n\t\tjob = Job.objects.get(pk=pk)\n\t\tapplicant = Application.objects.filter(job=job)\n\n\t\treturn render(req, self.template_name, {\"job\": job, \"applicants\": applicant})\n\n\nclass JobCreate(LoginRequiredMixin, CreateView):\n\tmodel = Job\n\ttemplate_name = \"jobs/create.html\"\n\tform_class = JobForm\n\t#fields = (\"title\", \"description\", \"count\", \"expire\")\n\n\tdef form_valid(self, form):\n\t\tprint(form.is_valid())\n\t\treturn super().form_valid(form)\n\n\nclass JobDelete(LoginRequiredMixin, DeleteView):\n\tmodel = Job\n\ttemplate_name = \"jobs/delete.html\"\n\tsuccess_url = reverse_lazy(\"joblist\")\n\n\nclass ApplicationList(LoginRequiredMixin, ListView):\n\tmodel = Application\n\t#template_name = \"applications/list.html\"\n\tcontext_object_name = \"jobs\"\n\n\tdef get(self, req, *args, **kwargs):\n\t\tqs = self.model.objects.all()\n\t\treturn JsonResponse(serialize(\"json\", qs), safe = False)\n\n\nclass ApplicationCreate(CreateView):\n\tmodel = Application\n\ttemplate_name = \"applications/create.html\"\n\tform_class = ApplicationForm\n\n\tdef form_valid(self, form):\n\t\tform.instance.job = Job.objects.get(pk=self.kwargs[\"pk\"])\n\t\tprint(form.is_valid())\n\t\treturn super().form_valid(form)\n\n\n\nclass ApplicationDetail(LoginRequiredMixin, DetailView):\n\tmodel = Application\n\ttemplate_name = \"applications/detail.html\"\n\tcontext_object_name = \"app\"\n"
},
{
"alpha_fraction": 0.6391304135322571,
"alphanum_fraction": 0.6391304135322571,
"avg_line_length": 40.90909194946289,
"blob_id": "51e7255d43e5bf5a8cdec655ac5936fc54101eab",
"content_id": "c227f9a08f27113b52a5f60e093e716def0d0662",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 460,
"license_type": "no_license",
"max_line_length": 83,
"num_lines": 11,
"path": "/src/jobs/urls.py",
"repo_name": "mustafaakin1/find-job",
"src_encoding": "UTF-8",
"text": "from django.urls import path\nfrom .views import *\n\nurlpatterns = [\n path('', JobList.as_view(), name=\"joblist\"),\n path('add/', JobCreate.as_view(), name= \"addjob\"),\n path('<int:pk>/', JobDetail.as_view(), name= \"jobdetail\"),\n path('<int:pk>/delete', JobDelete.as_view(), name= \"deletejob\"),\n path(\"<int:pk>/apply\", ApplicationCreate.as_view(), name= \"apply\"),\n path(\"application/<int:pk>\", ApplicationDetail.as_view(), name= \"applydetail\"),\n]"
},
{
"alpha_fraction": 0.6957831382751465,
"alphanum_fraction": 0.7319276928901672,
"avg_line_length": 32.29999923706055,
"blob_id": "7d3958b089f31e0ac14842053e9952d9bb00e555",
"content_id": "0895ef24f37da687e2a793fc13efa729d8715a6e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 332,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 10,
"path": "/README.txt",
"repo_name": "mustafaakin1/find-job",
"src_encoding": "UTF-8",
"text": "Windows:\n\t- download python from https://www.python.org/downloads/ and install it\n\t- run install-django.bat batch file to install depencies\n\t- run \"run.bat\" to start server\n\t- open browser and go 127.0.0.1\n\nLinux \n\t- run install-django.sh as root to install depencies\n\t- run \"run.sh\" to start server\n\t- open browser and go 127.0.0.1"
},
{
"alpha_fraction": 0.6226266026496887,
"alphanum_fraction": 0.6447784900665283,
"avg_line_length": 31.435897827148438,
"blob_id": "ecd75ac95a83389c07fd34248b11858a71fa64c4",
"content_id": "fff1cff64925c656a1936009df2102354b221cd9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1264,
"license_type": "no_license",
"max_line_length": 148,
"num_lines": 39,
"path": "/src/jobs/tests.py",
"repo_name": "mustafaakin1/find-job",
"src_encoding": "UTF-8",
"text": "from django.test import TestCase\nfrom .models import *\nfrom .views import *\nfrom .forms import *\nimport datetime\n# Create your tests here.\n\nclass JobTest(TestCase):\n\tdef __init__(self, *args, **kwargs):\n\t\tprint(\"running job test\")\n\t\tself.j1 = Job(title=\"CEO\", description= \"CEO for company\", count= 1, expire= datetime.datetime.today())\n\t\tself.j2 = Job(title=\"Staff\", description= \"Staff for company\", count= 1, expire= datetime.datetime.today())\n\t\tself.j1.save()\n\t\tself.j2.save()\n\n\tdef test_jobs(self):\n\t\tprint(\"Jobs equal\")\n\t\tself.assertEqual(Job.objects.get(title=\"CEO\"), self.j1)\n\n\nclass JobFormTest(TestCase):\n\tdef __init__(self, *args, **kwargs):\n\t\tprint(\"testin JobForm\")\n\t\t\n\n\tdef test_validation(self):\n\t\tform = JobForm(data={\"title\": \"ceo\", \"description\": \"example\",\n\t\t\t\"count\": 2, \"expire\": \"12/03/2018\"})\n\t\tself.assertTrue(form.is_valid())\n\t\tprint(\"......................... OK!\")\n\nclass ApplicationFormTest(TestCase):\n\tdef __init__(self, *args, **kwargs):\n\t\tprint(\"Testing ApplicationForm\")\n\n\tdef test_form_validation(self):\n\t\tform = ApplicationForm(data= {\"name\": \"Name\", \"email\": \"[email protected]\", \"phone\": \"0555 555 5555\", \"address\": \"dgsdgsgs\", \"thoughts\": \"dsfsfs\", \"job\":1})\n\t\tself.assertFalse(form.is_valid())\n\t\tprint(\"........................... OK!\")"
},
{
"alpha_fraction": 0.8117154836654663,
"alphanum_fraction": 0.8410041928291321,
"avg_line_length": 29,
"blob_id": "a00c23f7ad6d88b6d0ff78cd205fd05d969e43c7",
"content_id": "421d2fd6b30e46ad771403c0ed213643d3f637df",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 239,
"license_type": "no_license",
"max_line_length": 46,
"num_lines": 8,
"path": "/src/install-django.sh",
"repo_name": "mustafaakin1/find-job",
"src_encoding": "UTF-8",
"text": "python3 -m pip install -U pip setuptools wheel\npython3 -m install -U django\n\npython3 manage.py makemigrations\npython3 manage.py makemigrations jobs\npython3 manage.py migrate\npython3 manage.py collectstatic\npython3 manage.py createsuperuser"
},
{
"alpha_fraction": 0.7349397540092468,
"alphanum_fraction": 0.7458926439285278,
"avg_line_length": 35.560001373291016,
"blob_id": "4a5d6889cd87c93b0af9d86edef25c5f464aad65",
"content_id": "5eae2f6b3e62b6a43f68226622c637394023c1a2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 913,
"license_type": "no_license",
"max_line_length": 81,
"num_lines": 25,
"path": "/src/jobs/models.py",
"repo_name": "mustafaakin1/find-job",
"src_encoding": "UTF-8",
"text": "from django.db import models\n\n# Create your models here.\n\nclass Job(models.Model):\n\ttitle = models.CharField(verbose_name='Job Title', max_length=150)\n\tdescription = models.TextField(verbose_name='Job Description')\n\tcount = models.IntegerField(verbose_name='Number of people to hire', default= 1)\n\texpire = models.DateField(verbose_name='Last application date')\n\n\n\tdef get_absolute_url(self):\n\t\treturn \"/{}/\".format(self.pk)\n\nclass Application(models.Model):\n\tname = models.CharField(verbose_name='Name', max_length=150)\n\temail = models.EmailField(verbose_name='Email')\n\tphone = models.IntegerField(verbose_name='Phone number')\n\taddress = models.CharField(verbose_name='Address', max_length=250)\n\tthoughts = models.TextField(verbose_name='Thoughts on job')\n\tresume = models.FileField(upload_to = \"static/files/\")\n\tjob = models.ForeignKey(Job, on_delete = models.CASCADE)\n\n\tdef get_absolute_url(self):\n\t\treturn \"/\""
},
{
"alpha_fraction": 0.5833333134651184,
"alphanum_fraction": 0.5833333134651184,
"avg_line_length": 10,
"blob_id": "33d65f47999db937489901d211be7e5d07cf9be5",
"content_id": "8151c3ecf60b8d9afed2ec8e752db80ce6f367b1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 12,
"license_type": "no_license",
"max_line_length": 10,
"num_lines": 1,
"path": "/README.md",
"repo_name": "mustafaakin1/find-job",
"src_encoding": "UTF-8",
"text": "# find-job\n\n"
},
{
"alpha_fraction": 0.574999988079071,
"alphanum_fraction": 0.800000011920929,
"avg_line_length": 40,
"blob_id": "cdbb5e05f7071f95f0902d18b1eea78531d10e0a",
"content_id": "15e387eb89b3de31c9dc1eba1b5efd633a901e58",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 40,
"license_type": "no_license",
"max_line_length": 40,
"num_lines": 1,
"path": "/src/run.sh",
"repo_name": "mustafaakin1/find-job",
"src_encoding": "UTF-8",
"text": "python3 manage.py runserver 127.0.0.1:80"
},
{
"alpha_fraction": 0.6930091381072998,
"alphanum_fraction": 0.6930091381072998,
"avg_line_length": 28.81818199157715,
"blob_id": "1e8c074e79cefb94ff5b8838d34987aa0ab0129f",
"content_id": "6059bae514257fcbceafe2780d631d92f5385e3c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 329,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 11,
"path": "/src/jobs/admin.py",
"repo_name": "mustafaakin1/find-job",
"src_encoding": "UTF-8",
"text": "from django.contrib import admin\nfrom .models import *\n\n# Register your models here.\n\nclass JobAdmin(admin.ModelAdmin):\n\tlist_display = [\"title\", \"description\", \"count\", \"expire\"]\n\tordering = [\"-pk\"]\n\nclass ApplicationsAdmin(admin.ModelAdmin):\n\tlist_display = [\"name\", \"email\", \"phone\", \"address\", \"thoughts\", \"resume\", \"job\"]\n\n"
}
] | 11 |
yash872/Face_Recognition_KNN
|
https://github.com/yash872/Face_Recognition_KNN
|
98079c439aa28a40e23853f8b84f62be9de9306c
|
c07d45beee6c0aba6246e9ce314b64a85c211f52
|
21091cdf6cad5833c59fad9f5104a7916dcefff6
|
refs/heads/master
| 2022-12-04T06:34:54.034768 | 2020-08-10T06:46:49 | 2020-08-10T06:46:49 | 286,393,634 | 1 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.7218468189239502,
"alphanum_fraction": 0.7308558821678162,
"avg_line_length": 48.33333206176758,
"blob_id": "15875540eea51d0024e5292c3212c73693871087",
"content_id": "4cb0b51d01e252fadf1a50aa3b6e6fba5218985a",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 888,
"license_type": "permissive",
"max_line_length": 173,
"num_lines": 18,
"path": "/README.md",
"repo_name": "yash872/Face_Recognition_KNN",
"src_encoding": "UTF-8",
"text": "# Face_Recognition_KNN \nFace Recognition using KNN algorithm and opencv in python.\nThis is a implementation of knn classifier.\n\n## Dependencies\n Python 3.6 , OpenCv , Numpy\n\n## Breakdown of the code for knn classifier\n 1. Read a video stream using opencv\n 2. Extract faces out of it and store as a numpy array in dataset\n 3. Load the stored data as a Training data (data + label)\n 4. Use the Knnto find the prediction of face\n 5. Map the predicted ID to name of the user\n 6. Finally, Display the prediction on the screen using bounding box and name of user\n\n## How it works! :wink: \n* Run the \"face_collect.py\" , it will ask for users name,this will collect the face data of user until the user press key 'q' and save in dataset/data/[username].npy format.\n* Now run the \"face_recog.py\", and it will show the detected face of user with bounding box and user's name.\n"
},
{
"alpha_fraction": 0.5173420906066895,
"alphanum_fraction": 0.5414384603500366,
"avg_line_length": 26.852632522583008,
"blob_id": "7d61783d1eb2274af12e1280e7224be09f417807",
"content_id": "5f32257b25b55f195ca570ba34bcd6d1dc457ecd",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2739,
"license_type": "permissive",
"max_line_length": 100,
"num_lines": 95,
"path": "/face_recog.py",
"repo_name": "yash872/Face_Recognition_KNN",
"src_encoding": "UTF-8",
"text": "import cv2\r\nimport numpy as np\r\nimport os\r\n\r\n#------------------- KNN ----------------------#\r\ndef distance(v1,v2):\r\n #-----------------Eucledian distance\r\n return np.sqrt(((v1-v2)**2).sum())\r\n\r\ndef knn(train,test,k=5):\r\n dist=[]\r\n \r\n for i in range(train.shape[0]):\r\n #get the vector and label\r\n ix = train[i, :-1]\r\n iy = train[i, -1]\r\n \r\n #compute distance from test point\r\n d = distance(test, ix)\r\n dist.append([d,iy])\r\n #sort based on distance and get top k\r\n dk = sorted(dist,key=lambda x:x[0])[:k]\r\n #retrives only the label\r\n labels = np.array(dk)[:,-1]\r\n \r\n #get frequency of each label\r\n output = np.unique(labels,return_counts=True)\r\n #Find max frequency and corresponding label\r\n index = np.argmax(output[1])\r\n return output[0][index]\r\n#----------------------------------------------------#\r\n \r\ncap = cv2.VideoCapture(0)\r\n\r\nface_cascade = cv2.CascadeClassifier(\"dataset/haarcascade_frontalface_alt.xml\")\r\n\r\nskip=0\r\nface_data = []\r\nlabels = []\r\ndataset_path = './dataset/data/'\r\n\r\nclass_id = 0 #labels for given files\r\nnames = {} #Mapping btw id and name\r\n\r\nfor fx in os.listdir(dataset_path):\r\n if fx.endswith('.npy'):\r\n names[class_id]=fx[:-4]\r\n \r\n data_item = np.load(dataset_path+fx)\r\n face_data.append(data_item)\r\n \r\n #create label for the class \r\n target = class_id*np.ones((data_item.shape[0],))\r\n class_id +=1\r\n labels.append(target)\r\n \r\nface_dataset = np.concatenate(face_data,axis=0)\r\nface_labels = np.concatenate(labels,axis=0).reshape((-1,1))\r\n#print(face_dataset.shape)\r\n#print(face_labels.shape)\r\n\r\ntrainset = np.concatenate((face_dataset,face_labels),axis=1)\r\n#print(trainset.shape)\r\n\r\n#Testing-----------------------------------\r\nwhile True:\r\n ret,frame = cap.read()\r\n if ret==False:\r\n continue\r\n \r\n faces =face_cascade.detectMultiScale(frame,1.3,5)\r\n \r\n for face in faces:\r\n x,y,w,h = face\r\n \r\n if ret:\r\n offset = 10\r\n face_section = frame[y-offset:y+h+offset,x-offset:x+w+offset]\r\n face_section = cv2.resize(face_section,(100,100),cv2.INTER_AREA)\r\n \r\n #prediction\r\n output = knn(trainset,face_section.flatten())\r\n \r\n #Display name and rectangle\r\n pred_name = names[int(output)]\r\n cv2.putText(frame,pred_name,(x,y-10),cv2.FONT_HERSHEY_SIMPLEX,1,(255,0,0),2,cv2.LINE_AA)\r\n cv2.rectangle(frame,(x,y),(x+w,y+h),(0,255,255),2)\r\n \r\n cv2.imshow(\"Faces\",frame)\r\n key = cv2.waitKey(1) & 0xFF\r\n if key ==ord('q'):\r\n break\r\n\r\ncap.release()\r\ncv2.destroyAllWindows()"
}
] | 2 |
rafaeltedesco/pandas_csv_to_html
|
https://github.com/rafaeltedesco/pandas_csv_to_html
|
27b671a4ef147a23293f179918a2054df5af888f
|
17d74e327bc72fe9d783f67d421b08231f0a8e60
|
7aa41a113395f6db45175e6a244d5643bab55b35
|
refs/heads/main
| 2023-03-17T23:40:52.305459 | 2021-03-05T23:13:50 | 2021-03-05T23:13:50 | 344,957,154 | 0 | 1 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6123033165931702,
"alphanum_fraction": 0.6123033165931702,
"avg_line_length": 26.41176414489746,
"blob_id": "9125332b0037381d3e2053e77cbaa03f5c0d9367",
"content_id": "66051ed5a5db7b3ed6102ce4869424f7c556f620",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1398,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 51,
"path": "/format_path.py",
"repo_name": "rafaeltedesco/pandas_csv_to_html",
"src_encoding": "UTF-8",
"text": "import pandas as pd\nimport os\n\ndef _format_path(path):\n return '\\\\'.join(path.replace('\\\\', ' ').split())\n\ndef pandarize(path, filename):\n \"\"\"\n filename: str (.csv)\n \"\"\"\n path = _format_path(path)\n return pd.read_csv(os.path.join(path,filename))\n\n\ndef format_date_time(df, column):\n return pd.to_datetime(df[column].str.upper(), format='%Y-%m-%d %I:%M %p')\n\ndef order_date_time(df, column):\n return df.sort_values(by=column, ascending=True)\n\n\ndef filter_by_columns(df, *columns):\n \"\"\"\n df: pandas DataFrame\n columns: str args separeted by comma\n e.g 'price', 'address'\n\n \"\"\"\n new_df = pd.DataFrame()\n\n for column in columns:\n new_df[column] = df[column]\n\n return new_df \n\ndef generate_html(df, filename='students_ordered.html'):\n df.to_html(filename)\n\ndef get_format_df(path=r'C:\\Users\\Rafael\\Downloads', filename='events-export.csv'):\n date_column = 'Start Date & Time'\n df = pandarize(path, filename)\n date_df = format_date_time(df, date_column)\n new_df = df.iloc[:,:]\n new_df[date_column] = date_df\n ordered_df = order_date_time(new_df, date_column)\n filtered_df = filter_by_columns(ordered_df, 'Invitee Name', 'Start Date & Time')\n generate_html(filtered_df)\n print('HTML file created with success')\n return filtered_df\n \ndf = get_format_df()\n"
},
{
"alpha_fraction": 0.7519999742507935,
"alphanum_fraction": 0.7519999742507935,
"avg_line_length": 24,
"blob_id": "c019db8fff2f3a0c4dd42f041d428c8f9d7feb17",
"content_id": "d710ecd0fbfe81c3441369e9a84c7c0677e90f1e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 125,
"license_type": "no_license",
"max_line_length": 65,
"num_lines": 5,
"path": "/README.md",
"repo_name": "rafaeltedesco/pandas_csv_to_html",
"src_encoding": "UTF-8",
"text": "# pandas_csv_to_html\n\nIt's working and do the work for me\n\n## maybe you can make some adjustments to fit into your necessity\n"
}
] | 2 |
AlexAdvent/Awesome_Python_Scripts
|
https://github.com/AlexAdvent/Awesome_Python_Scripts
|
c365211f1b30d248370029bbfd6e8ec4d13f145b
|
583d4806c93020a99110ebe95f91f37ebb5021af
|
8f2ab11873561d10b3d913bf493a24620c89cfcb
|
refs/heads/main
| 2023-06-17T16:13:14.237429 | 2021-07-22T08:00:23 | 2021-07-22T08:00:23 | 387,848,631 | 0 | 0 |
MIT
| 2021-07-20T16:16:11 | 2021-07-20T16:16:12 | 2021-07-22T08:00:23 | null |
[
{
"alpha_fraction": 0.6288723945617676,
"alphanum_fraction": 0.6511772274971008,
"avg_line_length": 25.459016799926758,
"blob_id": "bef7d8f7ce135c2f1a4aaf792156eb48a60c3904",
"content_id": "82635ce89977d74180a3ae1f52eddb76a7a392ba",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1614,
"license_type": "permissive",
"max_line_length": 178,
"num_lines": 61,
"path": "/PyGamesScripts/Snake Game/Enhanced/main.py",
"repo_name": "AlexAdvent/Awesome_Python_Scripts",
"src_encoding": "UTF-8",
"text": "from turtle import Screen\nfrom snake import Snake\nfrom food import Food\nfrom scoreboard import Scoreboard\nimport time\nfrom try_file import Try\n\na = Try(1)\nb = Try(2)\nprint(b.collection)\n\n\nscreen = Screen()\nscreen.setup(width=600, height=600)\nscreen.bgcolor(\"black\")\nscreen.title(\"Snake Game\")\nscreen.tracer(0)\n\n\nsnake_player = Snake()\nfood = Food()\nscoreboard = Scoreboard()\n\nscreen.listen()\nscreen.onkey(snake_player.up,\"Up\")\nscreen.onkey(snake_player.down,\"Down\")\nscreen.onkey(snake_player.left,\"Left\")\nscreen.onkey(snake_player.right,\"Right\")\n\n\ngame_over = False\n\nwhile not game_over:\n screen.update()\n time.sleep(0.1)\n snake_player.move()\n if snake_player.snakes_bit[0].distance(food) < 15:\n food.refresh()\n scoreboard.incr_score()\n print(snake_player.snakes_bit[0].position() ,\"player at\")\n print(snake_player.snakes_bit[-1].position() ,\"last player at\")\n snake_player.extend()\n\n# detect collision with wall\n if snake_player.snakes_bit[0].xcor() > 280 or snake_player.snakes_bit[0].xcor() < -280 or snake_player.snakes_bit[0].ycor() > 280 or snake_player.snakes_bit[0].ycor() < -280:\n # scoreboard.game_over()\n # game_over = True\n scoreboard.reset()\n snake_player.reset()\n\n# DETECT COLLISION WITH TAIL\n for bit in snake_player.snakes_bit:\n if bit == snake_player.snakes_bit[0]:\n pass\n elif snake_player.snakes_bit[0].distance(bit) < 10:\n scoreboard.reset()\n snake_player.reset()\n # game_over = True\n # scoreboard.game_over()\n\nscreen.exitonclick()\n"
}
] | 1 |
richdevboston/aiml_bot
|
https://github.com/richdevboston/aiml_bot
|
530c02059b35f3f5b53408ca1c1ceae2c66a4c87
|
3bff265284d0f2687896bb031d72103004452779
|
3d4ffdd5c35e5bcc8eb93713722e181b6108c8d2
|
refs/heads/master
| 2023-03-24T23:20:44.807910 | 2017-09-09T16:08:51 | 2017-09-09T16:08:51 | null | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.5040322542190552,
"alphanum_fraction": 0.7096773982048035,
"avg_line_length": 14.5,
"blob_id": "be5a23c4684b9e81a8c296666ca343138095325d",
"content_id": "db2c5453fa16c914a29f1ea1a7c5491fdd4870ce",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 248,
"license_type": "no_license",
"max_line_length": 24,
"num_lines": 16,
"path": "/requirements.txt",
"repo_name": "richdevboston/aiml_bot",
"src_encoding": "UTF-8",
"text": "chardet==2.3.0\nclick==6.7\neatiht==0.1.14\nFlask==0.12.1\ngunicorn==19.7.1\nitsdangerous==0.24\nJinja2==2.9.6\nMarkupSafe==1.0\nrequests==2.13.0\nrequests-toolbelt==0.7.1\nsimplejson==3.10.0\nsix==1.10.0\nurllib3==1.20\nwebencodings==0.5\nWerkzeug==0.12.1\naiml\n"
},
{
"alpha_fraction": 0.6231759786605835,
"alphanum_fraction": 0.6369098424911499,
"avg_line_length": 22.795917510986328,
"blob_id": "477d333b81988137d3e7c86e333d580417c3ed2f",
"content_id": "d17ed0e401607728a39e3e1cbb3ec3fe330147ff",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1165,
"license_type": "no_license",
"max_line_length": 89,
"num_lines": 49,
"path": "/app.py",
"repo_name": "richdevboston/aiml_bot",
"src_encoding": "UTF-8",
"text": "from flask import Flask, request\nfrom pymessenger import Bot\nfrom utils import fetch_reply\n\napp = Flask(__name__)\n\nFB_ACCESS_TOKEN = \"YOUR PAGE ACCESS TOKEN\"\n\nbot = Bot(FB_ACCESS_TOKEN)\n\n\[email protected]('/', methods=['GET'])\ndef verify():\n if request.args.get(\"hub.mode\") == \"subscribe\" and request.args.get(\"hub.challenge\"):\n if not request.args.get(\"hub.verify_token\") == \"hello\":\n return \"Verification token mismatch\", 403\n return request.args[\"hub.challenge\"], 200\n return \"Hello world\", 200\n\n\[email protected]('/', methods=['POST'])\ndef webhook():\n\tdata = request.get_json()\n\tlog(data)\n\n\tif data['object'] == \"page\":\n\t\tfor entry in data['entry']:\n\t\t\tfor messaging_event in entry['messaging']:\n\n\t\t\t\t# IDs\n\t\t\t\tsender_id = messaging_event['sender']['id']\n\t\t\t\trecipient_id = messaging_event['recipient']['id']\n\n\t\t\t\tif messaging_event.get('message'):\n\t\t\t\t\tquery = messaging_event['message']['text']\n\t\t\t\t\treply = fetch_reply(query, sender_id)\n\n\t\t\t\t\tif reply['type'] == 'text':\n\t\t\t\t\t\tbot.send_text_message(sender_id, reply['data'])\n\treturn \"ok\", 200\n\n\n\ndef log(msg):\n\tprint(msg)\n\n\nif __name__ == \"__main__\":\n\tapp.run(port=8000, use_reloader=True)"
}
] | 2 |
sharmaharshit4262/PythonAssignment
|
https://github.com/sharmaharshit4262/PythonAssignment
|
02ce20d166c9c8fe953bf7b5a933ae133cbfce50
|
20c0c91c25b8745fd4f041b9ec4e324196092d39
|
992e66566924ab6296efe067254d6657e1e1c939
|
refs/heads/master
| 2020-09-16T12:02:43.912975 | 2019-11-24T15:19:05 | 2019-11-24T15:19:05 | 223,763,703 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.526416003704071,
"alphanum_fraction": 0.5278438925743103,
"avg_line_length": 22.886363983154297,
"blob_id": "7330f3a753674eb7ce38b4956fa74f8cae5db5b9",
"content_id": "2fa0622d303590c4f9e3b954755d3ab2d19588b3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "HTML",
"length_bytes": 2101,
"license_type": "no_license",
"max_line_length": 93,
"num_lines": 88,
"path": "/templates/index.html",
"repo_name": "sharmaharshit4262/PythonAssignment",
"src_encoding": "UTF-8",
"text": "{% extends 'base.html' %}\n{% block head %}\n<title>Python Assignment</title>\n{% endblock %}\n\n{% block body %}\n<div id=\"external--container\">\n\n\t<div class=\"header_data\">\n\t\t<h1>Student Records</h1>\t\n\t</div>\n\n\t<div id=\"add-new-button-container\" style=\"text-align:center;\">\n\t\t<a class=\"btn btn-primary\" id=\"Add-new-button\" href=\"/new_student\">Add new Students</a>\n\t</div>\n\n\t<div class=\"row justify-content-center\">\n\t\t<div class=\"col-auto\">\n\n\n\t\t\t<table class=\"table table-dark\">\n\t\t\t\t<thead>\n\t\t\t\t\t<tr>\n\t\t\t\t\t\t<th scope=\"col\">Student ID</th>\n\t\t\t\t\t\t<th scope=\"col\">Name</th>\n\t\t\t\t\t\t<th scope=\"col\">Class ID</th>\n\t\t\t\t\t\t<th scope=\"col\">Created On</th>\n\t\t\t\t\t\t<th scope=\"col\">Updated On</th>\n\t\t\t\t\t\t<th scope=\"col\">Actions</th>\n\t\t\t\t\t</tr>\n\t\t\t\t</thead>\n\t\t\t\t{% for student in students %}\n\n\t\t\t\t<tbody>\n\n\t\t\t\t\t\t<tr>\n\t\t\t\t\t\t\t<td>{{ student.ID }}</td>\n\t\t\t\t\t\t\t<td>{{ student.name }}</td>\n\t\t\t\t\t\t\t<td>{{ student.classID }}</td>\n\t\t\t\t\t\t\t<td>{{ student.created_on.date() }} {{ student.created_on.strftime(\"%X\") }}</td>\n\t\t\t\t\t\t\t<td>{{ student.updated_on.date() }} {{ student.created_on.strftime(\"%X\") }}</td>\n\t\t\t\t\t\t\t<td><a href=#>Update</a> <br>\n\t\t\t\t\t\t\t\t<a href=#>Delete</a>\n\n\t\t\t\t\t\t</tr>\n\t\t\t\t</tbody>\n\t\t\t\t{% endfor %}\n\t\t\t</table>\n\n\t\t</div>\n\t</div>\n\n\t<div class=\"row justify-content-center\">\n\t\t<div class=\"col-auto\">\n\t\n\t\t\t<table class=\"table table-dark\">\n\t\t\t\t<thead>\n\t\t\t\t\t<tr>\n\t\t\t\t\t\t<th scope=\"col\">Class ID</th>\n\t\t\t\t\t\t<th scope=\"col\">Name</th>\n\t\t\t\t\t\t<th scope=\"col\">Class Leader</th>\n\t\t\t\t\t\t<th scope=\"col\">Created On</th>\n\t\t\t\t\t\t<th scope=\"col\">Updated On</th>\n\t\t\t\t\t</tr>\n\t\t\t\t</thead>\n\t\t\t\t{% for class1 in classes %}\n\t\t\t\t<tbody>\n\n\t\t\t\t\t\t<tr>\n\t\t\t\t\t\t\t<td>{{ ClassTable.ID }}</td>\n\t\t\t\t\t\t\t<td>{{ ClassTable.Class_name }}</td>\n\t\t\t\t\t\t\t<td>{{ ClassTable.Class_Leader }}</td>\n\t\t\t\t\t\t\t<td>{{ ClassTable.created_on.date() }} {{ ClassTable.created_on.strftime(\"%X\") }}</td>\n\t\t\t\t\t\t\t<td>{{ ClassTable.updated_on.date() }} {{ ClassTable.created_on.strftime(\"%X\") }}</td>\n\t\t\t\t\t\t\t<td><a href=#>Update</a>\n\t\t\t\t\t\t\t\t<br>\n\t\t\t\t\t\t\t\t<a href=#>Delete</a>\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t</tr>\n\t\t\t\t</tbody>\n\t\t\t\t{% endfor %}\n\t\t\t</table>\n\t\t</div>\n\t</div>\t\n\t\n\n</div>\n{% endblock %}"
},
{
"alpha_fraction": 0.6452862620353699,
"alphanum_fraction": 0.6560482382774353,
"avg_line_length": 31.71830940246582,
"blob_id": "f430ae88f7cbf95c5e30cb250dea498be03b7e02",
"content_id": "0946a7edfda3a129aeb93624cccee71fef654a03",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2323,
"license_type": "no_license",
"max_line_length": 88,
"num_lines": 71,
"path": "/app.py",
"repo_name": "sharmaharshit4262/PythonAssignment",
"src_encoding": "UTF-8",
"text": "from flask import Flask, render_template, request\nfrom flask_sqlalchemy import SQLAlchemy\nfrom datetime import datetime\n\n\napp = Flask(__name__)\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///test1.db'\ndb = SQLAlchemy(app)\n\n\nclass Student(db.Model):\n ID = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(200), nullable=False)\n classID = db.Column(db.Integer)\n created_on = db.Column(db.DateTime, default=datetime.utcnow())\n updated_on = db.Column(db.DateTime, default=datetime.utcnow())\n children = db.relationship('ClassTable', backref='student', lazy=True)\n\n\nclass ClassTable(db.Model):\n Class_ID = db.Column(db.Integer, db.ForeignKey('student.classID'), primary_key=True)\n Class_name = db.Column(db.String(10))\n Class_Leader = db.Column(db.String(200))\n created_on = db.Column(db.DateTime, default=datetime.utcnow())\n updated_on = db.Column(db.DateTime, default=datetime.utcnow())\n\n\ndef pre_populate():\n classID1 = 1\n class1Name = 'Extc'\n classID2 = 2\n class2Name = 'CMPN'\n class1 = ClassTable(Class_ID=classID1, Class_name=class1Name)\n class2 = ClassTable(Class_ID=classID2, Class_name=class2Name)\n try:\n db.session.add(class1)\n db.session.add(class2)\n except Exception as e:\n return e\n\n\[email protected](\"/\", methods=['POST', 'GET'])\ndef index():\n if request.method == 'POST':\n pass\n else:\n students = Student.query.order_by(Student.created_on).all()\n classes = ClassTable.query.order_by(ClassTable.created_on).all()\n return render_template('index.html', students=students, classes=classes)\n\n\[email protected](\"/new_student\", methods=['POST', 'GET'])\ndef add_new_student():\n if request.method == 'POST':\n student_name = request.form['studentName']\n student_class = request.form['className']\n new_student = Student(classID=2, name=student_name)\n new_class = ClassTable(Class_ID=1, Class_name=student_class)\n try:\n db.session.add(new_student)\n db.session.commit()\n return \"data added successfully\"\n except Exception as e:\n return \"There was an issue adding your data {}\".format(str(e))\n else:\n return render_template(\"NewStudent.html\")\n\n\nif __name__ == \"__main__\":\n pre_populate()\n app.run()\n"
}
] | 2 |
brianbrix/draw_polygon
|
https://github.com/brianbrix/draw_polygon
|
73883b47adc562f6a9338a6d7900498c3b7b80cc
|
8c575c1cd95f7bb06d84fc351d2476c50cbede27
|
40f874082f56a2090d8a7c5f0f2b0c3e0da38aaf
|
refs/heads/master
| 2021-03-11T21:57:33.980157 | 2020-03-11T12:36:34 | 2020-03-11T12:36:34 | 246,562,582 | 1 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.7004086971282959,
"alphanum_fraction": 0.7164413928985596,
"avg_line_length": 53.82758712768555,
"blob_id": "f58dd62bf0e2da06f083fd4b3b46e4b18c3aabb7",
"content_id": "310f1a08a1b32efda843a8bbace182c87014b1ac",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3181,
"license_type": "no_license",
"max_line_length": 107,
"num_lines": 58,
"path": "/Draw_Polygon_dialog_base.py",
"repo_name": "brianbrix/draw_polygon",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\n# Form implementation generated from reading ui file 'Draw_Polygon_dialog_base.ui'\n#\n# Created by: PyQt5 UI code generator 5.10.1\n#\n# WARNING! All changes made in this file will be lost!\n\nfrom PyQt5 import QtCore, QtGui, QtWidgets\n\nclass Ui_Draw_PolygonDialogBase(object):\n def setupUi(self, Draw_PolygonDialogBase):\n Draw_PolygonDialogBase.setObjectName(\"Draw_PolygonDialogBase\")\n Draw_PolygonDialogBase.resize(589, 242)\n self.gridLayout = QtWidgets.QGridLayout(Draw_PolygonDialogBase)\n self.gridLayout.setObjectName(\"gridLayout\")\n self.x_point = QtWidgets.QLineEdit(Draw_PolygonDialogBase)\n self.x_point.setObjectName(\"x_point\")\n self.gridLayout.addWidget(self.x_point, 0, 1, 1, 2)\n self.y_point = QtWidgets.QLineEdit(Draw_PolygonDialogBase)\n self.y_point.setObjectName(\"y_point\")\n self.gridLayout.addWidget(self.y_point, 1, 1, 1, 2)\n self.add_plotting_point = QtWidgets.QPushButton(Draw_PolygonDialogBase)\n self.add_plotting_point.setMinimumSize(QtCore.QSize(0, 30))\n self.add_plotting_point.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))\n self.add_plotting_point.setObjectName(\"add_plotting_point\")\n self.gridLayout.addWidget(self.add_plotting_point, 2, 0, 1, 3)\n self.points_table = QtWidgets.QTableWidget(Draw_PolygonDialogBase)\n self.points_table.setEnabled(True)\n self.points_table.setObjectName(\"points_table\")\n self.points_table.setColumnCount(0)\n self.points_table.setRowCount(0)\n self.points_table.horizontalHeader().setSortIndicatorShown(False)\n self.points_table.horizontalHeader().setStretchLastSection(False)\n self.gridLayout.addWidget(self.points_table, 0, 3, 4, 1)\n self.button_box = QtWidgets.QDialogButtonBox(Draw_PolygonDialogBase)\n self.button_box.setOrientation(QtCore.Qt.Horizontal)\n self.button_box.setStandardButtons(QtWidgets.QDialogButtonBox.Cancel|QtWidgets.QDialogButtonBox.Ok)\n self.button_box.setObjectName(\"button_box\")\n self.gridLayout.addWidget(self.button_box, 4, 2, 1, 2)\n self.label = QtWidgets.QLabel(Draw_PolygonDialogBase)\n self.label.setObjectName(\"label\")\n self.gridLayout.addWidget(self.label, 0, 0, 1, 1)\n self.label_2 = QtWidgets.QLabel(Draw_PolygonDialogBase)\n self.label_2.setObjectName(\"label_2\")\n self.gridLayout.addWidget(self.label_2, 1, 0, 1, 1)\n\n self.retranslateUi(Draw_PolygonDialogBase)\n self.button_box.accepted.connect(Draw_PolygonDialogBase.accept)\n self.button_box.rejected.connect(Draw_PolygonDialogBase.reject)\n QtCore.QMetaObject.connectSlotsByName(Draw_PolygonDialogBase)\n\n def retranslateUi(self, Draw_PolygonDialogBase):\n _translate = QtCore.QCoreApplication.translate\n Draw_PolygonDialogBase.setWindowTitle(_translate(\"Draw_PolygonDialogBase\", \"Draw_Polygon\"))\n self.add_plotting_point.setText(_translate(\"Draw_PolygonDialogBase\", \"&Add\"))\n self.label.setText(_translate(\"Draw_PolygonDialogBase\", \"X: \"))\n self.label_2.setText(_translate(\"Draw_PolygonDialogBase\", \"Y: \"))\n\n"
},
{
"alpha_fraction": 0.7215189933776855,
"alphanum_fraction": 0.7341772317886353,
"avg_line_length": 38.75,
"blob_id": "2e04ac81d3d854a2854af904adb2398e101970a3",
"content_id": "a7f92d144de388bcd93be0d80075419f5f837819",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 158,
"license_type": "no_license",
"max_line_length": 100,
"num_lines": 4,
"path": "/run_files",
"repo_name": "brianbrix/draw_polygon",
"src_encoding": "UTF-8",
"text": "#!/bin/sh\npyuic5 --from-imports --resource-suffix '' Draw_Polygon_dialog_base.ui > Draw_Polygon_dialog_base.py\necho \"...\"\npyrcc5 -o resources.py resources.qrc"
}
] | 2 |
meet-minimalist/Learn-pytorch-in-one-example
|
https://github.com/meet-minimalist/Learn-pytorch-in-one-example
|
6fa1077df93ed7ee7fb53ffc2a0ff01a2c04f062
|
6a5093b964a5ad756bba5bbc5b6d7613fb8c41e6
|
f49811d763c3574b1993b20d1b097413ebf4cd99
|
refs/heads/main
| 2023-04-30T19:12:40.851037 | 2021-05-23T06:46:03 | 2021-05-23T06:46:03 | 369,977,598 | 0 | 0 | null | null | null | null | null |
[
{
"alpha_fraction": 0.6084905862808228,
"alphanum_fraction": 0.6155660152435303,
"avg_line_length": 43.157894134521484,
"blob_id": "bdd799ed1e4b90cd0e3fd523311c1fe83267d1a0",
"content_id": "7cf39a66200615ede8bfa68de1a8ac6701b091d3",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 848,
"license_type": "permissive",
"max_line_length": 130,
"num_lines": 19,
"path": "/utils/LRScheduler/ExpDecayLR.py",
"repo_name": "meet-minimalist/Learn-pytorch-in-one-example",
"src_encoding": "UTF-8",
"text": "import config\nimport numpy as np\nfrom utils.LRScheduler.LRScheduler import LearningRateScheduler\n\n\nclass ExpDecay(LearningRateScheduler):\n def __init__(self):\n super().__init__()\n \n\n def get_lr(self, g_step):\n # we will scale lr from 0 to 1e-3 in first 3 epochs and then exp decay for rest of the training.\n if g_step < config.burn_in_steps:\n lr = (config.init_lr) * (g_step / config.burn_in_steps) # Linear Scaling\n #lr = config.init_lr * (g_step / config.burn_in_steps) ** 4 # Polynomial scaling\n return lr\n else:\n # For exponential decay learning rate uncomment below line and comment subsequent lines.\n return config.init_lr * np.exp( -(1 - config.lr_exp_decay) * (g_step - config.burn_in_steps) / config.steps_per_epoch)\n \n"
},
{
"alpha_fraction": 0.635200023651123,
"alphanum_fraction": 0.6976000070571899,
"avg_line_length": 43.57143020629883,
"blob_id": "8f26d0274fd262ac1992ac3c0e2352e3cb5af51e",
"content_id": "c9e350c87519131f9fc545704146d7eade6eed27",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 625,
"license_type": "permissive",
"max_line_length": 94,
"num_lines": 14,
"path": "/main.py",
"repo_name": "meet-minimalist/Learn-pytorch-in-one-example",
"src_encoding": "UTF-8",
"text": "from TrainingHelper import TrainingHelper\n\nif __name__ == \"__main__\":\n # starting training from if __name__ == \"__main__\" block \n # as the dataloader has some bugs which throws error when not initiated within this block.\n # Ref: https://github.com/pytorch/pytorch/issues/2341#issuecomment-346551098\n\n trainer = TrainingHelper()\n\n trainer.train()\n # restore_ckpt = \"./summaries/2021_05_22_17_12_01/ckpt/model_eps_2_test_loss_1.4546.pth\"\n # trainer.train(resume=True, resume_ckpt=restore_ckpt)\n # resnet_ckpt = \"./pretrained_ckpt/resnet18-5c106cde.pth\"\n # trainer.train(pretrained_ckpt=resnet_ckpt)\n\n"
},
{
"alpha_fraction": 0.49824199080467224,
"alphanum_fraction": 0.5269907116889954,
"avg_line_length": 35.6136360168457,
"blob_id": "3fca92aa7a799d311c0404e310b558a736bdb275",
"content_id": "7a3360c8013281ceb8efef6c984b6b1445190404",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4835,
"license_type": "permissive",
"max_line_length": 144,
"num_lines": 132,
"path": "/models/SimpleCNN_old.py",
"repo_name": "meet-minimalist/Learn-pytorch-in-one-example",
"src_encoding": "UTF-8",
"text": "\nimport config\n\nimport torch\nimport torch.nn as nn\nfrom torch.nn.functional import pad\n\n\ndef get_padded_tensor(x, k_size=(3, 3), stride=1, dilation=1, padding='same'):\n # Taken from : https://github.com/pytorch/pytorch/issues/3867#issuecomment-458423010\n \n if str(padding).upper() == 'SAME':\n input_rows, input_cols = [int(x) for x in x.shape[2:4]] \n # x.shape returns pytorch tensor rather than python int list\n # And doing further computation based on that will grow the graph with such nodes\n # Which needs to be avoided when converting the model to onnx or torchscript.\n filter_rows, filter_cols = k_size\n \n out_rows = (input_rows + stride - 1) // stride\n out_cols = (input_cols + stride - 1) // stride\n \n padding_rows = max(0, (out_rows - 1) * stride +\n (filter_rows - 1) * dilation + 1 - input_rows)\n rows_odd = (padding_rows % 2 != 0)\n \n padding_cols = max(0, (out_cols - 1) * stride +\n (filter_cols - 1) * dilation + 1 - input_cols)\n cols_odd = (padding_rows % 2 != 0)\n \n x = pad(x, [padding_cols // 2, (padding_cols // 2) + int(cols_odd),\n padding_rows // 2, (padding_rows // 2) + int(rows_odd)]) # This is only true for NCHW\n # First 2 elements are for last dims\n # Next 2 elements are for second last dims\n # Or alternatively we can do as below.\n #x = nn.ZeroPad2d((padding_cols // 2, (padding_cols // 2) + int(cols_odd),\n # padding_rows // 2, (padding_rows // 2) + int(rows_odd)))(x)\n\n return x\n else:\n return x\n\n\nclass ConvLayer(nn.Module):\n def __init__(self, in_channels, out_channels, conv_k_size=(3, 3), mx_k_size=(3, 3), conv_stride=1, mx_stride=2, padding='same', bias=False):\n super(ConvLayer, self).__init__()\n self.conv_k_size = conv_k_size\n self.mx_k_size = mx_k_size\n self.conv_stride = conv_stride\n self.mx_stride = mx_stride\n self.padding = padding\n\n self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=conv_k_size, stride=conv_stride, padding=0, bias=bias)\n self.bn = nn.BatchNorm2d(num_features=out_channels, eps=1e-6)\n self.act = nn.ReLU()\n self.mxpool = nn.MaxPool2d(kernel_size=mx_k_size, stride=mx_stride, padding=0)\n\n\n def forward(self, x):\n x = get_padded_tensor(x, k_size=self.conv_k_size, stride=self.conv_stride, padding=self.padding)\n x = self.conv(x)\n x = self.bn(x)\n x = self.act(x)\n \n x = get_padded_tensor(x, k_size=self.mx_k_size, stride=self.mx_stride, padding=self.padding)\n x = self.mxpool(x)\n return x\n \n\nclass ConvModel(nn.Module):\n def __init__(self, num_classes=2, inference=False):\n super(ConvModel, self).__init__()\n\n self.layer1 = ConvLayer(3, 32)\n\n self.layer2 = ConvLayer(32, 64)\n \n self.layer3 = ConvLayer(64, 128)\n \n self.layer4 = ConvLayer(128, 256)\n\n self.fc = nn.Linear(in_features=256, out_features=num_classes, bias=True)\n\n self.softmax = nn.Softmax(dim=1)\n\n self.inference = inference\n\n self.__init_weights()\n \n\n def __init_weights(self):\n for module in self.modules():\n if isinstance(module, nn.Conv2d) or isinstance(module, nn.Linear):\n\n if config.weight_init_method == 'xavier_normal':\n nn.init.xavier_normal_(module.weight, gain=1.0)\n elif config.weight_init_method == 'msra':\n nn.init.kaiming_normal_(module.weight, a=0, mode='fan_in', nonlinearity='relu')\n else:\n print(\"Unsupported weight init method.\")\n exit()\n \n if module.bias is not None:\n nn.init.constant_(module.bias, 0)\n \n if isinstance(module, nn.BatchNorm2d):\n nn.init.constant_(module.weight, 1)\n nn.init.constant_(module.bias, 0)\n \n \n def forward(self, x):\n # x : [B x 3 x 224 x 224]\n\n out 
= self.layer1(x)\n # x : [B x 32 x 112 x 112]\n\n out = self.layer2(out)\n # x : [B x 64 x 56 x 56]\n\n out = self.layer3(out)\n # x : [B x 128 x 28 x 28]\n\n out = self.layer4(out)\n # x : [B x 256 x 14 x 14]\n \n out = torch.mean(out.view(out.size(0), out.size(1), -1), dim=2)\n # x : [B x 256]\n\n out = self.fc(out)\n # x : [B x 2]\n \n if self.inference:\n out = self.softmax(out)\n return out\n\n"
},
{
"alpha_fraction": 0.7851534485816956,
"alphanum_fraction": 0.7887223362922668,
"avg_line_length": 62.681819915771484,
"blob_id": "704ce9e1b8ae16763ba5d34e28ec7e6158c40535",
"content_id": "e48944f2d9fd44cb388045e0259fdea90b18615f",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1401,
"license_type": "permissive",
"max_line_length": 206,
"num_lines": 22,
"path": "/README.md",
"repo_name": "meet-minimalist/Learn-pytorch-in-one-example",
"src_encoding": "UTF-8",
"text": "# Learn-pytorch-in-one-example\nOne example to learn all the core concepts of Pytorch. This repo will also work as a training template for any experiment.\n\nIn this repo, I tried to implement training routines and inference routines. Along with this, I tried to add various reference links for some of the concepts. Following concepts are implemented in the repo.\n\n- [x] Custom weight initialization \n- [x] SAME and VALID padding for Conv2D and Pooling layer in Pytorch just like Tensorflow \n- [x] Custom Learning Rate Schedules\n- [x] Cosine Anneling Learning Rate\n- [x] Learning rate plotting in Tensorboard before starting training.\n- [x] Custom implementation of regularization loss\n- [x] Model Summary just like Keras\n- [x] Reproducibility of each experiments with setting seeds for pytorch and other modules\n- [x] TensorBoard Summary Support\n- [x] Automatic Mixed Precision Training\n- [x] Inference scripts for Pytorch and ONNX\n- [x] Custom Dataloader mechanism for dataset handling \n- [x] Using pretrained models such as e.g. resnet18\n- [x] Restore weights to architecture just like Tensorflow 1.x for pretrained models for finetuning purpose\n- [x] Save Checkpoint during training and resume training from that checkpoint\n- [x] Remove old checkpoint just like Tensorflow 1.x checkpoint saver mechanism.\n- [x] Experiment management: Saving of experiment files in a separate folder during each run.\n"
},
{
"alpha_fraction": 0.5492662191390991,
"alphanum_fraction": 0.5555555820465088,
"avg_line_length": 27,
"blob_id": "237794f188a2c303d36f4dafd02727db30cf6996",
"content_id": "69450d20ed124987d4a9c9ed4d32274806b51e34",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 477,
"license_type": "permissive",
"max_line_length": 77,
"num_lines": 17,
"path": "/utils/transforms/resize.py",
"repo_name": "meet-minimalist/Learn-pytorch-in-one-example",
"src_encoding": "UTF-8",
"text": "\nimport imgaug.augmenters as iaa\n\n\nclass PaddedResize(object):\n\n def __init__(self, size=224):\n self.seq_resize = iaa.Sequential([\n iaa.CropToSquare(position='center'),\n iaa.Resize({'height': size, 'width':'keep-aspect-ratio'})\n ])\n\n def __call__(self, sample):\n image, label = sample['image'], sample['label']\n\n image = self.seq_resize(image=image)\n\n return {'image':image, 'label':label}\n"
},
{
"alpha_fraction": 0.6920565962791443,
"alphanum_fraction": 0.7421109676361084,
"avg_line_length": 35.68000030517578,
"blob_id": "5daf02ee8b577079126160eb096d7ec0283aafd8",
"content_id": "2418e7e28a9de54292161b5b893f1c24ce52052a",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 919,
"license_type": "permissive",
"max_line_length": 108,
"num_lines": 25,
"path": "/model_export/convert_to_pytorch.py",
"repo_name": "meet-minimalist/Learn-pytorch-in-one-example",
"src_encoding": "UTF-8",
"text": "\nimport torch\nimport sys\nsys.path.append(\"../\")\n\nimport config\n# from models.SimpleCNN import ConvModel\nfrom models.ResNetModel import ResNetModel\n\n\n# ckpt_path = \"../summaries/2021_05_22_17_45_42/ckpt/model_eps_58_test_loss_0.1504.pth\"\t\t# simple cnn\n# op_model_path = \"./frozen_models/simplecnn_model_final.pt\"\n\nckpt_path = \"../summaries/2021_05_22_23_11_17/ckpt/model_eps_58_test_loss_0.2679.pth\"\t\t# resnet18-full train\nop_model_path = \"./frozen_models/resnet18_model_final.pt\"\n\n\nwith torch.no_grad(): # Disabling the gradient calculations will reduce the calculation overhead.\n # model = ConvModel(num_classes=config.num_classes, inference=True)\n model = ResNetModel(num_classes=config.num_classes, freeze_backend=False, inference=True)\n\ncheckpoint = torch.load(ckpt_path)\nmodel.load_state_dict(checkpoint['model']) # Only restoring model variables only \nmodel.eval()\n\ntorch.save(model, op_model_path)\n\n"
},
{
"alpha_fraction": 0.6356784105300903,
"alphanum_fraction": 0.6356784105300903,
"avg_line_length": 28.850000381469727,
"blob_id": "369cba29a748a4b62f6954edbef0fff98cf9ee3e",
"content_id": "a01be49baff6dbe9c668c22d9d36d0a7e62bfe77",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1194,
"license_type": "permissive",
"max_line_length": 127,
"num_lines": 40,
"path": "/LRHelper.py",
"repo_name": "meet-minimalist/Learn-pytorch-in-one-example",
"src_encoding": "UTF-8",
"text": "import config\nimport numpy as np\nfrom utils.LRScheduler.ExpDecayLR import ExpDecay\nfrom utils.LRScheduler.CosineAnnelingLR import CosineAnneling\n\nclass LRHelper:\n def __init__(self):\n self.lr_scheduler = config.lr_scheduler\n if self.lr_scheduler == 'exp_decay':\n self.lr_class = ExpDecay()\n elif self.lr_scheduler == 'cosine_annealing':\n self.lr_class = CosineAnneling()\n else:\n raise NotImplementedError('Invalid lr_scheduler called.')\n\n \n def step(self, g_step, opt):\n lr = self.lr_class.step(g_step, opt)\n return lr\n\n\n def lr(self, g_step):\n return self.lr_class.get_lr(g_step)\n\n\n def plot_lr(self, op_path, eps, steps_per_eps):\n self.lr_class.plot_lr(op_path, eps, steps_per_eps) \n\n\n\nif __name__ == '__main__':\n import argparse\n\n parser = argparse.ArgumentParser(description='Plot LR Curve.')\n parser.add_argument('--op_path', action='store', type=str, help='Output folder where LR tensorboard summary to be stored.')\n\n args = parser.parse_args()\n\n lr_handler = LRHelper()\n lr_handler.plot_lr(args.op_path, config.epochs, config.steps_per_epoch)\n"
},
{
"alpha_fraction": 0.5575188398361206,
"alphanum_fraction": 0.5661675930023193,
"avg_line_length": 47.42792892456055,
"blob_id": "9e0d6c22f211e544c3a973ebf54de1beeab659cb",
"content_id": "ce6d666b6e78b5a16bc11f4ea1e9d2aa076e5151",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 10759,
"license_type": "permissive",
"max_line_length": 161,
"num_lines": 222,
"path": "/TrainingHelper.py",
"repo_name": "meet-minimalist/Learn-pytorch-in-one-example",
"src_encoding": "UTF-8",
"text": "\nimport os\nimport time\nimport torch\nimport datetime\nimport numpy as np\nfrom tqdm import tqdm\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torchsummary import summary\n\nimport config\nfrom LRHelper import LRHelper\nfrom DatasetHelper import get_train_loader, get_test_loader\n\nfrom utils.Logger import Logger\nfrom utils.CheckpointHandler import CheckpointHandler\nfrom utils.SummaryHelper import SummaryHelper\nfrom utils.misc import init_training, np_cpu, LossAverager\n\ncuda = torch.device('cuda:0')\ncpu = torch.device(\"cpu:0\")\n\n\nclass TrainingHelper:\n def __init__(self):\n self.log, self.exp_path = init_training()\n self.lr_helper = LRHelper()\n\n ckpt_folder = self.exp_path + \"/ckpt/\"\n os.makedirs(ckpt_folder, exist_ok=True)\n \n ckpt_path = ckpt_folder + \"model.pth\"\n self.ckpt_handler = CheckpointHandler(ckpt_path, max_to_keep=3)\n\n\n def get_loss_and_accuracy(self, labels, logits, model, ce_loss_fn):\n # labels : [N] dims tensor\n # logits : [N x C] dims tensor\n\n loss_reg = torch.tensor(0, dtype=torch.float32, device=cuda, requires_grad=False)\n for layer in model.modules():\n if isinstance(layer,torch.nn.Conv2d):\n for p in layer.named_parameters():\n if 'weight' in p[0]:\n loss_reg += torch.sum((torch.square(p[1]) / 2)) \n\n loss_reg *= config.l2_weight_decay\n\n loss_cls = ce_loss_fn(logits, labels) \n\n loss_total = loss_cls + loss_reg\n\n sm_outputs = F.softmax(logits.detach(), dim=-1)\n accuracy = (torch.argmax(sm_outputs, dim=1) == labels).sum() * 100 / labels.size(0)\n\n return loss_total, loss_cls, loss_reg, accuracy\n\n\n def get_model(self):\n if config.model_type == 'simplecnn':\n from models.SimpleCNN import ConvModel\n model = ConvModel(num_classes=config.num_classes).to(cuda, non_blocking=True)\n \n elif config.model_type == 'resnet18':\n from models.ResNetModel import ResNetModel\n model = ResNetModel(num_classes=config.num_classes, freeze_backend=config.freeze_backend).to(cuda, non_blocking=True)\n\n else:\n print(\"Unsupported model type.\")\n exit()\n return model\n\n\n def train(self, resume=False, resume_ckpt=None, pretrained_ckpt=None):\n model = self.get_model()\n \n model_stats = summary(model, (3, config.input_size[0], config.input_size[1]))\n\n for line in str(model_stats).split('\\n'):\n self.log(line)\n \n ce_loss_fn = nn.CrossEntropyLoss()\n # Why opt for nn.CrossEntropyLoss over nn.functional.cross_entropy\n # Ref : https://discuss.pytorch.org/t/f-cross-entropy-vs-torch-nn-cross-entropy-loss/25505/2\n\n opt = torch.optim.Adam(model.parameters(), lr=0.0, weight_decay=0.0)\n # Setting lr equal to 0.0 here so that it wont work as per this line.\n # But we will explicitly set lr for each weights dynamically, at every step.\n # Same is case for weight_decay, We will calculate L2_regularization_loss on our own separately.\n \n scaler = torch.cuda.amp.GradScaler(enabled=config.use_amp)\n \n if resume:\n checkpoint = torch.load(resume_ckpt)\n model.load_state_dict(checkpoint['model'])\n opt.load_state_dict(checkpoint['optimizer'])\n scaler.load_state_dict(checkpoint['scalar'])\n resume_g_step = checkpoint['global_step']\n resume_eps = checkpoint['epoch']\n self.log(\"Resuming training from {} epochs.\".format(resume_eps))\n elif pretrained_ckpt is not None and config.model_type == 'resnet18':\n self.log(\"Using pre-trained checkpoint from :\".format(pretrained_ckpt))\n checkpoint = torch.load(pretrained_ckpt)\n \n filtered_checkpoint = {}\n self.log(\"\\nFollowing variables will be restored:\")\n for var_name, 
var_value in checkpoint.items():\n if var_name == 'fc.weight' or var_name == 'fc.bias': \n # As these layers change due to change in num classes\n continue\n new_var_name = 'resnet_feat.' + var_name \n # why this prefix? This comes as the model that we created contains a variable resnet_feat \n # which is sequential group of layers containing resnet layers. So all the layers and parameters \n # within it are prefixed with resnet_feat and for restoring resnet pretrained weights \n # we need to update the statedict according to the model architectural definition.\n self.log(f\"{new_var_name} : {list(var_value.size())}\")\n filtered_checkpoint[new_var_name] = var_value\n\n self.log(\"\\n\\nFollowing variables will be initialized:\")\n remaining_vars = model.load_state_dict(filtered_checkpoint, strict=False)\n for var_name in remaining_vars.missing_keys:\n self.log(var_name)\n \n resume_g_step = 0\n resume_eps = 0\n else:\n resume_g_step = 0\n resume_eps = 0\n\n train_writer = SummaryHelper(self.exp_path + \"/train/\")\n test_writer = SummaryHelper(self.exp_path + \"/test/\")\n\n input_x = torch.randn((1,3, config.input_size[0], config.input_size[1])).to(cuda, non_blocking=True)\n train_writer.add_graph(model, input_x)\n\n g_step = max(0, resume_g_step)\n for eps in range(resume_eps, config.epochs):\n # I hope you noticed one particular statement in the code, to which I assigned a comment “What is this?!?” — model.train().\n # In PyTorch, models have a train() method which, somewhat disappointingly, does NOT perform a training step. \n # Its only purpose is to set the model to training mode. Why is this important? Some models may use mechanisms like Dropout, \n # for instance, which have distinct behaviors in training and evaluation phases.\n # Ref: https://towardsdatascience.com/understanding-pytorch-with-an-example-a-step-by-step-tutorial-81fc5f8c4e8e\n model.train()\n\n train_loader = get_train_loader()\n train_iter = iter(train_loader) # This is creating issues sometimes. Check required.\n \n self.log(\"Epoch: {} Started\".format(eps+1))\n \n for batch_num in tqdm(range(config.train_steps)):\n start = time.time()\n batch = next(train_iter)\n\n opt.zero_grad() # Zeroing out gradients before backprop\n # We cab avoid to zero out if we want accumulate gradients for \n # Multiple forward pass and single backward pass.\n with torch.cuda.amp.autocast(enabled=config.use_amp):\n logits = model(batch['image'].to(cuda, non_blocking=True))\n\n loss_total, loss_cls, loss_reg, accuracy = self.get_loss_and_accuracy(batch['label'].to(cuda, non_blocking=True), logits, model, ce_loss_fn)\n\n #loss_total.backward()\t\t\t\t# Used for normal training without AMP\n scaler.scale(loss_total).backward()\t\t# Used when AMP is applied. 
The enabled flag will trigger normal FP32 behaviour or Mixed precision behaviour\n scaler.step(opt)\n scaler.update()\n\n lr = self.lr_helper.step(g_step, opt)\n opt.step()\n delta = (time.time() - start) * 1000 # in milliseconds\n print(\"Time: {:.2f} ms\".format(delta))\n\n if (batch_num+1) % config.loss_logging_frequency == 0:\n self.log(\"Epoch: {}/{}, Batch No.: {}/{}, Total Loss: {:.4f}, Loss Cls: {:.4f}, Loss Reg: {:.4f}, Accuracy: {:.2f}\".format(\\\n eps+1, config.epochs, batch_num+1, config.train_steps, np_cpu(loss_total), \\\n np_cpu(loss_cls), np_cpu(loss_reg), np_cpu(accuracy)))\n \n train_writer.add_summary({'total_loss' : np_cpu(loss_total),\n 'loss_cls' : np_cpu(loss_cls),\n 'loss_reg' : np_cpu(loss_reg), \n 'accuracy' : np_cpu(accuracy),\n 'lr' : lr}, g_step)\n \n g_step += 1\n \n model.eval() # Putting model in eval mode so that batch normalization and dropout will work in inference mode.\n\n test_loader = get_test_loader()\n test_iter = iter(test_loader)\n test_losses = LossAverager(num_elements=4)\n\n with torch.no_grad(): # Disabling the gradient calculations will reduce the calculation overhead.\n\n for batch_num in tqdm(range(config.test_steps)):\n batch = next(test_iter)\n logits = model(batch['image'].to(cuda))\n\n loss_total, loss_cls, loss_reg, accuracy = self.get_loss_and_accuracy(batch['label'].to(cuda, non_blocking=True), logits, model, ce_loss_fn)\n test_losses([np_cpu(loss_total), np_cpu(loss_cls), np_cpu(loss_reg), np_cpu(accuracy)])\n \n self.log(\"Epoch: {}/{} Completed, Test Total Loss: {:.4f}, Loss Cls: {:.4f}, Loss Reg: {:.4f}, Accuracy: {:.2f}\".format(\\\n eps+1, config.epochs, test_losses.avg[0], test_losses.avg[1], test_losses.avg[2], test_losses.avg[3]))\n \n test_writer.add_summary({'total_loss' : test_losses.avg[0], \n 'loss_cls' : test_losses.avg[1], \n 'loss_reg' : test_losses.avg[2], \n 'accuracy' : test_losses.avg[3]}, g_step)\n\n checkpoint = {\n 'epoch': eps + 1,\n 'global_step': g_step,\n 'test_loss': test_losses.avg[0],\n 'model': model.state_dict(),\n 'optimizer': opt.state_dict(),\n\t\t\t\t'scalar': scaler.state_dict()\n }\n # Above code taken from : https://towardsdatascience.com/how-to-save-and-load-a-model-in-pytorch-with-a-complete-example-c2920e617dee\n self.ckpt_handler.save(checkpoint)\n self.log(\"Epoch {} completed. Checkpoint saved.\".format(eps+1))\n\n print(\"Training Completed.\")\n train_writer.close()\n test_writer.close()\n\n"
},
{
"alpha_fraction": 0.4950721561908722,
"alphanum_fraction": 0.5249912142753601,
"avg_line_length": 39,
"blob_id": "0d5fcc03b992258428147ca03c5e416707345a14",
"content_id": "9f69c6d5fd34cd82708815cb366338e61c3359d8",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5682,
"license_type": "permissive",
"max_line_length": 126,
"num_lines": 142,
"path": "/models/SimpleCNN.py",
"repo_name": "meet-minimalist/Learn-pytorch-in-one-example",
"src_encoding": "UTF-8",
"text": "\nimport config\n\nimport torch\nimport torch.nn as nn\nfrom torch.nn.functional import pad\n\n\ndef get_padded_tensor(x, k_size=(3, 3), stride=1, dilation=1, padding='same'):\n # Taken from : https://github.com/pytorch/pytorch/issues/3867#issuecomment-458423010\n \n if str(padding).upper() == 'SAME':\n input_rows, input_cols = [int(x) for x in x.shape[2:4]] \n # x.shape returns pytorch tensor rather than python int list\n # And doing further computation based on that will grow the graph with such nodes\n # Which needs to be avoided when converting the model to onnx or torchscript.\n filter_rows, filter_cols = k_size\n \n out_rows = (input_rows + stride - 1) // stride\n out_cols = (input_cols + stride - 1) // stride\n \n padding_rows = max(0, (out_rows - 1) * stride +\n (filter_rows - 1) * dilation + 1 - input_rows)\n rows_odd = (padding_rows % 2 != 0)\n \n padding_cols = max(0, (out_cols - 1) * stride +\n (filter_cols - 1) * dilation + 1 - input_cols)\n cols_odd = (padding_rows % 2 != 0)\n \n x = pad(x, [padding_cols // 2, (padding_cols // 2) + int(cols_odd),\n padding_rows // 2, (padding_rows // 2) + int(rows_odd)]) # This is only true for NCHW\n # First 2 elements are for last dims\n # Next 2 elements are for second last dims\n # Or alternatively we can do as below.\n #x = nn.ZeroPad2d((padding_cols // 2, (padding_cols // 2) + int(cols_odd),\n # padding_rows // 2, (padding_rows // 2) + int(rows_odd)))(x)\n\n return x\n else:\n return x\n\n\ndef ConvLayer(in_channels, out_channels, conv_k_size=(3, 3), conv_stride=1, padding='same', bias=False):\n if bias:\n layer = nn.Sequential(\n nn.Conv2d(in_channels, out_channels, kernel_size=conv_k_size, stride=conv_stride, padding=0, bias=bias),\n nn.ReLU(),\n )\n else:\n layer = nn.Sequential(\n nn.Conv2d(in_channels, out_channels, kernel_size=conv_k_size, stride=conv_stride, padding=0, bias=bias),\n nn.BatchNorm2d(num_features=out_channels, eps=1e-6),\n nn.ReLU()\n )\n return layer\n\n\ndef MaxPoolLayer(mx_k_size=(3, 3), mx_stride=2):\n mxpool = nn.MaxPool2d(kernel_size=mx_k_size, stride=mx_stride, padding=0)\n return mxpool\n \n\nclass ConvModel(nn.Module):\n def __init__(self, num_classes=2, inference=False):\n super(ConvModel, self).__init__()\n\n self.layer1 = ConvLayer(3, 32)\n \n self.layer2 = ConvLayer(32, 64)\n \n self.layer3 = ConvLayer(64, 128)\n \n self.layer4 = ConvLayer(128, 256)\n\n self.mx_pool = MaxPoolLayer((3, 3), 2)\n\n self.fc = nn.Linear(in_features=256, out_features=num_classes, bias=True)\n\n self.softmax = nn.Softmax(dim=1)\n\n self.inference = inference\n\n self.__init_weights()\n \n\n def __init_weights(self):\n for module in self.modules():\n if isinstance(module, nn.Conv2d) or isinstance(module, nn.Linear):\n\n if config.weight_init_method == 'xavier_normal':\n nn.init.xavier_normal_(module.weight, gain=1.0)\n elif config.weight_init_method == 'msra':\n nn.init.kaiming_normal_(module.weight, a=0, mode='fan_in', nonlinearity='relu')\n else:\n print(\"Unsupported weight init method.\")\n exit()\n \n if module.bias is not None:\n nn.init.constant_(module.bias, 0)\n \n if isinstance(module, nn.BatchNorm2d):\n nn.init.constant_(module.weight, 1)\n nn.init.constant_(module.bias, 0)\n nn.init.constant_(module.running_var, 1)\n nn.init.constant_(module.running_mean, 0)\n \n \n def forward(self, x):\n # x : [B x 3 x 224 x 224]\n \n x = get_padded_tensor(x, k_size=(3, 3), stride=1, padding='Same') # Padding for conv\n out = self.layer1(x)\n out = get_padded_tensor(out, k_size=(3, 3), stride=2, padding='Same') # Padding 
for maxpool\n out = self.mx_pool(out)\n # x : [B x 32 x 112 x 112]\n\n out = get_padded_tensor(out, k_size=(3, 3), stride=1, padding='Same') # Padding for conv\n out = self.layer2(out)\n out = get_padded_tensor(out, k_size=(3, 3), stride=2, padding='Same') # Padding for maxpool\n out = self.mx_pool(out)\n # x : [B x 64 x 56 x 56]\n\n out = get_padded_tensor(out, k_size=(3, 3), stride=1, padding='Same') # Padding for conv\n out = self.layer3(out)\n out = get_padded_tensor(out, k_size=(3, 3), stride=2, padding='Same') # Padding for maxpool\n out = self.mx_pool(out)\n # x : [B x 128 x 28 x 28]\n\n out = get_padded_tensor(out, k_size=(3, 3), stride=1, padding='Same') # Padding for conv\n out = self.layer4(out)\n out = get_padded_tensor(out, k_size=(3, 3), stride=2, padding='Same') # Padding for maxpool\n out = self.mx_pool(out)\n # x : [B x 256 x 14 x 14]\n \n out = torch.mean(out.view(out.size(0), out.size(1), -1), dim=2)\n # x : [B x 256]\n\n out = self.fc(out)\n # x : [B x 2]\n \n if self.inference:\n out = self.softmax(out)\n return out\n\n"
},
{
"alpha_fraction": 0.5618839263916016,
"alphanum_fraction": 0.5662650465965271,
"avg_line_length": 29.433332443237305,
"blob_id": "e7ae6b7835ce40be5c8fa0946194086c5397763c",
"content_id": "409e59b0e94d674667dfdd915ee87baeebce295e",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 913,
"license_type": "permissive",
"max_line_length": 120,
"num_lines": 30,
"path": "/utils/LRScheduler/LRScheduler.py",
"repo_name": "meet-minimalist/Learn-pytorch-in-one-example",
"src_encoding": "UTF-8",
"text": "\nfrom tqdm import tqdm\nfrom torch.utils.tensorboard import SummaryWriter\n\nclass LearningRateScheduler:\n def __init__(self):\n pass\n\n def get_lr(self, g_step):\n # To enable this function as a virtual function exception can be raised if it is not implemented in child class.\n raise NotImplementedError('Child class should implement get_lr function.')\n\n\n def step(self, g_step, opt):\n lr = self.get_lr(g_step)\n\n for grp in opt.param_groups:\n grp['lr'] = lr\n\n return lr\n \n \n def plot_lr(self, op_path, eps, steps_per_eps):\n lr_sum_writer = SummaryWriter(op_path)\n\n for e in tqdm(range(eps)):\n for s in range(steps_per_eps):\n if (s+1) % 10 == 0:\n g_step = steps_per_eps * e + s\n lr = self.get_lr(g_step)\n lr_sum_writer.add_scalar('lr', lr, g_step)"
},
{
"alpha_fraction": 0.6319218277931213,
"alphanum_fraction": 0.6416938304901123,
"avg_line_length": 29.799999237060547,
"blob_id": "189e74525cd7ee87fb89d6407d29d81b3237955c",
"content_id": "5fedeb1e37b0bb6c494467e069cc407211c3f6e6",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 307,
"license_type": "permissive",
"max_line_length": 116,
"num_lines": 10,
"path": "/utils/transforms/to_tensor.py",
"repo_name": "meet-minimalist/Learn-pytorch-in-one-example",
"src_encoding": "UTF-8",
"text": "import torch\nimport numpy as np\n\nclass ToTensorOwn(object):\n\n def __call__(self, sample):\n image, label = sample['image'], sample['label']\n\n image = image.transpose(2, 0, 1)\n return {'image' : torch.from_numpy(image).type(torch.long), 'label' : torch.tensor(label, dtype=torch.long)}"
},
{
"alpha_fraction": 0.5373765826225281,
"alphanum_fraction": 0.5909731984138489,
"avg_line_length": 31.136363983154297,
"blob_id": "76c89c3131cd8c8865e075c7b309c10ff8b0f9b3",
"content_id": "ff6f063760fe0b4505737102f23f3d3b093eb829",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 709,
"license_type": "permissive",
"max_line_length": 122,
"num_lines": 22,
"path": "/utils/transforms/normalize.py",
"repo_name": "meet-minimalist/Learn-pytorch-in-one-example",
"src_encoding": "UTF-8",
"text": "import torch\nimport numpy as np\nfrom torchvision import transforms\n\n\nclass Normalize(object):\n def __init__(self, model_type):\n assert model_type in ['resnet18', 'simplecnn']\n self.model_type = model_type\n\n def __call__(self, sample):\n image, label = sample['image'], sample['label']\n\n if self.model_type == 'simplecnn':\n image = image / 255.0\n elif self.model_type == 'resnet18':\n image = image / 255.0\n image = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])(image.type(torch.float32))\n else:\n print(\"Normalization type not supported\")\n\n return {'image': image, 'label': label}\n\n\n"
},
{
"alpha_fraction": 0.5660579800605774,
"alphanum_fraction": 0.5698174238204956,
"avg_line_length": 30.016666412353516,
"blob_id": "0de56964ce86a13b0c32acbb2cb3b2684aeb5300",
"content_id": "8e163e5b208b93e7094a0178940a675f35ae7e9a",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1862,
"license_type": "permissive",
"max_line_length": 85,
"num_lines": 60,
"path": "/utils/misc.py",
"repo_name": "meet-minimalist/Learn-pytorch-in-one-example",
"src_encoding": "UTF-8",
"text": "\nimport os\nimport glob\nimport torch\nimport shutil\nimport datetime\n\nimport config\nfrom utils.Logger import Logger\n\ncpu = torch.device(\"cpu:0\")\n\ndef init_training(exp_path=config.exp_path):\n start_time = datetime.datetime.now()\n exp_name = start_time.strftime(\"%Y_%m_%d_%H_%M_%S\")\n cur_exp_path = exp_path + \"/\" + exp_name\n os.makedirs(cur_exp_path, exist_ok=True)\n \n logger_path = cur_exp_path + \"/log.txt\"\n log = Logger(logger_path)\n\n # =========== Take a backup of training files ============= #\n current_files = glob.glob(\"*\")\n for i in range(len(current_files)):\n if os.path.isfile(current_files[i]):\n shutil.copy2(current_files[i], cur_exp_path)\n \n shutil.copytree(\"./models/\", cur_exp_path + \"/models\")\n shutil.copytree(\"./utils/\", cur_exp_path + \"/utils\")\n # ================================================ #\n \n log(\"Experiment Start time: {}\".format(start_time.strftime(\"%Y-%m-%d %H:%M:%S\")))\n log(\"Experiment files are saved at: {}\".format(cur_exp_path))\n log(\"Training initialization completed.\")\n return log, cur_exp_path\n\n\ndef np_cpu(tensor):\n return float(tensor.detach().to(cpu).numpy())\n\n\nclass LossAverager:\n def __init__(self, num_elements):\n self.num_elements = num_elements\n self.reset()\n\n def reset(self):\n self.val = [0 for _ in range(self.num_elements)]\n self.count = [0 for _ in range(self.num_elements)]\n self.sum = [0 for _ in range(self.num_elements)]\n self.avg = [0 for _ in range(self.num_elements)]\n\n\n def __call__(self, val_list):\n assert len(val_list) == self.num_elements\n\n for i, val in enumerate(val_list):\n self.val[i] = val\n self.sum[i] += self.val[i]\n self.count[i] += 1\n self.avg[i] = self.sum[i] / self.count[i]\n"
},
{
"alpha_fraction": 0.553530752658844,
"alphanum_fraction": 0.5634016990661621,
"avg_line_length": 33.864864349365234,
"blob_id": "96578c54b1418efb4d5d7c774f5bed81f8ca7378",
"content_id": "a15bc4adca6eb1d3e42bfc69c9f4aa8a263b74a8",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1317,
"license_type": "permissive",
"max_line_length": 125,
"num_lines": 37,
"path": "/utils/Logger.py",
"repo_name": "meet-minimalist/Learn-pytorch-in-one-example",
"src_encoding": "UTF-8",
"text": "\n\n# Note: This code is based on Singleton Design pattern where object instance creation will happen once for multiple instance \n# and same is case for object deletion.\n\nclass Logger:\n logger = None # This would work as a static variable and shared among all the instances of Logger classes.\n count = 0\n def __init__(self, log_path, log_level=None):\n \n if log_level is None:\n self.log_level = \"\"\n else:\n self.log_level = \"[{}] \".format(log_level) \n\n if Logger.count == 0:\n # Only once logger should be initialized with given path.\n Logger.logger = open(log_path, 'w', encoding='utf-8')\n \n Logger.count += 1\n\n self.log_ctr = 0\n\n\n def __call__(self, message):\n print(self.log_level + message)\n Logger.logger.writelines(self.log_level + message + \"\\n\")\n self.log_ctr += 1\n if(self.log_ctr % 10 == 0):\n # Flush data into logger at every 10 writes.\n Logger.logger.flush()\n\n\n def __del__(self):\n Logger.count -= 1\n if Logger.count == 0:\n # Closing the file just for the safety purpose, when all the instances are deleted.\n Logger.logger.close()\n print(\"Logger file closed\")\n \n "
},
{
"alpha_fraction": 0.3216232657432556,
"alphanum_fraction": 0.3676559627056122,
"avg_line_length": 40.25,
"blob_id": "000563efca96f9ebd33d94c7c198a3ceafbc374c",
"content_id": "660b98c8f06d600291a17f992a5611d5d87353c6",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1651,
"license_type": "permissive",
"max_line_length": 95,
"num_lines": 40,
"path": "/utils/transforms/augmenter.py",
"repo_name": "meet-minimalist/Learn-pytorch-in-one-example",
"src_encoding": "UTF-8",
"text": "\nimport imgaug.augmenters as iaa\n\n\nclass Augmenter(object):\n\n def __init__(self):\n \n self.seq_aug = iaa.SomeOf((1, 2), [\n iaa.OneOf([\n #iaa.Dropout(p=(0.1, 0.2)),\n iaa.CoarseDropout(0.05, size_percent=0.1, per_channel=0.5),\n iaa.SaltAndPepper(0.05),\n iaa.CoarseSaltAndPepper(0.03, size_percent=(0.1, 0.2))\n ]),\n iaa.OneOf([\n iaa.GaussianBlur(sigma=(0.5, 1.0)),\n iaa.MedianBlur(k=(3, 5)),\n iaa.MotionBlur(k=5, angle=[-45, 45])\n ]),\n iaa.OneOf([\n iaa.MultiplyAndAddToBrightness(mul=(0.5, 1.5), add=(-30, 30)),\n iaa.Grayscale(alpha=(0.5, 1.0)),\n iaa.AddToHueAndSaturation((-50, 50))\n ]),\n iaa.OneOf([\n iaa.Fliplr(0.5),\n iaa.Affine(scale=(0.8, 1.2)),\n iaa.Affine(translate_percent={'x': (-0.2, 0.2), 'y': (-0.2, 0.2)}),\n iaa.Affine(rotate=(-30, 30)),\n iaa.Affine(shear={'x': (-15, 15), 'y': (-15, 15)})\n ])\n ], random_order=True)\n\n\n def __call__(self, sample):\n image, label = sample['image'], sample['label']\n\n image = self.seq_aug(image=image)\n\n return {'image':image, 'label':label}\n"
},
{
"alpha_fraction": 0.5284703969955444,
"alphanum_fraction": 0.5463342070579529,
"avg_line_length": 39.398494720458984,
"blob_id": "1be76f08090b5d7568098a83b6b89ccf6a8fb162",
"content_id": "953b5e6fa5f0b2dc982a75d0c248f388c8039dc8",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5374,
"license_type": "permissive",
"max_line_length": 126,
"num_lines": 133,
"path": "/models/ResNetModel.py",
"repo_name": "meet-minimalist/Learn-pytorch-in-one-example",
"src_encoding": "UTF-8",
"text": "\nimport config\n\nimport torch\nimport torch.nn as nn\nfrom torch.nn.functional import pad\nimport torchvision.models as models\n\ndef get_padded_tensor(x, k_size=(3, 3), stride=1, dilation=1, padding='same'):\n # Taken from : https://github.com/pytorch/pytorch/issues/3867#issuecomment-458423010\n \n if str(padding).upper() == 'SAME':\n input_rows, input_cols = [int(x) for x in x.shape[2:4]] \n # x.shape returns pytorch tensor rather than python int list\n # And doing further computation based on that will grow the graph with such nodes\n # Which needs to be avoided when converting the model to onnx or torchscript.\n filter_rows, filter_cols = k_size\n \n out_rows = (input_rows + stride - 1) // stride\n out_cols = (input_cols + stride - 1) // stride\n \n padding_rows = max(0, (out_rows - 1) * stride +\n (filter_rows - 1) * dilation + 1 - input_rows)\n rows_odd = (padding_rows % 2 != 0)\n \n padding_cols = max(0, (out_cols - 1) * stride +\n (filter_cols - 1) * dilation + 1 - input_cols)\n cols_odd = (padding_rows % 2 != 0)\n \n x = pad(x, [padding_cols // 2, (padding_cols // 2) + int(cols_odd),\n padding_rows // 2, (padding_rows // 2) + int(rows_odd)]) # This is only true for NCHW\n # First 2 elements are for last dims\n # Next 2 elements are for second last dims\n # Or alternatively we can do as below.\n #x = nn.ZeroPad2d((padding_cols // 2, (padding_cols // 2) + int(cols_odd),\n # padding_rows // 2, (padding_rows // 2) + int(rows_odd)))(x)\n\n return x\n else:\n return x\n\n\ndef ConvLayer(in_channels, out_channels, conv_k_size=(3, 3), conv_stride=1, padding='same', bias=False):\n if bias:\n layer = nn.Sequential(\n nn.Conv2d(in_channels, out_channels, kernel_size=conv_k_size, stride=conv_stride, padding=0, bias=bias),\n nn.ReLU(),\n )\n else:\n layer = nn.Sequential(\n nn.Conv2d(in_channels, out_channels, kernel_size=conv_k_size, stride=conv_stride, padding=0, bias=bias),\n nn.BatchNorm2d(num_features=out_channels, eps=1e-6),\n nn.ReLU()\n )\n return layer\n\n\ndef MaxPoolLayer(mx_k_size=(3, 3), mx_stride=2):\n mxpool = nn.MaxPool2d(kernel_size=mx_k_size, stride=mx_stride, padding=0)\n return mxpool\n \n\nclass ResNetModel(nn.Module):\n def __init__(self, num_classes=2, freeze_backend=False, inference=False):\n super(ResNetModel, self).__init__()\n\n resnet = models.resnet18(pretrained=False) \n # Intentionally used pretrained=False, as the pretrained checkpoint will be fed from outside\n # before training. 
\n\n self.resnet_feat = nn.Sequential()\n for layer_name, layer_module in list(resnet.named_children())[:-1]:\n # Used this approach so as to preserve the name of layers/parameters which \n # can be used while restoring them before training separately\n self.resnet_feat.add_module(layer_name, layer_module)\n\n # Alternatively we can use below line, but name of parameters will not be in accordance with the \n # resnet pretrained checkpoint file.\n #self.resnet_feat = nn.Sequential(*(list(resnet.children())[:-1])) \n\n if freeze_backend:\n # Fix the parameters of the feature extractor: \n for param in self.resnet_feat.parameters(): \n param.requires_grad = False\n\n resnet_op_feat = resnet.layer4[1].conv2.out_channels\n\n self.fc = nn.Linear(in_features=resnet_op_feat, out_features=num_classes, bias=True)\n\n self.softmax = nn.Softmax(dim=1)\n\n self.inference = inference\n\n self.__init_weights()\n \n\n def __init_weights(self):\n for module in self.modules():\n if isinstance(module, nn.Conv2d) or isinstance(module, nn.Linear):\n \n if config.weight_init_method == 'xavier_normal':\n nn.init.xavier_normal_(module.weight, gain=1.0)\n elif config.weight_init_method == 'msra':\n nn.init.kaiming_normal_(module.weight, a=0, mode='fan_in', nonlinearity='relu')\n else:\n print(\"Unsupported weight init method.\")\n exit()\n \n if module.bias is not None:\n nn.init.constant_(module.bias, 0)\n \n if isinstance(module, nn.BatchNorm2d):\n nn.init.constant_(module.weight, 1)\n nn.init.constant_(module.bias, 0)\n nn.init.constant_(module.running_var, 1)\n nn.init.constant_(module.running_mean, 0)\n \n \n def forward(self, x):\n # x : [B x 3 x 224 x 224]\n \n out = self.resnet_feat(x)\n # x : [B x 512 x 1 x 1]\n\n out = out.squeeze(dim=3).squeeze(dim=2)\n # x : [B x 512]\n\t\t\n out = self.fc(out)\n # x : [B x 2]\n \n if self.inference:\n out = self.softmax(out)\n \n return out\n"
},
{
"alpha_fraction": 0.616487443447113,
"alphanum_fraction": 0.6272401213645935,
"avg_line_length": 42.578948974609375,
"blob_id": "e0ceafeb6d0daf5bfa142ad8ef4b132d363984ae",
"content_id": "3feaa5be2761b6da605ea8122ab6fab9a7af0d5d",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 837,
"license_type": "permissive",
"max_line_length": 125,
"num_lines": 19,
"path": "/utils/LRScheduler/CosineAnnelingLR.py",
"repo_name": "meet-minimalist/Learn-pytorch-in-one-example",
"src_encoding": "UTF-8",
"text": "import config\nimport numpy as np\nfrom utils.LRScheduler.LRScheduler import LearningRateScheduler\n\n\nclass CosineAnneling(LearningRateScheduler):\n def __init__(self):\n super().__init__()\n self.cosine_iters = config.steps_per_epoch * config.epochs - config.burn_in_steps\n \n\n def get_lr(self, g_step):\n # we will scale lr from 0 to 1e-3 in first 3 epochs and then exp decay for rest of the training.\n if g_step < config.burn_in_steps:\n lr = (config.init_lr) * (g_step / config.burn_in_steps) # Linear Scaling\n return lr\n else:\n # For exponential decay learning rate uncomment below line and comment subsequent lines.\n return 0 + (config.init_lr - 0) * 0.5 * (1 + np.cos(np.pi * (g_step - config.burn_in_steps) / self.cosine_iters))\n \n"
},
{
"alpha_fraction": 0.5629099607467651,
"alphanum_fraction": 0.5764261484146118,
"avg_line_length": 35.39855194091797,
"blob_id": "7eb4a3a1228a34873b765b92cd1266c42d88db85",
"content_id": "8b97acb4e1a25f5ef6facfbd4489c148c5c2d556",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5031,
"license_type": "permissive",
"max_line_length": 119,
"num_lines": 138,
"path": "/DatasetHelper.py",
"repo_name": "meet-minimalist/Learn-pytorch-in-one-example",
"src_encoding": "UTF-8",
"text": "\nimport os\nimport cv2\nimport torch\nimport numpy as np\nfrom torch.utils.data import Dataset\nfrom torchvision import transforms\n\nimport config\nfrom utils.transforms.to_tensor import ToTensorOwn\nfrom utils.transforms.normalize import Normalize\nfrom utils.transforms.resize import PaddedResize\nfrom utils.transforms.augmenter import Augmenter\n\n\nclass CatvsDogDataset(Dataset):\n def __init__(self, img_paths, num_classes, augment=False, basic_transforms=None, augment_transforms=None):\n \n self.img_paths = img_paths\n\n np.random.shuffle(self.img_paths)\n\n self.img_path_list = []\n for img_path in self.img_paths:\n if 'cat' in os.path.basename(img_path):\n self.img_path_list.append([img_path, 0]) # For cat label is 0\n elif 'dog' in os.path.basename(img_path):\n self.img_path_list.append([img_path, 1]) # For dog label is 1\n else:\n print(\"Class not found\")\n exit()\n\n self.num_classes = num_classes\n self.basic_transforms = basic_transforms\n self.augment_transforms = augment_transforms\n self.augment = augment\n\n\n def __len__(self):\n return len(self.img_path_list)\n\n\n def __getitem__(self, idx):\n if torch.is_tensor(idx):\n idx = idx.tolist()\n\n img_path, label = self.img_path_list[idx]\n # label can be 0 or 1 based on Cat or Dog respectively\n\n img = cv2.imread(img_path)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n \n sample = {'image' : img, 'label' : label}\n\n if self.augment:\n sample = self.augment_transforms(sample)\n \n # Mandatory transformations like resizing image to same size, normalizing the image and converting to tensor.\n sample = self.basic_transforms(sample)\n\n return sample\n\n\n\nbasic_transforms = transforms.Compose([\n PaddedResize(size=config.input_size),\n ToTensorOwn(), # Custom ToTensor transform, converts to CHW from HWC only\n Normalize(config.model_type),\n ])\n\naugment_transforms = Augmenter()\n\n\ndef get_train_loader():\n train_set = CatvsDogDataset(config.train_files, num_classes=config.num_classes, \\\n augment=True, basic_transforms=basic_transforms, augment_transforms=augment_transforms)\n\n train_loader = torch.utils.data.DataLoader(train_set, batch_size=config.batch_size, \\\n shuffle=True, num_workers=4, pin_memory=True, \\\n drop_last=False, prefetch_factor=2, \\\n # persistent_workers=True)\n persistent_workers=False)\n # persistent_workers and pin_memory both cant be set to true at the same time due to some bug.\n # Ref: https://github.com/pytorch/pytorch/issues/48370\n\n # For windows num_workers should be set to 0 due to some know issue. In ubuntu it works fine.\n # Ref: https://github.com/pytorch/pytorch/issues/4418#issuecomment-354614162\n return train_loader\n\n\ndef get_test_loader():\n test_set = CatvsDogDataset(config.test_files, num_classes=config.num_classes, \\\n augment=False, basic_transforms=basic_transforms)\n\n test_loader = torch.utils.data.DataLoader(test_set, batch_size=config.batch_size, \\\n shuffle=False, num_workers=0, pin_memory=True, \\\n drop_last=False, prefetch_factor=2, \\\n # persistent_workers=True)\n persistent_workers=False)\n # persistent_workers and pin_memory both cant be set to true at the same time due to some bug.\n # Ref: https://github.com/pytorch/pytorch/issues/48370\n\n # For windows num_workers should be set to 0 due to some know issue. 
In ubuntu it works fine.\n # Ref: https://github.com/pytorch/pytorch/issues/4418#issuecomment-354614162\n return test_loader\n\n\n\"\"\"\n# Below code is for debugging purpose only.\nif __name__ == \"__main__\":\n \n iterator = iter(train_loader)\n \n for i in range(3):\n batch = next(iterator)\n\n train_img = batch['image']\n train_label = batch['label']\n print(type(train_img))\n print(type(train_label))\n print(train_img.shape)\n print(train_label.shape)\n \n for img, label in zip(train_img, train_label):\n img = np.transpose(img, (1, 2, 0))\n img = np.uint8(img * 255)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n \n if label == 0:\n label = 'cat'\n else:\n label = 'dog'\n print(\"Label: \", label)\n cv2.imshow('img', img)\n cv2.waitKey()\n cv2.destroyAllWindows()\n \n #exit() \n\"\"\" "
},
{
"alpha_fraction": 0.6026200652122498,
"alphanum_fraction": 0.6026200652122498,
"avg_line_length": 33.653846740722656,
"blob_id": "e4c0171572af93ecdaf7000a5556c6b010def889",
"content_id": "aa167919395e556b9b2d7a7701abc9d97540ac96",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 916,
"license_type": "permissive",
"max_line_length": 83,
"num_lines": 26,
"path": "/utils/SummaryHelper.py",
"repo_name": "meet-minimalist/Learn-pytorch-in-one-example",
"src_encoding": "UTF-8",
"text": "\nimport numpy as np\nfrom torch.utils.tensorboard import SummaryWriter\n\nclass SummaryHelper:\n def __init__(self, summary_path):\n self.summary_writer = SummaryWriter(summary_path)\n\n def add_graph(self, model, ip_tensor):\n self.summary_writer.add_graph(model, ip_tensor)\n\n def add_summary(self, summary_dict, gstep):\n for key, value in summary_dict.items():\n\n if isinstance(value, float) or isinstance(value, int):\n # For scalar values,\n self.summary_writer.add_scalar(key, value, gstep)\n elif isinstance(value, np.ndarray):\n # For images,\n self.summary_writer.add_image(key, value, gstep, dataformats='CHW')\n else:\n print(\"Summary Input not identified\", type(value))\n\n self.summary_writer.flush()\n\n def close(self):\n self.summary_writer.close()\n\n \n "
},
{
"alpha_fraction": 0.7064538598060608,
"alphanum_fraction": 0.7411519885063171,
"avg_line_length": 33.261905670166016,
"blob_id": "b3040695c4df2db1fb28dffcb3bf83075f4f5bac",
"content_id": "a511d6552677812366d5e004cbba6e80e92adb51",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1441,
"license_type": "permissive",
"max_line_length": 109,
"num_lines": 42,
"path": "/model_export/convert_to_onnx.py",
"repo_name": "meet-minimalist/Learn-pytorch-in-one-example",
"src_encoding": "UTF-8",
"text": "\n\nimport onnx\nimport torch\nfrom onnxsim import simplify\nimport sys\nsys.path.append(\"../\")\n\nimport config\n# from models.SimpleCNN import ConvModel\nfrom models.ResNetModel import ResNetModel\n\n\n# ckpt_path = \"../summaries/2021_05_22_17_45_42/ckpt/model_eps_58_test_loss_0.1504.pth\"\t\t# simple cnn\n# op_onnx_model_path = \"./frozen_models/simplecnn_model_final.onnx\"\n\nckpt_path = \"../summaries/2021_05_22_23_11_17/ckpt/model_eps_58_test_loss_0.2679.pth\"\t\t# resnet18-full train\nop_onnx_model_path = \"./frozen_models/resnet18_model_final.onnx\"\ninput_names = [ \"input\" ]\noutput_names = [ \"output\" ]\n\n\nwith torch.no_grad(): # Disabling the gradient calculations will reduce the calculation overhead.\n # model = ConvModel(num_classes=config.num_classes, inference=True)\n model = ResNetModel(num_classes=config.num_classes, freeze_backend=False, inference=True)\n\ncheckpoint = torch.load(ckpt_path)\nmodel.load_state_dict(checkpoint['model']) # Only restoring model variables only \nmodel.eval()\n\n\ndummy_input = torch.randn(1, 3, config.input_size[0], config.input_size[1])\n\n# Saving onnx model\ntorch.onnx.export(model, dummy_input, op_onnx_model_path, input_names=input_names, output_names=output_names)\n\n# Load onnx model to simplify it.\nonnx_model = onnx.load(op_onnx_model_path)\n\n# Simplify the model by removing redundant nodes.\nmodel_simp, check = simplify(onnx_model)\n\nprint(\"Status: \", check)\nonnx.save(model_simp, op_onnx_model_path)\n"
},
{
"alpha_fraction": 0.6195122003555298,
"alphanum_fraction": 0.6268292665481567,
"avg_line_length": 34.65217208862305,
"blob_id": "62bd7f739febbbac888465c37887c26f61465f24",
"content_id": "9e56309c44a8081372dc5cc533f71af7fdf7034e",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 820,
"license_type": "permissive",
"max_line_length": 149,
"num_lines": 23,
"path": "/utils/CheckpointHandler.py",
"repo_name": "meet-minimalist/Learn-pytorch-in-one-example",
"src_encoding": "UTF-8",
"text": "import os\nimport glob\nimport torch\n\nclass CheckpointHandler:\n def __init__(self, ckpt_path, max_to_keep=3):\n self.ckpt_path = ckpt_path\n self.max_to_keep = max_to_keep\n self.ckpt_path_history = []\n\n def save(self, checkpoint_state):\n eps = checkpoint_state['epoch']\n test_loss = checkpoint_state['test_loss']\n\n cur_ckpt_path = os.path.splitext(self.ckpt_path)[0] + \"_eps_{}_test_loss_{:.4f}\".format(eps, test_loss) + os.path.splitext(self.ckpt_path)[1]\n torch.save(checkpoint_state, cur_ckpt_path)\n\n self.ckpt_path_history.append(cur_ckpt_path)\n\n if len(self.ckpt_path_history) > 3:\n # Remove old checkpoints as per max_to_keep arguments.\n remove_ckpt_path = self.ckpt_path_history.pop(0)\n os.remove(remove_ckpt_path)\n"
},
{
"alpha_fraction": 0.554046094417572,
"alphanum_fraction": 0.5971648097038269,
"avg_line_length": 28.6842098236084,
"blob_id": "0775a531732f114288b9690be47280b3fb72f30f",
"content_id": "7870054648ad13a59bcd77e98458b7c0ba7724ba",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1693,
"license_type": "permissive",
"max_line_length": 98,
"num_lines": 57,
"path": "/model_export/run_onnx_model.py",
"repo_name": "meet-minimalist/Learn-pytorch-in-one-example",
"src_encoding": "UTF-8",
"text": "\nimport cv2\nimport torch\nimport numpy as np\nimport onnxruntime as ort\nimport sys\nsys.path.append(\"../\")\n\nimport config\n\n# op_onnx_path = \"./frozen_models/simplecnn_model_final.onnx\"\nop_onnx_path = \"./frozen_models/resnet18_model_final.onnx\"\n# img_path = \"./test_imgs/cat.3.jpg\"\nimg_path = \"./test_imgs/dog.3426.jpg\"\n\n\ndef preprocess(img, normalize):\n h, w = img.shape[:2]\n if h > w:\n off_t = (h - w) // 2\n square_img = img[off_t : off_t + w, : , :]\n else:\n off_l = (w - h) // 2\n square_img = img[: , off_l : off_l + h , :]\n\n square_img = cv2.resize(square_img, (config.input_size[1], config.input_size[0])) # [w x h]\n if normalize == 'resnet18':\n square_img = square_img / 255.\n square_img[:, :, 0] = (square_img[:, :, 0] - 0.485) / 0.229\n square_img[:, :, 1] = (square_img[:, :, 1] - 0.456) / 0.224\n square_img[:, :, 2] = (square_img[:, :, 2] - 0.406) / 0.225\n elif normalize == 'simplecnn':\n square_img = square_img / 255.\n square_img = np.transpose(square_img, (2, 0, 1)) # CHW from HWC\n square_img = np.expand_dims(square_img, axis=0)\n square_img = np.float32(square_img)\n return square_img\n\n\n\n\nort_session = ort.InferenceSession(op_onnx_path)\n\nori_img = cv2.imread(img_path)\nimg = cv2.cvtColor(ori_img, cv2.COLOR_BGR2RGB)\nimg_ip = preprocess(img, normalize='resnet18')\n\nsoftmax_op = ort_session.run(None, {'input': img_ip})[0]\nidx = np.argmax(softmax_op)\n\nif idx == 0:\n print(\"Class: Cat, Probability: {:.4f}\".format(softmax_op[0][idx]))\nelse:\n print(\"Class: Dog, Probability: {:.4f}\".format(softmax_op[0][idx]))\n\ncv2.imshow('img', ori_img)\ncv2.waitKey()\ncv2.destroyAllWindows()\n"
},
{
"alpha_fraction": 0.6002089977264404,
"alphanum_fraction": 0.6342737674713135,
"avg_line_length": 44.81730651855469,
"blob_id": "48faaa80a9bc9f0976d8bcba6352c5720a829840",
"content_id": "4dee4337aeb832e4c1dba7c70194f45d2c0e218d",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4785,
"license_type": "permissive",
"max_line_length": 243,
"num_lines": 104,
"path": "/config.py",
"repo_name": "meet-minimalist/Learn-pytorch-in-one-example",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Dec 22 22:59:35 2020\n\n@author: Meet\n\"\"\"\n\nimport os\nimport glob\nimport torch\nimport numpy as np\nimport imgaug\n\n# --------------- Setting Random Seeds ------------------ #\nos.environ['PYTHONHASHSEED']=str(42)\nos.environ[\"PL_GLOBAL_SEED\"] = str(42) \nos.environ[\"CUBLAS_WORKSPACE_CONFIG\"] = ':16:8' \n# Added above due to torch.set_deterministic(True) \n# Ref: https://github.com/pytorch/pytorch/issues/47672#issuecomment-725404192\n\nnp.random.seed(42)\nimgaug.seed(42) \n# Although, imgaug seed and torch seed are set but internally when torch will be using multi threads and \n# We might not be having control over which thread will call imgaug augmenter with which img sequence.\n# e.g. For exp-1, img-1, img-2, img-3 will be provided by thread-1, thread-3, thread-2 respectively.\n# In exp-2, img-1, img-2, img-3 might be provided by thread-3, thread-2, thread-1 respectively.\n# And imgaug will provide augmentations to these img in same sequence.\n# E.g. In exp-1, img-1, img-2, img-3 are provided to imgaug module in sequence 1, 3, 2, then\n# img-1 will face augmentation-1, img-3 will face augmentation-2 and img-2 --> augmentation-3\n# In exp-2, img-1, img-2, img-3 are provided to imgaug module in sequence 3, 2, 1, then\n# img-3 will face augmentation-1, img-2 will face augmentation-2 and img-1 --> augmentation-3\n# So complete control over randomness is not achieved due to irregularities/randomness between imgaug and pytorch dataloader\n\ntorch.set_deterministic(True) # This will set deterministic behaviour for cuda operations\ntorch.manual_seed(42)\ntorch.cuda.manual_seed_all(42) # sets random seed for cuda for all gpus\n\ntorch.backends.cudnn.benchmark = False # Cuda will not try to find the best possible algorithm implementations, performance might degrade due to this being set to False \ntorch.backends.cudnn.deterministic=True # cuda will use only deterministic implementations\n# print(\"ERROR SEED NOT SET PROPERLY\")\n\n# pytorch reproducibility\n# Ref: https://stackoverflow.com/q/56354461 \n# Ref: https://learnopencv.com/ensuring-training-reproducibility-in-pytorch/\n\n# -------------------------------------------------------- #\n\n\n# ----------------------- Dataset ------------------------ #\ndataset_path = \"M:/Datasets/dogs-vs-cats/train/\"\nnum_classes = 2\n\ncat_files = glob.glob(dataset_path + \"/cat*.jpg\")\ndog_files = glob.glob(dataset_path + \"/dog*.jpg\")\n\nnp.random.shuffle(cat_files)\nnp.random.shuffle(dog_files)\n\ntrain_files = []\ntest_files = []\n\ntrain_files.extend(cat_files[:int(len(cat_files)*0.9)])\ntrain_files.extend(dog_files[:int(len(dog_files)*0.9)])\n\ntest_files.extend(cat_files[int(len(cat_files)*0.9):])\ntest_files.extend(dog_files[int(len(dog_files)*0.9):])\n\nnp.random.shuffle(train_files)\nnp.random.shuffle(test_files)\n# ------------------------------------------------------- #\n\n\n# ------------------- Training Routine ------------------ #\nuse_amp \t\t\t\t= False\t\t\t\t # AMP will give reduced memory footprint and reduced computation time for GPU having Tensor Cores \nmodel_type = 'resnet18' # ['resnet18', 'simplecnn']\nfreeze_backend = False\t\t\t\t # Only used when 'resnet18' pretrained model is used\t\nbatch_size = 256\nepochs = 60\ninput_size = [112, 112] # [H x W]\nl2_weight_decay = 0.00005\nweight_init_method = 'msra' # ['msra', 'xavier_normal'] # 'msra' also known as variance scaling initializer and Kaiming He (normal dist) initialization \n\n# He initialization works 
better for layers with ReLu activation.\n# Xavier initialization works better for layers with sigmoid activation.\n# Ref: https://stats.stackexchange.com/a/319849\n\nexp_path = \"./summaries/\"\nos.makedirs(exp_path, exist_ok=True)\n\ntrain_steps = int(np.ceil(len(train_files) / batch_size))\ntest_steps = int(np.ceil(len(test_files) / batch_size))\nloss_logging_frequency = (train_steps // 100) if 0 < (train_steps // 100) < 100 else 1 if (train_steps // 100) == 0 else 100 # At every 100 steps or num_training_steps/3 steps training loss will be printed and summary will be saved.\n# ------------------------------------------------------- #\n\n\n# --------------- Learning Rate --------------- #\nwarm_up = True\nwarm_up_eps = 2\ninit_lr = 0.001\nlr_scheduler = 'exp_decay' # ['exp_decay', 'cosine_annealing']\nlr_exp_decay = 0.94 # Only for 'burn_in_decay'. Set this such that at the end of training (= after \"epochs\" number of iterations), the lr will be of scale 1e-6 or 1e-7.\nsteps_per_epoch = train_steps\nburn_in_steps = steps_per_epoch * warm_up_eps\n# --------------------------------------------- # \n"
}
] | 23 |
ktwalsh/trace_camp
|
https://github.com/ktwalsh/trace_camp
|
1a774e346ab330ee18ab632d0dd00aa17341746b
|
a29f4f4fdf243e42f31c40addbf7c8d71caef022
|
8bd610ebad4344a1b494a8f457bac00c09d533ba
|
refs/heads/master
| 2023-01-23T04:00:10.324930 | 2019-08-16T17:36:15 | 2019-08-16T17:36:15 | 200,055,803 | 0 | 0 | null | 2019-08-01T13:27:57 | 2019-08-16T17:36:18 | 2023-01-04T06:58:33 |
HTML
|
[
{
"alpha_fraction": 0.6504854559898376,
"alphanum_fraction": 0.6537216901779175,
"avg_line_length": 32.33333206176758,
"blob_id": "1682df07dbfa1ceaecd66760b98874ee6204e8e2",
"content_id": "d89dac97735df1cc15af9fe9bad4775879e069b8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 309,
"license_type": "no_license",
"max_line_length": 116,
"num_lines": 9,
"path": "/asteroids/src/nasaWorker.js",
"repo_name": "ktwalsh/trace_camp",
"src_encoding": "UTF-8",
"text": "// This file is responsible for getting data from Nasa\r\nimport axios from 'axios';\r\nimport App from './App';\r\n\r\nconst API_KEY = \"xdmrFmM68ruLMzXiIAi3SfpVGJr5ofncLv7NxA45\"\r\n\r\nexport const getAsteroids = (start, end) => {\r\n return axios.get(`https://api.nasa.gov/neo/rest/v1/feed?start_date=${start}&end_date=${end}&api_key=${API_KEY}`)\r\n}\r\n"
},
{
"alpha_fraction": 0.7488986849784851,
"alphanum_fraction": 0.7621145248413086,
"avg_line_length": 37,
"blob_id": "76d1e0716006a6dea8d7295ae1fc2216ba854b71",
"content_id": "2f0e982bb8af910544a86ba99001ba75066314a5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 227,
"license_type": "no_license",
"max_line_length": 60,
"num_lines": 6,
"path": "/nasagram/nasacomment/views.py",
"repo_name": "ktwalsh/trace_camp",
"src_encoding": "UTF-8",
"text": "from django.shortcuts import render\nfrom django.http import HttpResponse\n# Create your views here.\ndef view_home(request):\n #print(\"this is the homepage\", request)\n return HttpResponse(\"This is the home page\", status=200)"
},
{
"alpha_fraction": 0.7450980544090271,
"alphanum_fraction": 0.7450980544090271,
"avg_line_length": 38.230770111083984,
"blob_id": "9a8fb21ab83702d92e822c1ebe75cfde1b8ebf9f",
"content_id": "12db316cff0a62df2b6896dcfa6e46bdb56c0f4e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 510,
"license_type": "no_license",
"max_line_length": 87,
"num_lines": 13,
"path": "/project_1/eventsite/eventapp/models.py",
"repo_name": "ktwalsh/trace_camp",
"src_encoding": "UTF-8",
"text": "from django.db import models\nfrom django.contrib.auth import get_user_model\nfrom django.contrib.auth.models import AbstractUser, User\n\n# Create your models here.\nclass Event(models.Model):\n creator = models.ForeignKey(User, on_delete=models.CASCADE, related_name='creator')\n title = models.TextField()\n description = models.TextField()\n location = models.TextField()\n date = models.DateField()\n time = models.TimeField()\n attendees = models.ManyToManyField(User, related_name='attendees')\n"
},
{
"alpha_fraction": 0.4803149700164795,
"alphanum_fraction": 0.5413385629653931,
"avg_line_length": 21.086956024169922,
"blob_id": "709b2167fd3d858b4fa0ed2063c6d295c3f598e7",
"content_id": "8ad1b94493a0eff9a7baeddfc1eb18e6a8f8271c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 508,
"license_type": "no_license",
"max_line_length": 48,
"num_lines": 23,
"path": "/project_1/eventsite/eventapp/migrations/0004_auto_20190815_1445.py",
"repo_name": "ktwalsh/trace_camp",
"src_encoding": "UTF-8",
"text": "# Generated by Django 2.2.4 on 2019-08-15 14:45\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('eventapp', '0003_auto_20190813_1742'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='event',\n name='date',\n field=models.DateField(),\n ),\n migrations.AlterField(\n model_name='event',\n name='time',\n field=models.TimeField(),\n ),\n ]\n"
},
{
"alpha_fraction": 0.5029239654541016,
"alphanum_fraction": 0.5458089709281921,
"avg_line_length": 21.30434799194336,
"blob_id": "e7a4faa0bd25719e8d170ccfdade073eeb34f057",
"content_id": "fa0c65f5d339820238a19a1c41b943f1edbf0313",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 513,
"license_type": "no_license",
"max_line_length": 51,
"num_lines": 23,
"path": "/project_1/eventsite/eventapp/migrations/0002_auto_20190803_1719.py",
"repo_name": "ktwalsh/trace_camp",
"src_encoding": "UTF-8",
"text": "# Generated by Django 2.2.3 on 2019-08-03 17:19\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('eventapp', '0001_initial'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='event',\n name='attenders',\n ),\n migrations.AddField(\n model_name='event',\n name='time',\n field=models.TextField(default='0:00'),\n preserve_default=False,\n ),\n ]\n"
},
{
"alpha_fraction": 0.464342325925827,
"alphanum_fraction": 0.464342325925827,
"avg_line_length": 29.600000381469727,
"blob_id": "4bd206380160ec66d02b6b202b2e459ec4185bde",
"content_id": "101bca2c109b794d89dfdb90e1c68ce5d629b03b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 1262,
"license_type": "no_license",
"max_line_length": 153,
"num_lines": 40,
"path": "/project_2/attendee-checker/src/components/Event.js",
"repo_name": "ktwalsh/trace_camp",
"src_encoding": "UTF-8",
"text": "import React, { useState, useEffect } from 'react'\r\nimport eventWorker, { getEvents } from '../eventWorker'\r\nimport { BrowserRouter, Link } from 'react-router-dom'\r\n\r\nfunction Event({ match, history }) {\r\n const [eventData, setEventData] = useState([])\r\n\r\n const parseEvent = (event) => {\r\n return <tbody key={event.id}><tr><td><Link to={`/event/${event.id}/`}>{event.name.text}</Link></td><td>{event.description.text}</td></tr></tbody>\r\n }\r\n\r\n const handleClick = (event) => {\r\n // console.log(event)\r\n //history.push(`attendees/${event.id}`)\r\n }\r\n\r\n useEffect(() => {\r\n getEvents().then((response) => {\r\n // console.log(response.data.events)\r\n setEventData(response.data.events.map(parseEvent))\r\n }\r\n )\r\n }, [])\r\n return (\r\n <div>\r\n <table>\r\n <thead>\r\n <tr>\r\n <th>Name of Event</th>\r\n <th>Description of Event</th>\r\n </tr>\r\n </thead>\r\n {eventData}\r\n </table>\r\n <Link to=\"/create\">Create a new Event</Link>\r\n </div>\r\n )\r\n}\r\n\r\nexport default Event"
},
{
"alpha_fraction": 0.7203579545021057,
"alphanum_fraction": 0.7203579545021057,
"avg_line_length": 42.900001525878906,
"blob_id": "f04e7d32c7a811901e53c1d930b0c287a9e1a9b7",
"content_id": "31b25fea5a642cf1a0621878e0fa7ca7965a0651",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 447,
"license_type": "no_license",
"max_line_length": 87,
"num_lines": 10,
"path": "/portfolio/blog/urls.py",
"repo_name": "ktwalsh/trace_camp",
"src_encoding": "UTF-8",
"text": "from django.urls import path\r\nimport blog.views as views\r\nfrom django.views.generic import TemplateView\r\n\r\nurlpatterns = [\r\n path('create', views.PostCreate.as_view(template_name=\"post_form.html\")),\r\n path('list', views.PostList.as_view(template_name=\"post_list.html\")),\r\n path('update', views.PostUpdate.as_view(template_name=\"post_form.html\")),\r\n path('delete', views.PostDelete.as_view(template_name=\"post_confirm_delete.html\")),\r\n]"
},
{
"alpha_fraction": 0.42041078209877014,
"alphanum_fraction": 0.42041078209877014,
"avg_line_length": 33.45454406738281,
"blob_id": "9073a0e310103c8cc267510adf5e9e83af9ab7c1",
"content_id": "7eba84a33edef8584f4b85acbf41fab53541e3e0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 1558,
"license_type": "no_license",
"max_line_length": 97,
"num_lines": 44,
"path": "/project_2/attendee-checker/src/components/Details.js",
"repo_name": "ktwalsh/trace_camp",
"src_encoding": "UTF-8",
"text": "import React, { useState, useEffect } from 'react'\r\nimport { BrowserRouter, NavLink } from 'react-router-dom'\r\nimport { getAttendees, getDetails } from '../eventWorker';\r\nimport Moment from 'react-moment'\r\n\r\nfunction Details({ match }) {\r\n const [details, setDetails] = useState([])\r\n const [profile, setProfile] = useState([])\r\n // const parseDetails = (details) => {\r\n // return <tr><td></td><td></td></tr>\r\n // }\r\n useEffect(() => {\r\n getDetails(match.params.event_id, match.params.attendee_id).then((response) => {\r\n console.log(response.data)\r\n setProfile(response.data.profile)\r\n setDetails(response.data)\r\n })\r\n }, [])\r\n console.log(details.name)\r\n return (\r\n <div className=\"App\">\r\n <table>\r\n <thead>\r\n <tr>\r\n <th>Name</th>\r\n <th>Email</th>\r\n <th>status</th>\r\n <th>Order Time</th>\r\n </tr>\r\n </thead>\r\n <tbody>\r\n <tr>\r\n <td>{profile.name}</td>\r\n <td>{profile.email}</td>\r\n <td>{details.status}</td>\r\n <td><Moment format=\"MM/DD/YYYY HH:MM\">{details.created}</Moment></td>\r\n </tr>\r\n </tbody>\r\n </table>\r\n </div>\r\n )\r\n}\r\n\r\nexport default Details"
},
{
"alpha_fraction": 0.5578634738922119,
"alphanum_fraction": 0.5697329640388489,
"avg_line_length": 24,
"blob_id": "1e54cf5d23da8d9a07bdfbb449225d666291454a",
"content_id": "a8cff5ebb3b8efa7f8ed957ba0536f891990cccc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 674,
"license_type": "no_license",
"max_line_length": 106,
"num_lines": 26,
"path": "/wwk2dy3/app/src/components/Datepicker.js",
"repo_name": "ktwalsh/trace_camp",
"src_encoding": "UTF-8",
"text": "import React from 'react'\r\nimport moment from 'moment'\r\nimport {Route} from 'react-router-dom'\r\n\r\nconst Datepicker = ({match, history}) => {\r\n const date = match.params.date\r\n const today = moment().format('YYYY-MM-DD')\r\n const minDate = moment('1995-06-15').format('YYYY-MM-DD')\r\n\r\n const dateHandler = (event) => {\r\n const date = event.target.value\r\n history.push(`/apods/${date}`)\r\n }\r\n\r\n return (\r\n <div>\r\n <label htmlFor=\"date\">Date: </label>\r\n\r\n <input onChange={dateHandler} type=\"date\" id=\"start\" value={date} min={minDate} max={today} />\r\n\r\n </div>\r\n \r\n )\r\n}\r\n\r\nexport default Datepicker;"
},
{
"alpha_fraction": 0.6644462943077087,
"alphanum_fraction": 0.6852622628211975,
"avg_line_length": 32.38888931274414,
"blob_id": "c34099bbe2fcc7e28f11402ff5bdb4cfc13d0404",
"content_id": "d2d14b7d0c343b6d2a329c6ee1949a2d8252da4a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 1201,
"license_type": "no_license",
"max_line_length": 116,
"num_lines": 36,
"path": "/project_2/attendee-checker/src/eventWorker.js",
"repo_name": "ktwalsh/trace_camp",
"src_encoding": "UTF-8",
"text": "// This file is responsible for getting data from Eventbrite\nimport axios from 'axios'\n\nconst API_KEY = '5ONPWYLI6RWSTP3U636F'\nconst my_id ='311550779588'\n\nexport const getEvents = () => {\n return axios.get(`https://www.eventbriteapi.com/v3/events/search/?token=${API_KEY}&user.id=${my_id}`)\n}\n\nexport const getAllEvents = () => {\n const mod= ''\n return axios.get(`https://www.eventbriteapi.com/v3/events/search/?token=${API_KEY}${mod}`)\n}\n\nexport const getEventDetails = (event_id) => {\n return axios.get(`https://www.eventbriteapi.com/v3/events/${event_id}/?token=${API_KEY}`)\n}\n\nexport const getAttendees = (event_id) => {\n return axios.get(`https://www.eventbriteapi.com/v3/events/${event_id}/attendees/?token=${API_KEY}`)\n}\n\nexport const getDetails = (event_id, attendee_id) => {\n return axios.get(`https://www.eventbriteapi.com/v3/events/${event_id}/attendees/${attendee_id}/?token=${API_KEY}`)\n}\n\nexport const getVenue = (venue_id) => {\n return axios.get(`https://www.eventbriteapi.com/v3/venues/${venue_id}/?token=${API_KEY}`)\n}\n\nexport const postEvent = (org_id) => {\n return axios.post(`https://www.eventbriteapi.com/v3/organizations/${org_id}/events/?token=${API_KEY}`,\n {}\n )\n}"
},
{
"alpha_fraction": 0.5741572976112366,
"alphanum_fraction": 0.6078651547431946,
"avg_line_length": 33.230770111083984,
"blob_id": "0b597a639b146d360f274dac4d7efa3f058772a9",
"content_id": "52f6154b233d31946c47120376436bd38affc843",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 890,
"license_type": "no_license",
"max_line_length": 158,
"num_lines": 26,
"path": "/wk3d1-livecode/product/crud/migrations/0001_initial.py",
"repo_name": "ktwalsh/trace_camp",
"src_encoding": "UTF-8",
"text": "# Generated by Django 2.2.4 on 2019-08-12 15:03\n\nimport django.core.validators\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='ProductModel',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=140)),\n ('brand', models.CharField(max_length=140)),\n ('price', models.DecimalField(decimal_places=2, max_digits=10)),\n ('description', models.CharField(max_length=280)),\n ('ethic_rating', models.IntegerField(validators=[django.core.validators.MaxValueValidator(10), django.core.validators.MinValueValidator(0)])),\n ],\n ),\n ]\n"
},
{
"alpha_fraction": 0.6834170818328857,
"alphanum_fraction": 0.6834170818328857,
"avg_line_length": 26.428571701049805,
"blob_id": "c7f0b031b8118087ea203b66f80fb322d97cfb4a",
"content_id": "badaa7607ad66bdd8ee94c79ed250dd37058182f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 199,
"license_type": "no_license",
"max_line_length": 54,
"num_lines": 7,
"path": "/api_fetch/app/src/Swapi.js",
"repo_name": "ktwalsh/trace_camp",
"src_encoding": "UTF-8",
"text": "// This file is responsible for getting data from Nasa\r\nimport axios from 'axios';\r\nimport App from './App';\r\n\r\n\r\nexport const getPlanets = {\r\n return axios.get(\"https://swapi.co/api/planets/)\"}\r\n"
},
{
"alpha_fraction": 0.6775068044662476,
"alphanum_fraction": 0.6802167892456055,
"avg_line_length": 22.733333587646484,
"blob_id": "b9eb6cd249a687927757555e8c0622e8c8bdaeeb",
"content_id": "4330f533806a79ded725af740e9e314ab1d72a1b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 369,
"license_type": "no_license",
"max_line_length": 92,
"num_lines": 15,
"path": "/wk3d1-livecode/context-counter/src/components/Context/CounterContext/CounterContext.js",
"repo_name": "ktwalsh/trace_camp",
"src_encoding": "UTF-8",
"text": "import React from 'react'\r\n\r\nconst CounterContext = React.createContext()\r\n\r\nexport default CounterContext\r\n\r\nexport const CounterProvider = props => {\r\n const [counter, setCounter] = React.useState(0)\r\n\r\n const value = {\r\n counter,\r\n setCounter\r\n }\r\n return <CounterContext.Provider value={value}>{props.children}</CounterContext.Provider>\r\n}"
},
{
"alpha_fraction": 0.617943525314331,
"alphanum_fraction": 0.6275201439857483,
"avg_line_length": 28.611940383911133,
"blob_id": "5ca257d59bc947bde9319cbc24f9c973a7a28923",
"content_id": "80fc0fb7186b6c238fa34fd5f34893f14afbb202",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 1984,
"license_type": "no_license",
"max_line_length": 136,
"num_lines": 67,
"path": "/asteroids/src/App.js",
"repo_name": "ktwalsh/trace_camp",
"src_encoding": "UTF-8",
"text": "import React, { useState, useEffect } from 'react';\nimport logo from './logo.svg';\nimport './App.css';\nimport { getAsteroids } from './nasaWorker'\nimport moment from 'moment';\nimport { VictoryChart, VictoryTheme, VictoryScatter } from 'victory';\n\n\nfunction App() {\n\n const [ startDate, setStartDate ] = useState(moment().subtract(1, 'week').format(\"YYYY-MM-DD\"));\n const [ endDate, setEndDate ] = useState(moment().format(\"YYYY-MM-DD\"));\n const [ asteroidData, setAsteroidData ] = useState([]);\n\n const [ graphData, setGraphData ] = useState([]);\n\n const parseAsteroid = (asteroid) => {\n console.log(asteroid)\n return {x: Number(asteroid.close_approach_data[0].miss_distance.miles), y: asteroid.estimated_diameter.miles.estimated_diameter_max}\n }\n\n useEffect(() => {\n console.log(asteroidData.map(parseAsteroid))\n setGraphData(asteroidData.map(parseAsteroid))\n }, [asteroidData] )\n\n useEffect(() => {\n console.log(\"This is coming from the useEffect: \", startDate, endDate)\n\n var closeAsteroids= []\n getAsteroids(startDate, endDate).then(\n (response) => {\n console.log(response.data.near_earth_objects)\n Object.entries(response.data.near_earth_objects).forEach((value) => {\n closeAsteroids = closeAsteroids.concat(value[1])\n })\n }).then(() => {\n setAsteroidData(closeAsteroids)\n })\n\n }, [startDate, endDate] )\n\nconst handler = (event) => {\n console.log(event)\n}\n return (\n <div className=\"App\">\n Test\n <input value = {startDate.toString()} type=\"date\" id=\"start\" onChange={ (e) => setStartDate(e.target.value) }/>\n <input value = {endDate.toString()} type=\"date\" id=\"end\" onChange={ (e) => setEndDate(e.target.value) }/>\n \n\n <VictoryChart \n domain={{ x: [0, 42457944], y: [0, 2] }}\n >\n <VictoryScatter\n style={{ data: { fill: \"#c43a31\" } }}\n size={2}\n data={graphData}\n />\n </VictoryChart>\n </div>\n );\n}\n\n\nexport default App;\n"
},
{
"alpha_fraction": 0.6891545653343201,
"alphanum_fraction": 0.6891545653343201,
"avg_line_length": 63.16666793823242,
"blob_id": "cea8cf04c7b28e36b2198bae8752cf7721ebcf17",
"content_id": "05d2920e8afb7df9c49e8600aaac4e851fc77e78",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1171,
"license_type": "no_license",
"max_line_length": 110,
"num_lines": 18,
"path": "/project_1/eventsite/eventapp/urls.py",
"repo_name": "ktwalsh/trace_camp",
"src_encoding": "UTF-8",
"text": "from django.urls import path\r\nimport eventapp.views as views\r\nfrom django.views.generic import TemplateView\r\n\r\nurlpatterns = [\r\n path('/', views.redirect_view),\r\n path('signup/', views.SignUp.as_view(template_name=\"signup.html\"), name=\"signup\"),\r\n path('view', views.ListEvent.as_view(template_name=\"event_list.html\")),\r\n path('create', views.CreateEvent.as_view(template_name=\"event_form.html\")),\r\n path('update/<slug:pk>', views.UpdateEvent.as_view(template_name=\"event_form.html\")),\r\n path('delete/<slug:pk>', views.DeleteEvent.as_view(template_name=\"event_confirm_delete.html\")),\r\n path('details/<slug:pk>', views.EventDetails.as_view(template_name=\"event_details.html\")),\r\n path('attendees/<slug:pk>', views.AttendeeDetails.as_view(template_name=\"attendee_list.html\")), \r\n path('addattendees/<slug:pk>', views.addAttendee), \r\n path('view/search', views.searchEventList.as_view(template_name=\"event_list.html\")),\r\n path('profile/<slug:pk>', views.ViewProfile.as_view(template_name=\"profile.html\")),\r\n path('profile/edit/<slug:pk>', views.EditProfile.as_view(template_name=\"signup.html\"), name=\"signup\"),\r\n]"
},
{
"alpha_fraction": 0.5347029566764832,
"alphanum_fraction": 0.5358134508132935,
"avg_line_length": 30.781818389892578,
"blob_id": "f55584d8ec7088f110ad34fdfe1c68d8fbfec5ae",
"content_id": "6a3c58314a35f5cfbe335811468a773923684778",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 1801,
"license_type": "no_license",
"max_line_length": 137,
"num_lines": 55,
"path": "/wk2dy4/frontend/src/components/Form.js",
"repo_name": "ktwalsh/trace_camp",
"src_encoding": "UTF-8",
"text": "import React, {useState, useEffect} from 'react'\r\nimport useForm from \"./hooks/useForm\"\r\nimport { createKick } from '../apiservice'\r\n\r\nfunction Form(props) {\r\n const { values, handleSubmit, handleChange, setValues } = useForm({\r\n blurb: \"\",\r\n backers: 0,\r\n pledged: 0,\r\n created: \"\"\r\n }, sendData)\r\n\r\n const [loading, setLoading] = useState(true);\r\n useEffect(() => {\r\n console.log(props);\r\n if (props.form_data) {\r\n setValues(props.form_data);\r\n setLoading(false);\r\n } else {\r\n setLoading(false);\r\n }\r\n }, []);\r\n\r\n function sendData() {\r\n const payload = values\r\n const created = new Date()\r\n payload[\"created\"] = created.toISOString()\r\n createKick(payload).then(res => {\r\n console.log(payload)\r\n\r\n })\r\n }\r\n\r\n const theForm = (\r\n <form onSubmit={handleSubmit}>\r\n <div className=\"form-group\">\r\n <label>Blurb</label>\r\n <input className=\"form-control\" type=\"text\" name='blurb' value={values.blurb} onChange={handleChange} />\r\n </div>\r\n <div className=\"form-group\">\r\n <label>Backers</label>\r\n <input className=\"form-control\" type=\"number\" name='backers' value={values.backers} onChange={handleChange} />\r\n </div>\r\n <div className=\"form-group\">\r\n <label>Pledged</label>\r\n <input className=\"form-control\" type=\"number\" step=\"any\" name='pledged' value={values.pledged} onChange={handleChange} />\r\n </div>\r\n <input className=\"form-control\" type=\"submit\" value=\"submit\" />\r\n </form>\r\n\r\n )\r\n return loading ? <div>Loading</div> : theForm\r\n}\r\n\r\nexport default Form;"
},
{
"alpha_fraction": 0.7410358786582947,
"alphanum_fraction": 0.7410358786582947,
"avg_line_length": 27,
"blob_id": "082d781bc0d487121c4ba252e7ccd4bb703a3b80",
"content_id": "b5edf2357e93bf7426b2bce74a64bbb23645ced4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 251,
"license_type": "no_license",
"max_line_length": 47,
"num_lines": 9,
"path": "/wk3d1-livecode/product/crud/views.py",
"repo_name": "ktwalsh/trace_camp",
"src_encoding": "UTF-8",
"text": "from django.shortcuts import render\nfrom django.vies.generic.edit import CreateView\nfrom crud.models import ProductModel\n\n# Create your views here.\nclass ProductCreateView(CreateView):\n model = Product\n fields = ['__all__']\n success_url = '/'"
},
{
"alpha_fraction": 0.4205128252506256,
"alphanum_fraction": 0.4266666769981384,
"avg_line_length": 19.2391300201416,
"blob_id": "4c77b294f4ce53011a497ad3b4b958e2ee5dec07",
"content_id": "c8af1054b2f2f07885a675234cbfa352298263d2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 975,
"license_type": "no_license",
"max_line_length": 62,
"num_lines": 46,
"path": "/wk2d2-livecode/reactlivecode/src/components/Pages.js",
"repo_name": "ktwalsh/trace_camp",
"src_encoding": "UTF-8",
"text": "import React from 'react'\r\nimport {BrowserRouter, Route, NavLink} from 'react-router-dom'\r\n\r\nfunction Home() {\r\n return(\r\n <div>\r\n <h1>Home Pages</h1>\r\n </div>\r\n )\r\n}\r\n\r\nfunction About() {\r\n return(\r\n <div>\r\n <h1>About</h1>\r\n </div>\r\n )\r\n}\r\n\r\nfunction Contact() {\r\n return(\r\n <div>\r\n <h1>Contact</h1>\r\n </div>\r\n )\r\n}\r\n\r\nfunction Pages() {\r\n return(\r\n <BrowserRouter>\r\n <div>\r\n <nav>\r\n <NavLink to=\"/\">Home</NavLink>\r\n <NavLink to=\"/about\">About</NavLink>\r\n <NavLink to=\"/contact\">Contact</NavLink>\r\n </nav> \r\n <Route path=\"/\" exact component={Home} />\r\n <Route path=\"/about\" component={About} />\r\n <Route path=\"/contact\" component={Contact} />\r\n </div>\r\n </BrowserRouter>\r\n )\r\n}\r\n\r\n\r\nexport default Pages;"
},
{
"alpha_fraction": 0.7109580039978027,
"alphanum_fraction": 0.7109580039978027,
"avg_line_length": 31.4255313873291,
"blob_id": "bab9aa09f6c55c9b19e72b838d4810be9f402a74",
"content_id": "1b7ee4759a77770befa73c094a18e4eac67282b9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3048,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 94,
"path": "/project_1/eventsite/eventapp/views.py",
"repo_name": "ktwalsh/trace_camp",
"src_encoding": "UTF-8",
"text": "from django.shortcuts import render\nfrom django.views.generic.edit import CreateView, UpdateView, DeleteView\nfrom django.views.generic.list import ListView\nfrom django.views.generic.detail import DetailView\nfrom eventapp.models import Event\nfrom django.contrib.auth.forms import UserCreationForm\nfrom django.urls import reverse_lazy\nfrom django.views import generic\nfrom django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin\nfrom django.shortcuts import redirect\nfrom django.db.models import Q\nfrom django.contrib.auth.models import AbstractUser, User\nfrom authentication.forms import UserCreateForm\n# Create your views here.\nclass ListEvent(ListView):\n model = Event\n\ndef redirect_view(request):\n return redirect(\"/home\")\n\nclass CreateEvent(LoginRequiredMixin, CreateView):\n model = Event\n fields = ['title', 'description', 'location', 'date', 'time']\n\n def get_success_url(self):\n return '/event/view'\n\n def form_valid(self, form):\n form.instance.creator_id = self.request.user.id\n return super(CreateEvent, self).form_valid(form)\n\n\nclass UpdateEvent(LoginRequiredMixin, UserPassesTestMixin, UpdateView):\n model = Event\n fields = ['title', 'description', 'location', 'date', 'time']\n \n def test_func(self):\n return self.request.user.id == self.get_object().creator.id\n\n def get_success_url(self):\n return '/event/view'\n\nclass DeleteEvent(LoginRequiredMixin, DeleteView):\n model = Event\n success_url = \"/event/view\"\n\nclass EventDetails(DetailView):\n model = Event\n\nclass AttendeeDetails(DetailView):\n model = Event\n\nclass SignUp(CreateView): \n form_class = UserCreationForm\n success_url = reverse_lazy('login')\n template_name = 'signup.html'\n\nclass ViewProfile(LoginRequiredMixin, UserPassesTestMixin, DetailView):\n model = User\n\n def test_func(self):\n return self.request.user.id == int(self.kwargs['pk'])\n\n def get_context_data(self, **kwargs):\n context = super(ViewProfile, self).get_context_data(**kwargs)\n context['events'] = Event.objects.all()\n return context\n\nclass EditProfile(LoginRequiredMixin, UpdateView):\n model = User\n form_class = UserCreateForm\n success_url = reverse_lazy('login')\n template_name = 'signup.html'\n \n # def user_events(self, pk):\n # current_user=User.objects.get(pk=pk)\n # print(User.objects)\n # object_list = Event.objects.filter(creator=current_user)\n # return object_list\n \n# Searches for events based on zip code query \nclass searchEventList(ListView):\n model = Event\n \n def get_queryset(self):\n query = self.request.GET.get('find_nearby')\n object_list = Event.objects.filter(Q(location__icontains=query))\n return object_list\n\n#Takes request from event_details.html and add current user to attendee list\ndef addAttendee(request, pk): \n Event.objects.get(pk=pk).attendees.add(request.user)\n print(Event.objects.get(pk=pk).creator)\n return redirect(f'/event/details/{pk}')\n"
},
{
"alpha_fraction": 0.5410447716712952,
"alphanum_fraction": 0.5509950518608093,
"avg_line_length": 30.239999771118164,
"blob_id": "9c62216b3a05355adc7a1b1c02b067d1341a3087",
"content_id": "44214b7c7245e483a830d833f01cf001b2b5f34f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 804,
"license_type": "no_license",
"max_line_length": 102,
"num_lines": 25,
"path": "/wk2dy4/frontend/src/components/Detail.js",
"repo_name": "ktwalsh/trace_camp",
"src_encoding": "UTF-8",
"text": "import React, { useState, useEffect } from 'react'\r\nimport { getKick } from '../apiservice'\r\nimport {NavLink} from 'react-router-dom'\r\nfunction Detail(props) {\r\n const [kick, setKick] = useState({})\r\n const [loading, setLoading] = useState(true)\r\n\r\n useEffect(() => {\r\n const id = props.match.params.id\r\n getKick(id).then((res => {\r\n setKick(res.data)\r\n }))\r\n }, [])\r\n return (\r\n <div className='container'>\r\n <h1 className='text-center'>Detail</h1>\r\n <h2>Blurb: {kick.blurb}</h2>\r\n <h2>Backers: {kick.backers}</h2>\r\n <h2>Pledged: {kick.pledged}</h2>\r\n <NavLink to={`update/${kick.id}`} className=\"btn btn-primary\">Update Kickstarter</NavLink>\r\n </div>\r\n )\r\n}\r\n\r\nexport default Detail;"
},
{
"alpha_fraction": 0.7440860271453857,
"alphanum_fraction": 0.7763440608978271,
"avg_line_length": 45.5,
"blob_id": "58deaa507810273c6abc60a79bde40700ab5c5de",
"content_id": "41b4b83f1306840ea6d339c9b8bc68639e6e0c6e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 465,
"license_type": "no_license",
"max_line_length": 96,
"num_lines": 10,
"path": "/wk3d1-livecode/product/crud/models.py",
"repo_name": "ktwalsh/trace_camp",
"src_encoding": "UTF-8",
"text": "from django.db import models\nfrom django.core.validators import MaxValueValidator, MinValueValidator\n\n# Create your models here.\nclass ProductModel(models.Model):\n name = models.CharField(max_length=140)\n brand = models.CharField(max_length=140)\n price = models.DecimalField(decimal_places=2, max_digits=10)\n description = models.CharField(max_length=280)\n ethic_rating = models.IntegerField(validators=[MaxValueValidator(10), MinValueValidator(0)])\n"
},
{
"alpha_fraction": 0.7731958627700806,
"alphanum_fraction": 0.7731958627700806,
"avg_line_length": 18.399999618530273,
"blob_id": "8e05b9caaf49fd00bd82903c4b0324c26b8b55aa",
"content_id": "5e052c7232c13ea5ded67970898bf52f819a86a8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 97,
"license_type": "no_license",
"max_line_length": 35,
"num_lines": 5,
"path": "/nasagram/nasacomment/apps.py",
"repo_name": "ktwalsh/trace_camp",
"src_encoding": "UTF-8",
"text": "from django.apps import AppConfig\n\n\nclass NasacommentConfig(AppConfig):\n name = 'nasacomment'\n"
},
{
"alpha_fraction": 0.5717689394950867,
"alphanum_fraction": 0.5771292448043823,
"avg_line_length": 29.129629135131836,
"blob_id": "64f222fd0e60a09551be11966b3f12e26b243c10",
"content_id": "d4c51a741c3208411be0bfede114ed2c437c13f6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": true,
"language": "JavaScript",
"length_bytes": 1679,
"license_type": "no_license",
"max_line_length": 96,
"num_lines": 54,
"path": "/react-blog/blog/src/components/Blog.js",
"repo_name": "ktwalsh/trace_camp",
"src_encoding": "UTF-8",
"text": "import React, {useState} from 'react'\r\n\r\n\r\n\r\nfunction Blog(props) {\r\n const [bloglist, setBloglist] = useState({})\r\n const [title,setTitle] = useState([\"Title\"])\r\n const [body,setBody] = useState([\"Body\"])\r\n const[bvalue, setbValue] = useState(\"\")\r\n const[tvalue, settValue] = useState(\"\")\r\n\r\n function handlebChange(event) {\r\n setbValue(event.target.value) \r\n }\r\n\r\n function handletChange(event) {\r\n settValue(event.target.value) \r\n }\r\n\r\n function addItem(e) {\r\n e.preventDefault()\r\n setBody(body.concat(bvalue));\r\n setTitle(title.concat(tvalue));\r\n setbValue(\"\");\r\n settValue(\"\");\r\n }\r\n\r\n console.log(body)\r\n console.log(title)\r\n return(\r\n <div>\r\n <h1>Blogs</h1>\r\n <ul>\r\n \r\n </ul>\r\n <form onSubmit={addItem}>\r\n <input type=\"text\" placeholder=\"title\" value={tvalue} onChange={handletChange}/>\r\n <input type=\"text\" placeholder=\"body\" value={bvalue} onChange={handlebChange}/>\r\n <input type=\"submit\" value=\"Create Blog Post\" />\r\n </form>\r\n </div>\r\n )\r\n}\r\n\r\nexport default Blog;\r\n// ASSIGNMENT: Create A React Blog\r\n// Minimum goal is to accomplish steps 1 and 3\r\n// 1. Create a State that holds a list of blog posts. Blog post will include a title and text\r\n// 2. Include starter blog posts for testing\r\n// 3. Create a Form that creates blog posts\r\n// 4. Add seperate pages for Home page and page that lists and creates posts(Routers)\r\n// 5. Create a Page that Lists The Blog Posts (Context Api)\r\n\r\n// When done finish portfolio"
},
{
"alpha_fraction": 0.543367326259613,
"alphanum_fraction": 0.5484693646430969,
"avg_line_length": 22.625,
"blob_id": "7ad872e1661697bb95745b3c769a17d1ac4c699c",
"content_id": "0048a2b10d2289b2883fa2952804509319fb16b2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 392,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 16,
"path": "/project_2/attendee-checker/src/components/Home.js",
"repo_name": "ktwalsh/trace_camp",
"src_encoding": "UTF-8",
"text": "import React from 'react'\r\nimport { BrowserRouter, NavLink } from 'react-router-dom'\r\n\r\nfunction Home () {\r\n return (\r\n <div>\r\n <h1>Welcome to my EventBrite API website!</h1>\r\n <NavLink to=\"/myevents\">Go to my events</NavLink>\r\n <br />\r\n <NavLink to=\"/allevents\">See all events</NavLink>\r\n </div>\r\n\r\n )\r\n}\r\n\r\nexport default Home"
},
{
"alpha_fraction": 0.731249988079071,
"alphanum_fraction": 0.731249988079071,
"avg_line_length": 25,
"blob_id": "9406705c2beeed44153fc117b0bb5680898c9c52",
"content_id": "54ca4fc2333b0d07568f62b8cd418c3b7716ac65",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 160,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 6,
"path": "/wk3d1-livecode/product/crud/urls.py",
"repo_name": "ktwalsh/trace_camp",
"src_encoding": "UTF-8",
"text": "from django.urls import path\r\nfrom views import ProductCreateView\r\n\r\nurlpatterns = [\r\n path('create/', ProductCreateView.as_view(), name=\"product_create\")\r\n]"
},
{
"alpha_fraction": 0.5630990266799927,
"alphanum_fraction": 0.5646964907646179,
"avg_line_length": 24.5510196685791,
"blob_id": "23b785b9584aa960d9b5b8262dc5ab1ee59ac91f",
"content_id": "3736071c4096b8b3b2234ad26b63625ca4079471",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 1252,
"license_type": "no_license",
"max_line_length": 165,
"num_lines": 49,
"path": "/api_fetch/app/src/App.js",
"repo_name": "ktwalsh/trace_camp",
"src_encoding": "UTF-8",
"text": "import React, { useEffect, useState } from 'react';\nimport './App.css';\nimport axios from 'axios'\n\n\nconst getPlanets = (page) => {\n return axios.get(`https://swapi.co/api/planets/?page=${page}`);\n}\n\n\nfunction App() {\n var planets = []\n\n const [ planetData, setPlanetData ] = useState([]);\n const [ page, setPage ] = useState(1);\n const [ tableData, setTableData ] = useState([]);\n\n const parsePlanet = (planet) => {\n return <tr> <td>{planet.name}</td> <td>{planet.population}</td> <td>{planet.rotation_period}</td> <td>{planet.orbital_period}</td> <td>{planet.climate}</td></tr>\n }\n\n\n useEffect(() => {\n console.log(page)\n getPlanets(page).then((response) => {setPlanetData(response.data.results.map(parsePlanet)\n )\n console.log(response)\n })\n }, [page])\nconsole.log(planetData)\n return (\n <div className=\"App\">\n <input type=\"number\" value={page} onChange = {(e) => setPage(e.target.value)} max=\"7\" />\n <table>\n <tr>\n <th>Planet</th>\n <th>Population</th>\n <th>Rotational Period</th>\n <th>Orbital Period</th>\n <th>Climate</th>\n </tr> \n {planetData}\n </table>\n </div>\n );\n \n}\n\nexport default App;\n"
},
{
"alpha_fraction": 0.6928104758262634,
"alphanum_fraction": 0.6928104758262634,
"avg_line_length": 22.538461685180664,
"blob_id": "552b0df7a72e8355f6660614dab026840b4bebb3",
"content_id": "6ff867da03ba53788dbab197def694974558eec3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 612,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 26,
"path": "/portfolio/blog/views.py",
"repo_name": "ktwalsh/trace_camp",
"src_encoding": "UTF-8",
"text": "from django.shortcuts import render\nfrom django.views.generic.edit import CreateView, UpdateView, DeleteView\nfrom django.views.generic.list import ListView\nfrom blog.models import Post\n\n# Create your views here.\nclass PostCreate(CreateView):\n model = Post\n fields = ['title', 'body']\n\n def get_success_url(self):\n return '/blog/list'\n\nclass PostList(ListView):\n model = Post\n\nclass PostUpdate(UpdateView):\n model = Post\n fields = ['title', 'body']\n\n def get_success_url(self):\n return '/blog/list'\n\nclass PostDelete(DeleteView):\n model = Post\n success_url = '/blog/list'\n"
},
{
"alpha_fraction": 0.6753122210502625,
"alphanum_fraction": 0.6753122210502625,
"avg_line_length": 34.89655303955078,
"blob_id": "b297a732b08525f29dcef76d999e73c4f871b496",
"content_id": "894cea8fa3ad9c22d5eedc4776d069cd3d1c0d42",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 1041,
"license_type": "no_license",
"max_line_length": 86,
"num_lines": 29,
"path": "/project_2/attendee-checker/src/App.js",
"repo_name": "ktwalsh/trace_camp",
"src_encoding": "UTF-8",
"text": "import React from 'react';\nimport './App.css';\nimport { getEvents } from './eventWorker'\nimport Event from \"./components/Event\"\nimport Attendee from './components/Attendee';\nimport { BrowserRouter, Route, Link } from 'react-router-dom'\nimport Details from './components/Details';\nimport Home from './components/Home';\nimport AllEvents from \"./components/AllEvents\"\nimport EventDetail from './components/EventDetail'\nimport CreateEvent from './components/CreateEvent'\n\nfunction App() {\n return (\n <div>\n <BrowserRouter>\n <Route path=\"/\" exact component={Home} />\n <Route path=\"/allevents\" exact component={AllEvents} />\n <Route path=\"/event/:event_id\" component={EventDetail} />\n <Route path=\"/myevents\" component={Event} />\n <Route path='/create' component={CreateEvent} />\n <Route path=\"/attendees/:event_id/\" exact component={Attendee} />\n <Route path=\"/attendees/:event_id/details/:attendee_id\" component={Details} />\n </BrowserRouter>\n </div>\n );\n}\n\nexport default App;\n"
},
{
"alpha_fraction": 0.5009451508522034,
"alphanum_fraction": 0.5009451508522034,
"avg_line_length": 31.125,
"blob_id": "cd4e71c0bb0e76a50704324ee98253ee1191311e",
"content_id": "3313cd846b567aacc468bd5439da0040d3fb7335",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 1058,
"license_type": "no_license",
"max_line_length": 163,
"num_lines": 32,
"path": "/project_2/attendee-checker/src/components/Attendee.js",
"repo_name": "ktwalsh/trace_camp",
"src_encoding": "UTF-8",
"text": "import React, { useState, useEffect } from 'react'\r\nimport { getAttendees } from '../eventWorker';\r\nimport { BrowserRouter, Link } from 'react-router-dom'\r\n\r\n\r\n\r\nfunction Attendee({ match }) {\r\n const [attendees, setAttendees] = useState([])\r\n const parseAttendees = (attendees) => {\r\n return <tbody key={attendees.id}><tr><td >{attendees.profile.name}</td><td><Link to={`details/${attendees.id}`}>{attendees.status}</Link></td></tr></tbody>\r\n }\r\n useEffect(() => {\r\n getAttendees(match.params.event_id).then((response) => {\r\n setAttendees(response.data.attendees.map(parseAttendees))\r\n })\r\n }, [match.params.event_id])\r\n return (\r\n <div className=\"App\">\r\n <table>\r\n <thead>\r\n <tr>\r\n <th>Attendee Name</th>\r\n <th>Attendee Status</th>\r\n </tr>\r\n </thead>\r\n {attendees}\r\n </table>\r\n </div>\r\n )\r\n}\r\n\r\nexport default Attendee"
}
] | 29 |